text (stringlengths 5–22M) | id (stringlengths 12–177) | metadata (dict) | __index_level_0__ (int64 0–1.37k)
---|---|---|---|
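Each row's `text` field holds one UFO source file from cascadia-code, usually a GLIF (Glyph Interchange Format, format 2) document, and `metadata` records its `file_path`, `repo_id`, and `token_count`. Below is a minimal sketch of how a row's GLIF text might be parsed with Python's standard `xml.etree.ElementTree`; the `row` dict mirrors the columns above with values abridged from the first record, and the variable names are illustrative, not part of the dataset.

```python
import xml.etree.ElementTree as ET

# One dataset row, shaped like the columns above (values abridged
# from the "square-off" record below).
row = {
    "text": """<?xml version='1.0' encoding='UTF-8'?>
<glyph name="square-off" format="2">
  <advance width="1200"/>
  <unicode hex="F12EE"/>
  <outline>
    <contour>
      <point x="1180" y="200" type="line"/>
      <point x="90" y="1290" type="line"/>
    </contour>
  </outline>
</glyph>""",
    "metadata": {"repo_id": "cascadia-code", "token_count": 322},
}

root = ET.fromstring(row["text"])
name = root.get("name")                            # "square-off"
advance = int(root.find("advance").get("width"))   # 1200
codepoint = root.find("unicode").get("hex")        # "F12EE"

# Contours are lists of points; type is None for off-curve
# (quadratic control) points such as those in "star-plus".
contours = [
    [(int(p.get("x")), int(p.get("y")), p.get("type"))
     for p in contour.findall("point")]
    for contour in root.findall("./outline/contour")
]
print(name, advance, codepoint, [len(c) for c in contours])
```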
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="square-off" format="2">
<advance width="1200"/>
<unicode hex="F12EE"/>
<note>
square-off
</note>
<outline>
<contour>
<point x="1180" y="200" type="line"/>
<point x="90" y="1290" type="line"/>
<point x="20" y="1220" type="line"/>
<point x="124" y="1117" type="line"/>
<point x="124" y="226" type="line"/>
<point x="1014" y="226" type="line"/>
<point x="1110" y="130" type="line"/>
</contour>
<contour>
<point x="1118" y="402" type="line"/>
<point x="1118" y="1220" type="line"/>
<point x="300" y="1220" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/square-off.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/square-off.glif",
"repo_id": "cascadia-code",
"token_count": 322
}
| 661 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="star-plus" format="2">
<advance width="1200"/>
<unicode hex="F1563"/>
<note>
star-plus
</note>
<outline>
<contour>
<point x="600" y="402" type="line"/>
<point x="616" y="533"/>
<point x="815" y="710"/>
<point x="949" y="710" type="qcurve" smooth="yes"/>
<point x="995" y="710" type="line"/>
<point x="1180" y="873" type="line"/>
<point x="763" y="909" type="line"/>
<point x="600" y="1290" type="line"/>
<point x="437" y="909" type="line"/>
<point x="20" y="873" type="line"/>
<point x="333" y="593" type="line"/>
<point x="241" y="187" type="line"/>
</contour>
<contour>
<point x="1006" y="593" type="line"/>
<point x="891" y="593" type="line"/>
<point x="891" y="419" type="line"/>
<point x="717" y="419" type="line"/>
<point x="717" y="304" type="line"/>
<point x="891" y="304" type="line"/>
<point x="891" y="130" type="line"/>
<point x="1006" y="130" type="line"/>
<point x="1006" y="304" type="line"/>
<point x="1180" y="304" type="line"/>
<point x="1180" y="419" type="line"/>
<point x="1006" y="419" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star-plus.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star-plus.glif",
"repo_id": "cascadia-code",
"token_count": 606
}
| 662 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="step-forward-2" format="2">
<advance width="1200"/>
<unicode hex="F04D8"/>
<note>
step-forward-2
</note>
<outline>
<contour>
<point x="294" y="324" type="line"/>
<point x="460" y="324" type="line"/>
<point x="460" y="1096" type="line"/>
<point x="294" y="1096" type="line"/>
</contour>
<contour>
<point x="572" y="324" type="line"/>
<point x="1180" y="710" type="line"/>
<point x="572" y="1096" type="line"/>
</contour>
<contour>
<point x="20" y="324" type="line"/>
<point x="186" y="324" type="line"/>
<point x="186" y="1096" type="line"/>
<point x="20" y="1096" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/step-forward-2.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/step-forward-2.glif",
"repo_id": "cascadia-code",
"token_count": 359
}
| 663 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="subdirectory-arrow-right" format="2">
<advance width="1200"/>
<unicode hex="F060D"/>
<note>
subdirectory-arrow-right
</note>
<outline>
<contour>
<point x="716" y="980" type="line"/>
<point x="607" y="871" type="line"/>
<point x="886" y="592" type="line"/>
<point x="176" y="592" type="line"/>
<point x="176" y="1368" type="line"/>
<point x="20" y="1368" type="line"/>
<point x="20" y="440" type="line"/>
<point x="886" y="440" type="line"/>
<point x="607" y="161" type="line"/>
<point x="716" y="52" type="line"/>
<point x="1180" y="516" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/subdirectory-arrow-right.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/subdirectory-arrow-right.glif",
"repo_id": "cascadia-code",
"token_count": 328
}
| 664 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="tally-mark-1" format="2">
<advance width="1200"/>
<unicode hex="F1ABC"/>
<note>
tally-mark-1
</note>
<outline>
<contour>
<point x="700" y="1420" type="line"/>
<point x="500" y="1420" type="line"/>
<point x="500" y="0" type="line"/>
<point x="700" y="0" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tally-mark-1.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tally-mark-1.glif",
"repo_id": "cascadia-code",
"token_count": 184
}
| 665 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="temperature-kelvin" format="2">
<advance width="1200"/>
<unicode hex="F0506"/>
<note>
temperature-kelvin
</note>
<outline>
<contour>
<point x="32" y="0" type="line"/>
<point x="316" y="0" type="line"/>
<point x="316" y="462" type="line"/>
<point x="485" y="648" type="line"/>
<point x="826" y="0" type="line"/>
<point x="1168" y="0" type="line"/>
<point x="684" y="874" type="line"/>
<point x="1168" y="1420" type="line"/>
<point x="791" y="1420" type="line"/>
<point x="316" y="852" type="line"/>
<point x="316" y="1420" type="line"/>
<point x="32" y="1420" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/temperature-kelvin.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/temperature-kelvin.glif",
"repo_id": "cascadia-code",
"token_count": 352
}
| 666 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="timer-sand-paused" format="2">
<advance width="1200"/>
<unicode hex="F19A0"/>
<note>
timer-sand-paused
</note>
<outline>
<contour>
<point x="831" y="1059" type="line"/>
<point x="600" y="827" type="line"/>
<point x="369" y="1059" type="line"/>
<point x="20" y="1059" type="line"/>
<point x="20" y="361" type="line"/>
<point x="369" y="361" type="line"/>
<point x="600" y="593" type="line"/>
<point x="831" y="361" type="line"/>
<point x="1180" y="361" type="line"/>
<point x="1180" y="1059" type="line"/>
</contour>
<contour>
<point x="134" y="479" type="line"/>
<point x="134" y="941" type="line"/>
<point x="339" y="941" type="line"/>
<point x="570" y="710" type="line"/>
<point x="339" y="479" type="line"/>
</contour>
<contour>
<point x="861" y="941" type="line"/>
<point x="1066" y="941" type="line"/>
<point x="1066" y="479" type="line"/>
<point x="861" y="479" type="line"/>
<point x="630" y="710" type="line"/>
</contour>
<contour>
<point x="785" y="710" type="line"/>
<point x="902" y="593" type="line"/>
<point x="949" y="593" type="line"/>
<point x="949" y="710" type="line"/>
</contour>
<contour>
<point x="251" y="710" type="line"/>
<point x="251" y="593" type="line"/>
<point x="298" y="593" type="line"/>
<point x="415" y="710" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/timer-sand-paused.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/timer-sand-paused.glif",
"repo_id": "cascadia-code",
"token_count": 745
}
| 667 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="toggle-switch-off-outline" format="2">
<advance width="1200"/>
<unicode hex="F0A19"/>
<note>
toggle-switch-off-outline
</note>
<outline>
<contour>
<point x="948" y="1026"/>
<point x="864" y="1026" type="qcurve" smooth="yes"/>
<point x="336" y="1026" type="line" smooth="yes"/>
<point x="252" y="1026"/>
<point x="106" y="942"/>
<point x="20" y="796"/>
<point x="20" y="624"/>
<point x="106" y="478"/>
<point x="252" y="394"/>
<point x="336" y="394" type="qcurve" smooth="yes"/>
<point x="864" y="394" type="line" smooth="yes"/>
<point x="948" y="394"/>
<point x="1094" y="478"/>
<point x="1180" y="624"/>
<point x="1180" y="796"/>
<point x="1094" y="942"/>
</contour>
<contour>
<point x="950" y="500"/>
<point x="864" y="500" type="qcurve" smooth="yes"/>
<point x="336" y="500" type="line" smooth="yes"/>
<point x="250" y="500"/>
<point x="126" y="624"/>
<point x="126" y="796"/>
<point x="250" y="920"/>
<point x="336" y="920" type="qcurve" smooth="yes"/>
<point x="864" y="920" type="line" smooth="yes"/>
<point x="950" y="920"/>
<point x="1074" y="796"/>
<point x="1074" y="624"/>
</contour>
<contour>
<point x="403" y="868"/>
<point x="336" y="868" type="qcurve" smooth="yes"/>
<point x="272" y="868"/>
<point x="178" y="777"/>
<point x="178" y="643"/>
<point x="272" y="552"/>
<point x="403" y="552"/>
<point x="494" y="643"/>
<point x="494" y="777"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/toggle-switch-off-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/toggle-switch-off-outline.glif",
"repo_id": "cascadia-code",
"token_count": 834
}
| 668 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="transit-connection" format="2">
<advance width="1200"/>
<unicode hex="F0D3C"/>
<note>
transit-connection
</note>
<outline>
<contour>
<point x="793" y="650"/>
<point x="793" y="710" type="qcurve" smooth="yes"/>
<point x="793" y="770"/>
<point x="721" y="870"/>
<point x="663" y="891" type="qcurve"/>
<point x="663" y="1045" type="line"/>
<point x="721" y="1063"/>
<point x="793" y="1163"/>
<point x="793" y="1227" type="qcurve" smooth="yes"/>
<point x="793" y="1305"/>
<point x="682" y="1420"/>
<point x="518" y="1420"/>
<point x="407" y="1305"/>
<point x="407" y="1227" type="qcurve" smooth="yes"/>
<point x="407" y="1163"/>
<point x="479" y="1063"/>
<point x="537" y="1045" type="qcurve"/>
<point x="537" y="891" type="line"/>
<point x="479" y="870"/>
<point x="407" y="770"/>
<point x="407" y="650"/>
<point x="479" y="550"/>
<point x="537" y="529" type="qcurve"/>
<point x="537" y="375" type="line"/>
<point x="479" y="357"/>
<point x="407" y="257"/>
<point x="407" y="193" type="qcurve" smooth="yes"/>
<point x="407" y="115"/>
<point x="518" y="0"/>
<point x="682" y="0"/>
<point x="793" y="115"/>
<point x="793" y="193" type="qcurve" smooth="yes"/>
<point x="793" y="257"/>
<point x="721" y="357"/>
<point x="663" y="375" type="qcurve"/>
<point x="663" y="529" type="line"/>
<point x="721" y="550"/>
</contour>
<contour>
<point x="573" y="1290"/>
<point x="600" y="1290" type="qcurve" smooth="yes"/>
<point x="627" y="1290"/>
<point x="663" y="1254"/>
<point x="663" y="1199"/>
<point x="627" y="1160"/>
<point x="573" y="1160"/>
<point x="537" y="1199"/>
<point x="537" y="1254"/>
</contour>
<contour>
<point x="627" y="130"/>
<point x="600" y="130" type="qcurve" smooth="yes"/>
<point x="573" y="130"/>
<point x="537" y="166"/>
<point x="537" y="221"/>
<point x="573" y="260"/>
<point x="627" y="260"/>
<point x="663" y="221"/>
<point x="663" y="166"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/transit-connection.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/transit-connection.glif",
"repo_id": "cascadia-code",
"token_count": 1149
}
| 669 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="triangle-wave" format="2">
<advance width="1200"/>
<unicode hex="F147C"/>
<note>
triangle-wave
</note>
<outline>
<contour>
<point x="1052" y="710" type="line"/>
<point x="886" y="364" type="line"/>
<point x="309" y="1290" type="line"/>
<point x="20" y="710" type="line"/>
<point x="148" y="710" type="line"/>
<point x="314" y="1056" type="line"/>
<point x="891" y="130" type="line"/>
<point x="1180" y="710" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/triangle-wave.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/triangle-wave.glif",
"repo_id": "cascadia-code",
"token_count": 263
}
| 670 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="truck-fast-outline" format="2">
<advance width="1200"/>
<unicode hex="F129C"/>
<note>
truck-fast-outline
</note>
<outline>
<contour>
<point x="59" y="866" type="line"/>
<point x="567" y="866" type="line"/>
<point x="528" y="944" type="line"/>
<point x="20" y="944" type="line"/>
</contour>
<contour>
<point x="110" y="710" type="line"/>
<point x="618" y="710" type="line"/>
<point x="579" y="788" type="line"/>
<point x="71" y="788" type="line"/>
</contour>
<contour>
<point x="887" y="371"/>
<point x="841" y="417"/>
<point x="841" y="483"/>
<point x="887" y="527"/>
<point x="950" y="527"/>
<point x="997" y="483"/>
<point x="997" y="417"/>
<point x="950" y="371"/>
<point x="919" y="371" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="1099" y="710" type="line"/>
<point x="867" y="710" type="line"/>
<point x="867" y="839" type="line"/>
<point x="997" y="839" type="line"/>
</contour>
<contour>
<point x="364" y="371"/>
<point x="320" y="417"/>
<point x="320" y="483"/>
<point x="364" y="527"/>
<point x="430" y="527"/>
<point x="477" y="483"/>
<point x="477" y="417"/>
<point x="430" y="371"/>
<point x="399" y="371" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="867" y="918" type="line"/>
<point x="867" y="1128" type="line"/>
<point x="242" y="1128" type="line" smooth="yes"/>
<point x="198" y="1128"/>
<point x="137" y="1067"/>
<point x="137" y="1023" type="qcurve"/>
<point x="762" y="1023" type="line"/>
<point x="762" y="554" type="line"/>
<point x="513" y="554" type="line"/>
<point x="467" y="605"/>
<point x="328" y="605"/>
<point x="281" y="554" type="qcurve"/>
<point x="242" y="554" type="line"/>
<point x="242" y="632" type="line"/>
<point x="137" y="632" type="line"/>
<point x="137" y="449" type="line"/>
<point x="242" y="449" type="line"/>
<point x="242" y="385"/>
<point x="333" y="292"/>
<point x="462" y="292"/>
<point x="555" y="385"/>
<point x="555" y="449" type="qcurve"/>
<point x="762" y="449" type="line"/>
<point x="762" y="385"/>
<point x="853" y="292"/>
<point x="985" y="292"/>
<point x="1075" y="385"/>
<point x="1075" y="449" type="qcurve"/>
<point x="1180" y="449" type="line"/>
<point x="1180" y="710" type="line"/>
<point x="1024" y="918" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/truck-fast-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/truck-fast-outline.glif",
"repo_id": "cascadia-code",
"token_count": 1350
}
| 671 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="view-quilt-outline" format="2">
<advance width="1200"/>
<unicode hex="F148E"/>
<note>
view-quilt-outline
</note>
<outline>
<contour>
<point x="1180" y="1153" type="line"/>
<point x="20" y="1153" type="line"/>
<point x="20" y="267" type="line"/>
<point x="1180" y="267" type="line"/>
</contour>
<contour>
<point x="362" y="405" type="line"/>
<point x="157" y="405" type="line"/>
<point x="157" y="1018" type="line"/>
<point x="362" y="1018" type="line"/>
</contour>
<contour>
<point x="704" y="405" type="line"/>
<point x="499" y="405" type="line"/>
<point x="499" y="641" type="line"/>
<point x="704" y="641" type="line"/>
</contour>
<contour>
<point x="1043" y="641" type="line"/>
<point x="1043" y="405" type="line"/>
<point x="838" y="405" type="line"/>
<point x="838" y="641" type="line"/>
</contour>
<contour>
<point x="1043" y="779" type="line"/>
<point x="499" y="779" type="line"/>
<point x="499" y="1018" type="line"/>
<point x="1043" y="1018" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-quilt-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-quilt-outline.glif",
"repo_id": "cascadia-code",
"token_count": 582
}
| 672 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="wechat" format="2">
<advance width="1200"/>
<unicode hex="F0611"/>
<note>
wechat
</note>
<outline>
<contour>
<point x="595" y="1176"/>
<point x="456" y="1176" type="qcurve" smooth="yes"/>
<point x="336" y="1176"/>
<point x="137" y="1080"/>
<point x="20" y="920"/>
<point x="20" y="827" type="qcurve" smooth="yes"/>
<point x="20" y="745"/>
<point x="104" y="604"/>
<point x="181" y="555" type="qcurve"/>
<point x="134" y="419" type="line"/>
<point x="281" y="506" type="line"/>
<point x="363" y="479"/>
<point x="450" y="479" type="qcurve"/>
<point x="426" y="536"/>
<point x="426" y="593" type="qcurve" smooth="yes"/>
<point x="426" y="688"/>
<point x="535" y="849"/>
<point x="723" y="941"/>
<point x="831" y="941" type="qcurve" smooth="yes"/>
<point x="853" y="941"/>
<point x="864" y="941" type="qcurve"/>
<point x="821" y="1045"/>
</contour>
<contour>
<point x="257" y="1029"/>
<point x="281" y="1029" type="qcurve" smooth="yes"/>
<point x="306" y="1029"/>
<point x="339" y="996"/>
<point x="339" y="947"/>
<point x="306" y="914"/>
<point x="257" y="914"/>
<point x="222" y="947"/>
<point x="222" y="996"/>
</contour>
<contour>
<point x="546" y="1029"/>
<point x="570" y="1029" type="qcurve" smooth="yes"/>
<point x="595" y="1029"/>
<point x="630" y="996"/>
<point x="630" y="947"/>
<point x="595" y="914"/>
<point x="546" y="914"/>
<point x="513" y="947"/>
<point x="513" y="996"/>
</contour>
<contour>
<point x="927" y="884"/>
<point x="831" y="884" type="qcurve" smooth="yes"/>
<point x="739" y="884"/>
<point x="578" y="805"/>
<point x="483" y="672"/>
<point x="483" y="514"/>
<point x="578" y="381"/>
<point x="739" y="304"/>
<point x="831" y="304" type="qcurve" smooth="yes"/>
<point x="891" y="304"/>
<point x="943" y="318" type="qcurve"/>
<point x="1066" y="244" type="line"/>
<point x="1028" y="353" type="line"/>
<point x="1098" y="394"/>
<point x="1180" y="522"/>
<point x="1180" y="672"/>
<point x="1087" y="805"/>
</contour>
<contour>
<point x="693" y="740"/>
<point x="717" y="740" type="qcurve" smooth="yes"/>
<point x="742" y="740"/>
<point x="774" y="705"/>
<point x="774" y="656"/>
<point x="742" y="623"/>
<point x="693" y="623"/>
<point x="657" y="656"/>
<point x="657" y="705"/>
</contour>
<contour>
<point x="924" y="740"/>
<point x="949" y="740" type="qcurve" smooth="yes"/>
<point x="973" y="740"/>
<point x="1006" y="705"/>
<point x="1006" y="656"/>
<point x="973" y="623"/>
<point x="924" y="623"/>
<point x="891" y="656"/>
<point x="891" y="705"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/wechat.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/wechat.glif",
"repo_id": "cascadia-code",
"token_count": 1566
}
| 673 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="widgets-outline" format="2">
<advance width="1200"/>
<unicode hex="F1355"/>
<note>
widgets-outline
</note>
<outline>
<contour>
<point x="676" y="952" type="line"/>
<point x="842" y="1121" type="line"/>
<point x="1011" y="952" type="line"/>
<point x="842" y="786" type="line"/>
</contour>
<contour>
<point x="141" y="1090" type="line"/>
<point x="380" y="1090" type="line"/>
<point x="380" y="851" type="line"/>
<point x="141" y="851" type="line"/>
</contour>
<contour>
<point x="741" y="490" type="line"/>
<point x="980" y="490" type="line"/>
<point x="980" y="251" type="line"/>
<point x="741" y="251" type="line"/>
</contour>
<contour>
<point x="1180" y="952" type="line"/>
<point x="842" y="1290" type="line"/>
<point x="501" y="952" type="line"/>
<point x="842" y="611" type="line"/>
<point x="620" y="611" type="line"/>
<point x="620" y="130" type="line"/>
<point x="1101" y="130" type="line"/>
<point x="1101" y="611" type="line"/>
<point x="842" y="611" type="line"/>
</contour>
<contour>
<point x="501" y="730" type="line"/>
<point x="501" y="1211" type="line"/>
<point x="20" y="1211" type="line"/>
<point x="20" y="730" type="line"/>
</contour>
<contour>
<point x="141" y="490" type="line"/>
<point x="380" y="490" type="line"/>
<point x="380" y="251" type="line"/>
<point x="141" y="251" type="line"/>
</contour>
<contour>
<point x="501" y="130" type="line"/>
<point x="501" y="611" type="line"/>
<point x="20" y="611" type="line"/>
<point x="20" y="130" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/widgets-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/widgets-outline.glif",
"repo_id": "cascadia-code",
"token_count": 881
}
| 674 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="window-shutter" format="2">
<advance width="1200"/>
<unicode hex="F111C"/>
<note>
window-shutter
</note>
<outline>
<contour>
<point x="20" y="967" type="line"/>
<point x="150" y="967" type="line"/>
<point x="150" y="193" type="line"/>
<point x="277" y="193" type="line"/>
<point x="277" y="967" type="line"/>
<point x="923" y="967" type="line"/>
<point x="923" y="193" type="line"/>
<point x="1050" y="193" type="line"/>
<point x="1050" y="967" type="line"/>
<point x="1180" y="967" type="line"/>
<point x="1180" y="1227" type="line"/>
<point x="20" y="1227" type="line"/>
</contour>
<contour>
<point x="343" y="773" type="line"/>
<point x="857" y="773" type="line"/>
<point x="857" y="903" type="line"/>
<point x="343" y="903" type="line"/>
</contour>
<contour>
<point x="343" y="580" type="line"/>
<point x="857" y="580" type="line"/>
<point x="857" y="710" type="line"/>
<point x="343" y="710" type="line"/>
</contour>
<contour>
<point x="343" y="387" type="line"/>
<point x="857" y="387" type="line"/>
<point x="857" y="517" type="line"/>
<point x="343" y="517" type="line"/>
</contour>
<contour>
<point x="343" y="193" type="line"/>
<point x="857" y="193" type="line"/>
<point x="857" y="323" type="line"/>
<point x="343" y="323" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/window-shutter.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/window-shutter.glif",
"repo_id": "cascadia-code",
"token_count": 746
}
| 675 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>.notdef</key>
<string>_notdef.glif</string>
<key>uni2630</key>
<string>uni2630.glif</string>
<key>uniE0A0</key>
<string>uniE_0A_0.glif</string>
<key>uniE0A1</key>
<string>uniE_0A_1.glif</string>
<key>uniE0A2</key>
<string>uniE_0A_2.glif</string>
<key>uniE0A3</key>
<string>uniE_0A_3.glif</string>
<key>uniE0B0</key>
<string>uniE_0B_0.glif</string>
<key>uniE0B1</key>
<string>uniE_0B_1.glif</string>
<key>uniE0B2</key>
<string>uniE_0B_2.glif</string>
<key>uniE0B3</key>
<string>uniE_0B_3.glif</string>
<key>uniE0B4</key>
<string>uniE_0B_4.glif</string>
<key>uniE0B5</key>
<string>uniE_0B_5.glif</string>
<key>uniE0B6</key>
<string>uniE_0B_6.glif</string>
<key>uniE0B7</key>
<string>uniE_0B_7.glif</string>
<key>uniE0B8</key>
<string>uniE_0B_8.glif</string>
<key>uniE0B9</key>
<string>uniE_0B_9.glif</string>
<key>uniE0BA</key>
<string>uniE_0B_A_.glif</string>
<key>uniE0BB</key>
<string>uniE_0B_B_.glif</string>
<key>uniE0BC</key>
<string>uniE_0B_C_.glif</string>
<key>uniE0BD</key>
<string>uniE_0B_D_.glif</string>
<key>uniE0BE</key>
<string>uniE_0B_E_.glif</string>
<key>uniE0BF</key>
<string>uniE_0B_F_.glif</string>
<key>uniE0C0</key>
<string>uniE_0C_0.glif</string>
<key>uniE0C1</key>
<string>uniE_0C_1.glif</string>
<key>uniE0C2</key>
<string>uniE_0C_2.glif</string>
<key>uniE0C3</key>
<string>uniE_0C_3.glif</string>
<key>uniE0C4</key>
<string>uniE_0C_4.glif</string>
<key>uniE0C5</key>
<string>uniE_0C_5.glif</string>
<key>uniE0C6</key>
<string>uniE_0C_6.glif</string>
<key>uniE0C7</key>
<string>uniE_0C_7.glif</string>
<key>uniE0C8</key>
<string>uniE_0C_8.glif</string>
<key>uniE0CA</key>
<string>uniE_0C_A_.glif</string>
<key>uniE0CC</key>
<string>uniE_0C_C_.glif</string>
<key>uniE0CD</key>
<string>uniE_0C_D_.glif</string>
<key>uniE0CE</key>
<string>uniE_0C_E_.glif</string>
<key>uniE0CF</key>
<string>uniE_0C_F_.glif</string>
<key>uniE0D0</key>
<string>uniE_0D_0.glif</string>
<key>uniE0D1</key>
<string>uniE_0D_1.glif</string>
<key>uniE0D2</key>
<string>uniE_0D_2.glif</string>
<key>uniE0D4</key>
<string>uniE_0D_4.glif</string>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/PowerlineExtraSymbols.ufo/glyphs/contents.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/PowerlineExtraSymbols.ufo/glyphs/contents.plist",
"repo_id": "cascadia-code",
"token_count": 1464
}
| 676 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="uniE0BA" format="2">
<advance width="1200"/>
<unicode hex="E0BA"/>
<note>
uniE0BA
</note>
<outline>
<contour>
<point x="1954" y="2280" type="line"/>
<point x="-754" y="-426" type="line"/>
<point x="1954" y="-426" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/PowerlineExtraSymbols.ufo/glyphs/uniE_0B_A_.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/PowerlineExtraSymbols.ufo/glyphs/uniE_0B_A_.glif",
"repo_id": "cascadia-code",
"token_count": 164
}
| 677 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="uniE0CC" format="2">
<advance width="1200"/>
<unicode hex="E0CC"/>
<note>
uniE0CC
</note>
<outline>
<contour>
<point x="1432" y="388" type="line"/>
<point x="1749" y="937" type="line"/>
<point x="1432" y="1485" type="line"/>
<point x="799" y="1485" type="line"/>
<point x="482" y="937" type="line"/>
<point x="799" y="388" type="line"/>
</contour>
<contour>
<point x="85" y="-397" type="line"/>
<point x="401" y="152" type="line"/>
<point x="85" y="700" type="line"/>
<point x="-549" y="700" type="line"/>
<point x="-549" y="-397" type="line"/>
</contour>
<contour>
<point x="85" y="1212" type="line"/>
<point x="401" y="1761" type="line"/>
<point x="85" y="2309" type="line"/>
<point x="-549" y="2309" type="line"/>
<point x="-549" y="1212" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/PowerlineExtraSymbols.ufo/glyphs/uniE_0C_C_.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/PowerlineExtraSymbols.ufo/glyphs/uniE_0C_C_.glif",
"repo_id": "cascadia-code",
"token_count": 468
}
| 678 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="POWER SYMBOL" format="2">
<advance width="1200"/>
<unicode hex="23FB"/>
<note>
POWER SYMBOL
</note>
<outline>
<contour>
<point x="599" y="1374" type="line" smooth="yes"/>
<point x="545" y="1374"/>
<point x="501" y="1329"/>
<point x="501" y="1275" type="curve" smooth="yes"/>
<point x="501" y="1275"/>
<point x="501" y="1274"/>
<point x="501" y="1274" type="curve" smooth="yes"/>
<point x="501" y="626" type="line"/>
<point x="501" y="625" type="line" smooth="yes"/>
<point x="501" y="570"/>
<point x="546" y="526"/>
<point x="601" y="526" type="curve" smooth="yes"/>
<point x="655" y="526"/>
<point x="700" y="570"/>
<point x="700" y="625" type="curve" smooth="yes"/>
<point x="700" y="626" type="line"/>
<point x="700" y="1274" type="line"/>
<point x="700" y="1275" type="line" smooth="yes"/>
<point x="700" y="1330"/>
<point x="655" y="1374"/>
<point x="601" y="1374" type="curve" smooth="yes"/>
</contour>
<contour>
<point x="244" y="1048" type="line" smooth="yes"/>
<point x="219" y="1048"/>
<point x="186" y="1033"/>
<point x="169" y="1014" type="curve" smooth="yes"/>
<point x="77" y="911"/>
<point x="20" y="774"/>
<point x="20" y="626" type="curve" smooth="yes"/>
<point x="20" y="307"/>
<point x="281" y="46"/>
<point x="600" y="46" type="curve" smooth="yes"/>
<point x="919" y="46"/>
<point x="1180" y="306"/>
<point x="1180" y="626" type="curve" smooth="yes"/>
<point x="1180" y="774"/>
<point x="1123" y="911"/>
<point x="1031" y="1014" type="curve" smooth="yes"/>
<point x="1015" y="1033"/>
<point x="981" y="1048"/>
<point x="956" y="1048" type="curve" smooth="yes"/>
<point x="901" y="1048"/>
<point x="857" y="1004"/>
<point x="857" y="949" type="curve" smooth="yes"/>
<point x="857" y="927"/>
<point x="869" y="897"/>
<point x="883" y="881" type="curve" smooth="yes"/>
<point x="944" y="814"/>
<point x="982" y="724"/>
<point x="982" y="626" type="curve" smooth="yes"/>
<point x="982" y="413"/>
<point x="812" y="244"/>
<point x="600" y="244" type="curve" smooth="yes"/>
<point x="388" y="244"/>
<point x="218" y="414"/>
<point x="218" y="626" type="curve" smooth="yes"/>
<point x="218" y="724"/>
<point x="256" y="814"/>
<point x="317" y="881" type="curve" smooth="yes"/>
<point x="331" y="897"/>
<point x="343" y="927"/>
<point x="343" y="949" type="curve" smooth="yes"/>
<point x="343" y="1003"/>
<point x="299" y="1047"/>
<point x="246" y="1048" type="curve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/Unicode_IEC_symbol_font.ufo/glyphs/P_O_W_E_R_ S_Y_M_B_O_L_.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/Unicode_IEC_symbol_font.ufo/glyphs/P_O_W_E_R_ S_Y_M_B_O_L_.glif",
"repo_id": "cascadia-code",
"token_count": 1406
}
| 679 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="arrow-small-left" format="2">
<advance width="1200"/>
<unicode hex="EA9E"/>
<note>
arrow-small-left
</note>
<outline>
<contour>
<point x="567" y="524" type="line"/>
<point x="421" y="665" type="line"/>
<point x="916" y="665" type="line"/>
<point x="916" y="755" type="line"/>
<point x="421" y="755" type="line"/>
<point x="567" y="901" type="line"/>
<point x="506" y="962" type="line"/>
<point x="284" y="741" type="line"/>
<point x="284" y="679" type="line"/>
<point x="506" y="458" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/arrow-small-left.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/arrow-small-left.glif",
"repo_id": "cascadia-code",
"token_count": 304
}
| 680 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="briefcase" format="2">
<advance width="1200"/>
<unicode hex="EAAC"/>
<note>
briefcase
</note>
<outline>
<contour>
<point x="1180" y="960" type="line"/>
<point x="1140" y="1000" type="line"/>
<point x="848" y="1000" type="line"/>
<point x="848" y="1124" type="line"/>
<point x="808" y="1168" type="line"/>
<point x="392" y="1168" type="line"/>
<point x="352" y="1124" type="line"/>
<point x="352" y="1000" type="line"/>
<point x="60" y="1000" type="line"/>
<point x="20" y="960" type="line"/>
<point x="20" y="296" type="line"/>
<point x="60" y="252" type="line"/>
<point x="1140" y="252" type="line"/>
<point x="1180" y="296" type="line"/>
</contour>
<contour>
<point x="436" y="1000" type="line"/>
<point x="436" y="1084" type="line"/>
<point x="768" y="1084" type="line"/>
<point x="768" y="1000" type="line"/>
</contour>
<contour>
<point x="104" y="916" type="line"/>
<point x="1100" y="916" type="line"/>
<point x="1100" y="854" type="line"/>
<point x="768" y="668" type="line"/>
<point x="768" y="708" type="line"/>
<point x="724" y="752" type="line"/>
<point x="476" y="752" type="line"/>
<point x="436" y="708" type="line"/>
<point x="436" y="668" type="line"/>
<point x="104" y="858" type="line"/>
</contour>
<contour>
<point x="516" y="668" type="line"/>
<point x="684" y="668" type="line"/>
<point x="684" y="584" type="line"/>
<point x="516" y="584" type="line"/>
</contour>
<contour>
<point x="1100" y="336" type="line"/>
<point x="104" y="336" type="line"/>
<point x="104" y="761" type="line"/>
<point x="436" y="571" type="line"/>
<point x="436" y="544" type="line"/>
<point x="476" y="504" type="line"/>
<point x="724" y="504" type="line"/>
<point x="768" y="544" type="line"/>
<point x="768" y="571" type="line"/>
<point x="1100" y="756" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/briefcase.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/briefcase.glif",
"repo_id": "cascadia-code",
"token_count": 1016
}
| 681 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="chrome-maximize" format="2">
<advance width="1200"/>
<unicode hex="EAB9"/>
<note>
chrome-maximize
</note>
<outline>
<contour>
<point x="1180" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="130" type="line"/>
<point x="1180" y="130" type="line"/>
</contour>
<contour>
<point x="1063" y="1173" type="line"/>
<point x="1063" y="247" type="line"/>
<point x="137" y="247" type="line"/>
<point x="137" y="1173" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/chrome-maximize.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/chrome-maximize.glif",
"repo_id": "cascadia-code",
"token_count": 282
}
| 682 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="code" format="2">
<advance width="1200"/>
<unicode hex="EAC4"/>
<note>
code
</note>
<outline>
<contour>
<point x="273" y="994" type="line"/>
<point x="20" y="737" type="line"/>
<point x="20" y="678" type="line"/>
<point x="273" y="426" type="line"/>
<point x="331" y="484" type="line"/>
<point x="110" y="710" type="line"/>
<point x="331" y="931" type="line"/>
</contour>
<contour>
<point x="1180" y="737" type="line"/>
<point x="927" y="994" type="line"/>
<point x="864" y="931" type="line"/>
<point x="1090" y="710" type="line"/>
<point x="864" y="484" type="line"/>
<point x="927" y="426" type="line"/>
<point x="1180" y="678" type="line"/>
</contour>
<contour>
<point x="774" y="1152" type="line"/>
<point x="349" y="304" type="line"/>
<point x="426" y="268" type="line"/>
<point x="851" y="1112" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/code.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/code.glif",
"repo_id": "cascadia-code",
"token_count": 496
}
| 683 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="desktop-download" format="2">
<advance width="1200"/>
<unicode hex="EA78"/>
<note>
desktop-download
</note>
<outline>
<contour>
<point x="742" y="170" type="line"/>
<point x="538" y="378" type="line"/>
<point x="578" y="418" type="line"/>
<point x="104" y="418" type="line"/>
<point x="104" y="1166" type="line"/>
<point x="1100" y="1166" type="line"/>
<point x="1100" y="524" type="line"/>
<point x="1180" y="440" type="line"/>
<point x="1180" y="1206" type="line"/>
<point x="1140" y="1250" type="line"/>
<point x="60" y="1250" type="line"/>
<point x="20" y="1206" type="line"/>
<point x="20" y="378" type="line"/>
<point x="60" y="334" type="line"/>
<point x="436" y="334" type="line"/>
<point x="436" y="312"/>
<point x="427" y="298" type="qcurve" smooth="yes"/>
<point x="418" y="276"/>
<point x="343" y="254"/>
<point x="268" y="254" type="qcurve"/>
<point x="268" y="170" type="line"/>
</contour>
<contour>
<point x="861" y="170" type="line"/>
<point x="919" y="170" type="line"/>
<point x="1127" y="378" type="line"/>
<point x="1069" y="435" type="line"/>
<point x="932" y="298" type="line"/>
<point x="932" y="834" type="line"/>
<point x="848" y="834" type="line"/>
<point x="848" y="298" type="line"/>
<point x="715" y="435" type="line"/>
<point x="653" y="378" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/desktop-download.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/desktop-download.glif",
"repo_id": "cascadia-code",
"token_count": 748
}
| 684 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="files" format="2">
<advance width="1200"/>
<unicode hex="EAF0"/>
<note>
files
</note>
<outline>
<contour>
<point x="1180" y="1126" type="line"/>
<point x="932" y="1374" type="line"/>
<point x="432" y="1374" type="line"/>
<point x="352" y="1290" type="line"/>
<point x="352" y="1042" type="line"/>
<point x="100" y="1042" type="line"/>
<point x="20" y="958" type="line"/>
<point x="20" y="126" type="line"/>
<point x="100" y="46" type="line"/>
<point x="768" y="46" type="line"/>
<point x="848" y="126" type="line"/>
<point x="848" y="378" type="line"/>
<point x="1109" y="378" type="line"/>
<point x="1180" y="458" type="line"/>
</contour>
<contour>
<point x="932" y="1126" type="line"/>
<point x="932" y="1259" type="line"/>
<point x="1065" y="1126" type="line"/>
</contour>
<contour>
<point x="764" y="378" type="line"/>
<point x="764" y="130" type="line"/>
<point x="100" y="130" type="line"/>
<point x="100" y="958" type="line"/>
<point x="352" y="958" type="line"/>
<point x="352" y="458" type="line"/>
<point x="432" y="378" type="line"/>
</contour>
<contour>
<point x="1096" y="1042" type="line"/>
<point x="1096" y="462" type="line"/>
<point x="432" y="462" type="line"/>
<point x="432" y="1290" type="line"/>
<point x="848" y="1290" type="line"/>
<point x="848" y="1042" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/files.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/files.glif",
"repo_id": "cascadia-code",
"token_count": 771
}
| 685 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="gripper" format="2">
<advance width="1200"/>
<unicode hex="EB04"/>
<note>
gripper
</note>
<outline>
<contour>
<point x="177" y="1133" type="line"/>
<point x="456" y="1133" type="line"/>
<point x="456" y="1420" type="line"/>
<point x="177" y="1420" type="line"/>
</contour>
<contour>
<point x="177" y="566" type="line"/>
<point x="456" y="566" type="line"/>
<point x="456" y="854" type="line"/>
<point x="177" y="854" type="line"/>
</contour>
<contour>
<point x="177" y="0" type="line"/>
<point x="456" y="0" type="line"/>
<point x="456" y="287" type="line"/>
<point x="177" y="287" type="line"/>
</contour>
<contour>
<point x="744" y="1133" type="line"/>
<point x="1023" y="1133" type="line"/>
<point x="1023" y="1420" type="line"/>
<point x="744" y="1420" type="line"/>
</contour>
<contour>
<point x="744" y="566" type="line"/>
<point x="1023" y="566" type="line"/>
<point x="1023" y="854" type="line"/>
<point x="744" y="854" type="line"/>
</contour>
<contour>
<point x="744" y="0" type="line"/>
<point x="1023" y="0" type="line"/>
<point x="1023" y="287" type="line"/>
<point x="744" y="287" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/gripper.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/gripper.glif",
"repo_id": "cascadia-code",
"token_count": 683
}
| 686 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="list-tree" format="2">
<advance width="1200"/>
<unicode hex="EB86"/>
<note>
list-tree
</note>
<outline>
<contour>
<point x="20" y="1195" type="line"/>
<point x="20" y="1097" type="line"/>
<point x="1180" y="1097" type="line"/>
<point x="1180" y="1195" type="line"/>
</contour>
<contour>
<point x="407" y="906" type="line"/>
<point x="407" y="808" type="line"/>
<point x="1082" y="808" type="line"/>
<point x="1082" y="906" type="line"/>
</contour>
<contour>
<point x="309" y="612" type="line"/>
<point x="309" y="519" type="line"/>
<point x="1082" y="519" type="line"/>
<point x="1082" y="612" type="line"/>
</contour>
<contour>
<point x="309" y="323" type="line"/>
<point x="309" y="225" type="line"/>
<point x="1082" y="225" type="line"/>
<point x="1082" y="323" type="line"/>
</contour>
<contour>
<point x="309" y="225" type="line"/>
<point x="407" y="225" type="line"/>
<point x="407" y="1097" type="line"/>
<point x="309" y="1097" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/list-tree.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/list-tree.glif",
"repo_id": "cascadia-code",
"token_count": 575
}
| 687 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="mirror" format="2">
<advance width="1200"/>
<unicode hex="EA69"/>
<note>
mirror
</note>
<outline>
<contour>
<point x="551" y="1290" type="line"/>
<point x="38" y="958" type="line"/>
<point x="20" y="927" type="line"/>
<point x="20" y="165" type="line"/>
<point x="82" y="130" type="line"/>
<point x="600" y="462" type="line"/>
<point x="1118" y="130" type="line"/>
<point x="1180" y="165" type="line"/>
<point x="1180" y="927" type="line"/>
<point x="1162" y="958" type="line"/>
<point x="649" y="1290" type="line"/>
</contour>
<contour>
<point x="560" y="533" type="line"/>
<point x="560" y="533" type="line"/>
<point x="104" y="241" type="line"/>
<point x="104" y="905" type="line"/>
<point x="560" y="1197" type="line"/>
<point x="560" y="958" type="line"/>
<point x="644" y="958" type="line"/>
<point x="644" y="1197" type="line"/>
<point x="1100" y="905" type="line"/>
<point x="1100" y="241" type="line"/>
<point x="644" y="533" type="line"/>
</contour>
<contour>
<point x="852" y="794" type="line"/>
<point x="348" y="794" type="line"/>
<point x="485" y="931" type="line"/>
<point x="427" y="989" type="line"/>
<point x="224" y="781" type="line"/>
<point x="224" y="723" type="line"/>
<point x="427" y="520" type="line"/>
<point x="485" y="577" type="line"/>
<point x="352" y="710" type="line"/>
<point x="852" y="710" type="line"/>
<point x="720" y="577" type="line"/>
<point x="777" y="520" type="line"/>
<point x="981" y="723" type="line"/>
<point x="981" y="781" type="line"/>
<point x="777" y="989" type="line"/>
<point x="715" y="931" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/mirror.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/mirror.glif",
"repo_id": "cascadia-code",
"token_count": 908
}
| 688 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="package" format="2">
<advance width="1200"/>
<unicode hex="EB29"/>
<note>
package
</note>
<outline>
<contour>
<point x="583" y="1199" type="line"/>
<point x="73" y="1061" type="line"/>
<point x="20" y="1018" type="line"/>
<point x="20" y="416" type="line"/>
<point x="49" y="373" type="line"/>
<point x="598" y="221" type="line"/>
<point x="1147" y="373" type="line"/>
<point x="1180" y="416" type="line"/>
<point x="1180" y="1018" type="line"/>
<point x="1123" y="1061" type="line"/>
<point x="607" y="1199" type="line"/>
</contour>
<contour>
<point x="951" y="1018" type="line"/>
<point x="870" y="994" type="line"/>
<point x="598" y="922" type="line"/>
<point x="292" y="1004" type="line"/>
<point x="244" y="1018" type="line"/>
<point x="602" y="1109" type="line"/>
</contour>
<contour>
<point x="106" y="961" type="line"/>
<point x="555" y="841" type="line"/>
<point x="555" y="326" type="line"/>
<point x="106" y="450" type="line"/>
</contour>
<contour>
<point x="822" y="889" type="line"/>
<point x="822" y="660" type="line"/>
<point x="913" y="684" type="line"/>
<point x="913" y="913" type="line"/>
<point x="1094" y="961" type="line"/>
<point x="1094" y="450" type="line"/>
<point x="645" y="326" type="line"/>
<point x="645" y="841" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/package.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/package.glif",
"repo_id": "cascadia-code",
"token_count": 745
}
| 689 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="project" format="2">
<advance width="1200"/>
<unicode hex="EB30"/>
<note>
project
</note>
<outline>
<contour>
<point x="20" y="1250" type="line"/>
<point x="20" y="170" type="line"/>
<point x="60" y="130" type="line"/>
<point x="1140" y="130" type="line"/>
<point x="1180" y="170" type="line"/>
<point x="1180" y="1250" type="line"/>
<point x="1140" y="1290" type="line"/>
<point x="60" y="1290" type="line"/>
</contour>
<contour>
<point x="104" y="1210" type="line"/>
<point x="1100" y="1210" type="line"/>
<point x="1100" y="214" type="line"/>
<point x="104" y="214" type="line"/>
</contour>
<contour>
<point x="184" y="294" type="line"/>
<point x="352" y="294" type="line"/>
<point x="352" y="1126" type="line"/>
<point x="184" y="1126" type="line"/>
</contour>
<contour>
<point x="684" y="626" type="line"/>
<point x="684" y="1126" type="line"/>
<point x="516" y="1126" type="line"/>
<point x="516" y="626" type="line"/>
</contour>
<contour>
<point x="848" y="462" type="line"/>
<point x="1016" y="462" type="line"/>
<point x="1016" y="1126" type="line"/>
<point x="848" y="1126" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/project.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/project.glif",
"repo_id": "cascadia-code",
"token_count": 653
}
| 690 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="sign-in" format="2">
<advance width="1200"/>
<unicode hex="EA6F"/>
<note>
sign-in
</note>
<outline>
<contour>
<point x="823" y="1221" type="line"/>
<point x="823" y="1150" type="line"/>
<point x="823" y="1155" type="line"/>
<point x="909" y="1064" type="line"/>
<point x="909" y="1264" type="line"/>
<point x="866" y="1311" type="line"/>
<point x="63" y="1311" type="line"/>
<point x="20" y="1264" type="line"/>
<point x="20" y="304" type="line"/>
<point x="49" y="261" type="line"/>
<point x="495" y="109" type="line"/>
<point x="552" y="151" type="line"/>
<point x="552" y="242" type="line"/>
<point x="866" y="242" type="line"/>
<point x="909" y="285" type="line"/>
<point x="909" y="484" type="line"/>
<point x="823" y="394" type="line"/>
<point x="823" y="327" type="line"/>
<point x="552" y="327" type="line"/>
<point x="552" y="1069" type="line"/>
<point x="524" y="1107" type="line"/>
<point x="201" y="1221" type="line"/>
</contour>
<contour>
<point x="462" y="1036" type="line"/>
<point x="462" y="213" type="line"/>
<point x="106" y="332" type="line"/>
<point x="106" y="1155" type="line"/>
</contour>
<contour>
<point x="885" y="964" type="line"/>
<point x="823" y="1026" type="line"/>
<point x="600" y="803" type="line"/>
<point x="600" y="741" type="line"/>
<point x="819" y="522" type="line"/>
<point x="885" y="584" type="line"/>
<point x="743" y="727" type="line"/>
<point x="1180" y="727" type="line"/>
<point x="1180" y="817" type="line"/>
<point x="738" y="817" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/sign-in.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/sign-in.glif",
"repo_id": "cascadia-code",
"token_count": 877
}
| 691 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="symbol-constant" format="2">
<advance width="1200"/>
<unicode hex="EB5D"/>
<note>
symbol-constant
</note>
<outline>
<contour>
<point x="268" y="794" type="line"/>
<point x="932" y="794" type="line"/>
<point x="932" y="878" type="line"/>
<point x="268" y="878" type="line"/>
</contour>
<contour>
<point x="932" y="546" type="line"/>
<point x="932" y="626" type="line"/>
<point x="268" y="626" type="line"/>
<point x="268" y="546" type="line"/>
</contour>
<contour>
<point x="20" y="378" type="line"/>
<point x="104" y="294" type="line"/>
<point x="1100" y="294" type="line"/>
<point x="1180" y="378" type="line"/>
<point x="1180" y="1042" type="line"/>
<point x="1100" y="1126" type="line"/>
<point x="104" y="1126" type="line"/>
<point x="20" y="1042" type="line"/>
</contour>
<contour>
<point x="1100" y="1042" type="line"/>
<point x="1100" y="378" type="line"/>
<point x="104" y="378" type="line"/>
<point x="104" y="1042" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/symbol-constant.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/symbol-constant.glif",
"repo_id": "cascadia-code",
"token_count": 562
}
| 692 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="symbol-ruler" format="2">
<advance width="1200"/>
<unicode hex="EA96"/>
<note>
symbol-ruler
</note>
<outline>
<contour>
<point x="1006" y="1420" type="line"/>
<point x="194" y="1420" type="line"/>
<point x="91" y="1322" type="line"/>
<point x="91" y="103" type="line"/>
<point x="194" y="0" type="line"/>
<point x="1006" y="0" type="line"/>
<point x="1109" y="103" type="line"/>
<point x="1109" y="1322" type="line"/>
</contour>
<contour>
<point x="600" y="1219" type="line"/>
<point x="194" y="1219" type="line"/>
<point x="194" y="1322" type="line"/>
<point x="1006" y="1322" type="line"/>
<point x="1006" y="103" type="line"/>
<point x="194" y="103" type="line"/>
<point x="194" y="201" type="line"/>
<point x="399" y="201" type="line"/>
<point x="399" y="304" type="line"/>
<point x="194" y="304" type="line"/>
<point x="194" y="509" type="line"/>
<point x="600" y="509" type="line"/>
<point x="600" y="607" type="line"/>
<point x="194" y="607" type="line"/>
<point x="194" y="813" type="line"/>
<point x="399" y="813" type="line"/>
<point x="399" y="916" type="line"/>
<point x="194" y="916" type="line"/>
<point x="194" y="1116" type="line"/>
<point x="600" y="1116" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/symbol-ruler.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/symbol-ruler.glif",
"repo_id": "cascadia-code",
"token_count": 693
}
| 693 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="terminal-tmux" format="2">
<advance width="1200"/>
<unicode hex="EBC8"/>
<note>
terminal-tmux
</note>
<outline>
<contour>
<point x="1180" y="1290" type="line"/>
<point x="1132" y="1333" type="line"/>
<point x="63" y="1333" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="130" type="line"/>
<point x="63" y="87" type="line"/>
<point x="1132" y="87" type="line"/>
<point x="1180" y="130" type="line"/>
</contour>
<contour>
<point x="552" y="1247" type="line"/>
<point x="552" y="753" type="line"/>
<point x="552" y="263" type="line"/>
<point x="110" y="263" type="line"/>
<point x="110" y="1247" type="line"/>
</contour>
<contour>
<point x="1090" y="710" type="line"/>
<point x="1090" y="263" type="line"/>
<point x="643" y="263" type="line"/>
<point x="643" y="710" type="line"/>
</contour>
<contour>
<point x="1090" y="1247" type="line"/>
<point x="1090" y="800" type="line"/>
<point x="643" y="800" type="line"/>
<point x="643" y="1247" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/terminal-tmux.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/terminal-tmux.glif",
"repo_id": "cascadia-code",
"token_count": 584
}
| 694 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="type-hierarchy" format="2">
<advance width="1200"/>
<unicode hex="EBB9"/>
<note>
type-hierarchy
</note>
<outline>
<contour>
<point x="1180" y="311" type="line"/>
<point x="1132" y="353" type="line"/>
<point x="976" y="353" type="line"/>
<point x="823" y="506" type="line"/>
<point x="823" y="843" type="line"/>
<point x="776" y="891" type="line"/>
<point x="643" y="891" type="line"/>
<point x="643" y="1067" type="line"/>
<point x="686" y="1067" type="line"/>
<point x="733" y="1109" type="line"/>
<point x="733" y="1290" type="line"/>
<point x="686" y="1333" type="line"/>
<point x="510" y="1333" type="line"/>
<point x="467" y="1290" type="line"/>
<point x="467" y="1109" type="line"/>
<point x="510" y="1067" type="line"/>
<point x="552" y="1067" type="line"/>
<point x="552" y="891" type="line"/>
<point x="419" y="891" type="line"/>
<point x="377" y="843" type="line"/>
<point x="377" y="506" type="line"/>
<point x="224" y="353" type="line"/>
<point x="63" y="353" type="line"/>
<point x="20" y="311" type="line"/>
<point x="20" y="130" type="line"/>
<point x="63" y="87" type="line"/>
<point x="243" y="87" type="line"/>
<point x="286" y="130" type="line"/>
<point x="286" y="292" type="line"/>
<point x="438" y="444" type="line"/>
<point x="757" y="444" type="line"/>
<point x="909" y="292" type="line"/>
<point x="909" y="130" type="line"/>
<point x="957" y="87" type="line"/>
<point x="1132" y="87" type="line"/>
<point x="1180" y="130" type="line"/>
</contour>
<contour>
<point x="552" y="1157" type="line"/>
<point x="552" y="1247" type="line"/>
<point x="643" y="1247" type="line"/>
<point x="643" y="1157" type="line"/>
</contour>
<contour>
<point x="467" y="534" type="line"/>
<point x="467" y="800" type="line"/>
<point x="733" y="800" type="line"/>
<point x="733" y="534" type="line"/>
</contour>
<contour>
<point x="196" y="263" type="line"/>
<point x="196" y="178" type="line"/>
<point x="110" y="178" type="line"/>
<point x="110" y="263" type="line"/>
</contour>
<contour>
<point x="1090" y="263" type="line"/>
<point x="1090" y="178" type="line"/>
<point x="999" y="178" type="line"/>
<point x="999" y="263" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/type-hierarchy.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/type-hierarchy.glif",
"repo_id": "cascadia-code",
"token_count": 1236
}
| 695 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="warning" format="2">
<advance width="1200"/>
<unicode hex="EA6C"/>
<note>
warning
</note>
<outline>
<contour>
<point x="20" y="230" type="line"/>
<point x="55" y="172" type="line"/>
<point x="1145" y="172" type="line"/>
<point x="1180" y="230" type="line"/>
<point x="635" y="1248" type="line"/>
<point x="565" y="1248" type="line"/>
</contour>
<contour>
<point x="1074" y="252" type="line"/>
<point x="126" y="252" type="line"/>
<point x="600" y="1142" type="line"/>
</contour>
<contour>
<point x="547" y="336" type="line"/>
<point x="653" y="336" type="line"/>
<point x="653" y="420" type="line"/>
<point x="547" y="420" type="line"/>
</contour>
<contour>
<point x="653" y="504" type="line"/>
<point x="653" y="836" type="line"/>
<point x="547" y="836" type="line"/>
<point x="547" y="504" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/warning.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/warning.glif",
"repo_id": "cascadia-code",
"token_count": 492
}
| 696 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="GTK" format="2">
<advance width="1200"/>
<unicode hex="F362"/>
<note>
GTK
</note>
<outline>
<contour>
<point x="37" y="1160" type="line"/>
<point x="461" y="842" type="line"/>
<point x="676" y="908" type="line"/>
<point x="676" y="1336" type="line"/>
</contour>
<contour>
<point x="693" y="916" type="line"/>
<point x="1158" y="1060" type="line"/>
<point x="693" y="1334" type="line"/>
</contour>
<contour>
<point x="47" y="482" type="line"/>
<point x="443" y="600" type="line"/>
<point x="443" y="818" type="line"/>
<point x="20" y="1136" type="line"/>
</contour>
<contour>
<point x="695" y="884" type="line"/>
<point x="695" y="668" type="line"/>
<point x="1126" y="367" type="line"/>
<point x="1180" y="1033" type="line"/>
</contour>
<contour>
<point x="473" y="815" type="line"/>
<point x="473" y="610" type="line"/>
<point x="678" y="671" type="line"/>
<point x="678" y="879" type="line"/>
</contour>
<contour>
<point x="473" y="590" type="line"/>
<point x="473" y="84" type="line"/>
<point x="1116" y="353" type="line"/>
<point x="686" y="654" type="line"/>
</contour>
<contour>
<point x="47" y="465" type="line"/>
<point x="443" y="93" type="line"/>
<point x="443" y="583" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/glyphs/G_T_K_.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/glyphs/G_T_K_.glif",
"repo_id": "cascadia-code",
"token_count": 723
}
| 697 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>com.schriftgestaltung.appVersion</key>
<string>3241</string>
<key>com.schriftgestaltung.disablesAutomaticAlignment</key>
<true/>
<key>com.schriftgestaltung.fontMaster.customParameters</key>
<array>
<dict>
<key>name</key>
<string>CVT Table</string>
<key>value</key>
<string>(
17,
324
)</string>
</dict>
</array>
<key>com.schriftgestaltung.fontMasterID</key>
<string>m01</string>
<key>com.schriftgestaltung.formatVersion</key>
<integer>3</integer>
<key>com.schriftgestaltung.master.name</key>
<string>logos</string>
<key>com.schriftgestaltung.useGlyphOrder</key>
<true/>
<key>com.schriftgestaltung.useNiceNames</key>
<false/>
<key>public.glyphOrder</key>
<array>
<string>Alpine</string>
<string>AOSC OS</string>
<string>Apple</string>
<string>Arch Linux</string>
<string>CentOS</string>
<string>CoreOS</string>
<string>Debian</string>
<string>Devuan</string>
<string>Docker</string>
<string>elementary OS</string>
<string>Fedora</string>
<string>Fedora (inverse)</string>
<string>FreeBSD</string>
<string>Gentoo</string>
<string>Linux Mint</string>
<string>Linux Mint (inverse)</string>
<string>Mageia</string>
<string>Mandriva</string>
<string>Manjaro</string>
<string>NixOS</string>
<string>OpenSUSE</string>
<string>Raspberry pi</string>
<string>Red Hat</string>
<string>Sabayon</string>
<string>Slackware</string>
<string>Slackware (inverse)</string>
<string>Tux</string>
<string>Ubuntu</string>
<string>Ubuntu (inverse)</string>
<string>Alma Linux</string>
<string>ArchLabs</string>
<string>Artix Linux</string>
<string>Budgie</string>
<string>Deepin</string>
<string>Endeavour OS</string>
<string>Ferris</string>
<string>Flathub</string>
<string>GNU Guix</string>
<string>illumos</string>
<string>Kali Linux</string>
<string>OpenBSD</string>
<string>Parrot OS</string>
<string>Pop!_OS</string>
<string>Rocky Linux</string>
<string>Snappy</string>
<string>Solus</string>
<string>Void</string>
<string>Zorin OS</string>
<string>Codeberg</string>
<string>KDE Neon</string>
<string>KDE Plasma</string>
<string>Kubuntu</string>
<string>Kubuntu (inverse)</string>
<string>Forgejo</string>
<string>FreeCAD</string>
<string>Garuda Linux</string>
<string>GIMP</string>
<string>Gitea</string>
<string>Hyperbola GNU/Linux-libre</string>
<string>Inkscape</string>
<string>Kdenlive</string>
<string>Krita</string>
<string>LXLE Linux</string>
<string>MX Linux</string>
<string>Parabola GNU/Linux-libre</string>
<string>Puppy Linux</string>
<string>QubesOS</string>
<string>Tails</string>
<string>Trisquel GNU/Linux</string>
<string>Archcraft</string>
<string>ArcoLinux</string>
<string>BigLinux</string>
<string>Crystal Linux</string>
<string>Loc-OS</string>
<string>XeroLinux</string>
<string>Arduino</string>
<string>KiCad</string>
<string>Octoprint</string>
<string>OpenSCAD</string>
<string>OSH</string>
<string>OSHWA</string>
<string>Prusa Slicer</string>
<string>RepRap</string>
<string>RISC-V</string>
<string>Awesome WM</string>
<string>bspwm</string>
<string>dwm</string>
<string>Enlightenment</string>
<string>Fluxbox</string>
<string>Hyprland</string>
<string>i3</string>
<string>JWM</string>
<string>Qtile</string>
<string>Sway</string>
<string>Xmonad</string>
<string>Cinnamon</string>
<string>freedesktop.org</string>
<string>GNOME</string>
<string>GTK</string>
<string>LXDE</string>
<string>LXQt</string>
<string>MATE</string>
<string>Vanilla OS</string>
<string>Wayland</string>
<string>XFCE</string>
<string>Xorg</string>
<string>F-droid</string>
<string>FOSDEM</string>
<string>OSI</string>
<string>Wikimedia</string>
<string>mpv</string>
<string>Neovim</string>
<string>Thunderbird</string>
<string>Tor Browser</string>
<string>VS Codium</string>
</array>
<key>public.postscriptNames</key>
<dict>
<key>AOSC OS</key>
<string>uniF301</string>
<key>Alma Linux</key>
<string>uniF31D</string>
<key>Alpine</key>
<string>uniF300</string>
<key>Apple</key>
<string>uniF302</string>
<key>Arch Linux</key>
<string>uniF303</string>
<key>ArchLabs</key>
<string>uniF31E</string>
<key>Archcraft</key>
<string>uniF345</string>
<key>ArcoLinux</key>
<string>uniF346</string>
<key>Arduino</key>
<string>uniF34B</string>
<key>Artix Linux</key>
<string>uniF31F</string>
<key>Awesome WM</key>
<string>uniF354</string>
<key>BigLinux</key>
<string>uniF347</string>
<key>Budgie</key>
<string>uniF320</string>
<key>CentOS</key>
<string>uniF304</string>
<key>Cinnamon</key>
<string>uniF35F</string>
<key>Codeberg</key>
<string>uniF330</string>
<key>CoreOS</key>
<string>uniF305</string>
<key>Crystal Linux</key>
<string>uniF348</string>
<key>Debian</key>
<string>uniF306</string>
<key>Deepin</key>
<string>uniF321</string>
<key>Devuan</key>
<string>uniF307</string>
<key>Docker</key>
<string>uniF308</string>
<key>Endeavour OS</key>
<string>uniF322</string>
<key>Enlightenment</key>
<string>uniF357</string>
<key>F-droid</key>
<string>uniF36A</string>
<key>FOSDEM</key>
<string>uniF36B</string>
<key>Fedora</key>
<string>uniF30A</string>
<key>Fedora (inverse)</key>
<string>uniF30B</string>
<key>Ferris</key>
<string>uniF323</string>
<key>Flathub</key>
<string>uniF324</string>
<key>Fluxbox</key>
<string>uniF358</string>
<key>Forgejo</key>
<string>uniF335</string>
<key>FreeBSD</key>
<string>uniF30C</string>
<key>FreeCAD</key>
<string>uniF336</string>
<key>GIMP</key>
<string>uniF338</string>
<key>GNOME</key>
<string>uniF361</string>
<key>GNU Guix</key>
<string>uniF325</string>
<key>GTK</key>
<string>uniF362</string>
<key>Garuda Linux</key>
<string>uniF337</string>
<key>Gentoo</key>
<string>uniF30D</string>
<key>Gitea</key>
<string>uniF339</string>
<key>Hyperbola GNU/Linux-libre</key>
<string>uniF33A</string>
<key>Hyprland</key>
<string>uniF359</string>
<key>Inkscape</key>
<string>uniF33B</string>
<key>JWM</key>
<string>uniF35B</string>
<key>KDE Neon</key>
<string>uniF331</string>
<key>KDE Plasma</key>
<string>uniF332</string>
<key>Kali Linux</key>
<string>uniF327</string>
<key>Kdenlive</key>
<string>uniF33C</string>
<key>KiCad</key>
<string>uniF34C</string>
<key>Krita</key>
<string>uniF33D</string>
<key>Kubuntu</key>
<string>uniF333</string>
<key>Kubuntu (inverse)</key>
<string>uniF334</string>
<key>LXDE</key>
<string>uniF363</string>
<key>LXLE Linux</key>
<string>uniF33E</string>
<key>LXQt</key>
<string>uniF364</string>
<key>Linux Mint</key>
<string>uniF30E</string>
<key>Linux Mint (inverse)</key>
<string>uniF30F</string>
<key>Loc-OS</key>
<string>uniF349</string>
<key>MATE</key>
<string>uniF365</string>
<key>MX Linux</key>
<string>uniF33F</string>
<key>Mageia</key>
<string>uniF310</string>
<key>Mandriva</key>
<string>uniF311</string>
<key>Manjaro</key>
<string>uniF312</string>
<key>Neovim</key>
<string>uniF36F</string>
<key>NixOS</key>
<string>uniF313</string>
<key>OSH</key>
<string>uniF34F</string>
<key>OSHWA</key>
<string>uniF350</string>
<key>OSI</key>
<string>uniF36C</string>
<key>Octoprint</key>
<string>uniF34D</string>
<key>OpenBSD</key>
<string>uniF328</string>
<key>OpenSCAD</key>
<string>uniF34E</string>
<key>OpenSUSE</key>
<string>uniF314</string>
<key>Parabola GNU/Linux-libre</key>
<string>uniF340</string>
<key>Parrot OS</key>
<string>uniF329</string>
<key>Pop!_OS</key>
<string>uniF32A</string>
<key>Prusa Slicer</key>
<string>uniF351</string>
<key>Puppy Linux</key>
<string>uniF341</string>
<key>Qtile</key>
<string>uniF35C</string>
<key>QubesOS</key>
<string>uniF342</string>
<key>RISC-V</key>
<string>uniF353</string>
<key>Raspberry pi</key>
<string>uniF315</string>
<key>Red Hat</key>
<string>uniF316</string>
<key>RepRap</key>
<string>uniF352</string>
<key>Rocky Linux</key>
<string>uniF32B</string>
<key>Sabayon</key>
<string>uniF317</string>
<key>Slackware</key>
<string>uniF318</string>
<key>Slackware (inverse)</key>
<string>uniF319</string>
<key>Snappy</key>
<string>uniF32C</string>
<key>Solus</key>
<string>uniF32D</string>
<key>Sway</key>
<string>uniF35D</string>
<key>Tails</key>
<string>uniF343</string>
<key>Thunderbird</key>
<string>uniF370</string>
<key>Tor Browser</key>
<string>uniF371</string>
<key>Trisquel GNU/Linux</key>
<string>uniF344</string>
<key>Tux</key>
<string>uniF31A</string>
<key>Ubuntu</key>
<string>uniF31B</string>
<key>Ubuntu (inverse)</key>
<string>uniF31C</string>
<key>VS Codium</key>
<string>uniF372</string>
<key>Vanilla OS</key>
<string>uniF366</string>
<key>Void</key>
<string>uniF32E</string>
<key>Wayland</key>
<string>uniF367</string>
<key>Wikimedia</key>
<string>uniF36D</string>
<key>XFCE</key>
<string>uniF368</string>
<key>XeroLinux</key>
<string>uniF34A</string>
<key>Xmonad</key>
<string>uniF35E</string>
<key>Xorg</key>
<string>uniF369</string>
<key>Zorin OS</key>
<string>uniF32F</string>
<key>bspwm</key>
<string>uniF355</string>
<key>dwm</key>
<string>uniF356</string>
<key>elementary OS</key>
<string>uniF309</string>
<key>freedesktop.org</key>
<string>uniF360</string>
<key>i3</key>
<string>uniF35A</string>
<key>illumos</key>
<string>uniF326</string>
<key>mpv</key>
<string>uniF36E</string>
</dict>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/lib.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/lib.plist",
"repo_id": "cascadia-code",
"token_count": 5689
}
| 698 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="arrow-up-left" format="2">
<advance width="1200"/>
<unicode hex="F45C"/>
<note>
arrow-up-left
</note>
<outline>
<contour>
<point x="309" y="511" type="line" smooth="yes"/>
<point x="309" y="489"/>
<point x="341" y="457"/>
<point x="386" y="457"/>
<point x="418" y="489"/>
<point x="418" y="511" type="qcurve" smooth="yes"/>
<point x="418" y="815" type="line"/>
<point x="513" y="720" type="line" smooth="yes"/>
<point x="608" y="625"/>
<point x="799" y="434"/>
<point x="799" y="434" type="qcurve"/>
<point x="815" y="419"/>
<point x="837" y="419" type="qcurve" smooth="yes"/>
<point x="859" y="419"/>
<point x="891" y="451"/>
<point x="891" y="473" type="qcurve" smooth="yes"/>
<point x="891" y="495"/>
<point x="876" y="511" type="qcurve" smooth="yes"/>
<point x="495" y="892" type="line"/>
<point x="799" y="892" type="line" smooth="yes"/>
<point x="821" y="892"/>
<point x="853" y="924"/>
<point x="853" y="969"/>
<point x="821" y="1001"/>
<point x="799" y="1001" type="qcurve" smooth="yes"/>
<point x="363" y="1001" type="line" smooth="yes"/>
<point x="341" y="1001"/>
<point x="309" y="969"/>
<point x="309" y="947" type="qcurve" smooth="yes"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:49:52 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/arrow-up-left.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/arrow-up-left.glif",
"repo_id": "cascadia-code",
"token_count": 761
}
| 699 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>accessibility</key>
<string>accessibility.glif</string>
<key>accessibility-inset</key>
<string>accessibility-inset.glif</string>
<key>alert</key>
<string>alert.glif</string>
<key>alert-fill</key>
<string>alert-fill.glif</string>
<key>apps</key>
<string>apps.glif</string>
<key>archive</key>
<string>archive.glif</string>
<key>arrow-both</key>
<string>arrow-both.glif</string>
<key>arrow-down</key>
<string>arrow-down.glif</string>
<key>arrow-down-left</key>
<string>arrow-down-left.glif</string>
<key>arrow-down-right</key>
<string>arrow-down-right.glif</string>
<key>arrow-left</key>
<string>arrow-left.glif</string>
<key>arrow-right</key>
<string>arrow-right.glif</string>
<key>arrow-switch</key>
<string>arrow-switch.glif</string>
<key>arrow-up</key>
<string>arrow-up.glif</string>
<key>arrow-up-left</key>
<string>arrow-up-left.glif</string>
<key>arrow-up-right</key>
<string>arrow-up-right.glif</string>
<key>beaker</key>
<string>beaker.glif</string>
<key>bell</key>
<string>bell.glif</string>
<key>bell-fill</key>
<string>bell-fill.glif</string>
<key>bell-slash</key>
<string>bell-slash.glif</string>
<key>blocked</key>
<string>blocked.glif</string>
<key>bold</key>
<string>bold.glif</string>
<key>book</key>
<string>book.glif</string>
<key>bookmark</key>
<string>bookmark.glif</string>
<key>bookmark-fill</key>
<string>bookmark-fill.glif</string>
<key>bookmark-slash</key>
<string>bookmark-slash.glif</string>
<key>bookmark-slash-fill</key>
<string>bookmark-slash-fill.glif</string>
<key>briefcase</key>
<string>briefcase.glif</string>
<key>broadcast</key>
<string>broadcast.glif</string>
<key>browser</key>
<string>browser.glif</string>
<key>bug</key>
<string>bug.glif</string>
<key>cache</key>
<string>cache.glif</string>
<key>calendar</key>
<string>calendar.glif</string>
<key>check</key>
<string>check.glif</string>
<key>check-circle</key>
<string>check-circle.glif</string>
<key>check-circle-fill</key>
<string>check-circle-fill.glif</string>
<key>checkbox</key>
<string>checkbox.glif</string>
<key>checklist</key>
<string>checklist.glif</string>
<key>chevron-down</key>
<string>chevron-down.glif</string>
<key>chevron-left</key>
<string>chevron-left.glif</string>
<key>chevron-right</key>
<string>chevron-right.glif</string>
<key>chevron-up</key>
<string>chevron-up.glif</string>
<key>circle</key>
<string>circle.glif</string>
<key>circle-slash</key>
<string>circle-slash.glif</string>
<key>clock</key>
<string>clock.glif</string>
<key>clock-fill</key>
<string>clock-fill.glif</string>
<key>cloud</key>
<string>cloud.glif</string>
<key>cloud-offline</key>
<string>cloud-offline.glif</string>
<key>code</key>
<string>code.glif</string>
<key>code-of-conduct</key>
<string>code-of-conduct.glif</string>
<key>code-review</key>
<string>code-review.glif</string>
<key>code-square</key>
<string>code-square.glif</string>
<key>codescan</key>
<string>codescan.glif</string>
<key>codescan-checkmark</key>
<string>codescan-checkmark.glif</string>
<key>codespaces</key>
<string>codespaces.glif</string>
<key>columns</key>
<string>columns.glif</string>
<key>command-palette</key>
<string>command-palette.glif</string>
<key>comment</key>
<string>comment.glif</string>
<key>comment-discussion</key>
<string>comment-discussion.glif</string>
<key>commit</key>
<string>commit.glif</string>
<key>container</key>
<string>container.glif</string>
<key>copilot</key>
<string>copilot.glif</string>
<key>copilot-error</key>
<string>copilot-error.glif</string>
<key>copilot-warning</key>
<string>copilot-warning.glif</string>
<key>copy</key>
<string>copy.glif</string>
<key>cpu</key>
<string>cpu.glif</string>
<key>credit-card</key>
<string>credit-card.glif</string>
<key>cross-reference</key>
<string>cross-reference.glif</string>
<key>dash</key>
<string>dash.glif</string>
<key>database</key>
<string>database.glif</string>
<key>dependabot</key>
<string>dependabot.glif</string>
<key>desktop-download</key>
<string>desktop-download.glif</string>
<key>device-camera</key>
<string>device-camera.glif</string>
<key>device-camera-video</key>
<string>device-camera-video.glif</string>
<key>device-desktop</key>
<string>device-desktop.glif</string>
<key>device-mobile</key>
<string>device-mobile.glif</string>
<key>diamond</key>
<string>diamond.glif</string>
<key>diff</key>
<string>diff.glif</string>
<key>diff-added</key>
<string>diff-added.glif</string>
<key>diff-ignored</key>
<string>diff-ignored.glif</string>
<key>diff-modified</key>
<string>diff-modified.glif</string>
<key>diff-removed</key>
<string>diff-removed.glif</string>
<key>diff-renamed</key>
<string>diff-renamed.glif</string>
<key>discussion-closed</key>
<string>discussion-closed.glif</string>
<key>discussion-duplicate</key>
<string>discussion-duplicate.glif</string>
<key>discussion-outdated</key>
<string>discussion-outdated.glif</string>
<key>dot</key>
<string>dot.glif</string>
<key>dot-fill</key>
<string>dot-fill.glif</string>
<key>download</key>
<string>download.glif</string>
<key>duplicate</key>
<string>duplicate.glif</string>
<key>ellipsis</key>
<string>ellipsis.glif</string>
<key>eye</key>
<string>eye.glif</string>
<key>eye-closed</key>
<string>eye-closed.glif</string>
<key>feed-discussion</key>
<string>feed-discussion.glif</string>
<key>feed-forked</key>
<string>feed-forked.glif</string>
<key>feed-heart</key>
<string>feed-heart.glif</string>
<key>feed-merged</key>
<string>feed-merged.glif</string>
<key>feed-person</key>
<string>feed-person.glif</string>
<key>feed-repo</key>
<string>feed-repo.glif</string>
<key>feed-rocket</key>
<string>feed-rocket.glif</string>
<key>feed-star</key>
<string>feed-star.glif</string>
<key>feed-tag</key>
<string>feed-tag.glif</string>
<key>feed-trophy</key>
<string>feed-trophy.glif</string>
<key>file</key>
<string>file.glif</string>
<key>file-added</key>
<string>file-added.glif</string>
<key>file-badge</key>
<string>file-badge.glif</string>
<key>file-binary</key>
<string>file-binary.glif</string>
<key>file-code</key>
<string>file-code.glif</string>
<key>file-diff</key>
<string>file-diff.glif</string>
<key>file-directory</key>
<string>file-directory.glif</string>
<key>file-directory-fill</key>
<string>file-directory-fill.glif</string>
<key>file-directory-open-fill</key>
<string>file-directory-open-fill.glif</string>
<key>file-media</key>
<string>file-media.glif</string>
<key>file-moved</key>
<string>file-moved.glif</string>
<key>file-removed</key>
<string>file-removed.glif</string>
<key>file-submodule</key>
<string>file-submodule.glif</string>
<key>file-symlink-directory</key>
<string>file-symlink-directory.glif</string>
<key>file-symlink-file</key>
<string>file-symlink-file.glif</string>
<key>file-zip</key>
<string>file-zip.glif</string>
<key>filter</key>
<string>filter.glif</string>
<key>fiscal-host</key>
<string>fiscal-host.glif</string>
<key>flame</key>
<string>flame.glif</string>
<key>fold</key>
<string>fold.glif</string>
<key>fold-down</key>
<string>fold-down.glif</string>
<key>fold-up</key>
<string>fold-up.glif</string>
<key>gear</key>
<string>gear.glif</string>
<key>gift</key>
<string>gift.glif</string>
<key>git-branch</key>
<string>git-branch.glif</string>
<key>git-commit</key>
<string>git-commit.glif</string>
<key>git-compare</key>
<string>git-compare.glif</string>
<key>git-merge</key>
<string>git-merge.glif</string>
<key>git-merge-queue</key>
<string>git-merge-queue.glif</string>
<key>git-pull-request</key>
<string>git-pull-request.glif</string>
<key>git-pull-request-closed</key>
<string>git-pull-request-closed.glif</string>
<key>git-pull-request-draft</key>
<string>git-pull-request-draft.glif</string>
<key>globe</key>
<string>globe.glif</string>
<key>goal</key>
<string>goal.glif</string>
<key>grabber</key>
<string>grabber.glif</string>
<key>graph</key>
<string>graph.glif</string>
<key>hash</key>
<string>hash.glif</string>
<key>heading</key>
<string>heading.glif</string>
<key>heart</key>
<string>heart.glif</string>
<key>heart-fill</key>
<string>heart-fill.glif</string>
<key>history</key>
<string>history.glif</string>
<key>home</key>
<string>home.glif</string>
<key>home-fill</key>
<string>home-fill.glif</string>
<key>horizontal-rule</key>
<string>horizontal-rule.glif</string>
<key>hourglass</key>
<string>hourglass.glif</string>
<key>hubot</key>
<string>hubot.glif</string>
<key>id-badge</key>
<string>id-badge.glif</string>
<key>image</key>
<string>image.glif</string>
<key>inbox</key>
<string>inbox.glif</string>
<key>infinity</key>
<string>infinity.glif</string>
<key>info</key>
<string>info.glif</string>
<key>issue-closed</key>
<string>issue-closed.glif</string>
<key>issue-draft</key>
<string>issue-draft.glif</string>
<key>issue-opened</key>
<string>issue-opened.glif</string>
<key>issue-reopened</key>
<string>issue-reopened.glif</string>
<key>issue-tracked-by</key>
<string>issue-tracked-by.glif</string>
<key>issue-tracks</key>
<string>issue-tracks.glif</string>
<key>italic</key>
<string>italic.glif</string>
<key>iterations</key>
<string>iterations.glif</string>
<key>kebab-horizontal</key>
<string>kebab-horizontal.glif</string>
<key>key</key>
<string>key.glif</string>
<key>key-asterisk</key>
<string>key-asterisk.glif</string>
<key>law</key>
<string>law.glif</string>
<key>light-bulb</key>
<string>light-bulb.glif</string>
<key>link</key>
<string>link.glif</string>
<key>link-external</key>
<string>link-external.glif</string>
<key>list-ordered</key>
<string>list-ordered.glif</string>
<key>list-unordered</key>
<string>list-unordered.glif</string>
<key>location</key>
<string>location.glif</string>
<key>lock</key>
<string>lock.glif</string>
<key>log</key>
<string>log.glif</string>
<key>logo-gist</key>
<string>logo-gist.glif</string>
<key>logo-github</key>
<string>logo-github.glif</string>
<key>mail</key>
<string>mail.glif</string>
<key>mark-github</key>
<string>mark-github.glif</string>
<key>markdown</key>
<string>markdown.glif</string>
<key>megaphone</key>
<string>megaphone.glif</string>
<key>mention</key>
<string>mention.glif</string>
<key>meter</key>
<string>meter.glif</string>
<key>milestone</key>
<string>milestone.glif</string>
<key>mirror</key>
<string>mirror.glif</string>
<key>moon</key>
<string>moon.glif</string>
<key>mortar-board</key>
<string>mortar-board.glif</string>
<key>move-to-bottom</key>
<string>move-to-bottom.glif</string>
<key>move-to-end</key>
<string>move-to-end.glif</string>
<key>move-to-start</key>
<string>move-to-start.glif</string>
<key>move-to-top</key>
<string>move-to-top.glif</string>
<key>multi-select</key>
<string>multi-select.glif</string>
<key>mute</key>
<string>mute.glif</string>
<key>no-entry</key>
<string>no-entry.glif</string>
<key>north-star</key>
<string>north-star.glif</string>
<key>note</key>
<string>note.glif</string>
<key>number</key>
<string>number.glif</string>
<key>organization</key>
<string>organization.glif</string>
<key>package</key>
<string>package.glif</string>
<key>package-dependencies</key>
<string>package-dependencies.glif</string>
<key>package-dependents</key>
<string>package-dependents.glif</string>
<key>paintbrush</key>
<string>paintbrush.glif</string>
<key>paper-airplane</key>
<string>paper-airplane.glif</string>
<key>paperclip</key>
<string>paperclip.glif</string>
<key>passkey-fill</key>
<string>passkey-fill.glif</string>
<key>paste</key>
<string>paste.glif</string>
<key>pencil</key>
<string>pencil.glif</string>
<key>people</key>
<string>people.glif</string>
<key>person</key>
<string>person.glif</string>
<key>person-add</key>
<string>person-add.glif</string>
<key>person-fill</key>
<string>person-fill.glif</string>
<key>pin</key>
<string>pin.glif</string>
<key>play</key>
<string>play.glif</string>
<key>plug</key>
<string>plug.glif</string>
<key>plus</key>
<string>plus.glif</string>
<key>plus-circle</key>
<string>plus-circle.glif</string>
<key>project</key>
<string>project.glif</string>
<key>project-roadmap</key>
<string>project-roadmap.glif</string>
<key>project-symlink</key>
<string>project-symlink.glif</string>
<key>project-template</key>
<string>project-template.glif</string>
<key>pulse</key>
<string>pulse.glif</string>
<key>question</key>
<string>question.glif</string>
<key>quote</key>
<string>quote.glif</string>
<key>read</key>
<string>read.glif</string>
<key>rel-file-path</key>
<string>rel-file-path.glif</string>
<key>reply</key>
<string>reply.glif</string>
<key>repo</key>
<string>repo.glif</string>
<key>repo-clone</key>
<string>repo-clone.glif</string>
<key>repo-deleted</key>
<string>repo-deleted.glif</string>
<key>repo-forked</key>
<string>repo-forked.glif</string>
<key>repo-locked</key>
<string>repo-locked.glif</string>
<key>repo-pull</key>
<string>repo-pull.glif</string>
<key>repo-push</key>
<string>repo-push.glif</string>
<key>repo-template</key>
<string>repo-template.glif</string>
<key>report</key>
<string>report.glif</string>
<key>rocket</key>
<string>rocket.glif</string>
<key>rows</key>
<string>rows.glif</string>
<key>rss</key>
<string>rss.glif</string>
<key>ruby</key>
<string>ruby.glif</string>
<key>screen-full</key>
<string>screen-full.glif</string>
<key>screen-normal</key>
<string>screen-normal.glif</string>
<key>search</key>
<string>search.glif</string>
<key>server</key>
<string>server.glif</string>
<key>share</key>
<string>share.glif</string>
<key>share-android</key>
<string>share-android.glif</string>
<key>shield</key>
<string>shield.glif</string>
<key>shield-check</key>
<string>shield-check.glif</string>
<key>shield-lock</key>
<string>shield-lock.glif</string>
<key>shield-slash</key>
<string>shield-slash.glif</string>
<key>shield-x</key>
<string>shield-x.glif</string>
<key>sidebar-collapse</key>
<string>sidebar-collapse.glif</string>
<key>sidebar-expand</key>
<string>sidebar-expand.glif</string>
<key>sign-in</key>
<string>sign-in.glif</string>
<key>sign-out</key>
<string>sign-out.glif</string>
<key>single-select</key>
<string>single-select.glif</string>
<key>skip</key>
<string>skip.glif</string>
<key>skip-fill</key>
<string>skip-fill.glif</string>
<key>sliders</key>
<string>sliders.glif</string>
<key>smiley</key>
<string>smiley.glif</string>
<key>sort-asc</key>
<string>sort-asc.glif</string>
<key>sort-desc</key>
<string>sort-desc.glif</string>
<key>sparkle-fill</key>
<string>sparkle-fill.glif</string>
<key>sponsor-tiers</key>
<string>sponsor-tiers.glif</string>
<key>square</key>
<string>square.glif</string>
<key>square-fill</key>
<string>square-fill.glif</string>
<key>squirrel</key>
<string>squirrel.glif</string>
<key>stack</key>
<string>stack.glif</string>
<key>star</key>
<string>star.glif</string>
<key>star-fill</key>
<string>star-fill.glif</string>
<key>stop</key>
<string>stop.glif</string>
<key>stopwatch</key>
<string>stopwatch.glif</string>
<key>strikethrough</key>
<string>strikethrough.glif</string>
<key>sun</key>
<string>sun.glif</string>
<key>sync</key>
<string>sync.glif</string>
<key>tab</key>
<string>tab.glif</string>
<key>tab-external</key>
<string>tab-external.glif</string>
<key>table</key>
<string>table.glif</string>
<key>tag</key>
<string>tag.glif</string>
<key>tasklist</key>
<string>tasklist.glif</string>
<key>telescope</key>
<string>telescope.glif</string>
<key>telescope-fill</key>
<string>telescope-fill.glif</string>
<key>terminal</key>
<string>terminal.glif</string>
<key>three-bars</key>
<string>three-bars.glif</string>
<key>thumbsdown</key>
<string>thumbsdown.glif</string>
<key>thumbsup</key>
<string>thumbsup.glif</string>
<key>tools</key>
<string>tools.glif</string>
<key>trash</key>
<string>trash.glif</string>
<key>triangle-down</key>
<string>triangle-down.glif</string>
<key>triangle-left</key>
<string>triangle-left.glif</string>
<key>triangle-right</key>
<string>triangle-right.glif</string>
<key>triangle-up</key>
<string>triangle-up.glif</string>
<key>trophy</key>
<string>trophy.glif</string>
<key>typography</key>
<string>typography.glif</string>
<key>unfold</key>
<string>unfold.glif</string>
<key>unlink</key>
<string>unlink.glif</string>
<key>unlock</key>
<string>unlock.glif</string>
<key>unmute</key>
<string>unmute.glif</string>
<key>unread</key>
<string>unread.glif</string>
<key>unverified</key>
<string>unverified.glif</string>
<key>upload</key>
<string>upload.glif</string>
<key>verified</key>
<string>verified.glif</string>
<key>versions</key>
<string>versions.glif</string>
<key>video</key>
<string>video.glif</string>
<key>webhook</key>
<string>webhook.glif</string>
<key>workflow</key>
<string>workflow.glif</string>
<key>x</key>
<string>x.glif</string>
<key>x-circle</key>
<string>x-circle.glif</string>
<key>x-circle-fill</key>
<string>x-circle-fill.glif</string>
<key>zap</key>
<string>zap.glif</string>
<key>zoom-in</key>
<string>zoom-in.glif</string>
<key>zoom-out</key>
<string>zoom-out.glif</string>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/contents.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/contents.plist",
"repo_id": "cascadia-code",
"token_count": 8926
}
| 700 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="diamond" format="2">
<advance width="1200"/>
<unicode hex="F4BF"/>
<note>
diamond
</note>
<outline>
<contour>
<point x="171" y="507" type="line" smooth="yes"/>
<point x="284" y="394"/>
<point x="511" y="167"/>
<point x="511" y="167" type="qcurve"/>
<point x="547" y="130"/>
<point x="653" y="130"/>
<point x="689" y="167" type="qcurve" smooth="yes"/>
<point x="803" y="281" type="line" smooth="yes"/>
<point x="916" y="394"/>
<point x="1143" y="621"/>
<point x="1143" y="621" type="qcurve"/>
<point x="1180" y="657"/>
<point x="1180" y="763"/>
<point x="1143" y="799" type="qcurve" smooth="yes"/>
<point x="1029" y="913" type="line" smooth="yes"/>
<point x="916" y="1026"/>
<point x="689" y="1253"/>
<point x="689" y="1253" type="qcurve"/>
<point x="653" y="1290"/>
<point x="547" y="1290"/>
<point x="510" y="1253" type="qcurve" smooth="yes"/>
<point x="397" y="1139" type="line" smooth="yes"/>
<point x="283" y="1026"/>
<point x="57" y="799"/>
<point x="57" y="799" type="qcurve"/>
<point x="20" y="763"/>
<point x="20" y="657"/>
<point x="57" y="621" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="247" y="836" type="line" smooth="yes"/>
<point x="360" y="950"/>
<point x="587" y="1176"/>
<point x="587" y="1176" type="qcurve"/>
<point x="593" y="1181"/>
<point x="607" y="1181"/>
<point x="613" y="1176" type="qcurve" smooth="yes"/>
<point x="1066" y="723" type="line" smooth="yes"/>
<point x="1071" y="717"/>
<point x="1071" y="703"/>
<point x="1066" y="697" type="qcurve" smooth="yes"/>
<point x="953" y="584" type="line" smooth="yes"/>
<point x="840" y="470"/>
<point x="613" y="244"/>
<point x="613" y="244" type="qcurve"/>
<point x="607" y="239"/>
<point x="593" y="239"/>
<point x="587" y="244" type="qcurve" smooth="yes"/>
<point x="134" y="697" type="line" smooth="yes"/>
<point x="129" y="703"/>
<point x="129" y="717"/>
<point x="134" y="723" type="qcurve" smooth="yes"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:49:53 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/diamond.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/diamond.glif",
"repo_id": "cascadia-code",
"token_count": 1200
}
| 701 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="issue-opened" format="2">
<advance width="1200"/>
<unicode hex="F41B"/>
<note>
issue-opened
</note>
<outline>
<contour>
<point x="709" y="665"/>
<point x="709" y="755"/>
<point x="645" y="819"/>
<point x="555" y="819"/>
<point x="491" y="755"/>
<point x="491" y="665"/>
<point x="555" y="601"/>
<point x="645" y="601"/>
</contour>
<contour>
<point x="268" y="1198"/>
<point x="112" y="1042"/>
<point x="20" y="828"/>
<point x="20" y="592"/>
<point x="112" y="378"/>
<point x="268" y="222"/>
<point x="482" y="130"/>
<point x="718" y="130"/>
<point x="932" y="222"/>
<point x="1088" y="378"/>
<point x="1180" y="592"/>
<point x="1180" y="828"/>
<point x="1088" y="1042"/>
<point x="932" y="1198"/>
<point x="718" y="1290"/>
<point x="482" y="1290"/>
</contour>
<contour>
<point x="204" y="980"/>
<point x="330" y="1106"/>
<point x="504" y="1181"/>
<point x="696" y="1181"/>
<point x="870" y="1106"/>
<point x="996" y="980"/>
<point x="1071" y="806"/>
<point x="1071" y="614"/>
<point x="996" y="440"/>
<point x="870" y="314"/>
<point x="696" y="239"/>
<point x="504" y="239"/>
<point x="330" y="314"/>
<point x="204" y="440"/>
<point x="129" y="614"/>
<point x="129" y="806"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:49:52 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/issue-opened.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/issue-opened.glif",
"repo_id": "cascadia-code",
"token_count": 866
}
| 702 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="i_seti_argdown" format="2">
<advance width="1200"/>
<unicode hex="E636"/>
<note>
i_seti_argdown
</note>
<outline>
<contour>
<point x="1180" y="663" type="line"/>
<point x="923" y="663" type="line"/>
<point x="923" y="1408" type="line"/>
<point x="705" y="1272" type="line"/>
<point x="705" y="420" type="line"/>
<point x="600" y="302" type="line"/>
<point x="496" y="419" type="line"/>
<point x="496" y="1272" type="line"/>
<point x="278" y="1408" type="line"/>
<point x="278" y="663" type="line"/>
<point x="20" y="663" type="line"/>
<point x="600" y="12" type="line"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:42:08 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_argdown.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_argdown.glif",
"repo_id": "cascadia-code",
"token_count": 427
}
| 703 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="i_seti_rescript" format="2">
<advance width="1200"/>
<unicode hex="E688"/>
<note>
i_seti_rescript
</note>
<outline>
<contour>
<point x="1180" y="1030" type="curve" smooth="yes"/>
<point x="1180" y="1175"/>
<point x="1061" y="1287"/>
<point x="923" y="1287" type="curve" smooth="yes"/>
<point x="778" y="1287"/>
<point x="666" y="1175"/>
<point x="666" y="1030" type="curve" smooth="yes"/>
<point x="666" y="885"/>
<point x="778" y="773"/>
<point x="923" y="773" type="curve" smooth="yes"/>
<point x="1068" y="773"/>
<point x="1180" y="891"/>
</contour>
<contour>
<point x="468" y="351" type="curve" smooth="yes"/>
<point x="468" y="1293" type="line"/>
<point x="251" y="1293" type="line" smooth="yes"/>
<point x="172" y="1293"/>
<point x="132" y="1293"/>
<point x="99" y="1280" type="curve" smooth="yes"/>
<point x="73" y="1267"/>
<point x="46" y="1241"/>
<point x="33" y="1214" type="curve" smooth="yes"/>
<point x="20" y="1188"/>
<point x="20" y="1142"/>
<point x="20" y="1063" type="curve" smooth="yes"/>
<point x="20" y="351" type="line" smooth="yes"/>
<point x="20" y="278"/>
<point x="20" y="245"/>
<point x="33" y="219" type="curve" smooth="yes"/>
<point x="46" y="186"/>
<point x="79" y="153"/>
<point x="112" y="140" type="curve" smooth="yes"/>
<point x="145" y="127"/>
<point x="178" y="127"/>
<point x="244" y="127" type="curve" smooth="yes"/>
<point x="317" y="127"/>
<point x="350" y="127"/>
<point x="376" y="140" type="curve" smooth="yes"/>
<point x="409" y="153"/>
<point x="442" y="186"/>
<point x="455" y="219" type="curve" smooth="yes"/>
<point x="468" y="252"/>
<point x="468" y="285"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:42:08 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_rescript.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_rescript.glif",
"repo_id": "cascadia-code",
"token_count": 1030
}
| 704 |
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import importlib.metadata as importlib_metadata
import time
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
distribution = importlib_metadata.distribution("causica")
project = distribution.metadata["Name"]
author = distribution.metadata["Author"] or "Microsoft Research - Causica"
copyright = f"{time.strftime('%Y')}, {author} and contributors" # pylint: disable=redefined-builtin
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = distribution.version
# The full version, including alpha/beta/rc tags.
release = version
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
# this helped:
# https://stackoverflow.com/questions/2701998/automatically-document-all-modules-recursively-with-sphinx-autodoc
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.githubpages",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"autoapi.extension",
"myst_parser",
"numpydoc",
"sphinx_immaterial",
]
templates_path = ["templates"]
exclude_patterns = ["Thumbs.db", ".DS_Store"]
autoapi_dirs = ["../../src/causica/"]
autoapi_template_dir = "templates/autoapi"
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_immaterial"
html_static_path = ["static"]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"features": [
"navigation.sections",
"navigation.instant",
"navigation.path",
"navigation.prune",
],
"globaltoc_collapse": False,
"repo_url": "https://github.com/microsoft/causica",
"repo_name": "causica",
"edit_uri": "edit/master/doc",
"palette": [
{
"media": "(prefers-color-scheme: light)",
"scheme": "default",
"primary": "red",
"accent": "light-blue",
"toggle": {
"icon": "material/lightbulb-outline",
"name": "Switch to dark mode",
},
},
{
"media": "(prefers-color-scheme: dark)",
"scheme": "slate",
"primary": "red",
"accent": "orange",
"toggle": {
"icon": "material/lightbulb",
"name": "Switch to light mode",
},
},
],
"toc_title_is_page_title": True,
"version_dropdown": True,
"version_json": "../im_versions.json",
}
# The master toctree document.
master_doc = "index"
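# To preview the generated site locally (source/output paths assumed from this repo layout):
#   sphinx-build -b html docs/src docs/_build/html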
|
causica/docs/src/conf.py/0
|
{
"file_path": "causica/docs/src/conf.py",
"repo_id": "causica",
"token_count": 1453
}
| 705 |
"""This file is for conducting the experiments on netsim dataset"""
import argparse
import pytorch_lightning as pl
import torch
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, TQDMProgressBar
from pytorch_lightning.loggers import MLFlowLogger
from scotch.latent_learning.scotch_data_module import SCOTCHDataModule
from scotch.latent_learning.scotch_module import SCOTCHModule
from tensordict import TensorDict
from causica.datasets.causica_dataset_format import Variable, VariablesMetadata
from causica.datasets.variable_types import VariableTypeEnum
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run Netsim experiments.")
parser.add_argument("-e", "--epoch", type=int, help="max number of epochs", default=20000)
parser.add_argument("-l", "--lr", type=float, help="learning rate", default=3e-3)
parser.add_argument("-s", "--sparsity", type=float, help="sparsity penalty", default=500)
parser.add_argument("-t", "--dt", type=float, help="dt", default=0.05)
parser.add_argument("-nor", "--normalize", action="store_true", help="whether to normalize")
parser.add_argument("-sd", "--seed", type=int, help="random seed", required=True)
parser.add_argument("-en", "--experiment_name", type=str, help="experiment name", required=True)
parser.add_argument("-res", "--res_connection", action="store_true", help="whether to use res_connection")
parser.add_argument("-ln", "--layer_norm", action="store_true", help="whether to use layer_norm")
parser.add_argument("-warm", "--lr_warmup", type=int, default=10000, help="warmup epochs")
parser.add_argument("-deci", "--deci_diffusion", action="store_true", help="whether to use deci diffusion function")
parser.add_argument(
"-sig",
"--sigmoid_output",
action="store_true",
help="whether to use sigmoid output for deci diffusion function",
)
# ADDED
parser.add_argument("-p", "--missing_prob", type=float, help="missing probability", required=True)
args = parser.parse_args()
seed_everything(args.seed)
# HParams
experiment_name = args.experiment_name
max_epochs = args.epoch
default_lr = args.lr
res_connection = args.res_connection
layer_norm = args.layer_norm
deci_diffusion = args.deci_diffusion
sigmoid_output = args.sigmoid_output
missing_prob = args.missing_prob
lrs = {
"graph": default_lr,
"qz0_mean_net": default_lr,
"qz0_logstd_net": default_lr,
"pz0_mean": default_lr,
"pz0_logstd": default_lr,
"prior_drift_fn": default_lr,
"diffusion_fn": default_lr,
"posterior_drift_fn": default_lr,
"trajectory_encoder": default_lr,
}
prior_sparsity_lambda = args.sparsity
train_size = 5
val_size = -1
t_max = 10.0
num_time_points = 200
dt = args.dt # sde solver dt = observation interval
normalize = args.normalize
lr_warmup_iters = args.lr_warmup
hparams = {
"seed": args.seed,
"epoch": args.epoch,
"dt": args.dt,
"default_lr": default_lr,
"train_size": train_size,
"val_size": val_size,
"prior_sparsity_lambda": prior_sparsity_lambda,
"t_max": t_max,
"num_time_points": num_time_points,
"normalize": normalize,
"lr_warmup_iters": lr_warmup_iters,
"res_connection": res_connection,
"layer_rnorm": layer_norm,
"deci_diffusion": deci_diffusion,
"sigmoid_output": sigmoid_output,
"missing_prob": missing_prob,
}
# netsim
state_size = 15
variables_metadata = VariablesMetadata(
[Variable(name=f"x{i}", type=VariableTypeEnum.CONTINUOUS, group_name=f"x{i}") for i in range(state_size)]
)
subf = "norm" if args.normalize else "unnorm"
ts = torch.load(f"data/netsim_processed/{subf}/times_{str(args.missing_prob)}_{args.seed}.pt")
training_data = torch.load(f"data/netsim_processed/{subf}/data_{str(args.missing_prob)}_{args.seed}.pt")
true_graph = torch.load(f"data/netsim_processed/{subf}/true_graph.pt")
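    # Shape assumptions (inferred from the indexing below):
    #   training_data: [train_size, num_time_points, state_size]
    #   ts: shared observation time grid for all trajectories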
training_data = TensorDict(
{f"x{i}": training_data[:, :, i].unsqueeze(dim=2) for i in range(state_size)},
batch_size=[train_size],
)
validation_data = training_data
scotch_data = SCOTCHDataModule(
ts=ts,
training_data=training_data,
validation_data=validation_data,
true_graph=true_graph,
variables_metadata=variables_metadata,
batch_size=1024,
)
# SCOTCH Module
scotch = SCOTCHModule(
learning_rates=lrs,
prior_sparsity_lambda=prior_sparsity_lambda,
dt=dt,
layer_norm=layer_norm,
res_connections=res_connection,
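        # Note: hardcoded to True here; the --deci_diffusion CLI flag is only logged in hparams.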
deci_diffusion=True,
add_diffusion_self_connections=True,
sigmoid_output=sigmoid_output,
)
mlf_logger = MLFlowLogger(
experiment_name=experiment_name,
tracking_uri="file:./mlflow_logs/mlruns",
)
mlf_logger.log_hyperparams(hparams)
trainer = pl.Trainer(
accelerator="auto",
max_epochs=max_epochs,
fast_dev_run=False,
callbacks=[
TQDMProgressBar(refresh_rate=19),
ModelCheckpoint(every_n_epochs=50),
],
check_val_every_n_epoch=50,
logger=mlf_logger,
)
trainer.fit(scotch, datamodule=scotch_data)
|
causica/research_experiments/scotch/src/scotch/experiments/netsim.py/0
|
{
"file_path": "causica/research_experiments/scotch/src/scotch/experiments/netsim.py",
"repo_id": "causica",
"token_count": 2323
}
| 706 |
import itertools
import random
import warnings
from itertools import zip_longest
from typing import Iterable, Iterator
import torch
from torch.utils.data import BatchSampler, Sampler, SubsetRandomSampler
class SubsetBatchSampler(Sampler[list[int]]):
"""A Pytorch batch sampler that samples batches from a list of subsets.
Each batch will be sampled from a single subset. The subsets are sampled in random order if shuffle is True.
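    Example (illustrative sketch; pair with a concatenated dataset whose subsets have these lengths):
        >>> sampler = SubsetBatchSampler(subset_lengths=[10, 7], batch_size=4)
        >>> all(len(batch) <= 4 for batch in sampler)
        True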
"""
def __init__(
self, subset_lengths: list[int], batch_size: int, shuffle: bool = True, drop_last: bool = False
) -> None:
"""
Args:
subset_lengths: The lengths of each subset
batch_size: The batch size to use
shuffle: Whether to shuffle the indices
drop_last: Whether to drop the last batch of each subset if it is smaller than the batch size.
"""
if batch_size > min(subset_lengths):
warnings.warn("Batch size is larger than the smallest subset length")
if any(length < 1 for length in subset_lengths):
raise ValueError("Subset lengths must be at least 1")
self.subset_lengths = subset_lengths
self.batch_size = batch_size
self.shuffle = shuffle
self.drop_last = drop_last
self.start_indices = torch.cumsum(torch.tensor([0] + self.subset_lengths[:-1]), 0)
self.batch_samplers: list[Iterator | BatchSampler]
self.indices_lists = [
(torch.arange(length) + offset).tolist() for length, offset in zip(self.subset_lengths, self.start_indices)
]
if self.shuffle:
self.batch_samplers = [
BatchSampler(SubsetRandomSampler(indices), batch_size, drop_last=drop_last)
for indices in self.indices_lists
]
else:
# Replace with `itertools.batched` when Python >= 3.12
self.batch_samplers = []
for indices in self.indices_lists:
args = [iter(indices)] * batch_size
self.batch_samplers.append(zip_longest(*args, fillvalue=None))
    def __iter__(self):
        # When not shuffling, yield truncated batches from each subset in order
        # (zip_longest pads the final batch with None; the helper strips it),
        # then stop rather than falling through to the shuffled path.
        if not self.shuffle:
            yield from itertools.chain.from_iterable(
                yield_truncated_batch_from_zip(iterator, self.batch_size, self.drop_last)
                for iterator in self.batch_samplers
            )
            return
iterators = [iter(batch_sampler) for batch_sampler in self.batch_samplers]
while iterators:
permutation = list(enumerate(iterators))
random.shuffle(permutation)
stopped = []
for i, iterator in permutation:
try:
yield next(iterator)
except StopIteration:
stopped.append(i)
# Delete in reverse order to preserve indices
for i in sorted(stopped, reverse=True):
del iterators[i]
def __len__(self) -> int:
if self.drop_last:
return sum(length // self.batch_size for length in self.subset_lengths)
return sum((length + self.batch_size - 1) // self.batch_size for length in self.subset_lengths)
def yield_truncated_batch_from_zip(iterator: Iterable, batch_size: int, drop_last: bool = False):
"""Yield truncated batches from an iterator (dropping None elements), skipping the last batch if necessary."""
for batch in iterator:
batch = list(b for b in batch if b is not None)
if not drop_last or len(batch) == batch_size:
yield batch
else:
return
|
causica/src/causica/datasets/samplers.py/0
|
{
"file_path": "causica/src/causica/datasets/samplers.py",
"repo_id": "causica",
"token_count": 1563
}
| 707 |
from typing import Optional
import torch
import torch.distributions as td
import torch.nn.functional as F
from torch import nn
from causica.distributions.noise.noise import IndependentNoise, Noise, NoiseModule
class BernoulliNoise(td.Bernoulli, Noise):
def __init__(self, delta_logits: torch.Tensor, base_logits: torch.Tensor):
"""
A Bernoulli distribution with parameters defined by base_logits and x_hat (predictions for noiseless value).
Args:
delta_logits: Tensor with shape sample_shape + batch_shape. These are the predicted values.
base_logits: Tensor with shape batch_shape
"""
self.delta_logits = delta_logits
super().__init__(logits=base_logits + delta_logits, validate_args=False)
def sample_to_noise(self, samples: torch.Tensor) -> torch.Tensor:
"""
Transform from the sample observations to corresponding noise variables.
This will draw from the noise posterior given the observations
A posterior sample of the Gumbel noise random variables given observation x and probabilities
`self.base_logits + logit_deltas`.
This methodology is described in https://arxiv.org/pdf/1905.05824.pdf.
See https://cmaddis.github.io/gumbel-machinery for derivation of Gumbel posteriors.
For a derivation of this exact algorithm using softplus, see https://www.overleaf.com/8628339373sxjmtvyxcqnx.
Args:
samples: Tensor of shape sample_shape + batch_shape + event_shape
Returns:
The generated samples with shape sample_shape + batch_shape + event_shape
"""
assert (
samples.shape == self.delta_logits.shape
), "The shape of the input does not match the shape of the logit_deltas"
device = self.delta_logits.device
dist = td.Gumbel(torch.tensor(0.0, device=device), torch.tensor(1.0, device=device))
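        # The difference of two independent Gumbel(0, 1) draws follows a Logistic(0, 1)
        # distribution; it is used below to sample the posterior exogenous noise.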
diff_sample = dist.sample(samples.shape) - dist.sample(samples.shape) # sample_shape + batch_shape
neg_log_prob_non_sampled = F.softplus(self.logits * samples - self.logits * (1 - samples))
positive_sample = F.softplus(diff_sample + neg_log_prob_non_sampled)
return positive_sample * samples - positive_sample * (1 - samples) - self.delta_logits
def noise_to_sample(self, noise: torch.Tensor) -> torch.Tensor:
"""
Generate samples using the given exogenous noise.
Args:
noise: noise variable with shape sample_shape + batch_shape.
Returns:
The generated samples with shape sample_shape + batch_shape + event_shape
"""
return ((self.delta_logits + noise) > 0).float()
@property
def mode(self):
"""
Override the default `mode` method to prevent it returning nan's.
We favour sparseness, so if logit == 0, set the mode to be zero.
"""
return (self.logits > 0).to(self.logits, non_blocking=True)
class BernoulliNoiseModule(NoiseModule[IndependentNoise[BernoulliNoise]]):
"""Represents a BernoulliNoise distribution with learnable logits."""
def __init__(self, dim: int, init_base_logits: float | torch.Tensor = 0.0):
"""
Args:
            dim: Number of dimensions (independent Bernoullis).
            init_base_logits: Initial value for the learnable base logits; either a
                scalar broadcast to all ``dim`` dimensions or a 1-D tensor of length ``dim``.
"""
super().__init__()
if isinstance(init_base_logits, torch.Tensor):
if init_base_logits.squeeze().ndim == 0:
init_base_logits = torch.full(torch.Size([dim]), init_base_logits.item())
else:
assert init_base_logits.ndim == 1
assert init_base_logits.shape[0] == dim
else:
init_base_logits = torch.full(torch.Size([dim]), init_base_logits)
self.base_logits = nn.Parameter(init_base_logits)
def forward(self, x: Optional[torch.Tensor] = None) -> IndependentNoise[BernoulliNoise]:
if x is None:
x = torch.zeros_like(self.base_logits)
return IndependentNoise(BernoulliNoise(delta_logits=x, base_logits=self.base_logits), 1)
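# Minimal usage sketch (values are illustrative):
#   noise_module = BernoulliNoiseModule(dim=3)
#   dist = noise_module(torch.zeros(3))  # IndependentNoise over three Bernoullis
#   samples = dist.sample()              # binary tensor of shape [3]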
|
causica/src/causica/distributions/noise/bernoulli.py/0
|
{
"file_path": "causica/src/causica/distributions/noise/bernoulli.py",
"repo_id": "causica",
"token_count": 1650
}
| 708 |
import torch
from tensordict import TensorDict
from causica.functional_relationships.functional_relationships import FunctionalRelationships
from causica.nn import DECIEmbedNN
class DECIEmbedFunctionalRelationships(FunctionalRelationships):
"""
    This is a `FunctionalRelationships` that wraps the `DECIEmbedNN` module.
"""
def __init__(
self,
shapes: dict[str, torch.Size],
embedding_size: int,
out_dim_g: int,
num_layers_g: int,
num_layers_zeta: int,
) -> None:
super().__init__(shapes=shapes)
self.nn = DECIEmbedNN(self.stacked_key_masks, embedding_size, out_dim_g, num_layers_g, num_layers_zeta)
def forward(self, samples: TensorDict, graphs: torch.Tensor) -> TensorDict:
return self.tensor_to_td(self.nn(self.tensor_to_td.inv(samples), graphs))
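# Usage sketch (mirrors the shapes used in the unit tests):
#   shapes = {"x1": torch.Size([1]), "x2": torch.Size([2])}
#   func_rel = DECIEmbedFunctionalRelationships(shapes, embedding_size=32, out_dim_g=32, num_layers_g=2, num_layers_zeta=2)
#   predictions = func_rel(samples_td, graph)  # TensorDict in, TensorDict out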
|
causica/src/causica/functional_relationships/deci_functional_relationships.py/0
|
{
"file_path": "causica/src/causica/functional_relationships/deci_functional_relationships.py",
"repo_id": "causica",
"token_count": 348
}
| 709 |
from typing import Sequence
from lightning_utilities.core.rank_zero import rank_zero_only
from mlflow.entities import Metric, Param, RunTag
from pytorch_lightning.loggers import MLFlowLogger
class BufferingMlFlowLogger(MLFlowLogger):
"""MlFlowLogger that buffers metrics on logging and flushes on finalize or when the buffer is full."""
def __init__(self, buffer_size: int, *args, **kwargs):
"""
Args:
buffer_size: The maximum number of metrics to buffer before flushing
*args: Passed to `MLFlowLogger`
**kwargs: Passed to `MLFlowLogger`
"""
super().__init__(*args, **kwargs)
self._buffer_size = buffer_size
self._buffer: list[Metric] = []
self._original_log_batch = self.experiment.log_batch
self.experiment.log_batch = self._buffer_log_batch_metrics(self.experiment.log_batch)
@rank_zero_only
def _buffer_log_batch_metrics(self, original_log_batch):
"""Returns a decorated `log_batch` that buffers metrics and flushes them when the buffer is full."""
def log_batch(
run_id: str,
metrics: Sequence[Metric] = (),
params: Sequence[Param] = (),
tags: Sequence[RunTag] = (),
) -> None:
if metrics:
self._buffer.extend(metrics)
if len(self._buffer) >= self._buffer_size:
self.flush()
if params or tags:
original_log_batch(run_id=run_id, params=params, tags=tags)
return log_batch
def get_buffer_count(self) -> int:
"""Return the current number of buffered messages."""
return len(self._buffer)
@rank_zero_only
def flush(self):
if self._buffer:
self._original_log_batch(run_id=self.run_id, metrics=self._buffer)
self._buffer.clear()
@rank_zero_only
def finalize(self, *args, **kwargs) -> None:
self.flush()
return super().finalize(*args, **kwargs)
@rank_zero_only
def __del__(self) -> None:
self.flush()
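# Usage sketch (experiment name and tracking URI are illustrative):
#   logger = BufferingMlFlowLogger(buffer_size=100, experiment_name="exp", tracking_uri="file:./mlruns")
#   trainer = pl.Trainer(logger=logger)
#   ...
#   logger.finalize()  # flushes any remaining buffered metrics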
|
causica/src/causica/lightning/loggers.py/0
|
{
"file_path": "causica/src/causica/lightning/loggers.py",
"repo_id": "causica",
"token_count": 909
}
| 710 |
from typing import Any, Iterable, Optional, Type, Union
from torch import Tensor
from torchmetrics import Metric, MetricCollection
from torchmetrics.regression import MeanAbsoluteError, MeanAbsolutePercentageError
from torchmetrics.wrappers import MultitaskWrapper
class MeanAbsoluteErrorWithThreshold(MeanAbsoluteError):
"""This compute the MAE with a minimum filter on the target values.
This compute the MAE for target that is higher than the min_threshold.
"""
def __init__(self, min_threshold: float, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.threshold = min_threshold
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with filtered predictions and targets."""
# filter based on the threshold
mask = target > self.threshold
filtered_preds = preds[mask]
filtered_target = target[mask]
super().update(filtered_preds, filtered_target)
class MeanAbsolutePercentageErrorWithThreshold(MeanAbsolutePercentageError):
"""This compute the MAPE with a minimum filter on the target values.
This compute the MAPE for target that is higher than the min_threshold.
"""
def __init__(self, min_threshold: float, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.threshold = min_threshold
def update(self, preds: Tensor, target: Tensor) -> None:
"""Update state with filtered predictions and targets."""
# filter based on the threshold
mask = target > self.threshold
filtered_preds = preds[mask]
filtered_target = target[mask]
super().update(filtered_preds, filtered_target)
def create_metrics_for_variables(
variables: Iterable[str],
metrics: MetricCollection,
min_thresholds: Optional[dict[str, float]] = None,
threshold_metrics: Optional[dict[str, Type[Metric]]] = None,
) -> MultitaskWrapper:
"""
    Create a MultitaskWrapper whose task keys are the variable names.
Args:
variables: Variables name list to calculate the metrics for.
metrics: MetricCollection to store the metrics we want to compute.
min_thresholds: A dictionary containing the threshold values for each of the variables.
threshold_metrics: The dict containing the thresholded torchmetrics class. If None, then no additional threshold
metrics will be added.
Returns:
MultitaskWrapper obj with keys to be the variable names.
"""
metrics_dict: dict[str, Union[Metric, MetricCollection]] = {
key: metrics.clone(postfix=f".{key}") for key in variables
}
if threshold_metrics is not None and min_thresholds is not None:
for variable_name, threshold in min_thresholds.items():
for name, cur_threshold_metric_class in threshold_metrics.items():
cur_threshold_metric = cur_threshold_metric_class(min_threshold=threshold)
metrics_dict[variable_name].add_module(name, cur_threshold_metric)
return MultitaskWrapper(metrics_dict)
def filter_metrics_wrapper(variable_list: Optional[list[str]], metrics_wrapper: MultitaskWrapper) -> MultitaskWrapper:
"""
    Filter the MultitaskWrapper to keep only the variables in variable_list. If variable_list is None, the metrics are not filtered.
Args:
variable_list: List of variables to filter the metrics_dict.
metrics_wrapper: MultitaskWrapper obj with keys to be the variable names.
Returns:
        The filtered MultitaskWrapper.
"""
if variable_list is None:
return metrics_wrapper
filtered_dict = {key: metrics_wrapper.task_metrics[key] for key in variable_list}
return MultitaskWrapper(filtered_dict)
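# Usage sketch (variable names and metric choice are illustrative):
#   metrics = MetricCollection({"mae": MeanAbsoluteError()})
#   wrapper = create_metrics_for_variables(["x0", "x1"], metrics)
#   wrapper.update({"x0": preds_x0, "x1": preds_x1}, {"x0": target_x0, "x1": target_x1})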
|
causica/src/causica/training/per_variable_metrics.py/0
|
{
"file_path": "causica/src/causica/training/per_variable_metrics.py",
"repo_id": "causica",
"token_count": 1299
}
| 711 |
import numpy as np
import torch
from torch.distributions.utils import probs_to_logits
from causica.distributions import ThreeWayAdjacencyDistribution
def test_threeway_entropy():
"""Test entropy is correct for a known distribution"""
num_nodes = 3
logits = torch.nn.Parameter(
torch.zeros(((num_nodes * (num_nodes - 1)) // 2, 3))
) # create a max entropy distribution
dist = ThreeWayAdjacencyDistribution(logits=logits)
entropy = dist.entropy()
np.testing.assert_allclose(entropy.detach().numpy(), logits.shape[0] * np.log(3))
# check the gradient of the max entropy is ~zero
entropy.backward()
np.testing.assert_allclose(logits.grad, np.zeros_like(logits.detach().numpy()), atol=1e-7)
def test_threeway():
"""Test ThreeWay methods are correct for a known distribution."""
probs = torch.Tensor([[0.4, 0.25, 0.35]])
dist = ThreeWayAdjacencyDistribution(logits=probs_to_logits(probs))
np.testing.assert_allclose(dist.mean, np.array([[0.0, 0.25], [0.4, 0.0]]), rtol=1e-6)
np.testing.assert_allclose(dist.mode, np.array([[0.0, 0.0], [1.0, 0.0]]), rtol=1e-6)
samples = torch.tensor([[[0.0, 1.0], [0.0, 0.0]], [[0.0, 0.0], [1.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]])
np.testing.assert_allclose(dist.log_prob(samples), np.array([np.log(0.25), np.log(0.4), np.log(0.35)]))
|
causica/test/distributions/adjacency/test_three_way.py/0
|
{
"file_path": "causica/test/distributions/adjacency/test_three_way.py",
"repo_id": "causica",
"token_count": 568
}
| 712 |
import math
import pytest
import torch
from tensordict import TensorDict
from causica.functional_relationships import (
DECIEmbedFunctionalRelationships,
LinearFunctionalRelationships,
RFFFunctionalRelationships,
)
@pytest.fixture(name="two_variable_dict")
def fixture_two_variable_dict():
return {"x1": torch.Size([1]), "x2": torch.Size([2])}
@pytest.fixture(name="two_variable_sample")
def fixture_two_variable_sample():
return TensorDict({"x1": torch.randn((3, 1)), "x2": torch.randn((3, 2))}, batch_size=torch.Size([3]))
@pytest.fixture(name="two_variable_graph")
def fixture_two_variable_graph():
return torch.Tensor([[0.0, 1.0], [0.0, 0.0]])
@pytest.fixture(name="two_variable_graphs")
def fixture_two_variable_graphs():
return torch.Tensor([[[0.0, 1.0], [0.0, 0.0]], [[0.0, 0.0], [1.0, 0.0]]])
def test_func_rel_init(two_variable_dict):
func_rel = DECIEmbedFunctionalRelationships(two_variable_dict, 32, 32, 2, 2)
assert func_rel.tensor_to_td.output_shape == 3
def test_func_rel_forward(two_variable_dict, two_variable_graph, two_variable_sample):
func_rel = DECIEmbedFunctionalRelationships(two_variable_dict, 32, 32, 2, 2)
func_rel.nn.w = torch.nn.Parameter(torch.ones_like(func_rel.nn.w), requires_grad=False)
prediction = func_rel(two_variable_sample, two_variable_graph)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2)
assert torch.allclose(prediction["x1"], prediction["x1"][0, 0])
assert not torch.allclose(prediction["x2"][..., 0], prediction["x2"][0, 0])
assert not torch.allclose(prediction["x2"][..., 1], prediction["x2"][0, 1])
def test_func_rel_forward_multigraph(two_variable_dict, two_variable_graphs, two_variable_sample):
func_rel = DECIEmbedFunctionalRelationships(two_variable_dict, 32, 32, 2, 2)
func_rel.nn.w = torch.nn.Parameter(torch.ones_like(func_rel.nn.w), requires_grad=False)
prediction = func_rel(two_variable_sample.unsqueeze(1).expand(3, 2), two_variable_graphs)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 2, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2, 2)
# x1 and x2 are initial nodes for graphs 0 and 1 respectively, so should just get bias terms
assert torch.allclose(prediction["x1"][:, 0, :], prediction["x1"][0, 0, :])
assert torch.allclose(prediction["x2"][:, 1, :], prediction["x2"][0, 1, :])
# Pass a new sample through the model and check that the predictions are the same for the initial nodes
sample2 = TensorDict({"x1": torch.randn((3, 1)), "x2": torch.randn((3, 2))}, batch_size=torch.Size([3]))
prediction2 = func_rel(sample2.unsqueeze(1).expand(3, 2), two_variable_graphs)
assert torch.allclose(prediction2["x1"][:, 0, :], prediction["x1"][:, 0, :])
assert torch.allclose(prediction2["x2"][:, 1, :], prediction["x2"][:, 1, :])
# for the other graphs they shouldn't be equal
assert not torch.allclose(prediction2["x1"][:, 1, :], prediction["x1"][:, 1, :])
assert not torch.allclose(prediction2["x2"][:, 0, :], prediction["x2"][:, 0, :])
def test_linear_forward(two_variable_dict, two_variable_graph, two_variable_sample):
coef_matrix = torch.rand((3, 3))
func_rel = LinearFunctionalRelationships(two_variable_dict, coef_matrix)
prediction = func_rel(two_variable_sample, two_variable_graph)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2)
assert torch.all(prediction["x1"] == 0.0)
true_x2_prediction = torch.matmul(two_variable_sample["x1"].unsqueeze(-2), coef_matrix[:1, 1:]).squeeze(-2)
assert torch.all(prediction["x2"] == true_x2_prediction)
def test_linear_forward_multigraph(two_variable_dict, two_variable_graphs, two_variable_sample):
coef_matrix = torch.rand((3, 3))
func_rel = LinearFunctionalRelationships(two_variable_dict, coef_matrix)
prediction = func_rel(two_variable_sample.unsqueeze(1).expand(3, 2), two_variable_graphs)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 2, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2, 2)
# x1 and x2 are initial nodes for graphs 0 and 1 respectively
assert torch.all(prediction["x1"][:, 0, :] == 0.0)
assert torch.all(prediction["x2"][:, 1, :] == 0.0)
# x1 and x2 are linear transformations of the other nodes for graphs 1 and 0 respectively
true_x1_prediction = torch.matmul(two_variable_sample["x2"], coef_matrix[1:, :1])
assert torch.allclose(prediction["x1"][:, 1, :], true_x1_prediction)
true_x2_prediction = torch.matmul(two_variable_sample["x1"], coef_matrix[:1, 1:])
assert torch.allclose(prediction["x2"][:, 0, :], true_x2_prediction)
def test_linear_forward_with_bias(two_variable_dict, two_variable_graph, two_variable_sample):
coef_matrix = torch.rand((3, 3))
initial_bias = torch.rand((3,))
func_rel = LinearFunctionalRelationships(two_variable_dict, coef_matrix, initial_bias=initial_bias)
prediction = func_rel(two_variable_sample, two_variable_graph)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2)
assert torch.all(prediction["x1"] == initial_bias[0])
true_x2_prediction = initial_bias[1:] + torch.matmul(
two_variable_sample["x1"].unsqueeze(-2), coef_matrix[:1, 1:]
).squeeze(-2)
assert torch.all(prediction["x2"] == true_x2_prediction)
def test_linear_forward_multigraph_with_bias(two_variable_dict, two_variable_graphs, two_variable_sample):
coef_matrix = torch.rand((3, 3))
initial_bias = torch.rand((3,))
func_rel = LinearFunctionalRelationships(two_variable_dict, coef_matrix, initial_bias=initial_bias)
prediction = func_rel(two_variable_sample.unsqueeze(1).expand(3, 2), two_variable_graphs)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 2, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2, 2)
# x1 and x2 are initial nodes for graphs 0 and 1 respectively
assert torch.allclose(prediction["x1"][:, 0, :], initial_bias[0].repeat(3, 1))
assert torch.allclose(prediction["x2"][:, 1, :], initial_bias[1:].repeat(3, 1))
# x1 and x2 are linear transformations of the other nodes for graphs 1 and 0 respectively
true_x1_prediction = initial_bias[0] + torch.matmul(two_variable_sample["x2"], coef_matrix[1:, :1])
assert torch.allclose(prediction["x1"][:, 1, :], true_x1_prediction)
true_x2_prediction = initial_bias[1:].reshape(1, -1) + torch.matmul(two_variable_sample["x1"], coef_matrix[:1, 1:])
assert torch.allclose(prediction["x2"][:, 0, :], true_x2_prediction)
def test_non_linear_forward(two_variable_dict, two_variable_graph, two_variable_sample):
random_features = torch.rand((5, 3))
coeff_alpha = torch.rand((5,))
func_rel = RFFFunctionalRelationships(two_variable_dict, random_features, coeff_alpha)
prediction = func_rel(two_variable_sample, two_variable_graph)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2)
assert torch.allclose(prediction["x1"], torch.zeros_like(prediction["x1"]), atol=1e-6)
true_inner_prods = two_variable_sample["x1"] * random_features[:, 0]
transformed_inner_prods = torch.cos(true_inner_prods - (math.pi / 2)) * coeff_alpha
true_x2_prediction = math.sqrt(2 / 5) * torch.sum(transformed_inner_prods, dim=-1)
true_x2_prediction = true_x2_prediction.unsqueeze(-1).repeat(1, 2)
assert torch.allclose(prediction["x2"], true_x2_prediction)
def test_non_linear_forward_multigraph(two_variable_dict, two_variable_graphs, two_variable_sample):
random_features = torch.rand((5, 3))
coeff_alpha = torch.rand((5,))
func_rel = RFFFunctionalRelationships(two_variable_dict, random_features, coeff_alpha)
prediction = func_rel(two_variable_sample.unsqueeze(1).expand(3, 2), two_variable_graphs)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 2, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2, 2)
# x1 and x2 are initial nodes for graphs 0 and 1 respectively
assert torch.allclose(prediction["x1"][:, 0, :], torch.zeros_like(prediction["x1"][:, 0, :]), atol=1e-6)
assert torch.allclose(prediction["x2"][:, 1, :], torch.zeros_like(prediction["x2"][:, 1, :]), atol=1e-6)
true_inner_prods = two_variable_sample["x1"] * random_features[:, 0]
transformed_inner_prods = torch.cos(true_inner_prods - (math.pi / 2)) * coeff_alpha
true_x2_prediction = math.sqrt(2 / 5) * torch.sum(transformed_inner_prods, dim=-1)
true_x2_prediction = true_x2_prediction.unsqueeze(-1).repeat(1, 2)
assert torch.allclose(prediction["x2"][:, 0, :], true_x2_prediction)
true_inner_prods = torch.matmul(two_variable_sample["x2"], random_features[:, 1:].transpose(-2, -1))
transformed_inner_prods = torch.cos(true_inner_prods - (math.pi / 2)) * coeff_alpha
true_x1_prediction = math.sqrt(2 / 5) * torch.sum(transformed_inner_prods, dim=-1)
true_x1_prediction = true_x1_prediction.unsqueeze(-1)
assert torch.allclose(prediction["x1"][:, 1, :], true_x1_prediction)
def test_non_linear_forward_full(two_variable_dict, two_variable_graph, two_variable_sample):
length_scales = torch.rand((3,))
out_scales = torch.rand((3,))
random_features = torch.rand((5, 3))
coeff_alpha = torch.rand((5,))
func_rel = RFFFunctionalRelationships(
two_variable_dict,
random_features,
coeff_alpha,
initial_length_scales=length_scales,
initial_output_scales=out_scales,
)
prediction = func_rel(two_variable_sample, two_variable_graph)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2)
assert torch.allclose(prediction["x1"], torch.zeros_like(prediction["x1"]), atol=1e-6)
true_inner_prods = two_variable_sample["x1"] * random_features[:, 0]
true_inner_prods_rescaled = true_inner_prods.repeat(2, 1, 1) / length_scales[1:].reshape(2, 1, 1)
transformed_inner_prods = torch.cos(true_inner_prods_rescaled - (math.pi / 2)) * coeff_alpha
true_x2_prediction = math.sqrt(2 / 5) * out_scales[1:].reshape(2, 1) * torch.sum(transformed_inner_prods, dim=-1)
assert torch.allclose(prediction["x2"], true_x2_prediction.transpose(-1, -2))
def test_non_linear_forward_multigraph_full(two_variable_dict, two_variable_graphs, two_variable_sample):
length_scales = torch.rand((3,))
out_scales = torch.rand((3,))
random_features = torch.rand((5, 3))
coeff_alpha = torch.rand((5,))
func_rel = RFFFunctionalRelationships(
two_variable_dict,
random_features,
coeff_alpha,
initial_length_scales=length_scales,
initial_output_scales=out_scales,
)
prediction = func_rel(two_variable_sample.unsqueeze(1).expand(3, 2), two_variable_graphs)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 2, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2, 2)
# x1 and x2 are initial nodes for graphs 0 and 1 respectively
assert torch.allclose(prediction["x1"][:, 0, :], torch.zeros_like(prediction["x1"][:, 0, :]), atol=1e-6)
assert torch.allclose(prediction["x2"][:, 1, :], torch.zeros_like(prediction["x2"][:, 1, :]), atol=1e-6)
true_inner_prods = two_variable_sample["x1"] * random_features[:, 0]
true_inner_prods_rescaled = true_inner_prods.repeat(2, 1, 1) / length_scales[1:].reshape(2, 1, 1)
transformed_inner_prods = torch.cos(true_inner_prods_rescaled - (math.pi / 2)) * coeff_alpha
true_x2_prediction = math.sqrt(2 / 5) * out_scales[1:].reshape(2, 1) * torch.sum(transformed_inner_prods, dim=-1)
assert torch.allclose(prediction["x2"][:, 0, :], true_x2_prediction.transpose(-1, -2))
true_inner_prods = torch.matmul(two_variable_sample["x2"], random_features[:, 1:].transpose(-2, -1))
true_inner_prods_rescaled = true_inner_prods / length_scales[0]
transformed_inner_prods = torch.cos(true_inner_prods_rescaled - (math.pi / 2)) * coeff_alpha
true_x1_prediction = math.sqrt(2 / 5) * out_scales[0] * torch.sum(transformed_inner_prods, dim=-1)
true_x1_prediction = true_x1_prediction.unsqueeze(-1)
assert torch.allclose(prediction["x1"][:, 1, :], true_x1_prediction)
def test_non_linear_forward_full_with_bias_and_angle(two_variable_dict, two_variable_graph, two_variable_sample):
length_scales = torch.rand((3,))
out_scales = torch.rand((3,))
random_features = torch.rand((5, 3))
coeff_alpha = torch.rand((5,))
initial_bias = torch.rand((3,))
initial_angles = torch.rand((5,))
func_rel = RFFFunctionalRelationships(
two_variable_dict,
random_features,
coeff_alpha,
initial_bias=initial_bias,
initial_length_scales=length_scales,
initial_output_scales=out_scales,
initial_angles=initial_angles,
)
prediction = func_rel(two_variable_sample, two_variable_graph)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2)
res = initial_bias[0] + out_scales[0] * math.sqrt(2 / 5) * torch.sum(torch.cos(initial_angles) * coeff_alpha)
assert torch.allclose(prediction["x1"], res.repeat(3, 1), atol=1e-6)
true_inner_prods = two_variable_sample["x1"] * random_features[:, 0]
true_inner_prods_rescaled = true_inner_prods.repeat(2, 1, 1) / length_scales[1:].reshape(2, 1, 1)
transformed_inner_prods = torch.cos(true_inner_prods_rescaled + initial_angles) * coeff_alpha
true_x2_prediction = initial_bias[1:].reshape(2, 1) + math.sqrt(2 / 5) * out_scales[1:].reshape(2, 1) * torch.sum(
transformed_inner_prods, dim=-1
)
assert torch.allclose(prediction["x2"], true_x2_prediction.transpose(-1, -2))
def test_non_linear_forward_multigraph_full_with_bias_and_angle(
two_variable_dict, two_variable_graphs, two_variable_sample
):
length_scales = torch.rand((3,))
out_scales = torch.rand((3,))
random_features = torch.rand((5, 3))
coeff_alpha = torch.rand((5,))
initial_bias = torch.rand((3,))
initial_angles = torch.rand((5,))
func_rel = RFFFunctionalRelationships(
two_variable_dict,
random_features,
coeff_alpha,
initial_bias=initial_bias,
initial_length_scales=length_scales,
initial_output_scales=out_scales,
initial_angles=initial_angles,
)
prediction = func_rel(two_variable_sample.unsqueeze(1).expand(3, 2), two_variable_graphs)
assert set(prediction.keys()) == {"x1", "x2"}
assert prediction["x1"].shape == (3, 2, 1), f"got {prediction['x1'].shape}"
assert prediction["x2"].shape == (3, 2, 2)
# x1 and x2 are initial nodes for graphs 0 and 1 respectively
res_1 = initial_bias[0] + out_scales[0] * math.sqrt(2 / 5) * torch.sum(torch.cos(initial_angles) * coeff_alpha)
assert torch.allclose(prediction["x1"][:, 0, :], res_1, atol=1e-6)
res_2 = initial_bias[1:] + out_scales[1:] * math.sqrt(2 / 5) * torch.sum(torch.cos(initial_angles) * coeff_alpha)
assert torch.allclose(prediction["x2"][:, 1, :], res_2.repeat(3, 1), atol=1e-6)
true_inner_prods = two_variable_sample["x1"] * random_features[:, 0]
true_inner_prods_rescaled = true_inner_prods.repeat(2, 1, 1) / length_scales[1:].reshape(2, 1, 1)
transformed_inner_prods = torch.cos(true_inner_prods_rescaled + initial_angles) * coeff_alpha
true_x2_prediction = initial_bias[1:].reshape(2, 1) + math.sqrt(2 / 5) * out_scales[1:].reshape(2, 1) * torch.sum(
transformed_inner_prods, dim=-1
)
assert torch.allclose(prediction["x2"][:, 0, :], true_x2_prediction.transpose(-1, -2))
true_inner_prods = torch.matmul(two_variable_sample["x2"], random_features[:, 1:].transpose(-2, -1))
true_inner_prods_rescaled = true_inner_prods / length_scales[0]
transformed_inner_prods = torch.cos(true_inner_prods_rescaled + initial_angles) * coeff_alpha
true_x1_prediction = initial_bias[0] + math.sqrt(2 / 5) * out_scales[0] * torch.sum(transformed_inner_prods, dim=-1)
true_x1_prediction = true_x1_prediction.unsqueeze(-1)
assert torch.allclose(prediction["x1"][:, 1, :], true_x1_prediction)
|
causica/test/functional_relationships/test_functional_relationships.py/0
|
{
"file_path": "causica/test/functional_relationships/test_functional_relationships.py",
"repo_id": "causica",
"token_count": 6838
}
| 713 |
import math
import pytest
import torch
from tensordict import TensorDict
from causica.sem.structural_equation_model import ate, counterfactual, ite
from . import create_lingauss_sem
@pytest.fixture(name="two_variable_dict")
def fixture_two_variable_dict():
return {"x1": torch.Size([1]), "x2": torch.Size([2])}
@pytest.fixture(name="three_variable_dict")
def fixture_three_variable_dict():
return {"x1": torch.Size([2]), "x2": torch.Size([2]), "x3": torch.Size([1])}
@pytest.mark.parametrize("graph", [torch.tensor([[0, 0], [1, 0.0]]), torch.tensor([[0, 1], [0, 0.0]])])
def test_ate_ite_cf_two_node(graph, two_variable_dict):
coef_matrix = torch.rand((3, 3))
sem = create_lingauss_sem(two_variable_dict, coef_matrix, graph, log_scale=math.log(1e-8))
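    # log_scale = log(1e-8) makes the additive noise negligible, so the SEM is effectively
    # deterministic and the treatment-effect estimates match the analytic linear means.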
intervention_values_a = TensorDict({"x2": torch.tensor([1.42, 0.42])}, batch_size=tuple())
intervention_values_b = TensorDict({"x2": torch.tensor([0.42, 1.42])}, batch_size=tuple())
average_treatment_effect = ate(sem, intervention_values_a, intervention_values_b)
sample_size = 100
factual_data = sem.sample(torch.Size([sample_size]))
if graph[0, 1] > 0.0:
expected_treatment_effect = torch.zeros_like(average_treatment_effect["x1"])
expected_mean_a = factual_data["x1"]
else:
expected_mean_a = torch.einsum("i,ij->j", intervention_values_a["x2"], coef_matrix[1:, :1])
expected_mean_b = torch.einsum("i,ij->j", intervention_values_b["x2"], coef_matrix[1:, :1])
expected_treatment_effect = expected_mean_a - expected_mean_b
torch.testing.assert_close(average_treatment_effect["x1"], expected_treatment_effect)
individual_treatment_effect = ite(sem, factual_data, intervention_values_a, intervention_values_b)
cf_effect = counterfactual(sem, factual_data, intervention_values_a)
torch.testing.assert_close(individual_treatment_effect["x1"], expected_treatment_effect.expand((sample_size, 1)))
torch.testing.assert_close(cf_effect["x1"], expected_mean_a.expand((sample_size, 1)))
def test_ate_ite_cf_three_node(three_variable_dict):
"""x1->x2->x3"""
coef_matrix = torch.rand((5, 5))
graph = torch.zeros(3, 3)
graph[0, 1] = graph[1, 2] = 1
sem = create_lingauss_sem(three_variable_dict, coef_matrix, graph, log_scale=math.log(1e-8))
intervention_values_a = TensorDict({"x2": torch.tensor([1.42, 0.42])}, batch_size=tuple())
intervention_values_b = TensorDict({"x2": torch.tensor([0.42, 1.42])}, batch_size=tuple())
average_treatment_effect = ate(sem, intervention_values_a, intervention_values_b)
torch.testing.assert_close(average_treatment_effect["x1"], torch.zeros_like(average_treatment_effect["x1"]))
expected_mean_a = torch.einsum("i,ij->j", intervention_values_a["x2"], coef_matrix[2:4, 4:])
expected_mean_b = torch.einsum("i,ij->j", intervention_values_b["x2"], coef_matrix[2:4, 4:])
expected_treatment_effect = expected_mean_a - expected_mean_b
torch.testing.assert_close(average_treatment_effect["x3"], expected_treatment_effect)
sample_size = 100
factual_data = sem.sample(torch.Size([sample_size]))
individual_treatment_effect = ite(sem, factual_data, intervention_values_a, intervention_values_b)
cf_effect = counterfactual(sem, factual_data, intervention_values_a)
torch.testing.assert_close(individual_treatment_effect["x1"], torch.zeros_like(individual_treatment_effect["x1"]))
torch.testing.assert_close(individual_treatment_effect["x3"], expected_treatment_effect.expand((sample_size, 1)))
torch.testing.assert_close(cf_effect["x1"], factual_data["x1"])
torch.testing.assert_close(cf_effect["x3"], expected_mean_a.expand((sample_size, 1)))
|
causica/test/sem/test_treatment_effects.py/0
|
{
"file_path": "causica/test/sem/test_treatment_effects.py",
"repo_id": "causica",
"token_count": 1417
}
| 714 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
###########################################################################################
#                         3D MODELS AS USED IN THE PAPER:                                 #
# Clifford Neural Layers for PDE Modeling #
###########################################################################################
from typing import Callable, Union
import torch
from torch import nn
from torch.nn import functional as F
from cliffordlayers.nn.modules.cliffordconv import CliffordConv3d
from cliffordlayers.nn.modules.cliffordfourier import CliffordSpectralConv3d
from cliffordlayers.nn.modules.groupnorm import CliffordGroupNorm3d
from cliffordlayers.models.basic.custom_layers import CliffordConv3dMaxwellEncoder, CliffordConv3dMaxwellDecoder
class CliffordFourierBasicBlock3d(nn.Module):
"""3D building block for Clifford FNO architectures.
Args:
g (Union[tuple, list, torch.Tensor]): Signature of Clifford algebra.
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
activation (Callable, optional): Activation function. Defaults to F.gelu.
kernel_size (int, optional): Kernel size of Clifford convolution. Defaults to 3.
stride (int, optional): Stride of Clifford convolution. Defaults to 1.
padding (int, optional): Padding of Clifford convolution. Defaults to 1.
        norm (bool, optional): Whether to use Clifford (group) normalization. Defaults to False.
num_groups (int, optional): Number of groups when using Clifford (group) normalization. Defaults to 1.
modes1 (int, optional): Number of Fourier modes in the first dimension. Defaults to 8.
modes2 (int, optional): Number of Fourier modes in the second dimension. Defaults to 8.
modes3 (int, optional): Number of Fourier modes in the third dimension. Defaults to 8.
"""
expansion: int = 1
def __init__(
self,
g: Union[tuple, list, torch.Tensor],
in_channels: int,
out_channels: int,
activation: Callable = F.gelu,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
norm: bool = False,
num_groups: int = 1,
modes1: int = 8,
modes2: int = 8,
modes3: int = 8,
):
super().__init__()
self.fourier = CliffordSpectralConv3d(
g,
in_channels,
out_channels,
modes1=modes1,
modes2=modes2,
modes3=modes3,
)
self.conv = CliffordConv3d(
g,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=True,
)
self.norm = CliffordGroupNorm3d(g, num_groups, in_channels) if norm else nn.Identity()
self.activation = activation
def forward(self, x: torch.Tensor) -> torch.Tensor:
x1 = self.fourier(x)
x2 = self.conv(x)
return self.activation(self.norm(x1 + x2))
class CliffordMaxwellNet3d(nn.Module):
"""3D building block for Clifford architectures with ResNet backbone network.
    The backbone network follows these three steps:
1. Clifford vector+bivector encoding.
2. Basic blocks as provided.
3. Clifford vector+bivector decoding.
Args:
g (Union[tuple, list, torch.Tensor]): Signature of Clifford algebra.
block (nn.Module): Choice of basic blocks.
num_blocks (list): List of basic blocks in each residual block.
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
activation (Callable, optional): Activation function. Defaults to F.gelu.
        norm (bool, optional): Whether to use Clifford (group) normalization. Defaults to False.
num_groups (int, optional): Number of groups when using Clifford (group) normalization. Defaults to 1.
"""
# For periodic boundary conditions, set padding = 0.
padding = 2
def __init__(
self,
g: Union[tuple, list, torch.Tensor],
block: nn.Module,
num_blocks: list,
in_channels: int,
out_channels: int,
hidden_channels: int,
activation: Callable,
norm: bool = False,
num_groups: int = 1,
):
super().__init__()
self.activation = activation
# Encoding and decoding layers.
self.encoder = CliffordConv3dMaxwellEncoder(
g,
in_channels=in_channels,
out_channels=hidden_channels,
kernel_size=1,
padding=0,
)
self.decoder = CliffordConv3dMaxwellDecoder(
g,
in_channels=hidden_channels,
out_channels=out_channels,
kernel_size=1,
padding=0,
)
# Residual blocks.
self.layers = nn.ModuleList(
[
self._make_basic_block(
g,
block,
hidden_channels,
num_blocks[i],
activation=activation,
norm=norm,
num_groups=num_groups,
)
for i in range(len(num_blocks))
]
)
def _make_basic_block(
self,
g,
block: nn.Module,
hidden_channels: int,
num_blocks: int,
activation: Callable,
norm: bool,
num_groups: int,
) -> nn.Sequential:
blocks = []
for _ in range(num_blocks):
blocks.append(
block(
g,
hidden_channels,
hidden_channels,
activation=activation,
norm=norm,
num_groups=num_groups,
)
)
return nn.Sequential(*blocks)
def forward(self, x: torch.Tensor) -> torch.Tensor:
assert x.dim() == 6
# Encoding layer.
x = self.encoder(self.activation(x))
# Embed for non-periodic boundaries.
if self.padding > 0:
B_dim, C_dim, *D_dims, I_dim = range(len(x.shape))
x = x.permute(B_dim, I_dim, C_dim, *D_dims)
x = F.pad(x, [0, self.padding, 0, self.padding, 0, self.padding])
B_dim, I_dim, C_dim, *D_dims = range(len(x.shape))
x = x.permute(B_dim, C_dim, *D_dims, I_dim)
# Apply residual layers.
for layer in self.layers:
x = layer(x)
# Decoding layer.
if self.padding > 0:
B_dim, C_dim, *D_dims, I_dim = range(len(x.shape))
x = x.permute(B_dim, I_dim, C_dim, *D_dims)
x = x[..., : -self.padding, : -self.padding, : -self.padding]
B_dim, I_dim, C_dim, *D_dims = range(len(x.shape))
x = x.permute(B_dim, C_dim, *D_dims, I_dim)
# Output layer.
x = self.decoder(x)
return x
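# A hypothetical smoke test (not from the original source; shapes are assumptions, with
# 6 input blades = 3 vector + 3 bivector components of the electromagnetic field):
#   model = CliffordMaxwellNet3d(
#       g=[1, 1, 1],
#       block=CliffordFourierBasicBlock3d,
#       num_blocks=[1],
#       in_channels=4,
#       out_channels=1,
#       hidden_channels=8,
#       activation=F.gelu,
#   )
#   x = torch.randn(2, 4, 32, 32, 32, 6)  # (batch, channels, x, y, z, blades)
#   out = model(x)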
|
cliffordlayers/cliffordlayers/models/basic/threed.py/0
|
{
"file_path": "cliffordlayers/cliffordlayers/models/basic/threed.py",
"repo_id": "cliffordlayers",
"token_count": 3375
}
| 715 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
import torch
import torch.nn.functional as F
from torch import nn
from ...cliffordkernels import (
get_1d_clifford_kernel,
get_2d_clifford_kernel,
get_3d_clifford_kernel,
)
from ...signature import CliffordSignature
class CliffordLinear(nn.Module):
"""Clifford linear layer.
Args:
g (Union[List, Tuple]): Clifford signature tensor.
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
bias (bool, optional): If True, adds a learnable bias to the output. Defaults to True.
"""
def __init__(
self,
g,
in_channels: int,
out_channels: int,
bias: bool = True,
) -> None:
super().__init__()
sig = CliffordSignature(g)
self.register_buffer("g", sig.g)
self.dim = sig.dim
self.n_blades = sig.n_blades
if self.dim == 1:
self._get_kernel = get_1d_clifford_kernel
elif self.dim == 2:
self._get_kernel = get_2d_clifford_kernel
elif self.dim == 3:
self._get_kernel = get_3d_clifford_kernel
else:
raise NotImplementedError(
f"Clifford linear layers are not implemented for {self.dim} dimensions. Wrong Clifford signature."
)
self.in_channels = in_channels
self.out_channels = out_channels
self.weight = nn.Parameter(torch.empty(self.n_blades, out_channels, in_channels))
if bias:
self.bias = nn.Parameter(torch.empty(self.n_blades, out_channels))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self):
# Initialization of the Clifford linear weight and bias tensors.
        # The number of blades is taken into account when calculating the bounds of the Kaiming uniform initialization.
nn.init.kaiming_uniform_(
self.weight.view(self.out_channels, self.in_channels * self.n_blades),
a=math.sqrt(5),
)
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(
self.weight.view(self.out_channels, self.in_channels * self.n_blades)
)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Reshape x such that the Clifford kernel can be applied.
B, _, I = x.shape
if not (I == self.n_blades):
raise ValueError(f"Input has {I} blades, but Clifford layer expects {self.n_blades}.")
B_dim, C_dim, I_dim = range(len(x.shape))
x = x.permute(B_dim, -1, C_dim)
x = x.reshape(B, -1)
# Get Clifford kernel, apply it.
_, weight = self._get_kernel(self.weight, self.g)
        # Guard against bias=False, in which case self.bias is None.
        output = F.linear(x, weight, self.bias.view(-1) if self.bias is not None else None)
# Reshape back.
output = output.view(B, I, -1)
B_dim, I_dim, C_dim = range(len(output.shape))
output = output.permute(B_dim, C_dim, I_dim)
return output
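# A minimal usage sketch (illustrative sizes): for g = [1, 1] the algebra has 2**2 = 4
# blades, so inputs are shaped (batch, in_channels, n_blades).
if __name__ == "__main__":
    layer = CliffordLinear(g=[1, 1], in_channels=3, out_channels=5)
    x = torch.randn(10, 3, 4)
    print(layer(x).shape)  # -> torch.Size([10, 5, 4])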
|
cliffordlayers/cliffordlayers/nn/modules/cliffordlinear.py/0
|
{
"file_path": "cliffordlayers/cliffordlayers/nn/modules/cliffordlinear.py",
"repo_id": "cliffordlayers",
"token_count": 1465
}
| 716 |
{% extends "main.html" %}
<!-- Render hero under tabs -->
{% block tabs %}
{{ super() }}
<!-- github button -->
<script async defer src="https://buttons.github.io/buttons.js"></script>
<style>
.md-footer-copyright {
display: none
}
.md-footer-nav__inner {
display: none
}
.md-content {
display: none
}
.tx-container {
height: fit-content;
padding-top: 0rem;
background: linear-gradient(var(--md-primary-fg-color), var(--md-primary-fg-color--dark) 60%, #fff 99%);
}
.tx-hero {
color: var(--md-primary-bg-color);
justify-content: center;
margin: 32px 2.5rem;
}
.tx-hero h1 {
margin-bottom: 0rem;
color: currentColor;
font-weight: 700
}
.tx-hero__content {
padding-bottom: 6rem;
margin: 0 auto;
/* justify-content: left;
padding-right: 3rem; */
}
.tx-hero__content h1 {
font-size: 2.5rem;
}
.tx-hero__content_small {
justify-content: left;
padding-right: 3rem;
}
.tx-hero__image {
max-width: 100%;
max-height: 100%;
order: 1;
padding-right: 1.5rem;
padding-bottom: 2.5rem;
}
.tx-hero .md-button {
margin-top: .5rem;
margin-right: .5rem;
color: var(--md-primary-bg-color)
}
.tx-container-2 {
padding: 0rem;
background-color: white;
margin-bottom: 0px;
}
.tx-hero__image-2 {
max-width: 100%;
max-height: 100%;
order: 1;
padding-right: 0.1rem;
padding-left: 0.25rem;
padding-top: 10px;
padding-bottom: 10px;
float: left;
}
.tx-hero__content-2 {
margin-left: 50px;
justify-content: left;
/* color: #009485; */
font-weight: 300;
padding: 0 0px;
padding-bottom: 40px;
word-break: break-word;
float: right;
}
.tx-hero__content-2 h1 {
margin-top: 10px;
color: black;
/* color: #009485; */
font-weight: 600;
/* font-size: 36px; */
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
line-height: normal;
/*font-family: inherit;*/
}
.tx-hero__content-2 p {
font-size: 17px;
line-height: 1.8em;
text-rendering: optimizeLegibility;
color: black;
display: block;
}
.tx-container-3 {
height: auto;
}
@media screen and (min-width: 60em) {
.md-sidebar--secondary {
display: none
}
.tx-hero {
display: flex;
align-items: center;
justify-content: center;
}
.tx-hero__content {
max-width: 28rem;
margin-top: 3.5rem;
margin-bottom: 3.5rem;
margin-left: 1.0rem;
margin-right: 4.0rem;
align-items: center;
}
}
@media screen and (min-width: 76.25em) {
.md-sidebar--primary {
display: none
}
.top-hr {
width: 100%;
display: flex;
max-width: 61rem;
margin-right: auto;
margin-left: auto;
padding: 0 .2rem;
}
.bottom-hr {
margin-top: 10px;
width: 100%;
display: flex;
max-width: 61rem;
margin-right: auto;
margin-left: auto;
padding: 0 .2rem;
}
}
</style>
<!-- 1st Section -->
<section class="tx-container">
<div class="md-grid md-typeset">
<div class="tx-hero">
<!-- Hero image -->
<div class="tx-hero__image">
<img
src="assets/images/logo.png"
alt=""
width="480"
draggable="false"
>
</div>
<!-- Hero content -->
<div class="tx-hero__content">
<h1 id="pdearena">CliffordLayers</h1>
<p>
Neural Network layers inspired by Clifford / Geometric Algebras. <br><br>
<!-- Based on the
<strong>
<a href="https://www.pytorchlightning.ai/">
PyTorch Lightning Framework
</a>.
</strong> -->
</p><br>
<a
href="{{ page.next_page.url | url }}"
title="{{ page.next_page.title | e }}"
class="md-button md-button--primary"
>
Get started
</a><br>
<a
href="https://github.com/microsoft/cliffordlayers"
title="{{ lang.t('source.link.title') }}"
class="md-button">
Contribute on GitHub
<img
class = icon
src="assets/images/icons/github-white.svg"
alt=""
width="24"
draggable="false"
>
</a>
<br>
<a
href="./research"
title="{{ lang.t('source.link.title') }}"
class="md-button md-button--primary">
Research
<svg style="width:24px;height:24px" viewBox="0 0 24 24">
<path fill="currentColor" d="M20,11H4V8H20M20,15H13V13H20M20,19H13V17H20M11,19H4V13H11M20.33,4.67L18.67,3L17,4.67L15.33,3L13.67,4.67L12,3L10.33,4.67L8.67,3L7,4.67L5.33,3L3.67,4.67L2,3V19A2,2 0 0,0 4,21H20A2,2 0 0,0 22,19V3L20.33,4.67Z" />
</svg>
</a><br>
</div>
</div>
</div>
</section>
<section class="tx-container-2">
</section>
<section class="tx-container-3">
<div class ="md-grid md-typeset">
<div class="tx-hero__content-2">
<h1 id="citation">Citation</h1>
<div class="highlight"><pre id="__code_2"><span></span><button class="md-clipboard md-icon" title="Copy to clipboard" data-clipboard-target="#__code_2 > code"></button><code tabindex="0"><a id="__codelineno-1-1" name="__codelineno-1-1" href="#__codelineno-1-1"></a><span class="nc">@article</span><span class="p">{</span><span class="nl">brandstetter2022clifford</span><span class="p">,</span><span class="w"></span>
<a id="__codelineno-1-2" name="__codelineno-1-2" href="#__codelineno-1-2"></a><span class="w"> </span><span class="na">title</span><span class="p">=</span><span class="s">{Clifford Neural Layers for PDE Modeling}</span><span class="p">,</span><span class="w"></span>
<a id="__codelineno-1-3" name="__codelineno-1-3" href="#__codelineno-1-3"></a><span class="w"> </span><span class="na">author</span><span class="p">=</span><span class="s">{Brandstetter, Johannes and Berg, Rianne van den and Welling, Max and Gupta, Jayesh K}</span><span class="p">,</span><span class="w"></span>
<a id="__codelineno-1-4" name="__codelineno-1-4" href="#__codelineno-1-4"></a><span class="w"> </span><span class="na">journal</span><span class="p">=</span><span class="s">{arXiv preprint arXiv:2209.04934}</span><span class="p">,</span><span class="w"></span>
<a id="__codelineno-1-5" name="__codelineno-1-5" href="#__codelineno-1-5"></a><span class="w"> </span><span class="na">year</span><span class="p">=</span><span class="s">{2022}</span><span class="w"></span>
<a id="__codelineno-1-6" name="__codelineno-1-6" href="#__codelineno-1-6"></a><span class="p">}</span><span class="w"></span>
</code></pre></div>
<div class="highlight"><pre id="__code_2"><span></span><button class="md-clipboard md-icon" title="Copy to clipboard" data-clipboard-target="#__code_2 > code"></button><code tabindex="0"><a id="__codelineno-1-1" name="__codelineno-1-1" href="#__codelineno-1-1"></a><span class="nc">@article</span><span class="p">{</span><span class="nl">ruhe2023geometric</span><span class="p">,</span><span class="w"></span>
<a id="__codelineno-1-2" name="__codelineno-1-2" href="#__codelineno-1-2"></a><span class="w"> </span><span class="na">title</span><span class="p">=</span><span class="s">{Geometric Clifford Algebra Networks}</span><span class="p">,</span><span class="w"></span>
<a id="__codelineno-1-3" name="__codelineno-1-3" href="#__codelineno-1-3"></a><span class="w"> </span><span class="na">author</span><span class="p">=</span><span class="s">{Ruhe, David and Gupta, Jayesh K and de Keninck, Steven and Welling, Max and Brandstetter, Johannes}</span><span class="p">,</span><span class="w"></span>
<a id="__codelineno-1-4" name="__codelineno-1-4" href="#__codelineno-1-4"></a><span class="w"> </span><span class="na">journal</span><span class="p">=</span><span class="s">{arXiv preprint arXiv:2302.06594}</span><span class="p">,</span><span class="w"></span>
<a id="__codelineno-1-5" name="__codelineno-1-5" href="#__codelineno-1-5"></a><span class="w"> </span><span class="na">year</span><span class="p">=</span><span class="s">{2023}</span><span class="w"></span>
<a id="__codelineno-1-6" name="__codelineno-1-6" href="#__codelineno-1-6"></a><span class="p">}</span><span class="w"></span>
</code></pre></div>
<p>Also consider starring <a href="https://github.com/microsoft/cliffordlayers">the github repo</a>.
<a class="github-button" href="https://github.com/microsoft/cliffordlayers" data-icon="octicon-star" data-show-count="true" aria-label="Star microsoft/cliffordlayers on GitHub">Star</a> </p>
</div>
</div>
</section>
{% endblock %}
<!-- Footer Section -->
{% block footer %}{% endblock %}
|
cliffordlayers/docs/overrides/home.html/0
|
{
"file_path": "cliffordlayers/docs/overrides/home.html",
"repo_id": "cliffordlayers",
"token_count": 5024
}
| 717 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
from cliffordlayers.nn.modules.cliffordfourier_deprecated import (
CliffordSpectralConv2d_deprecated,
CliffordSpectralConv3d_deprecated,
)
from cliffordlayers.nn.modules.cliffordfourier import (
CliffordSpectralConv2d,
CliffordSpectralConv3d,
)
def test_clifford_fourier_layer_2d():
"""Test 2d CFNO implementation for g=[1, 1] vs deprecated implementation."""
    # The deprecated CFNO only works if the number of input channels equals the number of output channels.
old_cfno2d = CliffordSpectralConv2d_deprecated(
in_channels=8,
out_channels=8,
modes1=16,
modes2=16,
)
input = torch.rand(1, 8, 128, 128, 4)
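    # Blade order for g=[1, 1]: index 0 = scalar, 1-2 = vector, 3 = pseudoscalar.
    # The deprecated layer takes the vector part and the spinor (scalar + pseudoscalar) part separately.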
input_spinor = torch.cat((input[..., 0].unsqueeze(-1), input[..., 3].unsqueeze(-1)), dim=-1)
input_vector = torch.cat((input[..., 1].unsqueeze(-1), input[..., 2].unsqueeze(-1)), dim=-1)
output_old = old_cfno2d(torch.view_as_complex(input_vector), torch.view_as_complex(input_spinor))
new_cfn2d = CliffordSpectralConv2d(
g=[1, 1],
in_channels=8,
out_channels=8,
modes1=16,
modes2=16,
)
new_cfn2d.weights = nn.Parameter(old_cfno2d.weights.permute(0, 2, 1, 3, 4))
output_new = new_cfn2d(input)
vector, spinor = output_old
output_old_trans = torch.cat(
(
spinor.real.unsqueeze(-1),
vector.real.unsqueeze(-1),
vector.imag.unsqueeze(-1),
spinor.imag.unsqueeze(-1),
),
dim=-1,
)
torch.testing.assert_close(output_old_trans, output_new)
def test_clifford_fourier_layer_3d():
"""Test 3d CFNO implementation for g=[1, 1, 1] vs deprecated implementation."""
    # The old CFNO only works if the number of input channels equals the number of output channels.
old_cfno3d = CliffordSpectralConv3d_deprecated(
in_channels=4,
out_channels=4,
modes1=16,
modes2=16,
modes3=16,
)
input = torch.rand(1, 4, 32, 32, 32, 8)
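    # For g=[1, 1, 1] the 8 blades pair into duals (0,7), (1,6), (2,5), (3,4); the
    # deprecated layer treats each pair as one complex-valued input.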
input_dual_1 = torch.cat((input[..., 0].unsqueeze(-1), input[..., 7].unsqueeze(-1)), dim=-1)
input_dual_2 = torch.cat((input[..., 1].unsqueeze(-1), input[..., 6].unsqueeze(-1)), dim=-1)
input_dual_3 = torch.cat((input[..., 2].unsqueeze(-1), input[..., 5].unsqueeze(-1)), dim=-1)
input_dual_4 = torch.cat((input[..., 3].unsqueeze(-1), input[..., 4].unsqueeze(-1)), dim=-1)
dual_1, dual_2, dual_3, dual_4 = old_cfno3d(
torch.view_as_complex(input_dual_1),
torch.view_as_complex(input_dual_2),
torch.view_as_complex(input_dual_3),
torch.view_as_complex(input_dual_4),
)
new_cfno3d = CliffordSpectralConv3d(
g=[1, 1, 1],
in_channels=4,
out_channels=4,
modes1=16,
modes2=16,
modes3=16,
)
new_cfno3d.weights = nn.Parameter(old_cfno3d.weights.permute(0, 2, 1, 3, 4, 5))
output_new = new_cfno3d(input)
output_old_trans = torch.cat(
(
dual_1.real.unsqueeze(-1),
dual_2.real.unsqueeze(-1),
dual_3.real.unsqueeze(-1),
dual_4.real.unsqueeze(-1),
dual_4.imag.unsqueeze(-1),
dual_3.imag.unsqueeze(-1),
dual_2.imag.unsqueeze(-1),
dual_1.imag.unsqueeze(-1),
),
dim=-1,
)
torch.testing.assert_close(output_old_trans, output_new)
def test_unit_weights_clifford_fourier_layer_2d():
"""Test 2d CFNO implementation vs CFFT and inverse CFFT without weight multiplication.
Input and output channels need to be the same; Fourier modes have to correspond to spatial resolution.
"""
in_channels = 8
nx = 128
ny = 128
input = torch.rand(1, 8, nx, ny, 4)
cfn2d = CliffordSpectralConv2d(
g=[1, 1],
in_channels=in_channels,
out_channels=in_channels,
modes1=nx,
modes2=ny,
multiply=False,
)
output = cfn2d(input)
torch.testing.assert_close(output, input)
def test_unit_weights_clifford_fourier_layer_3d():
"""Test 3d CFNO implementation vs CFFT and inverse CFFT without weight multiplication.
Input and output channels need to be the same; Fourier modes have to correspond to spatial resolution.
"""
in_channels = 8
nx = 32
ny = 32
nz = 32
input = torch.rand(1, 8, nx, ny, nz, 8)
cfn3d = CliffordSpectralConv3d(
g=[1, 1, 1],
in_channels=in_channels,
out_channels=in_channels,
modes1=nx,
modes2=ny,
modes3=nz,
multiply=False,
)
output = cfn3d(input)
torch.testing.assert_close(output, input)
|
cliffordlayers/tests/test_clifford_fourier.py/0
|
{
"file_path": "cliffordlayers/tests/test_clifford_fourier.py",
"repo_id": "cliffordlayers",
"token_count": 2226
}
| 718 |
# Sharing Updatable Models (SUM) on Blockchain Demo
(formerly Decentralized & Collaborative AI on Blockchain Demo)
[](https://github.com/microsoft/0xDeCA10B/actions/workflows/demo-test.yml)
A dashboard and examples for deploying updatable AI models to Ethereum. A video demo is available <a href="https://aka.ms/0xDeCA10B-demo" target="_blank">here</a>.
This folder also contains Solidity examples for models, data handlers, and incentive mechanisms for deploying models that are free to use for inference, as initially proposed in our introductory paper.
This project is made from a React project with Truffle added. This [Truffle example][truffle-react] was used to help add Truffle.
This work in its current form is just meant as an example and proof of concept.
It is not ready to be deployed for production yet.
# Setup
This section explains how to set up locally on Linux/WSL, alternatively, you can skip ahead and use a Docker image.
The following steps are intended for Linux/WSL and require `npm` with `node` version 10.
Other later versions of node might work too but some node-gyp issues occurred with version 14.
You will need to have `make` installed.
On Debian (e.g. Ubuntu) you can do:
```bash
sudo apt install build-essential
```
Run
```bash
./setup.sh
```
## Troubleshooting Setup
If you have problems running the setup steps related to node-gyp, then you might need to set Python 2.7 to be your default (just during the installation).
Recommendation: Temporarily set up a Python 2.7 Conda environment (just for the installation) and activate it:
```bash
conda create --name python2 python=2
conda activate python2
```
## Docker Setup
1. Clone this repo.
2. Navigate into the folder containing this README.md file:
```bash
cd 0xDeCA10B/demo
```
3. You can use a Docker image by running:
```bash
docker run --rm -it -p 3000:3000 -p 5387:5387 -p 7545:7545 -v ${PWD}:/root/workspace/demo -v /root/workspace/demo/node_modules -v /root/workspace/demo/client/node_modules --name decai-demo mcr.microsoft.com/samples/blockchain-ai/0xdeca10b-demo bash
# If this is your first time setting up then run:
./setup_libs.sh
# So that you can start a few processes in the Docker container, run:
byobu
```
4. You have completed the setup steps.
Next, refer to the Deploy steps lower down on this page.
If you want to use a different version of the Docker image, then you can find the available tags for the Docker image [here](https://mcr.microsoft.com/v2/samples/blockchain-ai/0xdeca10b-demo/tags/list) and check the details for the latest tag [here](https://mcr.microsoft.com/v2/samples/blockchain-ai/0xdeca10b-demo/manifests/latest).
### Building the Docker Image
(Optional)
If you want to build your own fresh image:
```bash
docker build -t decai-demo .
```
#### (Microsoft Devs) Updating the Public Image
First get permission to push to 0xdeca10bcontainerreg.azurecr.io.
Then
```bash
docker login 0xdeca10bcontainerreg.azurecr.io
newVersion=<Set the new version. E.g. 1.2.0>
docker tag decai-demo 0xdeca10bcontainerreg.azurecr.io/public/samples/blockchain-ai/0xdeca10b-demo:${newVersion}
docker tag decai-demo 0xdeca10bcontainerreg.azurecr.io/public/samples/blockchain-ai/0xdeca10b-demo:latest
docker push 0xdeca10bcontainerreg.azurecr.io/public/samples/blockchain-ai/0xdeca10b-demo:${newVersion}
docker push 0xdeca10bcontainerreg.azurecr.io/public/samples/blockchain-ai/0xdeca10b-demo:latest
```
(Microsoft Devs) To update the production website, see the instructions at the top of [service.Dockerfile](./service.Dockerfile).
## Update
To update dependencies after already setting up:
```bash
./setup.sh
```
# Deploy
There is a video example showing how to deploy <a href="https://aka.ms/0xDeCA10B-deploy" target="_blank">here</a>.
## Blockchain
Models and data are stored on a local blockchain.
Start the blockchain (uses `ganache-cli`) in one terminal.
Run:
```bash
yarn blockchain
```
Do once:
* In your browser, you will need to add http://localhost:7545 as a custom RPC; you can use the MetaMask extension for this.
* If required, you can set the chain ID to: 0xDeCA10B
* Copy the first private key output by the above command.
* Use that private key to create a new account in MetaMask.
## Server
(Optional) The server is used to store model meta-data and the original data used for training models.
The server is used by default in development mode but not in production mode.
If you want to store meta-data in a local database file instead of just within the browser, then start the server in one terminal.
This step allows you to see models listed when you open the dashboard for the first time.
If you do want to use the database file then you should have `REACT_APP_ENABLE_SERVICE_DATA_STORE=true` in your `client/.env.development` file.
Run:
```bash
yarn server
```
## Client
The website is the "client", it allows you to interract with the blockchain and optional server.
In another terminal.
Run:
```bash
yarn client
```
## Troubleshooting Deployment
### Blockchain Issues
Run `yarn clean` to delete your local blockchain and cached contracts. This will delete any transactions you have made, but it should make everything work again.
### Errors about a contract not found at a certain address
If you get errors about calling a contract then it's probably because you restarted your blockchain (Ganache) and the contract doesn't exist anymore on the blockchain.
This could happen if you restarted your computer.
You have to delete the generated .json files that keep track of contract addresses:
```bash
rm -f client/{build,src}/contracts/compiled/*.json
```
Then you should be able to deploy normally.
You can also try to `rm -rf blockchain_db` to delete your blockchain and restart from scratch.
### MetaMask Issues
#### Issues about nonce
If MetaMask gives issues about the nonce not being right for a transaction then it's probably because you restarted your blockchain, but MetaMask caches some basic things based on the URL and the network ID.
You can first try to reset your account in the MetaMask settings. This will clear your transaction history.
You shouldn't need to if you've been consistently using a blockchain for just this project but you can also try changing the network ID for Ganache. This can be done in the Ganache UI or CLI (--networkId).
#### MetaMask Loading Issues
If MetaMask is spinning and non-responsive:
1. Disable the extension here: chrome://extensions
2. Re-enable the extension.
3. Open the extension.
4. Change the network to use. E.g. Select Main Ethereum Network.
5. Log in to MetaMask.
6. Change the network back to the custom one.
7. Reject any queued transactions.
If you get the spinning issue again, then also try following the steps above with resetting your account as well as restarting the blockchain by:
1. Stop the `yarn blockchain` process.
2. Run `yarn clean`.
3. Run `yarn blockchain`.
[ganache]: https://truffleframework.com/ganache
[truffle-react]: https://truffleframework.com/boxes/react
# Testing
To run all automated tests:
```bash
yarn test
```
A local blockchain will be started and stopped so it's best not to have a blockchain running at the same address and port (e.g. one running through `yarn blockchain`).
## Manual Testing
Not all tests are automated (yet; maybe one day we'll automate them all).
Some things that should be manually tested in the UI after completing the deployment:
* Pick a model
* PREDICT: Verify that you can use the model to classify some data
* TRAIN: Add "incorrect" data as user "Bad"
* Add "correct" data as user "Good"
* REFUND: Verify that "Good" can get a refund for the "correct" data
* REWARD: Verify that "Good" can report "Bad"'s "incorrect" data
* Add a new model
## Running Specific Tests
To run specific smart contract tests and save time by not waiting for Truffle migrations:
* In one terminal, start a blockchain: `yarn blockchain`
* In another terminal session, run:
```bash
cd client
npx truffle test [<test file paths>] --network skipMigrations
# For example:
npx truffle test test/contracts/*.js test/contracts/**/*.js --network skipMigrations
```
# Linting
Run `yarn lint`.
Run `yarn lint-fix` to automatically resolve some issues.
## Solidity Files
We use [Ethlint][ethlint] for linting and enforce it on pull requests.
The above `yarn lint` and `yarn lint-fix` commands will also check Solidity files.
[deploy-video]: https://aka.ms/0xDeCA10B-deploy
[demo-video]: https://aka.ms/0xDeCA10B-demo
[ethlint]: https://github.com/duaraghav8/Ethlint
[overview-paper]: https://aka.ms/0xDeCA10B-paper
[overview-paper-dark]: https://aka.ms/0xDeCA10B-paper-dark
|
0xDeCA10B/demo/README.md/0
|
{
"file_path": "0xDeCA10B/demo/README.md",
"repo_id": "0xDeCA10B",
"token_count": 2571
}
| 0 |
import React from 'react'
import ReactDOM from 'react-dom'
import App from '../App'
it('renders without crashing', () => {
const div = document.createElement('div')
ReactDOM.render(<App />, div)
ReactDOM.unmountComponentAtNode(div)
})
|
0xDeCA10B/demo/client/src/__tests__/App.test.js/0
|
{
"file_path": "0xDeCA10B/demo/client/src/__tests__/App.test.js",
"repo_id": "0xDeCA10B",
"token_count": 79
}
| 1 |
pragma solidity ^0.6;
import "../../../lib/SafeMath.sol";
import {Ownable} from "../ownership/Ownable.sol";
/**
 * Interface for data handlers that track claimable amounts for added training data.
*/
interface DataHandler {
function updateClaimableAmount(bytes32 dataKey, uint rewardAmount) external;
}
/**
* Stores information for added training data and corresponding meta-data.
*/
contract DataHandler64 is Ownable, DataHandler {
using SafeMath for uint256;
struct StoredData {
/**
* The data stored.
*/
// Don't store the data because it's not really needed since we emit events when data is added.
// The main reason for storing the data in here is to ensure equality on future interactions like when refunding.
// This extra equality check is only necessary if you're worried about hash collisions.
// int64[] d;
/**
* The classification for the data.
*/
uint64 c;
/**
* The time it was added.
*/
uint t;
/**
* The address that added the data.
*/
address sender;
/**
* The amount that was initially given to deposit this data.
*/
uint initialDeposit;
/**
* The amount of the deposit that can still be claimed.
*/
uint claimableAmount;
/**
* The number of claims that have been made for refunds or reports.
* This should be the size of `claimedBy`.
*/
uint numClaims;
/**
* The set of addresses that claimed a refund or reward on this data.
*/
mapping(address => bool) claimedBy;
}
/**
* Meta-data for data that has been added.
*/
mapping(bytes32 => StoredData) public addedData;
function getClaimableAmount(int64[] memory data, uint64 classification, uint addedTime, address originalAuthor)
public view returns (uint) {
bytes32 key = keccak256(abi.encodePacked(data, classification, addedTime, originalAuthor));
StoredData storage existingData = addedData[key];
// Validate found value.
// usually unnecessary: require(isDataEqual(existingData.d, data), "Data is not equal.");
require(existingData.c == classification, "Classification is not equal.");
require(existingData.t == addedTime, "Added time is not equal.");
require(existingData.sender == originalAuthor, "Data isn't from the right author.");
return existingData.claimableAmount;
}
function getInitialDeposit(int64[] memory data, uint64 classification, uint addedTime, address originalAuthor)
public view returns (uint) {
bytes32 key = keccak256(abi.encodePacked(data, classification, addedTime, originalAuthor));
StoredData storage existingData = addedData[key];
// Validate found value.
// usually unnecessary: require(isDataEqual(existingData.d, data), "Data is not equal.");
require(existingData.c == classification, "Classification is not equal.");
require(existingData.t == addedTime, "Added time is not equal.");
require(existingData.sender == originalAuthor, "Data isn't from the right author.");
return existingData.initialDeposit;
}
function getNumClaims(int64[] memory data, uint64 classification, uint addedTime, address originalAuthor)
public view returns (uint) {
bytes32 key = keccak256(abi.encodePacked(data, classification, addedTime, originalAuthor));
StoredData storage existingData = addedData[key];
// Validate found value.
// usually unnecessary: require(isDataEqual(existingData.d, data), "Data is not equal.");
require(existingData.c == classification, "Classification is not equal.");
require(existingData.t == addedTime, "Added time is not equal.");
require(existingData.sender == originalAuthor, "Data isn't from the right author.");
return existingData.numClaims;
}
/**
* Check if two arrays of training data are equal.
*/
function isDataEqual(int64[] memory d1, int64[] memory d2) public pure returns (bool) {
if (d1.length != d2.length) {
return false;
}
for (uint i = 0; i < d1.length; ++i) {
if (d1[i] != d2[i]) {
return false;
}
}
return true;
}
/**
* Log an attempt to add data.
*
* @param msgSender The address of the one attempting to add data.
* @param cost The cost required to add new data.
* @param data A single sample of training data for the model.
* @param classification The label for `data`.
     * @return time The time at which the data was added, i.e. the current time in seconds.
*/
function handleAddData(address msgSender, uint cost, int64[] memory data, uint64 classification)
public onlyOwner
returns (uint time) {
time = now; // solium-disable-line security/no-block-members
bytes32 key = keccak256(abi.encodePacked(data, classification, time, msgSender));
StoredData storage existingData = addedData[key];
bool okayToOverwrite = existingData.sender == address(0) || existingData.claimableAmount == 0;
require(okayToOverwrite, "Conflicting data key. The data may have already been added.");
// Maybe we do want to allow duplicate data to be added but just not from the same address.
// Of course that is not sybil-proof.
// Store data.
addedData[key] = StoredData({
// not necessary: d: data,
c: classification,
t: time,
sender: msgSender,
initialDeposit: cost,
claimableAmount: cost,
numClaims: 0
});
}
/**
* Log a refund attempt.
*
* @param submitter The address of the one attempting a refund.
* @param data The data for which to attempt a refund.
* @param classification The label originally submitted for `data`.
* @param addedTime The time in seconds for which the data was added.
* @return claimableAmount The amount that can be claimed for the refund.
* @return claimedBySubmitter `true` if the data has already been claimed by `submitter`, otherwise `false`.
* @return numClaims The number of claims that have been made for the contribution before this request.
*/
function handleRefund(address submitter, int64[] memory data, uint64 classification, uint addedTime)
public onlyOwner
returns (uint claimableAmount, bool claimedBySubmitter, uint numClaims) {
bytes32 key = keccak256(abi.encodePacked(data, classification, addedTime, submitter));
StoredData storage existingData = addedData[key];
// Validate found value.
require(existingData.sender != address(0), "Data not found.");
// usually unnecessary: require(isDataEqual(existingData.d, data), "Data is not equal.");
require(existingData.c == classification, "Classification is not equal.");
require(existingData.t == addedTime, "Added time is not equal.");
require(existingData.sender == submitter, "Data is not from the sender.");
claimableAmount = existingData.claimableAmount;
claimedBySubmitter = existingData.claimedBy[submitter];
numClaims = existingData.numClaims;
// Upon successful completion of the refund the values will be claimed.
existingData.claimableAmount = 0;
existingData.claimedBy[submitter] = true;
existingData.numClaims = numClaims.add(1);
}
/**
* Retrieve information about the data to report.
*
* @param reporter The address of the one reporting the data.
* @param data The data to report.
* @param classification The label submitted for `data`.
* @param addedTime The time in seconds for which the data was added.
* @param originalAuthor The address that originally added the data.
* @return initialDeposit The amount that was initially deposited when the data contribution was submitted.
     * @return claimableAmount The amount remaining that can be claimed.
* @return claimedByReporter `true` if the data has already been claimed by `reporter`, otherwise `false`.
* @return numClaims The number of claims that have been made for the contribution before this request.
* @return dataKey The key to the stored data.
*/
function handleReport(
address reporter,
int64[] memory data, uint64 classification, uint addedTime, address originalAuthor)
public onlyOwner
returns (uint initialDeposit, uint claimableAmount, bool claimedByReporter, uint numClaims, bytes32 dataKey) {
dataKey = keccak256(abi.encodePacked(data, classification, addedTime, originalAuthor));
StoredData storage existingData = addedData[dataKey];
// Validate found value.
require(existingData.sender != address(0), "Data not found.");
// usually unnecessary: require(isDataEqual(existingData.d, data), "Data is not equal.");
require(existingData.c == classification, "Classification is not equal.");
require(existingData.t == addedTime, "Added time is not equal.");
require(existingData.sender == originalAuthor, "Sender is not equal.");
initialDeposit = existingData.initialDeposit;
claimableAmount = existingData.claimableAmount;
claimedByReporter = existingData.claimedBy[reporter];
numClaims = existingData.numClaims;
existingData.claimedBy[reporter] = true;
existingData.numClaims = numClaims.add(1);
}
/**
* @return `true` if the contribution has already been claimed by `claimer`, otherwise `false`.
*/
function hasClaimed(
int64[] memory data, uint64 classification,
uint addedTime, address originalAuthor,
address claimer)
public view returns (bool) {
bytes32 key = keccak256(abi.encodePacked(data, classification, addedTime, originalAuthor));
StoredData storage existingData = addedData[key];
// Validate found value.
// usually unnecessary: require(isDataEqual(existingData.d, data), "Data is not equal.");
require(existingData.c == classification, "Classification is not equal.");
require(existingData.t == addedTime, "Added time is not equal.");
require(existingData.sender == originalAuthor, "Data isn't from the right author.");
return existingData.claimedBy[claimer];
}
function updateClaimableAmount(bytes32 dataKey, uint rewardAmount)
public override onlyOwner {
StoredData storage existingData = addedData[dataKey];
// Already validated key lookup.
existingData.claimableAmount = existingData.claimableAmount.sub(rewardAmount);
}
}
|
0xDeCA10B/demo/client/src/contracts/data/DataHandler.sol/0
|
{
"file_path": "0xDeCA10B/demo/client/src/contracts/data/DataHandler.sol",
"repo_id": "0xDeCA10B",
"token_count": 3874
}
| 2 |
import assert from 'assert'
import Web3 from 'web3'
import { convertNum } from '../../float-utils'
import { ModelDeployer } from '../deploy-model'
import { CentroidInfo, DensePerceptronModel, Model, NaiveBayesModel, NearestCentroidModel, SparseCentroidInfo, SparseNearestCentroidModel, SparsePerceptronModel } from '../model-interfaces'
declare const web3: Web3
function assertEqualNumbers(actual: any, expected: any, message?: string | Error): void {
if (message) {
message += "\n"
}
if (web3.utils.isBN(actual)) {
if (web3.utils.isBN(expected)) {
message = `${message || ""}actual: ${actual} (BN)\nexpected: ${expected} (BN)`
return assert(actual.eq(expected), message)
} else {
const expectedBN = web3.utils.toBN(expected)
message = `${message || ""}actual: ${actual} (BN)\nexpected: ${expected} (${typeof expected}) => BN: ${expectedBN}`
return assert(actual.eq(expectedBN), message)
}
} else if (web3.utils.isBN(expected)) {
const actualBN = web3.utils.toBN(actual)
message = `${message || ""}actual: ${actual} (${typeof actual}) => BN: ${actualBN}\nexpected: ${expected} (BN)`
return assert(actualBN.eq(expected), message)
} else {
if (typeof actual === 'string') {
actual = parseInt(actual)
}
return assert.strictEqual(actual, expected, message)
}
}
describe("ModelDeployer", () => {
let account: string
const deployer = new ModelDeployer(web3)
beforeAll(async () => {
const accounts = await web3.eth.getAccounts()
// Pick a random account between 2 and 9 since 0 and 1 are usually used in the browser.
account = accounts[2 + Math.min(Math.floor(Math.random() * 8), 7)]
})
it("should deploy Naive Bayes", async () => {
const model = new NaiveBayesModel(
'naive bayes',
[
"A",
"B",
],
[
2,
3
],
[
[[0, 2], [1, 1]],
[[1, 3], [2, 2]],
],
9,
1.0,
)
const m = await deployer.deployModel(
model,
{
account,
})
assertEqualNumbers(await m.methods.smoothingFactor().call(), convertNum(model.smoothingFactor, web3), "smoothingFactor")
for (let i = 0; i < model.classifications.length; ++i) {
assert.strictEqual(await m.methods.classifications(i).call(), model.classifications[i])
assertEqualNumbers(await m.methods.getNumSamples(i).call(), model.classCounts[i])
for (const [featureIndex, count] of model.featureCounts[i]) {
assertEqualNumbers(await m.methods.getFeatureCount(i, featureIndex).call(), count)
}
}
assertEqualNumbers(await m.methods.getClassTotalFeatureCount(0).call(), 3)
assertEqualNumbers(await m.methods.getClassTotalFeatureCount(1).call(), 5)
})
it("should deploy dense Nearest Centroid", async () => {
const model = new NearestCentroidModel(
'dense nearest centroid classifier',
{
"AA": new CentroidInfo([-1, -1, 4.88, -8.44, -3], 2),
"BB": new CentroidInfo([+1, -1.8, 9.07, 3, -3], 2),
}
)
const m = await deployer.deployModel(
model,
{
account,
})
let i = -1
for (const [classification, centroidInfo] of Object.entries(model.centroids)) {
++i
assert.strictEqual(await m.methods.classifications(i).call(), classification)
assertEqualNumbers(await m.methods.getNumSamples(i).call(), centroidInfo.dataCount)
for (let j = 0; j < centroidInfo.centroid.length; ++j) {
const actual = await m.methods.getCentroidValue(i, j).call()
assertEqualNumbers(actual, convertNum(centroidInfo.centroid[j], web3), `centroid value for class ${i}[${j}]`)
}
}
})
it("should deploy sparse Nearest Centroid", async () => {
const model = new SparseNearestCentroidModel(
'sparse nearest centroid classifier',
{
// Values should all be positive since the representation is sparse.
"AA": new SparseCentroidInfo({ '0': 0, '1': +1, '7': 1.5, }, 2),
"BB": new SparseCentroidInfo({ '0': +1, '1': 0, '5': 0.5 }, 2),
}
)
const m = await deployer.deployModel(
model,
{
account,
})
let i = -1
for (const [classification, centroidInfo] of Object.entries(model.centroids)) {
++i
assert.strictEqual(await m.methods.classifications(i).call(), classification)
assertEqualNumbers(await m.methods.getNumSamples(i).call(), centroidInfo.dataCount)
for (const [featureIndex, value] of Object.entries(centroidInfo.centroid)) {
assertEqualNumbers(await m.methods.getCentroidValue(i, featureIndex).call(), convertNum(value, web3), `centroid value for class ${i}[${featureIndex}]`)
}
}
})
it("should deploy sparse Nearest Centroid with array centroids", async () => {
// This shouldn't happen but it could if a model gets exported from the Python code
// and the type is set correctly.
const model = {
type: 'sparse nearest centroid classifier',
centroids: {
"AA": {
centroid: [0, 1, 1.5, 2, 87.88],
dataCount: 2
},
"BB": {
centroid: [1, 0, 0.5, 3.787],
dataCount: 2
},
}
}
const m = await deployer.deployModel(
model as Model,
{
account,
})
let i = -1
for (const [classification, centroidInfo] of Object.entries(model.centroids)) {
++i
assert.strictEqual(await m.methods.classifications(i).call(), classification)
assertEqualNumbers(await m.methods.getNumSamples(i).call(), centroidInfo.dataCount)
for (const [featureIndex, value] of Object.entries(centroidInfo.centroid)) {
assertEqualNumbers(await m.methods.getCentroidValue(i, featureIndex).call(), convertNum(value, web3), `centroid value for class ${i}[${featureIndex}]`)
}
}
})
it("should deploy dense Perceptron", async () => {
const classifications = ["A", "B"]
const weights = [1, -1, 2.33, -8.66]
const intercept = 0
const m = await deployer.deployModel(
new DensePerceptronModel(
'dense perceptron',
classifications,
weights,
intercept,
),
{
account,
})
for (let i = 0; i < classifications.length; ++i) {
assert.strictEqual(await m.methods.classifications(i).call(), classifications[i])
}
for (let i = 0; i < weights.length; ++i) {
assertEqualNumbers(await m.methods.weights(i).call(), convertNum(weights[i], web3))
}
assertEqualNumbers(await m.methods.intercept().call(), convertNum(intercept, web3))
})
it("should deploy sparse Perceptron", async () => {
const classifications = ["AA", "BB"]
const weights = [2, -2, 2.44, -7.55, 0.537080412, 2000, -23232.32]
const sparseWeights = null
const intercept = 3
const m = await deployer.deployModel(
new SparsePerceptronModel(
'sparse perceptron',
classifications,
weights, sparseWeights,
intercept,
),
{
account,
})
for (let i = 0; i < classifications.length; ++i) {
assert.strictEqual(await m.methods.classifications(i).call(), classifications[i])
}
assertEqualNumbers(await m.methods.intercept().call(), convertNum(intercept, web3), "intercept:")
assertEqualNumbers(await m.methods.learningRate().call(), convertNum(0.5, web3), "learningRate:")
for (let i = 0; i < weights.length; ++i) {
assertEqualNumbers(await m.methods.weights(i).call(), convertNum(weights[i], web3), `weight ${i}:`)
}
})
it("should deploy sparse Perceptron - with sparseWeights", async () => {
const classifications = ["AA", "BB"]
const weights = [2, -2, 2.44, -7.55, -3]
const sparseWeights = { '8': 7, '11': 8, '12': 8.21, '15': -4.55, '17': -3 }
const intercept = 3
const m = await deployer.deployModel(
new SparsePerceptronModel(
'sparse perceptron',
classifications,
weights, sparseWeights,
intercept,
),
{
account,
// notify: console.debug,
})
for (let i = 0; i < classifications.length; ++i) {
assert.strictEqual(await m.methods.classifications(i).call(), classifications[i])
}
assertEqualNumbers(await m.methods.intercept().call(), convertNum(intercept, web3), "intercept:")
assertEqualNumbers(await m.methods.learningRate().call(), convertNum(0.5, web3), "learningRate:")
for (let i = 0; i < weights.length; ++i) {
const actual = await m.methods.weights(i).call()
assertEqualNumbers(actual, convertNum(weights[i], web3), `weight ${i}:`)
}
for (const [featureIndexKey, weight] of Object.entries(sparseWeights)) {
const featureIndex = parseInt(featureIndexKey, 10)
const actual = await m.methods.weights(featureIndex).call()
assertEqualNumbers(actual, convertNum(weight, web3), `sparseWeight ${featureIndex}:`)
}
})
})
|
0xDeCA10B/demo/client/src/ml-models/__tests__/deploy-model.test.ts/0
|
{
"file_path": "0xDeCA10B/demo/client/src/ml-models/__tests__/deploy-model.test.ts",
"repo_id": "0xDeCA10B",
"token_count": 3287
}
| 3 |
// In production, we register a service worker to serve assets from local cache.
// This lets the app load faster on subsequent visits in production, and gives
// it offline capabilities. However, it also means that developers (and users)
// will only see deployed updates on the "N+1" visit to a page, since previously
// cached resources are updated in the background.
// To learn more about the benefits of this model, read https://goo.gl/KwvDNy.
// This link also includes instructions on opting out of this behavior.
const isLocalhost = Boolean(
window.location.hostname === 'localhost' ||
// [::1] is the IPv6 localhost address.
window.location.hostname === '[::1]' ||
// 127.0.0.1/8 is considered localhost for IPv4.
window.location.hostname.match(
/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/
)
)
export default function register() {
if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) {
// The URL constructor is available in all browsers that support SW.
const publicUrl = new URL(process.env.PUBLIC_URL, window.location)
if (publicUrl.origin !== window.location.origin) {
// Our service worker won't work if PUBLIC_URL is on a different origin
// from what our page is served on. This might happen if a CDN is used to
// serve assets; see https://github.com/facebookincubator/create-react-app/issues/2374
return
}
window.addEventListener('load', () => {
const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`
if (isLocalhost) {
// This is running on localhost. Let's check whether a service worker still exists or not.
checkValidServiceWorker(swUrl)
// Add some additional logging to localhost, pointing developers to the
// service worker/PWA documentation.
navigator.serviceWorker.ready.then(() => {
console.log(
'This web app is being served cache-first by a service ' +
'worker. To learn more, visit https://goo.gl/SC7cgQ'
)
})
} else {
// Not localhost. Just register the service worker.
registerValidSW(swUrl)
}
})
}
}
function registerValidSW(swUrl) {
navigator.serviceWorker
.register(swUrl)
.then(registration => {
registration.onupdatefound = () => {
const installingWorker = registration.installing
installingWorker.onstatechange = () => {
if (installingWorker.state === 'installed') {
if (navigator.serviceWorker.controller) {
// At this point, the old content will have been purged and
// the fresh content will have been added to the cache.
// It's the perfect time to display a "New content is
// available; please refresh." message in your web app.
console.log('New content is available; please refresh.')
} else {
// At this point, everything has been precached.
// It's the perfect time to display a
// "Content is cached for offline use." message.
console.log('Content is cached for offline use.')
}
}
}
}
})
.catch(error => {
console.error('Error during service worker registration:', error)
})
}
function checkValidServiceWorker(swUrl) {
// Check if the service worker can be found. If it can't, reload the page.
fetch(swUrl)
.then(response => {
// Ensure service worker exists, and that we really are getting a JS file.
if (
response.status === 404 ||
response.headers.get('content-type').indexOf('javascript') === -1
) {
// No service worker found. Probably a different app. Reload the page.
navigator.serviceWorker.ready.then(registration => {
registration.unregister().then(() => {
window.location.reload()
})
})
} else {
// Service worker found. Proceed as normal.
registerValidSW(swUrl)
}
})
.catch(() => {
console.log(
'No internet connection found. App is running in offline mode.'
)
})
}
export function unregister() {
if ('serviceWorker' in navigator) {
navigator.serviceWorker.ready.then(registration => {
registration.unregister()
})
}
}
|
0xDeCA10B/demo/client/src/registerServiceWorker.js/0
|
{
"file_path": "0xDeCA10B/demo/client/src/registerServiceWorker.js",
"repo_id": "0xDeCA10B",
"token_count": 1410
}
| 4 |
const NaiveBayesClassifier = artifacts.require("./classification/NaiveBayesClassifier")
const { convertNum } = require('../../../src/float-utils-node')
contract('NaiveBayesClassifier', function (accounts) {
const toFloat = 1E9
const smoothingFactor = convertNum(1, web3, toFloat)
const classifications = ["ALARM", "WEATHER"]
const vocab = {}
let vocabLength = 0
let classifier
function parseBN(num) {
if (web3.utils.isBN(num)) {
return num.toNumber()
} else {
assert.typeOf(num, 'number')
return num
}
}
function mapFeatures(query) {
return query.toLocaleLowerCase('en').split(/\s+/).map(w => {
let result = vocab[w]
if (result === undefined) {
vocab[w] = result = vocabLength++
}
return result
})
}
before("deploy classifier", async () => {
const queries = [
// ALARM
"alarm for 11 am tomorrow",
// WEATHER
"will I need a jacket for tomorrow"]
const featureMappedQueries = queries.map(mapFeatures)
const featureCounts = featureMappedQueries.map(fv => {
const result = {}
fv.forEach(v => {
if (!(v in result)) {
result[v] = 0
}
result[v] += 1
})
return Object.entries(result).map(pair => [parseInt(pair[0]), pair[1]].map(web3.utils.toBN))
})
const classCounts = [1, 1]
const totalNumFeatures = vocabLength
classifier = await NaiveBayesClassifier.new(classifications, classCounts, featureCounts, totalNumFeatures, smoothingFactor)
assert.equal(await classifier.getClassTotalFeatureCount(0).then(parseBN), 5,
"Total feature count for class 0.")
assert.equal(await classifier.getClassTotalFeatureCount(1).then(parseBN), 7,
"Total feature count for class 1.")
for (let featureIndex = 0; featureIndex < 5; ++featureIndex) {
assert.equal(await classifier.getFeatureCount(0, featureIndex).then(parseBN), 1)
}
for (let featureIndex = 5; featureIndex < 11; ++featureIndex) {
assert.equal(await classifier.getFeatureCount(0, featureIndex).then(parseBN), 0)
}
assert.equal(await classifier.getFeatureCount(1, 0).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(1, 1).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(1, 2).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(1, 3).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(1, 4).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(1, 5).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(1, 6).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(1, 7).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(1, 8).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(1, 9).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(1, 10).then(parseBN), 0)
})
it("...should predict the classification ALARM", async () => {
const data = mapFeatures("alarm for 9 am tomorrow")
const prediction = await classifier.predict(data).then(parseBN)
assert.equal(prediction, 0)
})
it("...should predict the classification WEATHER", async () => {
const data = mapFeatures("will i need a jacket today")
const prediction = await classifier.predict(data).then(parseBN)
assert.equal(prediction, 1)
})
it("...should update", async () => {
const newFeature = vocabLength + 10
const predictionData = [newFeature]
assert.equal(await classifier.predict(predictionData).then(parseBN), 0)
const data = [0, 1, 2, newFeature]
const classification = 1
const prevFeatureCounts = []
for (let i in data) {
const featureIndex = data[i]
const featureCount = await classifier.getFeatureCount(classification, featureIndex).then(parseBN)
// `Array.prototype.push` is synchronous, so it should not be awaited.
prevFeatureCounts.push(featureCount)
}
const prevTotalFeatureCount = await classifier.getClassTotalFeatureCount(classification).then(parseBN)
const prevNumSamples = await classifier.getNumSamples(classification).then(parseBN)
const updateResponse = await classifier.update(data, classification)
// To help with optimizing gas usage:
// console.log(` update gasUsed: ${updateResponse.receipt.gasUsed}`);
assert.isBelow(updateResponse.receipt.gasUsed, 113704 + 1, "Too much gas used.")
for (let i in prevFeatureCounts) {
const featureIndex = data[i]
const featureCount = await classifier.getFeatureCount(classification, featureIndex).then(parseBN)
assert.equal(featureCount, prevFeatureCounts[i] + 1)
}
const totalFeatureCount = await classifier.getClassTotalFeatureCount(classification).then(parseBN)
assert.equal(totalFeatureCount, prevTotalFeatureCount + data.length)
const numSamples = await classifier.getNumSamples(classification).then(parseBN)
assert.equal(numSamples, prevNumSamples + 1)
assert.equal(await classifier.predict(predictionData).then(parseBN), classification)
})
it("...should add class", async () => {
const classCount = 3
const featureCounts = [[0, 2], [1, 3], [6, 5]]
const classification = "NEW"
const originalNumClassifications = await classifier.getNumClassifications().then(parseBN)
await classifier.addClass(classCount, featureCounts, classification)
const newNumClassifications = await classifier.getNumClassifications().then(parseBN)
assert.equal(newNumClassifications, originalNumClassifications + 1)
const classIndex = originalNumClassifications
assert.equal(await classifier.getClassTotalFeatureCount(classIndex).then(parseBN),
featureCounts.map(pair => pair[1]).reduce((a, b) => a + b),
"Total feature count for the new class is wrong.")
assert.equal(await classifier.getFeatureCount(classIndex, 0).then(parseBN), 2)
assert.equal(await classifier.getFeatureCount(classIndex, 1).then(parseBN), 3)
assert.equal(await classifier.getFeatureCount(classIndex, 2).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(classIndex, 3).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(classIndex, 4).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(classIndex, 5).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(classIndex, 6).then(parseBN), 5)
assert.equal(await classifier.getFeatureCount(classIndex, 7).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(classIndex, 8).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(classIndex, 9).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(classIndex, 10).then(parseBN), 0)
assert.equal(await classifier.predict([0, 1, 6]).then(parseBN), classIndex)
})
it("... should add feature counts", async () => {
const featureCounts = [
[[0, 1]],
[[1, 1]],
]
const classCounts = [1, 1]
const totalNumFeatures = 2
const classifier = await NaiveBayesClassifier.new(["0", "1"], classCounts, featureCounts, totalNumFeatures, smoothingFactor)
assert.equal(await classifier.getFeatureCount(0, 0).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(0, 1).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(1, 0).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(1, 1).then(parseBN), 1)
// Overrides
await classifier.initializeCounts([[0, 0]], 0)
assert.equal(await classifier.getFeatureCount(0, 0).then(parseBN), 0)
await classifier.initializeCounts([[0, 1], [2, 2]], 0)
assert.equal(await classifier.getFeatureCount(0, 0).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(0, 2).then(parseBN), 2)
assert.equal(await classifier.getClassTotalFeatureCount(0).then(parseBN), 1 + 0 + 1 + 2)
await classifier.initializeCounts([[2, 1], [3, 2]], 1)
assert.equal(await classifier.getFeatureCount(1, 0).then(parseBN), 0)
assert.equal(await classifier.getFeatureCount(1, 1).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(1, 2).then(parseBN), 1)
assert.equal(await classifier.getFeatureCount(1, 3).then(parseBN), 2)
assert.equal(await classifier.getClassTotalFeatureCount(1).then(parseBN), 1 + 1 + 2)
// A new class.
})
})
|
0xDeCA10B/demo/client/test/contracts/classification/naivebayes.js/0
|
{
"file_path": "0xDeCA10B/demo/client/test/contracts/classification/naivebayes.js",
"repo_id": "0xDeCA10B",
"token_count": 2787
}
| 5 |
#!/bin/bash
set -ex
if [ "${CI}" == "true" ]; then
# Don't do globally in CI because of possible permissions issues.
npm install yarn
else
npm install -g yarn
fi
yarn install
(cd client && yarn install)
./setup_libs.sh
|
0xDeCA10B/demo/setup.sh/0
|
{
"file_path": "0xDeCA10B/demo/setup.sh",
"repo_id": "0xDeCA10B",
"token_count": 84
}
| 6 |
from skmultiflow.trees import HAT, RegressionHAT
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
class DecisionTreeModule(SciKitClassifierModule):
def __init__(self, regression=False):
if regression:
model_initializer = lambda: RegressionHAT(
# leaf_prediction='mc'
)
else:
model_initializer = lambda: HAT(
# leaf_prediction='mc',
# nominal_attributes=[ 4],
)
super().__init__(_model_initializer=model_initializer)
|
0xDeCA10B/simulation/decai/simulation/contract/classification/decision_tree.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/contract/classification/decision_tree.py",
"repo_id": "0xDeCA10B",
"token_count": 260
}
| 7 |
import unittest
from collections import defaultdict
from typing import cast
from injector import Injector
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.data.data_handler import StoredData
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.incentive.prediction_market import MarketPhase, \
PredictionMarket, PredictionMarketImModule
from decai.simulation.contract.objects import Msg, TimeMock
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.simple_data_loader import SimpleDataModule
from decai.simulation.logging_module import LoggingModule
class TestPredictionMarket(unittest.TestCase):
def test_market_like_original_paper(self):
inj = Injector([
SimpleDataModule,
LoggingModule,
PerceptronModule,
PredictionMarketImModule(
allow_greater_deposit=False,
group_contributions=False,
reset_model_during_reward_phase=False,
),
])
balances = inj.get(Balances)
data = inj.get(DataLoader)
im = cast(PredictionMarket, inj.get(IncentiveMechanism))
im.owner = 'owner'
assert isinstance(im, PredictionMarket)
init_train_data_portion = 0.2
initializer_address = 'initializer'
total_bounty = 100_000
balances.initialize(initializer_address, total_bounty)
good_contributor_address = 'good_contributor'
initial_good_balance = 10_000
balances.initialize(good_contributor_address, initial_good_balance)
bad_contributor_address = 'bad_contributor'
initial_bad_balance = 10_000
balances.initialize(bad_contributor_address, initial_bad_balance)
(x_train, y_train), (x_test, y_test) = data.load_data()
init_idx = int(len(x_train) * init_train_data_portion)
assert init_idx > 0
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
# Split test set into pieces.
num_pieces = 10
test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)
# Ending criteria:
min_length_s = 100
min_num_contributions = min(len(x_remaining), 100)
# Commitment Phase
self.assertIsNone(im.state)
im.model.init_model(x_init_data, y_init_data, save_model=True)
hashes_split = 3
test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
test_dataset_hashes[:hashes_split],
min_length_s, min_num_contributions)
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
test_reveal_index = im.add_test_set_hashes(Msg(initializer_address, 0), test_dataset_hashes[hashes_split:])
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
im.reveal_init_test_set(test_sets[test_reveal_index])
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
# Participation Phase
value = 100
total_deposits = defaultdict(float)
for i in range(min_num_contributions):
data = x_remaining[i]
classification = y_remaining[i]
if i % 2 == 0:
contributor = good_contributor_address
else:
contributor = bad_contributor_address
classification = 1 - classification
cost, _ = im.handle_add_data(contributor, value, data, classification)
self.assertEqual(im.min_stake, cost, "Cost should be the minimum stake because of the options passed in.")
balances.send(contributor, im.owner, cost)
total_deposits[contributor] += cost
# Reward Phase
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
im.end_market()
self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
for i, test_set_portion in enumerate(test_sets):
if i != test_reveal_index:
im.verify_next_test_set(test_set_portion)
self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
while im.remaining_bounty_rounds > 0:
im.process_contribution()
# Collect rewards.
self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)
for contributor in [good_contributor_address, bad_contributor_address]:
# Don't need to pass the right StoredData.
# noinspection PyTypeChecker
reward = im.handle_refund(contributor, None, 0, False, None)
balances.send(im.owner, contributor, reward)
self.assertGreater(total_deposits[good_contributor_address], 0)
self.assertGreater(total_deposits[bad_contributor_address], 0)
# General checks that should be true for a market with a reasonably sensitive model.
self.assertLess(balances[im.owner], total_bounty,
f"Some of the bounty should be distributed.\n"
f"Balances: {balances.get_all()}")
self.assertLess(0, balances[im.owner])
# Sometimes the bad contributor happens to get some value but not much.
self.assertAlmostEqual(balances[bad_contributor_address], initial_bad_balance, delta=2,
msg=f"The bad contributor should lose funds.\n"
f"Balances: {balances.get_all()}")
self.assertGreater(balances[good_contributor_address], initial_good_balance)
self.assertLess(balances[bad_contributor_address], balances[good_contributor_address])
self.assertLessEqual(balances[good_contributor_address] - balances[bad_contributor_address],
total_bounty)
self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
balances[good_contributor_address] + balances[bad_contributor_address] +
balances[im.owner],
"Should be a zero-sum.")
def test_market(self):
inj = Injector([
SimpleDataModule,
LoggingModule,
PerceptronModule,
PredictionMarketImModule(
allow_greater_deposit=True,
group_contributions=True,
reset_model_during_reward_phase=True,
),
])
balances = inj.get(Balances)
data = inj.get(DataLoader)
im = cast(PredictionMarket, inj.get(IncentiveMechanism))
im.owner = 'owner'
assert isinstance(im, PredictionMarket)
init_train_data_portion = 0.2
initializer_address = 'initializer'
total_bounty = 100_000
balances.initialize(initializer_address, total_bounty)
good_contributor_address = 'good_contributor'
initial_good_balance = 10_000
balances.initialize(good_contributor_address, initial_good_balance)
bad_contributor_address = 'bad_contributor'
initial_bad_balance = 10_000
balances.initialize(bad_contributor_address, initial_bad_balance)
(x_train, y_train), (x_test, y_test) = data.load_data()
init_idx = int(len(x_train) * init_train_data_portion)
assert init_idx > 0
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
# Split test set into pieces.
num_pieces = 10
test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)
# Ending criteria:
min_length_s = 100
min_num_contributions = min(len(x_remaining), 100)
# Commitment Phase
self.assertIsNone(im.state)
im.model.init_model(x_init_data, y_init_data, save_model=True)
hashes_split = 3
test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
test_dataset_hashes[:hashes_split],
min_length_s, min_num_contributions)
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
test_reveal_index = im.add_test_set_hashes(Msg(initializer_address, 0), test_dataset_hashes[hashes_split:])
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
im.reveal_init_test_set(test_sets[test_reveal_index])
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
# Participation Phase
value = 100
total_deposits = defaultdict(float)
for i in range(min_num_contributions):
data = x_remaining[i]
classification = y_remaining[i]
if i % 2 == 0:
contributor = good_contributor_address
else:
contributor = bad_contributor_address
classification = 1 - classification
cost, _ = im.handle_add_data(contributor, value, data, classification)
balances.send(contributor, im.owner, cost)
total_deposits[contributor] += cost
# Reward Phase
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
im.end_market()
self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
for i, test_set_portion in enumerate(test_sets):
if i != test_reveal_index:
im.verify_next_test_set(test_set_portion)
self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
while im.remaining_bounty_rounds > 0:
im.process_contribution()
# Collect rewards.
self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)
for contributor in [good_contributor_address, bad_contributor_address]:
# Don't need to pass the right StoredData.
# noinspection PyTypeChecker
reward = im.handle_refund(contributor, None, 0, False, None)
balances.send(im.owner, contributor, reward)
self.assertGreater(total_deposits[good_contributor_address], 0)
self.assertGreater(total_deposits[bad_contributor_address], 0)
# General checks that should be true for a market with a reasonably sensitive model.
self.assertLess(balances[im.owner], total_bounty,
f"Some of the bounty should be distributed.\n"
f"Balances: {balances.get_all()}")
self.assertLess(0, balances[im.owner])
self.assertLess(balances[bad_contributor_address], initial_bad_balance)
self.assertGreater(balances[good_contributor_address], initial_good_balance)
self.assertLess(balances[bad_contributor_address], balances[good_contributor_address])
self.assertLessEqual(balances[good_contributor_address] - balances[bad_contributor_address],
total_bounty)
self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
balances[good_contributor_address] + balances[bad_contributor_address] +
balances[im.owner],
"Should be a zero-sum.")
self.assertEqual(initial_bad_balance - total_deposits[bad_contributor_address],
balances[bad_contributor_address],
"The bad contributor should lose all of their deposits.")
def test_report(self):
inj = Injector([
SimpleDataModule,
LoggingModule,
PerceptronModule,
PredictionMarketImModule(
allow_greater_deposit=True,
group_contributions=True,
reset_model_during_reward_phase=True,
),
])
balances = inj.get(Balances)
data = inj.get(DataLoader)
im = cast(PredictionMarket, inj.get(IncentiveMechanism))
im.owner = 'owner'
time_method = inj.get(TimeMock)
assert isinstance(im, PredictionMarket)
init_train_data_portion = 0.2
initializer_address = 'initializer'
total_bounty = 100_000
balances.initialize(initializer_address, total_bounty)
good_contributor_address = 'good_contributor'
initial_good_balance = 10_000
balances.initialize(good_contributor_address, initial_good_balance)
bad_contributor_address = 'bad_contributor'
initial_bad_balance = 10_000
balances.initialize(bad_contributor_address, initial_bad_balance)
(x_train, y_train), (x_test, y_test) = data.load_data()
init_idx = int(len(x_train) * init_train_data_portion)
assert init_idx > 0
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
# Split test set into pieces.
num_pieces = 10
test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)
# Ending criteria:
min_length_s = 100
min_num_contributions = min(len(x_remaining), 100)
# Commitment Phase
self.assertIsNone(im.state)
im.model.init_model(x_init_data, y_init_data, save_model=True)
test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
test_dataset_hashes,
min_length_s, min_num_contributions)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
assert 0 <= test_reveal_index < len(test_dataset_hashes)
im.reveal_init_test_set(test_sets[test_reveal_index])
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
# Participation Phase
value = 100
total_deposits = defaultdict(float)
stored_data = None
for i in range(min_num_contributions):
time_method.add_time(60)
data = x_remaining[i]
classification = y_remaining[i]
if i % 2 == 0:
contributor = good_contributor_address
else:
contributor = bad_contributor_address
classification = 1 - classification
cost, _ = im.handle_add_data(contributor, value, data, classification)
if stored_data is None:
stored_data = StoredData(classification, time_method(), contributor, cost, cost)
balances.send(contributor, im.owner, cost)
total_deposits[contributor] += cost
# Reward Phase
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
im.end_market()
time_method.add_time(60)
self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
for i, test_set_portion in enumerate(test_sets):
if i != test_reveal_index:
im.verify_next_test_set(test_set_portion)
self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
while im.remaining_bounty_rounds > 0:
time_method.add_time(60)
im.process_contribution()
# Collect rewards.
self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)
# Get some stored data.
# Make sure reporting doesn't work yet.
reward = im.handle_report(bad_contributor_address, stored_data, False, None)
self.assertEqual(0, reward, "There should be no reward yet.")
time_method.add_time(im.any_address_claim_wait_time_s)
reward = im.handle_report(bad_contributor_address, stored_data, False, None)
balances.send(im.owner, bad_contributor_address, reward)
# Don't need to pass the right StoredData.
# noinspection PyTypeChecker
reward = im.handle_refund(bad_contributor_address, None, 0, False, None)
balances.send(im.owner, bad_contributor_address, reward)
# General checks that should be true for a market with a reasonably sensitive model.
self.assertLess(balances[im.owner], total_bounty,
f"Some of the bounty should be distributed.\n"
f"Balances: {balances.get_all()}")
self.assertLess(0, balances[im.owner])
self.assertGreater(total_deposits[good_contributor_address], 0)
self.assertGreater(total_deposits[bad_contributor_address], 0)
# The bad contributor profited because they reported the good contributor.
self.assertGreater(balances[bad_contributor_address], initial_bad_balance)
self.assertLess(balances[good_contributor_address], initial_good_balance)
self.assertLess(balances[good_contributor_address], balances[bad_contributor_address])
self.assertLessEqual(balances[bad_contributor_address] - balances[good_contributor_address],
total_bounty)
self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
balances[good_contributor_address] + balances[bad_contributor_address] +
balances[im.owner],
"Should be a zero-sum.")
self.assertEqual(initial_good_balance - total_deposits[good_contributor_address],
balances[good_contributor_address],
"The good contributor should lose all of their deposits.")
|
0xDeCA10B/simulation/decai/simulation/contract/incentive/tests/test_prediction_market.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/contract/incentive/tests/test_prediction_market.py",
"repo_id": "0xDeCA10B",
"token_count": 8090
}
| 8 |
import itertools
import json
import os
import random
import time
from collections import Counter
from dataclasses import dataclass
from enum import Enum
from logging import Logger
from operator import itemgetter
from pathlib import Path
from typing import Collection, List, Optional, Tuple
import numpy as np
import pandas as pd
import spacy
from injector import ClassAssistedBuilder, inject, Module, provider, singleton
from sklearn.feature_extraction.text import TfidfVectorizer
from spacy.cli import download
from tqdm import tqdm
from .data_loader import DataLoader
class Label(Enum):
RELIABLE = 0
UNRELIABLE = 1
@dataclass
class News:
text: Optional[str]
label: Label
@inject
@dataclass
class _SignalMediaDataLoader(DataLoader):
"""
INCOMPLETE BECAUSE MAPPING THE SOURCE NAMES TO DOMAIN NAMES IS TRICKY.
See https://github.com/aldengolab/fake-news-detection/issues/4
Following logic of https://github.com/aldengolab/fake-news-detection.
Requires the Signal Media dataset from http://research.signalmedia.co/newsir16/signal-dataset.html to be at
simulation/training_data/news/sample-1M.jsonl
and https://github.com/OpenSourcesGroup/opensources with sources.json in simulation/training_data/news/
"""
_logger: Logger
_media_types = {'News'}
def classifications(self) -> List[str]:
raise NotImplementedError
def find_source_site(self, source_name: str, sources: Collection[str]) -> Optional[str]:
"""
:param source_name: The name of the source.
:param sources: Source domain names.
:return: The source domain name from `sources` or `None` if no mapping can be found.
"""
# TODO
result = None
return result
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
data_folder_path = os.path.join(__file__, '../../../../training_data/news')
signal_data_path = os.path.join(data_folder_path, 'sample-1M.jsonl')
if not os.path.exists(signal_data_path):
raise Exception(f"Could not find the Signal Media dataset at \"{signal_data_path}\"."
"\nYou must obtain it from http://research.signalmedia.co/newsir16/signal-dataset.html"
f" and follow the instructions to obtain it. Then extract it to \"{signal_data_path}\".")
sources_path = os.path.join(data_folder_path, 'sources.json')
if not os.path.exists(sources_path):
raise Exception(f"Could not find the sources dataset at \"{sources_path}\"."
"\nYou must obtain it from https://github.com/OpenSourcesGroup/opensources and put"
f" sources.json in \"{data_folder_path}\".")
with open(sources_path) as f:
loaded_sources = json.load(f)
sources = dict()
for source, info in loaded_sources.items():
problem_types = (info['type'], info['2nd type'], info['3rd type'])
sources[source] = set(filter(None, problem_types))
self._logger.info("Found %d sources with labels.", len(sources))
# Name: website name in `sources`.
source_mapping = {}
not_found_flag = -1
with open(signal_data_path) as f:
for index, line in tqdm(enumerate(f),
desc="Filtering news articles",
unit_scale=True, mininterval=2, unit=" articles"
):
news = json.loads(line)
news_id = news['id']
title = news['title']
text = news['content']
source = news['source']
# media-type is either "News" or "Blog"
media_type = news['media-type']
published_date = news['published']
if media_type not in self._media_types:
continue
source_site = source_mapping.get(source)
if source_site is None:
source_site = self.find_source_site(source, sources)
if source_site is not None:
source_mapping[source] = source_site
else:
source_mapping[source] = not_found_flag
continue
elif source_site == not_found_flag:
continue
# TODO Use article and set label.
with open(os.path.join(data_folder_path, 'source_mapping.json'), 'w') as f:
# Persist the discovered mapping, sorted by source name, for later inspection.
json.dump(dict(sorted(source_mapping.items(), key=itemgetter(0))), f)
self._logger.info("Found %d sources in the articles.", len(source_mapping))
# TODO Set up output.
(x_train, y_train), (x_test, y_test) = (None, None), (None, None)
if train_size is not None:
x_train, y_train = x_train[:train_size], y_train[:train_size]
if test_size is not None:
x_test, y_test = x_test[:test_size], y_test[:test_size]
self._logger.info("Done loading news data.")
return (x_train, y_train), (x_test, y_test)
@inject
@dataclass
class NewsDataLoader(DataLoader):
"""
Load data from news sources.
Requires data from https://www.kaggle.com/c/fake-news/data to be saved to "simulation/training_data/news/fake-news/train.csv".
"""
_logger: Logger
_train_split = 0.7
_replace_entities_enabled = False
"""
If True, entities will be replaced in text with the entity's label surrounded by angle brackets: "<LABEL>".
Accuracy with replacement: 0.9172
Accuracy without replacement: 0.9173
Disabled because using spaCy is slow, it will be tricky to use spaCy in JavaScript,
and it didn't change the evaluation metrics much.
"""
_entity_types_to_replace = {'PERSON', 'GPE', 'ORG', 'DATE', 'TIME', 'PERCENT',
'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL'}
def classifications(self) -> List[str]:
return ["RELIABLE", "UNRELIABLE"]
def __post_init__(self):
spacy_model = 'en_core_web_lg'
download(spacy_model)
self._nlp = spacy.load(spacy_model, disable={'tagger', 'parser', 'textcat'})
def _load_kaggle_data(self, data_folder_path: str) -> Collection[News]:
"""
Load data from https://www.kaggle.com/c/fake-news/data.
"""
# Don't use the test data because it has no labels.
fake_news_data_path = os.path.join(data_folder_path, 'fake-news', 'train.csv')
if not os.path.exists(fake_news_data_path):
raise Exception(f"Could not find the Fake News dataset at \"{fake_news_data_path}\"."
"\nYou must obtain it from https://www.kaggle.com/c/fake-news/data.")
data = pd.read_csv(fake_news_data_path, na_values=dict(text=[]), keep_default_na=False)
result = []
for row in data.itertuples():
label = Label.RELIABLE if row.label == 0 else Label.UNRELIABLE
if len(row.text) > 0:
result.append(News(row.text, label))
# Consistent shuffle to aim for a mostly even distribution of labels.
random.shuffle(result, lambda: 0.618)
return result
def _replace_entities(self, doc) -> str:
# Remove names in text using spaCy.
result = doc.text
for ent in doc.ents[::-1]:
if ent.label_ in self._entity_types_to_replace:
result = result[:ent.start_char] + "<" + ent.label_ + ">" + result[ent.end_char:]
return result
def _pre_process_text(self, doc) -> str:
# TODO Remove name of news sources.
if self._replace_entities_enabled:
result = self._replace_entities(doc)
else:
assert isinstance(doc, str)
result = doc
return result
def _pre_process(self, news_articles: Collection[News], train_size: int, test_size: int) -> \
Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
self._logger.info("Getting features for %d articles.", len(news_articles))
# Only use binary features.
ngram_range = (2, 2)
# Don't use IDF because we need integer features.
t = TfidfVectorizer(max_features=1000, ngram_range=ngram_range, norm=None, use_idf=False)
test_start = len(news_articles) - test_size
x_train = map(lambda news: news.text, itertools.islice(news_articles, train_size))
x_test = map(lambda news: news.text, itertools.islice(news_articles, test_start, len(news_articles)))
if self._replace_entities_enabled:
self._logger.debug("Will replace entities.")
x_train = self._nlp.pipe(x_train, batch_size=128)
x_test = self._nlp.pipe(x_test, batch_size=128)
else:
self._logger.debug("Replacing entities is disabled.")
x_train = map(self._pre_process_text, x_train)
x_test = map(self._pre_process_text, x_test)
x_train = t.fit_transform(tqdm(x_train,
desc="Processing training data",
total=train_size,
unit_scale=True, mininterval=2,
unit=" articles"
)).toarray()
x_test = t.transform(tqdm(x_test,
desc="Processing testing data",
total=test_size,
unit_scale=True, mininterval=2,
unit=" articles"
)).toarray()
y_train = np.array([news.label.value for news in itertools.islice(news_articles, train_size)], np.int8)
y_test = np.array([news.label.value for news in itertools.islice(news_articles,
test_start, len(news_articles))], np.int8)
self._logger.debug("Training labels: %s", Counter(y_train))
self._logger.debug("Test labels: %s", Counter(y_test))
self._logger.info("Done getting features.")
return (x_train, y_train), (x_test, y_test)
def load_data(self, train_size: int = None, test_size: int = None) -> \
Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
self._logger.info("Loading news data.")
data_folder_path = os.path.join(__file__, '../../../../training_data/news')
# Look for cached data.
file_identifier = f'news-data-{train_size}-{test_size}-replace_ents_{self._replace_entities_enabled}.npy'
base_path = Path(os.path.dirname(__file__)) / 'cached_data'
os.makedirs(base_path, exist_ok=True)
cache_paths = {
'x_train': base_path / f'x_train-{file_identifier}',
'y_train': base_path / f'y_train-{file_identifier}',
'x_test': base_path / f'x_test-{file_identifier}',
'y_test': base_path / f'y_test-{file_identifier}'
}
# Use if modified in the last day.
if all([p.exists() for p in cache_paths.values()]) and \
all([time.time() - p.stat().st_mtime < 60 * 60 * 24 for p in cache_paths.values()]):
self._logger.info("Loaded cached News data from %s.", cache_paths)
return (np.load(cache_paths['x_train']), np.load(cache_paths['y_train'])), \
(np.load(cache_paths['x_test']), np.load(cache_paths['y_test']))
data = self._load_kaggle_data(data_folder_path)
# Separate train and test data.
if train_size is None:
if test_size is None:
train_size = int(self._train_split * len(data))
else:
train_size = len(data) - test_size
if test_size is None:
test_size = len(data) - train_size
if train_size + test_size > len(data):
raise Exception("There is not enough data for the requested sizes."
f"\n data size: {len(data)}"
f"\n train size: {train_size}"
f"\n test size: {test_size}")
(x_train, y_train), (x_test, y_test) = self._pre_process(data, train_size, test_size)
np.save(cache_paths['x_train'], x_train, allow_pickle=False)
np.save(cache_paths['y_train'], y_train, allow_pickle=False)
np.save(cache_paths['x_test'], x_test, allow_pickle=False)
np.save(cache_paths['y_test'], y_test, allow_pickle=False)
self._logger.info("Done loading news data.")
return (x_train, y_train), (x_test, y_test)
@dataclass
class NewsDataModule(Module):
@provider
@singleton
def provide_data_loader(self, builder: ClassAssistedBuilder[NewsDataLoader]) -> DataLoader:
return builder.build()
|
0xDeCA10B/simulation/decai/simulation/data/news_data_loader.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/data/news_data_loader.py",
"repo_id": "0xDeCA10B",
"token_count": 5986
}
| 9 |
import os
import sys
import math
from injector import inject, Injector
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.decision_tree import DecisionTreeModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.titanic_data_loader import TitanicDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator
# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
class Runner(object):
@inject
def __init__(self,
data: DataLoader,
simulator: Simulator,
):
self._data = data
self._s = simulator
def run(self):
init_train_data_portion = 0.10
# Set up the agents that will act in the simulation.
agents = [
# Good
Agent(address="Good",
start_balance=10_000,
mean_deposit=5,
stdev_deposit=1,
mean_update_wait_s=10 * 60,
),
# Malicious: determined with the goal of disrupting others.
Agent(address="Bad",
start_balance=10_000,
mean_deposit=10,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
]
# Start the simulation.
self._s.simulate(agents,
baseline_accuracy=0.806,
init_train_data_portion=init_train_data_portion,
accuracy_plot_wait_s=math.inf,
)
# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
# Set up the data, model, and incentive mechanism.
inj = Injector([
DecisionTreeModule,
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
TitanicDataModule,
])
inj.get(Runner).run()
if __name__ == '__main__':
# Play the game.
inj = Injector([
DecisionTreeModule(regression=False),
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
TitanicDataModule
])
d = inj.get(DataLoader)
(x_train, y_train), (x_test, y_test) = d.load_data()
c = inj.get(Classifier)
c.init_model(x_train, y_train)
score = c.evaluate(x_train, y_train)
import random
for _ in range(10):
i = random.randrange(len(x_train))
print(f"{i:04d}: {x_train[i]}: {y_train[i]}")
print(f"Prediction: {c.predict(x_train[i])}")
print(f"Evaluation on training data: {score * 100:0.2f}%")
if len(x_test) > 0:
score = c.evaluate(x_test, y_test)
print(f"Evaluation on test data: {score * 100:0.2f}%")
|
0xDeCA10B/simulation/decai/simulation/simulate_titanic_dt.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/simulate_titanic_dt.py",
"repo_id": "0xDeCA10B",
"token_count": 1440
}
| 10 |
authorName: default
experimentName: cifar10
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 10
trainingServicePlatform: local
searchSpacePath: search_space.json
useAnnotation: false
tuner:
builtinTunerName: TPE
trial:
command: python main.py
codeDir: .
gpuNum: 0
|
AI-System/Labs/AdvancedLabs/Lab8/hpo-answer/config.yml/0
|
{
"file_path": "AI-System/Labs/AdvancedLabs/Lab8/hpo-answer/config.yml",
"repo_id": "AI-System",
"token_count": 94
}
| 11 |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='mylinear_cuda',
ext_modules=[
CUDAExtension('mylinear_cuda', [
'mylinear_cuda.cpp',
'mylinear_cuda_kernel.cu',
])
],
cmdclass={
'build_ext': BuildExtension
})
|
AI-System/Labs/BasicLabs/Lab3/mylinear_cuda_extension/setup.py/0
|
{
"file_path": "AI-System/Labs/BasicLabs/Lab3/mylinear_cuda_extension/setup.py",
"repo_id": "AI-System",
"token_count": 155
}
| 12 |
# Deploying a PyTorch Inference Service with Docker
## Building the TorchServe Image
#### Prepare the TorchServe source code:
```bash
# $BRANCH_NAME specifies the version to use
$ BRANCH_NAME=v0.1.0
$ rm -rf serve
$ git clone https://github.com/pytorch/serve.git
$ cd serve
$ git checkout -b $BRANCH_NAME
$ cd ..
```
#### Build the CPU-based image:
```bash
$ docker build --file Dockerfile.infer.cpu -t torchserve:0.1-cpu .
```
On success, you should see logs similar to:
```
...
Successfully built e36f1a01e514
Successfully tagged torchserve:0.1-cpu
```
#### Build the GPU-based image:
```bash
$ docker build --file Dockerfile.infer.gpu -t torchserve:0.1-gpu .
```
On success, you should see logs similar to:
```
Successfully built 456de47eb88e
Successfully tagged torchserve:0.1-gpu
```
## Starting a Container from the TorchServe Image
The following examples start a container and expose ports 8080/8081 to the host.
#### Start a CPU container
To use a specific version, pass the corresponding tag (e.g., 0.1-cpu):
```bash
$ docker run --rm -it -p 8080:8080 -p 8081:8081 torchserve:0.1-cpu
```
After startup, you will see logs like:
```
TS Home: /usr/local/lib/python3.6/dist-packages
Current directory: /home/model-server
Temp directory: /home/model-server/tmp
Number of GPUs: 0
Number of CPUs: 12
Max heap size: 28200 M
Python executable: /usr/bin/python3
Config file: /home/model-server/config.properties
Inference address: http://0.0.0.0:8080
Management address: http://0.0.0.0:8081
Model Store: /home/model-server/model-store
Initial Models: N/A
Log dir: /home/model-server/logs
Metrics dir: /home/model-server/logs
Netty threads: 32
Netty client threads: 0
Default workers per model: 12
Blacklist Regex: N/A
Maximum Response Size: 6553500
Maximum Request Size: 6553500
2021-03-08 13:41:40,625 [INFO ] main org.pytorch.serve.ModelServer - Initialize Inference server with: EpollServerSocketChannel.
2021-03-08 13:41:40,690 [INFO ] main org.pytorch.serve.ModelServer - Inference API bind to: http://0.0.0.0:8080
2021-03-08 13:41:40,690 [INFO ] main org.pytorch.serve.ModelServer - Initialize Management server with: EpollServerSocketChannel.
2021-03-08 13:41:40,691 [INFO ] main org.pytorch.serve.ModelServer - Management API bind to: http://0.0.0.0:8081
Model server started.
...
```
To use the latest official image, use the `latest` tag:
```
$ docker run --rm -it -p 8080:8080 -p 8081:8081 pytorch/torchserve:latest
```
#### Start a GPU container
To use a specific version, pass the corresponding tag (e.g., 0.1-cuda10):
```bash
$ docker run --rm -it --gpus all -p 8080:8080 -p 8081:8081 pytorch/torchserve:0.1-cuda10.1-cudnn7-runtime
```
For the latest official image, use the `latest-gpu` tag:
```bash
$ docker run --rm -it --gpus all -p 8080:8080 -p 8081:8081 pytorch/torchserve:latest-gpu
```
#### Access the TorchServe APIs from the same host
Open a new session on the same host; TorchServe's inference and management APIs are reachable on the host's ports 8080 and 8081. For example:
```bash
# Ping the service
$ curl http://localhost:8080/ping
```
A healthy response looks like:
```
{
"status": "Healthy"
}
```
#### Check running containers on the host
```bash
$ docker ps
```
Output:
```
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
7d3f0e9be89a torchserve:0.1-cpu "/usr/local/bin/dock…" About a minute ago Up About a minute 0.0.0.0:8080-8081->8080-8081/tcp sad_engelbart
```
#### Enter the container
Run the exec command:
```
$ docker exec -it <containerid> /bin/bash
```
Output:
```
# Now inside the container shell
root@7d3f0e9be89a:/home/model-server#
```
#### Check the container's port mappings
```bash
$ docker port <containerid>
```
Output:
```
8081/tcp -> 0.0.0.0:8081
8080/tcp -> 0.0.0.0:8080
```
#### Stop the TorchServe container
```bash
$ docker container stop <containerid>
```
The container ID can be found with the `docker ps` command.
## Deploying a Model for Inference
To run inference with TorchServe, the first step is to package the model into a MAR file using the model archiver.
#### Enter the container
```
$ docker exec -it <containerid> /bin/bash
```
#### Create or enter the model store directory
```bash
$ cd /home/model-server/model-store
```
#### Download a model
```bash
$ apt-get update
$ apt-get install wget
$ wget https://download.pytorch.org/models/densenet161-8d451a50.pth
```
Output:
```
20xx-0x-0x 05:13:05 (84.6 MB/s) - 'densenet161-8d451a50.pth' saved [115730790/115730790]
```
#### Archive the model with the model archiver
Install it:
```
$ cd /serve/model-archiver
$ pip install .
```
Run the command:
```bash
$ torch-model-archiver --model-name densenet161 --version 1.0 --model-file /serve/examples/image_classifier/densenet_161/model.py --serialized-file /home/model-server/model-store/densenet161-8d451a50.pth --export-path /home/model-server/model-store --extra-files /serve/examples/image_classifier/index_to_name.json --handler image_classifier
```
On success, the .mar file appears; run `ls`:
```
$ ls /home/model-server/model-store/
densenet161-8d451a50.pth densenet161.mar
```
#### Start TorchServe to serve the model
Once the model has been archived and stored, use the `torchserve` command to serve it for inference.
First stop any TorchServe instance that is already running:
```
$ torchserve --stop
```
If it stops successfully, you will see:
```
TorchServe has stopped.
```
```bash
$ cd /home/model-server/
$ torchserve --start --ncs --model-store model-store --models densenet161.mar
```
If startup succeeds, you will see logs similar to the following:
```
e89a,timestamp:1591593790
2020-06-08 05:23:10,347 [INFO ] W-9009-densenet161_1.0 org.pytorch.serve.wlm.WorkerThread - Backend response time: 6595
2020-06-08 05:23:10,348 [DEBUG] W-9009-densenet161_1.0 org.pytorch.serve.wlm.WorkerThread - W-9009-densenet161_1.0 State change WORKER_STARTED -> WORKER_MODEL_LOADED
2020-06-08 05:23:10,348 [INFO ] W-9009-densenet161_1.0 TS_METRICS - W-9009-densenet161_1.0.ms:6816|#Level:Host|#hostname:7d3f0e9be89a,timestamp:1591593790
2020-06-08 05:23:10,358 [INFO ] W-9010-densenet161_1.0 org.pytorch.serve.wlm.WorkerThread - Backend response time: 6606
2020-06-08 05:23:10,358 [DEBUG] W-9010-densenet161_1.0 org.pytorch.serve.wlm.WorkerThread - W-9010-densenet161_1.0 State change WORKER_STARTED -> WORKER_MODEL_LOADED
2020-06-08 05:23:10,358 [INFO ] W-9010-densenet161_1.0 TS_METRICS - W-9010-densenet161_1.0.ms:6826|#Level:Host|#hostname:7d3f0e9be89a,timestamp:1591593790
2020-06-08 05:23:10,362 [INFO ] W-9006-densenet161_1.0 org.pytorch.serve.wlm.WorkerThread - Backend response time: 6610
2020-06-08 05:23:10,362 [DEBUG] W-9006-densenet161_1.0 org.pytorch.serve.wlm.WorkerThread - W-9006-densenet161_1.0 State change WORKER_STARTED -> WORKER_MODEL_LOADED
2020-06-08 05:23:10,362 [INFO ] W-9006-densenet161_1.0 TS_METRICS - W-9006-densenet161_1.0.ms:6832|#Level:Host|#hostname:7d3f0e9be89a,timestamp:1591593790
```
#### 使用模型进行推理
为了测试模型服务,发送请求到服务器的`predictions` API.
参考以下步骤执行:
* 开启新的终端窗口
* 使用`curl`命令下载一张示例图片[cute pictures of a kitten](https://www.google.com/search?q=cute+kitten&tbm=isch&hl=en&cr=&safe=images),并可以通过`-o`标志将其重命名为`kitten.jpg`(下例中使用`-O`保留了原始文件名`kitten.jpg`)。
* 使用`curl`将kitten图像以`POST`方式发送到TorchServe的`predictions`端点。
下面的代码完成了所有的三个步骤
在主机开启新的会话,执行以下命令:
```bash
$ curl -O https://s3.amazonaws.com/model-server/inputs/kitten.jpg
$ curl -X POST http://127.0.0.1:8080/predictions/densenet161 -T kitten.jpg
```
预测端点会返回JSON格式的响应,例如:
```json
[
{
"tiger_cat": 0.46933549642562866
},
{
"tabby": 0.4633878469467163
},
{
"Egyptian_cat": 0.06456148624420166
},
{
"lynx": 0.0012828214094042778
},
{
"plastic_bag": 0.00023323034110944718
}
]
```
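要验证模型是否注册成功,也可以访问管理API(8081端口)查询已注册的模型列表。下面给出一个基于Python `requests` 库的简单示例(假设端口映射与上文一致,示例仅作演示):
```python
import requests

# TorchServe管理API默认监听8081端口, GET /models 返回已注册模型列表
resp = requests.get("http://localhost:8081/models")
resp.raise_for_status()
print(resp.json())  # 例如: {"models": [{"modelName": "densenet161", ...}]}
```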
## 本节作业
请同学参考以上步骤,进行自己的模型的服务部署,并返回推理请求的结果截图。
|
AI-System/Labs/BasicLabs/Lab5/inference.md/0
|
{
"file_path": "AI-System/Labs/BasicLabs/Lab5/inference.md",
"repo_id": "AI-System",
"token_count": 4262
}
| 13 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 10.2.1 分布式强化学习算法
- [10.2.1 分布式强化学习算法](#1021-分布式强化学习算法)
- [基本概念](#基本概念)
- [分布式强化学习算法的发展](#分布式强化学习算法的发展)
- [DQN](#dqn)
- [GORILA](#gorila)
- [A3C](#a3c)
- [ApeX](#apex)
- [IMPALA](#impala)
- [SEEDRL](#seedrl)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
深度强化学习算法的发展与进步让许多行业受益。然而复杂的应用场景和大规模的工业应用对算法的要求也特别高,与此同时,深度强化学习由于需要探索试错的模式进行学习,因而强化学习需要探索大量的数据。模型的计算时间长、收敛速度慢是导致整个强化学习的迭代速度慢的重要原因,从而也抑制了强化学习在工业界的快速发展。传统的单机CPU、GPU等计算已经远远不能够满足大数据时代的要求,经典的分布式集群(多机)、GPU集群运算开始进入了深度强化学习领域。
| 游戏智能体 | CPU cores | GPU cores | TPU cores |
| :---- | ---- | ---- | ---- |
| AlphaStar[<sup>[7]</sup>](#alphastar) | ~50,000 | N/A | ~3,000 |
|OpenAI Five[<sup>[8]</sup>](#openfive) | ~80,000 - 173,000 | 480 - 1,536 | N/A |
| 王者荣耀5v5[<sup>[9]</sup>](#moba) | 300,000 | ~200 | N/A |
从表中可以看到,在复杂游戏中训练一个高水平的智能体所需的计算资源是巨大的。尽管目前学术界有许多工作在想方设法地优化算法来提升样本利用率,但是整体上来讲,目前深度强化学习对于训练样本的需求量仍然是非常惊人的。所以,也有不少工作在致力于如何更高效地利用计算资源,设计更好的计算架构,从而在更短的时间内产生更多的样本,达到更好的训练效果。许多分布式强化学习(Distributed Reinforcement Learning)的算法和架构应运而生,分布式强化学习也因此得到了极大的发展。
<div align="center">
<img src="./img/distributed_rl_development_update.png" ch="1000" />
</div>
<div align=center>图10.2.1 分布式强化学习发展路线</div>
例如,在图10.2.1里展示了和DQN相关的分布式强化学习的发展路线,从2013的DQN到2019年的SEEDRL,许多研究人员基于前人的工作不断地向前改进和发展。在下面的章节里,我们首先会介绍一些和分布式强化学习相关的基本概念,然后我们会讨论这些算法是如何演化的。
## 基本概念
- **采样器**(Actor):负责和环境交互采集数据,从学习器里拿到推理用的模型,将采样到的数据发给学习器或者重放缓冲区。采样器和环境交互时,根据当前状态给环境发送要执行的动作,环境返回下一个状态和单步的奖励。采样器将历史的数据(包括当前状态,动作,下一步状态等)收集起来,为学习器的训练样本提供储备。
- **学习器**(Learner):主要功能是拿到训练样本来训练强化学习的模型(例如:策略网络或者价值网络),更新模型参数。
- **重放缓冲区**(Replay Buffer): 用来缓存采样到的数据。用户可以定义特定的采样策略,通过采样策略生成训练样本给学习器。对于在线策略学习来说,重放缓冲区通常是一个先进先出的队列;而对于某些离线策略算法(例如,DQN)来说,重放缓冲区通常被设计为根据优先级采样的存储器。
- **行为策略**(Behavior Policy):采样器和环境交互时采用的策略。通常存在于采样器中,区别于目标策略。
- **目标策略**(Target Policy):根据行为策略产生的样本不断学习和优化的策略,即训练完成最终用来使用的策略。
- **在线策略**(On-policy)算法和**离线策略**(Off-policy)算法:在线策略算法就是指这类算法要求行为策略和目标策略得保持一致,而离线策略算法则不需要这个限制条件,目标策略可以根据任意行为策略产生的样本来学习和优化。
- **架构**和**交互方式**:架构指的是强化学习里不同模块之间连接关系,以及不同模块使用的硬件资源。交互的方式指的是模块之间数据流动的方式以及传输的数据内容。模块之间的交互方式包括同步或者异步等。
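为了更直观地理解上述模块的交互方式,下面给出一个极简的采样器-学习器-重放缓冲区交互的Python伪代码草图(仅为示意,`env`、`policy`、`learner`等接口均为假设,并非任何特定框架的实现):
```python
import random
from collections import deque

replay_buffer = deque(maxlen=100000)  # 重放缓冲区: 先进先出队列

def actor_loop(env, policy, learner):
    """采样器: 用行为策略与环境交互, 把轨迹写入重放缓冲区"""
    state = env.reset()
    while True:
        action = policy.act(state)                  # 行为策略给出动作
        next_state, reward, done = env.step(action)
        replay_buffer.append((state, action, reward, next_state))
        state = env.reset() if done else next_state
        policy.sync(learner.latest_params())        # 从学习器同步最新模型(实际系统中通常按一定频率)

def learner_loop(learner, batch_size=32):
    """学习器: 从重放缓冲区采样小批量数据, 更新目标策略参数"""
    while True:
        if len(replay_buffer) >= batch_size:
            batch = random.sample(replay_buffer, batch_size)
            learner.update(batch)                   # 梯度更新
```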
## 分布式强化学习算法的发展
我们根据时间轴来讲述分布式强化学习算法和架构的发展和变化。
### DQN
原始的**DQN**[<sup>[1]</sup>](#dqn)的架构非常简单,所有的模块都可以由单进程里实现。
如图10.2.2所示:
首先,**采样器**在**环境**里交互并采集训练数据。采样器根据当前状态 $s$ 做出动作 $a$,环境收到动作以后返回下一个状态 $s^{'}$ 和 单步的奖励 $r$ 给采样器。
采样器将收集到的一系列的 $(s, a, s^{'})$ 数据放到**重放缓冲区**里。
**学习器**每隔一定的间隔,将数据从重放缓冲区里拿出并更新强化学习Q网络(Q Network);而采样器也会用新更新的Q网络来进行新一轮数据的采样。
很显然,当学习器和采样器在一个进程里的时候,他们互相等待且不能并行,即:学习器要等待采样器收集新的训练数据,而采样器在开始下一轮收集数据之前要更新当前的行为策略。这降低了学习器和采样器的工作效率。即使将他们并行起来,由于学习器需要大量的数据去拟合,而单个采样器的效率太低,导致学习器大部分时间处于效率不高的状态。
<div align="center">
<img src="./img/DQN_arch.png" ch="500" width=70% />
</div>
<div align=center>图10.2.2 DQN的架构 </div>
### GORILA
**Gorila**[<sup>[2]</sup>](#gorila)是早期的将深度强化学习拓展到大规模并行场景的经典工作之一。
当时深度强化学习的SOTA还是DQN算法,因此该工作基于DQN提出了变体,拓展到大规模并行的场景。
在该架构中,采样器和学习器不必再互相等待。如图10.2.3所示,在Gorila的架构中,**学习器**可以是多个实例。并且每个学习器中的Q网络的参数梯度会发给**参数服务器**(Parameter Server)。**参数服务器**收到后以异步SGD的方式更新网络模型。这个模型以一定的频率同步到采样器中。同样的,在Gorila的架构里,采样器也可以是多个实例。**采样器**基于该模型产生动作在环境中采样,产生的经验轨迹发往**重放缓冲区**。重放缓冲区中的数据再被学习器采样拿去学习。另外,每过N步学习器还会从参数服务器同步最新的Q网络模型参数。在这个闭环中有四个角色:采样器, 学习器, 参数服务器和重放缓冲区。
那么GORILA相比于DQN, 主要的**区别**在于:
- 对于采样器:GORILA里定义了一个捆绑模式(Bundled Mode),即采样器的策略与学习器中实时更新的Q-Network是捆绑的。
- 对于学习器: 学习器中对于Q-Network的参数梯度会发给参数服务器。
- 对于重放缓冲区:在GORILA里分两种形式,在本地模式下就存在采样器所在的机器上;而多机模式下将所有的数据聚合在分布式数据库中,这样的优点是可伸缩性好,缺点是会有额外的通信开销。
- 对于参数服务器:存储Q网络中参数的梯度(Gradient)的变化,好处是可以让Q网络进行回滚,并且可以通过多个梯度来使训练过程更加稳定。在分布式环境中,不可避免的就是稳定性问题(比如节点消失、网速变慢或机器变慢)。GORILA中采用了几个策略来解决这个问题,如丢弃过旧的和损失值(Loss)太过偏离均值时的梯度。
GORILA中可以配置多个学习器、采样器和参数服务器,放在多个进程或多台机器上以分布式的方式并行执行。如实验中参数服务器使用了31台机器,学习器和采样器进程都有100个。实验部分与DQN一样基于Atari平台。在使用相同参数的情况下,该框架在49个游戏中的41个上表现好于非并行版本的传统DQN,同时训练耗时也有显著减少。
<div align="center">
<img src="./img/GORILA_arch.png" ch="500" />
</div>
<div align=center>图10.2.3 GORILA的架构 </div>
### A3C
**A3C**[<sup>[3]</sup>](#a3c)是一个基于Actor-Critic算法。在A3C里没有参数服务器、没有公用的重放缓冲区。
具体来说:
- 每一个工作器(worker)实际上包含一个采样器、一个学习器还有一个小的缓冲区(通常是先进先出)。
- 每一个工作器(worker)中学习器计算得出梯度后都发送给全局网络(global network)。每一个工作器(Worker)中采样器都可以用不同的探索策略与环境进行交互,这些样本可以存在一个小缓冲区中。
- 全局网络(global network)接收多组梯度后再更新参数,再异步地把参数拷贝给所有工作器。
<div align="center">
<img src="./img/A3C_arch.png" ch="500" width="70%"/>
</div>
<div align=center>图10.2.4 A3C的架构 </div>
而A3C架构的**优点**是:
- 每一个采样器可以用不同的策略探索环境,使得样本更具有多样性,探索到的状态空间更大。
- 全局网络等所有工作器都传递了梯度后再更新,使训练更稳定。
- 大规模并行非常容易。
- 在A3C架构中,每个工作器都独自计算梯度,全局网络只负责使用梯度,所以全局网络的计算量并不大。在作者的原始实现中,A3C不需要GPU资源,只需要CPU即可在Atari等游戏上达到很好的效果。
但同时A3C本身存在着**问题**:
- 当模型变得复杂时,在CPU上计算梯度的耗时会变得非常大,而如果迁移到GPU上,由于每个工作器都需要一个模型的副本,又会需要大量的GPU资源。
- 当模型变大时,传输梯度和模型参数的网络开销也会变得巨大。
- 全局网络使用异步方式更新梯度,这意味着在训练过程中,部分梯度的方向并不正确,从而可能影响最终的训练效果。这个现象会随着工作器的数量增多变得越来越严重,这也一定程度上限制了A3C的横向扩展能力。
### ApeX
**ApeX**[<sup>[4]</sup>](#apex)是2018年在DQN, GORILA之后的又一个基于DQN的工作。
在ApeX的结构里,采样器可以有多个实例,而学习器只有一个。
它和DQN,GORILA的**差别**是:
- 对于采样器: 以不同的探索策略和环境交互。例如,有的采样器以更大的概率去探索,有的采样器以小概率去探索。
- 对于学习器: 和GORILA不同,ApeX里中心学习器只有一个,从重放缓冲区里拿到数据学习。
- 对于重放缓冲区:不是均匀采样,而是按照优先级来采样,从而让算法专注于那些重要的数据。
<div align="center">
<img src="./img/ApeX_arch.png" ch="500" width="80%"/>
</div>
<div align=center>图10.2.5 ApeX的架构 </div>
ApeX的架构上可以适配DQN(ApeX-DQN)或者DDPG(ApeX-DDPG)等算法。
在ApeX[<sup>[4]</sup>](#apex)的实验里提供了ApeX架构在Atari环境上的测试。ApeX的一大优势是采样器可以很方便地扩展。在ApeX[<sup>[4]</sup>](#apex)的实验里,ApeX DQN的采样器最大扩展到了360个CPU。采样器以异步方式将采集到的经验发送给重放缓冲区,而学习器以异步方式拿数据。异步的交互方式解耦了采样器,学习器和重放缓冲区的联系。实验结果表明ApeX DQN和DQN,GORILA相比,在训练速度和效果上都更有优势。
### IMPALA
**IMPALA**[<sup>[5]</sup>](#impala)(Importance Weighted Actor-Learner Architectures)是基于Actor-Critic和A3C的改进,最大的创新是提出了V-trace算法,对off-policy现象做了一定的修正。
在IMPALA架构中,每个采样器都拥有一个模型的副本,采样器发送训练样本给学习器,学习器更新模型之后,会将新模型发送给采样器。在整个过程中,采样器和学习器以异步的方式运行,即学习器只要收到训练数据就会更新模型,不会等待所有的采样器;而采样器在学习器更新模型时依然在采样,不会等待最新的模型。
IMPALA与A3C具体的**区别**在于:
- 对于采样器来说:每一个采样器执行的行为策略不再只来自一个学习器,而可以来自多个学习器。
- 对于重放缓冲区来说:IMPALA里有两种模式,一种是由先进先出的队列实现的重放缓冲区,本质上是一种on-policy的算法;另一种是由一个数据池实现,每次随机采样其中的数据。
显然这样的运行方式会产生行为策略和目标策略不一致的现象,即:训练用的样本不是由当前的目标策略产生,而是由行为策略产生的,这对算法的收敛提出新的挑战。在IMPALA中,作者在数学上推导出了一种严谨的修正方式:V-trace算法。该算法显著降低了(由和目标策略不一样的行为策略生成的)训练样本带来的影响。实验表明,使用V-trace修正之后,相比于其他几种修正方式,最终的收敛效果有明显提升。
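具体地,按照IMPALA论文[<sup>[5]</sup>](#impala)的记号($\mu$为行为策略,$\pi$为目标策略),V-trace为价值函数$V(x_s)$定义了如下的修正目标:
$$v_s = V(x_s) + \sum_{t=s}^{s+n-1} \gamma^{t-s}\left(\prod_{i=s}^{t-1} c_i\right)\delta_t V, \quad \delta_t V = \rho_t\big(r_t + \gamma V(x_{t+1}) - V(x_t)\big)$$
其中$\rho_t = \min\left(\bar{\rho}, \frac{\pi(a_t|x_t)}{\mu(a_t|x_t)}\right)$和$c_i = \min\left(\bar{c}, \frac{\pi(a_i|x_i)}{\mu(a_i|x_i)}\right)$是截断的重要性采样权重,分别影响算法收敛到的价值函数和收敛的速度。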
<div align="center">
<img src="./img/IMPALA_arch.png" ch="500" width="60%"/>
</div>
<div align=center>图10.2.6 IMPALA的架构 </div>
### SEEDRL
IMPALA在神经网络模型比较简单的时候性能很好,但当神经网络变得复杂的时候,该架构也有瓶颈。主要的**问题**有以下几点:
- 采样的时候,推理(Inference)放在采样器上执行,因为采样器是运行在CPU上的,所以当神经网络变复杂之后,推理的耗时就会变得很长,影响最终的运行效率。
- 采样器上执行了两种操作,一个是和环境交互,另一个是用行为策略做推理。很多游戏或者环境都是单线程实现的,而神经网络的推理计算则可以使用多线程加速,将两种操作放在一起,整体上会降低CPU的使用率。
- 当模型很大的时候,模型参数的分发会占用大量的带宽。
**SEEDRL**[<sup>[6]</sup>](#seedrl)的工作就是为了解决这些问题,SEEDRL与IMPALA的架构对比见下图:
<div align="center">
<img src="./img/seed_rl.png" ch="500" width="90%"/>
</div>
<div align=center>图10.2.7 SEEDRL的架构 </div>
和IMPALA相比,SEEDRL的**区别**主要是:
- 把采样器上的推理过程和学习器放在同一块TPU上。而采样器和学习器之间只交换状态和采取的动作。
在SEEDRL中,采样器和学习器分布在不同的节点中,采样器通过gRPC来和学习器进行通信,SEEDRL同样使用了V-Trace来进行off-policy修正。
## 小结与讨论
通过本章的学习,可以发现强化学习算法,尤其是分布式强化学习算法之间的架构是差距非常大的。体现在智能体的运行的硬件,交互的方式,智能体里不同模块之间的连接关系等都会有很大的差别。在下一章里,我们会讨论算法架构之间的差异会给设计强化学习系统带来什么样的挑战。
## 参考文献
<div id="dqn"></div>
1. Mnih V, Kavukcuoglu K, Silver D, et al. Playing atari with deep reinforcement learning[J]. arXiv preprint arXiv:1312.5602, 2013.
<div id="gorila"></div>
2. Nair A, Srinivasan P, Blackwell S, et al. Massively parallel methods for deep reinforcement learning[J]. arXiv preprint arXiv:1507.04296, 2015.
<div id="a3c"></div>
3. Mnih V, Badia A P, Mirza M, et al. Asynchronous methods for deep reinforcement learning[C]//International conference on machine learning. PMLR, 2016: 1928-1937.
<div id="apex"></div>
4. Horgan D, Quan J, Budden D, et al. Distributed prioritized experience replay[J]. arXiv preprint arXiv:1803.00933, 2018.
<div id="impala"></div>
5. Espeholt L, Soyer H, Munos R, et al. Impala: Scalable distributed deep-rl with importance weighted actor-learner architectures[C]//International Conference on Machine Learning. PMLR, 2018: 1407-1416.
<div id="seedrl"></div>
6. Espeholt L, Marinier R, Stanczyk P, et al. Seed rl: Scalable and efficient deep-rl with accelerated central inference[J]. arXiv preprint arXiv:1910.06591, 2019.
<div id="alphastar"></div>
7. Arulkumaran K, Cully A, Togelius J. Alphastar: An evolutionary computation perspective[C]//Proceedings of the genetic and evolutionary computation conference companion. 2019: 314-315.
<div id="openfive"></div>
8. Berner C, Brockman G, Chan B, et al. Dota 2 with large scale deep reinforcement learning[J]. arXiv preprint arXiv:1912.06680, 2019.
<div id="moba"></div>
9. Ye D, Liu Z, Sun M, et al. Mastering complex control in moba games with deep reinforcement learning[C]//Proceedings of the AAAI Conference on Artificial Intelligence. 2020, 34(04): 6672-6679.
|
AI-System/Textbook/第10章-强化学习系统/10.2.1-分布式强化学习算法.md/0
|
{
"file_path": "AI-System/Textbook/第10章-强化学习系统/10.2.1-分布式强化学习算法.md",
"repo_id": "AI-System",
"token_count": 11186
}
| 14 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 11.2 基于稀疏化的模型压缩
- [11.2 基于稀疏化的模型压缩](#112-基于稀疏化的模型压缩)
- [11.2.1 人类大脑的稀疏性](#1121-人类大脑的稀疏性)
- [11.2.2 深度神经网络的稀疏性](#1122-深度神经网络的稀疏性)
- [权重稀疏](#权重稀疏)
- [激活稀疏](#激活稀疏)
- [梯度稀疏](#梯度稀疏)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
## 11.2.1 人类大脑的稀疏性
生物研究发现人脑是高度稀疏的。例如当人类识别一只猫时,我们不会仔细检查每一个毛发的纹理,仅仅使用简单的几何边缘就足以做出判别。在交通场景中,当人类看到眼前物体时,我们的神经系统不会处理所有像素,因为那样无法在瞬息万变的场景中迅速做出反应。我们此时仅会关注视野中主要的物体,比如图中用颜色分割的交通参与者(人、车辆)。这些人类认知的本能是千百万年进化与自然选择的结果。同样,在语音识别、医疗成像、自动驾驶、社交网络等各行各业的海量数据中,每种数据都有其内在的结构性。自动学习数据内生的结构性是人工智能算法的核心,数据的结构性带来了信息表达中的稀疏性,高效的人工智能系统应该充分利用这种稀疏性。
其实对于AI模型稀疏度的追求早在深度学习风行之前就已得到了广泛的研究,稀疏编码曾经是实现人脸识别主流的技术手段之一。这是因为训练数据的噪声使得模型中包含大量冗余信息,降低了模型的推广(Generalization)能力,而通过模型稀疏化则可以消除这部分冗余从而提升模型精度,但是过度的稀疏则会丢失模型中的关键信息而严重损坏精度指标。在深度神经网络中,对模型尺寸、浮点运算量等性能(Performance)指标的追求成为主要的关注点,模型稀疏度的增加可以持续提升上述性能。如何在模型精度与性能之间寻求最优的折衷是一个复杂的研究话题,也是神经网络稀疏化的研究目标。
<center> <img src=".\img\2\cat.png" /></center>
<center>图11.2.1 人类的视觉系统是稀疏的,不需要处理所有像素即可迅速做出判断。</center>
## 11.2.2 深度神经网络的稀疏性
根据深度学习模型中可以被稀疏化的对象,深度神经网络中的稀疏性主要包括权重稀疏,激活稀疏和梯度稀疏。
### 权重稀疏
在大多数类型的深度神经网络中,通过对各层卷积核元素的数值(即网络权重)进行数值统计,人们发现许多层权重的数值分布很像是正态分布(或者是多正态分布的混合),越接近于0,权重就越多。这就是深度神经网络中的权重稀疏现象,一个典型的网络权重分布直方图如图11.2.2所示。舍弃掉其中接近0值的权重,相当于在网络中剪除部分连接,对网络精度影响并不大,这就是权重剪枝。
这么做的道理是因为权重数值的绝对值大小可以看做重要性的一种度量,较大的权重意味着对最终输出的贡献较大,也相对更加重要,反之则相对不重要。不重要的权重删去对精度影响就应该较小。
<center> <img src=".\img\2\weight Gaussian.png" /></center>
<center>图11.2.2 深度网络中存在权重稀疏性:(a)剪枝前的权重分布;(b)剪除0值附近权值后的权重分布;(c)网络微调后的权重分布</center>
即使是移除绝对值接近于0的权重也会带来推理精度的损失。为了恢复网络精度,通常在剪枝之后需要进行再次的训练,这个过程称为微调(fine-tuning)。微调之后的权重分布将部分地恢复高斯分布的特性,如图11.2.2(c)所示,同时网络精度也会达到或接近剪枝前的水平。大多数的权重剪枝算法都遵循这一“正则化-剪枝-微调”反复迭代的流程,如图11.2.3所示,直到网络规模和精度的折衷达到预设的目标为止。
<center> <img src=".\img\2\three step.png"/></center>
<center>图11.2.3 剪枝算法常用的迭代计算流程</center>
### 激活稀疏
神经网络模型中的非线性激活单元(activation)是对人类神经元细胞中轴突末梢(输出)的一种功能模拟。早期的神经网络模型——多层感知机(MLP)中,多采用Sigmoid函数作为激活单元。然而随着网络层数的加深,Sigmoid函数引起的梯度消失和梯度爆炸问题严重影响了后向传播算法的实用性。为解决上述问题,多种多样新的非线性激活单元被提出,其中ReLU函数是目前应用最为广泛的激活函数,“2D卷积-ReLU激活函数-池化”三个算子相串接而成的基本单元就构成了CNN网络的一个完整层,如下述TensorFlow代码片段所示:
```python
# 卷积 -> ReLU激活 -> 最大池化, 串接构成CNN的一个完整层
L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')
L1 = tf.nn.relu(L1)    # 负半轴输入被置0, 产生激活稀疏性
L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1],
                    strides=[1, 2, 2, 1], padding='SAME')
```
ReLU激活函数的定义为:
$$
\phi(x)=max(0,x)
$$
该函数使得负半轴的输入都产生0值的输出。图11.2.4中的特征图经过非线性激活后,产生激活输出,可以看出激活函数给网络带来了另一种类型的稀疏性,红圈标识了特征图中被稀疏化的元素。
<center> <img src=".\img\2\relu.png"/></center>
<center>图11.2.4 激活稀疏效果示意图</center>
为了利用上述稀疏特性来压缩模型,文献[6]提出了一种神经元剪枝算法。首先,定义网络中每个神经元经ReLU映射后输出的零值平均百分比(APoZ)指标为:
$$APoZ^{(i)}_c = APoZ(O_c^{(i)}) = \frac{\sum_k^N \sum_j^M f\left(O^{(i)}_{c,j}(k)=0\right)}{N \times M} $$
这里,$O_c^{(i)}$表示网络第$i$层中第$c$个通道(特征图)的结果,$N$与$M$分别表示用于验证的图像样本个数及每个特征图的维度,$f\left( \cdot \right)$在输入表达式为真时输出1,反之输出0。由于每个特征图均来自一个滤波器(神经元)的卷积及激活映射结果,因此上式衡量了该神经元对一组特定图像的计算结果中0值输出的平均比例。图11.2.5给出了在VGG-16网络的CONV5-3层中,利用50,000张ImageNet图像样本计算得到的所有512个神经元的APoZ指标分布图。可以看出大多数神经元的该项指标都分布在93%附近。实际上,该网络中共有631个神经元的APoZ值超过90%。激活函数的引入反映出VGG网络存在着大量的稀疏与冗余性。
<center> <img src=".\img\2\histo-APoZ.png"/></center>
<center>图11.2.5 激活稀疏算法示例:ReLU激活函数输出结果中存在高度的稀疏性。</center>
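下面给出一个计算APoZ指标的简化Python示例(假设激活输出已收集为NumPy数组,形状为[样本数, 通道数, 高, 宽],仅为示意):
```python
import numpy as np

def apoz_per_channel(activations):
    """activations: ReLU后的特征图, 形状为 [N, C, H, W]
    返回每个通道(神经元)的零值平均百分比(APoZ)"""
    n, c = activations.shape[0], activations.shape[1]
    zeros = (activations == 0)                    # f(·): 输出为0时计1
    # 对样本维和空间维求平均, 得到每个通道的零值比例
    return zeros.reshape(n, c, -1).mean(axis=(0, 2))

# 用法示例: APoZ超过90%的通道可作为神经元剪枝的候选
# apoz = apoz_per_channel(feature_maps); candidates = np.where(apoz > 0.9)[0]
```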
### 梯度稀疏
在第五章中我们已经看到,大模型(如BERT)由于参数量庞大,单台主机难以满足其训练时的计算资源需求,往往需要借助分布式训练的方式在多台节点(Worker)上协作完成。采用分布式随机梯度下降(Distributed SGD)算法可以允许$N$台节点共同完成梯度更新的后向传播训练任务。其中每台主机均保存一份完整的参数拷贝,并负责其中$1/N$参数的更新计算任务。按照一定时间间隔,节点在网络上发布自身更新的梯度,并获取其他$N-1$台节点发布的梯度计算结果,从而更新本地的参数拷贝。
可以看出,随着参与训练任务节点数目的增多,网络上传输的模型梯度数据量也急剧增加,网络通信所占据的资源开销将逐渐超过梯度计算本身所消耗的资源,从而严重影响大规模分布式训练的效率。另一方面,大多数深度网络模型参数的梯度是高度稀疏的,研究表明在分布式SGD算法中,99.9%的梯度交换都是冗余的。图11.2.6显示了在AlexNet的训练早期,各层参数梯度的幅值还是较高的。但随着训练周期的增加,参数梯度的稀疏度显著增大,大约30个训练周期后,各层梯度稀疏度都趋于饱和。显然,将这些0值附近的梯度进行交换,对网络带宽资源是一种极大的浪费。
<center> <img src=".\img\2\Alex grad sparse.png"/></center>
<center>图11.2.6 深度神经网络训练中的各层梯度值存在高度稀疏特性。</center>
梯度稀疏的目的在于压缩分布式训练时被传输的梯度数据,减少通信资源开销。由于SGD算法产生的梯度数值是高度噪声的,移除其中并不重要的部分并不会显著影响网络收敛过程,与之相反,有时还会带来正则化的效果,从而提升网络精度。梯度稀疏实现的途径包括:1)预设阈值:在网络上仅仅传输那些幅度超过预设阈值的梯度;2)预设比例:在网络上传输根据一定比例选出的一部分正、负梯度更新值;3)梯度丢弃:在各层梯度完成归一化后,按照预设阈值丢弃掉绝大多数幅值较低的梯度。一些梯度稀疏算法在机器翻译任务中可以节省99%的梯度交换,而仅带来0.3%的模型精度损失;可以将ResNet-50模型训练的梯度交换参数量从97MB压缩为0.35MB而并不损失训练精度[9]。
<center> <img src=".\img\2\gradient sparse.png"/></center>
<center>图11.2.7 通过梯度稀疏可以在分布式训练任务中大幅减少通信时间开销从而提升模型训练效率[9]。</center>
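以“预设阈值”方式为例,下面是一个梯度稀疏化的简化Python示例(基于NumPy,仅为示意;实际系统中通常还需配合本地梯度累积与误差补偿等技巧[9]):
```python
import numpy as np

def sparsify_gradient(grad, threshold):
    """grad: 展平后的一维梯度向量。
    仅保留幅值超过阈值的梯度, 以稀疏格式(索引, 数值)传输;
    未传输的部分留在本地累积, 等待下次达到阈值后再发送。"""
    mask = np.abs(grad) >= threshold
    indices = np.nonzero(mask)[0]
    values = grad[mask]
    residual = np.where(mask, 0.0, grad)  # 本地残差(误差补偿)
    return (indices, values), residual
```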
## 小结与讨论
神经网络稀疏化尽管近年来已经取得了丰富的研究成果,但是作为一个新的研究方向,并没有完全成熟的知识体系,许多固有结论不断地被打破和重建,深度网络模型的稀疏与压缩仍然具有巨大的潜力和研究空间。下面对现有剪枝方法进行小结,并指出未来该领域的部分挑战性问题。
- 早期的剪枝工作多针对非结构化剪枝及启发式方法,当前结构化剪枝及自动化剪枝受到越来越多的关注,因为其更易获得实际的模型加速机会及更高的模型压缩率。
- 卷积层相比全连接层由于其冗余性更小,因而剪枝方法的设计更具挑战性。因此,那些没有大规模全连接结构的神经网络,如ResNet、GoogLeNet、DenseNet等,就要比拥有较多全连接结构的网络,如VGG、AlexNet等更加难于压缩。
- 神经元剪枝相比权重剪枝更易损失模型精度,训练阶段的梯度则拥有最多的稀疏度。如何优化模型稀疏度与剪枝后精度间的折衷仍是当前该领域的研究重点。
- 图11.2.3所示的网络剪枝一般流程也并不是一成不变的,最新的研究表明,对于随机初始化网络先进行剪枝操作再进行训练,有可能会比剪枝预训练网络获得更高的稀疏度和精度。因此,究竟剪枝后的残余连接结构与残余权重值两者哪个更为关键,就成为一个开放的研究问题。
## 参考文献
1. Wright J, Yang A Y, Ganesh A, et al. Robust face recognition via sparse representation. IEEE transactions on pattern analysis and machine intelligence, 2008, 31(2): 210-227.
2. 纪荣嵘,林绍辉,晁飞,吴永坚,黄飞跃.深度神经网络压缩与加速综述.计算机研究与发展,2018,55(09):1871-1888.
3. Hoefler T, Alistarh D, Ben-Nun T, et al. Sparsity in Deep Learning: Pruning and growth for efficient inference and training in neural networks. Journal of Machine Learning Research, 2021, 22(241): 1-124.
4. Li H, Kadav A, Durdanovic I, et al. Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710, 2016.
5. Liu Z, Li J, Shen Z, et al. Learning efficient convolutional networks through network slimming. Proceedings of the IEEE international conference on computer vision. 2017: 2736-2744.
6. Hu H, Peng R, Tai Y W, et al. Network trimming: A data-driven neuron pruning approach towards efficient deep architectures. arXiv preprint arXiv:1607.03250, 2016.
7. Ren M, Pokrovsky A, Yang B, et al. Sbnet: Sparse blocks network for fast inference. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2018: 8711-8720.
8. Aji A F, Heafield K. Sparse communication for distributed gradient descent. arXiv preprint arXiv:1704.05021, 2017.
9. Lin Y, Han S, Mao H, et al. Deep gradient compression: Reducing the communication bandwidth for distributed training. arXiv preprint arXiv:1712.01887, 2017.
10. Deng L, Li G, Han S, et al. Model compression and hardware acceleration for neural networks: A comprehensive survey. Proceedings of the IEEE, 2020, 108(4): 485-532.
|
AI-System/Textbook/第11章-模型压缩与加速/11.2-基于稀疏化的模型压缩.md/0
|
{
"file_path": "AI-System/Textbook/第11章-模型压缩与加速/11.2-基于稀疏化的模型压缩.md",
"repo_id": "AI-System",
"token_count": 8490
}
| 15 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 13.1 简介与趋势
我们介绍学习增强系统的初衷和范式转移。本章包含以下内容:
- [13.1 简介与趋势](#131-简介与趋势)
- [13.1.1 系统设计的范式转移](#1311-系统设计的范式转移)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
## 13.1.1 系统设计的范式转移
计算机系统的设计和运维过去一直以来都依靠工程师的经验和试错。在系统的规模和复杂度较小时,工程师可以经由大量的实验来评估系统的性能并理解系统的行为。工程师从这些实验里来获得经验,并手写成系统里的启发式算法和决策规则。另外,这些经验不仅能帮助工程师来优化现有的系统,也能帮助他们设计未来的系统。
然而,现代计算机系统的复杂性和规模快速提升,为这种依赖人力和经验的方式带来了前所未有的挑战,尤其人力的增长赶不上系统规模和动态性上升的速度。我们这边用现代系统常用的微服务架构,来当作是一个例子。微服务架构强调模块化 —— 一个系统由多个模块(即微服务)组成,而模块的实现就基于像 Docker 的容器技术和 Hypervisor 的虚拟技术。模块化使得现代系统能够很好地支持横向扩展,纵向扩展,和持续更新,尤其是配合像 Kubernetes 的自动集群管理下。随着时间,用户的需求和场景有了改变,集群的规模也能做出相对应的调整来保证系统的服务品质。然而,每一次的变化都代表着工程师需要重新理解系统的行为,来优化和维护系统。但是,人的理解力难以理解大规模系统的行为是如何被每一个设定参数和决策所影响。系统里的每一微服务有着不同的设定参数和决策,然后微服务和微服务之间有着不同的执行依赖关系。例如,计算机系统里常用的数据库有着上百个设定参数,也有着像数据索引的决策。另外,操作系统里有调度策略,分布系统里有资源的分配策略,云微服务有扩容和调参策略,互联网路有拥塞控制(Congestion Control)和流量控制(Flow Control)的设定参数,视频流应用有网路品质的评估策略,防火墙有规则匹配策略,中央处理器里有缓存置换和预存取算法,甚至代码编译器也有设定参数,等等。
在近几年,学习增强系统的范式转移已成为系统设计的趋势。自从机器学习和深度学习在计算机视觉和自然语言处理等领域取得突破,计算机系统领域也开始探讨如何利用机器学习和深度学习。系统的性能和行为,与决策和参数的关系,可以被想象成一个非线性的空间。而学习增强系统的范式就是在这空间里学习并搜索全局最优解。学习这空间可能很复杂;但现代系统普遍有完善的行为监测机制和精细的日志,加上近期机器学习的进步(例如深度学习和强化学习),大大提升了数据驱动的可行性。
学习增强系统普遍有 3 种实现的方式。第一,机器学习被用来辅助启发式算法和决策规则的执行;第二,机器学习被用来取代现有的启发式算法和决策规则;第三,机器学习被用来设计新的启发式算法和决策规则。不同的实现方式有着不同的折衷与取舍,尤其是在以下的维度:系统所需要的决策准确度,系统所能提供的数据,系统所能容忍的模型训练时间与资源开销,系统所能容忍的模型推理时间与资源开销,系统所能容忍的模型推理误差,等等。比如,相比于辅助启发式算法和决策规则的执行,用机器学习来取代启发式算法和决策规则可以大大地利用机器学习在优化问题上的能力。但是,从过往的经验上来看,机器学习的执行时间往往比简单的启发式算法大,并且推理的误差可能造成系统错误。另外,虽然利用机器学习来设计新的启发式算法和决策规则可以降低工程师的工作量,其所需在系统上采集的训练数据可能非常地庞大。之后在这章节里,我们将从我们过去的科研和产品经验里,来总结这些折衷与取舍为机器学习所带来新的挑战。
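以第一种实现方式为例,下面的Python草图展示了“机器学习辅助启发式算法”的一种常见形态:当学习模型的预测置信度不足时,系统回退到原有的启发式规则,从而控制推理误差带来的系统风险(`model`与`heuristic`均为假设的接口,仅为示意):
```python
def choose_action(request, model, heuristic, confidence_threshold=0.9):
    """ML辅助启发式: 高置信度时采用模型决策, 否则回退到启发式规则"""
    action, confidence = model.predict(request)
    if confidence >= confidence_threshold:
        return action
    return heuristic(request)   # 回退路径保证最坏情况下的行为可控
```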
## 小结与讨论
机器学习可以从海量的系统数据中归纳总结出其内在的行为规律。在进入下一个章节前,读者可以思考有哪些系统问题适合用机器学习的思维来解决。
## 参考文献
1. Docker (software). [https://en.wikipedia.org/wiki/Docker_(software)](https://en.wikipedia.org/wiki/Docker_(software))
2. Kubernetes. [https://en.wikipedia.org/wiki/Kubernetes](https://en.wikipedia.org/wiki/Kubernetes)
|
AI-System/Textbook/第13章-人工智能优化计算机系统/13.1-简介与趋势.md/0
|
{
"file_path": "AI-System/Textbook/第13章-人工智能优化计算机系统/13.1-简介与趋势.md",
"repo_id": "AI-System",
"token_count": 3561
}
| 16 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 7.1 异构计算集群管理系统简介
本章介绍异构集群管理系统的设计初衷,需要解决的问题及挑战,并通过启发式实例以更为具象的方式展开介绍。之后我们会交替使用“集群管理系统”与“平台”来指代当前的异构计算集群管理系统。
在展开前,我们回顾一下操作系统是什么?[操作系统](https://en.wikipedia.org/wiki/Operating_system)是管理计算机硬件、软件资源并为计算机程序提供通用服务的系统软件。那么我们也可以认为,异构计算集群管理系统是管理计算机集群内的多节点硬件(GPU,CPU,内存,磁盘等)、软件资源(框架,作业,镜像等)并为计算机程序(通常为深度学习训练作业程序)提供通用作业开发服务(提交,调试,监控,克隆等)的系统软件。
- [7.1 异构计算集群管理系统简介](#71-异构计算集群管理系统简介)
- [7.1.1 多租环境运行的训练作业](#711-多租环境运行的训练作业)
- [7.1.2 作业生命周期](#712-作业生命周期)
- [7.1.3 集群管理系统架构](#713-集群管理系统架构)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
## 7.1.1 多租环境运行的训练作业
<center><img src="./img/1/7-1-1-jobsubmission.png" /></center>
<center>图 7.1.1 多租环境提交运行作业</center>
在企业级深度学习场景下,大型企业有很多机器学习科学家与工程师,组织有大量的 GPU 服务器,为了组织效率的提升与资源共享,就诞生了针对深度学习场景下设计的多租户的平台系统。
如图 7.1.1 所示,企业级环境下,不同用户会提交不同框架(例如, PyTorch,TensorFlow等)的深度学习作业,有不同作业的资源需求(例如,单 GPU 卡,多 GPU 卡),共享一个物理集群才能让组织减少硬件资源浪费。
多租户(Multi-Tenancy)技术是一种软件架构技术,它实现如何于多用户多作业的环境下,共用使用系统或程序组件,并且仍可确保各用户间资源和数据保持隔离性。又由于当前深度学习场景下,平台系统管理的资源是异构(例如, CPU,GPU 等)的。所以本章主要介绍的是管理“异构资源”,调度“深度学习作业”的,“多租户”的平台系统(Platform System)。
多用户共享多 GPU 服务器相比原来深度学习开发者独占使用服务器进行模型训练,有很大的不同,如图 7.1.1 所示。这也为异构计算集群管理系统(简称,平台,深度学习平台)的设计产生了相应的需求。主要体现在以下几点:
- 多作业(Job),多用户
- 每个用户需要不断改进模型,超参数调优,调试与优化作业,这样会提交大量的作业到平台。
- 不同业务与应用场景(例如,计算机视觉,自然语言处理,语音识别等任务)的人工智能团队在使用平台。不同的团队有多名人工智能工程师,他们会在同一时段向平台申请资源执行作业。
- 作业环境需求多样
- 目前深度学习的技术栈并不统一,有的用户使用 TensorFlow,有的用户使用 PyTorch,还有些可能是用 Hugging Face 等上层库。由于用户可能使用开源的项目,有些项目已经较为陈旧,有些项目又使用了最新框架,用户不想再每次都做版本适配,而原作者开源的框架版本也可能不一样,造成底层依赖,例如 NVIDIA CUDA 等也可能版本不同。同时用户如果共享机器,需要对依赖的环境互不干扰,不希望其他用户安装的 Python,PyTorch 等版本影响自己的作业需要的环境。
- 作业资源需求多样
- 用户提交的作业有些是分布式训练作业,对资源需求较多,有些是单机的训练或者调试任务,对资源的需求较少,有些是大规模分布式训练任务用到几百甚至上千块 GPU。同时由于不同作业的模型不同,超参数不同,造成即使申请的 GPU 数量一致的情况下,资源利用率也不同。平台需要按需求做好资源的分配,减少资源碎片。
对用户来说,不希望作业本身受到其他作业的硬件资源与命名空间冲突的干扰,理想是像使用独占资源一样运行自己的作业。因为有些用户可能是在赶某个截止任务,希望尽可能快的执行完成模型训练。平台需要做好运行期资源隔离,保证好服务质量。
- 服务器软件环境单一
- 平台方在采购资源,安装底层操作系统和驱动时,很难规划和指定未来用户的软件和版本,同时为了运维和部署减少软件兼容性问题,一般将服务器安装统一的操作系统,驱动,并让其版本保持一致,减少运维负担。这与之前用户的多样环境需求产生了矛盾。同时即使安装不同的操作系统和环境,由于用户的作业类型环境需求动态变化且很难提前规划,也无法做到精准适配,所以集群都是底层统一软件和操作系统版本,以期通过类似云的方式,通过镜像等手段对每个用户提供个性化的环境,但是云平台的镜像加载与制作开销较大,对专有场景用户也没有那么高的安全与隔离需求,似乎也不是效率最高的解决方法。
- 服务器空闲资源多样
- 虽然平台一般批量购买同型号大量机器。但由于用户的作业申请资源多样,作业生命周期多样,造成资源释放后,平台上的空闲资源的组合比较多样,需要设计好调度策略,尽可能提升资源的利用率。
从以上的问题,我们可以看到调度与资源管理问题需要一个统一的平台系统来支撑。它在底层抽象并管理计算资源,对上层应用提供隔离且易用的作业运行时环境。总体来说,我们可以将其理解为支持深度学习应用、管理分布式 GPU 服务器集群的操作系统。我们可以总结以下几点来概括平台的使命和重要性:
- 提供人工智能开发与模型生产的基础架构支持:
- 高效地深度学习作业调度与管理:根据作业资源需求,分配和回收计算资源,提升利用率的同时,保持一定的公平性等。对组织来说,本身希望更高的投入产出比,希望购买的硬件本身能够被高效的利用。
- 稳定地异构硬件管理:高效运维,动态扩容,节点问题修复等运维功能的支持。管理员和用户可以监控节点及硬件资源状态和利用率等。当服务器扩展到更多的节点之后,造成集群内有更高的概率在同一段时间内有一台节点故障,而且这个概率会随着服务器的增加而增加,所以做好平台本身的容错,重试机制,是在设计之初就需要提前规划和考量的。
- 提升用户的研发生产力:
- 用户专注于模型创新,无需关注系统部署,管理。通过镜像等技术,让用户打包软件依赖,简化部署与安装。同时如果能提供很多通用模板,加速库,最佳实践文档等,都会在一定程度上提升整体的生产力。
- 运行时资源与软件依赖隔离,让用户像独占服务器一样使用运行时资源,执行作业,保持良好的用户体验。
- 模型,代码和数据共享,加速研究与创新。组织内共享与提供模块化可复用的代码,模型,镜像与工作流支持,加速创新。当前人工智能社区中新的研究工作层出不穷,如何高效的支持新想法新实验的验证,减少在部署和环境方面投入的时间,也是平台本身需要为用户提供的基本保障。
但是我们也可以看到用户追求独占使用资源的体验需求与组织平台希望共享资源提升利用率的需求是需要取舍与平衡的,这其中需要底层技术的支持,也需要管理机制策略,用户培训等其他手段的协同作用才能取得双方的平衡。
请读者思考,如果你是用户,你希望平台提供什么样的资源和服务?换个角度,如果你是平台管理员,你希望你的平台是被如何管理的,给用户提供以什么样的系统使用体验?这其中是否会有相应的矛盾的地方?
## 7.1.2 作业生命周期
在展开平台组件与功能前,让我们先了解一下,一个深度学习作业,在平台上是如何提交并执行的,也就是作业的生命周期。
<center><img src="./img/1/7-1-2-cluster.png" /></center>
<center>图 7.1.2 GPU 集群</center>
平台上作业生命周期:
1. 作业提交与排队:用户先将作业的依赖环境在本地测试成功后,打包为镜像,并上传到公共镜像中心(例如,Docker Hub,Azure Container Registry 等)。之后用户将代码,数据等运行作业需要的输入,上传到平台的文件系统(例如,NFS,HDFS,Azure Blob 等)。之后用户可以通过作业提交工具(例如,Web,命令行,API 等),填写资源申请(例如,几块 GPU,内存等需求规格),作业启动命令,部署方式,以及镜像,代码和数据的路径。之后点击提交即可。作业提交时,用户需要权衡资源需求和排队时间,一般资源需求越高,排队时间越长,同时需要权衡作业的成本,减少无用作业提交造成的配额与资源浪费。
2. 作业资源分配与调度:平台收到用户的资源申请后,先进行排队,调度器轮询到作业时,根据目前集群中空闲资源状况,根据一定的调度算法(例如,7.3,7.4 章节中介绍的调度算法),决定作业是在哪些拥有空闲资源的服务器节点启动,如果不满足条件,则继续排队等待。如果提交失败或超时,用户需要调整作业重新提交。
3. 作业执行完成与释放:当作业被调度器调度启动,平台会在有空闲资源的节点启动作业,下载镜像,挂载代码和数据所在的文件系统到节点本地,运行时做好资源限制与隔离,启动作业,执行作业。在作业运行中,平台监控系统不断收集运行时性能指标和日志,方便用户调试。作业执行完成后,平台会释放申请的资源,并继续分配给其他作业使用。
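以作业提交为例,一个作业规格通常包含镜像、资源申请与启动命令等字段。下面用Python字典给出一个假想的作业规格示例(字段名仅为示意,不对应任何特定平台的真实接口):
```python
job_spec = {
    "name": "resnet50-training",                  # 作业名
    "image": "example.io/user/pytorch:1.9",       # 打包好依赖的镜像(假想地址)
    "resources": {"gpu": 4, "cpu": 16, "memoryMB": 65536},  # 资源申请
    "command": "python train.py --epochs 90",     # 启动命令
    "dataPath": "/mnt/data/imagenet",             # 挂载的数据路径
    "outputPath": "/mnt/output/resnet50",         # 模型与日志输出路径
    "retryCount": 3,                              # 失败重试次数(对应上文状态机中的N)
}
```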
我们可以将作业状态抽象为以下的状态机(State Machine):
1. 作业准备与提交:触发作业提交动作
1. 提交成功
2. 提交失败:重新开始 1
2. 作业排队:触发作业调度动作
1. 调度成功
2. 调度失败:重新开始 1
3. 作业部署运行: 触发作业执行动作
1. 执行成功
2. 作业失败,小于等于重试次数 N:重新开始 1
3. 作业失败,大于重试次数 N:作业失败退出
用户的整个操作实际上是在以上状态中不断切换,最终达到将作业成功执行或失败。如果执行成功,用户在作业执行完成后可以获取需要的结果和深度学习模型。
在这样一个生命周期中,请大家思考集群环境下的模型训练遇到的新问题与挑战可以通过什么技术解决?
1. 如何提交作业与解决环境依赖问题?
2. 如何高效调度作业并分配资源?
3. 如何将启动的作业运行时环境,资源与命名空间隔离?
4. 如何面向深度学习作业和异构资源设计集群管理系统?
5. 如何高效存取数据?
6. 如何不断开发平台新功能与运维平台并保证稳定性?
## 7.1.3 集群管理系统架构
<center><img src="./img/1/7-1-3-archcluster.png" /></center>
<center>图 7.1.3. 异构集群管理系统架构</center>
如图 7.1.3 所示,异构集群管理中通常包含很多组件,有进行资源与作业管理的调度器,有监控健康状态和报警的监控系统,有用户交互的 Web界面,也有存储数据,模型与代码的存储系统等。接下来我们看一下平台中的主要组件及功能:
***平台中的主要组件***:
1. 集群调度与资源管理模块:其统一管理集群资源,调度作业到集群空闲资源,回收运行完作业的资源。一般控制平面(Control Plane)可以选择使用 Kubernetes[<sup>[1]</sup>](#k8s),YARN[<sup>[2]</sup>](#yarn),Mesos[<sup>[3]</sup>](#mesos) 等系统。也可以针对深度学习作业和异构硬件特点,定制化调度策略或者使用开源深度学习调度器,例如 HiveD[<sup>[4]</sup>](#hived) 等。
2. 镜像中心:存储 Docker 镜像,供用户提交与共享镜像,作业下载加载镜像。一般可以选用 Docker Hub,或者处于安全和合规要求,构建私有的镜像中心,或者云上镜像中心 Azure Containter Registry 等。
3. 存储模块:在平台中扮演数据平面(Data Plane)角色,存储数据,模型与代码。用户上传数据,作业下载数据和上传结果与模型。存储系统一般根据性能,扩展性,稳定性等需求权衡,可以选用:NFS,Lustre,HDFS 等,或者选用云存储 AWS S3,Azure Blob 等。
4. 作业生命周期管理器:部署作业,监控作业,重试作业,作业错误诊断。类型属于单作业的控制平面,一般不涉及其他作业情况,自动化机器学习系统也可以构建在平台接口之上进行作业编排。生命周期管理一般可以选择使用 K8s Operator,Framework Controller,YARN AppMaster 等。
5. 集群监控与报警:负责集群硬件,服务与作业的状态监控与报警。监控系统一般可以选择使用 Prometheus + Grafana + Alert Manager 等开源系统搭建,针对特殊需求开发监控指标收集脚本(例如,Prometheus node exporter)。
6. 集成开发环境:平台也会对用户提供 Web 门户,REST 服务与集成开发环境 IDE(例如,VS Code 和 Jupyter Notebook)。用户使用这些工具进行作业与数据资源的提交,以及作业的管理、监控与调试。
7. 测试集群:为了和生产环境隔离,平台开发工程师可以部署小规模的测试集群,在测试平台进行开发测试,之后再上线到生产环境集群。
***经典回顾***
从以上的平台架构图中我们可以观察到其设计也采用了关注点分离([Separation of Concerns](https://en.wikipedia.org/wiki/Separation_of_concerns))简称 (SoC)原则,也就是模块化的设计。SoC “是将计算机系统分成不同部分的设计原则。每个部分都解决了一个单独的问题(Concern),即一组影响计算机程序代码的信息。能够很好地体现 SoC 的系统称为模块化(Modular)系统。”例如,平台中的关注点有:调度,监控,存储,运行时等,这些部分可以剥离,以及独立演化。
除了系统本身的组件,由于系统是整体人工智能开发的基石,其被使用的用户也较为多样,那么接下来我们看一下平台中到底有哪些角色和分工:
***平台中的角色***:
1. 用户:用户打包作业镜像,上传数据和代码到存储,并书写作业规格(Specification),进而提交作业,观察作业性能和错误,如果有问题重新修改提交,如果成功则获取训练完成的模型或者处理完的数据。
2. 运维工程师:运维工程师负责监控运维和管理集群健康状况和错误,处理和应对突发事件,进行错误修复,处理和配置租户资源请求等。
3. 平台开发工程师:平台工程师负责不断开发平台服务的新组件与功能,持续集成,持续部署。
总结起来,相比以 CPU,以太网和 SSD 磁盘等硬件为代表的传统数据中心的大数据平台基础架构,面向深度学习的以 GPU,InfiniBand 等异构硬件为核心资源的基础架构有以下 ***特点*** :
(1)硬件更新换代较快:以英伟达为代表的 GPU 厂商每 1~2 年就有新一代 GPU 推出,提供更大的算力和内存。同时由于深度学习模型本身不断朝着参数量越大效果越好发展,对算力的需求也在不断增长,旧的硬件逐渐无法满足新作业的需求,驱动平台不断购买新的硬件。硬件的更新也意味着驱动库等基础库的频繁迭代。
(2)硬件稳定性和可观测性不如传统基础架构成熟:GPU 目前本身的稳定性,隔离性,可观测性的支持不如传统 CPU 生态成熟,造成系统运维需要投入更多的精力与自动化方式与工具。
(3)硬件成本高。GPU 等异构硬件逐渐占据服务器中的主要成本。如何更高效的使用这些昂贵的硬件资源是每个组织都需要面临的问题。
(4)计算密集型负载为主:其中的任务以深度学习,科学计算等任务为主,其本身更多为计算密集型任务,其一般使用 GPU 加速器进行加速,通过 InfiniBand 等高速网卡进行节点间互联与通信。
除了以上特点,当前平台的部署模型也比较多样,
组织一般可以根据,成本,数据合规与安全,弹性资源需求等多个维度衡量和考虑是本地还是采用云平台进行部署。平台的部署模式当前一般支持以下几种 ***部署模式*** :
1. 本地(On-Premises)部署方式:有些公司出于数据合规,性能等需求选择使用开源(例如,OpenPAI[<sup>[5]</sup>](#pai),Kubeflow[<sup>[6]</sup>](#kubeflow) 等)或者自研平台(例如,基于 Kuberenetes,YARN 等二次开发)进行平台本地部署,保证数据,镜像等在自有数据中心维护,这种方式对运维与开发工程师要求较高。用户需要自建数据中心或基于已有数据中心基础架构之上部署,初期需要较大投资且做好资源规划。平台层软件,监控,报警与运维等需要全职的运维团队进行维护,且需要一定的平台服务软件的定制开发能力。尤其当前以 GPU 为代表的异构芯片与硬件更新迭代较快,经历一段时间使用后,硬件容易淘汰过时,硬件规格的更新又需要面临较高的成本。
2. 公有云部署方式:有些公司可以采购公有云的 IaaS(虚拟机) 或者 PaaS(已有的云平台)服务进行平台搭建,好处是减轻运维压力并可以利用公有云平台最先进的技术,能够弹性伸缩资源,但是数据和代码需要上云,且长期使用成本一般会更高。此类方式初期不需要用户大量的投资与资源规划,按需付费,且平台层软件的大部分运维工作交给公有云平台进行维护,适合初期中期应用。但是出于一些数据与合规等需求,一些厂商可能无法将基础架构完全托管于公有云。也有随着基础架构规模增长造成成本与日俱增,最终无法负担的案例出现,例如,“[NASA因存储数据过大,支付不起亚马逊 AWS 的费用的新闻](https://www.theregister.com/2020/03/19/nasa_cloud_data_migration_mess/)”[<sup>[7]</sup>](#nasa),NASA计划在 AWS 新增数据存储空间,但迁移云端后,下载数据的成本激增,而之前并没有意识到也未规划这部分高昂的预算。
3. 混合云:目前有些公司采用敏感数据放在本地数据中心,非敏感数据或弹性资源需求上公有云的方案,一些公有云提供商也提供了混合云机器学习平台,例如,微软提供 [Azure Arc-enabled machine learning](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-attach-arc-kubernetes?tabs=studio) 服务,在一套集群管理系统中管理混合云深度学习平台资源。
4. 多云方式:目前有些公司出于防止被单一供应商锁定,或者为了综合选取性价比最高的方案,会选择多云方案,例如 [HashiCorp](https://www.hashicorp.com/) 等公司提供多云运维工具与服务。在 HotOS '21 上 UCB 的 Ion Stoica 和 Scott Shenker 发表“[From Cloud Computing to Sky Computing](https://sigops.org/s/conferences/hotos/2021/papers/hotos21-s02-stoica.pdf)”[<sup>[8]</sup>](#sky),这篇文章的主旨简而言之是:应该让开发人员能够很容易地构建多云应用程序,不同的基础设施模块和服务可以来源于不同的云服务提供商,作者称此为天空计算(Sky Computing)。但受限于性能与安全等因素,当前商业化方案中常用的资源服务组合更多来自同一家云服务供应商,多云更多是在供应商之间提供整体基础设施的无缝切换,例如 [HashiCorp](https://www.hashicorp.com/) 等。
如图 7.1.4 所示,横轴是时间,纵轴是基础设施成本。一般本地部署初始一次性投入较大,在初始的几年成本高于云计算按需付费的方式,云计算一般在一定年份之后,成本会逐渐超过本地部署。所以云适合公司规模较小和发展初期,等业务稳定和体量大后,机构可以选择自建本地集群或者采用混合云方式,降本增效。
<center><img src="./img/1/7-1-4-publicvspremisecloud.png" /></center>
<center>图 7.1.4 本地与云部署成本趋势 (<a href="https://www.scirp.org/journal/paperinformation.aspx?paperid=87661">图片引用 Cameron Fisher 文章</a>) </center>
对是否将基础架构部署于云上的讨论,其实在 2009 年 UCB RAD Lab 的 Armando Fox 教授曾有一段对比“[Above the Clouds:
A Berkeley View of Cloud Computing](https://www.cs.purdue.edu/homes/bb/BerkeleyCloud.pdf)”[<sup>[9]</sup>](#aboveclouds)。在今天看来也可以用来作为衡量面向深度学习的异构资源管理系统更为底层的部署模式的选型条件。
<center>表 7-1-1 公有云与私有云对比 (<a href="https://www.cs.purdue.edu/homes/bb/BerkeleyCloud.pdf">表格引用 Armando Fox 演讲 '09</a>) </center>
|好处|公有云|私有云|
|---|---|---|
|规模经济(Economy of Scale)|✓|✗|
|近乎无限按序使用资源|✓|✗|
|细粒度按需付费(Pay-as-You-Go)|✓|✗|
|更高利用率和简化运维|✓|✗|
|不需要用户预先承诺(Up-front Commitment)使用量|✓|✗|
综上所述,如果对初期验证阶段且资源使用规模较小的团队,云未尝不是一种好的选择。当已经技术积累深入,资源规模需求较大,定制化需求高,且有数据合规等需求的团队,可以考虑自建基础设施。
## 小结与讨论
本章我们主要介绍异构计算集群管理系统的应用场景和其中的面对的问题与挑战,启发读者展开后续章节的阅读,并从中理解为何会涉及到相关技术点。
请读者思考,多租的场景相比独占使用资源的场景让系统面临何种问题和挑战?
## 参考文献
<div id="k8s"></div>
1. [Kubernetes](https://kubernetes.io/)
<div id="yarn"></div>
2. [Apache Hadoop YARN](https://hadoop.apache.org/docs/stable/hadoop-yarn/hadoop-yarn-site/YARN.html)
<div id="mesos"></div>
3. [Apache Mesos](https://mesos.apache.org/)
<div id="hived"></div>
4. [Hanyu Zhao, Zhenhua Han, Zhi Yang, Quanlu Zhang, Fan Yang, Lidong Zhou, Mao Yang, Francis C.M. Lau, Yuqi Wang, Yifan Xiong, and Bin Wang. 2020. HiveD: sharing a GPU cluster for deep learning with guarantees. Proceedings of the 14th USENIX Conference on Operating Systems Design and Implementation. USENIX Association, USA, Article 29, 515–532.
](https://dl.acm.org/doi/abs/10.5555/3488766.3488795)
<div id="pai"></div>
5. [Microsoft Open Platform for AI (OpenPAI)](https://github.com/microsoft/pai)
<div id="kubeflow"></div>
6. [Kubeflow: The Machine Learning Toolkit for Kubernetes](https://www.kubeflow.org/)
<div id="nasa"></div>
7. [NASA to launch 247 petabytes of data into AWS – but forgot about eye-watering cloudy egress costs before lift-off](https://www.theregister.com/2020/03/19/nasa_cloud_data_migration_mess/)
<div id="sky"></div>
8. [Ion Stoica and Scott Shenker. 2021. From cloud computing to sky computing. In Proceedings of the Workshop on Hot Topics in Operating Systems (HotOS '21). Association for Computing Machinery, New York, NY, USA, 26–32. https://doi.org/10.1145/3458336.3465301](https://dl.acm.org/doi/10.1145/3458336.3465301)
<div id="aboveclouds"></div>
9. [Armbrust, Michael et al. “Above the Clouds: A Berkeley View of Cloud Computing.” Science 53 (2009): 07-013.](https://www2.eecs.berkeley.edu/Pubs/TechRpts/2009/EECS-2009-28.pdf)
|
AI-System/Textbook/第7章-异构计算集群调度与资源管理系统/7.1-异构计算集群管理系统简介.md/0
|
{
"file_path": "AI-System/Textbook/第7章-异构计算集群调度与资源管理系统/7.1-异构计算集群管理系统简介.md",
"repo_id": "AI-System",
"token_count": 16865
}
| 17 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 9.1 自动机器学习
- [9.1 自动机器学习](#91-自动机器学习)
- [9.1.1 超参数优化](#911-超参数优化)
- [9.1.2 神经网络结构搜索](#912-神经网络结构搜索)
- [小结与讨论](#小结与讨论)
- [参考文献](#参考文献)
自动机器学习的核心是机器学习模型,对给定任务自动设计相应模型,这个过程含有两个主要的设计空间:模型的结构与模型(训练)的超参数。模型的结构包括不同类型的模型,如随机森林(Random Forest)、多层感知机(Multilayer Perceptron),也包括同一类模型的不同结构,如卷积神经网络(Convolutional Neural Network)中不同的网络结构。其中,在不同类型的模型中选择最合适的模型又被称为模型选择(Model Selection)。围绕着模型的自动设计与生成,自动机器学习的流程如图9-1-1所示。首先,用户提供想要使用机器学习模型的任务的描述以及数据集。对应该任务,存在一系列候选模型及其超参数的候选取值可能在该任务上取得较好的表现(Performance)。这些候选的模型结构及超参数取值构成了一个模型空间(Model Space)。自动机器学习会在这个模型空间中搜索,使用的搜索算法有两类:超参数优化算法和神经网络结构搜索算法。这些算法通常会从模型空间中采样出一系列具体的模型作为试验(Trial),在计算节点上运行并验证其表现。试验中得到的结果通常会返回给搜索算法,使算法生成更优潜力的试验,以此循环往复直到得到满足用户需求的模型。所得到的模型最终会被部署在云端或者终端设备上。
<center> <img src="./img/9-1-1-overview.png"/></center>
<center>图9-1-1. 自动机器学习的基本流程</center>
## 9.1.1 超参数优化
***超参数优化概述***
在机器学习中,超参数是指机器学习模型各个方面可配置的参数。例如,在模型结构中丢弃率(Dropout Rate),卷积算子中的通道数(Channel Number),在模型训练中的学习率(Learning Rate),批尺寸(Batch Size)。超参数优化是指为这些可配置的参数在其可行域(Feasible Region)内寻找最优参数取值的过程。超参数的调优在机器学习领域也被戏称为“炼丹”,可见超参数在使机器学习模型获得好的表现中发挥着重要作用。传统的超参数调优是领域专家根据自己的既有经验设置超参数的取值然后验证该组取值的表现,根据对其表现的分析再结合自己的专家经验再设置一组新的超参数取值并验证其表现。重复这样的过程直到找到表现较好的超参数取值。这种手动超参数调优的过程通常是较为重复和繁琐的,因为很多时候即使是领域专家对于一个超参数具体取什么值时模型表现最好也无法确定,需要手动逐个试验。因此,相比于直接指定一组最优的超参数取值,由领域专家指定一个超参数取值的范围更加实际且合理。这里,范围即超参数被指定的可行域,也被称之为模型空间。在超参数的可行域内寻找最优超参数取值的过程,可以被搜索算法自动化起来,从而减少领域专家的单调而重复的调参工作,专注于模型的设计和创新。因此,在机器学习特别是深度神经网络蓬勃发展的时候,超参数优化算法也在快速迭代,以应对机器学习模型带来的新的优化机会和挑战。
如果更广义的理解超参数,超参数不仅存在于机器学习模型中,它还广泛存在于计算机系统的各个部件中,甚至整个工业生产的各个环节。例如,数据库系统中存在大量超参数(或称之为配置参数),像缓存(Cache)的大小,缓存替换算法的选择。再例如,食品工程中每种原料的添加量。在这些场景中,超参数优化同样可以提升配置和研发效率。本章主要围绕着机器学习和深度神经网络模型深入阐述超参数优化与神经网络结构搜索。
***超参数优化的形式化描述***
假设机器学习模型$M$的待调优超参数有$n$个,每个超参数$\theta_{i}$的可行域是$\Theta_{i}$。$\theta = (\theta_0, \theta_1, ..., \theta_{n-1}) \in \Theta$,其中$\Theta = \Theta_0 \times \Theta_1 \times ... \times \Theta_{n-1}$。模型$M$的一组超参数取值$\theta$在任务数据集$D$上的表现由评估函数$\digamma$得到,例如,$\digamma$返回模型$M$使用$\theta$在数据集$D$上的验证精度(Validation Accuracy)。$\digamma$通常需要针对模型和数据集来定义和实现。超参数优化的优化目标可以由下面的目标函数来形式化定义。
$$\underset{\theta \in \Theta}{argmax} \digamma (M, \theta, D)$$
超参数优化的过程如图9-1-1所示。超参数优化算法是整个搜索过程的核心,其优化目标则是上面的目标函数。整个过程形成一个产生超参数取值与收取反馈的闭环,直到找到满足要求的一组超参数取值或者预设的计算资源用尽。
***超参数优化算法***
超参数优化算法主要分为三类:暴力搜索(Brute-Force)算法、基于模型的(Model-Based)算法和启发式(Heuristic)算法。
**暴力搜索算法**包括随机搜索算法(Random Search)和网格搜索算法(Grid Search)。随机搜索算法是指随机地在每个超参数的可行域中采样出一个取值,从而得到的一组超参数取值。有时用户会根据自己的先验知识指定超参数的分布(如均匀分布,高斯分布),超参数取值的随机采样会依据这个分布进行。网格搜索算法是指对于每个超参数,在其可行域范围内依次遍历其候选取值。通常网格搜索被用于处理离散型(Discrete)超参数,连续型超参数也可以通过转换到一些列离散取值来应用网格搜索算法。图9-1-2以两个超参数为例分别展示了随机搜索算法与网格搜索算法采样出的超参数取值在搜索空间中不同的分布。
<center> <img src="./img/9-1-2-randomgrid.png"/></center>
<center>图9-1-2. 随机搜索算法与网格搜索算法的示意图(TODO:重画)</center>
暴力搜索算法的特点是具有非常高的并行。在计算资源充足的情况下,它可以采样出成百上千组超参数的取值,验证它们的表现,从中挑选出最优者。这就要求自动机器学习系统能够*支持多试验并发运行*。
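下面给出随机搜索与网格搜索的极简Python实现草图(`eval_func`为用户提供的表现评估函数,假设表现越大越好,仅为示意):
```python
import itertools
import random

def random_search(space, eval_func, max_trials):
    """space: {超参数名: 候选取值列表}; 每次独立随机采样一组取值"""
    best = None
    for _ in range(max_trials):
        params = {k: random.choice(v) for k, v in space.items()}
        perf = eval_func(params)
        if best is None or perf > best[1]:
            best = (params, perf)
    return best

def grid_search(space, eval_func):
    """依次遍历所有候选取值的笛卡尔积"""
    best = None
    keys = list(space.keys())
    for values in itertools.product(*(space[k] for k in keys)):
        params = dict(zip(keys, values))
        perf = eval_func(params)
        if best is None or perf > best[1]:
            best = (params, perf)
    return best
```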
**基于模型的算法**一般被统称为SMBO(Sequential Model-Based Optimization)。这种算法会选择一种模型来拟合优化空间,并在基于拟合出的优化空间(拟合出的优化空间并不一定准确)做新的超参数取值的采样。具体来说,SMBO是在以下两个步骤上交替执行,以尽可能高效的采样出表现优秀的超参数取值:
- 拟合模型:将已经运行结束并取得对应超参数表现的试验作为训练数据。使用该训练数据训练用于拟合优化空间的模型。
- 基于拟合的模型采样:拟合后的模型在优化空间中的每组超参数取值,均有其对该组超参数取值在表现上评估。因此可以通过采样的方式选出该模型认为的最优超参数取值作为下次尝试对象。
以下是更详细的SMBO执行逻辑的伪代码。
```python
def SMBO(algo, eval_func, max_trial_num):
# algo: 用于拟合超参数空间的模型
# eval_func: 调参数的目标函数,通常是DNN模型
# max_trial_num: 运行eval_func的次数
trials = []
for _ in range(max_trial_num):
new_trial = sample_optimal(algo)
perf = eval_func(new_trial)
trials.append((new_trial, perf))
algo = model_fitting(algo, trials)
return trials
```
[SMAC算法](https://ml.informatik.uni-freiburg.de/wp-content/uploads/papers/11-LION5-SMAC.pdf)使用随机森林作为模型,可以为每组超参数取值计算出它的表现的均值和方差。每次生成新的试验的过程中,该算法会随机抽样大量的超参数取值(例如10,000个),并通过一个评估函数EI(Expected Improvement)从中选出最有希望的一个或者多个超参数取值。每一组超参数取值的EI的计算是基于由模型估计出来的该组超参数表现的均值和方差。核心思想是综合探索(Exploration)和利用(Exploitation),既快速收敛到局部最优点(Local Optimum)又可以发现足够的最优区域。另一大类基于模型的算法是基于高斯过程(Gaussian Process)。使用高斯过程做超参数搜索计算复杂度较高,是试验点数量的三次方复杂度,超参数数量的线性复杂度,因此较难应用在大规模超参数搜索的场景中。一些工作借鉴高斯过程的思想,对建模过程做了简化,如TPE。[TPE](https://proceedings.neurips.cc/paper/2011/file/86e8f7ab32cfd12577bc2619bc635690-Paper.pdf)的核心思想是将给定一组超参数取值计算其表现的概率分布p(loss|params),转换为给定一个表现计算不同超参数取值取得这个表现的概率p(params|loss)。这里后者可以使用已得到的试验做近似估计。与SMAC类似,基于高斯过程的方法同样是使用EI做超参数的选取。
图9-1-3形象的展示了在超参数搜索的过程中,模型对搜索空间中不同区域的估计。随着试验数量的增加,模型对搜索空间的估计的置信度不断增加,表现为估计的方差变小。
<center> <img src="./img/9-1-6-smbo.png"/></center>
<center>图9-1-3. SMBO的优化过程</center>
**启发式算法**种类繁多,包括遗传算法(Evolutionary Algorithm),模拟退火算法(Simulated Annealing)。遗传算法是指维护一个种群,其中每个个体是一组超参数取值,根据这些个体的表现进行变异和淘汰,比如改变一个表现较好的个体的一个超参数取值来生成一个新的个体。具体变异和淘汰的方式有很多种,这里不展开介绍。在机器学习和深度学习领域,遗传算法通常会有较好的表现,特别是在搜索空间较大的情况下。模拟退火算法的整个搜索过程和遗传算法类似。它的初始状态可以是一组或者多组超参数取值,由一个产生函数基于当前超参数取值生成新的超参数取值。类似于遗传算法,这个产生函数可以是改变某个超参数的取值。然后使用一种接受标准(常用的是Metropolis标准)来决定是否接受这组新的超参数取值。
这些算法根据自身原理,各自有最适合的超参数优化任务:有些擅长连续空间上的搜索(如高斯过程),有些适合离散空间上的遍历(如网格搜索),有些适合神经网络结构参数的搜索(如遗传算法),有些适合大搜索空间的快速遍历(如即将介绍的Hyperband)。这就要求自动机器学习系统*能够灵活插拔不同的超参数优化算法*,以满足不同的场景与需求。
***超参数优化过程在算法层面上的加速***
超参数优化在深度学习场景下通常极为耗时,因为每一个试验需要运行较长时间才能获得该组超参数的表现评估。一些算法会利用试验的运行特性,试验之间的关系,超参数搜索任务之间的关系,来加速超参数的搜索过程。
利用试验运行特性的超参数搜索。每个试验在运行过程中会输出中间结果(Intermediate Performance),如学习曲线。这些中间结果可以表现一个试验是否具有得到较好表现的潜质。如果中间结果表现就明显差于其他试验,则该试验可以被提前终止,释放计算资源给新的试验。这个过程被称之为早停(Early Stop)。典型的早停算法有Median Stop和Curve Fitting。Median Stop是指如果一个试验如果它的所有中间结果都低于其他试验对应中间结果的中位数,这个试验将被终止。Curve Fitting是使用曲线拟合的方式拟合学习曲线,用以预测该试验在未来epoch的表现,如果表现差于设定的阈值,则提前终止该试验。早停算法可以和搜索算法并行独立工作,也可以和搜索算法有机结合。结合的典型算法有[Hyperband](https://arxiv.org/pdf/1603.06560.pdf)和[BOHB](https://arxiv.org/pdf/1807.01774.pdf)。
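以Median Stop为例,其判停逻辑可以用如下Python草图表示(假设已收集各试验在每个epoch的中间结果,仅为示意):
```python
import statistics

def should_early_stop(trial_curve, other_curves):
    """trial_curve: 当前试验的中间结果序列;
    other_curves: 其他试验的中间结果序列列表。
    按上文规则: 若当前试验所有中间结果都低于其他试验对应位置的中位数, 则判定早停。"""
    for step, value in enumerate(trial_curve):
        peers = [c[step] for c in other_curves if len(c) > step]
        if not peers or value >= statistics.median(peers):
            return False
    return len(trial_curve) > 0
```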
利用试验之间关系的超参数搜索。一个超参数搜索任务产生的试验通常是针对同一个任务的,因此在有些情况下试验和试验之间可以共享模型的参数。[PBT](https://arxiv.org/pdf/1711.09846.pdf)属于这类超参数搜索算法。它的基本搜索框架基于遗传算法。其中新产生的个体(即一组新的超参数取值对应的模型)会从其父个体中继承模型权重,从而可以加速新个体的训练进程。
利用迁移学习(Transfer Learning)加速超参数搜索。相似的超参数搜索任务在超参数取值的选择上可以相互借鉴。
## 9.1.2 神经网络结构搜索
***神经网络结构搜索概述***
在深度学习领域,神经网络结构(Neural Architecture)是影响模型性能的一个关键因素。一方面,在深度学习发展的过程中,神经网络结构在不断迭代,带来更高的模型精度,如AlexNet、ResNet、VGG、InceptionV3、EfficientNet,再到后来十分流行的Transformer。另一方面,针对特定的场景,神经网络结构通常需要做有针对性的设计和调优,以达到预期的模型精度和模型推理延迟,例如,对模型的宽度、深度的调整,对算子(Operator)的选择。神经网络结构本质上是一个数据流图(Data Flow Graph),图中的节点是算子或者模块(Block),边是张量(Tensor)及其流向。由于图的变化的自由度较大,因此神经网络模型的研究人员尝试通过搜索的方式在一个神经网络结构的可行域空间中寻找最优的神经网络结构。这种技术被称之为神经网络结构搜索,简称为NAS(Neural Architecture Search)。
NAS技术主要面向两类场景,一类是用于探索和发现新的神经网络结构。这类研究工作从已有的各种神经网络结构中总结出结构特点并结合自己对神经网络结构的理解,构建出一个神经网络结构的搜索空间,以期该空间中存在更优的神经网络结构。[NASNet](https://arxiv.org/pdf/1707.07012.pdf)是这一类的一个经典工作。[AutoML-Zero](http://proceedings.mlr.press/v119/real20a/real20a.pdf)则更进一步期望使用基础的数学算子构建出整个神经网络模型。另一类NAS技术面向的场景是在给定的神经网络结构下寻找网络中各层大小的最优配比,以期将模型快速适配到对模型大小和延迟有不同需求的场景中。其中最典型的工作是[Once-for-All](https://arxiv.org/abs/1908.09791)。这种NAS技术非常适合将模型快速适配并部署到端侧设备上(Edge Device),从某种意义上说它和深度学习模型的剪枝技术在解决相似的问题。
近些年,虽然NAS技术得到了快速的发展,但是需要清楚NAS的适用范围。NAS并不能取代神经网络模型的专家或者领域专家,而更多的是作为提升模型设计效率的手段和加速深度学习模型落地的途径。网络结构搜索空间的设计仍然需要交由专家完成,在空间中寻找最优网络结构则交由神经网络结构搜索算法完成。这个过程和超参数优化类似。因此,可以预见未来的神经网络模型设计和调优的过程会由相辅相成的两个方面组成。一个方面是由专家设计或指定一个网络结构的宏观轮廓(Sketch),另一个阶段是由自动化模块细化这个宏观轮廓生成具体可执行的神经网络结构。这种模型设计和调优过程充分发挥了两者各自的优势,专家更了解逻辑层面上哪些操作(Operator),模块(Block or Cell)和连接(Connection)可能对当前任务更有优势,而自动化过程更适合精细地调优网络的各种连接、大小的配置。
神经网络搜索空间和神经网络搜索算法是NAS中的两个关键组件。下面会分别详细介绍。
***神经网络结构搜索空间***
神经网络搜索空间(以下简称NAS空间)是专家知识的凝练。首先,对于单个任务,它圈定了一个模型探索的范围,以此获得表现更好的模型;其次,对于一类任务,它是对在该类任务上表现较好的模型的一种归纳,从而在任何一个具体的任务上都可以在这个NAS空间中搜索到优秀的模型。
图9-1-4是一个简化的搜索空间的例子。其中计算流图的每个节点是一个算子(Operator),边是张量及其流向。在这个搜索空间中每个节点中的算子都可以从一个候选算子集合中选取。图中虚线表示一个节点的输入可以从其前驱节点的输出中任意选取,例如第三个节点可以接第二个节点的输出,也可以接第一个节点的输出,还可以同时接第一个和第二个节点的输出。可选的算子和可选的连边一起构成了完整的NAS空间。
<center> <img src="./img/9-1-4-nasspace.png"/></center>
<center>图9-1-4. 一个神经网络结构搜索空间的例子</center>
一个NAS空间通常是面向某一个或者某一类任务设计的,而且相比于上面的例子更加复杂和完善,例如,[NASNet中的space](https://arxiv.org/pdf/1707.07012.pdf),[MnasNet中的space](https://arxiv.org/pdf/1807.11626.pdf),[DARTS中的space](https://arxiv.org/pdf/1806.09055.pdf)等等。一个搜索空间通常都包含$10^{10}$以上的不同的候选网络。目前,在NAS的研究中,不断有新的搜索空间被设计出来,使其包含新的网络结构(如Transformer结构),面向新的设备。各种各样的NAS空间使*简单而灵活地编写NAS空间*成为一个重要的需求,催生了机器学习工具新的演进方向,即,新的机器学习工具需要能够提供表达NAS空间的简单易用的编程接口。
***神经网络结构搜索算法***
神经网络结构搜索算法和超参数优化算法有很多相似之处,又存在很大的不同。相同之处在于,如果把神经网络结构空间使用超参数来描述的话,超参数优化算法都可以作为神经网络结构搜索算法使用。而不同之处在于神经网络结构搜索有其自己的特点,基于这些特点而设计的搜索算法和超参数搜索算法有很大的不同。
神经网络结构搜索算法可以分为三类:多试验搜索(Multi-trial Search)、单发搜索(Oneshot Search)和基于预测器的搜索(Predictor-based Search)。
**多试验搜索**中的算法最接近超参数优化算法。在多试验搜索中,搜索算法从搜索空间中采样出的每个网络结构都进行独立做表现评估,并将表现结果反馈给搜索算法。这里获得一个网络结构的表现评估通常需要在训练数据集上训练该网络结构。对于深度学习模型来说,这是一个非常耗时的过程,因此多试验搜索通常需要耗费大量的计算资源。在计算资源十分充足的情况下,这种搜索算法能够更稳定的找到搜索空间中表现优秀的网络结构。多试验搜索的经典算法有[NASNet](https://arxiv.org/pdf/1707.07012.pdf)中使用的强化学习算法,[AmoebaNet](https://arxiv.org/pdf/1802.01548.pdf)中使用的时效进化(Aging Evolution)。它们分别使用强化学习算法和进化算法不断采样有可能表现更优的网络结构。相比而言,进化算法通常需要的试验数量更少,收敛速度更快。如图9-1-1所示,多试验搜索的过程与超参数优化基本一致,只有采样内容上的区别,即,是超参数和神经网络结构。
**单发搜索**是目前神经网络结构搜索算法中比较流行的一类,主要原因是它在很大的搜索空间上需要的搜索时间远小于多试验搜索。它的核心思想是将搜索空间构建成一个超大的网络,称之为超网络(Supernet),将其作为一个模型训练。超网络将组合爆炸的网络结构数变成了一个线性复杂度的大模型,其中算子的权重会被所有包含该算子的网络结构所共享(即共同训练)。图9-1-5展示了一个超网络的例子,其中超网络的形式有两种:多路连接的超网络(Multi-path Supernet)和混合算子的超网络(Mixed-op Supernet)。
<center> <img src="./img/9-1-5-supernet.png"/></center>
<center>图9-1-5. 一个搜索空间对应的超网络 </center>
多路连接的超网络是将一个节点中的候选算子并排连接到超网络中,每个采样出的网络结构只激活每个节点中的一路,如图9-1-5(b)所示。采样出的算子会继承其在超网络中的权重,训练并将权重更新回超网络中。在超网络的训练中,通常每一个采样出的网络结构仅训练一个小批次(Mini-batch),因此在超网络的训练过程中,网络结构的采样非常频繁,采样策略也影响超网络的训练效果。由于权重的共享,每个子网的训练会更加高效。相比于每个子网独立从头独立训练,会大大降低计算资源的消耗。这类单发搜索的典型算法有[ENAS](http://proceedings.mlr.press/v80/pham18a/pham18a.pdf)、[DARTS](https://arxiv.org/pdf/1806.09055.pdf),其中ENAS使用强化学习算法做网络结构的采样,而DARTS是在每一路上增加结构权重,通过可微分的方式训练结构权重并基于结构权重采样网络结构。
由于一个节点中的候选算子类型的差异,不同子网络共享权重时会产生互相拉扯的效果,影响超网络的训练效果。为了降低候选算子之间相互的影响,混合算子是一种新的权重共享方式,这里称之为混合算子共享。图9-1-5(c)的上半部分展示了以Conv2d为例的混合算子。其中,5x5的权重矩阵是7x7权重矩阵的子矩阵,3x3的权重矩阵又是5x5权重矩阵的子矩阵,因此混合算子的参数量和最大的算子的参数量相等。混合算子在超网络中的训练是每次采样其中一个候选算子所对应的参数矩阵,训练一个小批次并更新对应参数。这种共享粒度是在一个节点的候选算子之间做权重共享,而多路连接超网络中的共享是子网络在各个候选算子上的共享。混合算子中的权重共享,由于其算子类型相同只是算子大小存在差异,这种共享更加有效。混合算子共享的局限性也显而易见,即,一个节点的候选算子必须是同类型算子,对于图9-1-5(c)下半部分的节点则不能使用混合算子共享。
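混合算子中“小核共享大核中心子矩阵”的取参方式可以用如下Python片段示意(以二维卷积核为例,基于NumPy,仅为示意):
```python
import numpy as np

weight_7x7 = np.random.randn(64, 64, 7, 7)  # 混合算子只存储最大算子的权重

def sub_kernel(weight, k):
    """从最大核中取出 k x k 的中心子矩阵, 作为候选小算子的权重"""
    full = weight.shape[-1]
    start = (full - k) // 2
    return weight[..., start:start + k, start:start + k]

w5 = sub_kernel(weight_7x7, 5)  # 5x5 权重是 7x7 权重的中心子矩阵
w3 = sub_kernel(weight_7x7, 3)  # 3x3 权重又是 5x5 权重的中心子矩阵
```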
<center> <img src="./img/9-1-6-oneshot.png"/></center>
<center>图9-1-6. 单发搜索的流程 </center>
超网络的训练只是单发搜索的第一阶段,单发搜索的整个过程如图9-1-6所示。第一个阶段会训练出一个超网络,训练的过程可以选择不同的子网络的采样算法,比如[三明治采样](https://arxiv.org/pdf/1903.05134.pdf)。训练得到的超网络用于第二阶段作为评估子网络表现的代理指标(Proxy Metric)。具体的,任何一个子网络在继承了超网络中的权重后可以直接在测试数据集上验证其表现,而不用从头训练该子网络,从而大大加速了搜索的过程。第二阶段是将超网络作为一种网络结构的代理评估器。搜索算法,如遗传算法,会在超网络上采样子网络,用子网络继承于超网络的权重做子网络表现的评估,并以此评估指导后续在超网络上的采样。最终搜索算法收敛到表现最优的若干个子网络。把这些子网络独立的从头训练获取它们真实的表现,找出其中表现最好的那个作为单发搜索最终的搜索结果。
**基于预测器的搜索**是训练一个网络结构的预测器,来预测每一个网络结构的表现。它比多试验搜索需要的计算资源少很多,但是通常多于单发搜索。因为预测器的训练仍然需要至少上百个从搜索空间采样出网络结构,将它们独立训练获得其表现。表现预测器基于这批网络结构的真实表现拟合得来。然后,训练好的预测器作为评估网络结构表现的代理指标。整个流程和单发搜索类似。区别在于单发搜索第一阶段产出的是训练好的超网络,而基于预测器的搜索第一阶段产出的是训练好的表现预测器。这类搜索算法的研究工作包括[BRP-NAS](https://arxiv.org/pdf/2007.08668.pdf)、[Neural Predictor](https://arxiv.org/pdf/2108.03001.pdf)。
从以上的介绍可以看出,一个搜索空间可以使用不同的搜索算法,一个搜索算法又可以被应用到不同的搜索空间上。这就要求自动机器学习系统和工具将*搜索空间的表达和搜索算法的实现解耦*。另外,无论是哪种搜索算法都对计算资源有较高的要求,这就需要系统和工具能够*分布式运行搜索过程*并且从系统层面上*优化训练和搜索的速度*。
## 小结与讨论
在机器学习模型被越来越多的部署在不同场景和应用中,自动化机器学习也掀起了一波新的热潮试图解决当前机器学习模型在设计和部署中难于扩展的问题。每一个具体的场景都需要模型开发人员深度介入做模型的设计和调优。目前,自动化机器学习已经对模型开发提供了很大的帮助,特别是自动超参数搜索。但是,距离更加自动化的设计模型还有不小的距离。其中自动化搜索算法上需要有进一步的创新。另外,一个易用、灵活、且强大的自动化工具是自动化机器学习发展和应用的基石。在下一节,我们将详细讨论自动化机器学习的系统与工具。
## 参考文献
<div id="xx-1"></div>
1. [Bergstra, James, Rémi Bardenet, Yoshua Bengio, and Balázs Kégl. "Algorithms for hyper-parameter optimization." Advances in neural information processing systems 24 (2011).](https://proceedings.neurips.cc/paper/2011/file/86e8f7ab32cfd12577bc2619bc635690-Paper.pdf)
2. [Hutter, Frank, Holger H. Hoos, and Kevin Leyton-Brown. "Sequential model-based optimization for general algorithm configuration." In International conference on learning and intelligent optimization, pp. 507-523. Springer, Berlin, Heidelberg, 2011.](https://ml.informatik.uni-freiburg.de/wp-content/uploads/papers/11-LION5-SMAC.pdf)
3. [Li, Lisha, Kevin Jamieson, Giulia DeSalvo, Afshin Rostamizadeh, and Ameet Talwalkar. "Hyperband: A novel bandit-based approach to hyperparameter optimization." The Journal of Machine Learning Research 18, no. 1 (2017): 6765-6816.](https://arxiv.org/pdf/1603.06560.pdf)
4. [Falkner, Stefan, Aaron Klein, and Frank Hutter. "BOHB: Robust and efficient hyperparameter optimization at scale." In International Conference on Machine Learning, pp. 1437-1446. PMLR, 2018.](https://arxiv.org/pdf/1807.01774.pdf)
5. [Jaderberg, Max, Valentin Dalibard, Simon Osindero, Wojciech M. Czarnecki, Jeff Donahue, Ali Razavi, Oriol Vinyals et al. "Population based training of neural networks." arXiv preprint arXiv:1711.09846 (2017).](https://arxiv.org/pdf/1711.09846.pdf)
6. [Zoph, Barret, Vijay Vasudevan, Jonathon Shlens, and Quoc V. Le. "Learning transferable architectures for scalable image recognition." In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 8697-8710. 2018.](https://arxiv.org/pdf/1707.07012.pdf)
7. [Real, Esteban, Chen Liang, David R. So, and Quoc V. Le. "AutoML-zero: Evolving machine learning algorithms from scratch." In International Conference on Machine Learning. PMLR, 2020.](http://proceedings.mlr.press/v119/real20a/real20a.pdf)
8. [Cai, Han, Chuang Gan, Tianzhe Wang, Zhekai Zhang, and Song Han. "Once-for-all: Train one network and specialize it for efficient deployment." arXiv preprint arXiv:1908.09791 (2019).](https://arxiv.org/abs/1908.09791)
9. [Tan, Mingxing, Bo Chen, Ruoming Pang, Vijay Vasudevan, Mark Sandler, Andrew Howard, and Quoc V. Le. "Mnasnet: Platform-aware neural architecture search for mobile." In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2820-2828. 2019.](https://arxiv.org/pdf/1807.11626.pdf)
10. [Liu, Hanxiao, Karen Simonyan, and Yiming Yang. "Darts: Differentiable architecture search." arXiv preprint arXiv:1806.09055 (2018).](https://arxiv.org/pdf/1806.09055.pdf)
11. [Real, Esteban, Alok Aggarwal, Yanping Huang, and Quoc V. Le. "Regularized evolution for image classifier architecture search." In Proceedings of the aaai conference on artificial intelligence, vol. 33, no. 01, pp. 4780-4789. 2019.](https://arxiv.org/pdf/1802.01548.pdf)
12. [Pham, Hieu, Melody Guan, Barret Zoph, Quoc Le, and Jeff Dean. "Efficient neural architecture search via parameters sharing." In International conference on machine learning, pp. 4095-4104. PMLR, 2018.](http://proceedings.mlr.press/v80/pham18a/pham18a.pdf)
13. [Yu, Jiahui, and Thomas S. Huang. "Universally slimmable networks and improved training techniques." In Proceedings of the IEEE/CVF international conference on computer vision, pp. 1803-1811. 2019.](https://arxiv.org/pdf/1903.05134.pdf)
14. [Dudziak, Lukasz, Thomas Chau, Mohamed Abdelfattah, Royson Lee, Hyeji Kim, and Nicholas Lane. "Brp-nas: Prediction-based nas using gcns." Advances in Neural Information Processing Systems 33 (2020): 10480-10490.](https://arxiv.org/pdf/2007.08668.pdf)
15. [Wen, Wei, Hanxiao Liu, Yiran Chen, Hai Li, Gabriel Bender, and Pieter-Jan Kindermans. "Neural predictor for neural architecture search." In European Conference on Computer Vision, pp. 660-676. Springer, Cham, 2020.](https://arxiv.org/pdf/2108.03001.pdf)
|
AI-System/Textbook/第9章-自动化机器学习系统/9.1-自动化机器学习.md/0
|
{
"file_path": "AI-System/Textbook/第9章-自动化机器学习系统/9.1-自动化机器学习.md",
"repo_id": "AI-System",
"token_count": 19757
}
| 18 |
# MLHyperparameterTuning Pipeline
trigger:
batch: true
branches:
include:
- master
variables:
- group: AzureKeyVault
jobs:
- job: MLHyperparameterTuningJob
timeoutInMinutes: 300
cancelTimeoutInMinutes: 2
pool:
vmImage: 'Ubuntu-16.04'
steps:
- bash: |
source /usr/share/miniconda/etc/profile.d/conda.sh
which conda
conda env create -f environment.yml
conda env list
conda activate MLHyperparameterTuning
conda env list
echo Login Azure Account
az login -t $(sptenent) --service-principal -u $(spidentity) --password $(spsecret)
echo Try and figure out what account set takes
az account set -h
echo Try and set it.
az account set --subscription $(subscriptionid)
# papermill 01_Data_Prep.ipynb 01_Data_Prep_Output.ipynb --log-output --no-progress-bar -k python3
displayName: 'Configuration'
- bash: |
source /usr/share/miniconda/etc/profile.d/conda.sh
conda activate MLHyperparameterTuning
echo Executing 00_Data_Prep.ipynb
papermill 00_Data_Prep.ipynb 00_Data_Prep_Output.ipynb --log-output --no-progress-bar -k python3
displayName: '00_Data_Prep.ipynb'
- bash: |
source /usr/share/miniconda/etc/profile.d/conda.sh
conda activate MLHyperparameterTuning
echo Executing 01_Training_Script.ipynb
papermill 01_Training_Script.ipynb 01_Training_Script_Output.ipynb --log-output --no-progress-bar -k python3
displayName: '01_Training_Script.ipynb'
- bash: |
source /usr/share/miniconda/etc/profile.d/conda.sh
conda activate MLHyperparameterTuning
echo Executing 02_Testing_Script.ipynb
papermill 02_Testing_Script.ipynb 02_Testing_Script_Output.ipynb --log-output --no-progress-bar -k python3
displayName: '02_Testing_Script.ipynb'
- bash: |
source /usr/share/miniconda/etc/profile.d/conda.sh
conda activate MLHyperparameterTuning
echo Executing 03_Run_Locally.ipynb
papermill 03_Run_Locally.ipynb 03_Run_Locally_Output.ipynb --log-output --no-progress-bar -k python3 -p selected_subscription $(subscriptionid) -p resource_group $(azurergname)
displayName: '03_Run_Locally.ipynb'
- bash: |
source /usr/share/miniconda/etc/profile.d/conda.sh
conda activate MLHyperparameterTuning
echo Executing 04_Hyperparameter_Random_Search.ipynb
papermill 04_Hyperparameter_Random_Search.ipynb 04_Hyperparameter_Random_Search_Output.ipynb --log-output --no-progress-bar -k python3 -p max_total_runs $(dsmaxruns)
displayName: '04_Hyperparameter_Random_Search.ipynb'
- bash: |
source /usr/share/miniconda/etc/profile.d/conda.sh
conda activate MLHyperparameterTuning
echo Executing 05_Train_Best_Model.ipynb
papermill 05_Train_Best_Model.ipynb 05_Train_Best_Model_Output.ipynb --log-output --no-progress-bar -k python3
displayName: '05_Train_Best_Model.ipynb'
- bash: |
source /usr/share/miniconda/etc/profile.d/conda.sh
conda activate MLHyperparameterTuning
echo Executing 06_Test_Best_Model.ipynb
papermill 06_Test_Best_Model.ipynb 06_Test_Best_Model_Output.ipynb --log-output --no-progress-bar -k python3
displayName: '06_Test_Best_Model.ipynb'
- bash: |
source /usr/share/miniconda/etc/profile.d/conda.sh
conda activate MLHyperparameterTuning
echo Executing 07_Tear_Down.ipynb
papermill 07_Tear_Down.ipynb 07_Tear_Down_Output.ipynb --log-output --no-progress-bar -k python3
displayName: '07_Tear_Down.ipynb'
- bash: |
source /usr/share/miniconda/etc/profile.d/conda.sh
conda activate MLHyperparameterTuning
echo Execute Resource Group Delete
existResponse=$(az group exists -n $(azurergname))
if [ "$existResponse" == "true" ]; then
echo Deleting project resource group
az group delete --name $(azurergname) --yes
else
echo Project resource group did not exist
fi
echo Done Cleanup
displayName: 'Backup Cleanup'
condition: or(canceled(),failed())
- task: CreateWorkItem@1
inputs:
workItemType: 'Issue'
title: $(System.TeamProject) - Build $(Build.BuildNumber) Failed
assignedTo: 'Mario Bourgoin <[email protected]>'
associate: true
teamProject: $(System.TeamProject)
fieldMappings: |
Description=Branch: Branch $(Build.SourceBranch) failed to build. Go to Boards>WorkItems and tag the failure type.
displayName: 'Create work item on failure'
condition: failed()
|
AI/.ci/python-ml-training.yml/0
|
{
"file_path": "AI/.ci/python-ml-training.yml",
"repo_id": "AI",
"token_count": 1812
}
| 19 |
# Deploy Python Development Stage
#
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
# Job: Run Unit Tests and Static Code Analysis
#
# Step 1: Install Python Version 3.7
# Step 2: Authenticate with Private Pypi
# Step 3: Update Pip
# Step 4: Install Tox
# Step 5: Use Tox to run tests from tox.ini
# Step 6: Run PyLint for Static Code Analysis and Linting
# Step 7: Publish Test Results to Azure DevOps
# Step 8: Publish Code Coverage Results to Azure DevOps
#
# Author: [email protected]
parameters:
Agent: Hosted Ubuntu 1604
Demands: "python3"
stageName: 'defaultStageName'
jobDisplayName: 'defaultDisplayName'
jobTimeoutInMinutes: 180
DefaultWorkingDirectory: #
pypi_artifactFeeds: #
pypi_pythonDownloadServiceConnections: #
module: src
stages:
- stage: ${{parameters.stageName}}
dependsOn: []
jobs:
- job: Development
displayName: ${{parameters.jobDisplayName}}
pool:
name: ${{parameters.Agent}}
demands: ${{parameters.Demands}}
timeoutInMinutes: ${{parameters.jobTimeoutInMinutes}}
workspace:
clean: all
steps:
- task: UsePythonVersion@0
displayName: 'Use Python 3.7'
inputs:
versionSpec: 3.7
- task: PipAuthenticate@1
inputs:
artifactFeeds: $(pypi_artifactFeeds)
pythonDownloadServiceConnections: ${{parameters.pypi_pythonDownloadServiceConnections}}
onlyAddExtraIndex: true
- script: |
pip install --upgrade pip
displayName: 'Update Pip'
- script: |
pip install tox
displayName: 'Install Tox'
- script: |
tox -e py
displayName: "Run Tox"
env:
VERSION: $(Build.BuildNumber)
- script: |
pip install pylint junit-xml pylint-junit
pylint ./${{parameters.module}}
pylint --output-format=junit ./${{parameters.module}} >> test-pylint-results.xml
displayName: PyLint
continueOnError: true
- task: ComponentGovernanceComponentDetection@0
inputs:
scanType: 'LogOnly'
verbosity: 'Verbose'
alertWarningLevel: 'High'
failOnAlert: false
- task: PublishTestResults@2
condition: succeededOrFailed()
inputs:
testResultsFiles: '**/test-*.xml'
testRunTitle: 'Publish test results for Python 3.7'
- task: PublishCodeCoverageResults@1
inputs:
codeCoverageTool: Cobertura
summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml'
|
AI/.ci/stage/python_development_stage.yml/0
|
{
"file_path": "AI/.ci/stage/python_development_stage.yml",
"repo_id": "AI",
"token_count": 980
}
| 20 |
parameters:
azureSubscription: ''
azure_subscription: ''
location: "."
azureresourcegroup: dcibhpdl
workspacename: dcibhpwsdl
azureregion: westus2
aksimagename: dcibhpaksdl
aks_name: dcibhpaksdl
aks_service_name: dcibhpaksdlapi
doCleanup: true
flighting_release: false
flighting_preview: false
sp_appid: #
sp_password: #
steps:
- template: config_r.yml
parameters:
location: ${{parameters.location}}
azureSubscription: ${{parameters.azureSubscription}}
flighting_release: ${{parameters.flighting_release}}
flighting_preview: ${{parameters.flighting_preview}}
azure_subscription: ${{parameters.azure_subscription}}
sp_appid: ${{parameters.sp_appid}}
sp_password: ${{parameters.sp_password}}
- template: bash_r.yml
parameters:
notebook: 00_train_model.R
- template: bash_r.yml
parameters:
notebook: 01_create_resources.R
- template: bash_r.yml
parameters:
notebook: 02_install_ingress.R
- template: bash_r.yml
parameters:
notebook: 03_deploy_service.R
- template: bash_r.yml
parameters:
notebook: 04_test_service.R
- template: cleanuptask.yml
parameters:
azureSubscription: ${{parameters.azureSubscription}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
doCleanup: ${{parameters.doCleanup}}
|
AI/.ci/steps/RMLAKSDeployAMLJob.yml/0
|
{
"file_path": "AI/.ci/steps/RMLAKSDeployAMLJob.yml",
"repo_id": "AI",
"token_count": 496
}
| 21 |
parameters:
root: C:\Anaconda\envs\
conda_env: # this param must be set
steps:
- script: |
call conda env remove -n ${{parameters.conda_env}} -y
if exist ${{parameters.root}}${{parameters.conda_env}} rmdir /s /q ${{parameters.root}}${{parameters.conda_env}}
displayName: 'Remove Conda Env if it exists'
- script: |
python ./scripts/generate_conda_file.py --name ${{parameters.conda_env}}
call conda env create -f ${{parameters.conda_env}}.yaml
displayName: 'Setup Conda Env'
failOnStderr: true
|
AI/.ci/steps/reco_conda_config_win.yml/0
|
{
"file_path": "AI/.ci/steps/reco_conda_config_win.yml",
"repo_id": "AI",
"token_count": 204
}
| 22 |
from os.path import join
import sys
sys.path += ['../']
import argparse
import json
import os
import random
import numpy as np
import torch
from torch.utils.data import Dataset, TensorDataset
from model.models import MSMarcoConfigDict, ALL_MODELS
import csv
from utils.util import multi_file_process, numbered_byte_file_generator, EmbeddingCache
import pickle
def normalize_question(question: str) -> str:
if question[-1] == '?':
question = question[:-1]
return question
def write_qas_query(args, qas_file, out_query_file):
print("Writing qas query files " + str(out_query_file))
print("print",args.answer_dir,qas_file)
qas_path = os.path.join(
args.answer_dir,
qas_file,
)
out_query_path = os.path.join(
args.out_data_dir,
out_query_file ,
)
configObj = MSMarcoConfigDict[args.model_type]
tokenizer = configObj.tokenizer_class.from_pretrained(
args.model_name_or_path,
do_lower_case=True,
cache_dir=None,
)
qid = 0
with open(qas_path, "r", encoding="utf-8") as f, open(out_query_path, "wb") as out_query:
reader = csv.reader(f, delimiter='\t')
for row in reader:
question = normalize_question(row[0])
out_query.write(QueryPreprocessingFn(args, qid, question, tokenizer))
qid += 1
meta = {'type': 'int32', 'total_number': qid, 'embedding_size': args.max_seq_length}
with open(out_query_path + "_meta", 'w') as f:
json.dump(meta, f)
def write_query_rel(args, pid2offset, query_file, out_query_file, out_ann_file, out_train_file, passage_id_name="passage_id"):
print("Writing query files " + str(out_query_file) + " and " + str(out_ann_file))
query_path = os.path.join(
args.question_dir,
query_file,
)
with open(query_path, 'r', encoding="utf-8") as f:
data = json.load(f)
print('Aggregated data size: {}'.format(len(data)))
data = [r for r in data if len(r['positive_ctxs']) > 0]
print('Total cleaned data size: {}'.format(len(data)))
data = [r for r in data if len(r['hard_negative_ctxs']) > 0]
print('Total cleaned data size: {}'.format(len(data)))
out_query_path = os.path.join(
args.out_data_dir,
out_query_file ,
)
out_ann_file = os.path.join(
args.out_data_dir,
out_ann_file ,
)
out_training_path = os.path.join(
args.out_data_dir,
out_train_file ,
)
qid = 0
configObj = MSMarcoConfigDict[args.model_type]
tokenizer = configObj.tokenizer_class.from_pretrained(
args.model_name_or_path,
do_lower_case=True,
cache_dir=None,
)
with open(out_query_path, "wb") as out_query, \
open(out_ann_file, "w", encoding='utf-8') as out_ann, \
open(out_training_path, "w", encoding='utf-8') as out_training:
for sample in data:
positive_ctxs = sample['positive_ctxs']
neg_ctxs = sample['hard_negative_ctxs']
question = normalize_question(sample['question'])
first_pos_pid = pid2offset[int(positive_ctxs[0][passage_id_name])]
neg_pids = [str(pid2offset[int(neg_ctx[passage_id_name])]) for neg_ctx in neg_ctxs]
out_ann.write("{}\t{}\t{}\n".format(qid, first_pos_pid, sample["answers"]))
out_training.write("{}\t{}\t{}\n".format(qid, first_pos_pid, ','.join(neg_pids)))
out_query.write(QueryPreprocessingFn(args, qid, question, tokenizer))
qid += 1
print("Total lines written: " + str(qid))
meta = {'type': 'int32', 'total_number': qid, 'embedding_size': args.max_seq_length}
with open(out_query_path + "_meta", 'w') as f:
json.dump(meta, f)
embedding_cache = EmbeddingCache(out_query_path)
print("First line")
with embedding_cache as emb:
print(emb[0])
def write_mapping(args, id2offset, out_name):
out_path = os.path.join(
args.out_data_dir,
out_name ,
)
with open(out_path, 'w') as f:
for item in id2offset.items():
f.write("{}\t{}\n".format(item[0], item[1]))
def load_mapping(data_dir, out_name):
out_path = os.path.join(
data_dir,
out_name ,
)
pid2offset = {}
offset2pid = {}
with open(out_path, 'r') as f:
for line in f.readlines():
line_arr = line.split('\t')
pid2offset[int(line_arr[0])] = int(line_arr[1])
offset2pid[int(line_arr[1])] = int(line_arr[0])
return pid2offset, offset2pid
def preprocess(args):
pid2offset = {}
in_passage_path = os.path.join(
args.wiki_dir,
"psgs_w100.tsv" ,
)
out_passage_path = os.path.join(
args.out_data_dir,
"passages" ,
)
if os.path.exists(out_passage_path):
print("preprocessed data already exist, exit preprocessing")
return
else:
out_line_count = 0
print('start passage file split processing')
multi_file_process(args, 32, in_passage_path, out_passage_path, PassagePreprocessingFn)
print('start merging splits')
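        # Each worker record is laid out as: 8-byte big-endian passage id |
        # 4-byte big-endian token count | max_seq_length int32 token ids.
        # The merge strips the leading id and records its row index as the offset.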
with open(out_passage_path, 'wb') as f:
for idx, record in enumerate(numbered_byte_file_generator(out_passage_path, 32, 8 + 4 + args.max_seq_length * 4)):
p_id = int.from_bytes(record[:8], 'big')
f.write(record[8:])
pid2offset[p_id] = idx
if idx < 3:
print(str(idx) + " " + str(p_id))
out_line_count += 1
print("Total lines written: " + str(out_line_count))
meta = {'type': 'int32', 'total_number': out_line_count, 'embedding_size': args.max_seq_length}
with open(out_passage_path + "_meta", 'w') as f:
json.dump(meta, f)
write_mapping(args, pid2offset, "pid2offset")
embedding_cache = EmbeddingCache(out_passage_path)
print("First line")
with embedding_cache as emb:
print(emb[pid2offset[1]])
if args.data_type == 0:
write_query_rel(args, pid2offset, "nq-train.json", "train-query", "train-ann", "train-data")
elif args.data_type == 1:
write_query_rel(args, pid2offset, "trivia-train.json", "train-query", "train-ann", "train-data", "psg_id")
else:
# use both training dataset and merge them
write_query_rel(args, pid2offset, "nq-train.json", "train-query-nq", "train-ann-nq", "train-data-nq")
write_query_rel(args, pid2offset, "trivia-train.json", "train-query-trivia", "train-ann-trivia", "train-data-trivia", "psg_id")
with open(args.out_data_dir + "train-query-nq", "rb") as nq_query, \
open(args.out_data_dir + "train-query-trivia", "rb") as trivia_query, \
open(args.out_data_dir + "train-query", "wb") as out_query:
out_query.write(nq_query.read())
out_query.write(trivia_query.read())
with open(args.out_data_dir + "train-query-nq_meta", "r", encoding='utf-8') as nq_query, \
open(args.out_data_dir + "train-query-trivia_meta", "r", encoding='utf-8') as trivia_query, \
open(args.out_data_dir + "train-query_meta", "w", encoding='utf-8') as out_query:
a = json.load(nq_query)
b = json.load(trivia_query)
meta = {'type': 'int32', 'total_number': a['total_number'] + b['total_number'], 'embedding_size': args.max_seq_length}
json.dump(meta, out_query)
embedding_cache = EmbeddingCache(args.out_data_dir + "train-query")
print("First line after merge")
with embedding_cache as emb:
print(emb[58812])
with open(args.out_data_dir + "train-ann-nq", "r", encoding='utf-8') as nq_ann, \
open(args.out_data_dir + "train-ann-trivia", "r", encoding='utf-8') as trivia_ann, \
open(args.out_data_dir + "train-ann", "w", encoding='utf-8') as out_ann:
out_ann.writelines(nq_ann.readlines())
out_ann.writelines(trivia_ann.readlines())
write_query_rel(args, pid2offset, "nq-dev.json", "dev-query", "dev-ann", "dev-data")
write_query_rel(args, pid2offset, "trivia-dev.json", "dev-query-trivia", "dev-ann-trivia", "dev-data-trivia", "psg_id")
write_qas_query(args, "nq-test.csv", "test-query")
write_qas_query(args, "trivia-test.csv", "trivia-test-query")
def PassagePreprocessingFn(args, line, tokenizer):
line_arr = list(csv.reader([line], delimiter='\t'))[0]
if line_arr[0] == 'id':
return bytearray()
p_id = int(line_arr[0])
text = line_arr[1]
title = line_arr[2]
token_ids = tokenizer.encode(title, text_pair=text, add_special_tokens=True,
max_length=args.max_seq_length,
pad_to_max_length=False)
seq_len = args.max_seq_length
passage_len = len(token_ids)
if len(token_ids) < seq_len:
token_ids = token_ids + [tokenizer.pad_token_id] * (seq_len - len(token_ids))
if len(token_ids) > seq_len:
token_ids = token_ids[0:seq_len]
token_ids[-1] = tokenizer.sep_token_id
if p_id < 5:
a = np.array(token_ids, np.int32)
print("pid {}, passagelen {}, shape {}".format(p_id, passage_len, a.shape))
return p_id.to_bytes(8, 'big') + passage_len.to_bytes(4, 'big') + np.array(token_ids, np.int32).tobytes()
def QueryPreprocessingFn(args, qid, text, tokenizer):
token_ids = tokenizer.encode(text, add_special_tokens=True, max_length=args.max_seq_length,
pad_to_max_length=False)
seq_len = args.max_seq_length
passage_len = len(token_ids)
if len(token_ids) < seq_len:
token_ids = token_ids + [tokenizer.pad_token_id] * (seq_len - len(token_ids))
if len(token_ids) > seq_len:
token_ids = token_ids[0:seq_len]
token_ids[-1] = tokenizer.sep_token_id
if qid < 5:
a = np.array(token_ids, np.int32)
print("qid {}, passagelen {}, shape {}".format(qid, passage_len, a.shape))
return passage_len.to_bytes(4, 'big') + np.array(token_ids, np.int32).tobytes()
def GetProcessingFn(args, query=False):
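    # Wraps one cached (length, token_ids) record into (input_ids, attention_mask,
    # token_type_ids, id) tensors via a single-example TensorDataset.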
def fn(vals, i):
passage_len, passage = vals
max_len = args.max_seq_length
pad_len = max(0, max_len - passage_len)
token_type_ids = [0] * passage_len + [0] * pad_len
attention_mask = passage != 0
passage_collection = [(i, passage, attention_mask, token_type_ids)]
query2id_tensor = torch.tensor([f[0] for f in passage_collection], dtype=torch.long)
all_input_ids_a = torch.tensor([f[1] for f in passage_collection], dtype=torch.int)
all_attention_mask_a = torch.tensor([f[2] for f in passage_collection], dtype=torch.bool)
all_token_type_ids_a = torch.tensor([f[3] for f in passage_collection], dtype=torch.uint8)
dataset = TensorDataset(all_input_ids_a, all_attention_mask_a, all_token_type_ids_a, query2id_tensor)
return [ts for ts in dataset]
return fn
def GetTrainingDataProcessingFn(args, query_cache, passage_cache, shuffle=True):
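    # Each training line reads "qid\tpos_pid\tneg_pid1,neg_pid2,...". For every line the
    # generator yields one (query, positive) tuple and one (query, sampled negative) tuple.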
def fn(line, i):
line_arr = line.split('\t')
qid = int(line_arr[0])
pos_pid = int(line_arr[1])
neg_pids = line_arr[2].split(',')
neg_pids = [int(neg_pid) for neg_pid in neg_pids]
all_input_ids_a = []
all_attention_mask_a = []
query_data = GetProcessingFn(args, query=True)(query_cache[qid], qid)[0]
pos_data = GetProcessingFn(args, query=False)(passage_cache[pos_pid], pos_pid)[0]
if shuffle:
random.shuffle(neg_pids)
neg_data = GetProcessingFn(args, query=False)(passage_cache[neg_pids[0]], neg_pids[0])[0]
yield (query_data[0], query_data[1], query_data[2], pos_data[0], pos_data[1], pos_data[2])
yield (query_data[0], query_data[1], query_data[2], neg_data[0], neg_data[1], neg_data[2])
return fn
def GetTripletTrainingDataProcessingFn(args, query_cache, passage_cache, shuffle=True):
def fn(line, i):
line_arr = line.split('\t')
qid = int(line_arr[0])
pos_pid = int(line_arr[1])
neg_pids = line_arr[2].split(',')
neg_pids = [int(neg_pid) for neg_pid in neg_pids]
all_input_ids_a = []
all_attention_mask_a = []
query_data = GetProcessingFn(args, query=True)(query_cache[qid], qid)[0]
pos_data = GetProcessingFn(args, query=False)(passage_cache[pos_pid], pos_pid)[0]
if shuffle:
random.shuffle(neg_pids)
neg_data = GetProcessingFn(args, query=False)(passage_cache[neg_pids[0]], neg_pids[0])[0]
yield (query_data[0], query_data[1], query_data[2], pos_data[0], pos_data[1], pos_data[2],
neg_data[0], neg_data[1], neg_data[2])
return fn
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--out_data_dir",
default="/webdata-nfs/jialliu/dpr/ann/ann_multi_data_256/",
type=str,
help="The output data dir",
)
parser.add_argument(
"--model_type",
default="dpr",
type=str,
help="Model type selected in the list: " + ", ".join(MSMarcoConfigDict.keys()),
)
parser.add_argument(
"--model_name_or_path",
default="bert-base-uncased",
type=str,
help="Path to pre-trained model or shortcut name selected in the list: " +
", ".join(ALL_MODELS),
)
parser.add_argument(
"--max_seq_length",
default=256,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--data_type",
default=0,
type=int,
help="0 is nq, 1 is trivia, 2 is both",
)
parser.add_argument(
"--question_dir",
type=str,
help="location of the raw QnA question data",
)
parser.add_argument(
"--wiki_dir",
type=str,
help="location of the wiki corpus",
)
parser.add_argument(
"--answer_dir",
type=str,
help="location of the QnA answers for evaluation",
)
args = parser.parse_args()
if not os.path.exists(args.out_data_dir):
os.makedirs(args.out_data_dir)
preprocess(args)
if __name__ == '__main__':
main()
|
ANCE/data/DPR_data.py/0
|
{
"file_path": "ANCE/data/DPR_data.py",
"repo_id": "ANCE",
"token_count": 6768
}
| 23 |
from setuptools import setup
with open('SEED-Encoder.md') as f:
readme = f.read()
setup(
name='SEED-Encoder',
long_description=readme,
install_requires=[
'scikit-learn',
'pandas',
'tensorboardX',
'tqdm',
'tokenizers==0.9.2',
'six',
],
)
|
ANCE/model/SEED_Encoder/setup.py/0
|
{
"file_path": "ANCE/model/SEED_Encoder/setup.py",
"repo_id": "ANCE",
"token_count": 155
}
| 24 |
"""
Code for self-training with weak supervision.
Author: Giannis Karamanolakis ([email protected])
"""
import os
import numpy as np
import random
from weaksource import SMSRules, TRECRules, YoutubeRules, CENSUSRules, MITRRules, SPOUSERules
from RuleAttentionNetwork import RAN
supported_weak_sources = {
'smsrules': SMSRules,
'trecrules': TRECRules,
'youtuberules': YoutubeRules,
'censusrules': CENSUSRules,
'mitrrules': MITRRules,
'spouserules': SPOUSERules,
}
class Teacher:
"""
Teacher:
    (1) considers multiple weak sources: (a) multiple weak (heuristic) rules and (b) the Student
    (2) aggregates the weak sources with an aggregation model (e.g., RAN) to compute a single pseudo-label
"""
def __init__(self, args, logger=None):
self.name = args.teacher_name
self.datapath = args.datapath
if self.name != "ran":
raise (BaseException("Teacher not supported: {}".format(self.name)))
if args.weak_sources is None:
if args.dataset in ['sms', 'trec', 'youtube', 'census', 'mitr', 'spouse']:
args.weak_sources = ["{}rules".format(args.dataset)]
else:
raise (BaseException("Teacher not available for dataset={}".format(args.dataset)))
logger.info("No weak sources specified for Teacher. Using default: {}".format(args.weak_sources))
else:
logger.info("weak sources: {}".format(args.weak_sources))
self.args = args
self.logger = logger
self.seed = args.seed
self.num_labels = self.args.num_labels
np.random.seed(self.seed)
self.source_names = args.weak_sources
print(self.source_names)
for source_name in self.source_names:
assert source_name in supported_weak_sources, "Weak Source not supported: {}".format(source_name)
self.weak_sources = {src: supported_weak_sources[src](self.datapath) for src in self.source_names}
self.num_rules = np.sum([src.num_rules for _, src in self.weak_sources.items()])
self.preprocess_fns = [src.preprocess for src_name, src in self.weak_sources.items()]
self.preprocess = None if None in self.preprocess_fns else self.preprocess_all
self.agg_model = RAN(args=self.args, num_rules=self.num_rules, logger=self.logger, name=self.name)
self.name = 'ran'
self.student = None
self.convert_abstain_to_random = args.convert_abstain_to_random
def preprocess_all(self, dataset):
all_preds = []
for src_name, src in self.weak_sources.items():
preds = src.preprocess(dataset)
all_preds.append(preds)
if len(all_preds) > 1:
raise(BaseException('pre-processing not implemented for multiple sources yet...'))
return all_preds[0]
def apply(self, dataset):
# Apply Teacher on unlabeled data
all_preds = []
for src_name, weak_src in self.weak_sources.items():
# Each source is a set of rules.
num_rules = weak_src.num_rules
self.logger.info("Applying Teacher with {} LF(s) on {} data".format(num_rules, len(dataset)))
# preds: num_examples x num_rules
preds = weak_src.apply(dataset)
preds = np.array(preds).astype(int)
if preds.ndim == 1:
# make sure arrays are 2D
preds = preds[..., np.newaxis]
all_preds.append(preds)
# all_preds: num_examples x num_rules
all_preds = np.hstack(all_preds)
return all_preds
def train(self, dataset):
weak_labels = self.apply(dataset)
res = self.aggregate_sources(weak_labels, train=True)
return {
"preds": res['preds'],
"proba": res['proba'],
"lf_weights": res['lf_weights']
}
def predict(self, dataset, student_features=None):
weak_labels = self.apply(dataset)
res = self.aggregate_sources(weak_labels, student_features, train=False)
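        # On dev/test, optionally replace abstentions (-1) with a uniformly random
        # label so that evaluation metrics are computed over every example.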
if dataset.method in ['test', 'dev'] and self.convert_abstain_to_random:
aggregated_labels = [x if x != -1 else np.random.choice(np.arange(self.num_labels), 1)[0] for x in res['preds'].tolist()]
res['preds'] = np.array(aggregated_labels)
return res
def predict_ran(self, dataset):
rule_pred = self.apply(dataset)
student_pred_dict = self.student.predict(dataset=dataset)
student_pred_proba = student_pred_dict['proba']
res = self.aggregate_sources(rule_pred,
student_features=student_pred_dict['features'],
student_pred=student_pred_proba,
train=False)
# self.logger.info("First 10 teacher proba:\n{}".format(res['proba'][:10]))
if dataset.method in ['test', 'dev'] and self.convert_abstain_to_random:
labels = [x if x != -1 else np.random.choice(np.arange(self.num_labels), 1)[0] for x in res['preds'].tolist()]
res['preds'] = np.array(labels)
return res
def update(self, train_dataset, train_student_features=None, train_label_name='student_labels',
dev_dataset=None, dev_student_features=None, dev_label_name='labels',
unsup_dataset=None, unsup_student_features=None,):
self.logger.info("Getting rule predictions on train dataset")
rule_pred_train = self.apply(train_dataset)
self.logger.info("Getting rule predictions on dev dataset")
rule_pred_dev = self.apply(dev_dataset)
if unsup_dataset is not None:
rule_pred_unsup = self.apply(unsup_dataset)
else:
rule_pred_unsup = None
assert ((rule_pred_train != -1).sum(axis=1) == 0).sum() == 0, "cannot train RAN in examples where no rules apply. need to drop these examples first"
self.logger.info("Training Rule Attention Network")
self.agg_model.train(
x_train=train_student_features,
rule_pred_train=rule_pred_train,
y_train=train_dataset.data[train_label_name],
x_dev=dev_student_features,
rule_pred_dev=rule_pred_dev,
y_dev=dev_dataset.data[dev_label_name],
x_unsup=unsup_student_features,
rule_pred_unsup=rule_pred_unsup,
)
return {}
def train_ran(self, train_dataset=None, train_label_name='student_labels',
dev_dataset=None, dev_label_name='labels', unlabeled_dataset=None):
self.logger.info("Getting rule predictions")
rule_pred_train = self.apply(train_dataset) if train_dataset is not None else None
rule_pred_dev = self.apply(dev_dataset) if dev_dataset is not None else None
rule_pred_unsup = self.apply(unlabeled_dataset) if unlabeled_dataset is not None else None
self.logger.info("Getting student predictions on train (and dev) dataset")
assert self.student is not None, "To train RAN we need access to the Student"
student_pred_train = self.student.predict(dataset=train_dataset) if train_dataset is not None else {'features': None, 'proba': None}
student_pred_dev = self.student.predict(dataset=dev_dataset) if dev_dataset is not None else {'features': None, 'proba': None}
student_pred_unsup = self.student.predict(dataset=unlabeled_dataset) if unlabeled_dataset is not None else {'features': None, 'proba': None}
self.logger.info("Training Rule Attention Network")
self.agg_model.train(
x_train=student_pred_train['features'],
rule_pred_train=rule_pred_train,
student_pred_train=student_pred_train['proba'],
y_train=train_dataset.data[train_label_name] if train_dataset is not None else None,
x_dev=student_pred_dev['features'],
rule_pred_dev=rule_pred_dev,
student_pred_dev=student_pred_dev['proba'],
y_dev=dev_dataset.data[dev_label_name] if dev_dataset is not None else None,
x_unsup=student_pred_unsup['features'],
rule_pred_unsup=rule_pred_unsup,
student_pred_unsup=student_pred_unsup['proba'],
)
return {}
def aggregate_sources(self, weak_labels, student_features=None, train=False, student_pred=None):
assert weak_labels.shape[1] == self.num_rules, "num rules = {} but weak_labels.shape={}".format(self.num_rules,
weak_labels.shape[1])
self.active_rules = np.sum(weak_labels != -1, axis=0) != 0
self.logger.info("There are {}/{} active rules".format(np.sum(self.active_rules), weak_labels.shape[1]))
coverage = (np.sum(weak_labels != -1, axis=1) != 0).sum()
self.logger.info("Coverage: {:.1f}% ({}/{})".format(100*coverage/weak_labels.shape[0], coverage, weak_labels.shape[0]))
# Train aggregator
self.lf_weights = None
if train and self.name == "ran":
self.agg_model.init(weak_labels)
elif train:
raise(BaseException("Teacher method not implemented: {}".format(self.name)))
if self.lf_weights is not None:
self.logger.info("Aggregating sources with weights ({}):\n{}".format(self.lf_weights.shape, self.lf_weights))
res = self.agg_model.predict(rule_pred=weak_labels, student_features=student_features, student_pred=student_pred)
agg_labels = res['preds']
agg_proba = res['proba']
att_scores = res['att_scores']
rule_mask = res['rule_mask']
return {
'preds': agg_labels,
"proba": agg_proba,
'lf_weights': self.lf_weights,
'att_scores': att_scores,
'rule_mask': rule_mask,
}
def save(self, savename=None):
if savename is None:
savefolder = os.path.join(self.args.logdir, 'teacher')
else:
savefolder = os.path.join(self.args.logdir, savename)
self.logger.info("Saving teacher at {}".format(savefolder))
os.makedirs(savefolder, exist_ok=True)
self.agg_model.save(os.path.join(savefolder, 'rule_attention_network.h5'))
return
def load(self, savefolder):
self.logger.info("Loading teacher from {}".format(savefolder))
self.agg_model.load(os.path.join(savefolder, 'rule_attention_network.h5'))
|
ASTRA/astra/Teacher.py/0
|
{
"file_path": "ASTRA/astra/Teacher.py",
"repo_id": "ASTRA",
"token_count": 4793
}
| 25 |
. ./venv/bin/activate
seed=110
python -m torch.distributed.launch --nproc_per_node=1 src/gpt2_ft.py \
--train_data ./data/webnlg_challenge_2017/train.jsonl \
--valid_data ./data/webnlg_challenge_2017/valid.jsonl \
--train_batch_size 8 \
--grad_acc 1 \
--valid_batch_size 4 \
--seq_len 512 \
--model_card gpt2.md \
--init_checkpoint ./pretrained_checkpoints/gpt2-medium-pytorch_model.bin \
--platform local \
--clip 0.0 \
--lr 0.0002 \
--weight_decay 0.01 \
--correct_bias \
--adam_beta2 0.999 \
--scheduler linear \
--warmup_step 500 \
--max_epoch 5 \
--save_interval 1000 \
--lora_dim 4 \
--lora_alpha 32 \
--lora_dropout 0.1 \
--label_smooth 0.1 \
--work_dir ./trained_models/GPT2_M/webnlg/$seed/lora_only \
--random_seed $seed \
--lora_only 1
bash run_eval_webnlg_lora_only.sh --seed $seed
|
AdaMix/NLG/run_train_webnlg_lora_only.sh/0
|
{
"file_path": "AdaMix/NLG/run_train_webnlg_lora_only.sh",
"repo_id": "AdaMix",
"token_count": 339
}
| 26 |
# ------------------------------------------------------------------------------------------
# Copyright (c). All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
import math
import os
from collections import OrderedDict
import argparse
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
def add_optimizer_params(parser: argparse.ArgumentParser):
parser.add_argument('--lr', default=0.00001, type=float, help='learning rate')
parser.add_argument('--weight_decay', default=0.01, type=float, help='weight decay rate')
parser.add_argument('--correct_bias', action='store_true', help='correct adam bias term')
parser.add_argument('--adam_epislon', default=1e-6, type=float, help='adam epsilon')
    parser.add_argument('--no_decay_bias', action='store_true', help='no weight decay on bias weights')
parser.add_argument('--adam_beta1', default=0.9, type=float, help='adam beta1 term')
parser.add_argument('--adam_beta2', default=0.98, type=float, help='adam beta2 term')
parser.add_argument('--scheduler', default='linear', type=str,
choices=['cosine', 'inv_sqrt', 'dev_perf', 'constant', 'linear', 'cycle'],
help='lr scheduler to use.')
    parser.add_argument('--max_step', type=int, default=None, help='maximum number of training steps')
    parser.add_argument('--max_epoch', type=int, default=None, help='max epoch of training')
    parser.add_argument('--warmup_step', type=int, default=0, help='number of linear warmup steps')
parser.add_argument('--i_steps', type=str, default='0', help='interval_steps')
parser.add_argument('--i_lrs', type=str, default='0.00025', help='interval_lrs')
class AdamW(Optimizer):
""" Implements Adam algorithm with weight decay fix.
Parameters:
lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adam's beta parameters (b1, b2). Default: (0.9, 0.98)
        eps (float): Adam's epsilon. Default: 1e-6
weight_decay (float): Weight decay. Default: 0.0
correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.98), eps=1e-6, weight_decay=0.0, correct_bias=True):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
super().__init__(params, defaults)
def reset_state(self):
        for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state["exp_avg"] = torch.zeros_like(p.data)
state["exp_avg_sq"] = torch.zeros_like(p.data)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
step_size = group["lr"]
if 'correct_bias' in group and group["correct_bias"]: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state["step"]
bias_correction2 = 1.0 - beta2 ** state["step"]
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group["weight_decay"] > 0.0:
p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
return loss
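# A typical construction path (sketch): build the optimizer and scheduler through the
# helpers defined below, e.g.
#   optimizer = create_adam_optimizer_from_args(model, args)
#   scheduler = create_optimizer_scheduler(optimizer, args)
# or instantiate AdamW directly with explicit hyperparameters.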
class CosineAnnealingWarmupRestarts(_LRScheduler):
"""
optimizer (Optimizer): Wrapped optimizer.
first_cycle_steps (int): First cycle step size.
cycle_mult(float): Cycle steps magnification. Default: -1.
max_lr(float): First cycle's max learning rate. Default: 0.1.
min_lr(float): Min learning rate. Default: 0.001.
warmup_steps(int): Linear warmup step size. Default: 0.
gamma(float): Decrease rate of max learning rate by cycle. Default: 1.
last_epoch (int): The index of last epoch. Default: -1.
"""
def __init__(
self,
optimizer : torch.optim.Optimizer,
max_lr : float = 0.1,
min_lr : float = 0.0,
warmup_steps : int = 0,
max_steps : int = 1,
alpha : float = 0.,
last_epoch : int = -1
):
self.max_lr = max_lr # max learning rate in the current cycle
self.min_lr = min_lr # min learning rate
self.warmup_steps = warmup_steps # warmup step size
        self.alpha = alpha  # floor of the cosine decay, as a fraction of max_lr
self.max_steps = max_steps
super(CosineAnnealingWarmupRestarts, self).__init__(optimizer, last_epoch)
self.init_lr()
def init_lr(self):
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.min_lr
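    # Schedule: lr(t) = max_lr * t / warmup_steps                                            for t < warmup_steps
    #           lr(t) = max_lr * ((1 - alpha) * 0.5 * (1 + cos(pi * t / max_steps)) + alpha) otherwise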
def get_lr(self):
if self.last_epoch < self.warmup_steps:
curr_lr = self.max_lr * self.last_epoch / self.warmup_steps
return curr_lr
else:
_step = min(self.last_epoch, self.max_steps)
cosine_decay = 0.5 * (1 + math.cos(math.pi * _step / self.max_steps))
decayed = (1 - self.alpha) * cosine_decay + self.alpha
return self.max_lr * decayed # learning_rate * decayed
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = math.floor(epoch)
_lr = self.get_lr()
for param_group in self.optimizer.param_groups:
param_group['lr'] = _lr
class CyclicScheduler(_LRScheduler):
def __init__(
self,
optimizer,
interval_steps = [],
interval_lrs = [],
last_epoch = -1,
):
self.optimizer = optimizer
self.interval_steps = interval_steps
self.interval_lrs = interval_lrs
self.last_epoch = last_epoch
super(CyclicScheduler, self).__init__(optimizer, last_epoch)
self.init_lr()
def init_lr(self):
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.interval_lrs[0]
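    # Piecewise-linear schedule: within [interval_steps[i], interval_steps[i+1]) the lr is
    # interpolated between interval_lrs[i] and interval_lrs[i+1]; past the last interval it
    # stays at interval_lrs[-1].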
def get_lr(self):
for _i in range(0, len(self.interval_steps)-1):
if self.last_epoch >= self.interval_steps[_i] and self.last_epoch < self.interval_steps[_i + 1]:
_alpha = (self.last_epoch - self.interval_steps[_i]) / (self.interval_steps[_i + 1] - self.interval_steps[_i] + 1e-6)
if _alpha < 0:
_alpha = 0
if _alpha >= 1:
_alpha = 1
curr_lr = _alpha * self.interval_lrs[_i + 1] + (1.0 - _alpha) * self.interval_lrs[_i]
return curr_lr
return self.interval_lrs[-1]
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
#self.max_lr = self.base_max_lr * (self.gamma**self.cycle)
self.last_epoch = math.floor(epoch)
_lr = self.get_lr()
        for param_group in self.optimizer.param_groups:
param_group['lr'] = _lr
def get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps,
num_training_steps,
last_epoch=-1
):
""" Create a schedule with a learning rate that decreases linearly after
linearly increasing during a warmup period.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_constant_schedule_with_warmup(
optimizer,
num_warmup_steps,
num_training_steps,
last_epoch=-1
):
""" Create a schedule with a learning rate that decreases linearly after
linearly increasing during a warmup period.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return 1.0
return LambdaLR(optimizer, lr_lambda, last_epoch)
def create_grouped_parameters(model, no_decay_bias):
if not no_decay_bias:
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters()], # if not any(nd in n for nd in no_decay)],
}]
else:
no_decay = ["bias", "layer_norm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
}]
return optimizer_grouped_parameters
def create_adam_optimizer(
model,
lr,
weight_decay,
optimizer_grouped_parameters=None,
beta1=0.9,
beta2=0.98,
correct_bias=True,
adam_epislon=1e-6,
no_decay_bias=False
):
if optimizer_grouped_parameters is None:
optimizer_grouped_parameters = create_grouped_parameters(model, no_decay_bias)
optimizer = AdamW(
optimizer_grouped_parameters,
lr=lr,
betas=(beta1, beta2),
eps=adam_epislon,
weight_decay=weight_decay,
correct_bias=correct_bias
)
return optimizer
def create_sgd_optimizer(model, lr):
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.0)
return optimizer
def create_adam_optimizer_from_args(model, args, grouped_parameters=None):
if grouped_parameters is None:
grouped_parameters = create_grouped_parameters(model, args.no_decay_bias)
optimizer = AdamW(
grouped_parameters,
lr=args.lr,
betas=(args.adam_beta1, args.adam_beta2),
eps=args.adam_epislon,
weight_decay=args.weight_decay,
correct_bias=args.correct_bias
)
return optimizer
def create_optimizer_scheduler(optimizer, args):
if args.scheduler == 'cosine':
scheduler = CosineAnnealingWarmupRestarts(
optimizer,
max_lr=args.lr,
min_lr=0.0,
warmup_steps=args.warmup_step,
max_steps=args.max_step, alpha=0
)
elif args.scheduler == 'linear':
scheduler = get_linear_schedule_with_warmup(
optimizer, args.warmup_step, args.max_step, last_epoch=-1
)
elif args.scheduler == 'cycle':
if args.i_steps is not None:
args.i_steps = [int(_i) for _i in args.i_steps.split(',')]
args.i_lrs = [float(_i) for _i in args.i_lrs.split(',')]
args.max_step = args.i_steps[-1]
        print('max_step is reset to', args.max_step)
scheduler = CyclicScheduler(
optimizer, interval_steps=args.i_steps, interval_lrs=args.i_lrs
)
elif args.scheduler == 'constant':
scheduler = get_constant_schedule_with_warmup(
optimizer, args.warmup_step, args.max_step, last_epoch=-1
)
else:
        # constant learning rate.
scheduler = None
return scheduler
|
AdaMix/NLG/src/optimizer.py/0
|
{
"file_path": "AdaMix/NLG/src/optimizer.py",
"repo_id": "AdaMix",
"token_count": 6410
}
| 27 |
FROM google/cloud-sdk:slim
# Build args.
ARG GITHUB_REF=refs/heads/master
# TODO: This Dockerfile installs pytorch/xla 3.6 wheels. There are also 3.7
# wheels available; see below.
ENV PYTHON_VERSION=3.6
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
cmake \
git \
curl \
ca-certificates
# Install conda and python.
# NOTE new Conda does not forward the exit status... https://github.com/conda/conda/issues/8385
RUN curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-4.7.12-Linux-x86_64.sh && \
chmod +x ~/miniconda.sh && \
~/miniconda.sh -b && \
rm ~/miniconda.sh
ENV PATH=/root/miniconda3/bin:$PATH
RUN conda create -y --name container python=$PYTHON_VERSION
# Run the rest of commands within the new conda env.
# Use absolute path to appease Codefactor.
SHELL ["/root/miniconda3/bin/conda", "run", "-n", "container", "/bin/bash", "-c"]
RUN conda install -y python=$PYTHON_VERSION mkl
RUN pip uninstall -y torch && \
# Python 3.7 wheels are available. Replace cp36-cp36m with cp37-cp37m
    gsutil cp "gs://tpu-pytorch/wheels/torch-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" . && \
    gsutil cp "gs://tpu-pytorch/wheels/torch_xla-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" . && \
    gsutil cp "gs://tpu-pytorch/wheels/torchvision-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" . && \
    pip install "torch-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
    pip install "torch_xla-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
    pip install "torchvision-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
    rm "torch-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
    rm "torch_xla-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
    rm "torchvision-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl" && \
apt-get install -y libomp5
ENV LD_LIBRARY_PATH=/root/miniconda3/envs/container/lib
# Install huggingface/transformers at the current PR, plus dependencies.
RUN git clone https://github.com/huggingface/transformers.git && \
cd transformers && \
git fetch origin $GITHUB_REF:CI && \
git checkout CI && \
cd .. && \
pip install ./transformers && \
pip install -r ./transformers/examples/requirements.txt && \
pip install pytest
RUN python -c "import torch_xla; print(torch_xla.__version__)"
RUN python -c "import transformers as trf; print(trf.__version__)"
RUN conda init bash
COPY docker-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
CMD ["bash"]
|
AdaMix/docker/transformers-pytorch-tpu/Dockerfile/0
|
{
"file_path": "AdaMix/docker/transformers-pytorch-tpu/Dockerfile",
"repo_id": "AdaMix",
"token_count": 1229
}
| 28 |
..
Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Feature Extractor
-----------------------------------------------------------------------------------------------------------------------
A feature extractor is in charge of preparing input features for a multi-modal model. This includes feature extraction
from sequences, *e.g.*, pre-processing audio files into Log-Mel Spectrogram features, feature extraction from images,
*e.g.*, cropping image files, but also padding, normalization, and conversion to NumPy, PyTorch, and TensorFlow
tensors.
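A minimal sketch of the common workflow (``Wav2Vec2FeatureExtractor`` is used here purely as an illustration; any
concrete feature extractor follows the same pattern):
.. code-block::
    from transformers import Wav2Vec2FeatureExtractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
    # Two raw waveforms of different lengths; padding=True pads them into one batch.
    raw_speech = [[0.0] * 16000, [0.0] * 8000]
    inputs = feature_extractor(raw_speech, sampling_rate=16000, padding=True, return_tensors="pt")
    print(inputs["input_values"].shape)  # torch.Size([2, 16000])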
FeatureExtractionMixin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.feature_extraction_utils.FeatureExtractionMixin
:members: from_pretrained, save_pretrained
SequenceFeatureExtractor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.SequenceFeatureExtractor
:members: pad
BatchFeature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.BatchFeature
:members:
|
AdaMix/docs/source/main_classes/feature_extractor.rst/0
|
{
"file_path": "AdaMix/docs/source/main_classes/feature_extractor.rst",
"repo_id": "AdaMix",
"token_count": 394
}
| 29 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Bertweet
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The BERTweet model was proposed in `BERTweet: A pre-trained language model for English Tweets
<https://www.aclweb.org/anthology/2020.emnlp-demos.2.pdf>`__ by Dat Quoc Nguyen, Thanh Vu, Anh Tuan Nguyen.
The abstract from the paper is the following:
*We present BERTweet, the first public large-scale pre-trained language model for English Tweets. Our BERTweet, having
the same architecture as BERT-base (Devlin et al., 2019), is trained using the RoBERTa pre-training procedure (Liu et
al., 2019). Experiments show that BERTweet outperforms strong baselines RoBERTa-base and XLM-R-base (Conneau et al.,
2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks:
Part-of-speech tagging, Named-entity recognition and text classification.*
Example of use:
.. code-block::
import torch
from transformers import AutoModel, AutoTokenizer
bertweet = AutoModel.from_pretrained("vinai/bertweet-base")
# For transformers v4.x+:
tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False)
# For transformers v3.x:
# tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base")
# INPUT TWEET IS ALREADY NORMALIZED!
line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:"
input_ids = torch.tensor([tokenizer.encode(line)])
with torch.no_grad():
features = bertweet(input_ids) # Models outputs are now tuples
## With TensorFlow 2.0+:
# from transformers import TFAutoModel
# bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base")
The original code can be found `here <https://github.com/VinAIResearch/BERTweet>`__.
BertweetTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.BertweetTokenizer
:members:
|
AdaMix/docs/source/model_doc/bertweet.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/bertweet.rst",
"repo_id": "AdaMix",
"token_count": 751
}
| 30 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Funnel Transformer
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Funnel Transformer model was proposed in the paper `Funnel-Transformer: Filtering out Sequential Redundancy for
Efficient Language Processing <https://arxiv.org/abs/2006.03236>`__. It is a bidirectional transformer model, like
BERT, but with a pooling operation after each block of layers, a bit like in traditional convolutional neural networks
(CNN) in computer vision.
The abstract from the paper is the following:
*With the success of language pretraining, it is highly desirable to develop more efficient architectures of good
scalability that can exploit the abundant unlabeled data at a lower cost. To improve the efficiency, we examine the
much-overlooked redundancy in maintaining a full-length token-level presentation, especially for tasks that only
require a single-vector presentation of the sequence. With this intuition, we propose Funnel-Transformer which
gradually compresses the sequence of hidden states to a shorter one and hence reduces the computation cost. More
importantly, by re-investing the saved FLOPs from length reduction in constructing a deeper or wider model, we further
improve the model capacity. In addition, to perform token-level predictions as required by common pretraining
objectives, Funnel-Transformer is able to recover a deep representation for each token from the reduced hidden sequence
via a decoder. Empirically, with comparable or fewer FLOPs, Funnel-Transformer outperforms the standard Transformer on
a wide variety of sequence-level prediction tasks, including text classification, language understanding, and reading
comprehension.*
Tips:
- Since Funnel Transformer uses pooling, the sequence length of the hidden states changes after each block of layers.
The base model therefore has a final sequence length that is a quarter of the original one. This model can be used
directly for tasks that just require a sentence summary (like sequence classification or multiple choice). For other
tasks, the full model is used; this full model has a decoder that upsamples the final hidden states to the same
sequence length as the input.
- The Funnel Transformer checkpoints are all available with a full version and a base version. The first ones should be
used for :class:`~transformers.FunnelModel`, :class:`~transformers.FunnelForPreTraining`,
:class:`~transformers.FunnelForMaskedLM`, :class:`~transformers.FunnelForTokenClassification` and
  :class:`~transformers.FunnelForQuestionAnswering`. The second ones should be used for
:class:`~transformers.FunnelBaseModel`, :class:`~transformers.FunnelForSequenceClassification` and
:class:`~transformers.FunnelForMultipleChoice`.
The original code can be found `here <https://github.com/laiguokun/Funnel-Transformer>`__.
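A short usage sketch contrasting the two variants (the checkpoint names below assume the public
``funnel-transformer/small`` family; substitute any available Funnel checkpoint):
.. code-block::
    from transformers import FunnelTokenizer, FunnelBaseModel, FunnelModel
    tokenizer = FunnelTokenizer.from_pretrained("funnel-transformer/small")
    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    # Base version: no decoder, so the final hidden states stay pooled (about a quarter of the input length).
    base_model = FunnelBaseModel.from_pretrained("funnel-transformer/small-base")
    pooled_states = base_model(**inputs).last_hidden_state
    # Full version: the decoder upsamples the hidden states back to the input sequence length.
    full_model = FunnelModel.from_pretrained("funnel-transformer/small")
    full_states = full_model(**inputs).last_hidden_state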
FunnelConfig
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelConfig
:members:
FunnelTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelTokenizer
:members: build_inputs_with_special_tokens, get_special_tokens_mask,
create_token_type_ids_from_sequences, save_vocabulary
FunnelTokenizerFast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelTokenizerFast
:members:
Funnel specific outputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput
:members:
.. autoclass:: transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput
:members:
FunnelBaseModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelBaseModel
:members: forward
FunnelModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelModel
:members: forward
FunnelForPreTraining
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelForPreTraining
:members: forward
FunnelForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelForMaskedLM
:members: forward
FunnelForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelForSequenceClassification
:members: forward
FunnelForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelForMultipleChoice
:members: forward
FunnelForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelForTokenClassification
:members: forward
FunnelForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FunnelForQuestionAnswering
:members: forward
TFFunnelBaseModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFFunnelBaseModel
:members: call
TFFunnelModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFFunnelModel
:members: call
TFFunnelForPreTraining
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFFunnelForPreTraining
:members: call
TFFunnelForMaskedLM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFFunnelForMaskedLM
:members: call
TFFunnelForSequenceClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFFunnelForSequenceClassification
:members: call
TFFunnelForMultipleChoice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFFunnelForMultipleChoice
:members: call
TFFunnelForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFFunnelForTokenClassification
:members: call
TFFunnelForQuestionAnswering
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFFunnelForQuestionAnswering
:members: call
|
AdaMix/docs/source/model_doc/funnel.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/funnel.rst",
"repo_id": "AdaMix",
"token_count": 1794
}
| 31 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
PhoBERT
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The PhoBERT model was proposed in `PhoBERT: Pre-trained language models for Vietnamese
<https://www.aclweb.org/anthology/2020.findings-emnlp.92.pdf>`__ by Dat Quoc Nguyen, Anh Tuan Nguyen.
The abstract from the paper is the following:
*We present PhoBERT with two versions, PhoBERT-base and PhoBERT-large, the first public large-scale monolingual
language models pre-trained for Vietnamese. Experimental results show that PhoBERT consistently outperforms the recent
best pre-trained multilingual model XLM-R (Conneau et al., 2020) and improves the state-of-the-art in multiple
Vietnamese-specific NLP tasks including Part-of-speech tagging, Dependency parsing, Named-entity recognition and
Natural language inference.*
Example of use:
.. code-block::
import torch
from transformers import AutoModel, AutoTokenizer
phobert = AutoModel.from_pretrained("vinai/phobert-base")
tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")
# INPUT TEXT MUST BE ALREADY WORD-SEGMENTED!
line = "Tôi là sinh_viên trường đại_học Công_nghệ ."
input_ids = torch.tensor([tokenizer.encode(line)])
with torch.no_grad():
features = phobert(input_ids) # Models outputs are now tuples
## With TensorFlow 2.0+:
# from transformers import TFAutoModel
# phobert = TFAutoModel.from_pretrained("vinai/phobert-base")
The original code can be found `here <https://github.com/VinAIResearch/PhoBERT>`__.
PhobertTokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.PhobertTokenizer
:members:
|
AdaMix/docs/source/model_doc/phobert.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/phobert.rst",
"repo_id": "AdaMix",
"token_count": 699
}
| 32 |
..
Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
XLSR-Wav2Vec2
-----------------------------------------------------------------------------------------------------------------------
Overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The XLSR-Wav2Vec2 model was proposed in `Unsupervised Cross-Lingual Representation Learning For Speech Recognition
<https://arxiv.org/abs/2006.13979>`__ by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael
Auli.
The abstract from the paper is the following:
*This paper presents XLSR which learns cross-lingual speech representations by pretraining a single model from the raw
waveform of speech in multiple languages. We build on wav2vec 2.0 which is trained by solving a contrastive task over
masked latent speech representations and jointly learns a quantization of the latents shared across languages. The
resulting model is fine-tuned on labeled data and experiments show that cross-lingual pretraining significantly
outperforms monolingual pretraining. On the CommonVoice benchmark, XLSR shows a relative phoneme error rate reduction
of 72% compared to the best known results. On BABEL, our approach improves word error rate by 16% relative compared to
a comparable system. Our approach enables a single multilingual speech recognition model which is competitive to strong
individual models. Analysis shows that the latent discrete speech representations are shared across languages with
increased sharing for related languages. We hope to catalyze research in low-resource speech understanding by releasing
XLSR-53, a large model pretrained in 53 languages.*
Tips:
- XLSR-Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal.
- XLSR-Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be
decoded using :class:`~transformers.Wav2Vec2CTCTokenizer`.
XLSR-Wav2Vec2's architecture is based on the Wav2Vec2 model, so one can refer to :doc:`Wav2Vec2's documentation page
<wav2vec2>`.
The original code can be found `here <https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec>`__.
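A minimal CTC decoding sketch (XLSR-53 itself ships without a CTC head, so the English
``facebook/wav2vec2-base-960h`` checkpoint is used below purely as an illustration; any CTC-fine-tuned
XLSR checkpoint works the same way):
.. code-block::
    import torch
    from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
    raw_speech = [0.0] * 16000  # one second of (silent) 16 kHz audio as a float array
    inputs = processor(raw_speech, sampling_rate=16000, return_tensors="pt")
    with torch.no_grad():
        logits = model(inputs.input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)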
|
AdaMix/docs/source/model_doc/xlsr_wav2vec2.rst/0
|
{
"file_path": "AdaMix/docs/source/model_doc/xlsr_wav2vec2.rst",
"repo_id": "AdaMix",
"token_count": 695
}
| 33 |
name: NLU
channels:
- pytorch
- nvidia
- defaults
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=4.5=1_gnu
- blas=1.0=mkl
- bzip2=1.0.8=h7b6447c_0
- ca-certificates=2021.7.5=h06a4308_1
- certifi=2021.5.30=py37h06a4308_0
- cudatoolkit=11.1.74=h6bb024c_0
- ffmpeg=4.3=hf484d3e_0
- freetype=2.10.4=h5ab3b9f_0
- gmp=6.2.1=h2531618_2
- gnutls=3.6.15=he1e5248_0
- intel-openmp=2021.2.0=h06a4308_610
- jpeg=9b=h024ee3a_2
- lame=3.100=h7b6447c_0
- lcms2=2.12=h3be6417_0
- ld_impl_linux-64=2.35.1=h7274673_9
- libffi=3.3=he6710b0_2
- libgcc-ng=9.3.0=h5101ec6_17
- libgomp=9.3.0=h5101ec6_17
- libiconv=1.15=h63c8f33_5
- libidn2=2.3.1=h27cfd23_0
- libpng=1.6.37=hbc83047_0
- libstdcxx-ng=9.3.0=hd4cf53a_17
- libtasn1=4.16.0=h27cfd23_0
- libtiff=4.2.0=h85742a9_0
- libunistring=0.9.10=h27cfd23_0
- libuv=1.40.0=h7b6447c_0
- libwebp-base=1.2.0=h27cfd23_0
- lz4-c=1.9.3=h2531618_0
- mkl=2021.2.0=h06a4308_296
- mkl-service=2.4.0=py37h7f8727e_0
- mkl_fft=1.3.0=py37h42c9631_2
- mkl_random=1.2.1=py37ha9443f7_2
- ncurses=6.2=he6710b0_1
- nettle=3.7.3=hbbd107a_1
- numpy=1.20.2=py37h2d18471_0
- numpy-base=1.20.2=py37hfae3a4d_0
- olefile=0.46=py37_0
- openh264=2.1.0=hd408876_0
- openjpeg=2.3.0=h05c96fa_1
- openssl=1.1.1k=h27cfd23_0
- pillow=8.3.1=py37h2c7a002_0
- pip=21.1.3=py37h06a4308_0
- python=3.7.10=h12debd9_4
- pytorch=1.9.0=py3.7_cuda11.1_cudnn8.0.5_0
- readline=8.1=h27cfd23_0
- setuptools=52.0.0=py37h06a4308_0
- six=1.16.0=pyhd3eb1b0_0
- sqlite=3.36.0=hc218d9a_0
- tk=8.6.10=hbc83047_0
- torchaudio=0.9.0=py37
- torchvision=0.10.0=py37_cu111
- typing_extensions=3.10.0.0=pyh06a4308_0
- wheel=0.36.2=pyhd3eb1b0_0
- xz=5.2.5=h7b6447c_0
- zlib=1.2.11=h7b6447c_3
- zstd=1.4.9=haebb681_0
- pip:
- accelerate==0.3.0
- charset-normalizer==2.0.1
- click==8.0.1
- datasets==1.9.0
- deepspeed==0.5.0
- dill==0.3.4
- filelock==3.0.12
- fsspec==2021.7.0
- huggingface-hub==0.0.13
- idna==3.2
- importlib-metadata==4.6.1
- joblib==1.0.1
- multiprocess==0.70.12.2
- ninja==1.10.0.post2
- packaging==21.0
- pandas==1.3.0
- protobuf==3.17.3
- psutil==5.8.0
- pyaml==20.4.0
- pyarrow==4.0.1
- pyparsing==2.4.7
- python-dateutil==2.8.1
- pytz==2021.1
- pyyaml==5.4.1
- regex==2021.7.6
- requests==2.26.0
- sacremoses==0.0.45
- scikit-learn==0.24.2
- scipy==1.7.0
- sentencepiece==0.1.96
- sklearn==0.0
- tensorboardx==1.8
- threadpoolctl==2.2.0
- tokenizers==0.10.3
- tqdm==4.61.2
- triton==0.4.2
- urllib3==1.26.5
- xxhash==2.0.2
- zipp==3.5.0
- wrapt==1.12.1
- azureml-core==1.32.0
- loralib==0.1.1
prefix: /opt/conda/envs/transformers
|
AdaMix/environment.yml/0
|
{
"file_path": "AdaMix/environment.yml",
"repo_id": "AdaMix",
"token_count": 1761
}
| 34 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for multiple choice (Bert, Roberta, XLNet)."""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
return (preds == labels).mean()
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
try:
processor = processors[data_args.task_name]()
label_list = processor.get_labels()
num_labels = len(label_list)
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# Get datasets
train_dataset = (
MultipleChoiceDataset(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
task=data_args.task_name,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.train,
)
if training_args.do_train
else None
)
eval_dataset = (
MultipleChoiceDataset(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
task=data_args.task_name,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.dev,
)
if training_args.do_eval
else None
)
def compute_metrics(p: EvalPrediction) -> Dict:
preds = np.argmax(p.predictions, axis=1)
return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
data_collator=data_collator,
)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
results.update(result)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
|
AdaMix/examples/legacy/multiple_choice/run_multiple_choice.py/0
|
{
"file_path": "AdaMix/examples/legacy/multiple_choice/run_multiple_choice.py",
"repo_id": "AdaMix",
"token_count": 3257
}
| 35 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model evaluation script.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/eval.py
This script with default values evaluates a pretrained Transformer-XL on WikiText 103
"""
import argparse
import logging
import math
import time
import torch
from transformers import TransfoXLCorpus, TransfoXLLMHeadModel
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(description="PyTorch Transformer Language Model")
parser.add_argument("--model_name", type=str, default="transfo-xl-wt103", help="pretrained model name")
parser.add_argument(
"--split", type=str, default="test", choices=["all", "valid", "test"], help="which split to evaluate"
)
parser.add_argument("--batch_size", type=int, default=10, help="batch size")
parser.add_argument("--tgt_len", type=int, default=128, help="number of tokens to predict")
parser.add_argument("--ext_len", type=int, default=0, help="length of the extended context")
parser.add_argument("--mem_len", type=int, default=1600, help="length of the retained previous heads")
parser.add_argument("--clamp_len", type=int, default=1000, help="max positional embedding index")
parser.add_argument("--no_cuda", action="store_true", help="Do not use CUDA even though CUA is available")
parser.add_argument("--work_dir", type=str, required=True, help="path to the work_dir")
parser.add_argument("--no_log", action="store_true", help="do not log the eval result")
parser.add_argument("--same_length", action="store_true", help="set same length attention with masking")
parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
args = parser.parse_args()
assert args.ext_len >= 0, "extended context length must be non-negative"
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
logger.info("device: {}".format(device))
# Load a pre-processed dataset
# You can also build the corpus yourself using TransfoXLCorpus methods
    # The pre-processing involves computing word frequencies to prepare the Adaptive input and SoftMax
    # and tokenizing the dataset
    # The pre-processed corpus is a conversion of the original corpus (using the conversion script)
corpus = TransfoXLCorpus.from_pretrained(args.model_name)
va_iter = corpus.get_iterator("valid", args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len)
te_iter = corpus.get_iterator("test", args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len)
# Load a pre-trained model
model = TransfoXLLMHeadModel.from_pretrained(args.model_name)
model.to(device)
logger.info(
"Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}".format(
args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len
)
)
model.reset_memory_length(args.mem_len)
if args.clamp_len > 0:
model.clamp_len = args.clamp_len
if args.same_length:
model.same_length = True
###############################################################################
# Evaluation code
###############################################################################
def evaluate(eval_iter):
# Turn on evaluation mode which disables dropout.
model.eval()
total_len, total_loss = 0, 0.0
start_time = time.time()
with torch.no_grad():
mems = None
for idx, (data, target, seq_len) in enumerate(eval_iter):
ret = model(data, lm_labels=target, mems=mems)
loss, _, mems = ret
loss = loss.mean()
total_loss += seq_len * loss.item()
total_len += seq_len
total_time = time.time() - start_time
logger.info("Time : {:.2f}s, {:.2f}ms/segment".format(total_time, 1000 * total_time / (idx + 1)))
return total_loss / total_len
# Run on test data.
if args.split == "all":
test_loss = evaluate(te_iter)
valid_loss = evaluate(va_iter)
elif args.split == "valid":
valid_loss = evaluate(va_iter)
test_loss = None
elif args.split == "test":
test_loss = evaluate(te_iter)
valid_loss = None
def format_log(loss, split):
log_str = "| {0} loss {1:5.2f} | {0} ppl {2:9.3f} ".format(split, loss, math.exp(loss))
return log_str
log_str = ""
if valid_loss is not None:
log_str += format_log(valid_loss, "valid")
if test_loss is not None:
log_str += format_log(test_loss, "test")
logger.info("=" * 100)
logger.info(log_str)
logger.info("=" * 100)
if __name__ == "__main__":
main()
|
AdaMix/examples/legacy/run_transfo_xl.py/0
|
{
"file_path": "AdaMix/examples/legacy/run_transfo_xl.py",
"repo_id": "AdaMix",
"token_count": 2277
}
| 36 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition."""
import logging
import os
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import classification_report, f1_score, precision_score, recall_score
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
TFAutoModelForTokenClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
from utils_ner import Split, TFTokenClassificationDataset, TokenClassificationTask
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
task_type: Optional[str] = field(
default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
)
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
module = import_module("tasks")
try:
token_classification_task_clazz = getattr(module, model_args.task_type)
token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(
"n_replicas: %s, distributed training: %s, 16-bits training: %s",
training_args.n_replicas,
bool(training_args.n_replicas > 1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Prepare Token Classification task
labels = token_classification_task.get_labels(data_args.labels)
label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
id2label=label_map,
label2id={label: i for i, label in enumerate(labels)},
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast,
)
with training_args.strategy.scope():
model = TFAutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path,
from_pt=bool(".bin" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# Get datasets
train_dataset = (
TFTokenClassificationDataset(
token_classification_task=token_classification_task,
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.train,
)
if training_args.do_train
else None
)
eval_dataset = (
TFTokenClassificationDataset(
token_classification_task=token_classification_task,
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.dev,
)
if training_args.do_eval
else None
)
def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
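        # label_ids equal to -100 mark positions ignored by the loss (padding and
        # subword continuations); skip them when mapping ids back to label strings.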
preds = np.argmax(predictions, axis=2)
batch_size, seq_len = preds.shape
out_label_list = [[] for _ in range(batch_size)]
preds_list = [[] for _ in range(batch_size)]
for i in range(batch_size):
for j in range(seq_len):
if label_ids[i, j] != -100:
out_label_list[i].append(label_map[label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
return preds_list, out_label_list
def compute_metrics(p: EvalPrediction) -> Dict:
preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
return {
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
# Initialize our Trainer
trainer = TFTrainer(
model=model,
args=training_args,
train_dataset=train_dataset.get_dataset() if train_dataset else None,
eval_dataset=eval_dataset.get_dataset() if eval_dataset else None,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
results.update(result)
# Predict
if training_args.do_predict:
test_dataset = TFTokenClassificationDataset(
token_classification_task=token_classification_task,
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.test,
)
predictions, label_ids, metrics = trainer.predict(test_dataset.get_dataset())
preds_list, labels_list = align_predictions(predictions, label_ids)
report = classification_report(labels_list, preds_list)
logger.info("\n%s", report)
output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
writer.write("%s\n" % report)
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
with open(output_test_predictions_file, "w") as writer:
with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
example_id = 0
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(line)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
writer.write(output_line)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
return results
if __name__ == "__main__":
main()
|
AdaMix/examples/legacy/token-classification/run_tf_ner.py/0
|
{
"file_path": "AdaMix/examples/legacy/token-classification/run_tf_ner.py",
"repo_id": "AdaMix",
"token_count": 4667
}
| 37 |
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Research projects
This folder contains various research projects using 🤗 Transformers. They are not maintained and require a specific
version of 🤗 Transformers that is indicated in the requirements file of each folder. Updating them to the most recent version of the library will require some work.
To use any of them, just run the command
```
pip install -r requirements.txt
```
inside the folder of your choice.
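For example, to set up the `bertabs` project (one of the folders in this directory):
```
cd bertabs
pip install -r requirements.txt
```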
If you need help with any of those, contact the author(s), indicated at the top of the `README` of each folder.
|
AdaMix/examples/research_projects/README.md/0
|
{
"file_path": "AdaMix/examples/research_projects/README.md",
"repo_id": "AdaMix",
"token_count": 279
}
| 38 |
# MIT License
# Copyright (c) 2019 Yang Liu and the HuggingFace team
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import math
import numpy as np
import torch
from torch import nn
from torch.nn.init import xavier_uniform_
from configuration_bertabs import BertAbsConfig
from transformers import BertConfig, BertModel, PreTrainedModel
MAX_SIZE = 5000
BERTABS_FINETUNED_MODEL_ARCHIVE_LIST = [
"remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization",
]
class BertAbsPreTrainedModel(PreTrainedModel):
config_class = BertAbsConfig
load_tf_weights = False
base_model_prefix = "bert"
class BertAbs(BertAbsPreTrainedModel):
def __init__(self, args, checkpoint=None, bert_extractive_checkpoint=None):
super().__init__(args)
self.args = args
self.bert = Bert()
# If pre-trained weights are passed for Bert, load these.
load_bert_pretrained_extractive = True if bert_extractive_checkpoint else False
if load_bert_pretrained_extractive:
self.bert.model.load_state_dict(
dict([(n[11:], p) for n, p in bert_extractive_checkpoint.items() if n.startswith("bert.model")]),
strict=True,
)
self.vocab_size = self.bert.model.config.vocab_size
if args.max_pos > 512:
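            # BERT ships with 512 learned position embeddings; when max_pos exceeds that,
            # keep the original 512 rows and fill the extra rows by repeating the last one.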
my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size)
my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data
my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][
None, :
].repeat(args.max_pos - 512, 1)
self.bert.model.embeddings.position_embeddings = my_pos_embeddings
tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0)
tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight)
self.decoder = TransformerDecoder(
self.args.dec_layers,
self.args.dec_hidden_size,
heads=self.args.dec_heads,
d_ff=self.args.dec_ff_size,
dropout=self.args.dec_dropout,
embeddings=tgt_embeddings,
vocab_size=self.vocab_size,
)
gen_func = nn.LogSoftmax(dim=-1)
self.generator = nn.Sequential(nn.Linear(args.dec_hidden_size, args.vocab_size), gen_func)
self.generator[0].weight = self.decoder.embeddings.weight
load_from_checkpoints = False if checkpoint is None else True
if load_from_checkpoints:
self.load_state_dict(checkpoint)
def init_weights(self):
for module in self.decoder.modules():
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
for p in self.generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
else:
p.data.zero_()
def forward(
self,
encoder_input_ids,
decoder_input_ids,
token_type_ids,
encoder_attention_mask,
decoder_attention_mask,
):
encoder_output = self.bert(
input_ids=encoder_input_ids,
token_type_ids=token_type_ids,
attention_mask=encoder_attention_mask,
)
encoder_hidden_states = encoder_output[0]
dec_state = self.decoder.init_decoder_state(encoder_input_ids, encoder_hidden_states)
decoder_outputs, _ = self.decoder(decoder_input_ids[:, :-1], encoder_hidden_states, dec_state)
return decoder_outputs
class Bert(nn.Module):
"""This class is not really necessary and should probably disappear."""
def __init__(self):
super().__init__()
config = BertConfig.from_pretrained("bert-base-uncased")
self.model = BertModel(config)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, **kwargs):
self.eval()
with torch.no_grad():
encoder_outputs, _ = self.model(
input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, **kwargs
)
return encoder_outputs
class TransformerDecoder(nn.Module):
"""
The Transformer decoder from "Attention is All You Need".
Args:
num_layers (int): number of encoder layers.
d_model (int): size of the model
heads (int): number of heads
d_ff (int): size of the inner FF layer
dropout (float): dropout parameters
embeddings (:obj:`onmt.modules.Embeddings`):
embeddings to use, should have positional encodings
attn_type (str): if using a separate copy attention
"""
def __init__(self, num_layers, d_model, heads, d_ff, dropout, embeddings, vocab_size):
super().__init__()
# Basic attributes.
self.decoder_type = "transformer"
self.num_layers = num_layers
self.embeddings = embeddings
self.pos_emb = PositionalEncoding(dropout, self.embeddings.embedding_dim)
# Build TransformerDecoder.
self.transformer_layers = nn.ModuleList(
[TransformerDecoderLayer(d_model, heads, d_ff, dropout) for _ in range(num_layers)]
)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
# forward(input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask)
# def forward(self, input_ids, state, attention_mask=None, memory_lengths=None,
# step=None, cache=None, encoder_attention_mask=None, encoder_hidden_states=None, memory_masks=None):
def forward(
self,
input_ids,
encoder_hidden_states=None,
state=None,
attention_mask=None,
memory_lengths=None,
step=None,
cache=None,
encoder_attention_mask=None,
):
"""
See :obj:`onmt.modules.RNNDecoderBase.forward()`
memory_bank = encoder_hidden_states
"""
# Name conversion
tgt = input_ids
memory_bank = encoder_hidden_states
memory_mask = encoder_attention_mask
        src_words = state.src
src_batch, src_len = src_words.size()
padding_idx = self.embeddings.padding_idx
# Decoder padding mask
tgt_words = tgt
tgt_batch, tgt_len = tgt_words.size()
tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1).expand(tgt_batch, tgt_len, tgt_len)
# Encoder padding mask
if memory_mask is not None:
src_len = memory_mask.size(-1)
src_pad_mask = memory_mask.expand(src_batch, tgt_len, src_len)
else:
src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1).expand(src_batch, tgt_len, src_len)
# Pass through the embeddings
emb = self.embeddings(input_ids)
output = self.pos_emb(emb, step)
assert emb.dim() == 3 # len x batch x embedding_dim
if state.cache is None:
saved_inputs = []
for i in range(self.num_layers):
prev_layer_input = None
if state.cache is None:
if state.previous_input is not None:
prev_layer_input = state.previous_layer_inputs[i]
output, all_input = self.transformer_layers[i](
output,
memory_bank,
src_pad_mask,
tgt_pad_mask,
previous_input=prev_layer_input,
layer_cache=state.cache["layer_{}".format(i)] if state.cache is not None else None,
step=step,
)
if state.cache is None:
saved_inputs.append(all_input)
if state.cache is None:
saved_inputs = torch.stack(saved_inputs)
output = self.layer_norm(output)
if state.cache is None:
state = state.update_state(tgt, saved_inputs)
# Decoders in transformers return a tuple. Beam search will fail
# if we don't follow this convention.
        return output, state
def init_decoder_state(self, src, memory_bank, with_cache=False):
""" Init decoder state """
state = TransformerDecoderState(src)
if with_cache:
state._init_cache(memory_bank, self.num_layers)
return state
class PositionalEncoding(nn.Module):
def __init__(self, dropout, dim, max_len=5000):
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe = pe.unsqueeze(0)
super().__init__()
self.register_buffer("pe", pe)
self.dropout = nn.Dropout(p=dropout)
self.dim = dim
def forward(self, emb, step=None):
emb = emb * math.sqrt(self.dim)
if step:
emb = emb + self.pe[:, step][:, None, :]
else:
emb = emb + self.pe[:, : emb.size(1)]
emb = self.dropout(emb)
return emb
def get_emb(self, emb):
return self.pe[:, : emb.size(1)]
class TransformerDecoderLayer(nn.Module):
"""
Args:
d_model (int): the dimension of keys/values/queries in
MultiHeadedAttention, also the input size of
the first-layer of the PositionwiseFeedForward.
heads (int): the number of heads for MultiHeadedAttention.
d_ff (int): the second-layer of the PositionwiseFeedForward.
dropout (float): dropout probability(0-1.0).
self_attn_type (string): type of self-attention scaled-dot, average
"""
def __init__(self, d_model, heads, d_ff, dropout):
super().__init__()
self.self_attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
self.context_attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
self.drop = nn.Dropout(dropout)
mask = self._get_attn_subsequent_mask(MAX_SIZE)
# Register self.mask as a saved_state in TransformerDecoderLayer, so
# it gets TransformerDecoderLayer's cuda behavior automatically.
self.register_buffer("mask", mask)
def forward(
self,
inputs,
memory_bank,
src_pad_mask,
tgt_pad_mask,
previous_input=None,
layer_cache=None,
step=None,
):
"""
Args:
inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`
memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`
src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`
tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`
Returns:
(`FloatTensor`, `FloatTensor`, `FloatTensor`):
* output `[batch_size x 1 x model_dim]`
* attn `[batch_size x 1 x src_len]`
* all_input `[batch_size x current_step x model_dim]`
"""
dec_mask = torch.gt(tgt_pad_mask + self.mask[:, : tgt_pad_mask.size(1), : tgt_pad_mask.size(1)], 0)
input_norm = self.layer_norm_1(inputs)
all_input = input_norm
if previous_input is not None:
all_input = torch.cat((previous_input, input_norm), dim=1)
dec_mask = None
query = self.self_attn(
all_input,
all_input,
input_norm,
mask=dec_mask,
layer_cache=layer_cache,
type="self",
)
query = self.drop(query) + inputs
query_norm = self.layer_norm_2(query)
mid = self.context_attn(
memory_bank,
memory_bank,
query_norm,
mask=src_pad_mask,
layer_cache=layer_cache,
type="context",
)
output = self.feed_forward(self.drop(mid) + query)
return output, all_input
# return output
def _get_attn_subsequent_mask(self, size):
"""
Get an attention mask to avoid using the subsequent info.
Args:
size: int
Returns:
(`LongTensor`):
* subsequent_mask `[1 x size x size]`
"""
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype("uint8")
subsequent_mask = torch.from_numpy(subsequent_mask)
return subsequent_mask
class MultiHeadedAttention(nn.Module):
"""
Multi-Head Attention module from
"Attention is All You Need"
:cite:`DBLP:journals/corr/VaswaniSPUJGKP17`.
Similar to standard `dot` attention but uses
    multiple attention distributions simultaneously
to select relevant items.
.. mermaid::
graph BT
A[key]
B[value]
C[query]
O[output]
subgraph Attn
D[Attn 1]
E[Attn 2]
F[Attn N]
end
A --> D
C --> D
A --> E
C --> E
A --> F
C --> F
D --> O
E --> O
F --> O
B --> O
Also includes several additional tricks.
Args:
head_count (int): number of parallel heads
model_dim (int): the dimension of keys/values/queries,
must be divisible by head_count
dropout (float): dropout parameter
"""
def __init__(self, head_count, model_dim, dropout=0.1, use_final_linear=True):
assert model_dim % head_count == 0
self.dim_per_head = model_dim // head_count
self.model_dim = model_dim
super().__init__()
self.head_count = head_count
self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head)
self.linear_values = nn.Linear(model_dim, head_count * self.dim_per_head)
self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.use_final_linear = use_final_linear
if self.use_final_linear:
self.final_linear = nn.Linear(model_dim, model_dim)
def forward(
self,
key,
value,
query,
mask=None,
layer_cache=None,
type=None,
predefined_graph_1=None,
):
"""
Compute the context vector and the attention vectors.
Args:
key (`FloatTensor`): set of `key_len`
key vectors `[batch, key_len, dim]`
value (`FloatTensor`): set of `key_len`
value vectors `[batch, key_len, dim]`
query (`FloatTensor`): set of `query_len`
query vectors `[batch, query_len, dim]`
mask: binary mask indicating which keys have
non-zero attention `[batch, query_len, key_len]`
Returns:
(`FloatTensor`, `FloatTensor`) :
* output context vectors `[batch, query_len, dim]`
* one of the attention vectors `[batch, query_len, key_len]`
"""
batch_size = key.size(0)
dim_per_head = self.dim_per_head
head_count = self.head_count
def shape(x):
""" projection """
return x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head)
# 1) Project key, value, and query.
if layer_cache is not None:
if type == "self":
query, key, value = (
self.linear_query(query),
self.linear_keys(query),
self.linear_values(query),
)
key = shape(key)
value = shape(value)
if layer_cache is not None:
device = key.device
if layer_cache["self_keys"] is not None:
key = torch.cat((layer_cache["self_keys"].to(device), key), dim=2)
if layer_cache["self_values"] is not None:
value = torch.cat((layer_cache["self_values"].to(device), value), dim=2)
layer_cache["self_keys"] = key
layer_cache["self_values"] = value
elif type == "context":
query = self.linear_query(query)
if layer_cache is not None:
if layer_cache["memory_keys"] is None:
key, value = self.linear_keys(key), self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key, value = (
layer_cache["memory_keys"],
layer_cache["memory_values"],
)
layer_cache["memory_keys"] = key
layer_cache["memory_values"] = value
else:
key, value = self.linear_keys(key), self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key = self.linear_keys(key)
value = self.linear_values(value)
query = self.linear_query(query)
key = shape(key)
value = shape(value)
query = shape(query)
# 2) Calculate and scale scores.
query = query / math.sqrt(dim_per_head)
scores = torch.matmul(query, key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1).expand_as(scores)
scores = scores.masked_fill(mask, -1e18)
# 3) Apply attention dropout and compute context vectors.
attn = self.softmax(scores)
if predefined_graph_1 is not None:
attn_masked = attn[:, -1] * predefined_graph_1
attn_masked = attn_masked / (torch.sum(attn_masked, 2).unsqueeze(2) + 1e-9)
attn = torch.cat([attn[:, :-1], attn_masked.unsqueeze(1)], 1)
drop_attn = self.dropout(attn)
if self.use_final_linear:
context = unshape(torch.matmul(drop_attn, value))
output = self.final_linear(context)
return output
else:
context = torch.matmul(drop_attn, value)
return context
class DecoderState(object):
"""Interface for grouping together the current state of a recurrent
decoder. In the simplest case just represents the hidden state of
the model. But can also be used for implementing various forms of
input_feeding and non-recurrent models.
Modules need to implement this to utilize beam search decoding.
"""
def detach(self):
""" Need to document this """
self.hidden = tuple([_.detach() for _ in self.hidden])
self.input_feed = self.input_feed.detach()
def beam_update(self, idx, positions, beam_size):
""" Need to document this """
for e in self._all:
sizes = e.size()
br = sizes[1]
if len(sizes) == 3:
sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2])[:, :, idx]
else:
sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2], sizes[3])[:, :, idx]
sent_states.data.copy_(sent_states.data.index_select(1, positions))
def map_batch_fn(self, fn):
raise NotImplementedError()
class TransformerDecoderState(DecoderState):
""" Transformer Decoder state base class """
def __init__(self, src):
"""
Args:
src (FloatTensor): a sequence of source words tensors
with optional feature tensors, of size (len x batch).
"""
self.src = src
self.previous_input = None
self.previous_layer_inputs = None
self.cache = None
@property
def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
if self.previous_input is not None and self.previous_layer_inputs is not None:
return (self.previous_input, self.previous_layer_inputs, self.src)
else:
return (self.src,)
def detach(self):
if self.previous_input is not None:
self.previous_input = self.previous_input.detach()
if self.previous_layer_inputs is not None:
self.previous_layer_inputs = self.previous_layer_inputs.detach()
self.src = self.src.detach()
def update_state(self, new_input, previous_layer_inputs):
state = TransformerDecoderState(self.src)
state.previous_input = new_input
state.previous_layer_inputs = previous_layer_inputs
return state
def _init_cache(self, memory_bank, num_layers):
self.cache = {}
for l in range(num_layers):
layer_cache = {"memory_keys": None, "memory_values": None}
layer_cache["self_keys"] = None
layer_cache["self_values"] = None
self.cache["layer_{}".format(l)] = layer_cache
def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.src = self.src.data.repeat(1, beam_size, 1)
def map_batch_fn(self, fn):
def _recursive_map(struct, batch_dim=0):
for k, v in struct.items():
if v is not None:
if isinstance(v, dict):
_recursive_map(v)
else:
struct[k] = fn(v, batch_dim)
self.src = fn(self.src, 0)
if self.cache is not None:
_recursive_map(self.cache)
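# Tanh approximation of the Gaussian Error Linear Unit (Hendrycks & Gimpel, 2016):
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))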
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
class PositionwiseFeedForward(nn.Module):
"""A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
of the FNN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
#
# TRANSLATOR
# The following code is used to generate summaries using the
# pre-trained weights and beam search.
#
def build_predictor(args, tokenizer, symbols, model, logger=None):
# we should be able to refactor the global scorer a lot
scorer = GNMTGlobalScorer(args.alpha, length_penalty="wu")
translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger)
return translator
class GNMTGlobalScorer(object):
"""
NMT re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`
Args:
alpha (float): length parameter
        length_penalty (str): name of the length penalty to apply (e.g. "wu")
"""
def __init__(self, alpha, length_penalty):
self.alpha = alpha
penalty_builder = PenaltyBuilder(length_penalty)
self.length_penalty = penalty_builder.length_penalty()
def score(self, beam, logprobs):
"""
Rescores a prediction based on penalty functions
"""
normalized_probs = self.length_penalty(beam, logprobs, self.alpha)
return normalized_probs
class PenaltyBuilder(object):
"""
    Returns the length penalty function for beam search.
    Args:
        length_pen (str): option name of the length penalty ("wu", "avg", or anything else for no penalty)
"""
def __init__(self, length_pen):
self.length_pen = length_pen
def length_penalty(self):
if self.length_pen == "wu":
return self.length_wu
elif self.length_pen == "avg":
return self.length_average
else:
return self.length_none
"""
Below are all the different penalty terms implemented so far
"""
def length_wu(self, beam, logprobs, alpha=0.0):
"""
NMT length re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
modifier = ((5 + len(beam.next_ys)) ** alpha) / ((5 + 1) ** alpha)
return logprobs / modifier
def length_average(self, beam, logprobs, alpha=0.0):
"""
Returns the average probability of tokens in a sequence.
"""
return logprobs / len(beam.next_ys)
def length_none(self, beam, logprobs, alpha=0.0, beta=0.0):
"""
Returns unmodified scores.
"""
return logprobs
class Translator(object):
"""
Uses a model to translate a batch of sentences.
Args:
model (:obj:`onmt.modules.NMTModel`):
NMT model to use for translation
fields (dict of Fields): data fields
beam_size (int): size of beam to use
n_best (int): number of translations produced
max_length (int): maximum length output to produce
global_scores (:obj:`GlobalScorer`):
object to rescore final translations
copy_attn (bool): use copy attention during translation
beam_trace (bool): trace beam search for debugging
logger(logging.Logger): logger.
"""
def __init__(self, args, model, vocab, symbols, global_scorer=None, logger=None):
self.logger = logger
self.args = args
self.model = model
self.generator = self.model.generator
self.vocab = vocab
self.symbols = symbols
self.start_token = symbols["BOS"]
self.end_token = symbols["EOS"]
self.global_scorer = global_scorer
self.beam_size = args.beam_size
self.min_length = args.min_length
self.max_length = args.max_length
def translate(self, batch, step, attn_debug=False):
"""Generates summaries from one batch of data."""
self.model.eval()
with torch.no_grad():
batch_data = self.translate_batch(batch)
translations = self.from_batch(batch_data)
return translations
def translate_batch(self, batch, fast=False):
"""
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
fast (bool): enables fast beam search (may not support all features)
"""
with torch.no_grad():
return self._fast_translate_batch(batch, self.max_length, min_length=self.min_length)
# Where the beam search lives
# I have no idea why it is being called from the method above
def _fast_translate_batch(self, batch, max_length, min_length=0):
"""Beam Search using the encoder inputs contained in `batch`."""
# The batch object is funny
# Instead of just looking at the size of the arguments we encapsulate
# a size argument.
# Where is it defined?
beam_size = self.beam_size
batch_size = batch.batch_size
src = batch.src
segs = batch.segs
mask_src = batch.mask_src
src_features = self.model.bert(src, segs, mask_src)
dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True)
device = src_features.device
# Tile states and memory beam_size times.
dec_states.map_batch_fn(lambda state, dim: tile(state, beam_size, dim=dim))
src_features = tile(src_features, beam_size, dim=0)
batch_offset = torch.arange(batch_size, dtype=torch.long, device=device)
beam_offset = torch.arange(0, batch_size * beam_size, step=beam_size, dtype=torch.long, device=device)
alive_seq = torch.full([batch_size * beam_size, 1], self.start_token, dtype=torch.long, device=device)
# Give full probability to the first beam on the first step.
topk_log_probs = torch.tensor([0.0] + [float("-inf")] * (beam_size - 1), device=device).repeat(batch_size)
# Structure that holds finished hypotheses.
hypotheses = [[] for _ in range(batch_size)] # noqa: F812
results = {}
results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812
results["scores"] = [[] for _ in range(batch_size)] # noqa: F812
results["gold_score"] = [0] * batch_size
results["batch"] = batch
for step in range(max_length):
decoder_input = alive_seq[:, -1].view(1, -1)
# Decoder forward.
decoder_input = decoder_input.transpose(0, 1)
dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states, step=step)
# Generator forward.
log_probs = self.generator(dec_out.transpose(0, 1).squeeze(0))
vocab_size = log_probs.size(-1)
if step < min_length:
log_probs[:, self.end_token] = -1e20
# Multiply probs by the beam probability.
log_probs += topk_log_probs.view(-1).unsqueeze(1)
alpha = self.global_scorer.alpha
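            # GNMT length penalty (Wu et al., 2016): ((5 + length) / 6) ** alpha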
length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha
# Flatten probs into a list of possibilities.
curr_scores = log_probs / length_penalty
if self.args.block_trigram:
cur_len = alive_seq.size(1)
if cur_len > 3:
for i in range(alive_seq.size(0)):
fail = False
words = [int(w) for w in alive_seq[i]]
words = [self.vocab.ids_to_tokens[w] for w in words]
words = " ".join(words).replace(" ##", "").split()
if len(words) <= 3:
continue
trigrams = [(words[i - 1], words[i], words[i + 1]) for i in range(1, len(words) - 1)]
trigram = tuple(trigrams[-1])
if trigram in trigrams[:-1]:
fail = True
if fail:
curr_scores[i] = -10e20
curr_scores = curr_scores.reshape(-1, beam_size * vocab_size)
topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)
# Recover log probs.
topk_log_probs = topk_scores * length_penalty
# Resolve beam origin and true word ids.
            topk_beam_index = torch.div(topk_ids, vocab_size, rounding_mode="floor")
topk_ids = topk_ids.fmod(vocab_size)
# Map beam_index to batch_index in the flat representation.
batch_index = topk_beam_index + beam_offset[: topk_beam_index.size(0)].unsqueeze(1)
select_indices = batch_index.view(-1)
# Append last prediction.
alive_seq = torch.cat([alive_seq.index_select(0, select_indices), topk_ids.view(-1, 1)], -1)
is_finished = topk_ids.eq(self.end_token)
if step + 1 == max_length:
is_finished.fill_(1)
# End condition is top beam is finished.
end_condition = is_finished[:, 0].eq(1)
# Save finished hypotheses.
if is_finished.any():
predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))
for i in range(is_finished.size(0)):
b = batch_offset[i]
if end_condition[i]:
is_finished[i].fill_(1)
finished_hyp = is_finished[i].nonzero().view(-1)
# Store finished hypotheses for this batch.
for j in finished_hyp:
hypotheses[b].append((topk_scores[i, j], predictions[i, j, 1:]))
# If the batch reached the end, save the n_best hypotheses.
if end_condition[i]:
best_hyp = sorted(hypotheses[b], key=lambda x: x[0], reverse=True)
score, pred = best_hyp[0]
results["scores"][b].append(score)
results["predictions"][b].append(pred)
non_finished = end_condition.eq(0).nonzero().view(-1)
# If all sentences are translated, no need to go further.
if len(non_finished) == 0:
break
# Remove finished batches for the next step.
topk_log_probs = topk_log_probs.index_select(0, non_finished)
batch_index = batch_index.index_select(0, non_finished)
batch_offset = batch_offset.index_select(0, non_finished)
alive_seq = predictions.index_select(0, non_finished).view(-1, alive_seq.size(-1))
# Reorder states.
select_indices = batch_index.view(-1)
src_features = src_features.index_select(0, select_indices)
dec_states.map_batch_fn(lambda state, dim: state.index_select(dim, select_indices))
return results
def from_batch(self, translation_batch):
batch = translation_batch["batch"]
assert len(translation_batch["gold_score"]) == len(translation_batch["predictions"])
batch_size = batch.batch_size
preds, _, _, tgt_str, src = (
translation_batch["predictions"],
translation_batch["scores"],
translation_batch["gold_score"],
batch.tgt_str,
batch.src,
)
translations = []
for b in range(batch_size):
pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]])
pred_sents = " ".join(pred_sents).replace(" ##", "")
gold_sent = " ".join(tgt_str[b].split())
raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500]
raw_src = " ".join(raw_src)
translation = (pred_sents, gold_sent, raw_src)
translations.append(translation)
return translations
def tile(x, count, dim=0):
"""
Tiles x on dimension dim count times.
"""
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm).contiguous()
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = x.view(batch, -1).transpose(0, 1).repeat(count, 1).transpose(0, 1).contiguous().view(*out_size)
if dim != 0:
x = x.permute(perm).contiguous()
return x
#
# Optimizer for training. We keep this here in case we want to add
# a finetuning script.
#
class BertSumOptimizer(object):
"""Specific optimizer for BertSum.
As described in [1], the authors fine-tune BertSum for abstractive
summarization using two Adam Optimizers with different warm-up steps and
learning rate. They also use a custom learning rate scheduler.
[1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders."
arXiv preprint arXiv:1908.08345 (2019).
"""
def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8):
self.encoder = model.encoder
self.decoder = model.decoder
self.lr = lr
self.warmup_steps = warmup_steps
self.optimizers = {
"encoder": torch.optim.Adam(
model.encoder.parameters(),
lr=lr["encoder"],
betas=(beta_1, beta_2),
eps=eps,
),
"decoder": torch.optim.Adam(
model.decoder.parameters(),
lr=lr["decoder"],
betas=(beta_1, beta_2),
eps=eps,
),
}
self._step = 0
self.current_learning_rates = {}
def _update_rate(self, stack):
return self.lr[stack] * min(self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-1.5))
    def zero_grad(self):
        for optimizer in self.optimizers.values():
            optimizer.zero_grad()
def step(self):
self._step += 1
for stack, optimizer in self.optimizers.items():
new_rate = self._update_rate(stack)
for param_group in optimizer.param_groups:
param_group["lr"] = new_rate
optimizer.step()
self.current_learning_rates[stack] = new_rate
|
AdaMix/examples/research_projects/bertabs/modeling_bertabs.py/0
|
{
"file_path": "AdaMix/examples/research_projects/bertabs/modeling_bertabs.py",
"repo_id": "AdaMix",
"token_count": 17889
}
| 39 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
parser = argparse.ArgumentParser()
parser.add_argument("-f")
args = parser.parse_args()
return args.f
class DeeBertTests(TestCasePlus):
def setup(self) -> None:
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
def run_and_check(self, args):
n_gpu = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0, "run_glue_deebert.py")
with patch.object(sys, "argv", args):
result = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
def test_glue_deebert_train(self):
train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(train_args)
eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(eval_args)
entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(entropy_eval_args)
|
AdaMix/examples/research_projects/deebert/test_glue_deebert.py/0
|
{
"file_path": "AdaMix/examples/research_projects/deebert/test_glue_deebert.py",
"repo_id": "AdaMix",
"token_count": 1862
}
| 40 |
{
"initializer_range": 0.02,
"layer_norm_epsilon": 0.00001,
"n_ctx": 1024,
"n_embd": 768,
"n_head": 12,
"n_layer": 6,
"n_positions": 1024,
"vocab_size": 50257
}
|
AdaMix/examples/research_projects/distillation/training_configs/distilgpt2.json/0
|
{
"file_path": "AdaMix/examples/research_projects/distillation/training_configs/distilgpt2.json",
"repo_id": "AdaMix",
"token_count": 88
}
| 41 |