SystemAdmin123 committed on
Commit ec38180 · verified · 1 Parent(s): 5143b1d

Training in progress, step 2400, checkpoint

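The files below are what transformers' Trainer writes for a checkpoint and pushes when Hub syncing is enabled. A minimal sketch of a configuration that could produce commits like this one follows; only the values visible in trainer_state.json (logging_steps=10, eval_steps=200, train_batch_size=4, and the 400-step save interval implied by checkpoints at steps 2000 and 2400) are taken from this commit, everything else is an assumption.

```python
# Hedged sketch: a TrainingArguments setup that would push a "last-checkpoint"
# folder to the Hub at each save, producing commits titled
# "Training in progress, step N, checkpoint". Model/dataset are not part of
# this commit, so only the arguments construction is shown.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="checkpoints",
    per_device_train_batch_size=4,   # "train_batch_size": 4 in trainer_state.json
    logging_steps=10,                # "logging_steps": 10
    evaluation_strategy="steps",     # "eval_strategy" on recent transformers versions
    eval_steps=200,                  # "eval_steps": 200
    save_strategy="steps",
    save_steps=400,                  # assumption: checkpoints appear at 2000 and 2400
    push_to_hub=True,
    hub_strategy="checkpoint",       # pushes the latest checkpoint as "last-checkpoint"
)

# trainer = Trainer(model=model, args=args, train_dataset=..., eval_dataset=...)
# trainer.train()  # each save then triggers a commit like this one
```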
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:994193241e013d5e269eeb0653436d6339a2c6adf78180ecffaf3e8d5cf2c932
+ oid sha256:697f816d0621c0d00e8e0ca9fe72e8fdd524ca12acf84d2e3bd927115c5c70e9
  size 2433024
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8e25198e4e57fe4a58536d1466b258b97ac29b9051b4d986058fbb95f98ffed4
+ oid sha256:82e1d534960590164150b98a4eca7e5d48f9e8072cfbd3c56be0575735e0c5b5
  size 2498406
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:19ab3d6cfcb43de67f16e412d0cb4f86309db602f8242d16f2b203a0212d6cbb
+ oid sha256:97474a95cf2d0e6166f036d8937e33ebebb2adb23cf1177f88edc10dc549c905
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c88b3aeb8ec2bf995149291b90b69667d3f268ff2f13afbeab1a220b8cc27590
+ oid sha256:bf1f53caaa12767db3c6df563992bbf88f4b84dc57ec5080b22deb9c2c56ec6e
  size 1064
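Each file above is tracked with Git LFS, so the diff only rewrites the small pointer file: `oid sha256:...` is the digest of the real object and `size` is its length in bytes. A quick way to confirm that a locally downloaded file matches its new pointer (values copied from the model.safetensors diff above; the local path is just an example):

```python
# Verify a downloaded checkpoint file against the sha256 oid and size recorded
# in its Git LFS pointer. The path below is a hypothetical local copy.
import hashlib
from pathlib import Path

def lfs_digest(path: str) -> tuple[str, int]:
    """Return (sha256 hex digest, size in bytes), hashing the file in 1 MiB chunks."""
    h = hashlib.sha256()
    p = Path(path)
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest(), p.stat().st_size

digest, size = lfs_digest("last-checkpoint/model.safetensors")
# Values from the new pointer in this commit:
assert digest == "697f816d0621c0d00e8e0ca9fe72e8fdd524ca12acf84d2e3bd927115c5c70e9"
assert size == 2433024
```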
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.5918910920390648,
+ "epoch": 0.7102693104468778,
  "eval_steps": 200,
- "global_step": 2000,
+ "global_step": 2400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1495,6 +1495,302 @@
  "eval_samples_per_second": 40.529,
  "eval_steps_per_second": 10.146,
  "step": 2000
+ },
+ {
+ "epoch": 0.5948505474992601,
+ "grad_norm": 0.52734375,
+ "learning_rate": 2.0280410844810428e-05,
+ "loss": 10.5051,
+ "step": 2010
+ },
+ {
+ "epoch": 0.5978100029594554,
+ "grad_norm": 0.51953125,
+ "learning_rate": 1.9488821249060297e-05,
+ "loss": 10.497,
+ "step": 2020
+ },
+ {
+ "epoch": 0.6007694584196508,
+ "grad_norm": 0.58984375,
+ "learning_rate": 1.871131877836879e-05,
+ "loss": 10.5321,
+ "step": 2030
+ },
+ {
+ "epoch": 0.6037289138798461,
+ "grad_norm": 0.609375,
+ "learning_rate": 1.7948039473155554e-05,
+ "loss": 10.5627,
+ "step": 2040
+ },
+ {
+ "epoch": 0.6066883693400414,
+ "grad_norm": 1.2734375,
+ "learning_rate": 1.7199116885197995e-05,
+ "loss": 10.6749,
+ "step": 2050
+ },
+ {
+ "epoch": 0.6096478248002367,
+ "grad_norm": 0.46875,
+ "learning_rate": 1.646468205426377e-05,
+ "loss": 10.5196,
+ "step": 2060
+ },
+ {
+ "epoch": 0.6126072802604321,
+ "grad_norm": 0.51953125,
+ "learning_rate": 1.5744863485182537e-05,
+ "loss": 10.5294,
+ "step": 2070
+ },
+ {
+ "epoch": 0.6155667357206274,
+ "grad_norm": 0.5546875,
+ "learning_rate": 1.5039787125361326e-05,
+ "loss": 10.5178,
+ "step": 2080
+ },
+ {
+ "epoch": 0.6185261911808227,
+ "grad_norm": 0.703125,
+ "learning_rate": 1.4349576342747462e-05,
+ "loss": 10.6195,
+ "step": 2090
+ },
+ {
+ "epoch": 0.621485646641018,
+ "grad_norm": 0.94921875,
+ "learning_rate": 1.3674351904242611e-05,
+ "loss": 10.5954,
+ "step": 2100
+ },
+ {
+ "epoch": 0.6244451021012134,
+ "grad_norm": 0.458984375,
+ "learning_rate": 1.3014231954572287e-05,
+ "loss": 10.5128,
+ "step": 2110
+ },
+ {
+ "epoch": 0.6274045575614087,
+ "grad_norm": 0.52734375,
+ "learning_rate": 1.2369331995613665e-05,
+ "loss": 10.4853,
+ "step": 2120
+ },
+ {
+ "epoch": 0.630364013021604,
+ "grad_norm": 0.58984375,
+ "learning_rate": 1.173976486618631e-05,
+ "loss": 10.5693,
+ "step": 2130
+ },
+ {
+ "epoch": 0.6333234684817993,
+ "grad_norm": 0.5859375,
+ "learning_rate": 1.1125640722308628e-05,
+ "loss": 10.6354,
+ "step": 2140
+ },
+ {
+ "epoch": 0.6362829239419947,
+ "grad_norm": 1.7734375,
+ "learning_rate": 1.0527067017923654e-05,
+ "loss": 10.6234,
+ "step": 2150
+ },
+ {
+ "epoch": 0.63924237940219,
+ "grad_norm": 0.486328125,
+ "learning_rate": 9.944148486097793e-06,
+ "loss": 10.5555,
+ "step": 2160
+ },
+ {
+ "epoch": 0.6422018348623854,
+ "grad_norm": 0.515625,
+ "learning_rate": 9.376987120695545e-06,
+ "loss": 10.5214,
+ "step": 2170
+ },
+ {
+ "epoch": 0.6451612903225806,
+ "grad_norm": 0.62109375,
+ "learning_rate": 8.825682158533554e-06,
+ "loss": 10.5274,
+ "step": 2180
+ },
+ {
+ "epoch": 0.648120745782776,
+ "grad_norm": 0.79296875,
+ "learning_rate": 8.290330062017016e-06,
+ "loss": 10.6728,
+ "step": 2190
+ },
+ {
+ "epoch": 0.6510802012429713,
+ "grad_norm": 1.109375,
+ "learning_rate": 7.771024502261526e-06,
+ "loss": 10.6148,
+ "step": 2200
+ },
+ {
+ "epoch": 0.6510802012429713,
+ "eval_loss": 10.584245681762695,
+ "eval_runtime": 37.0238,
+ "eval_samples_per_second": 40.569,
+ "eval_steps_per_second": 10.156,
+ "step": 2200
+ },
+ {
+ "epoch": 0.6540396567031667,
+ "grad_norm": 0.486328125,
+ "learning_rate": 7.267856342703461e-06,
+ "loss": 10.5768,
+ "step": 2210
+ },
+ {
+ "epoch": 0.6569991121633619,
+ "grad_norm": 0.51171875,
+ "learning_rate": 6.780913623201346e-06,
+ "loss": 10.4856,
+ "step": 2220
+ },
+ {
+ "epoch": 0.6599585676235573,
+ "grad_norm": 0.859375,
+ "learning_rate": 6.310281544631546e-06,
+ "loss": 10.6694,
+ "step": 2230
+ },
+ {
+ "epoch": 0.6629180230837526,
+ "grad_norm": 0.57421875,
+ "learning_rate": 5.856042453980526e-06,
+ "loss": 10.5306,
+ "step": 2240
+ },
+ {
+ "epoch": 0.665877478543948,
+ "grad_norm": 0.80078125,
+ "learning_rate": 5.418275829936537e-06,
+ "loss": 10.525,
+ "step": 2250
+ },
+ {
+ "epoch": 0.6688369340041432,
+ "grad_norm": 0.55859375,
+ "learning_rate": 4.997058268983135e-06,
+ "loss": 10.6271,
+ "step": 2260
+ },
+ {
+ "epoch": 0.6717963894643386,
+ "grad_norm": 0.53125,
+ "learning_rate": 4.592463471997022e-06,
+ "loss": 10.4891,
+ "step": 2270
+ },
+ {
+ "epoch": 0.6747558449245339,
+ "grad_norm": 0.546875,
+ "learning_rate": 4.204562231352516e-06,
+ "loss": 10.5647,
+ "step": 2280
+ },
+ {
+ "epoch": 0.6777153003847292,
+ "grad_norm": 0.63671875,
+ "learning_rate": 3.83342241853496e-06,
+ "loss": 10.506,
+ "step": 2290
+ },
+ {
+ "epoch": 0.6806747558449245,
+ "grad_norm": 0.87109375,
+ "learning_rate": 3.4791089722651436e-06,
+ "loss": 10.6414,
+ "step": 2300
+ },
+ {
+ "epoch": 0.6836342113051198,
+ "grad_norm": 0.478515625,
+ "learning_rate": 3.1416838871368924e-06,
+ "loss": 10.4905,
+ "step": 2310
+ },
+ {
+ "epoch": 0.6865936667653152,
+ "grad_norm": 0.55078125,
+ "learning_rate": 2.821206202769899e-06,
+ "loss": 10.5434,
+ "step": 2320
+ },
+ {
+ "epoch": 0.6895531222255105,
+ "grad_norm": 0.796875,
+ "learning_rate": 2.5177319934794e-06,
+ "loss": 10.6355,
+ "step": 2330
+ },
+ {
+ "epoch": 0.6925125776857058,
+ "grad_norm": 0.8984375,
+ "learning_rate": 2.2313143584648423e-06,
+ "loss": 10.5647,
+ "step": 2340
+ },
+ {
+ "epoch": 0.6954720331459011,
+ "grad_norm": 0.84765625,
+ "learning_rate": 1.9620034125190644e-06,
+ "loss": 10.7043,
+ "step": 2350
+ },
+ {
+ "epoch": 0.6984314886060965,
+ "grad_norm": 0.5,
+ "learning_rate": 1.7098462772596302e-06,
+ "loss": 10.4649,
+ "step": 2360
+ },
+ {
+ "epoch": 0.7013909440662918,
+ "grad_norm": 0.5078125,
+ "learning_rate": 1.4748870728839347e-06,
+ "loss": 10.4574,
+ "step": 2370
+ },
+ {
+ "epoch": 0.7043503995264871,
+ "grad_norm": 0.578125,
+ "learning_rate": 1.2571669104494256e-06,
+ "loss": 10.4982,
+ "step": 2380
+ },
+ {
+ "epoch": 0.7073098549866824,
+ "grad_norm": 0.9453125,
+ "learning_rate": 1.0567238846803996e-06,
+ "loss": 10.5488,
+ "step": 2390
+ },
+ {
+ "epoch": 0.7102693104468778,
+ "grad_norm": 0.796875,
+ "learning_rate": 8.735930673024806e-07,
+ "loss": 10.4931,
+ "step": 2400
+ },
+ {
+ "epoch": 0.7102693104468778,
+ "eval_loss": 10.585838317871094,
+ "eval_runtime": 36.7691,
+ "eval_samples_per_second": 40.85,
+ "eval_steps_per_second": 10.226,
+ "step": 2400
  }
  ],
  "logging_steps": 10,
@@ -1514,7 +1810,7 @@
  "attributes": {}
  }
  },
- "total_flos": 195919478784.0,
+ "total_flos": 235181703168.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null