Push model using huggingface_hub.
- README.md +13 -0
- class_config.json +6 -0
- composed_modality.pickle +3 -0
- config.json +63 -0
- experiment_cfg/metadata.json +1 -0
- model.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+library_name: gr00t
+tags:
+- gr00t
+- model_hub_mixin
+---
+
+This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+- Code: https://github.com/NVIDIA/Isaac-GR00T
+- Paper: [More Information Needed]
+- Docs: [More Information Needed]
+
+# Gr00t
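For context, this is the generic workflow the PyTorchModelHubMixin integration provides: any `nn.Module` that also inherits the mixin gains `push_to_hub` and `from_pretrained`. The class and repo names below are placeholders for illustration, not the actual GR00T classes behind this commit.

```python
import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin

# Any nn.Module that also inherits the mixin gains save/push/load helpers.
class TinyPolicy(nn.Module, PyTorchModelHubMixin):
    def __init__(self, hidden_size: int = 1024):
        super().__init__()
        self.proj = nn.Linear(hidden_size, hidden_size)

model = TinyPolicy()
model.push_to_hub("your-username/tiny-policy")        # writes config.json + model.safetensors
reloaded = TinyPolicy.from_pretrained("your-username/tiny-policy")
```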
class_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "denoising_steps": null,
+  "device": "cuda",
+  "embodiment_tag": "gr1",
+  "model_path": "/tmp/tmpqg6z100q/not-lain/gr00t_policy_1"
+}
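The file above records the keyword arguments used to rebuild the policy wrapper. A minimal sketch of how they might be consumed; the `Gr00tPolicy` reference is an assumption based on the Isaac-GR00T repo and the real constructor may expect additional modality arguments.

```python
import json

# class_config.json holds the constructor arguments for the policy wrapper.
with open("class_config.json") as f:
    policy_kwargs = json.load(f)

print(policy_kwargs["embodiment_tag"], policy_kwargs["device"])  # gr1 cuda

# Hypothetical reconstruction; the real class lives in the Isaac-GR00T repo
# (e.g. gr00t.model.policy.Gr00tPolicy) and may expect extra arguments.
# policy = Gr00tPolicy(**policy_kwargs)
```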
composed_modality.pickle
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cd1e3494b6f5c7d514766b33cd7c26938a6def3be7330974a63eac0b9add295
+size 2022720
config.json
ADDED
@@ -0,0 +1,63 @@
+{
+  "action_dim": 32,
+  "action_head_cfg": {
+    "action_dim": 32,
+    "action_horizon": 16,
+    "add_pos_embed": true,
+    "diffusion_model_cfg": {
+      "attention_head_dim": 48,
+      "dropout": 0.2,
+      "final_dropout": true,
+      "interleave_self_attention": true,
+      "norm_type": "ada_norm",
+      "num_attention_heads": 32,
+      "num_layers": 16,
+      "output_dim": 1024,
+      "positional_embeddings": null
+    },
+    "freeze_decode_layer": false,
+    "hidden_size": 1024,
+    "input_embedding_dim": 1536,
+    "load_pretrained_det_decode_layer_path": null,
+    "max_action_dim": 32,
+    "max_state_dim": 64,
+    "model_dtype": "float32",
+    "noise_beta_alpha": 1.5,
+    "noise_beta_beta": 1.0,
+    "noise_s": 0.999,
+    "num_inference_timesteps": 16,
+    "num_timestep_buckets": 1000,
+    "tune_diffusion_model": true,
+    "tune_projector": true
+  },
+  "action_horizon": 16,
+  "architectures": [
+    "GR00T_N1"
+  ],
+  "attn_implementation": null,
+  "backbone_cfg": {
+    "allow_reshape_visual": true,
+    "load_pretrained_det_eagle_path": null,
+    "model_name": "$GR00T_BACKBONE_PATH/eagle2_hg_model",
+    "processor_cfg": {
+      "max_input_tiles": 1,
+      "model_path": "$GR00T_BACKBONE_PATH/eagle2_hg_model",
+      "model_spec": {
+        "num_image_token": 64,
+        "template": "qwen2-chat"
+      }
+    },
+    "projector_dim": 2048,
+    "remove_llm": false,
+    "reproject_vision": false,
+    "scale_image_resolution": 1,
+    "select_layer": 12,
+    "tune_llm": false,
+    "tune_visual": true
+  },
+  "hidden_size": 1536,
+  "model_dtype": "float32",
+  "model_type": "gr00t_n1",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.50.0"
+}
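A quick way to sanity-check the architecture fields above before downloading the full checkpoint; this is an illustrative script, not part of the repo.

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

diff_cfg = cfg["action_head_cfg"]["diffusion_model_cfg"]
print(cfg["model_type"], cfg["architectures"])                               # gr00t_n1 ['GR00T_N1']
print("action dim / horizon:", cfg["action_dim"], cfg["action_horizon"])     # 32 16
print("diffusion layers / heads:", diff_cfg["num_layers"], diff_cfg["num_attention_heads"])  # 16 32
print("vision-language backbone:", cfg["backbone_cfg"]["model_name"])
```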
experiment_cfg/metadata.json
ADDED
@@ -0,0 +1 @@
+{}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6c403942008a5f9e02141f28cc22360c091d8f70a7a58b4b89ed29d93de74de
+size 4380149488
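Both composed_modality.pickle and model.safetensors are committed as Git LFS pointer files; only the `version`/`oid`/`size` lines live in git, while the blobs are fetched by LFS. A minimal sketch for checking a downloaded blob against its pointer; the file paths in the usage comment are illustrative.

```python
import hashlib

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a downloaded blob against the oid/size recorded in its LFS pointer."""
    with open(pointer_path) as f:
        fields = dict(line.strip().split(" ", 1) for line in f if " " in line)
    expected_oid = fields["oid"].split(":", 1)[1]   # drop the "sha256:" prefix
    expected_size = int(fields["size"])

    sha = hashlib.sha256()
    actual_size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            actual_size += len(chunk)
    return sha.hexdigest() == expected_oid and actual_size == expected_size

# Usage (paths are illustrative):
# verify_lfs_pointer("model.safetensors.pointer", "model.safetensors")
```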