Dataset schema (Hugging Face Hub model metadata; one row per model):

| Column        | Type                  | Range / cardinality                        |
|---------------|-----------------------|--------------------------------------------|
| modelId       | string                | 5 to 139 chars                             |
| author        | string                | 2 to 42 chars                              |
| last_modified | timestamp[us, tz=UTC] | 2020-02-15 11:33:14 to 2025-09-02 12:32:32 |
| downloads     | int64                 | 0 to 223M                                  |
| likes         | int64                 | 0 to 11.7k                                 |
| library_name  | string (categorical)  | 534 distinct values                        |
| tags          | list                  | 1 to 4.05k items                           |
| pipeline_tag  | string (categorical)  | 55 distinct values                         |
| createdAt     | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 to 2025-09-02 12:31:20 |
| card          | string                | 11 chars to 1.01M chars                    |
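The records below follow this schema directly. As a minimal sketch, rows like these can be loaded and filtered with 🤗 Datasets; the repository id below is an assumption, so substitute the actual dataset id:

```python
# Minimal sketch, assuming these rows come from a Hub dataset with the schema
# above; the repo id is an assumption -- replace it with the real dataset id.
from datasets import load_dataset

ds = load_dataset("librarian-bots/model_cards_with_metadata", split="train")

# Keep models that declare a pipeline_tag and have at least one download.
popular = ds.filter(
    lambda row: row["pipeline_tag"] is not None and row["downloads"] > 0
)
print(popular[0]["modelId"], popular[0]["downloads"])
```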
modelId: AnerYubo/blockassist-bc-pawing_downy_anaconda_1756779147
author: AnerYubo
last_modified: 2025-09-02T02:12:30Z
downloads: 0
likes: 0
library_name: null
tags: [ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "pawing downy anaconda", "arxiv:2504.07091", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T02:12:27Z
card:
---
tags:
- gensyn
- blockassist
- gensyn-blockassist
- minecraft
- pawing downy anaconda
---
# Gensyn BlockAssist

Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).

modelId: liukevin666/blockassist-bc-yawning_striped_cassowary_1756779054
author: liukevin666
last_modified: 2025-09-02T02:12:24Z
downloads: 0
likes: 0
library_name: null
tags: [ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "yawning striped cassowary", "arxiv:2504.07091", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T02:11:47Z
card:
---
tags:
- gensyn
- blockassist
- gensyn-blockassist
- minecraft
- yawning striped cassowary
---
# Gensyn BlockAssist

Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).

modelId: uopyouop/blockassist-bc-reptilian_noisy_horse_1756779123
author: uopyouop
last_modified: 2025-09-02T02:12:20Z
downloads: 0
likes: 0
library_name: null
tags: [ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "reptilian noisy horse", "arxiv:2504.07091", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T02:12:04Z
card:
---
tags:
- gensyn
- blockassist
- gensyn-blockassist
- minecraft
- reptilian noisy horse
---
# Gensyn BlockAssist

Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).

modelId: AnerYubo/blockassist-bc-snappy_tenacious_eagle_1756779133
author: AnerYubo
last_modified: 2025-09-02T02:12:18Z
downloads: 0
likes: 0
library_name: null
tags: [ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "snappy tenacious eagle", "arxiv:2504.07091", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T02:12:13Z
card:
---
tags:
- gensyn
- blockassist
- gensyn-blockassist
- minecraft
- snappy tenacious eagle
---
# Gensyn BlockAssist

Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).

modelId: omerbkts/blockassist-bc-insectivorous_bold_lion_1756779078
author: omerbkts
last_modified: 2025-09-02T02:11:38Z
downloads: 0
likes: 0
library_name: null
tags: [ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "insectivorous bold lion", "arxiv:2504.07091", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T02:11:34Z
card:
---
tags:
- gensyn
- blockassist
- gensyn-blockassist
- minecraft
- insectivorous bold lion
---
# Gensyn BlockAssist

Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).

modelId: DevQuasar/CohereLabs.command-a-translate-08-2025-GGUF
author: DevQuasar
last_modified: 2025-09-02T02:11:23Z
downloads: 1,366
likes: 3
library_name: null
tags: [ "gguf", "text-generation", "base_model:CohereLabs/command-a-translate-08-2025", "base_model:quantized:CohereLabs/command-a-translate-08-2025", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
pipeline_tag: text-generation
createdAt: 2025-08-28T18:04:06Z
card:
---
base_model:
- CohereLabs/command-a-translate-08-2025
pipeline_tag: text-generation
---
[<img src="https://raw.githubusercontent.com/csabakecskemeti/devquasar/main/dq_logo_black-transparent.png" width="200"/>](https://devquasar.com)

'Make knowledge free for everyone'

![image/png](https://cdn-uploads.huggingface.co/production/uploads/64e6d37e02dee9bcb9d9fa18/j8wqc2dvXpf766Bbhv2S9.png)

Tested with [DevQuasar/wikitext-2-raw-v1-preprocessed-1k](https://huggingface.co/datasets/DevQuasar/wikitext-2-raw-v1-preprocessed-1k)

Quantized version of: [CohereLabs/command-a-translate-08-2025](https://huggingface.co/CohereLabs/command-a-translate-08-2025)

<a href='https://ko-fi.com/L4L416YX7C' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi6.png?v=6' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>

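Since this card only names the quantized repo, here is a minimal sketch of running one of its GGUF files locally with llama-cpp-python; the quant filename pattern is an assumption, so check the repo's file list for an exact name:

```python
# Minimal sketch, assuming llama-cpp-python is installed and the repo ships a
# Q4_K_M quant; the filename pattern below is an assumption, not a verified name.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="DevQuasar/CohereLabs.command-a-translate-08-2025-GGUF",
    filename="*Q4_K_M.gguf",  # glob over repo files; pick any quant it ships
    n_ctx=4096,
)
out = llm("Translate to German: The weather is nice today.", max_tokens=64)
print(out["choices"][0]["text"])
```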
modelId: 0701phantom/myT5-DPO
author: 0701phantom
last_modified: 2025-09-02T02:10:48Z
downloads: 0
likes: 0
library_name: transformers
tags: [ "transformers", "safetensors", "t5", "text2text-generation", "arxiv:1910.09700", "text-generation-inference", "endpoints_compatible", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T02:10:38Z
card: (stock auto-generated 🤗 Transformers model card template; every section is boilerplate reading "[More Information Needed]")

modelId: cody-li/whisper_fined_tuned_8-8_xl
author: cody-li
last_modified: 2025-09-02T02:10:05Z
downloads: 0
likes: 0
library_name: transformers
tags: [ "transformers", "safetensors", "whisper", "automatic-speech-recognition", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
pipeline_tag: automatic-speech-recognition
createdAt: 2025-09-02T02:09:30Z
card: (stock auto-generated 🤗 Transformers model card template; every section is boilerplate reading "[More Information Needed]")

modelId: giovannidemuri/llama3b-llama8b-er-v532-seed2-seed2-hx-alpaca-fpt
author: giovannidemuri
last_modified: 2025-09-02T02:07:37Z
downloads: 0
likes: 0
library_name: transformers
tags: [ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
pipeline_tag: text-generation
createdAt: 2025-09-02T00:28:43Z
card: (stock auto-generated 🤗 Transformers model card template; every section is boilerplate reading "[More Information Needed]")

modelId: aleebaster/blockassist-bc-sly_eager_boar_1756775204
author: aleebaster
last_modified: 2025-09-02T02:07:23Z
downloads: 0
likes: 0
library_name: null
tags: [ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "sly eager boar", "arxiv:2504.07091", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T02:07:13Z
card:
---
tags:
- gensyn
- blockassist
- gensyn-blockassist
- minecraft
- sly eager boar
---
# Gensyn BlockAssist

Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).

modelId: giovannidemuri/llama3b-llama8b-er-v531-seed2-seed2-hx-alpaca-fpt
author: giovannidemuri
last_modified: 2025-09-02T02:06:02Z
downloads: 0
likes: 0
library_name: transformers
tags: [ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
pipeline_tag: text-generation
createdAt: 2025-09-02T00:25:22Z
card: (stock auto-generated 🤗 Transformers model card template; every section is boilerplate reading "[More Information Needed]")

modelId: ReactiveAI/RxT-Alpha-Mini-S-MLM
author: ReactiveAI
last_modified: 2025-09-02T02:05:25Z
downloads: 0
likes: 0
library_name: null
tags: [ "safetensors", "model_hub_mixin", "pytorch_model_hub_mixin", "license:apache-2.0", "region:eu" ]
pipeline_tag: null
createdAt: 2025-09-01T19:53:31Z
card:
---
license: apache-2.0
tags:
- model_hub_mixin
- pytorch_model_hub_mixin
---
This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:

- Code: [More Information Needed]
- Paper: [More Information Needed]
- Docs: [More Information Needed]

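Both ReactiveAI cards point at the PyTorchModelHubMixin integration without showing it. A minimal sketch of that pattern follows; the class name and layer here are hypothetical stand-ins, not the actual RxT architecture:

```python
# Minimal sketch of the PyTorchModelHubMixin pattern; TinyEncoder is a
# hypothetical module, not ReactiveAI's model.
import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin

class TinyEncoder(nn.Module, PyTorchModelHubMixin):
    def __init__(self, dim: int = 128):
        super().__init__()
        self.proj = nn.Linear(dim, dim)

    def forward(self, x):
        return self.proj(x)

# Subclassing the mixin adds save_pretrained / from_pretrained / push_to_hub.
model = TinyEncoder(dim=128)
model.save_pretrained("tiny-encoder")            # writes config + safetensors
reloaded = TinyEncoder.from_pretrained("tiny-encoder")
```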
modelId: ReactiveAI/RxT-Alpha-Mini-S-Encoder
author: ReactiveAI
last_modified: 2025-09-02T02:05:20Z
downloads: 0
likes: 0
library_name: null
tags: [ "safetensors", "model_hub_mixin", "pytorch_model_hub_mixin", "fill-mask", "license:apache-2.0", "region:eu" ]
pipeline_tag: fill-mask
createdAt: 2025-09-01T19:53:24Z
card:
---
license: apache-2.0
pipeline_tag: fill-mask
tags:
- model_hub_mixin
- pytorch_model_hub_mixin
---
This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:

- Code: [More Information Needed]
- Paper: [More Information Needed]
- Docs: [More Information Needed]

modelId: mooperyou/blockassist-bc-beaked_frisky_ox_1756778682
author: mooperyou
last_modified: 2025-09-02T02:05:14Z
downloads: 0
likes: 0
library_name: null
tags: [ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "beaked frisky ox", "arxiv:2504.07091", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T02:04:44Z
card:
---
tags:
- gensyn
- blockassist
- gensyn-blockassist
- minecraft
- beaked frisky ox
---
# Gensyn BlockAssist

Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).

modelId: sekirr/blockassist-bc-masked_tenacious_whale_1756778592
author: sekirr
last_modified: 2025-09-02T02:03:52Z
downloads: 0
likes: 0
library_name: null
tags: [ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "masked tenacious whale", "arxiv:2504.07091", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T02:03:48Z
card:
---
tags:
- gensyn
- blockassist
- gensyn-blockassist
- minecraft
- masked tenacious whale
---
# Gensyn BlockAssist

Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).

modelId: huseyinatahaninan/t1_25k_v2_tag5_filtered_hermes-SFT-Qwen2.5-32B-Instruct
author: huseyinatahaninan
last_modified: 2025-09-02T02:03:24Z
downloads: 0
likes: 0
library_name: transformers
tags: [ "transformers", "safetensors", "qwen2", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:Qwen/Qwen2.5-32B-Instruct", "base_model:finetune:Qwen/Qwen2.5-32B-Instruct", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
pipeline_tag: text-generation
createdAt: 2025-09-01T20:13:53Z
card:
---
library_name: transformers
license: apache-2.0
base_model: Qwen/Qwen2.5-32B-Instruct
tags:
- llama-factory
- full
- generated_from_trainer
model-index:
- name: t1_25k_v2_tag5_filtered_hermes-SFT-Qwen2.5-32B-Instruct
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# t1_25k_v2_tag5_filtered_hermes-SFT-Qwen2.5-32B-Instruct

This model is a fine-tuned version of [Qwen/Qwen2.5-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct) on the t1_25k_v2_tag5_filtered_hermes dataset. It achieves the following results on the evaluation set:
- Loss: 0.2284

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-06
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 4
- total_train_batch_size: 32
- total_eval_batch_size: 8
- optimizer: Use adamw_torch_fused with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1.0

### Training results

| Training Loss | Epoch  | Step | Validation Loss |
|:-------------:|:------:|:----:|:---------------:|
| 0.2844        | 0.2761 | 100  | 0.2545          |
| 0.2488        | 0.5521 | 200  | 0.2373          |
| 0.2227        | 0.8282 | 300  | 0.2301          |

### Framework versions

- Transformers 4.55.0
- Pytorch 2.8.0+cu128
- Datasets 3.6.0
- Tokenizers 0.21.1

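As a sanity check on the hyperparameters above, the reported totals are internally consistent: total_train_batch_size = train_batch_size × num_devices × gradient_accumulation_steps = $1 \times 8 \times 4 = 32$, and total_eval_batch_size = $1 \times 8 = 8$.
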
modelId: IPEC-COMMUNITY/EO-1-3B
author: IPEC-COMMUNITY
last_modified: 2025-09-02T02:01:03Z
downloads: 0
likes: 7
library_name: transformers
tags: [ "transformers", "Robot Control", "Generalist robot policies", "VLA", "Embodied AI", "Unified Model", "multimodal", "large embodied model", "robotics", "en", "dataset:agibot-world/AgiBotWorld-Beta", "dataset:IPEC-COMMUNITY/fractal20220817_data_lerobot", "dataset:youliangtan/bridge_dataset", "dataset:IPEC-COMMUNITY/droid_lerobot", "dataset:liuhaotian/LLaVA-Instruct-150K", "dataset:lmms-lab/LLaVA-Video-178K", "dataset:lmms-lab/RefCOCO", "dataset:allenai/pixmo-points", "dataset:IPEC-COMMUNITY/EO-Data1.5M", "dataset:lmms-lab/RoboVQA", "dataset:x-humanoid-robomind/RoboMIND", "arxiv:2508.21112", "base_model:Qwen/Qwen2.5-VL-3B-Instruct", "base_model:finetune:Qwen/Qwen2.5-VL-3B-Instruct", "license:mit", "endpoints_compatible", "region:us" ]
pipeline_tag: robotics
createdAt: 2025-08-28T12:26:28Z
card:
---
base_model:
- Qwen/Qwen2.5-VL-3B-Instruct
datasets:
- agibot-world/AgiBotWorld-Beta
- IPEC-COMMUNITY/fractal20220817_data_lerobot
- youliangtan/bridge_dataset
- IPEC-COMMUNITY/droid_lerobot
- liuhaotian/LLaVA-Instruct-150K
- lmms-lab/LLaVA-Video-178K
- lmms-lab/RefCOCO
- allenai/pixmo-points
- IPEC-COMMUNITY/EO-Data1.5M
- lmms-lab/RoboVQA
- x-humanoid-robomind/RoboMIND
language:
- en
license: mit
metrics:
- accuracy
- bleu
tags:
- Robot Control
- Generalist robot policies
- VLA
- Embodied AI
- Unified Model
- multimodal
- large embodied model
pipeline_tag: robotics
library_name: transformers
---

<p align="center"> <img src="assets/logo.png" width="100%"> </p>

<p align="left">
<a href="https://eo-robotics.ai/eo-1"><img src="https://img.shields.io/badge/EO--Robotics-Website-5865F2?logo=googleplay&logoColor=white" alt="EO-Robotics Website" /></a>
<a href="https://arxiv.org/abs/2508.21112"><img src="https://img.shields.io/badge/EO--1-Paper-red?logo=arxiv&logoColor=red" alt="EO-Robotics Paper on arXiv" /></a>
<a href="https://github.com/EO-Robotics/EO-1"><img src="https://img.shields.io/badge/GitHub-Code-blue.svg?logo=github&" alt="GitHub Code" /></a>
<a href="https://huggingface.co/collections/IPEC-COMMUNITY/eo-robotics-68ac4ff30e1f746cac28ca14"><img src="https://img.shields.io/badge/EO--1--3B-Model-FFCC11?logo=huggingface&logoColor=brightyellow" alt="EO-1 Model" /></a>
<a href="https://huggingface.co/spaces/IPEC-COMMUNITY/EO-Robotics"><img src="https://img.shields.io/badge/EO--Robotics-Space-orange?logo=huggingface&logoColor=brightyellow" alt="EO-Robotics Model" /></a>
<a href="https://discord.gg/JqfDs6va"><img src="https://img.shields.io/badge/EO--Robotics-Discord-155dfc?logo=discord&logoColor=lightblue" alt="EO-Robotics Discord" /></a>
<a href="mailto:[email protected]"><img src="https://img.shields.io/badge/EO--Robotics-Email-D14836?logo=gmail&logoColor=red" alt="EO-Robotics Email" /></a>
<a href="https://huggingface.co/datasets/IPEC-COMMUNITY/EO-Data1.5M"><img src="https://img.shields.io/badge/Dataset-EO--Data1.5M-brightgreen?logo=huggingface&logoColor=brightyellow" alt="EO-1.5M" /></a>
</p>

## Interleaved Vision-Text-Action Pretraining for General Robot Control

We introduce the **EO-1** model, an open-source unified embodied foundation model comprising 3B parameters, trained on the carefully curated interleaved embodied dataset EO-Data1.5M, web multimodal data, and robot control data (AgiBotWorld, Open X-Embodiment, RoboMIND, SO100-Community, etc.). The **EO-1** model adopts a single unified decoder-only transformer that integrates discrete autoregressive decoding with continuous flow-matching denoising for multimodal embodied reasoning and robot control, enabling seamless perception, planning, reasoning, and acting in a single model. This work highlights the following features:

- ⚡ **Unified Architecture**: A single decoder-only transformer integrating text, image, video, and actions.
- 📚 **EO-1.5M Dataset**: 1.5M high-quality interleaved samples (Physical, Reasoning, Spatial, Control).
- 🌀 **Interleaved Pretraining**: Seamless synergy between language and action with autoregressive + flow matching.
- 🤖 **Reasoning-Enhanced Generalization**: Superior generalization through multimodal embodied reasoning and real robot control.

<p align="left"> <img src="assets/embodiments.png" width="100%"> </p>

## 0. Model Architecture

<p align="left"> <img src="assets/arch.png" width="100%"> </p>

**EO-1** is a Vision-Language-Action (VLA) model that adopts a single unified decoder-only transformer, equipped with a discrete language-modeling head for multimodal embodied reasoning and a continuous flow-matching head for robot action generation. The language instruction, image observations, robot state, and noisy action are encoded into an interleaved token sequence processed by the shared transformer backbone, whose weights are initialized from Qwen2.5-VL. The model is trained on interleaved vision-text-action data with a combination of flow-matching and next-token-prediction objectives, and is capable of seamless embodied reasoning and acting.

### Input

- Input types:
  - Vision: image frames, video
  - State: robot proprioception
  - Language instruction: text, pointing, bounding box, etc.
- Input formats:
  - Vision: variable number of uint8 image frames or a long video sequence
  - State: floating point
  - Language instruction: string

### Output

- Output types: actions, language
- Output formats: continuous-value vectors, discrete text

## 1. Inference with the pre-trained model

**EO-1** is built entirely on 🤗 HuggingFace Transformers and LeRobot, making deployment straightforward and accessible. If your environment supports transformers and lerobot, you can load the model and run inference directly with just a few lines of code (requires ~6.5 GB GPU memory). **EO-1** unifies high-level embodied reasoning with low-level robot control, producing either natural language outputs or actionable robot commands.

```python
import torch  # needed for torch.bfloat16 below
from transformers import AutoModel, AutoProcessor

# load the model and processor
processor = AutoProcessor.from_pretrained("IPEC-COMMUNITY/EO-1-3B", trust_remote_code=True)
model = AutoModel.from_pretrained(
    "IPEC-COMMUNITY/EO-1-3B",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16
).eval().cuda()

# prepare the model input
batch = {
    "observation.images.image": [img],  # PIL.Image
    "observation.images.wrist_image": [wrist_img],
    "observation.state": [state],
    "task": ["You are a helpful physical agent equipped with both reasoning and robotic control. \
You see the Tic-Tac-Toe board, think strategically, act logically, and block threats."]
}

# generate multimodal outputs
output = processor.generate(model, batch)
text = output.text
actions = output.action.numpy()
```

## 2. Benchmark

Mastering diverse manipulations on multiple embodiments:

| Model        | Franka Pick-and-Place (7 Tasks) | AgiBot Long-horizon Dexterity (4 Tasks) | WidowX Out-of-Box (13 Tasks) | Reasoning Control (4 Tasks) |
|--------------|---------------------------------|-----------------------------------------|------------------------------|-----------------------------|
| $\pi_0$-fast | 0.610                           | 0.449                                   | 0.227                        | —                           |
| $\pi_0$      | 0.831                           | 0.672                                   | 0.693                        | 0.525                       |
| GR00T-N1.5   | 0.857                           | 0.681                                   | 0.705                        | 0.617                       |
| **EO-1**     | **0.935**                       | **0.807**                               | **0.852**                    | **0.831**                   |

Multi-modal benchmark results:

| Model               | RoboVQA  | ERQA     | EO-Bench @ Spatial | EO-Bench @ Temporal | Overall  |
|---------------------|----------|----------|--------------------|---------------------|----------|
| Claude 3.5          | 26.7     | 35.5     | 24.0               | 34.8                | 30.3     |
| GPT-4o (2024-11-20) | 47.2     | 40.0     | 35.6               | 39.3                | 40.5     |
| Qwen2.5 VL 3B       | 55.9     | 35.3     | 20.0               | 22.6                | 33.5     |
| Magma 8B            | 30.3     | 29.3     | 29.4               | 36.7                | 31.4     |
| **EO-1 (3B)**       | **58.5** | **45.5** | **36.4**           | **38.9**            | **44.8** |

Robot control benchmark results:

| Model        | LIBERO    | Simpler @ Google VM | Simpler @ Google VA | Simpler @ WidowX VM |
|--------------|-----------|---------------------|---------------------|---------------------|
| $\pi_0$      | 0.942     | 0.714               | 0.714               | 0.692               |
| $\pi_0$-fast | 0.855     | 0.464               | 0.464               | 0.321               |
| GR00T-N1     | 0.939     | —                   | —                   | —                   |
| Magma        | —         | 0.488               | 0.488               | 0.448               |
| **EO-1**     | **0.982** | **0.765**           | **0.765**           | **0.727**           |

## 📚 3. Citation

If you find this project useful, please consider citing:

```bibtex
@article{eo-1,
  title={EmbodiedOneVision: Interleaved Vision-Text-Action Pretraining for General Robot Control},
  author={Delin Qu and Haoming Song and Qizhi Chen and Zhaoqing Chen and Xianqiang Gao and Xinyi Ye and Qi Lv and Modi Shi and Guanghui Ren and Cheng Ruan and Maoqing Yao and Haoran Yang and Jiacheng Bao and Bin Zhao and Dong Wang},
  journal={arXiv preprint},
  year={2025},
  url={https://arxiv.org/abs/2508.21112}
}
```

modelId: chainway9/blockassist-bc-untamed_quick_eel_1756776835
author: chainway9
last_modified: 2025-09-02T02:00:18Z
downloads: 0
likes: 0
library_name: null
tags: [ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "untamed quick eel", "arxiv:2504.07091", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T02:00:14Z
card:
---
tags:
- gensyn
- blockassist
- gensyn-blockassist
- minecraft
- untamed quick eel
---
# Gensyn BlockAssist

Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).

modelId: ORIGINAL-VIDEO-DE-GENESIS-PENA-VIDEOS/FULL.ORIGINAL.VIDEO.DE.GENESIS.PENA.ABUSADA
author: ORIGINAL-VIDEO-DE-GENESIS-PENA-VIDEOS
last_modified: 2025-09-02T01:58:28Z
downloads: 0
likes: 0
library_name: null
tags: [ "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T01:58:16Z
card: (animated-image spam link pointing to an external redirect; no model card content)

modelId: BootesVoid/cmdnxja7k09ixsp0y4nroojx9_cmew8if3w043tsr531z0m6efb
author: BootesVoid
last_modified: 2025-09-02T01:56:11Z
downloads: 0
likes: 0
library_name: diffusers
tags: [ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
pipeline_tag: text-to-image
createdAt: 2025-09-02T01:56:09Z
card:
---
license: other
license_name: flux-1-dev-non-commercial-license
license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
language:
- en
tags:
- flux
- diffusers
- lora
- replicate
base_model: "black-forest-labs/FLUX.1-dev"
pipeline_tag: text-to-image
# widget:
#   - text: >-
#       prompt
#     output:
#       url: https://...
instance_prompt: NSFW
---

# Cmdnxja7K09Ixsp0Y4Nroojx9_Cmew8If3W043Tsr531Z0M6Efb

<Gallery />

## About this LoRA

This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI. It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer/train

## Trigger words

You should use `NSFW` to trigger the image generation.

## Run this LoRA with an API using Replicate

```py
import replicate

input = {
    "prompt": "NSFW",
    "lora_weights": "https://huggingface.co/BootesVoid/cmdnxja7k09ixsp0y4nroojx9_cmew8if3w043tsr531z0m6efb/resolve/main/lora.safetensors"
}

output = replicate.run(
    "black-forest-labs/flux-dev-lora",
    input=input
)
for index, item in enumerate(output):
    with open(f"output_{index}.webp", "wb") as file:
        file.write(item.read())
```

## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)

```py
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda')
pipeline.load_lora_weights('BootesVoid/cmdnxja7k09ixsp0y4nroojx9_cmew8if3w043tsr531z0m6efb', weight_name='lora.safetensors')
image = pipeline('NSFW').images[0]
```

For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)

## Training details

- Steps: 2500
- Learning rate: 9e-05
- LoRA rank: 16

## Contribute your own examples

You can use the [community tab](https://huggingface.co/BootesVoid/cmdnxja7k09ixsp0y4nroojx9_cmew8if3w043tsr531z0m6efb/discussions) to add images that show off what you've made with this LoRA.

modelId: Venturaa/mistral-recommender-merged-bf16
author: Venturaa
last_modified: 2025-09-02T01:54:54Z
downloads: 0
likes: 0
library_name: transformers
tags: [ "transformers", "safetensors", "mistral", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
pipeline_tag: text-generation
createdAt: 2025-09-02T01:51:54Z
card: (stock auto-generated 🤗 Transformers model card template; every section is boilerplate reading "[More Information Needed]")

modelId: RikiyaT/mxbai-ettin-32m-msmarco-angle4-ft
author: RikiyaT
last_modified: 2025-09-02T01:54:31Z
downloads: 0
likes: 0
library_name: null
tags: [ "license:mit", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T01:54:30Z
card:
---
license: mit
---

# RikiyaT/mxbai-ettin-32m-msmarco-angle4-ft

Ettin + AnglE fine-tuned embedding model.

- **Base Model**: `RikiyaT/mxbai-ettin-32m-pretrained`
- **Pooling Strategy**: `mean` (avg)
- **Training Method**: AnglE loss (ibn/cln + angle=0.02) on a B-format dataset (text, positive, negative).
- **Data Prompts**: `search_query:` / `search_document:` were used during training data creation.

## Usage

### With SentenceTransformers (recommended)

A ready-to-use SentenceTransformers variant is available at **[RikiyaT/mxbai-ettin-32m-msmarco-angle4-ft-st](https://huggingface.co/RikiyaT/mxbai-ettin-32m-msmarco-angle4-ft-st)**.

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('RikiyaT/mxbai-ettin-32m-msmarco-angle4-ft-st')
sentences = ["This is an example sentence", "Each sentence is converted"]
embeddings = model.encode(sentences)
print(embeddings.shape)
```

### With Transformers (this repository)

```python
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained("RikiyaT/mxbai-ettin-32m-msmarco-angle4-ft", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("RikiyaT/mxbai-ettin-32m-msmarco-angle4-ft", trust_remote_code=True)
```

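Since the card says `search_query:` / `search_document:` prompts were used when the training data was built, a retrieval-style scoring sketch would mirror them at inference time. Reusing those prefixes at inference is our assumption here, not something the card states:

```python
# Minimal sketch, assuming the training-time prefixes should also be applied
# at inference; adjust if the authors recommend otherwise.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("RikiyaT/mxbai-ettin-32m-msmarco-angle4-ft-st")

query = model.encode("search_query: how does self-attention work?")
docs = model.encode([
    "search_document: The transformer architecture relies on self-attention.",
    "search_document: MS MARCO is a passage-ranking dataset.",
])
print(util.cos_sim(query, docs))  # higher score = more relevant passage
```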
modelId: liukevin666/blockassist-bc-yawning_striped_cassowary_1756777762
author: liukevin666
last_modified: 2025-09-02T01:50:21Z
downloads: 0
likes: 0
library_name: null
tags: [ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "yawning striped cassowary", "arxiv:2504.07091", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T01:50:15Z
card:
---
tags:
- gensyn
- blockassist
- gensyn-blockassist
- minecraft
- yawning striped cassowary
---
# Gensyn BlockAssist

Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).

modelId: clembench-playpen/Qwen2-7B-DPO_dialogue
author: clembench-playpen
last_modified: 2025-09-02T01:50:12Z
downloads: 0
likes: 0
library_name: transformers
tags: [ "transformers", "safetensors", "generated_from_trainer", "trl", "dpo", "arxiv:2305.18290", "base_model:clembench-playpen/Qwen2-7B-SFT_merged", "base_model:finetune:clembench-playpen/Qwen2-7B-SFT_merged", "endpoints_compatible", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-01T22:32:19Z
card:
---
base_model: clembench-playpen/Qwen2-7B-SFT_merged
library_name: transformers
model_name: outputs
tags:
- generated_from_trainer
- trl
- dpo
licence: license
---

# Model Card for outputs

This model is a fine-tuned version of [clembench-playpen/Qwen2-7B-SFT_merged](https://huggingface.co/clembench-playpen/Qwen2-7B-SFT_merged). It has been trained using [TRL](https://github.com/huggingface/trl).

## Quick start

```python
from transformers import pipeline

question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="clembench-playpen/Qwen2-7B-DPO_dialogue", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```

## Training procedure

[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/dmazzaccara_backup/playpen_Qwen2-7B-SFT_merged/runs/9uybklxb)

This model was trained with DPO, a method introduced in [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://huggingface.co/papers/2305.18290).

### Framework versions

- TRL: 0.12.2
- Transformers: 4.46.3
- Pytorch: 2.5.1
- Datasets: 3.2.0
- Tokenizers: 0.20.3

## Citations

Cite DPO as:

```bibtex
@inproceedings{rafailov2023direct,
    title     = {{Direct Preference Optimization: Your Language Model is Secretly a Reward Model}},
    author    = {Rafael Rafailov and Archit Sharma and Eric Mitchell and Christopher D. Manning and Stefano Ermon and Chelsea Finn},
    year      = 2023,
    booktitle = {Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023},
    url       = {http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-Abstract-Conference.html},
    editor    = {Alice Oh and Tristan Naumann and Amir Globerson and Kate Saenko and Moritz Hardt and Sergey Levine},
}
```

Cite TRL as:

```bibtex
@misc{vonwerra2022trl,
    title        = {{TRL: Transformer Reinforcement Learning}},
    author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
    year         = 2020,
    journal      = {GitHub repository},
    publisher    = {GitHub},
    howpublished = {\url{https://github.com/huggingface/trl}}
}
```

modelId: ppxyn1/Qwen2-0.5B-GRPO-SIUO
author: ppxyn1
last_modified: 2025-09-02T01:49:22Z
downloads: 0
likes: 0
library_name: transformers
tags: [ "transformers", "tensorboard", "safetensors", "generated_from_trainer", "grpo", "trl", "arxiv:2402.03300", "base_model:Qwen/Qwen2-0.5B-Instruct", "base_model:finetune:Qwen/Qwen2-0.5B-Instruct", "endpoints_compatible", "region:us" ]
pipeline_tag: null
createdAt: 2025-09-02T00:19:43Z
card:
---
base_model: Qwen/Qwen2-0.5B-Instruct
library_name: transformers
model_name: Qwen2-0.5B-GRPO-SIUO
tags:
- generated_from_trainer
- grpo
- trl
licence: license
---

# Model Card for Qwen2-0.5B-GRPO-SIUO

This model is a fine-tuned version of [Qwen/Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl).

## Quick start

```python
from transformers import pipeline

question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="ppxyn1/Qwen2-0.5B-GRPO-SIUO", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```

## Training procedure

This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).

### Framework versions

- TRL: 0.22.1
- Transformers: 4.56.0
- Pytorch: 2.8.0
- Datasets: 4.0.0
- Tokenizers: 0.22.0

## Citations

Cite GRPO as:

```bibtex
@article{shao2024deepseekmath,
    title  = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
    author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
    year   = 2024,
    eprint = {arXiv:2402.03300},
}
```

Cite TRL as:

```bibtex
@misc{vonwerra2022trl,
    title        = {{TRL: Transformer Reinforcement Learning}},
    author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
    year         = 2020,
    journal      = {GitHub repository},
    publisher    = {GitHub},
    howpublished = {\url{https://github.com/huggingface/trl}}
}
```

modelId: MiguelPalacios2504/my_policy_sujet1
author: MiguelPalacios2504
last_modified: 2025-09-02T01:49:21Z
downloads: 0
likes: 0
library_name: lerobot
tags: [ "lerobot", "safetensors", "robotics", "smolvla", "dataset:MiguelPalacios2504/datset_sujet_1", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
pipeline_tag: robotics
createdAt: 2025-09-02T01:48:34Z
card:
---
base_model: lerobot/smolvla_base
datasets: MiguelPalacios2504/datset_sujet_1
library_name: lerobot
license: apache-2.0
model_name: smolvla
pipeline_tag: robotics
tags:
- lerobot
- robotics
- smolvla
---

# Model Card for smolvla

<!-- Provide a quick summary of what the model is/does. -->

[SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware.

This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).

---

## How to Get Started with the Model

For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy). Below is the short version on how to train and run inference/eval:

### Train from scratch

```bash
lerobot-train \
  --dataset.repo_id=${HF_USER}/<dataset> \
  --policy.type=act \
  --output_dir=outputs/train/<desired_policy_repo_id> \
  --job_name=lerobot_training \
  --policy.device=cuda \
  --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
  --wandb.enable=true
```

_Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._

### Evaluate the policy / run inference

```bash
lerobot-record \
  --robot.type=so100_follower \
  --dataset.repo_id=<hf_user>/eval_<dataset> \
  --policy.path=<hf_user>/<desired_policy_repo_id> \
  --episodes=10
```

Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or hub checkpoint.

---

## Model Details

- **License:** apache-2.0

lemonhat/Llama-3.1-8B-t1_25k_v2_tag5_filtered_1
lemonhat
2025-09-02T01:48:55Z
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:meta-llama/Llama-3.1-8B", "base_model:finetune:meta-llama/Llama-3.1-8B", "license:other", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-09-02T01:47:32Z
--- library_name: transformers license: other base_model: meta-llama/Llama-3.1-8B tags: - llama-factory - full - generated_from_trainer model-index: - name: t1_25k_v2_tag5_filtered_1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t1_25k_v2_tag5_filtered_1 This model is a fine-tuned version of [meta-llama/Llama-3.1-8B](https://huggingface.co/meta-llama/Llama-3.1-8B) on the t1_25k_v2_tag5_filtered_1 dataset. It achieves the following results on the evaluation set: - Loss: 0.2573 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - total_train_batch_size: 4 - total_eval_batch_size: 4 - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: cosine - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 0.4333 | 0.0760 | 100 | 0.3708 | | 0.4111 | 0.1520 | 200 | 0.3362 | | 0.3141 | 0.2280 | 300 | 0.3172 | | 0.3146 | 0.3040 | 400 | 0.3090 | | 0.2789 | 0.3799 | 500 | 0.3019 | | 0.2647 | 0.4559 | 600 | 0.2982 | | 0.3015 | 0.5319 | 700 | 0.2883 | | 0.3021 | 0.6079 | 800 | 0.2907 | | 0.3021 | 0.6839 | 900 | 0.2832 | | 0.2404 | 0.7599 | 1000 | 0.2764 | | 0.3051 | 0.8359 | 1100 | 0.2768 | | 0.2981 | 0.9119 | 1200 | 0.2697 | | 0.2684 | 0.9878 | 1300 | 0.2740 | | 0.2391 | 1.0638 | 1400 | 0.2742 | | 0.2121 | 1.1398 | 1500 | 0.2705 | | 0.2277 | 1.2158 | 1600 | 0.2635 | | 0.233 | 1.2918 | 1700 | 0.2642 | | 0.2321 | 1.3678 | 1800 | 0.2625 | | 0.2514 | 1.4438 | 1900 | 0.2603 | | 0.2497 | 1.5198 | 2000 | 0.2600 | | 0.199 | 1.5957 | 2100 | 0.2584 | | 0.2094 | 1.6717 | 2200 | 0.2588 | | 0.2054 | 1.7477 | 2300 | 0.2588 | | 0.2055 | 1.8237 | 2400 | 0.2570 | | 0.1926 | 1.8997 | 2500 | 0.2574 | | 0.1982 | 1.9757 | 2600 | 0.2571 | ### Framework versions - Transformers 4.46.1 - Pytorch 2.6.0+cu124 - Datasets 3.1.0 - Tokenizers 0.20.3
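No usage snippet is provided above; a minimal inference sketch with 🤗 Transformers follows. The prompt is illustrative, and it assumes the export bundles a chat template (suggested by the `conversational` tag):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "lemonhat/Llama-3.1-8B-t1_25k_v2_tag5_filtered_1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

# Illustrative prompt; assumes a chat template is shipped with the tokenizer.
messages = [{"role": "user", "content": "Outline the steps to set up a Python virtual environment."}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```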
JeloH/fin_qwe_src_small4
JeloH
2025-09-02T01:48:45Z
0
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-09-02T01:46:13Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
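The getting-started section above is left empty. As a hedged sketch based only on this repo's `text-generation` pipeline tag and Qwen2 architecture (the prompt and generation settings are illustrative):

```python
from transformers import pipeline

# Model id taken from this repo; prompt and settings are illustrative.
generator = pipeline("text-generation", model="JeloH/fin_qwe_src_small4", device_map="auto")
output = generator(
    [{"role": "user", "content": "Summarize today's market movements in one sentence."}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])
```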
WafaaFraih/vit-gpt2-rocov2-ct-finetuned
WafaaFraih
2025-09-02T01:48:40Z
0
0
transformers
[ "transformers", "safetensors", "vision-encoder-decoder", "image-to-text", "generated_from_trainer", "base_model:nlpconnect/vit-gpt2-image-captioning", "base_model:finetune:nlpconnect/vit-gpt2-image-captioning", "license:apache-2.0", "endpoints_compatible", "region:us" ]
image-to-text
2025-09-01T17:16:37Z
--- library_name: transformers license: apache-2.0 base_model: nlpconnect/vit-gpt2-image-captioning tags: - generated_from_trainer metrics: - rouge - bleu model-index: - name: vit-gpt2-rocov2-ct-finetuned results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-gpt2-rocov2-ct-finetuned This model is a fine-tuned version of [nlpconnect/vit-gpt2-image-captioning](https://huggingface.co/nlpconnect/vit-gpt2-image-captioning) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.4304 - Rouge1: 0.1211 - Rougel: 0.1029 - Meteor: 0.0566 - Bleu: 4.9187 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Use OptimizerNames.ADAMW_TORCH_FUSED with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 8 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rougel | Meteor | Bleu | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:-------:| | 2.0167 | 1.0 | 360 | 1.6756 | 0.0980 | 0.0900 | 0.0373 | 0.0 | | 1.5293 | 2.0 | 720 | 1.5147 | 0.1010 | 0.0822 | 0.0491 | 0.0 | | 1.4421 | 3.0 | 1080 | 1.4676 | 0.1132 | 0.0922 | 0.0495 | 0.0 | | 1.4018 | 4.0 | 1440 | 1.4433 | 0.1106 | 0.0930 | 0.0521 | 9.9801 | | 1.359 | 5.0 | 1800 | 1.4315 | 0.1176 | 0.0990 | 0.0542 | 16.5158 | | 1.3179 | 6.0 | 2160 | 1.4262 | 0.1187 | 0.1011 | 0.0566 | 9.8647 | | 1.3243 | 7.0 | 2520 | 1.4280 | 0.1181 | 0.1007 | 0.0550 | 16.5158 | | 1.3069 | 8.0 | 2880 | 1.4304 | 0.1211 | 0.1029 | 0.0566 | 4.9187 | ### Framework versions - Transformers 4.56.0 - Pytorch 2.8.0+cu126 - Datasets 4.0.0 - Tokenizers 0.22.0
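No usage example is given above; a minimal captioning sketch using the base model's standard `VisionEncoderDecoderModel` API follows. It assumes the processor and tokenizer were saved alongside this checkpoint (otherwise load them from `nlpconnect/vit-gpt2-image-captioning`), and the input path is a placeholder for a radiology image such as a CT slice:

```python
import torch
from PIL import Image
from transformers import AutoTokenizer, ViTImageProcessor, VisionEncoderDecoderModel

model_id = "WafaaFraih/vit-gpt2-rocov2-ct-finetuned"
model = VisionEncoderDecoderModel.from_pretrained(model_id)
processor = ViTImageProcessor.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

image = Image.open("ct_slice.png").convert("RGB")  # placeholder path
pixel_values = processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    output_ids = model.generate(pixel_values, max_length=64, num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```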
sekirr/blockassist-bc-masked_tenacious_whale_1756777579
sekirr
2025-09-02T01:46:59Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "masked tenacious whale", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:46:56Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - masked tenacious whale --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
BAKAK/EmotionDiary_Finetunning
BAKAK
2025-09-02T01:43:43Z
0
0
transformers
[ "transformers", "safetensors", "roberta", "text-classification", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2025-09-02T01:43:13Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
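The getting-started section above is left empty. As a hedged sketch based only on this repo's `text-classification` pipeline tag and RoBERTa architecture (the label set and training language are undocumented, so inspect the returned labels):

```python
from transformers import pipeline

# Model id taken from this repo; the emotion label set is undocumented.
classifier = pipeline("text-classification", model="BAKAK/EmotionDiary_Finetunning")
print(classifier("I finally finished my project today and I feel great!"))
```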
kushairinorazli/ppo-LunarLander-v3
kushairinorazli
2025-09-02T01:41:41Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v3", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2025-09-02T01:41:28Z
--- library_name: stable-baselines3 tags: - LunarLander-v3 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v3 type: LunarLander-v3 metrics: - type: mean_reward value: 279.99 +/- 21.94 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v3** This is a trained model of a **PPO** agent playing **LunarLander-v3** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename inside the repo is an assumption based on the usual `huggingface_sb3` naming): ```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# Filename assumed to follow the standard huggingface_sb3 convention.
checkpoint = load_from_hub("kushairinorazli/ppo-LunarLander-v3", "ppo-LunarLander-v3.zip")
model = PPO.load(checkpoint)
```
Venturaa/mistral-recommender-merged
Venturaa
2025-09-02T01:40:19Z
0
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-09-02T01:37:25Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
liukevin666/blockassist-bc-yawning_striped_cassowary_1756777128
liukevin666
2025-09-02T01:39:48Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "yawning striped cassowary", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:39:42Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - yawning striped cassowary --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
omerbkts/blockassist-bc-insectivorous_bold_lion_1756777155
omerbkts
2025-09-02T01:39:38Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "insectivorous bold lion", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:39:34Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - insectivorous bold lion --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
FlagRelease/phi-4-hygon-FlagOS
FlagRelease
2025-09-02T01:39:03Z
0
0
null
[ "safetensors", "phi3", "region:us" ]
null
2025-09-02T01:38:25Z
# Introduction **FlagOS** is a unified heterogeneous computing software stack for large models, co-developed with leading global chip manufacturers. With core technologies such as the **FlagScale** distributed training/inference framework, **FlagGems** universal operator library, **FlagCX** communication library, and **FlagTree** unified compiler, the **FlagRelease** platform leverages the FlagOS stack to automatically produce and release various combinations of <chip + open-source model>. This enables efficient and automated model migration across diverse chips, opening a new chapter for large model deployment and application. Based on this, the **phi-4-hygon-FlagOS** model is adapted for the Hygon chip using the FlagOS software stack, enabling: ### Integrated Deployment - Deep integration with the open-source [FlagScale framework](https://github.com/FlagOpen/FlagScale) - Out-of-the-box inference scripts with pre-configured hardware and software parameters - Released **FlagOS** container image supporting deployment within minutes ### Consistency Validation - Rigorously evaluated through benchmark testing: performance and results from the FlagOS software stack are compared against native stacks on multiple public benchmarks. # Technical Overview ## **FlagScale Distributed Training and Inference Framework** FlagScale is an end-to-end framework for large models across heterogeneous computing resources, maximizing computational efficiency and ensuring model validity through core technologies. Its key advantages include: - **Unified Deployment Interface:** Standardized command-line tools support one-click service deployment across multiple hardware platforms, significantly reducing adaptation costs in heterogeneous environments. - **Intelligent Parallel Optimization:** Automatically generates optimal distributed parallel strategies based on chip computing characteristics, achieving dynamic load balancing of computation/communication resources. - **Seamless Operator Switching:** Deep integration with the FlagGems operator library allows high-performance operators to be invoked via environment variables without modifying model code. ## **FlagGems Universal Large-Model Operator Library** FlagGems is a Triton-based, cross-architecture operator library collaboratively developed with industry partners. Its core strengths include: - **Full-stack Coverage**: Over 100 operators, with a broader range of operator types than competing libraries. - **Ecosystem Compatibility**: Supports 7 accelerator backends. Ongoing optimizations have significantly improved performance. - **High Efficiency**: Employs unique code generation and runtime optimization techniques for faster secondary development and better runtime performance compared to alternatives. ## **FlagEval Evaluation Framework** **FlagEval (Libra)** is a comprehensive evaluation system and open platform for large models launched in 2023. It aims to establish scientific, fair, and open benchmarks, methodologies, and tools to help researchers assess model and training algorithm performance. It features: - **Multi-dimensional Evaluation**: Supports 800+ model evaluations across NLP, CV, Audio, and Multimodal fields, covering 20+ downstream tasks including language understanding and image-text generation. - **Industry-Grade Use Cases**: Has completed horizontal evaluations of mainstream large models, providing authoritative benchmarks for chip-model performance validation.
# Evaluation Results ## Benchmark Results | Metrics | phi-4-H100-CUDA | phi-4-hygon-FlagOS | | ------------------------- | --------------------- | ------------------ | |AIME-0shot@avg1|0.200|0.200| |GPQA-0shot@avg1|0.241|0.225| |MMLU-5shots@avg1|0.713|0.714| |MUSR-0shot@avg1|0.594|0.574| |LiveBench-0shot@avg1|0.431|0.422| # User Guide **Environment Setup** | Item | Version | | ------------- | ------------------------------------------------------------ | | Docker Version | Docker version 24.0.6, build ed223bc | | Operating System | Ubuntu 22.04.4 LTS | | FlagScale | Version: 0.8.0 | | FlagGems | Version: 3.0 | ## Operation Steps ### Download Open-source Model Weights ```bash
pip install modelscope
modelscope download --model LLM-Research/phi-4 --local_dir /share/phi-4
``` ### Download FlagOS Image BE AWARE: it has not yet been decided whether Hygon's FlagOS image will be publicly accessible over the internet. To obtain this image, you can contact us or Hygon through issues. ```bash
docker pull harbor.baai.ac.cn/flagrelease-inner/flagrelease_hygon_phi
``` ### Start the inference service ```bash
# Container startup
docker run -it \
  --name=flagos \
  --network=host \
  --privileged \
  --ipc=host \
  --shm-size=16G \
  --memory="512g" \
  --ulimit stack=-1:-1 \
  --ulimit memlock=-1:-1 \
  --cap-add=SYS_PTRACE \
  --security-opt seccomp=unconfined \
  --device=/dev/kfd \
  --device=/dev/dri \
  --group-add video \
  -u root \
  -v /opt/hyhal:/opt/hyhal \
  -v /share:/share \
  harbor.baai.ac.cn/flagrelease-inner/flagrelease_hygon_phi \
  /bin/bash
``` ### Serve ```bash
flagscale serve phi_4
``` ## Service Invocation ### API-based Invocation Script ```python
import openai

openai.api_key = "EMPTY"
openai.base_url = "http://<server_ip>:9010/v1/"

model = "phi-4-hygon-flagos"
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What's the weather like today?"}
]

response = openai.chat.completions.create(
    model=model,
    messages=messages,
    stream=False,
)
print(response.choices[0].message.content)
``` ### AnythingLLM Integration Guide #### 1. Download & Install - Visit the official site: https://anythingllm.com/ - Choose the appropriate version for your OS (Windows/macOS/Linux) - Follow the installation wizard to complete the setup #### 2. Configuration - Launch AnythingLLM - Open settings (bottom left, fourth tab) - Configure core LLM parameters - Click "Save Settings" to apply changes #### 3. Model Interaction - After model loading is complete: - Click **"New Conversation"** - Enter your question (e.g., “Explain the basics of quantum computing”) - Click the send button to get a response # Contributing We warmly welcome global developers to join us: 1. Submit Issues to report problems 2. Create Pull Requests to contribute code 3. Improve technical documentation 4. Expand hardware adaptation support # License The weights of this model are sourced from LLM-Research/phi-4 and are released under the Apache 2.0 license: https://www.apache.org/licenses/LICENSE-2.0.txt
akirafudo/blockassist-bc-insectivorous_bold_lion_1756777036
akirafudo
2025-09-02T01:37:40Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "insectivorous bold lion", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:37:37Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - insectivorous bold lion --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
boonpertou/blockassist-bc-tenacious_rugged_cheetah_1756777022
boonpertou
2025-09-02T01:37:36Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "tenacious rugged cheetah", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:37:02Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - tenacious rugged cheetah --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
calegpedia/blockassist-bc-stealthy_slimy_rooster_1756775386
calegpedia
2025-09-02T01:36:05Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "stealthy slimy rooster", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:36:02Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - stealthy slimy rooster --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
omerbektass/blockassist-bc-insectivorous_bold_lion_1756776918
omerbektass
2025-09-02T01:35:42Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "insectivorous bold lion", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:35:38Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - insectivorous bold lion --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
poldanon/blockassist-bc-pale_amphibious_dove_1756776886
poldanon
2025-09-02T01:35:37Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "pale amphibious dove", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:35:18Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - pale amphibious dove --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
omerbkts/blockassist-bc-insectivorous_bold_lion_1756776804
omerbkts
2025-09-02T01:33:44Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "insectivorous bold lion", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:33:40Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - insectivorous bold lion --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
ggozzy/blockassist-bc-stubby_yapping_mandrill_1756776431
ggozzy
2025-09-02T01:28:25Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "stubby yapping mandrill", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:28:19Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - stubby yapping mandrill --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
capungmerah627/blockassist-bc-stinging_soaring_porcupine_1756774947
capungmerah627
2025-09-02T01:27:38Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "stinging soaring porcupine", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:27:35Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - stinging soaring porcupine --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF
mradermacher
2025-09-02T01:26:09Z
1
0
transformers
[ "transformers", "gguf", "programming", "code generation", "code", "codeqwen", "moe", "coding", "coder", "qwen2", "chat", "qwen", "qwen-coder", "Qwen3-Coder-30B-A3B-Instruct", "Qwen3-30B-A3B", "mixture of experts", "128 experts", "8 active experts", "1 million context", "qwen3", "finetune", "brainstorm 40x", "brainstorm", "optional thinking", "qwen3_moe", "en", "fr", "zh", "de", "base_model:DavidAU/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct", "base_model:quantized:DavidAU/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2025-08-31T23:14:38Z
--- base_model: DavidAU/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct language: - en - fr - zh - de library_name: transformers license: apache-2.0 mradermacher: readme_rev: 1 quantized_by: mradermacher tags: - programming - code generation - code - codeqwen - programming - code generation - code - codeqwen - moe - coding - coder - qwen2 - chat - qwen - qwen-coder - chat - qwen - qwen-coder - moe - Qwen3-Coder-30B-A3B-Instruct - Qwen3-30B-A3B - mixture of experts - 128 experts - 8 active experts - 1 million context - qwen3 - finetune - brainstorm 40x - brainstorm - optional thinking - qwen3_moe --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static quants of https://huggingface.co/DavidAU/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct <!-- provided-files --> ***For a convenient overview and download list, visit our [model page for this model](https://hf.tst.eu/model#Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF).*** weighted/imatrix quants are available at https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q2_K.gguf) | Q2_K | 19.5 | | | [GGUF](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q3_K_S.gguf) | Q3_K_S | 23.1 | | | [GGUF](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q3_K_M.gguf) | Q3_K_M | 25.5 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q3_K_L.gguf) | Q3_K_L | 27.6 | | | [GGUF](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.IQ4_XS.gguf) | IQ4_XS | 28.7 | | | [GGUF](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q4_K_S.gguf) | Q4_K_S | 30.3 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q4_K_M.gguf) | Q4_K_M | 32.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q5_K_S.gguf) | Q5_K_S | 36.6 | | | [GGUF](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q5_K_M.gguf) | Q5_K_M | 37.8 | | | 
[GGUF](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q6_K.gguf) | Q6_K | 43.6 | very good quality | | [PART 1](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q8_0.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF/resolve/main/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct.Q8_0.gguf.part2of2) | Q8_0 | 56.4 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
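For local use, the single-file quants above can also be loaded with `llama-cpp-python`; a minimal sketch follows (the glob filename and context size are illustrative, and multi-part quants such as Q8_0 must first be concatenated as described in the linked README):

```python
from llama_cpp import Llama

# Downloads the matching quant from the repo on first use.
llm = Llama.from_pretrained(
    repo_id="mradermacher/Qwen3-54B-A3B-2507-YOYO2-TOTAL-RECALL-Instruct-GGUF",
    filename="*Q4_K_S.gguf",  # glob pattern; pick a quant your RAM/VRAM can hold
    n_ctx=8192,
)
out = llm.create_chat_completion(messages=[{"role": "user", "content": "Write a binary search in Python."}])
print(out["choices"][0]["message"]["content"])
```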
helmutsukocok/blockassist-bc-loud_scavenging_kangaroo_1756774801
helmutsukocok
2025-09-02T01:24:43Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "loud scavenging kangaroo", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:24:40Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - loud scavenging kangaroo --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Jniya/Qwen2.5-1.5B-Instruct-Gensyn-Swarm-robust_energetic_bison
Jniya
2025-09-02T01:24:18Z
0
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "rl-swarm", "genrl-swarm", "grpo", "gensyn", "I am robust_energetic_bison", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-09-02T01:23:24Z
--- library_name: transformers tags: - rl-swarm - genrl-swarm - grpo - gensyn - I am robust_energetic_bison --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
p878567/gemma-2-2b-it-preference_dataset_mixture2_and_safe_pku-Preference
p878567
2025-09-02T01:23:36Z
0
0
transformers
[ "transformers", "safetensors", "gemma2", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-09-02T01:21:26Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
sphiratrioth666/GM-4_Game_Mistress_Environment_SillyTavern
sphiratrioth666
2025-09-02T01:22:53Z
0
2
null
[ "silly", "tavern", "sillytavern", "silly tavern", "rp", "rpg", "roleplay", "roleplaying", "environment", "character", "card", "generator", "tool", "en", "base_model:ArliAI/QwQ-32B-ArliAI-RpR-v4", "base_model:finetune:ArliAI/QwQ-32B-ArliAI-RpR-v4", "license:cc-by-nc-4.0", "region:us" ]
null
2025-09-01T18:03:37Z
--- license: cc-by-nc-4.0 language: - en base_model: - TheDrummer/Cydonia-24B-v4.1 - TheDrummer/Cydonia-R1-24B-v4 - kyx0r/Neona-12B - Sao10K/MN-12B-Lyra-v4 - inflatebot/MN-12B-Mag-Mell-R1 - TheDrummer/Rocinante-12B-v1.1 - anthracite-org/magnum-v4-22b - MarinaraSpaghetti/NemoMix-Unleashed-12B - ArliAI/QwQ-32B-ArliAI-RpR-v4 - ArliAI/Qwen3-30B-A3B-ArliAI-RpR-v4-Fast tags: - silly - tavern - sillytavern - silly tavern - rp - rpg - roleplay - roleplaying - environment - character - card - generator - tool --- <h1><ins>GM-4: Game Mistress Environment ~ by Sphiratrioth</ins></h1> <div style="text-align: justify">Welcome to the new age of roleplaying. No more repetitive starting messages, no more fixed scenarios. The LLM becomes your Game Mistress in a tabletop-RPG-like session. Choose a world to roleplay in, choose a setting and a genre you want and simply start having fun. The LLM will generate a scene for you and it will push the story forward - in line with the selected setting & genre. The starting message itself will be different every time. Time of day/night & weather for the current scene will be rolled randomly, and one of 50+ setting-filtered locations will be randomly selected or you can choose the location yourself. A highly customizable roleplay experience without editing the character cards once they are created and without any custom extensions. All you need are my customized lorebooks. </div> ![image/png](https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/gOQoveF5huZp4Bpb5iE-B.png) <div style="text-align: justify"><b>Permissions: you are allowed to use it and generate your own remixes or versions of the GM-4 Environment, worlds & scenarios. However, if you upload your creations online - you need to give me credit and attach a link to this exact repository. All the creations must be clearly credited as using the GM-4 Environment by Sphiratrioth.</b> </div> <p><a href="https://buymeacoffee.com/sphiratrioth"> <img src="https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/DInaQUVOSuDcBEKOJoNPH.png" alt="Buy me a coffee" width="1273" height="64"> </a></p> <div style="text-align: justify">Here, you can buy me a coffee. Just click on the banner above. All the work I do remains free - but I drink a lot of coffee, actually - so it is very nice when you show support by fueling my caffeine addiction :-D As a bonus, you can also find a lot of my other projects. Some Reshade presets for different games, my custom TTRPG system (soon), maybe other goodies in the future. </div> <h2><ins>What is it?</ins></h2> <div style="text-align: justify">GM-4 format cards are just normal V2/V3 character cards but with custom, specially crafted lorebooks that change everything. There is one mainframe lorebook to activate the environment (~900 entries), a modular version with that exact lorebook split into SFW & NSFW parts and a custom sysprompt for SillyTavern to make everything work. You trigger a given scenario/setting/genre through normal chat and the instructions in the lorebook are sent to the LLM without displaying anything in the chat, so it is like OOC on steroids - it generates a starting message and makes the LLM actually follow the scenario/setting/world you picked from the list. The starting message is different every time; instructions with the actual scenario, setting information etc. are injected as invisible instructions for the LLM. No roleplay feels the same - even for the same scenario. No more boring repetitions. 
On top of that - different scenarios may be mixed together. For example, if you want a scene to start in a sci-fi cyberpunk world, you can keep it SFW, you can turn it NSFW or you can add a supernatural horror/hunting flavor to it. You do it all by just triggering two scenarios at the same time. You do not need to edit the GM character card itself, and you do not need different variants of the same card either - it all works thanks to the system prompt and the set of customized lorebooks that constitute the GM-4 Environment. </div> <br> <div style="text-align: justify">In other words - everything matches everything and anything may be picked freely or rolled. I am a game designer for a AAA games studio, I do it for a living - I make stories and games in a modular form - thus - my roleplays also look and work just like that.</div> ![image/png](https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/LbNjkeDE8Ho7dIepQCuSI.png) <div style="text-align: justify"><h2><ins>What does it do?</ins></h2> <li>it becomes your game mistress - it generates the story & pushes it forward, but you can do whatever you want in a given setting/world - you are like a character in a TTRPG game; <li>it generates a different starting message for the same variables every single time (no more repetitive roleplays); <li>it automatically randomizes time & weather for the current scene; <li>it makes it possible to select a specific, setting-filtered location or it rolls it for you randomly (we all know the "your journey starts at the inn..." trope, and the LLMs love it almost as much as real Game Masters do - thus - it is better to select or roll a location so you do not always start at the inn);</li> <li>it allows picking all of those options from a normal chat window (no need to edit the character card itself, no need for multiple versions of the same character - you simply type the trigger words in the normal chat window, hit enter - and the magic happens);</li> <li>it guides characters in a specific way during the whole roleplay - I wrote a lot of custom instructions to improve the experience (for instance, it stops the LLM from directly repeating what {{user}} did in {{char}}'s response, which personally - infuriates me; or it tells the LLM to concentrate on sounds, on visuals, on characters, on feelings in narration; or it forces the NPC characters to act, to take initiative, to behave more like real humans do);</li> <li>it does not require any extensions nor special knowledge - again - you just pick the intuitive trigger words listed in the alternative starting messages that are used as scenario/variables lists (remember - you roleplay only in the default starting message - all the alternative starting messages serve to present the available options in a convenient way - do not roleplay anywhere except in the DEFAULT STARTING MESSAGE);</li> <li>it allows randomly rolling locations or choosing where you want to start</li></div> <br> <h1><ins>How to set it up?</ins></h1> <b>Download all the files in the repository:</b> 1. Navigate to the files section of this post and download all the files. 2. Choose the AIO version or the modular version. AIO includes NSFW and it is just one lorebook to activate in SillyTavern. The modular version allows selecting separate modules freely but you need to activate more lorebooks. BEWARE: DO NOT MIX THE AIO AND THE MODULAR LOREBOOKS! 3. Import the GM Character, the lorebooks & the GM sysprompts in SillyTavern. 
Use the character import functionality within SillyTavern for the character card, or copy-paste the .png character card file to (...):\SillyTavern\data\default-user\characters. Then, copy-paste the lorebook files into your SillyTavern lorebooks directory, aka (...):\SillyTavern\data\default-user\worlds. You need just the one "00. AIO - Game Mistress (Framework)" lorebook + all the scenario lorebooks, or a varied number of the modular version framework lorebooks + all the scenario lorebooks. Choose just the AIO or just the MODULAR lorebooks, do not mix those two formats together. They are the same - in one pack or split into parts. Finally, copy the system prompt files to (...):\SillyTavern\data\default-user\sysprompt. 4. Activate the proper GM-4 Sysprompt: (character) if you want the AI to be your GM, or (user) if you want to be the GM and make the AI roleplay a character in the world (experimental, it may or may not work; it works much better within the SX-4 environment with a fully fleshed out character rather than within the GM-4 environment!) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/dnnbdduaeNu1FqPfnvUSA.png) 5. Activate the lorebooks: Framework [AIO] + Scenario lorebooks, or all the Framework [MODULAR] lorebooks + Scenario lorebooks in SillyTavern. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/mag2nJfOESUn_Xx5E2T-n.png) 6. Select the Game Mistress character. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/ophtVDNyNaW8zD3X0JH0T.png) 7. Swipe the default starting message left to find the alternate starting messages - they are just convenient lists of all the available options and their trigger words. There are also example trigger chains in one of the messages - i.e. example things you will type in the chat window to start the roleplay - but ONLY UNDER THE DEFAULT STARTING MESSAGE. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/Fgj52Nl1qcZV8lu5oyCOq.png) <b>Roleplay:</b> 1. Return to the DEFAULT STARTING MESSAGE. BEWARE: THIS IS THE ONLY MESSAGE WHERE YOU START YOUR ROLEPLAYS. ALL THE ALTERNATIVE STARTING MESSAGES ARE JUST TRIGGER WORD LISTS - FOR CONVENIENCE. DO NOT ROLEPLAY THERE! ![image/png](https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/i2-qghXAAEU_iOlXCH942.png) 2. Type in the trigger words you want. You need a scenario/setting at minimum; you can add the location if you want (and it is highly suggested). Something like: "GM: fantasy, Location: dungeon" or "GM: 03, L: street" (see the included lists of options in the alternative starting messages to find what is available!) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/UMPeFO3qponfzfBoHHjiA.png) 3. Hit ENTER - the actual starting scene/message for your roleplay will be generated. If the LLM makes a mistake, does not jump right into the world of your choice, or blabbers something about the Game Mistress and the TTRPG session, try again. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/66a2a6820c4cd36ffbc95623/KEsi4p2p9Q72SMoWPLSST.png) 4. Optional: after deciding what you want to roleplay, you can also activate one of the additional "World" lorebooks to make the roleplay experience better. <div style="text-align: justify"><h2><ins>Tips & Tricks</ins></h2> - always roleplay in the default starting message. 
If you edit it, just remember that it needs to include a suggestion/set-up for roleplaying - so that the LLM understands the instructions from the lorebook properly and generates the actual starting message for a roleplay. - edit the scenarios & other interesting things within the framework and scenario lorebooks; - you can turn options on & off by changing the "trigger" value: set it from 0 to 100 to turn something on, and from 100 to 0 to turn something off. This way, you can permanently select something for your roleplays and turn all the rest off (a scripted way of doing this in bulk is sketched below). However, you will still need to use the trigger words if they're defined within the trigger words field. If you want to just always roleplay in a given setting, change this setting's activation to a "blue dot" and delete the trigger words, then set the trigger values of all the other entries within the group to 0. - some things may be triggered with a full word or with a "quick trigger", like fantasy: 01 vs F: 01; see the scenario lists in the alternate starting messages for more information on what can be triggered and how. It is very convenient and intuitive!</div> <h2><ins>UPDATES</ins></h2> <div style="text-align: justify">v.1.0 <li>RELEASE</li>
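<h2><ins>Bulk-Editing Triggers (Script Sketch)</ins></h2> <div style="text-align: justify">As mentioned in Tips & Tricks, the lorebooks are plain JSON files, so bulk changes do not have to go through the UI. Below is a minimal Python sketch of flipping trigger values in one pass. It assumes SillyTavern's usual World Info layout - an "entries" map whose items carry "comment" and "probability" fields - and the filename plus the comment-based matching are hypothetical examples; verify the field names against your own exported lorebook before running it.</div>

```python
import json

# Hypothetical bulk edit: pin one scenario on (trigger = 100) and turn the
# other scenario entries off (trigger = 0). The field names ("entries",
# "comment", "probability") are assumed from SillyTavern's World Info JSON
# export - check them against your own file first.
LOREBOOK = "00. AIO - Game Mistress (Framework).json"  # assumed filename

with open(LOREBOOK, encoding="utf-8") as f:
    book = json.load(f)

for uid, entry in book["entries"].items():
    comment = entry.get("comment", "").lower()
    if "fantasy" in comment:      # the scenario you want permanently on
        entry["probability"] = 100
    elif "scenario" in comment:   # every other scenario entry
        entry["probability"] = 0

with open(LOREBOOK, "w", encoding="utf-8") as f:
    json.dump(book, f, ensure_ascii=False, indent=2)
```

Reload the lorebook in SillyTavern afterwards so the edited values are picked up.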
dwoprer/blockassist-bc-durable_marine_bee_1756775929
dwoprer
2025-09-02T01:19:34Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "durable marine bee", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:18:49Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - durable marine bee --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
original-mano-ktk-lake-video-viral-clip/New.full.videos.mano.ktk.lake.viral.video.Official.Tutorial
original-mano-ktk-lake-video-viral-clip
2025-09-02T01:19:10Z
0
0
null
[ "region:us" ]
null
2025-09-02T01:18:59Z
weleen/grab_bread_and_put
weleen
2025-09-02T01:18:50Z
0
0
lerobot
[ "lerobot", "safetensors", "act", "robotics", "dataset:weleen/grab_bread_and_put", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2025-09-02T01:18:30Z
--- datasets: weleen/grab_bread_and_put library_name: lerobot license: apache-2.0 model_name: act pipeline_tag: robotics tags: - lerobot - act - robotics --- # Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high success rates. This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index). --- ## How to Get Started with the Model For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy). Below is the short version on how to train and run inference/eval: ### Train from scratch ```bash lerobot-train \ --dataset.repo_id=${HF_USER}/<dataset> \ --policy.type=act \ --output_dir=outputs/train/<desired_policy_repo_id> \ --job_name=lerobot_training \ --policy.device=cuda \ --policy.repo_id=${HF_USER}/<desired_policy_repo_id> --wandb.enable=true ``` _Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._ ### Evaluate the policy/run inference ```bash lerobot-record \ --robot.type=so100_follower \ --dataset.repo_id=<hf_user>/eval_<dataset> \ --policy.path=<hf_user>/<desired_policy_repo_id> \ --episodes=10 ``` Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or hub checkpoint. --- ## Model Details - **License:** apache-2.0
giovannidemuri/llama3b-llama8b-er-v536-seed2-seed2-hx-alpaca-fpt
giovannidemuri
2025-09-02T01:18:33Z
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-09-01T23:47:24Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
liukevin666/blockassist-bc-yawning_striped_cassowary_1756775827
liukevin666
2025-09-02T01:18:08Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "yawning striped cassowary", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:18:00Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - yawning striped cassowary --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
poldanon/blockassist-bc-pale_amphibious_dove_1756775766
poldanon
2025-09-02T01:16:59Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "pale amphibious dove", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:16:38Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - pale amphibious dove --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
omerbektass/blockassist-bc-insectivorous_bold_lion_1756775731
omerbektass
2025-09-02T01:15:55Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "insectivorous bold lion", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:15:51Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - insectivorous bold lion --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
tscstudios/iwal7zawwerd8k7vjzyubn9guup1_d5dd9f3d-5cd1-4fac-aed7-eb0eb99af238
tscstudios
2025-09-02T01:15:08Z
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
2025-09-02T01:15:06Z
--- license: other license_name: flux-1-dev-non-commercial-license license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md language: - en tags: - flux - diffusers - lora - replicate base_model: "black-forest-labs/FLUX.1-dev" pipeline_tag: text-to-image # widget: # - text: >- # prompt # output: # url: https://... instance_prompt: TOK --- # Iwal7Zawwerd8K7Vjzyubn9Guup1_D5Dd9F3D 5Cd1 4Fac Aed7 Eb0Eb99Af238 <Gallery /> ## About this LoRA This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI. It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer/train ## Trigger words You should use `TOK` to trigger the image generation. ## Run this LoRA with an API using Replicate ```py import replicate input = { "prompt": "TOK", "lora_weights": "https://huggingface.co/tscstudios/iwal7zawwerd8k7vjzyubn9guup1_d5dd9f3d-5cd1-4fac-aed7-eb0eb99af238/resolve/main/lora.safetensors" } output = replicate.run( "black-forest-labs/flux-dev-lora", input=input ) for index, item in enumerate(output): with open(f"output_{index}.webp", "wb") as file: file.write(item.read()) ``` ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda') pipeline.load_lora_weights('tscstudios/iwal7zawwerd8k7vjzyubn9guup1_d5dd9f3d-5cd1-4fac-aed7-eb0eb99af238', weight_name='lora.safetensors') image = pipeline('TOK').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters) ## Training details - Steps: 2000 - Learning rate: 0.0004 - LoRA rank: 16 ## Contribute your own examples You can use the [community tab](https://huggingface.co/tscstudios/iwal7zawwerd8k7vjzyubn9guup1_d5dd9f3d-5cd1-4fac-aed7-eb0eb99af238/discussions) to add images that show off what you’ve made with this LoRA.
benaxelrod/q-FrozenLake-v1-4x4-noSlippery
benaxelrod
2025-09-02T01:13:57Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2025-09-02T01:13:55Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage

```python
model = load_from_hub(repo_id="benaxelrod/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc.)
env = gym.make(model["env_id"])
```
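The snippet above assumes a `load_from_hub` helper, which is not part of a published library; it comes from the Hugging Face Deep RL course notebooks. A minimal sketch of that helper, assuming the course convention of pickling a dict that carries the Q-table together with `env_id`:

```python
import pickle

import gymnasium as gym
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download and unpickle a Q-learning model dict from the Hub."""
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(local_path, "rb") as f:
        return pickle.load(f)


model = load_from_hub(repo_id="benaxelrod/q-FrozenLake-v1-4x4-noSlippery",
                      filename="q-learning.pkl")
env = gym.make(model["env_id"], is_slippery=False)  # matches the no-slippery variant
```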
prernac1/PretendParentAI
prernac1
2025-09-02T01:11:40Z
0
0
transformers
[ "transformers", "safetensors", "parenting", "reddit", "question-answering", "en", "arxiv:1910.09700", "base_model:mistralai/Mistral-7B-Instruct-v0.3", "base_model:finetune:mistralai/Mistral-7B-Instruct-v0.3", "license:apache-2.0", "endpoints_compatible", "region:us" ]
question-answering
2025-09-01T21:49:41Z
--- library_name: transformers tags: - parenting - reddit license: apache-2.0 language: - en metrics: - rouge - bertscore - bleu base_model: - mistralai/Mistral-7B-Instruct-v0.3 pipeline_tag: question-answering --- # Model Card for PretendParentAI <!-- Provide a quick summary of what the model is/does. --> This model is Mistral Instruct v0.3 instruction-tuned on Reddit data from r/parenting using QLoRA. The result is a model that offers parenting advice while pretending to be a parent, i.e. it grounds its advice in its own made-up anecdotes. ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. - **Developed by:** Prerna Chikersal - **Model type:** PEFT (Parameter-Efficient Fine-Tuning using LoRA) - **Language(s) (NLP):** English - **License:** apache-2.0 - **Finetuned from model:** Mistral-7B-Instruct-v0.3 ### Model Sources <!-- Provide the basic links for the model. --> - **Repository:** https://github.com/prernaa/ParentPalAI ## Uses <!-- Address questions around how the model is intended to be used. --> Can be used to: - Provide parenting guidance. - Simulate users on reddit-like platforms. ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> Please do not use this model to actually simulate reddit users and post on sites like reddit. ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> It generates longer responses than humans do. ## How to Get Started with the Model Use the code below to get started with the model. ### Install libraries ```bash
pip install -q -U bitsandbytes
pip install -q -U git+https://github.com/huggingface/transformers.git
pip install -q -U git+https://github.com/huggingface/peft.git
pip install -q -U git+https://github.com/huggingface/accelerate.git
pip install -q -U git+https://github.com/huggingface/trl.git
pip install flash-attn --no-build-isolation
pip install quanto
``` ### Load the Base Model and Tokenizer ```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

torch.backends.cuda.matmul.allow_tf32 = True
torch.set_float32_matmul_precision("high")

BASE_MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.3"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,               # loads the base model in 4-bit precision
    bnb_4bit_use_double_quant=True,  # double quantization saves VRAM
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL_ID,
    quantization_config=bnb_config,
    device_map="auto",
    dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",  # FA2 is fastest on A100
    token=HF_TOKEN,  # requires a Hugging Face login token
)

# Load the tokenizer - we infer one prompt at a time, so no padding is needed
tokenizer = AutoTokenizer.from_pretrained(
    BASE_MODEL_ID,
    add_bos_token=False,
    add_eos_token=False,
    token=HF_TOKEN,
)
tokenizer.pad_token = tokenizer.eos_token
``` ### Load the PretendParentAI Adapter ```python
from peft import PeftModel

# Load the adapter weights for the fine-tuned model
model = PeftModel.from_pretrained(model, "prernac1/PretendParentAI")
model.config.use_cache = True
model.eval()  # evaluation mode: disables dropout for inference
``` ### Set up prompting ```python
import re

# Build the instruction prompt
def build_prompt(prompt):
    return f'''<s>[INST] You are a friendly parenting companion who gives helpful advice like a fellow parent would. You sound warm and practical — not robotic or formal. Keep your answers under 150 words. Stay focused on the user's question. Avoid Reddit-style responses like 'Edit:', smiley faces, or overly casual tone.

User's Prompt: {prompt} [/INST]'''

# Split a decoded generation into question and answer
def get_q_and_a(text):
    qasplit = text.split("[/INST]")
    q = qasplit[0].replace('[INST]', '').replace('<s>', '')
    a = qasplit[1].replace('</s>', '')
    return q.strip(), a.strip()

# Clean the answer text
def clean_text(text):
    # Cut anything after 'Source:'
    text = re.split(r'\b(Source:)', text)[0].strip()
    # Remove the last incomplete sentence (if any)
    sentences = re.split(r'(?<=[.!?])\s+', text)
    if sentences and not text.strip().endswith(('.', '!', '?')):
        sentences = sentences[:-1]  # drop the trailing incomplete part
    return ' '.join(sentences)
``` ### Infer from PretendParentAI ```python
prompt = "How do I get my toddler to sleep through the night?"  # example input

inputs = tokenizer(build_prompt(prompt), return_tensors="pt").to(model.device)

with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=150,  # aim for shorter answers
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.15,
        eos_token_id=tokenizer.eos_token_id,
    )

raw_output = tokenizer.decode(outputs[0], skip_special_tokens=False)
raw_output = raw_output.split("</s>")[0]

ques, ans = get_q_and_a(raw_output)
print("\nQuestion")
print(ques)
print("\nCleaned Answer")
print(clean_text(ans))
``` ## Training Details ### Training Data <!-- This should link to a Dataset Card where possible. --> I used Reddit data that is not available on HF. ### Training Procedure QLoRA on an A100 in Google Colab. #### Preprocessing The dataset was carefully curated to include high-quality reddit answers. See the repo linked above for more info. Training is limited to samples with fewer than 630 tokens. #### Training Hyperparameters - **Training regime:** QLoRA, bf16 mixed precision ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> A subset of the dataset was used as the validation set. We computed BLEU, ROUGE, and BERTScore on the validation set and used them to compare this model with Mistral Instruct v0.3. Then, we generated a test set using GPT-4o and used GPT as an LLM-as-a-judge to compare this model with Mistral Instruct v0.3. ### Testing Data, Factors & Metrics #### Testing Data We generated a test set using GPT-4o. #### Factors N/A #### Metrics We computed BLEU, ROUGE, and BERTScore on the validation set. ROUGE and BERTScore are more relevant here; BLEU is better suited to shorter tasks like machine translation. 
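As a reference for reproducing the numbers below, here is a minimal sketch using the Hugging Face `evaluate` library. It assumes plain corpus-level scoring was used; the prediction/reference strings are placeholders, not data from the actual validation set.

```python
import evaluate

# Placeholder generations and human references standing in for the validation set
preds = ["Try a consistent bedtime routine; it worked wonders for my two kids."]
refs = ["We found that a fixed evening routine helped our toddler settle."]

bleu = evaluate.load("bleu")
rouge = evaluate.load("rouge")
bertscore = evaluate.load("bertscore")

print(bleu.compute(predictions=preds, references=[[r] for r in refs]))  # one reference list per prediction
print(rouge.compute(predictions=preds, references=refs))
print(bertscore.compute(predictions=preds, references=refs, lang="en"))
```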
### Results BLEU Score

| **Model** | **BLEU** | **P@1** | **P@2** | **P@3** | **P@4** | **Length Ratio** |
|-----------------------------|----------|---------|---------|---------|---------|------------------|
| Mistral Instruct v0.3 | 0.00695 | 0.1661 | 0.0140 | 0.0023 | 0.0004 | 1.59 |
| Fine-Tuned PretendParentAI | 0.00624 | 0.1740 | 0.0169 | 0.0016 | 0.0003 | 1.73 |

Both models have very poor BLEU scores; BLEU is better suited to short tasks like machine translation. The length ratio shows that both models generate text that is longer than the human responses.

ROUGE Score

| **Model** | **ROUGE-1** | **ROUGE-2** | **ROUGE-L** | **ROUGE-Lsum** |
|-----------------------------|-------------|-------------|-------------|----------------|
| Mistral Instruct v0.3 | 0.1774 | 0.0161 | 0.0977 | 0.1040 |
| Fine-Tuned PretendParentAI | 0.2068 | 0.0215 | 0.1057 | 0.1059 |

ROUGE scores are better for the fine-tuned model.

BERTScore (Average)

| **Model** | **Precision** | **Recall** | **F1 Score** |
|-----------------------------|---------------|------------|--------------|
| Mistral Instruct v0.3 | 0.8334 | 0.8440 | 0.8386 |
| Fine-Tuned PretendParentAI | 0.8323 | 0.8462 | 0.8391 |

BERTScores are very similar for both models.

#### Summary We fine-tuned Mistral Instruct v0.3 on reddit data from r/parenting to generate a PEFT model that can offer parenting advice while simulating reddit users who are parents. ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** A100 - **Hours used:** 4 - **Cloud Provider:** Google Colab - **Compute Region:** USA - **Carbon Emitted:** 0.37 ## Model Card Contact Prerna Chikersal ([email protected])
ggozzy/blockassist-bc-stubby_yapping_mandrill_1756775414
ggozzy
2025-09-02T01:11:30Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "stubby yapping mandrill", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:11:24Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - stubby yapping mandrill --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
abcorrea/tlmbd2-0.5k
abcorrea
2025-09-02T01:08:58Z
18
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "text-generation-inference", "unsloth", "conversational", "en", "base_model:unsloth/Qwen3-1.7B", "base_model:finetune:unsloth/Qwen3-1.7B", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2025-08-31T22:44:30Z
--- base_model: unsloth/Qwen3-1.7B tags: - text-generation-inference - transformers - unsloth - qwen3 license: apache-2.0 language: - en --- # Uploaded finetuned model - **Developed by:** abcorrea - **License:** apache-2.0 - **Finetuned from model :** unsloth/Qwen3-1.7B This qwen3 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
mooperyou/blockassist-bc-beaked_frisky_ox_1756775289
mooperyou
2025-09-02T01:08:28Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "beaked frisky ox", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:08:10Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - beaked frisky ox --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
BootesVoid/cmenpebyx080stlqb6sh050gc_cmf1tica5095isr53jf7ev2pj
BootesVoid
2025-09-02T01:07:58Z
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
2025-09-02T01:07:55Z
--- license: other license_name: flux-1-dev-non-commercial-license license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md language: - en tags: - flux - diffusers - lora - replicate base_model: "black-forest-labs/FLUX.1-dev" pipeline_tag: text-to-image # widget: # - text: >- # prompt # output: # url: https://... instance_prompt: JAYA --- # Cmenpebyx080Stlqb6Sh050Gc_Cmf1Tica5095Isr53Jf7Ev2Pj <Gallery /> ## About this LoRA This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI. It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer/train ## Trigger words You should use `JAYA` to trigger the image generation. ## Run this LoRA with an API using Replicate ```py import replicate input = { "prompt": "JAYA", "lora_weights": "https://huggingface.co/BootesVoid/cmenpebyx080stlqb6sh050gc_cmf1tica5095isr53jf7ev2pj/resolve/main/lora.safetensors" } output = replicate.run( "black-forest-labs/flux-dev-lora", input=input ) for index, item in enumerate(output): with open(f"output_{index}.webp", "wb") as file: file.write(item.read()) ``` ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda') pipeline.load_lora_weights('BootesVoid/cmenpebyx080stlqb6sh050gc_cmf1tica5095isr53jf7ev2pj', weight_name='lora.safetensors') image = pipeline('JAYA').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters) ## Training details - Steps: 2500 - Learning rate: 9e-05 - LoRA rank: 16 ## Contribute your own examples You can use the [community tab](https://huggingface.co/BootesVoid/cmenpebyx080stlqb6sh050gc_cmf1tica5095isr53jf7ev2pj/discussions) to add images that show off what you’ve made with this LoRA.
ggozzy/blockassist-bc-stubby_yapping_mandrill_1756775161
ggozzy
2025-09-02T01:07:17Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "stubby yapping mandrill", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:07:10Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - stubby yapping mandrill --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
John6666/nova-anime-xl-il-v110-sdxl
John6666
2025-09-02T01:06:32Z
0
0
diffusers
[ "diffusers", "safetensors", "text-to-image", "stable-diffusion", "stable-diffusion-xl", "anime", "hentai", "2D", "2.5D", "illustration", "fantasy", "landscape", "colorful", "digital art", "posing", "pose structure", "hands", "characters", "merge", "noobai", "Illustrious XL v2.0", "illustrious", "en", "base_model:Laxhar/noobai-XL-1.1", "base_model:merge:Laxhar/noobai-XL-1.1", "base_model:OnomaAIResearch/Illustrious-XL-v2.0", "base_model:merge:OnomaAIResearch/Illustrious-XL-v2.0", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
2025-09-02T01:01:00Z
--- license: other license_name: faipl-1.0-sd license_link: https://freedevproject.org/faipl-1.0-sd/ language: - en library_name: diffusers pipeline_tag: text-to-image tags: - text-to-image - stable-diffusion - stable-diffusion-xl - anime - hentai - 2D - 2.5D - illustration - fantasy - landscape - colorful - digital art - posing - pose structure - hands - characters - merge - noobai - Illustrious XL v2.0 - illustrious base_model: - OnomaAIResearch/Illustrious-XL-v2.0 - Laxhar/noobai-XL-1.1 --- Original model is [here](https://civitai.com/models/376130/nova-anime-xl?modelVersionId=2173013). This model was created by [Crody](https://civitai.com/user/Crody).
akirafudo/blockassist-bc-insectivorous_bold_lion_1756775087
akirafudo
2025-09-02T01:05:12Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "insectivorous bold lion", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:05:08Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - insectivorous bold lion --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
calegpedia/blockassist-bc-stealthy_slimy_rooster_1756773428
calegpedia
2025-09-02T01:04:13Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "stealthy slimy rooster", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:04:10Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - stealthy slimy rooster --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
acidjp/blockassist-bc-pesty_extinct_prawn_1756772710
acidjp
2025-09-02T01:03:31Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "pesty extinct prawn", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T01:03:27Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - pesty extinct prawn --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
marcoprado/phi-3-mini-LoRA
marcoprado
2025-09-02T01:01:47Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:microsoft/Phi-3-mini-4k-instruct", "base_model:finetune:microsoft/Phi-3-mini-4k-instruct", "endpoints_compatible", "region:us" ]
null
2025-09-02T01:01:43Z
--- base_model: microsoft/Phi-3-mini-4k-instruct library_name: transformers model_name: phi-3-mini-LoRA tags: - generated_from_trainer - trl - sft licence: license --- # Model Card for phi-3-mini-LoRA This model is a fine-tuned version of [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?" generator = pipeline("text-generation", model="marcoprado/phi-3-mini-LoRA", device="cuda") output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0] print(output["generated_text"]) ``` ## Training procedure [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/marcoprado-complutense-university-of-madrid/Phi3-mini-ft-python-code/runs/w9bq4lmx) This model was trained with SFT. ### Framework versions - TRL: 0.12.2 - Transformers: 4.46.3 - Pytorch: 2.8.0 - Datasets: 4.0.0 - Tokenizers: 0.20.3 ## Citations Cite TRL as: ```bibtex @misc{vonwerra2022trl, title = {{TRL: Transformer Reinforcement Learning}}, author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec}, year = 2020, journal = {GitHub repository}, publisher = {GitHub}, howpublished = {\url{https://github.com/huggingface/trl}} } ```
amethyst9/1555588
amethyst9
2025-09-02T00:59:35Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:59:34Z
[View on Civ Archive](https://civarchive.com/models/1463367?modelVersionId=1654951)
crystalline7/134423
crystalline7
2025-09-02T00:59:17Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:59:17Z
[View on Civ Archive](https://civarchive.com/models/156774?modelVersionId=175992)
okuzarabasi/Qwen2.5-0.5B-Instruct-Gensyn-Swarm-grunting_toothy_elk
okuzarabasi
2025-09-02T00:58:34Z
76
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "generated_from_trainer", "rl-swarm", "grpo", "gensyn", "I am grunting toothy elk", "unsloth", "trl", "genrl-swarm", "I am grunting_toothy_elk", "conversational", "arxiv:2402.03300", "base_model:Gensyn/Qwen2.5-0.5B-Instruct", "base_model:finetune:Gensyn/Qwen2.5-0.5B-Instruct", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-05-04T04:34:50Z
--- base_model: Gensyn/Qwen2.5-0.5B-Instruct library_name: transformers model_name: Qwen2.5-0.5B-Instruct-Gensyn-Swarm-grunting_toothy_elk tags: - generated_from_trainer - rl-swarm - grpo - gensyn - I am grunting toothy elk - unsloth - trl - genrl-swarm - I am grunting_toothy_elk licence: license --- # Model Card for Qwen2.5-0.5B-Instruct-Gensyn-Swarm-grunting_toothy_elk This model is a fine-tuned version of [Gensyn/Qwen2.5-0.5B-Instruct](https://huggingface.co/Gensyn/Qwen2.5-0.5B-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?" generator = pipeline("text-generation", model="okuzarabasi/Qwen2.5-0.5B-Instruct-Gensyn-Swarm-grunting_toothy_elk", device="cuda") output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0] print(output["generated_text"]) ``` ## Training procedure This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300). ### Framework versions - TRL: 0.15.2 - Transformers: 4.48.2 - Pytorch: 2.5.1 - Datasets: 3.6.0 - Tokenizers: 0.21.1 ## Citations Cite GRPO as: ```bibtex @article{zhihong2024deepseekmath, title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}}, author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo}, year = 2024, eprint = {arXiv:2402.03300}, } ``` Cite TRL as: ```bibtex @misc{vonwerra2022trl, title = {{TRL: Transformer Reinforcement Learning}}, author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec}, year = 2020, journal = {GitHub repository}, publisher = {GitHub}, howpublished = {\url{https://github.com/huggingface/trl}} } ```
haider-shah-viral-video-clip-35-second/New.full.videos.haider.shah.Viral.Video.Official.Tutorial
haider-shah-viral-video-clip-35-second
2025-09-02T00:58:22Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:58:11Z
seraphimzzzz/130233
seraphimzzzz
2025-09-02T00:58:02Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:58:02Z
[View on Civ Archive](https://civarchive.com/models/152948?modelVersionId=171215)
seraphimzzzz/1654596
seraphimzzzz
2025-09-02T00:57:46Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:57:45Z
[View on Civ Archive](https://civarchive.com/models/1550026?modelVersionId=1753852)
seraphimzzzz/162258
seraphimzzzz
2025-09-02T00:56:57Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:56:57Z
[View on Civ Archive](https://civarchive.com/models/188063?modelVersionId=211175)
ultratopaz/823329
ultratopaz
2025-09-02T00:56:12Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:56:12Z
[View on Civ Archive](https://civarchive.com/models/818959?modelVersionId=915781)
ultratopaz/137788
ultratopaz
2025-09-02T00:56:04Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:56:04Z
[View on Civ Archive](https://civarchive.com/models/159977?modelVersionId=179948)
John6666/diving-illustrious-anime-v130-vae-sdxl
John6666
2025-09-02T00:55:47Z
0
0
diffusers
[ "diffusers", "safetensors", "text-to-image", "stable-diffusion", "stable-diffusion-xl", "anime", "style", "realistic", "2.5D", "flat coloring", "illustrious", "en", "base_model:OnomaAIResearch/Illustrious-xl-early-release-v0", "base_model:finetune:OnomaAIResearch/Illustrious-xl-early-release-v0", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
2025-09-02T00:47:18Z
--- license: other license_name: faipl-1.0-sd license_link: https://freedevproject.org/faipl-1.0-sd/ language: - en library_name: diffusers pipeline_tag: text-to-image tags: - text-to-image - stable-diffusion - stable-diffusion-xl - anime - style - realistic - 2.5D - flat coloring - illustrious base_model: OnomaAIResearch/Illustrious-xl-early-release-v0 --- Original model is [here](https://civitai.com/models/1170176/diving-illustrious-anime?modelVersionId=2161441). This model was created by [DivingSuit](https://civitai.com/user/DivingSuit).
amethyst9/445749
amethyst9
2025-09-02T00:55:39Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:55:38Z
[View on Civ Archive](https://civarchive.com/models/475456?modelVersionId=528845)
seraphimzzzz/152216
seraphimzzzz
2025-09-02T00:55:06Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:55:06Z
[View on Civ Archive](https://civarchive.com/models/177141?modelVersionId=198867)
crystalline7/517578
crystalline7
2025-09-02T00:54:31Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:54:31Z
[View on Civ Archive](https://civarchive.com/models/541970?modelVersionId=602588)
crystalline7/402046
crystalline7
2025-09-02T00:54:23Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:54:22Z
[View on Civ Archive](https://civarchive.com/models/434365?modelVersionId=483830)
amethyst9/1885231
amethyst9
2025-09-02T00:54:06Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:54:06Z
[View on Civ Archive](https://civarchive.com/models/1756726?modelVersionId=1988136)
crystalline7/441829
crystalline7
2025-09-02T00:53:58Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:53:57Z
[View on Civ Archive](https://civarchive.com/models/471791?modelVersionId=524858)
oberbics/llama-3.1-newspaper-arguments-full
oberbics
2025-09-02T00:53:41Z
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-09-01T20:26:12Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
crystalline7/1562440
crystalline7
2025-09-02T00:53:32Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:53:32Z
[View on Civ Archive](https://civarchive.com/models/1469370?modelVersionId=1661965)
ultratopaz/130195
ultratopaz
2025-09-02T00:53:24Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:53:24Z
[View on Civ Archive](https://civarchive.com/models/152896?modelVersionId=171155)
seraphimzzzz/186791
seraphimzzzz
2025-09-02T00:53:17Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:53:17Z
[View on Civ Archive](https://civarchive.com/models/214308?modelVersionId=241412)
crystalline7/134424
crystalline7
2025-09-02T00:53:09Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:53:09Z
[View on Civ Archive](https://civarchive.com/models/156776?modelVersionId=175994)
crystalline7/321562
crystalline7
2025-09-02T00:53:01Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:53:01Z
[View on Civ Archive](https://civarchive.com/models/356687?modelVersionId=398732)
ultratopaz/162245
ultratopaz
2025-09-02T00:52:19Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:52:19Z
[View on Civ Archive](https://civarchive.com/models/188040?modelVersionId=211154)
ultratopaz/137729
ultratopaz
2025-09-02T00:52:11Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:52:11Z
[View on Civ Archive](https://civarchive.com/models/159912?modelVersionId=179866)
ultratopaz/137745
ultratopaz
2025-09-02T00:51:54Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:51:54Z
[View on Civ Archive](https://civarchive.com/models/159927?modelVersionId=179885)
crystalline7/484428
crystalline7
2025-09-02T00:51:47Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:51:46Z
[View on Civ Archive](https://civarchive.com/models/511634?modelVersionId=568633)
crystalline7/1561911
crystalline7
2025-09-02T00:51:38Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:51:38Z
[View on Civ Archive](https://civarchive.com/models/1468928?modelVersionId=1661420)
crystalline7/321448
crystalline7
2025-09-02T00:51:30Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:51:30Z
[View on Civ Archive](https://civarchive.com/models/356549?modelVersionId=398586)
seraphimzzzz/146573
seraphimzzzz
2025-09-02T00:50:54Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:50:54Z
[View on Civ Archive](https://civarchive.com/models/170560?modelVersionId=191647)
sekirr/blockassist-bc-masked_tenacious_whale_1756774189
sekirr
2025-09-02T00:50:29Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "masked tenacious whale", "arxiv:2504.07091", "region:us" ]
null
2025-09-02T00:50:26Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - masked tenacious whale --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
crystalline7/1570654
crystalline7
2025-09-02T00:50:27Z
0
0
null
[ "region:us" ]
null
2025-09-02T00:50:27Z
[View on Civ Archive](https://civarchive.com/models/1476371?modelVersionId=1669925)