Update README.md
README.md CHANGED

@@ -1,6 +1,8 @@
 ---
 library_name: transformers
-
+license: apache-2.0
+language:
+- en
 ---
 
 # Model Card for Model ID

@@ -211,7 +213,35 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
 
 ### Model Architecture and Objective
 
-[More Information Needed]
+```python
+
+from transformers import MistralConfig, ViTConfig, VisionEncoderDecoderConfig, VisionEncoderDecoderModel
+
+# Initializing a ViT & Mistral style configuration
+config_encoder = ViTConfig()
+config_decoder = MistralConfig()
+
+config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
+
+# Initializing a ViT + Mistral model (with random weights) from the ViT & Mistral style configurations
+model = VisionEncoderDecoderModel(config=config)
+
+# Accessing the model configuration
+config_encoder = model.config.encoder
+config_decoder = model.config.decoder
+# Set the decoder config to a causal LM
+config_decoder.is_decoder = True
+config_decoder.add_cross_attention = True
+
+# Saving the model, including its configuration
+model.save_pretrained("my-model")
+
+# Loading the model and config from the saved folder
+encoder_decoder_config = VisionEncoderDecoderConfig.from_pretrained("my-model")
+model = VisionEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
+
+
+```
 
 ### Compute Infrastructure
 

@@ -253,6 +283,4 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
 
 ## Model Card Contact
 
-[More Information Needed]
-
-
+[More Information Needed]
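
The snippet this commit adds stops after reloading the saved configuration. As a complement (not part of the commit), here is a minimal sketch of how the resulting, randomly initialized ViT + Mistral encoder-decoder could be exercised for generation; the `google/vit-base-patch16-224-in21k` image processor and `mistralai/Mistral-7B-v0.1` tokenizer checkpoints, along with the token-id settings, are illustrative assumptions rather than anything specified by this model card.

```python
# Illustrative sketch only: exercises the "my-model" folder saved by the
# README snippet above. Processor/tokenizer checkpoints are assumptions.
from PIL import Image
from transformers import AutoTokenizer, ViTImageProcessor, VisionEncoderDecoderModel

model = VisionEncoderDecoderModel.from_pretrained("my-model")
image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")  # assumed
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")  # assumed decoder tokenizer

# Generation with an encoder-decoder model needs these ids to be set.
model.generation_config.decoder_start_token_id = tokenizer.bos_token_id
model.generation_config.pad_token_id = tokenizer.eos_token_id  # Mistral has no dedicated pad token

# A blank stand-in image; a real use case would load an actual picture.
image = Image.new("RGB", (224, 224))
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

# The decoder weights are random, so the generated text is meaningless;
# this only demonstrates the shape of the inference call.
generated_ids = model.generate(pixel_values=pixel_values, max_new_tokens=10)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
```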