Update README.md with new model card content
Browse files
README.md
CHANGED
|
@@ -54,7 +54,7 @@ import numpy as np
|
|
| 54 |
|
| 55 |
Use `generate()` to do text generation.
|
| 56 |
```python
|
| 57 |
- llama_lm = keras_hub.models.LlamaCausalLM.from_preset("llama2_7b_en"
|
| 58 |
llama_lm.generate("What is Keras?", max_length=500)
|
| 59 |
|
| 60 |
# Generate with batched prompts.
|
|
@@ -63,7 +63,7 @@ llama_lm.generate(["What is Keras?", "Give me your best brownie recipe."], max_l
|
|
| 63 |
|
| 64 |
Compile the `generate()` function with a custom sampler.
|
| 65 |
```python
|
| 66 |
- llama_lm = keras_hub.models.LlamaCausalLM.from_preset("llama2_7b_en"
|
| 67 |
llama_lm.compile(sampler="greedy")
|
| 68 |
llama_lm.generate("I want to say", max_length=30)
|
| 69 |
|
|
@@ -91,7 +91,7 @@ llama_lm.generate(prompt)
|
|
| 91 |
Call `fit()` on a single batch.
|
| 92 |
```python
|
| 93 |
features = ["The quick brown fox jumped.", "I forgot my homework."]
|
| 94 |
- llama_lm = keras_hub.models.LlamaCausalLM.from_preset("llama2_7b_en"
|
| 95 |
llama_lm.fit(x=features, batch_size=2)
|
| 96 |
```
|
| 97 |
|
|
@@ -122,7 +122,7 @@ import numpy as np
|
|
| 122 |
|
| 123 |
Use `generate()` to do text generation.
|
| 124 |
```python
|
| 125 |
- llama_lm = keras_hub.models.LlamaCausalLM.from_preset("hf://keras/llama2_7b_en"
|
| 126 |
llama_lm.generate("What is Keras?", max_length=500)
|
| 127 |
|
| 128 |
# Generate with batched prompts.
|
|
@@ -131,7 +131,7 @@ llama_lm.generate(["What is Keras?", "Give me your best brownie recipe."], max_l
|
|
| 131 |
|
| 132 |
Compile the `generate()` function with a custom sampler.
|
| 133 |
```python
|
| 134 |
- llama_lm = keras_hub.models.LlamaCausalLM.from_preset("hf://keras/llama2_7b_en"
|
| 135 |
llama_lm.compile(sampler="greedy")
|
| 136 |
llama_lm.generate("I want to say", max_length=30)
|
| 137 |
|
|
@@ -159,7 +159,7 @@ llama_lm.generate(prompt)
|
|
| 159 |
Call `fit()` on a single batch.
|
| 160 |
```python
|
| 161 |
features = ["The quick brown fox jumped.", "I forgot my homework."]
|
| 162 |
- llama_lm = keras_hub.models.LlamaCausalLM.from_preset("hf://keras/llama2_7b_en"
|
| 163 |
llama_lm.fit(x=features, batch_size=2)
|
| 164 |
```
|
| 165 |
|
|
|
|
| 54 |
|
| 55 |
Use `generate()` to do text generation.
|
| 56 |
```python
|
| 57 |
+ llama_lm = keras_hub.models.LlamaCausalLM.from_preset("llama2_7b_en")
|
| 58 |
llama_lm.generate("What is Keras?", max_length=500)
|
| 59 |
|
| 60 |
# Generate with batched prompts.
|
|
|
|
| 63 |
|
| 64 |
Compile the `generate()` function with a custom sampler.
|
| 65 |
```python
|
| 66 |
+ llama_lm = keras_hub.models.LlamaCausalLM.from_preset("llama2_7b_en")
|
| 67 |
llama_lm.compile(sampler="greedy")
|
| 68 |
llama_lm.generate("I want to say", max_length=30)
|
| 69 |
|
|
|
|
| 91 |
Call `fit()` on a single batch.
|
| 92 |
```python
|
| 93 |
features = ["The quick brown fox jumped.", "I forgot my homework."]
|
| 94 |
+ llama_lm = keras_hub.models.LlamaCausalLM.from_preset("llama2_7b_en")
|
| 95 |
llama_lm.fit(x=features, batch_size=2)
|
| 96 |
```
|
| 97 |
|
|
|
|
| 122 |
|
| 123 |
Use `generate()` to do text generation.
|
| 124 |
```python
|
| 125 |
+ llama_lm = keras_hub.models.LlamaCausalLM.from_preset("hf://keras/llama2_7b_en")
|
| 126 |
llama_lm.generate("What is Keras?", max_length=500)
|
| 127 |
|
| 128 |
# Generate with batched prompts.
|
|
|
|
| 131 |
|
| 132 |
Compile the `generate()` function with a custom sampler.
|
| 133 |
```python
|
| 134 |
+ llama_lm = keras_hub.models.LlamaCausalLM.from_preset("hf://keras/llama2_7b_en")
|
| 135 |
llama_lm.compile(sampler="greedy")
|
| 136 |
llama_lm.generate("I want to say", max_length=30)
|
| 137 |
|
|
|
|
| 159 |
Call `fit()` on a single batch.
|
| 160 |
```python
|
| 161 |
features = ["The quick brown fox jumped.", "I forgot my homework."]
|
| 162 |
+ llama_lm = keras_hub.models.LlamaCausalLM.from_preset("hf://keras/llama2_7b_en")
|
| 163 |
llama_lm.fit(x=features, batch_size=2)
|
| 164 |
```
|
| 165 |
|