FlawedLLM
committed on
Update app.py
Browse files
app.py
CHANGED
|
@@ -46,7 +46,7 @@ import torch
|
|
| 46 |
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, AutoConfig
|
| 47 |
|
| 48 |
tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_9")
|
| 49 |
-
config = AutoConfig.from_pretrained(
|
| 50 |
|
| 51 |
# quantization_config = BitsAndBytesConfig(
|
| 52 |
# load_in_4bit=True,
|
|
|
|
| 46 |
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, AutoConfig
|
| 47 |
|
| 48 |
tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_9")
|
| 49 |
+
config = AutoConfig.from_pretrained("FlawedLLM/Bhashini_9") # Load configuration
|
| 50 |
|
| 51 |
# quantization_config = BitsAndBytesConfig(
|
| 52 |
# load_in_4bit=True,
|