Update app.py
app.py CHANGED
@@ -37,6 +37,7 @@ model = AutoModelForCausalLM.from_pretrained(
     low_cpu_mem_usage=True
 )
 
+print( "initalized model")
 # model_id = "gpt2"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 # model = AutoModelForCausalLM.from_pretrained(model_id)
@@ -44,6 +45,7 @@ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tok
 hf = HuggingFacePipeline(pipeline=pipe)
 
 
+print( "initalized second model")
 
 # tokenizer = AutoTokenizer.from_pretrained(model_id)
 tokenizer.use_default_system_prompt = False
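For context, below is a minimal sketch of the app.py region these two hunks patch, reconstructed only from the context lines visible in the diff. The model_id value, the HuggingFacePipeline import path, and the max_new_tokens setting are assumptions, since the real values are not shown here (the hunk header truncates at "max_new_tok").

# Sketch of the patched section; placeholder values are marked as assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain_community.llms import HuggingFacePipeline  # import path assumed

model_id = "gpt2"  # placeholder; the commented-out line in the diff suggests gpt2 was used earlier

# Load the causal LM with reduced peak CPU memory during weight loading.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    low_cpu_mem_usage=True,
)
print("initalized model")  # progress marker added by this commit (string kept verbatim)

tokenizer = AutoTokenizer.from_pretrained(model_id)

# Wrap model + tokenizer in a text-generation pipeline and hand it to LangChain.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=128,  # the real value is truncated in the hunk header; 128 is an assumption
)
hf = HuggingFacePipeline(pipeline=pipe)
print("initalized second model")  # second progress marker added by this commit

# Disable the tokenizer's built-in system prompt so the app controls prompting itself.
tokenizer.use_default_system_prompt = False

The two added print calls presumably act as startup progress markers, so the Space logs show how far initialization gets before anything fails.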