isayahc committed on
Commit
e1b8370
·
1 Parent(s): 10d5da7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -9
app.py CHANGED
@@ -56,17 +56,23 @@ model_id = "TheBloke/zephyr-7B-beta-GGUF"
56
  device = "cpu"
57
 
58
 
59
- llm_model = CTransformers(
60
- model="TheBloke/zephyr-7B-beta-GGUF",
61
- model_type="mistral",
62
- max_new_tokens=4384,
63
- temperature=0.2,
64
- repetition_penalty=1.13,
65
- device=device # Set the device explicitly during model initialization
66
- )
 
 
 
67
 
 
 
68
 
69
- tokenizer = AutoTokenizer.from_pretrained(model_id)
 
70
  # model = AutoModelForCausalLM.from_pretrained(model_id)
71
 
72
  # pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10)
 
56
  device = "cpu"
57
 
58
 
59
+ # llm_model = CTransformers(
60
+ # model="TheBloke/zephyr-7B-beta-GGUF",
61
+ # model_type="mistral",
62
+ # max_new_tokens=4384,
63
+ # temperature=0.2,
64
+ # repetition_penalty=1.13,
65
+ # device=device # Set the device explicitly during model initialization
66
+ # )
67
+
68
+ # Load model directly
69
+ from transformers import AutoTokenizer, AutoModelForCausalLM
70
 
71
+ tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
72
+ model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
73
 
74
+
75
+ # tokenizer = AutoTokenizer.from_pretrained(model_id)
76
  # model = AutoModelForCausalLM.from_pretrained(model_id)
77
 
78
  # pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10)