Update app.py
app.py CHANGED
@@ -43,30 +43,30 @@ def create_llm():
     """Initialize the language model with optimized parameters"""


+    bnb_config = BitsAndBytesConfig(
+        load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
+    )

+    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME,
+        quantization_config=bnb_config
+    )
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

+    terminators = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")]

+    text_generation_pipeline = pipeline(
+        model=model,
+        tokenizer=tokenizer,
+        task="text-generation",
+        temperature=0.2,
+        do_sample=True,
+        repetition_penalty=1.1,
+        return_full_text=False,
+        max_new_tokens=200,
+        eos_token_id=terminators,
+    )

+    llm = HuggingFacePipeline(pipeline=text_generation_pipeline)

     # return HuggingFaceHub(
     #     repo_id=MODEL_NAME,
@@ -77,13 +77,13 @@ def create_llm():
     #     "frequency_penalty": 0.5
     # }
     # )
-    llm = HuggingFaceEndpoint(
-        repo_id=MODEL_NAME,
-        huggingfacehub_api_token = api_token,
-        temperature = 0.7,
-        max_new_tokens = 1024,
-        top_k = 0.9,
-        )
+    # llm = HuggingFaceEndpoint(
+    #     repo_id=MODEL_NAME,
+    #     huggingfacehub_api_token = api_token,
+    #     temperature = 0.7,
+    #     max_new_tokens = 1024,
+    #     top_k = 0.9,
+    #     )
     # llm = HuggingFacePipeline.from_model_id(
     #     model_id=MODEL_NAME,
     #     task="text-generation",
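For reference, a minimal sketch of the imports and usage the new code path relies on. Everything in it is an assumption rather than part of this commit: the MODEL_NAME value is a placeholder (the "<|eot_id|>" terminator suggests a Llama-3-style instruct model), 4-bit loading via BitsAndBytesConfig additionally requires the bitsandbytes package and a CUDA GPU, HuggingFacePipeline is imported here from the langchain-huggingface integration (older code pulls it from langchain_community.llms), and create_llm() is assumed to end with "return llm".

# Sketch only: imports assumed at the top of app.py, plus a usage example of the wrapped pipeline.
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from langchain_huggingface import HuggingFacePipeline  # assumption; may instead be langchain_community.llms

# Placeholder model id; app.py defines MODEL_NAME elsewhere.
MODEL_NAME = "meta-llama/Meta-Llama-3-8B-Instruct"

llm = create_llm()  # assumes the function returns the HuggingFacePipeline wrapper built above
print(llm.invoke("Summarize what this Space does in one sentence."))  # standard LangChain Runnable call

The practical effect of the change is that generation runs locally inside the Space on a 4-bit quantized model instead of going through the hosted HuggingFaceEndpoint API, so an API token is no longer needed at inference time, at the cost of requiring GPU memory for the quantized weights.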