nurqoneah commited on
Commit
dcd7e0b
·
verified ·
1 Parent(s): 65fab5e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -12
app.py CHANGED
@@ -12,12 +12,12 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
12
  import os
13
  from dotenv import load_dotenv
14
  from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
15
-
16
 
17
 
18
  warnings.filterwarnings("ignore")
19
  load_dotenv()
20
- os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("HUGGINGFACEHUB_API_TOKEN")
21
 
22
  # Constants and configurations
23
  APP_TITLE = "πŸ’Š Asisten Kesehatan Feminacare"
@@ -68,15 +68,22 @@ def create_llm():
68
 
69
  # llm = HuggingFacePipeline(pipeline=text_generation_pipeline)
70
 
71
- return HuggingFaceHub(
72
- repo_id=MODEL_NAME,
73
- model_kwargs={
74
- "temperature": 0.7, # Balanced between creativity and accuracy
75
- "max_new_tokens": 1024,
76
- "top_p": 0.9,
77
- "frequency_penalty": 0.5
78
- }
79
- )
 
 
 
 
 
 
 
80
  # llm = HuggingFacePipeline.from_model_id(
81
  # model_id=MODEL_NAME,
82
  # task="text-generation",
@@ -86,7 +93,7 @@ def create_llm():
86
  # repetition_penalty=1.03,
87
  # ),
88
  # )
89
- # return llm
90
 
91
 
92
  # chat_model = ChatHuggingFace(llm=llm)
 
12
  import os
13
  from dotenv import load_dotenv
14
  from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
15
+ from langchain_community.llms import HuggingFaceEndpoint
16
 
17
 
18
  warnings.filterwarnings("ignore")
19
  load_dotenv()
20
+ api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
21
 
22
  # Constants and configurations
23
  APP_TITLE = "πŸ’Š Asisten Kesehatan Feminacare"
 
68
 
69
  # llm = HuggingFacePipeline(pipeline=text_generation_pipeline)
70
 
71
+ # return HuggingFaceHub(
72
+ # repo_id=MODEL_NAME,
73
+ # model_kwargs={
74
+ # "temperature": 0.7, # Balanced between creativity and accuracy
75
+ # "max_new_tokens": 1024,
76
+ # "top_p": 0.9,
77
+ # "frequency_penalty": 0.5
78
+ # }
79
+ # )
80
+ llm = HuggingFaceEndpoint(
81
+ repo_id=MODEL_NAME,
82
+ huggingfacehub_api_token = api_token,
83
+ temperature = 0.7,
84
+ max_new_tokens = 1024,
85
+ top_k = 0.9,
86
+ )
87
  # llm = HuggingFacePipeline.from_model_id(
88
  # model_id=MODEL_NAME,
89
  # task="text-generation",
 
93
  # repetition_penalty=1.03,
94
  # ),
95
  # )
96
+ return llm
97
 
98
 
99
  # chat_model = ChatHuggingFace(llm=llm)