mohamedyd committed
Commit 1a5c7c1 · verified · 1 Parent(s): 8cb86d2

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -1,13 +1,10 @@
  import streamlit as st
  from transformers import AutoModelForCausalLM, AutoTokenizer
  from peft import PeftModel
- from dotenv import load_dotenv
- load_dotenv()
  from huggingface_hub import login
  import os
 
- access_token = os.environ["HUGGING_FACE_HUB_TOKEN"]
- login(token=access_token)
+ login()
 
  # Load the base model and adapter
  @st.cache_resource
@@ -17,9 +14,12 @@ def load_model():
 
      # Load the base model
      base_model = AutoModelForCausalLM.from_pretrained(base_model_name, trust_remote_code=True)
+
+     # Load the configuration
+     config = PeftConfig.from_pretrained(adapter_model_name)
 
      # Load the PEFT adapter on top of the base model
-     model = PeftModel.from_pretrained(base_model, adapter_model_name)
+     model = PeftModel.from_pretrained(base_model, adapter_model_name, config=config)
 
      # Load tokenizer from base model
      tokenizer = AutoTokenizer.from_pretrained(base_model_name, trust_remote_code=True)
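For reference, a minimal sketch of how the model-loading portion of app.py might read after this commit. The model identifiers and the return statement are placeholders: the lines that set base_model_name and adapter_model_name sit outside the visible hunks. Note also that the new code calls PeftConfig.from_pretrained while the visible import line only brings in PeftModel, so the sketch assumes PeftConfig is imported from peft as well.

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModel  # PeftConfig import assumed; the diff only shows PeftModel
from huggingface_hub import login
import os

# Authenticate with the Hugging Face Hub without an explicit token,
# replacing the removed dotenv/environment-variable flow.
login()

# Load the base model and adapter
@st.cache_resource
def load_model():
    # Placeholder identifiers; the real values are defined earlier in
    # load_model() and are not part of the visible diff.
    base_model_name = "some-org/base-model"
    adapter_model_name = "mohamedyd/some-adapter"

    # Load the base model
    base_model = AutoModelForCausalLM.from_pretrained(base_model_name, trust_remote_code=True)

    # Load the configuration
    config = PeftConfig.from_pretrained(adapter_model_name)

    # Load the PEFT adapter on top of the base model
    model = PeftModel.from_pretrained(base_model, adapter_model_name, config=config)

    # Load tokenizer from base model
    tokenizer = AutoTokenizer.from_pretrained(base_model_name, trust_remote_code=True)

    # Assumed: hand both objects back to the Streamlit app.
    return model, tokenizer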