mohamedyd committed (verified)
Commit 3648075 · 1 Parent(s): 0010947

Update app.py

Files changed (1):
  1. app.py +14 -14
app.py CHANGED
@@ -2,35 +2,35 @@ import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel
 
-# Load the base model and the fine-tuned model
+# Load the base model and adapter
 @st.cache_resource
 def load_model():
-    base_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-3B-Instruct")
-    model = PeftModel.from_pretrained(base_model, "mohamedyd/Natural-Coder-3B-Instruct-V1")
-    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-3B-Instruct")
+    base_model_name = "Qwen/Qwen2.5-Coder-3B-Instruct" # Ensure this is the correct base model
+    adapter_model_name = "mohamedyd/Natural-Coder-3B-Instruct-V1"
+
+    # Load the base model
+    base_model = AutoModelForCausalLM.from_pretrained(base_model_name, trust_remote_code=True)
+
+    # Load the PEFT adapter on top of the base model
+    model = PeftModel.from_pretrained(base_model, adapter_model_name)
+
+    # Load tokenizer from base model
+    tokenizer = AutoTokenizer.from_pretrained(base_model_name, trust_remote_code=True)
+
     return model, tokenizer
 
 model, tokenizer = load_model()
 
-# Streamlit app
+# Streamlit App UI
 st.title("Natural-Coder-3B-Instruct-V1 Model Interaction")
 
-# Text input for user prompt
 user_input = st.text_area("Enter your prompt here:", height=150)
 
-# Button to generate response
 if st.button("Generate Response"):
     if user_input:
-        # Tokenize the input
        inputs = tokenizer(user_input, return_tensors="pt")
-
-        # Generate response
        outputs = model.generate(**inputs, max_length=512, num_return_sequences=1)
-
-        # Decode the output
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-        # Display the response
        st.write("Model Response:")
        st.write(response)
     else:
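
A note on the generation call that carries over unchanged: in transformers, max_length=512 bounds the total sequence length, prompt tokens included, so a long prompt leaves little room for the reply, and the decoded output also echoes the prompt back. A minimal alternative sketch, assuming the same model and tokenizer returned by load_model(); the chat-template step and the max_new_tokens value are suggestions, not part of this commit:

    # Qwen2.5 instruct checkpoints are chat models; formatting the prompt with
    # the tokenizer's chat template matches how they were trained.
    messages = [{"role": "user", "content": user_input}]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(prompt, return_tensors="pt")

    # max_new_tokens bounds only the continuation, independent of prompt length.
    outputs = model.generate(**inputs, max_new_tokens=512)

    # Slice off the prompt tokens so only the model's reply is decoded.
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)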
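If cold-start or per-token latency matters, the adapter could optionally be folded into the base weights once at load time; a sketch assuming the adapter is LoRA-based (peft's merge_and_unload() is only available for merge-capable adapter types):

    # Optional: merge the adapter into the base weights so inference runs on a
    # plain transformers model, without the PeftModel indirection.
    model = PeftModel.from_pretrained(base_model, adapter_model_name)
    model = model.merge_and_unload()

Either way, the app is launched as usual with streamlit run app.py.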