Sagar23p committed on
Commit
ae0bfe5
·
verified ·
1 Parent(s): ab959be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -11
app.py CHANGED
@@ -19,25 +19,27 @@ load_dotenv()
19
 
20
 
21
 
22
- # initialize the client
23
- client = OpenAI(
24
- base_url="https://api-inference.huggingface.co/v1",
25
- api_key=os.environ.get('HUGGINGFACE_API')#"hf_xxx" # Replace with your token
26
- )
27
 
28
 
 
29
 
30
 
31
  #Create supported models
32
  model_links ={
33
- "Mistral":"mistralai/Mistral-7B-Instruct-v0.2",
34
- "Gemma-7B":"google/gemma-7b-it",
35
- "Gemma-2B":"google/gemma-2b-it",
36
- "Zephyr-7B-β":"HuggingFaceH4/zephyr-7b-beta",
37
  # "Llama-2":"meta-llama/Llama-2-7b-chat-hf"
38
-
39
  }
40
 
 
 
41
  #Pull info about the model to display
42
  model_info ={
43
  "Mistral":
@@ -151,7 +153,7 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
151
 
152
  # Display assistant response in chat message container
153
  with st.chat_message("assistant"):
154
- stream = client.chat.completions.create(
155
  model=model_links[selected_model],
156
  messages=[
157
  {"role": m["role"], "content": m["content"]}
 
19
 
20
 
21
 
22
+ # # initialize the client
23
+ # client = OpenAI(
24
+ # base_url="https://api-inference.huggingface.co/v1",
25
+ # api_key=os.environ.get('HUGGINGFACE_API')#"hf_xxx" # Replace with your token
26
+ # )
27
 
28
 
29
+ base_url="https://api-inference.huggingface.co/v1/"
30
 
31
 
32
  #Create supported models
33
  model_links ={
34
+ "Mistral":base_url+"mistralai/Mistral-7B-Instruct-v0.2",
35
+ "Gemma-7B":base_url+"google/gemma-7b-it",
36
+ "Gemma-2B":base_url+"google/gemma-2b-it",
37
+ "Zephyr-7B-β":base_url+"HuggingFaceH4/zephyr-7b-beta",
38
  # "Llama-2":"meta-llama/Llama-2-7b-chat-hf"
 
39
  }
40
 
41
+ headers = {"Authorization": f"Bearer {os.environ.get('HUGGINGFACE_API')}"}
42
+
43
  #Pull info about the model to display
44
  model_info ={
45
  "Mistral":
 
153
 
154
  # Display assistant response in chat message container
155
  with st.chat_message("assistant"):
156
+ stream = InferenceClient(
157
  model=model_links[selected_model],
158
  messages=[
159
  {"role": m["role"], "content": m["content"]}