g0th committed on
Commit 03836f6 · verified · 1 Parent(s): d91a205

Update app.py

Files changed (1)
  1. app.py +15 -8
app.py CHANGED
@@ -2,17 +2,25 @@ import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import PyPDF2
 import torch
+import os
 
-st.set_page_config(page_title="Perplexity-style Q&A (OpenHermes)", layout="wide")
-st.title("🧠 Perplexity-style Study Assistant with OpenHermes-2.5")
+st.set_page_config(page_title="Perplexity-style Q&A (Mistral Auth)", layout="wide")
+st.title("🧠 AI Study Assistant using Mistral 7B (Authenticated)")
+
+# ✅ Load Hugging Face token from secrets
+hf_token = os.getenv("HF_TOKEN")
 
 @st.cache_resource
 def load_model():
-    tokenizer = AutoTokenizer.from_pretrained("teknium/OpenHermes-2.5-Mistral-7B")
+    tokenizer = AutoTokenizer.from_pretrained(
+        "mistralai/Mistral-7B-Instruct-v0.1",
+        token=hf_token
+    )
     model = AutoModelForCausalLM.from_pretrained(
-        "teknium/OpenHermes-2.5-Mistral-7B",
+        "mistralai/Mistral-7B-Instruct-v0.1",
         torch_dtype=torch.float16,
-        device_map="auto"
+        device_map="auto",
+        token=hf_token
     )
     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)
     return pipe
@@ -33,8 +41,7 @@ if uploaded_file:
 
 if st.button("Generate Answer"):
     with st.spinner("Generating answer..."):
-        prompt = f"<|system|>You are a helpful study assistant.<|user|>Context:\n{context}\n\nQuestion: {query}<|assistant|>"
+        prompt = f"[INST] Use the following context to answer the question:\n\n{context}\n\nQuestion: {query} [/INST]"
         result = textgen(prompt)[0]["generated_text"]
-        answer = result.replace(prompt, "").strip()
         st.success("Answer:")
-        st.write(answer)
+        st.write(result.replace(prompt, "").strip())
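
Note on the prompt handling above: the updated code builds the Mistral instruction prompt by hand with [INST] ... [/INST] markers and then strips the prompt back out of the generated text. As a minimal sketch only, assuming a recent transformers release (>= 4.34) and the pipeline returned by load_model() in this commit, the tokenizer's chat template could render the same style of prompt and the pipeline could be asked to return only the newly generated text; the ask() helper below is illustrative and not part of this repository:

    # Sketch only (assumption): alternative to the hand-built [INST] prompt and
    # the result.replace(prompt, "") cleanup used in the commit above.
    def ask(pipe, context: str, query: str) -> str:
        messages = [{
            "role": "user",
            "content": f"Use the following context to answer the question:\n\n{context}\n\nQuestion: {query}",
        }]
        # Mistral-7B-Instruct's chat template renders an [INST] ... [/INST] prompt
        prompt = pipe.tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        # return_full_text=False makes the pipeline return only the new tokens,
        # so the prompt does not need to be stripped from the output
        return pipe(prompt, return_full_text=False)[0]["generated_text"].strip()

With a helper like this, the button handler would reduce to st.write(ask(textgen, context, query)).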