varun500 commited on
Commit
3606011
·
1 Parent(s): 7a7fb4c

Update app.py: drop `.to(0)` GPU placement so the model and tokenized inputs run on CPU

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -2,7 +2,7 @@ import torch
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import streamlit as st
4
  model_id = "RWKV/rwkv-raven-1b5"
5
- model = AutoModelForCausalLM.from_pretrained(model_id).to(0)
6
  tokenizer = AutoTokenizer.from_pretrained(model_id)
7
 
8
  st.title("Raven Text Generator")
@@ -15,7 +15,7 @@ if st.button("Generate Response"):
15
  if question.strip() != "":
16
  # Generate response based on the provided question
17
  prompt = f"### Instruction: {question}\n### Response:"
18
- inputs = tokenizer(prompt, return_tensors="pt").to(0)
19
  output = model.generate(inputs["input_ids"], max_new_tokens=100)
20
  generated_text = tokenizer.decode(output[0].tolist(), skip_special_tokens=True)
21
  st.markdown("## Generated Response")
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import streamlit as st
4
  model_id = "RWKV/rwkv-raven-1b5"
5
+ model = AutoModelForCausalLM.from_pretrained(model_id)
6
  tokenizer = AutoTokenizer.from_pretrained(model_id)
7
 
8
  st.title("Raven Text Generator")
 
15
  if question.strip() != "":
16
  # Generate response based on the provided question
17
  prompt = f"### Instruction: {question}\n### Response:"
18
+ inputs = tokenizer(prompt, return_tensors="pt")
19
  output = model.generate(inputs["input_ids"], max_new_tokens=100)
20
  generated_text = tokenizer.decode(output[0].tolist(), skip_special_tokens=True)
21
  st.markdown("## Generated Response")