swapnasg commited on
Commit
a5cd913
·
verified ·
1 Parent(s): 5f80e7a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -15
app.py CHANGED
@@ -1,24 +1,17 @@
1
  import gradio as gr
2
- import os
3
-
4
-
5
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
6
 
7
  model_id = "tiiuae/falcon-rw-1b"
8
 
9
- os.makedirs("offload", exist_ok=True)
10
-
11
  tokenizer = AutoTokenizer.from_pretrained(model_id)
12
- model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", offload_folder="./offload")
13
 
14
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
15
 
16
- def chat_with_expert(message, history):
17
- prompt = f"<s>[INST] You are an expert assistant. Answer with clarity and depth.\n{message} [/INST]"
18
- response = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)[0]['generated_text']
19
- answer = response.split('[/INST]')[-1].strip()
20
- history.append((message, answer))
21
  return history, history
22
 
23
- chatbot = gr.ChatInterface(fn=chat_with_expert, title="Expert Chat Assistant")
24
- chatbot.launch()
 
1
"""Gradio chat app serving the tiiuae/falcon-rw-1b causal language model."""
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "tiiuae/falcon-rw-1b"

# Load tokenizer and model once at startup; the pipeline wraps both
# so generation is a single call per user message.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

generator = pipeline("text-generation", model=model, tokenizer=tokenizer)


def chat_with_user(message, history=None):
    """Generate a reply to *message* and append the exchange to *history*.

    Args:
        message: User input text, passed verbatim as the generation prompt.
        history: Running list of (message, reply) tuples. A fresh list is
            created when omitted — fixes the shared mutable-default bug
            (``history=[]`` persisted appended turns across independent calls).

    Returns:
        A ``(history, history)`` pair (display state and state value).
    """
    if history is None:
        history = []
    prompt = message
    # NOTE(review): text-generation pipelines return prompt + continuation in
    # 'generated_text', so the stored reply includes the user's own message.
    # The previous revision stripped the prompt via a '[/INST]' split —
    # confirm whether echoing the prompt here is intended.
    result = generator(prompt, max_new_tokens=200)[0]['generated_text']
    history.append((message, result.strip()))
    return history, history


# NOTE(review): gr.ChatInterface conventionally expects fn(message, history)
# to return the reply string; returning (history, history) looks carried over
# from a Blocks-style handler — verify it renders correctly.
gr.ChatInterface(chat_with_user, title="Expert Chat Assistant").launch()