Darshan-BugendaiTech commited on
Commit
05fbf30
·
1 Parent(s): a112bdd

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -0
app.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, pipeline, TextIteratorStreamer
3
+ import gradio as gr
4
+ from torch import bfloat16
5
+ from threading import Thread
6
+
7
# Model checkpoint to load from the Hugging Face Hub.
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"

# Load tokenizer and a 4-bit quantized model; device_map="auto" lets
# accelerate spread layers over available devices (GPU/CPU offload).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, device_map="auto", torch_dtype=torch.float16, load_in_4bit=True
)

# Chat Interface
# Bug fix: corrected typo "honet" -> "honest" in the system prompt.
system_prompt = "You are a helpful assistant who helps user to complete its query. If you don't know the answer be honest don't provide false information."
16
+
17
def prompt_build(system_prompt, user_inp, hist):
    """Assemble a Zephyr-style chat prompt from system text, history, and new input.

    Args:
        system_prompt: Instruction text placed in the opening ``<|system|>`` turn.
        user_inp: The user's latest message.
        hist: Iterable of ``(user_message, assistant_reply)`` pairs from prior turns.

    Returns:
        A single prompt string ending with an open ``<|assistant|>`` turn for
        the model to complete.
    """
    prompt = f"<|system|>\n{system_prompt}\n"

    for user_msg, assistant_msg in hist:
        # Bug fix: the original omitted the newline after the assistant reply,
        # fusing it with the next <|user|> tag (e.g. "reply<|user|>").
        prompt += f"<|user|>\n{user_msg}\n<|assistant|>\n{assistant_msg}\n"

    prompt += f"<|user|>\n{user_inp}\n<|assistant|>\n"
    return prompt
25
+
26
def chat(user_input, history):
    """Stream an assistant reply for ``user_input`` given chat ``history``.

    Yields progressively longer partial responses so the Gradio chat UI can
    render token-by-token streaming output.

    Args:
        user_input: The user's latest message (may be empty).
        history: List of (user, assistant) message pairs from prior turns.
    """
    if not user_input:
        # Guard: nothing to generate from; prompt the user instead.
        yield "Please write your query so that I can assist you even better."
        return

    prompt = prompt_build(system_prompt, user_input, history)
    # Bug fix: move inputs to the model's actual device instead of a
    # hard-coded "cuda", so the app also works with CPU or offloaded
    # placements produced by device_map="auto".
    model_inputs = tokenizer([prompt], return_tensors="pt").to(model.device)

    # skip_prompt=True so only newly generated text is streamed back.
    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=2000,
        do_sample=True,
        top_p=0.7,
        temperature=0.8
    )
    # Run generation in a background thread; the streamer feeds decoded
    # tokens back to this generator as they are produced.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    model_output = ""
    for new_text in streamer:
        model_output += new_text
        yield model_output
    return model_output
52
+
53
+
54
# Streaming chat UI. NOTE(review): the third example contained mojibake
# non-breaking spaces ("Β" from a bad encoding round-trip); they are
# repaired to plain spaces here.
with gr.Blocks() as demo:
    chatbot = gr.ChatInterface(
        fn=chat,
        examples=[
            ["Hello, Good Morning!"],
            ["Who is Virat Kohli?"],
            ["Write an email to Client call Darshan Kholakiya whose email address is [email protected] and address is Alaknanda Building, Rawalapada, Borivali East, Mumbai-400008. My name is Vijay and I am a sales person from Nividia, my phone number is 7710020978 and email is [email protected] and I want to pitch Darshan to buy semi conductor chips from us."],
        ],
        title="Marketing Email Generator",
    )

# share=True exposes a public Gradio link; debug=True blocks and prints logs.
demo.queue().launch(share=True, debug=True)