Athspi committed
Commit c21b225 · verified · 1 parent: 8129ab8

Create app.py

Files changed (1):
  app.py +65 -0
app.py ADDED
@@ -0,0 +1,65 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ # Load model and tokenizer
+ model_id = "suayptalha/FastLlama-3.2-3B-Instruct"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype=torch.float16,
+     device_map="auto"
+ )
+
+ # System prompt
+ system_prompt = "You are a friendly assistant named FastLlama."
+
+ def format_prompt(message: str, history: list):
+     prompt = f"<|system|>\n{system_prompt}</s>\n"
+     for user_msg, bot_msg in history:
+         prompt += f"<|user|>\n{user_msg}</s>\n<|assistant|>\n{bot_msg}</s>\n"
+     prompt += f"<|user|>\n{message}</s>\n<|assistant|>\n"
+     return prompt
+
+ def respond(message: str, history: list):
+     # Format the prompt with chat history
+     full_prompt = format_prompt(message, history)
+
+     # Tokenize input
+     inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)
+
+     # Generate response (pass the attention mask along with the input ids)
+     output = model.generate(
+         **inputs,
+         max_new_tokens=256,
+         temperature=0.7,
+         top_p=0.9,
+         repetition_penalty=1.1,
+         do_sample=True,
+         pad_token_id=tokenizer.eos_token_id
+     )
+
+     # Decode only the newly generated tokens
+     response = tokenizer.decode(
+         output[0][inputs.input_ids.shape[-1]:],
+         skip_special_tokens=True
+     )
+
+     return response
+
+ # Create chat interface
+ chat = gr.ChatInterface(
+     fn=respond,
+     title="FastLlama-3.2-3B Chat",
+     description="Chat with the FastLlama-3.2-3B-Instruct AI assistant",
+     examples=[
+         ["Explain quantum computing in simple terms"],
+         ["Write a poem about artificial intelligence"],
+         ["What's the meaning of life?"]
+     ],
+     cache_examples=False
+ )
+
+ # Launch the app
+ if __name__ == "__main__":
+     chat.launch(server_name="0.0.0.0")
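
A note on the prompt format: the hand-rolled <|system|>/<|user|>/<|assistant|> tags in format_prompt follow the Zephyr/TinyLlama convention, which may not match this model's own chat template. If the tokenizer ships a template, an equivalent formatting step could be sketched as follows (this replacement is an assumption, not part of the commit):

def format_prompt(message: str, history: list):
    # Build the messages list expected by apply_chat_template
    messages = [{"role": "system", "content": system_prompt}]
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    # tokenize=False returns the formatted string; add_generation_prompt
    # appends the assistant header so the model continues as the assistant
    return tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

Running the script also assumes accelerate is installed alongside gradio, torch, and transformers, since device_map="auto" delegates device placement to it.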