Create app.py
app.py
ADDED
@@ -0,0 +1,38 @@
+import torch
+from transformers import pipeline
+
+def load_model():
+    # device_map="auto" requires the accelerate package to be installed
+    return pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")
+
+models = [load_model() for _ in range(3)]  # three copies of the same model; they differ only through sampling randomness
+tokenizer = models[0].tokenizer
+
+# Build the chat prompt from the model's chat template
+messages = [
+    {
+        "role": "system",
+        "content": "You are a friendly chatbot who always responds in the style of a pirate. Use pirate vocabulary and mannerisms in your replies.",
+    },
+    {"role": "user", "content": "How many helicopters can a human eat in one sitting, matey?"},
+]
+prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+# Ensemble generation: sample one completion from each model
+responses = []
+for model in models:
+    outputs = model(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95, return_full_text=False)
+    response = outputs[0]["generated_text"]
+    responses.append(response)
+
+# "Average" the completions by character-level majority vote, truncated to the shortest one
+averaged_text = ""
+for i in range(min(len(r) for r in responses)):
+    char_counts = {}
+    for response in responses:
+        char = response[i]
+        char_counts[char] = char_counts.get(char, 0) + 1
+    most_frequent = max(char_counts.items(), key=lambda x: x[1])
+    averaged_text += most_frequent[0]  # keep the most common character at this position
+
+print(averaged_text)
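A note on the averaging step: a character-level majority vote over independently sampled completions will usually splice unrelated texts together, since the three outputs diverge after the first sampled token. The more standard way to ensemble language models is to average their next-token probability distributions at each decoding step and sample from the mixture. The following is a minimal sketch of that idea, not part of this commit; it assumes the same TinyLlama checkpoint, reuses the `prompt` string built in app.py above, and uses a deliberately simple sampling loop for clarity.

# Hypothetical sketch (not in this commit): logit-averaging ensemble decoding
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(name)
models = [AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.bfloat16) for _ in range(3)]

ids = tokenizer(prompt, return_tensors="pt").input_ids  # `prompt` as built in app.py
for _ in range(256):
    with torch.no_grad():
        # Average the next-token distributions of all ensemble members
        probs = torch.stack([m(ids).logits[:, -1].float().softmax(-1) for m in models]).mean(0)
    next_id = torch.multinomial(probs, num_samples=1)
    ids = torch.cat([ids, next_id], dim=-1)
    if next_id.item() == tokenizer.eos_token_id:
        break
print(tokenizer.decode(ids[0], skip_special_tokens=True))

With three copies of the same checkpoint, the averaged distribution is identical to any single member's, so an ensemble of this kind only adds value when `models` holds different checkpoints or fine-tunes.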