from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr  # the UI library is "gradio" (not "cradio"); "gr" is its conventional alias

# Load the first model and tokenizer.
# MPT checkpoints ship custom modeling code, so trust_remote_code=True is required.
model_1 = AutoModelForCausalLM.from_pretrained("mosaicml/mpt-7b-chat", trust_remote_code=True)
tokenizer_1 = AutoTokenizer.from_pretrained("mosaicml/mpt-7b-chat")

# Load the second model and tokenizer
model_2 = AutoModelForCausalLM.from_pretrained("nomic-ai/gpt4all-j")
tokenizer_2 = AutoTokenizer.from_pretrained("nomic-ai/gpt4all-j")
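
# Note: these are ~7B- and ~6B-parameter checkpoints, so loading both in full
# precision needs tens of GB of memory. A sketch of a lighter-weight load (an
# assumption: requires a CUDA GPU plus the torch and accelerate packages):
#
#   import torch
#   model_1 = AutoModelForCausalLM.from_pretrained(
#       "mosaicml/mpt-7b-chat", trust_remote_code=True,
#       torch_dtype=torch.float16, device_map="auto",
#   )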

def generate_responses(prompt):
    # Encode the prompt using both tokenizers
    input_ids_1 = tokenizer_1.encode(prompt, return_tensors="pt")
    input_ids_2 = tokenizer_2.encode(prompt, return_tensors="pt")

    # Generate responses using both models; max_new_tokens bounds the length of
    # the generated reply itself, whereas max_length would also count the prompt
    output_1 = model_1.generate(input_ids_1, max_new_tokens=50, num_return_sequences=1)
    output_2 = model_2.generate(input_ids_2, max_new_tokens=50, num_return_sequences=1)

    # Decode the responses using the corresponding tokenizers
    response_1 = tokenizer_1.decode(output_1[0], skip_special_tokens=True)
    response_2 = tokenizer_2.decode(output_2[0], skip_special_tokens=True)

    return response_1, response_2

# Gradio has no TextInput component; a labeled Textbox serves as the text input
iface = gr.Interface(
    fn=generate_responses,
    inputs=gr.Textbox(label="Enter your question:"),
    outputs=[gr.Textbox(label="Response from model 1"), gr.Textbox(label="Response from model 2")],
    title="AI Chatbot",
    description="Get responses from two different AI models.",
)

iface.launch()
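
# launch() serves the app locally (Gradio defaults to http://127.0.0.1:7860).
# For a temporary public demo link, one option is iface.launch(share=True).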