import gradio as gr
from transformers import AutoTokenizer, AutoModel, GPT2LMHeadModel, GPT2Tokenizer
import torch

# Load the NASA-specific bi-encoder model and tokenizer
bi_encoder_model_name = "nasa-impact/nasa-smd-ibm-st-v2"
bi_tokenizer = AutoTokenizer.from_pretrained(bi_encoder_model_name)
bi_model = AutoModel.from_pretrained(bi_encoder_model_name)

# Load the GPT-2 model and tokenizer for response generation
gpt2_model_name = "gpt2"
gpt2_tokenizer = GPT2Tokenizer.from_pretrained(gpt2_model_name)
gpt2_model = GPT2LMHeadModel.from_pretrained(gpt2_model_name)
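
# Note: both models load on CPU by default. If a GPU is available, they could
# be moved with bi_model.to("cuda") / gpt2_model.to("cuda"); input tensors
# would then need to be moved to the same device before each forward pass.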

def encode_text(text):
    inputs = bi_tokenizer(text, return_tensors='pt', padding=True, truncation=True, max_length=128)
    # Inference only: run the bi-encoder without tracking gradients
    with torch.no_grad():
        outputs = bi_model(**inputs)
    # Mean-pool the last hidden state along the sequence dimension to get a 2D embedding
    return outputs.last_hidden_state.mean(dim=1).numpy()
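
# Illustrative sketch (not part of the original app): bi-encoder embeddings are
# meant for semantic comparison, e.g. scoring how relevant a context passage is
# to a question. `cosine_similarity` is a hypothetical helper built only on
# encode_text above.
def cosine_similarity(text_a, text_b):
    import numpy as np
    vec_a = encode_text(text_a)[0]
    vec_b = encode_text(text_b)[0]
    return float(np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b)))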

def generate_response(user_input, context_text=""):
    # Build a structured prompt for GPT-2. The context is included as plain
    # text: interpolating a raw embedding vector into the prompt would be
    # meaningless to GPT-2.
    if context_text:
        combined_input = f"Question: {user_input}\nContext: {context_text}\nAnswer:"
    else:
        combined_input = f"Question: {user_input}\nAnswer:"

    # Generate a response with sampling enabled; temperature and top_p have no
    # effect under the default greedy decoding
    gpt2_inputs = gpt2_tokenizer.encode(combined_input, return_tensors='pt')
    gpt2_outputs = gpt2_model.generate(
        gpt2_inputs,
        max_length=150,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.2,
        pad_token_id=gpt2_tokenizer.eos_token_id  # GPT-2 has no pad token
    )
    generated_text = gpt2_tokenizer.decode(gpt2_outputs[0], skip_special_tokens=True)

    return generated_text

def chatbot(user_input, context=""):
    # Encode the optional context with the bi-encoder. The embedding is not fed
    # to GPT-2 directly (a number vector is not valid prompt text); it is the
    # hook for retrieval or relevance scoring, while the raw text is prompted.
    context_embedding = encode_text(context) if context else None
    response = generate_response(user_input, context)
    return response

# Create the Gradio interface
iface = gr.Interface(
    fn=chatbot,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your message here..."),
        gr.Textbox(lines=2, placeholder="Enter context here (optional)..."),
    ],
    outputs="text",
    title="Context-Aware Dynamic Response Chatbot",
    description="A chatbot that uses a NASA-specific bi-encoder model to encode the input context and GPT-2 to generate dynamic responses."
)

# Launch the interface
iface.launch()
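# If a shareable public link is needed, Gradio supports iface.launch(share=True).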