import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("hrshtsharma2012/NL2SQL-Picard-final")
model = AutoModelForSeq2SeqLM.from_pretrained("hrshtsharma2012/NL2SQL-Picard-final")

# Initialize the pipeline
nl2sql_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
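# Generation options such as max_length or num_beams could be passed to the
# pipeline call below if longer or beam-searched SQL is needed; the defaults
# are assumed to be sufficient for the short Spider-style questions used here.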

def generate_sql(query):
    # Use the model to generate SQL from the natural language query
    results = nl2sql_pipeline(query)
    # The pipeline returns a list of dicts; take the generated text of the first
    sql_query = results[0]['generated_text']
    return sql_query
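
# Hypothetical direct usage (the exact SQL string returned depends on the model):
#   generate_sql("How many heads of the departments are older than 56 ?")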

# Example questions from the Spider dataset
example_questions = [
    "How many heads of the departments are older than 56 ?",
    "List the name, born state and age of the heads of departments ordered by age.",
    "List the creation year, name and budget of each department.",
    "What are the maximum and minimum budget of the departments?",
    "In which year were most departments established?"
]

# Create a Gradio interface
interface = gr.Interface(
    fn=generate_sql,
    inputs=gr.Textbox(lines=2, placeholder="Enter your natural language query here..."),
    outputs="text",
    examples=example_questions,
    title="NL to SQL with Picard",
    description="This model converts natural language questions into SQL. It is trained on the Spider dataset. Try one of the example questions or enter your own!"
)

# Launch the app
if __name__ == "__main__":
    interface.launch()
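    # Optionally, interface.launch(share=True) would also create a temporary
    # public link when running outside a hosted environment.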