|
import gradio as gr |
|
import torch |
|
from transformers import T5Tokenizer, T5ForConditionalGeneration |
|
|
|
|
|
# Load the fine-tuned T5 checkpoint that sits next to this script.
model_path = "./"

tokenizer = T5Tokenizer.from_pretrained(model_path)

# Pick the GPU when one is available, then move the model there and
# switch it to eval mode (disables dropout etc.) for inference.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = T5ForConditionalGeneration.from_pretrained(model_path).to(device)
model.eval()
|
|
|
def generate_sql(user_input):
    """Generate a SQL query from a natural-language prompt.

    Args:
        user_input: Full prompt text (schema + user question + instructions,
            as described in the UI). Long inputs are truncated by the tokenizer.

    Returns:
        The model's decoded generation with special tokens stripped.
    """
    inputs = tokenizer(
        user_input,
        return_tensors="pt",
        truncation=True,
        padding=True,
    ).to(device)

    # inference_mode() is a stricter, slightly faster variant of no_grad()
    # for pure-inference code paths (no autograd graph, no view tracking).
    with torch.inference_mode():
        # max_length caps the TOTAL generated sequence at 512 tokens.
        outputs = model.generate(**inputs, max_length=512)

    return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
# Wire up the web UI: one big textbox in, the generated SQL out.
demo = gr.Interface(
    fn=generate_sql,
    inputs=gr.Textbox(lines=15, placeholder="Paste your full SQL prompt here..."),
    outputs=gr.Textbox(label="Generated SQL Query"),
    title="HISAB AI β Text2SQL Generator",
    description="Paste your schema + user query + instructions. Model returns a PostgreSQL query.",
)
demo.launch()
|
|