Spaces:
Sleeping
Sleeping
increased max_length to 6000
Browse files
app.py
CHANGED
@@ -15,7 +15,7 @@ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float
|
|
15 |
def generate_sql_query(user_input):
|
16 |
""" Convert natural language input into an SQL query """
|
17 |
inputs = tokenizer(user_input, return_tensors="pt", padding=True, truncation=True).to(device)
|
18 |
-
outputs = model.generate(**inputs, max_length=
|
19 |
return tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
|
20 |
|
21 |
@app.post("/chat")
|
|
|
15 |
def generate_sql_query(user_input):
    """Convert a natural-language request into an SQL query string.

    Args:
        user_input: Free-form text describing the desired query.

    Returns:
        The model's decoded output text (expected to be SQL).
    """
    # Tokenize the prompt and move the tensors to the same device as the model.
    encoded = tokenizer(
        user_input, return_tensors="pt", padding=True, truncation=True
    ).to(device)
    # Deterministic decoding: beam search (2 beams), no sampling,
    # capped at 600 total tokens (prompt + generation).
    generated = model.generate(**encoded, max_length=600, do_sample=False, num_beams=2)
    # Decode only the first (best) sequence, dropping special tokens.
    return tokenizer.decode(
        generated[0], skip_special_tokens=True, clean_up_tokenization_spaces=True
    )
|
20 |
|
21 |
@app.post("/chat")
|