Spaces:
Runtime error
Runtime error
File size: 1,426 Bytes
ac5b777 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
import gradio as gr
from transformers import TFT5ForConditionalGeneration, RobertaTokenizer
# load saved finetuned model
# NOTE(review): from_pretrained downloads the checkpoint from the Hugging Face
# Hub on first run — requires network access; loads a TF (not PyTorch) model.
model = TFT5ForConditionalGeneration.from_pretrained('Yati05/TF-CodeT5-base')
# load saved tokenizer
# CodeT5 uses a RoBERTa-style BPE tokenizer, hence RobertaTokenizer here.
tokenizer = RobertaTokenizer.from_pretrained('Yati05/TF-CodeT5-base')
def chat(chat_history, user_input):
    """Generate Python code for *user_input* and append the exchange to the chat.

    Parameters
    ----------
    chat_history : list[tuple[str, str]]
        Existing (prompt, response) pairs displayed in the Gradio Chatbot.
    user_input : str
        Natural-language prompt describing the code to generate.

    Returns
    -------
    list[tuple[str, str]]
        A new history list with the (user_input, generated_code) pair appended.
    """
    # The fine-tuned CodeT5 checkpoint expects this task prefix; keep it verbatim.
    query = "Generate Python: " + user_input
    # Pad/truncate to the fixed length the model was fine-tuned with.
    encoded_text = tokenizer(
        query, return_tensors='tf', padding='max_length', truncation=True, max_length=48
    )
    # inference: generation capped at 128 output tokens
    generated_code = model.generate(
        encoded_text["input_ids"],
        attention_mask=encoded_text["attention_mask"],
        max_length=128,
    )
    # Decode the generated token ids, dropping <pad>/<s>/</s> special tokens.
    decoded_code = tokenizer.decode(generated_code.numpy()[0], skip_special_tokens=True)
    # Return a new list instead of mutating chat_history in place,
    # which is what Gradio expects from a Chatbot update callback.
    return chat_history + [(user_input, decoded_code)]
# Build the Gradio UI: a centered title, a chatbot pane, and a text prompt
# wired so that submitting the prompt runs chat() and refreshes the chatbot.
with gr.Blocks(title="Python Code Generation") as demo:
    gr.HTML(value="<style>h1 {text-align: center;}</style><h1>Python Code Generation</h1>")
    chatbot = gr.Chatbot([], elem_id="chatbot")
    message = gr.Textbox(label="Text Prompt", placeholder="Enter the prompt to generate code")
    # On Enter: pass (chatbot state, prompt text) to chat(); its return value
    # replaces the chatbot contents.
    message.submit(chat, [chatbot, message], chatbot)

# queue() serializes incoming requests so concurrent users don't collide on
# the single in-process model; debug=True surfaces tracebacks in the console.
demo.queue().launch(debug=True)