import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from dotenv import load_dotenv
import os

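# Load environment variables from a local .env file, if one is present.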
load_dotenv()

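# Load the tokenizer and model once at startup so every request reuses them.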
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-rw-1b", trust_remote_code=True)

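# Gradio callback: generate a short poem for the submitted topic.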
def generate_poem(topic):
    prompt_template = f"""
You are a content writer. Write a poem of a maximum of 10 sentences on the following topic.

Topic: {topic}

Poem:
"""
    # Tokenize the prompt, truncating it to the model's input budget.
    inputs = tokenizer(prompt_template, return_tensors="pt", max_length=512, truncation=True)
    # Generate one continuation; the attention mask and pad token id avoid generation warnings.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_length=800, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)
    poem = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return poem

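# Wire the generator into a simple Gradio interface: one topic textbox in, one poem textbox out.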
iface = gr.Interface(
    fn=generate_poem,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter the poem topic", label="Poem Topic"),
    ],
    outputs=gr.Textbox(label="Generated Poem"),
    title="Poem Generator",
    description="Generate a poem based on a provided topic.",
)

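# share=True exposes a temporary public Gradio link in addition to the local server.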
if __name__ == "__main__":
    iface.launch(share=True)