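# Gradio app that generates a technical blog post from a topic and keywords using GPT-2.
# Requires gradio, torch, transformers, and python-dotenv (HF_TOKEN is read from a .env file).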
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from dotenv import load_dotenv
import os

load_dotenv()

hf_token = os.getenv("HF_TOKEN")

model_name = "openai-community/gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)


def generate_blog(topic, keywords):
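    """Build a prompt from the topic and keywords and generate blog text with GPT-2."""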
    prompt_template = f"""
You are a technical content writer. Write a detailed and informative blog on the following topic.

Topic: {topic}

Keywords: {keywords}

Make sure the blog covers the following sections:
1. Introduction
2. Detailed Explanation
3. Examples
4. Conclusion

Blog:
"""

    inputs = tokenizer(prompt_template, return_tensors="pt", max_length=512, truncation=True)
    # do_sample=True is needed for temperature to take effect; GPT-2 has no pad token,
    # so the EOS token id is reused to silence the padding warning.
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_length=800,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
    )
    blog_content = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return blog_content


iface = gr.Interface(
    fn=generate_blog,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter the blog topic", label="Blog Topic"),
        gr.Textbox(lines=2, placeholder="Enter keywords (comma-separated)", label="Keywords"),
    ],
    outputs=gr.Textbox(label="Generated Blog Content"),
    title="Technical Blog Generator",
    description="Generate a detailed technical blog by providing a topic and relevant keywords.",
)

if __name__ == "__main__":
    iface.launch()