from transformers import pipeline
import streamlit as st

# Cache the pipeline so the model is loaded once, not on every Streamlit rerun.
@st.cache_resource
def load_pipeline():
    # Note: meta-llama/Meta-Llama-3-8B is a gated model; you need to accept its
    # license on Hugging Face and authenticate (e.g. `huggingface-cli login`).
    return pipeline("text-generation", model="meta-llama/Meta-Llama-3-8B")

def generate_blog(topic, no_words):
    pipe = load_pipeline()

    # Create the prompt
    prompt = f"Write a blog on the topic '{topic}' within {no_words} words."

    # Generate the blog content. max_new_tokens bounds only the completion
    # (max_length would also count the prompt tokens); tokens map only roughly
    # to words, so treat the word budget as approximate.
    result = pipe(
        prompt,
        max_new_tokens=int(no_words),
        num_return_sequences=1,
        return_full_text=False,  # return only the completion, not the prompt
    )

    # Extract the generated text
    return result[0]['generated_text']

# Streamlit app
st.set_page_config(page_title="Blog Generator", page_icon="πŸ“")
st.title("Blog Content Generator πŸ“")

# Input fields
topic = st.text_input("Enter the Blog Topic")
no_words = st.number_input("Enter the Number of Words", min_value=50, max_value=1000, value=200, step=50)

if st.button("Generate Blog"):
    if topic and no_words:
        with st.spinner("Generating blog content..."):
            blog_content = generate_blog(topic, no_words)
            st.subheader("Generated Blog Content")
            st.write(blog_content)
    else:
        st.error("Please provide both the blog topic and the number of words.")
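
# A minimal way to try this version (assuming the script is saved as app.py;
# the filename is illustrative):
#   pip install streamlit transformers torch
#   streamlit run app.py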



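# ---------------------------------------------------------------------------
# Alternative implementation: the same blog generator built with LangChain and
# a local quantized LLaMA 2 model via CTransformers. Each version is a
# standalone script; save and run them separately, since a Streamlit app may
# call st.set_page_config only once.
# ---------------------------------------------------------------------------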
import streamlit as st
from langchain.prompts import PromptTemplate
from langchain_community.llms import CTransformers

# Function to get a response from the LLaMA 2 model
def getLLamaresponse(input_text, no_words, blog_style):
    # LLaMA 2 model: expects a local GGML checkpoint, e.g. downloaded from the
    # TheBloke/Llama-2-7B-Chat-GGML repository on Hugging Face and placed next
    # to this script.
    llm = CTransformers(
        model='llama-2-7b-chat.ggmlv3.q8_0.bin',
        model_type='llama',
        config={'max_new_tokens': 256, 'temperature': 0.01}
    )

    # Prompt template
    template = """
        Write a blog for a {blog_style} job profile on the topic "{input_text}"
        within {no_words} words.
    """

    prompt = PromptTemplate(
        input_variables=["blog_style", "input_text", "no_words"],
        template=template
    )

    # Generate the response from the LLaMA 2 model
    response = llm.invoke(prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
    return response

def main():
    st.set_page_config(
        page_title="Generate Blogs",
        page_icon='πŸ€–',
        layout='centered',
        initial_sidebar_state='collapsed'
    )

    st.header("Generate Blogs πŸ€–")

    input_text = st.text_input("Enter the Blog Topic")

    # Two columns for the additional input fields
    col1, col2 = st.columns([5, 5])

    with col1:
        no_words = st.text_input('Number of Words')
    with col2:
        blog_style = st.selectbox('Writing the blog for', ('Researchers', 'Data Scientist', 'Common People'), index=0)

    submit = st.button("Generate")

    # Generate and display the final response
    if submit:
        if not input_text or not no_words:
            st.error("Please enter both the blog topic and the number of words.")
        else:
            try:
                response = getLLamaresponse(input_text, no_words, blog_style)
                st.write(response)
            except Exception as e:
                st.error(f"An error occurred: {e}")

if __name__ == "__main__":
    main()
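
# A minimal way to try this version (assuming the script is saved as app2.py;
# the filename is illustrative):
#   pip install streamlit langchain langchain-community ctransformers
#   streamlit run app2.py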