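"""Streamlit app that generates project ideas with mistralai/mathstral-7B-v0.1."""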
import streamlit as st
from transformers import AutoTokenizer, MistralForCausalLM

MODEL_ID = 'mistralai/mathstral-7B-v0.1'

# Load the tokenizer and the Mathstral model (a Mistral-based checkpoint) once
# and cache them across Streamlit reruns instead of reloading on every request.
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = MistralForCausalLM.from_pretrained(MODEL_ID)
    return tokenizer, model
# Generate a project-idea response from the Mathstral model
def getLLamaresponse(input_text, keywords, blog_style):
    tokenizer, model = load_model()

    # Prompt template
    template = """
    Generate a project idea for {blog_style} using keywords like {keywords} for the profession of {input_text}.
    """
    # Format the prompt
    prompt = template.format(blog_style=blog_style, input_text=input_text, keywords=keywords)

    # Tokenize the prompt and generate a completion (greedy decoding)
    inputs = tokenizer(prompt, return_tensors='pt')
    output_ids = model.generate(**inputs, max_new_tokens=250, do_sample=False)

    # Decode only the newly generated tokens, skipping the echoed prompt
    return tokenizer.decode(output_ids[0][inputs['input_ids'].shape[1]:],
                            skip_special_tokens=True)
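# Alternative sketch: transformers' text-generation pipeline could replace the
# manual tokenize/generate/decode steps above. A minimal version, assuming the
# same model id:
#
#     from transformers import pipeline
#     generator = pipeline('text-generation', model=MODEL_ID)
#     text = generator(prompt, max_new_tokens=250)[0]['generated_text']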
st.set_page_config(page_title="Generate Project Idea",
                   page_icon='🤖',
                   layout='centered',
                   initial_sidebar_state='collapsed')

st.header("Generate Project Idea 🤖")
input_text = st.text_input("Enter the Topic")
# Two columns for the additional input fields
col1, col2 = st.columns([5, 5])
with col1:
    keywords = st.text_input('Keywords')
with col2:
    blog_style = st.selectbox('Generating project idea for',
                              ('Researchers', 'Data Scientist', 'Software Developer', 'Common People'),
                              index=0)
submit = st.button("Generate")
# Final response
if submit:
    response = getLLamaresponse(input_text, keywords, blog_style)
    st.write(response)
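# Usage: with streamlit and transformers installed, launch the app with
#   streamlit run app.py
# Note: mathstral-7B-v0.1 is a 7B-parameter model, so expect a large download
# and significant memory use when it loads (a GPU is strongly recommended).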