import streamlit as st

from transformers import AutoTokenizer, MistralForCausalLM


# Cache the Mathstral tokenizer and model so they are loaded only once
# per process rather than on every Streamlit rerun.
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained('mistralai/mathstral-7B-v0.1')
    model = MistralForCausalLM.from_pretrained('mistralai/mathstral-7B-v0.1')
    return tokenizer, model


tokenizer, model = load_model()
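# Alternative to loading the model directly (a sketch, assuming the
# transformers text-generation pipeline; `generator` is illustrative):
# from transformers import pipeline
# generator = pipeline('text-generation', model='mistralai/mathstral-7B-v0.1')
# result = generator(prompt, max_new_tokens=250)[0]['generated_text']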
# Generate a project idea with the Mathstral model
def get_mathstral_response(input_text, keywords, blog_style):
    # Prompt template: input_text is the topic, blog_style the target profession
    template = """
    Generate a project idea on the topic of {input_text} for a {blog_style},
    using keywords like {keywords}.
    """

    # Format the prompt
    prompt = template.format(blog_style=blog_style, input_text=input_text, keywords=keywords)

    # Tokenize the prompt and generate; the original near-zero temperature
    # is equivalent to greedy decoding
    inputs = tokenizer(prompt, return_tensors='pt')
    output_ids = model.generate(**inputs, max_new_tokens=250, do_sample=False)

    # Decode only the newly generated tokens, skipping the prompt
    return tokenizer.decode(output_ids[0][inputs['input_ids'].shape[1]:],
                            skip_special_tokens=True)
st.set_page_config(page_title="Generate Project Idea",
                   page_icon='🤖',
                   layout='centered',
                   initial_sidebar_state='collapsed')
st.header("Generate Project Idea 🤖")
input_text = st.text_input("Enter the Topic")
# Two columns for the additional input fields
col1, col2 = st.columns([5, 5])
with col1:
    keywords = st.text_input('Keywords')
with col2:
    blog_style = st.selectbox('Generating project idea for',
                              ('Researchers', 'Data Scientist', 'Software Developer', 'Common People'),
                              index=0)
submit = st.button("Generate")
# Final response
if submit:
    response = get_mathstral_response(input_text, keywords, blog_style)
    st.write(response)
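# To try this locally (a sketch, assuming the file is saved as app.py and
# streamlit/transformers are installed):
#   streamlit run app.py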