Update app.py
app.py CHANGED

@@ -9,8 +9,8 @@ model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
 
 def getLLamaresponse(input_text, keywords, blog_style):
     # Load the LLaMA 2 model from Hugging Face
-    model_name = model
-    llm = pipeline('text-generation', model=model_name)
+    #model_name = model
+    #llm = pipeline('text-generation', model=model_name)
 
     # Prompt Template
     template = """
@@ -21,7 +21,7 @@ def getLLamaresponse(input_text, keywords, blog_style):
     prompt = template.format(blog_style=blog_style, input_text=input_text, keywords=keywords)
 
     # Generate the response from the LLaMA 2 model
-    response =
+    response = model(prompt, max_length=250, temperature=0.01)
     return response[0]['generated_text']
 
 st.set_page_config(page_title="Generate Project Idea",
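Note: as committed, the new line calls the AutoModelForCausalLM object directly on a prompt string. That model's forward pass expects token-ID tensors and returns logits, not the [{'generated_text': ...}] list that the following return line indexes into, so this revision will likely fail at runtime. A minimal sketch of a call with the expected return shape, using the transformers text-generation pipeline that the previous revision loaded (the example prompt string and do_sample flag are illustrative assumptions, not from the commit):

from transformers import pipeline

# Build a text-generation pipeline for the checkpoint named in the
# hunk's context line; the matching tokenizer is loaded automatically.
llm = pipeline("text-generation", model="meta-llama/Llama-2-7b-chat-hf")

# Called on a prompt string, the pipeline returns a list of dicts,
# so indexing with [0]['generated_text'] works as in getLLamaresponse.
# (Example prompt and do_sample=True are assumptions for illustration;
# temperature only takes effect when sampling is enabled.)
response = llm("An example prompt", max_length=250, temperature=0.01, do_sample=True)
print(response[0]["generated_text"])

With this shape, getLLamaresponse could keep its return response[0]['generated_text'] line unchanged.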