Update app.py
app.py
CHANGED
@@ -2,10 +2,14 @@ import streamlit as st
 from transformers import pipeline
 
 # Function to get response from LLaMA 2 model
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
 
 def getLLamaresponse(input_text, keywords, blog_style):
     # Load the LLaMA 2 model from Hugging Face
-    model_name =
+    model_name = model
     llm = pipeline('text-generation', model=model_name)
 
     # Prompt Template
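The change replaces the dangling `model_name =` assignment by loading the Llama 2 chat model and tokenizer eagerly at import time, then handing the model object to the pipeline. Below is a minimal runnable sketch of the resulting app.py logic, assuming the surrounding Streamlit UI supplies input_text, keywords, and blog_style. The prompt wording and generation parameters (max_new_tokens, do_sample) are illustrative and not part of the commit, and the tokenizer is passed to pipeline() explicitly because transformers cannot always infer a tokenizer from an in-memory model object. Note that meta-llama/Llama-2-7b-chat-hf is a gated repository, so the Space also needs an accepted license and a Hugging Face access token to download it.

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load once at import time; a 7B model needs substantial memory,
# but this way the download and load happen once per process.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")

def getLLamaresponse(input_text, keywords, blog_style):
    # Pass the tokenizer explicitly: a model id string lets pipeline()
    # locate the tokenizer itself, but an in-memory model object does not.
    llm = pipeline('text-generation', model=model, tokenizer=tokenizer)
    # Hypothetical prompt template, standing in for the "# Prompt Template"
    # section further down in app.py.
    prompt = (f"Write a blog post in a {blog_style} style about {input_text}, "
              f"covering these keywords: {keywords}.")
    result = llm(prompt, max_new_tokens=256, do_sample=True)
    return result[0]['generated_text']

The sketch drops the redundant model_name alias and passes the model directly. Building the pipeline once at module level, next to the model, would also avoid re-wrapping it on every call; it is kept inside the function here only to mirror the structure the commit leaves in place.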