Gayatrikh16 committed on
Commit 37e27c1 · verified · 1 Parent(s): d5d78b5

Update app.py

Files changed (1)
  app.py +4 -5
app.py CHANGED
@@ -4,13 +4,12 @@ from transformers import pipeline
 # Function to get response from LLaMA 2 model
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
-model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+
 
 def getLLamaresponse(input_text, keywords, blog_style):
     # Load the LLaMA 2 model from Hugging Face
-    #model_name = model
-    #llm = pipeline('text-generation', model=model_name)
+    model_name = "https://api-inference.huggingface.co/models/mistralai/mathstral-7B-v0.1"
+    llm = pipeline('text-generation', model=model_name)
 
     # Prompt Template
     template = """
@@ -21,7 +20,7 @@ def getLLamaresponse(input_text, keywords, blog_style):
     prompt = template.format(blog_style=blog_style, input_text=input_text, keywords=keywords)
 
     # Generate the response from the LLaMA 2 model
-    response = model(prompt, max_length=250, temperature=0.01)
+    response = llm(prompt, max_length=250, temperature=0.01)
     return response[0]['generated_text']
 
 st.set_page_config(page_title="Generate Project Idea",
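
For context, below is a minimal sketch of the generation path this commit produces, not the committed file itself. Two caveats are worth flagging: transformers.pipeline() resolves its model argument as a Hub repo ID or local path, so the sketch assumes the plain ID mistralai/mathstral-7B-v0.1 rather than the Inference API URL used in the diff (which pipeline() would not accept), and the inline comments in the committed file still say "LLaMA 2" even though the new model is Mathstral. The template body is elided in the diff, so the wording below is a placeholder assumption.

import streamlit as st
from transformers import pipeline

# Assumption: a plain Hub model ID, since pipeline() cannot load from an
# Inference API URL like the one in the diff.
llm = pipeline('text-generation', model='mistralai/mathstral-7B-v0.1')

def getLLamaresponse(input_text, keywords, blog_style):
    # Placeholder prompt template; the real template body is not shown in the diff.
    template = """
        Write a project idea in a {blog_style} style about {input_text},
        covering the keywords: {keywords}.
        """
    prompt = template.format(blog_style=blog_style, input_text=input_text, keywords=keywords)

    # max_length bounds prompt + generated tokens. temperature only takes
    # effect with do_sample=True; the commit passes it without do_sample,
    # which decodes greedily and logs a warning.
    response = llm(prompt, max_length=250, temperature=0.01, do_sample=True)
    return response[0]['generated_text']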