Cylanoid committed on
Commit
112890f
·
verified ·
1 Parent(s): 4b6c42c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -9
app.py CHANGED
@@ -2,20 +2,17 @@ import gradio as gr
2
  from transformers import LlamaTokenizer, LlamaForCausalLM
3
  import torch
4
 
5
- # Load the fine-tuned model and tokenizer
6
- try:
7
- tokenizer = LlamaTokenizer.from_pretrained("./fine_tuned_llama2")
8
- model = LlamaForCausalLM.from_pretrained("./fine_tuned_llama2")
9
- model.eval()
10
- print("Model and tokenizer loaded successfully.")
11
- except Exception as e:
12
- print(f"Error loading model or tokenizer: {e}")
13
-
14
  # Function to predict fraud based on text input
15
  def predict(input_text):
16
  if not input_text:
17
  return "Please enter some text to analyze."
 
18
  try:
 
 
 
 
 
19
  # Tokenize input
20
  inputs = tokenizer(input_text, return_tensors="pt", max_length=512, padding="max_length", truncation=True)
21
  # Generate output
 
2
  from transformers import LlamaTokenizer, LlamaForCausalLM
3
  import torch
4
 
 
 
 
 
 
 
 
 
 
5
  # Function to predict fraud based on text input
6
  def predict(input_text):
7
  if not input_text:
8
  return "Please enter some text to analyze."
9
+
10
  try:
11
+ # Load the fine-tuned model and tokenizer inside the function
12
+ tokenizer = LlamaTokenizer.from_pretrained("./fine_tuned_llama2")
13
+ model = LlamaForCausalLM.from_pretrained("./fine_tuned_llama2")
14
+ model.eval()
15
+
16
  # Tokenize input
17
  inputs = tokenizer(input_text, return_tensors="pt", max_length=512, padding="max_length", truncation=True)
18
  # Generate output