Ujeshhh committed · verified
Commit 4c7c078 · 1 Parent(s): 078d8fe

Update analysis.py

Files changed (1):
  analysis.py  +26 -31
analysis.py CHANGED
@@ -1,39 +1,34 @@
-from transformers import pipeline
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 from huggingface_hub import login
-import pandas as pd
-
-# 🔹 OPTIONAL: Authenticate if using a gated/private model
-# login(token="your_huggingface_token")  # Uncomment and replace if needed
-
-# ✅ Use a Free Model (Mistral-7B-v0.1 OR Gemma-2B)
-MODEL_NAME = "tiiuae/falcon-7b-instruct"  # Open-source alternative  # Publicly available
-# MODEL_NAME = "google/gemma-2b"  # Alternative (smaller but open-access)
-
-# Load the text generation model
-llm_pipeline = pipeline("text-generation", model=MODEL_NAME, device_map="auto")
-
+import os
+import torch
+
+# ✅ Get API token from environment variable
+HF_TOKEN = os.getenv("HF_TOKEN")
+
+# ✅ Authenticate with Hugging Face (without exposing the token in code)
+login(HF_TOKEN)
+
+# ✅ Load Model Efficiently
+MODEL_NAME = "mistralai/Mistral-7B-Instruct"
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_NAME,
+    token=HF_TOKEN,
+    device_map="auto",
+    torch_dtype=torch.float16,
+    load_in_8bit=True
+)
+
+# ✅ Create LLM Pipeline
+llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")
+
 def analyze_spending_pattern(df):
-    """
-    Analyze the user's spending behavior.
-    """
-    prompt = f"""
-    Here is the user's spending data:
-    {df.to_string(index=False)}
-
-    Identify spending trends, categorize expenses, and highlight areas for cost-saving.
-    """
-    response = llm_pipeline(prompt, max_length=200, do_sample=True)
-    return response[0]['generated_text']
+    prompt = "Analyze the following UPI transactions:\n" + df.to_string()
+    response = llm_pipeline(prompt, max_length=200)[0]["generated_text"]
+    return response
 
 def get_financial_advice(df):
-    """
-    Provide personalized financial recommendations.
-    """
-    prompt = f"""
-    Given the following transaction history:
-    {df.to_string(index=False)}
-
-    Provide personalized recommendations to reduce expenses and improve financial health.
-    """
-    response = llm_pipeline(prompt, max_length=200, do_sample=True)
-    return response[0]['generated_text']
+    prompt = "Provide financial advice based on these UPI transactions:\n" + df.to_string()
+    response = llm_pipeline(prompt, max_length=200)[0]["generated_text"]
+    return response
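Two review notes on the new loading block, neither verified by this diff: load_in_8bit=True depends on the bitsandbytes package and a CUDA GPU, and current transformers releases expect 8-bit loading to be requested through a BitsAndBytesConfig rather than a bare keyword argument; also, the Hub id as committed carries no version suffix, while the published instruct checkpoints are versioned (e.g. mistralai/Mistral-7B-Instruct-v0.2), so the download may need that suffix. A sketch of the config-based load, with the versioned repo id as an assumption:

    # Alternative 8-bit load via BitsAndBytesConfig; a sketch only, requiring
    # `pip install bitsandbytes` and a CUDA-capable GPU. The versioned repo id
    # below is an assumption, not taken from this commit.
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

    MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.2"  # assumed versioned id
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        device_map="auto",
        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    )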
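For reviewers who want to exercise the updated module, a minimal usage sketch follows. The DataFrame columns (date, merchant, amount) are illustrative assumptions, not defined by this commit, and HF_TOKEN must be set in the environment beforehand because analysis.py now calls login(HF_TOKEN) at import time.

    # Usage sketch (not part of the commit); column names are assumptions.
    # Export HF_TOKEN before running, since importing analysis triggers login().
    import pandas as pd

    from analysis import analyze_spending_pattern, get_financial_advice

    # Toy UPI transaction history standing in for the app's real data.
    df = pd.DataFrame({
        "date": ["2024-01-03", "2024-01-05", "2024-01-09"],
        "merchant": ["Swiggy", "Amazon", "BigBasket"],
        "amount": [349.00, 1299.00, 820.50],
    })

    print(analyze_spending_pattern(df))  # spending-trend summary
    print(get_financial_advice(df))      # cost-saving recommendations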