from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
import os

HF_TOKEN = os.getenv("HF_TOKEN")
MODEL_NAME = "TheBloke/Mistral-7B-Instruct-v0.1-GGUF"  # Use a smaller model

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)

# Load model (use torch.float16 on GPU, torch.float32 on CPU)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float32,  # Change to torch.float16 if running on GPU
    device_map="auto",  # Falls back to CPU if no GPU is available
    token=HF_TOKEN,
)

# ✅ Create LLM pipeline (the loaded model already carries its device placement,
# so device_map is not repeated here)
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
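
# Sketch (an assumption, not part of the original script): the dtype could be
# picked from the available hardware instead of editing the flag by hand, e.g.
#   dtype = torch.float16 if torch.cuda.is_available() else torch.float32
#   model = AutoModelForCausalLM.from_pretrained(
#       MODEL_NAME, torch_dtype=dtype, device_map="auto", token=HF_TOKEN)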

def analyze_spending_pattern(df):
    prompt = "Analyze the following UPI transactions:\n" + df.to_string()
    # max_new_tokens caps only the generated text; max_length would also count
    # the prompt, which is long here because it embeds the whole DataFrame
    response = llm_pipeline(prompt, max_new_tokens=200)[0]["generated_text"]
    return response

def get_financial_advice(df):
    prompt = "Provide financial advice based on these UPI transactions:\n" + df.to_string()
    response = llm_pipeline(prompt, max_new_tokens=200)[0]["generated_text"]
    return response
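
# Minimal usage sketch. The column names below are illustrative assumptions, not
# required by the functions above: any DataFrame that renders via to_string() works.
if __name__ == "__main__":
    import pandas as pd

    sample = pd.DataFrame(
        {
            "date": ["2024-01-05", "2024-01-07"],
            "merchant": ["Grocery Mart", "Metro Card"],
            "amount_inr": [845.50, 200.00],
        }
    )
    print(analyze_spending_pattern(sample))
    print(get_financial_advice(sample))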