from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
import os

HF_TOKEN = os.getenv("HF_TOKEN")

# GGUF checkpoints (e.g. TheBloke/Mistral-7B-Instruct-v0.1-GGUF) target
# llama.cpp-style runtimes and won't load via plain from_pretrained();
# use the standard Hugging Face repo for this model instead.
MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.1"

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)

# Load model (use torch.float16 if on GPU, otherwise torch.float32 for CPU)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float32,  # Change to torch.float16 if running on GPU
    device_map="auto",          # Uses CPU if no GPU is available
    token=HF_TOKEN,
)

# ✅ Create LLM pipeline
# The model was already placed by device_map="auto" above, so don't pass
# device_map to the pipeline again; doing so with an instantiated model
# triggers a device-placement conflict.
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
def analyze_spending_pattern(df):
    prompt = "Analyze the following UPI transactions:\n" + df.to_string()
    # max_new_tokens bounds only the generated continuation; max_length would
    # also count the (long) prompt and could truncate or error out.
    # return_full_text=False strips the echoed prompt from the output.
    response = llm_pipeline(prompt, max_new_tokens=200, return_full_text=False)[0]["generated_text"]
    return response

def get_financial_advice(df):
    prompt = "Provide financial advice based on these UPI transactions:\n" + df.to_string()
    response = llm_pipeline(prompt, max_new_tokens=200, return_full_text=False)[0]["generated_text"]
    return response
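
# --- Usage sketch ---
# A minimal example of calling the two helpers above, assuming df is a pandas
# DataFrame of UPI transactions. The column names and values here are
# hypothetical placeholders, not taken from the original code.
if __name__ == "__main__":
    import pandas as pd

    sample_df = pd.DataFrame({
        "date": ["2024-01-05", "2024-01-12", "2024-01-20"],
        "merchant": ["Grocery Mart", "Coffee House", "Electric Co."],
        "amount_inr": [1250.00, 180.00, 2300.00],
    })

    print(analyze_spending_pattern(sample_df))
    print(get_financial_advice(sample_df))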