from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from huggingface_hub import login
import os
import torch

# Get the API token from an environment variable
HF_TOKEN = os.getenv("HF_TOKEN")

# Authenticate with Hugging Face (without exposing the token in code)
login(token=HF_TOKEN)
# Load the model efficiently: device_map="auto" shards it across available
# devices, and 8-bit quantization roughly halves memory use versus fp16
MODEL_NAME = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    token=HF_TOKEN,
    device_map="auto",
    torch_dtype=torch.float16,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
# Create the LLM pipeline around the already-placed model
# (no device_map here: accelerate placed the model above)
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
def analyze_spending_pattern(df):
    """Ask the LLM to summarize spending patterns in a UPI transaction DataFrame."""
    prompt = "Analyze the following UPI transactions:\n" + df.to_string()
    # max_new_tokens bounds only the generated text; max_length would also
    # count the (potentially long) prompt and could truncate the answer
    response = llm_pipeline(prompt, max_new_tokens=200)[0]["generated_text"]
    return response

def get_financial_advice(df):
    """Ask the LLM for financial advice grounded in the same transactions."""
    prompt = "Provide financial advice based on these UPI transactions:\n" + df.to_string()
    response = llm_pipeline(prompt, max_new_tokens=200)[0]["generated_text"]
    return response
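
# A minimal usage sketch, not part of the original app: the column names
# "date", "payee", and "amount" are hypothetical and only illustrate the
# kind of DataFrame the two helpers above expect.
if __name__ == "__main__":
    import pandas as pd

    sample = pd.DataFrame(
        {
            "date": ["2024-01-02", "2024-01-05", "2024-01-09"],
            "payee": ["Grocery Mart", "Metro Card", "Coffee House"],
            "amount": [1250.00, 300.00, 180.00],
        }
    )
    print(analyze_spending_pattern(sample))
    print(get_financial_advice(sample))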