from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from huggingface_hub import login
import os
import torch
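# NOTE: device_map="auto" requires the `accelerate` package, and 8-bit loading
# requires `bitsandbytes`; both are assumed to be installed alongside transformers.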
# Get the API token from an environment variable
HF_TOKEN = os.getenv("HF_TOKEN")
# Authenticate with Hugging Face (without exposing the token in code)
login(token=HF_TOKEN)
# Load the model efficiently: half precision plus 8-bit quantization
MODEL_NAME = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    token=HF_TOKEN,
    device_map="auto",          # let accelerate place layers on available devices
    torch_dtype=torch.float16,  # half precision for the non-quantized modules
    # load_in_8bit as a bare kwarg is deprecated; pass it via BitsAndBytesConfig
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
# Create the LLM pipeline (the model is already dispatched via device_map,
# so no device argument is passed here)
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
def analyze_spending_pattern(df):
    """Ask the LLM to summarize spending patterns in a UPI transaction DataFrame."""
    prompt = "Analyze the following UPI transactions:\n" + df.to_string()
    # max_new_tokens bounds only the generated text; max_length would also count
    # the (potentially long) prompt and could cut the response short
    response = llm_pipeline(prompt, max_new_tokens=200)[0]["generated_text"]
    return response
def get_financial_advice(df):
    """Ask the LLM for financial advice based on a UPI transaction DataFrame."""
    prompt = "Provide financial advice based on these UPI transactions:\n" + df.to_string()
    response = llm_pipeline(prompt, max_new_tokens=200)[0]["generated_text"]
    return response