import os

import requests
import streamlit as st
import torch
from huggingface_hub import login
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForCausalLM
@st.cache_resource
def load_fingpt_lora():
    """Load the Llama-2 base model and attach the FinGPT LoRA adapter."""
    base_model_id = "meta-llama/Llama-2-7b-hf"
    lora_adapter_id = "FinGPT/fingpt-mt_llama2-7b_lora"
    tokenizer = AutoTokenizer.from_pretrained(base_model_id, use_auth_token=HF_TOKEN)
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_id,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto",
        use_auth_token=HF_TOKEN
    )
    # Wrap the base model with the LoRA adapter weights.
    model = PeftModel.from_pretrained(base_model, lora_adapter_id, use_auth_token=HF_TOKEN)
    return model, tokenizer
# Load the access token from the Hugging Face Space secrets (the secret is expected to be named "Allie")
HF_TOKEN = os.getenv("Allie", None)
if HF_TOKEN:
    login(HF_TOKEN)
# === Available Models for Selection ===
model_map = {
    "FinGPT LoRA": {"id": "FinGPT/fingpt-mt_llama2-7b_lora", "local": True, "custom_loader": load_fingpt_lora},
    "InvestLM (AWQ)": {"id": "yixuantt/InvestLM-mistral-AWQ", "local": False},
    "FinLLaMA (LLaMA3.1-8B)": {"id": "us4/fin-llama3.1-8b", "local": False},
    "FinanceConnect (13B)": {"id": "ceadar-ie/FinanceConnect-13B", "local": True},
    "Sujet-Finance (8B)": {"id": "sujet-ai/Sujet-Finance-8B-v0.1", "local": True}
}
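# Note (added for clarity): entries with "local": True are loaded inside the Space process,
# while "local": False entries are queried through the hosted Inference API (see query_remote_model below).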
# === Load local models with caching ===
@st.cache_resource
def load_local_model(model_id):
    tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=HF_TOKEN)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        use_auth_token=HF_TOKEN
    )
    return model, tokenizer
# === Build system prompt for discursive answers ===
def build_prompt(user_question):
    return (
        "You are FinGPT, a helpful and knowledgeable financial assistant. "
        "You explain finance, controlling, and tax topics clearly, with examples when useful.\n\n"
        f"User: {user_question.strip()}\n"
        "FinGPT:"
    )
# === Clean repeated/extra outputs ===
def clean_output(output_text):
    # Keep only the text after the final "FinGPT:" marker so the prompt is not echoed back.
    parts = output_text.split("FinGPT:")
    return parts[-1].strip() if len(parts) > 1 else output_text.strip()
# === Generate with local model ===
def query_local_model(model_id, prompt, loader=None):
    # Use a model-specific loader (e.g. the FinGPT LoRA loader) when one is provided.
    model, tokenizer = loader() if loader is not None else load_local_model(model_id)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        repetition_penalty=1.2,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id
    )
    raw_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return clean_output(raw_output)
# === Generate with remote HF API ===
def query_remote_model(model_id, prompt):
    headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 300}}
    response = requests.post(
        f"https://api-inference.huggingface.co/models/{model_id}",
        headers=headers,
        json=payload,
        timeout=120
    )
    if response.status_code == 200:
        result = response.json()
        return result[0]["generated_text"] if isinstance(result, list) else result.get("generated_text", "No output")
    else:
        raise RuntimeError(f"API Error {response.status_code}: {response.text}")
# === Unified model query handler ===
def query_model(model_entry, user_question):
    prompt = build_prompt(user_question)
    if model_entry["local"]:
        # Local models may define a custom loader (e.g. the FinGPT LoRA adapter on top of Llama-2).
        return query_local_model(model_entry["id"], prompt, loader=model_entry.get("custom_loader"))
    else:
        return clean_output(query_remote_model(model_entry["id"], prompt))
# === Streamlit UI Layout ===
st.set_page_config(page_title="Finance LLM Comparison", layout="centered")
st.title("💼 Financial LLM Evaluation Interface")

model_choice = st.selectbox("Select a Financial Model", list(model_map.keys()))
user_question = st.text_area("Enter your financial question:", "What is EBIT vs EBITDA?", height=150)

if st.button("Get Response"):
    with st.spinner("Thinking like a CFO..."):
        try:
            model_entry = model_map[model_choice]
            answer = query_model(model_entry, user_question)
            st.text_area("💬 Response:", value=answer, height=300, disabled=True)
        except Exception as e:
            st.error(f"❌ Error: {e}")
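# --- Dependencies (a sketch, not part of the original app) ---
# Based on the imports above, a minimal requirements.txt for this Space might look like
# the following (versions left unpinned on purpose):
#   streamlit
#   torch
#   transformers
#   peft
#   accelerate        # needed for device_map="auto"
#   huggingface_hub
#   requests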