import streamlit as st
import torch
import requests
import os
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login
# Load the Hugging Face token from the Space's secrets ("Allie" is the secret's name)
HF_TOKEN = os.getenv("Allie", None)
if HF_TOKEN:
    login(HF_TOKEN)
# All available models
model_map = {
    "FinGPT": {"id": "OpenFinAL/GPT2_FINGPT_QA", "local": True},
    "InvestLM": {"id": "yixuantt/InvestLM-mistral-AWQ", "local": False},
    "FinLLaMA": {"id": "us4/fin-llama3.1-8b", "local": False},
    "FinanceConnect": {"id": "ceadar-ie/FinanceConnect-13B", "local": True},
    "Sujet-Finance": {"id": "sujet-ai/Sujet-Finance-8B-v0.1", "local": True}
}
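
# To evaluate another checkpoint, add an entry above; "local" controls whether
# the model is loaded in-process or queried through the hosted Inference API.
# Hypothetical example (not one of the evaluated models):
#   "MyModel": {"id": "some-org/some-model", "local": False},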
# Load a local model (cached so Streamlit reruns don't reload the weights;
# `token` replaces the deprecated `use_auth_token` argument)
@st.cache_resource
def load_local_model(model_id):
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        token=HF_TOKEN
    )
    return model, tokenizer
# Build a discursive prompt
def build_prompt(user_question):
    return (
        "You are a helpful and knowledgeable financial assistant named FinGPT. "
        "You explain financial terms and concepts clearly, with examples when useful.\n\n"
        f"User: {user_question.strip()}\n"
        "FinGPT:"
    )
# Strip the prompt echo: keep only the text after the last "FinGPT:" marker
def clean_output(output_text):
    parts = output_text.split("FinGPT:")
    return parts[-1].strip() if len(parts) > 1 else output_text.strip()
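
# For example, clean_output("User: What is beta?\nFinGPT: Beta measures volatility.")
# returns "Beta measures volatility."; if the marker is absent, the full text is
# returned stripped.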
# Local inference
def query_local_model(model_id, prompt):
    model, tokenizer = load_local_model(model_id)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        repetition_penalty=1.2,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id
    )
    raw_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return clean_output(raw_output)
# Remote inference via the Hugging Face Inference API
def query_remote_model(model_id, prompt):
    headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 200}}
    response = requests.post(
        f"https://api-inference.huggingface.co/models/{model_id}",
        headers=headers,
        json=payload,
        timeout=120  # avoid hanging indefinitely on a slow endpoint
    )
    if response.status_code == 200:
        result = response.json()
        return result[0]["generated_text"] if isinstance(result, list) else result.get("generated_text", "No output")
    else:
        raise RuntimeError(f"API Error: {response.status_code} — {response.text}")
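
# The hosted Inference API answers 503 while a cold model is still loading, so
# a small retry wrapper can smooth over that case. This is a sketch: the retry
# count and delay are arbitrary choices, not part of the original app.
import time

def query_remote_model_with_retry(model_id, prompt, retries=3, delay=10):
    for attempt in range(retries):
        try:
            return query_remote_model(model_id, prompt)
        except RuntimeError as err:
            # Retry only on 503 (model still loading); re-raise anything else.
            if "503" in str(err) and attempt < retries - 1:
                time.sleep(delay)
            else:
                raise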
# Unified query handler: route to local or remote inference
def query_model(model_entry, user_question):
    prompt = build_prompt(user_question)
    if model_entry["local"]:
        return query_local_model(model_entry["id"], prompt)
    else:
        return query_remote_model(model_entry["id"], prompt)
# Streamlit UI
st.set_page_config(page_title="Financial LLM Interface", layout="centered")
st.title("💼 Financial LLM Evaluation Interface")
model_choice = st.selectbox("Select a Financial Model", list(model_map.keys()))
user_question = st.text_area("Enter your financial question:", "What is CAP in finance?")
if st.button("Get Response"):
    with st.spinner("Generating discursive response..."):
        try:
            model_entry = model_map[model_choice]
            answer = query_model(model_entry, user_question)
            st.markdown("### 🧠 Response:")
            st.text_area("💬 Response from FinGPT:", value=answer, height=200, disabled=True)
        except Exception as e:
            st.error(f"❌ Error: {e}")
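
# Run locally with (filename assumed; on Spaces the entry point is typically app.py):
#   streamlit run app.py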