import os

import requests
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login

# Read the Hugging Face token from the secret named "Allie".
HF_TOKEN = os.getenv("Allie")
if HF_TOKEN:
    login(HF_TOKEN)
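# Note: on Hugging Face Spaces, repository secrets are exposed to the app as
# environment variables; login() stores the token so that the hub downloads
# and Inference API calls below can authenticate with it.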
# Define model map with access type: "local" models load in-process,
# the rest are queried through the hosted Inference API.
model_map = {
    "FinGPT": {"id": "OpenFinAL/GPT2_FINGPT_QA", "local": True},
    "InvestLM": {"id": "yixuantt/InvestLM-mistral-AWQ", "local": False},
    "FinLLaMA": {"id": "us4/fin-llama3.1-8b", "local": False},
    "FinanceConnect": {"id": "ceadar-ie/FinanceConnect-13B", "local": True},
    "Sujet-Finance": {"id": "sujet-ai/Sujet-Finance-8B-v0.1", "local": True},
}
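# Gated or private checkpoints download only if the token above has been
# granted access to the repo; fully public checkpoints need no token.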
# Cache local models so they load once per process, not on every Streamlit rerun.
@st.cache_resource
def load_local_model(model_id):
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        # float16 halves memory on GPU; fall back to float32 on CPU.
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        token=HF_TOKEN,
    )
    return model, tokenizer
# Local model querying
def query_local_model(model_id, prompt):
    model, tokenizer = load_local_model(model_id)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=150)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
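# Note: the decode above returns the prompt followed by the completion, since
# causal LMs echo their input. A minimal sketch of a helper (hypothetical,
# not part of the original app) that keeps only the newly generated tokens:
def decode_new_tokens(tokenizer, inputs, outputs):
    prompt_len = inputs["input_ids"].shape[1]  # number of prompt tokens
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)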
# Remote model querying (via the Hugging Face Inference API)
def query_remote_model(model_id, prompt):
    headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 150}}
    response = requests.post(
        f"https://api-inference.huggingface.co/models/{model_id}",
        headers=headers,
        json=payload,
        timeout=120,  # avoid hanging the UI on a stalled request
    )
    if response.status_code == 200:
        result = response.json()
        # Text-generation endpoints return a list of generations.
        return result[0]["generated_text"] if isinstance(result, list) else result.get("generated_text", "No output")
    else:
        raise RuntimeError(f"Failed to call remote model: {response.text}")
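# The hosted Inference API returns HTTP 503 while a cold model is still
# loading. A minimal retry sketch around the function above (the retry count
# and wait time are assumptions, not part of the original app):
import time

def query_remote_model_with_retry(model_id, prompt, retries=3, wait=10.0):
    for attempt in range(retries):
        try:
            return query_remote_model(model_id, prompt)
        except RuntimeError:
            if attempt == retries - 1:
                raise  # give up after the last attempt
            time.sleep(wait)  # give the model time to finish loading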
# Unified query dispatcher
def query_model(model_entry, prompt):
    if model_entry["local"]:
        return query_local_model(model_entry["id"], prompt)
    else:
        return query_remote_model(model_entry["id"], prompt)
# --- Streamlit UI ---
st.title("💼 Financial LLM Evaluation Interface")

model_choice = st.selectbox("Select a Financial Model", list(model_map.keys()))
user_question = st.text_area("Enter your financial question:", "What is EBITDA?")

if st.button("Get Response"):
    with st.spinner("Generating response..."):
        try:
            model_entry = model_map[model_choice]
            answer = query_model(model_entry, user_question)
            st.subheader(f"Response from {model_choice}:")
            st.write(answer)
        except Exception as e:
            st.error(f"Something went wrong: {e}")
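# To run locally (assuming this file is saved as app.py):
#   pip install streamlit torch transformers huggingface_hub requests
#   export Allie=<your-hf-token>   # optional; needed for gated checkpoints
#   streamlit run app.py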