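"""Gradio app for the AIM leaderboard.

Fetches evaluation results from the leaderboard API and renders them as a
read-only table, alongside an About tab. (Docstring added for context; the
behaviour described is taken directly from the code below.)
"""
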
import gradio as gr
import pandas as pd
import requests

from src.about import (
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css


def get_evaluation():
    """Fetch the latest leaderboard entries and return them as a display-ready DataFrame."""
    response = requests.get("http://aim100.qinference.com/api/leaderboard/list")
    data_json = response.json()
    df = pd.DataFrame(data_json)

    # Replace zero scores with a dash for display.
    for col in df.columns:
        df.loc[df[col] == 0, col] = '-'

    # Add a 1-based rank column, drop the internal key, and tidy up column names.
    df.insert(0, 'No', df.reset_index().index + 1)
    ret = df.drop(columns='nodeSeq').rename(columns={'modelName': 'Model'})
    ret.columns = [x.capitalize() for x in ret.columns]
    return ret


# Build the leaderboard UI.
leaderboard = gr.Blocks(css=custom_css)

with leaderboard:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            leaderboard_table = gr.components.Dataframe(
                value=get_evaluation(),
                elem_id="leaderboard-table",
                interactive=False,
                visible=True,
            )

        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

leaderboard.queue(default_concurrency_limit=40).launch()