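"""Gradio app for the Open Persian LLM Leaderboard.

Loads benchmark results from leaderboard_data.jsonl, adds an average-accuracy
column, and serves the leaderboard alongside an about page and a
model-submission form.
"""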
import gradio as gr
from gradio_leaderboard import Leaderboard, SelectColumns, ColumnFilter
from pathlib import Path

from utils import (
    LLM_BENCHMARKS_ABOUT_TEXT,
    LLM_BENCHMARKS_SUBMIT_TEXT,
    PART_LOGO,
    add_average_column_to_df,
    apply_markdown_format_for_columns,
    custom_css,
    jsonl_to_dataframe,
    sort_dataframe_by_column,
    submit,
)



# Resolve paths relative to this file so the app works regardless of the working directory.
abs_path = Path(__file__).parent

# Any pandas-compatible data
leaderboard_df = jsonl_to_dataframe(str(abs_path / "leaderboard_data.jsonl"))

average_column_name = "Average Accuracy"

columns_to_average = ['GeneralKnowledge', 'GSM8K', 'DC-Homograph', 'MC-Homograph', 'PiQA', 'Proverb-Quiz', 'VerbEval', 'Winogrande', 'Arc-Challenge', 'Arc-Easy', 'Feqh', 'Hallucination (Truthfulness)', 'P-Hellaswag', 'Law', 'AUT Multiple Choice', 'Parsi Literature', 'BoolQA', 'Reading Comprehension', 'PartExpert', 'MMLU Pro', 'Iranian Social Norms']
all_columns = ["Model", average_column_name, "Precision", "#Params (B)"] + columns_to_average



# Add a per-model average over all benchmark columns (inserted at column index 3).
leaderboard_df = add_average_column_to_df(leaderboard_df, columns_to_average, index=3, average_column_name=average_column_name)

# Apply markdown formatting to the model column and rank rows by the average score.
leaderboard_df = apply_markdown_format_for_columns(df=leaderboard_df, model_column_name="Model")
leaderboard_df = sort_dataframe_by_column(leaderboard_df, column_name=average_column_name)

# Valid datatypes: "str", "number", "bool", "date", "markdown".
# Every column is rendered as markdown.
columns_data_type = ["markdown"] * len(leaderboard_df.columns)

NUM_MODELS = len(leaderboard_df)

with gr.Blocks(
    css=custom_css,
    theme=gr.themes.Default(
        font=["sans-serif", "ui-sans-serif", "system-ui"],
        font_mono=["monospace", "ui-monospace", "Consolas"],
    ),
) as demo:
    # gr.HTML(PART_LOGO)
    gr.Markdown("""
    # Open Persian LLM Leaderboard
    """)

    gr.Markdown(f"""
    - **Total Models**: {NUM_MODELS}
    """)

    with gr.Tab("🎖️ Persian Leaderboard"):
        Leaderboard(
            value=leaderboard_df,
            datatype=columns_data_type,
            select_columns=SelectColumns(
                default_selection=all_columns,
                cant_deselect=["Model"],
                label="Select Columns to Show",
            ),
            search_columns=["model_name_for_query"],
            hide_columns=["model_name_for_query", "Precision", "#Params (B)"],
            filter_columns=[],
        )
    with gr.Tab("📝 About"):
        gr.Markdown(LLM_BENCHMARKS_ABOUT_TEXT)

    with gr.Tab("✉️ Submit"):
        gr.Markdown(LLM_BENCHMARKS_SUBMIT_TEXT)
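        # Submission form fields: model display name, Hub identifier (username/model), and a contact e-mail.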
        model_name = gr.Textbox(label="Model name")
        model_id = gr.Textbox(label="username/model, e.g. PartAI/Dorna-Llama3-8B-Instruct")
        contact_email = gr.Textbox(label="Contact E-Mail")
        submit_btn = gr.Button("Submit")

        submit_btn.click(submit, inputs=[model_name, model_id, contact_email], outputs=[])

        gr.Markdown("""
        Please find more information about Part DP AI at [partdp.ai](https://partdp.ai).""")

if __name__ == "__main__":
    demo.launch()