Spaces:
Sleeping
Sleeping
Commit
·
9dc4521
1
Parent(s):
172f670
added citation
Browse files- app.py +12 -4
- src/assets/text_content.py +16 -3
app.py
CHANGED
|
@@ -3,7 +3,7 @@ import gradio as gr
|
|
| 3 |
import pandas as pd
|
| 4 |
from apscheduler.schedulers.background import BackgroundScheduler
|
| 5 |
|
| 6 |
-
from src.assets.text_content import TITLE, INTRODUCTION_TEXT
|
| 7 |
from src.assets.css_html_js import custom_css, get_window_url_params
|
| 8 |
from src.utils import restart_space, load_dataset_repo, make_clickable_model
|
| 9 |
|
|
@@ -61,8 +61,8 @@ with demo:
|
|
| 61 |
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
|
| 62 |
|
| 63 |
with gr.Tabs(elem_classes="tab-buttons") as tabs:
|
| 64 |
-
with gr.TabItem("
|
| 65 |
-
dataframe_text = "<h4>
|
| 66 |
|
| 67 |
gr.HTML(dataframe_text)
|
| 68 |
benchmark_df = get_benchmark_df()
|
|
@@ -70,9 +70,17 @@ with demo:
|
|
| 70 |
value=benchmark_df,
|
| 71 |
datatype=COLUMNS_DATATYPES,
|
| 72 |
headers=NEW_COLUMNS,
|
| 73 |
-
elem_id="pytorch-
|
| 74 |
)
|
| 75 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
|
| 77 |
# Restart space every hour
|
| 78 |
scheduler = BackgroundScheduler()
|
|
|
|
| 3 |
import pandas as pd
|
| 4 |
from apscheduler.schedulers.background import BackgroundScheduler
|
| 5 |
|
| 6 |
+
from src.assets.text_content import TITLE, INTRODUCTION_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
|
| 7 |
from src.assets.css_html_js import custom_css, get_window_url_params
|
| 8 |
from src.utils import restart_space, load_dataset_repo, make_clickable_model
|
| 9 |
|
|
|
|
| 61 |
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
|
| 62 |
|
| 63 |
with gr.Tabs(elem_classes="tab-buttons") as tabs:
|
| 64 |
+
with gr.TabItem("🏎 A100-80GB Benchmark 🏋️", elem_id="a100-benchmark", id=0):
|
| 65 |
+
dataframe_text = "<h4>Specification:\nSingle and Multi-GPU Setup\nBatch Size: 1\nGenerated Tokens: 100</h4>"
|
| 66 |
|
| 67 |
gr.HTML(dataframe_text)
|
| 68 |
benchmark_df = get_benchmark_df()
|
|
|
|
| 70 |
value=benchmark_df,
|
| 71 |
datatype=COLUMNS_DATATYPES,
|
| 72 |
headers=NEW_COLUMNS,
|
| 73 |
+
elem_id="pytorch-A100-benchmark",
|
| 74 |
)
|
| 75 |
|
| 76 |
+
with gr.Row():
|
| 77 |
+
with gr.Column():
|
| 78 |
+
with gr.Accordion("📙 Citation", open=False):
|
| 79 |
+
citation_button = gr.Textbox(
|
| 80 |
+
value=CITATION_BUTTON_TEXT,
|
| 81 |
+
label=CITATION_BUTTON_LABEL,
|
| 82 |
+
elem_id="citation-button",
|
| 83 |
+
).style(show_copy_button=True)
|
| 84 |
|
| 85 |
# Restart space every hour
|
| 86 |
scheduler = BackgroundScheduler()
|
src/assets/text_content.py
CHANGED
|
@@ -1,8 +1,21 @@
|
|
| 1 |
TITLE = """<h1 align="center" id="space-title">🤗 Open LLM-Perf Leaderboard 🏋️</h1>"""
|
| 2 |
|
| 3 |
INTRODUCTION_TEXT = f"""
|
| 4 |
-
The 🤗 Open LLM-Perf Leaderboard 🏋️ aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) on different hardwares and backends using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark).
|
| 5 |
Anyone from the community can submit a model or a hardware+backend configuration for automated benchmarking:
|
| 6 |
-
- Model submissions should be made in the [🤗 Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) and will be added to the 🤗 Open LLM-Perf Leaderboard 🏋️
|
| 7 |
-
- Hardware+Backend submissions should be made in the 🤗 Open LLM-Perf Leaderboard 🏋️ [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions).
|
| 8 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
TITLE = """<h1 align="center" id="space-title">🤗 Open LLM-Perf Leaderboard 🏋️</h1>"""
|
| 2 |
|
| 3 |
INTRODUCTION_TEXT = f"""
|
| 4 |
+
The 🤗 Open LLM-Perf Leaderboard 🏋️ aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) on different hardwares and backends using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark) and [Optimum](https://github.com/huggingface/optimum) flavors.
|
| 5 |
Anyone from the community can submit a model or a hardware+backend configuration for automated benchmarking:
|
| 6 |
+
- Model submissions should be made in the [🤗 Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) and will be added to the 🤗 Open LLM-Perf Leaderboard 🏋️ once they're publicly available.
|
| 7 |
+
- Hardware+Backend submissions should be made in the 🤗 Open LLM-Perf Leaderboard 🏋️ [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions); An automated process will be set up soon to allow for direct submissions.
|
| 8 |
"""
|
| 9 |
+
|
| 10 |
+
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
|
| 11 |
+
CITATION_BUTTON_TEXT = """@misc{open-llm-perf-leaderboard,
|
| 12 |
+
author = {Ilyas Moutawwakil},
|
| 13 |
+
title = {Open LLM-Perf Leaderboard},
|
| 14 |
+
year = {2023},
|
| 15 |
+
publisher = {Hugging Face},
|
| 16 |
+
howpublished = "\url{https://huggingface.co/spaces/optimum/open-llm-perf-leaderboard}",
}
|
| 17 |
+
@software{optimum-benchmark,
|
| 18 |
+
author = {Ilyas Moutawwakil},
|
| 19 |
+
title = {A framework for benchmarking the performance of Transformers models},
|
| 20 |
+
}
|
| 21 |
+
"""
|