Commit bee5389 · Parent(s): b869fcb · added specifications

Files changed:
- app.py (+5 -2)
- src/assets/text_content.py (+6 -4)
app.py CHANGED

@@ -62,9 +62,12 @@ with demo:
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🖥️ A100-80GB Benchmark 🏋️", elem_id="a100-benchmark", id=0):
-            dataframe_text = "<h4>
+            dataframe_text = """<h4>Specifications:</h4>
+            - Single and Multi-GPU Setup
+            - Batch Size: 1
+            - Generated Tokens: 100"""
 
-            gr.
+            gr.Markdown(dataframe_text, elem_classes="markdown-text")
             benchmark_df = get_benchmark_df()
             leaderboard_table_lite = gr.components.Dataframe(
                 value=benchmark_df,
src/assets/text_content.py CHANGED

@@ -5,17 +5,19 @@ The 🤗 Open LLM-Perf Leaderboard 🏋️ aims to benchmark the performance (la
 Anyone from the community can submit a model or a hardware+backend configuration for automated benchmarking:
 - Model submissions should be made in the [🤗 Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) and will be added to the 🤗 Open LLM-Perf Leaderboard 🏋️ once they're publicly available.
 - Hardware+Backend submissions should be made in the 🤗 Open LLM-Perf Leaderboard 🏋️ [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions); An automated process will be set up soon to allow for direct submissions.
+
+[Config files](https://github.com/huggingface/optimum-benchmark/blob/main/examples/bert.yaml) (which can be used with Optimum-Benchmark) will be available soon for reproduction and questioning/correction of the results.
 """
 
-CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results."
 CITATION_BUTTON_TEXT = r"""@misc{open-llm-perf-leaderboard,
   author = {Ilyas Moutawwakil},
   title = {Open LLM-Perf Leaderboard},
   year = {2023},
   publisher = {Hugging Face},
-  howpublished = "\url{https://huggingface.co/spaces/optimum/
+  howpublished = "\url{https://huggingface.co/spaces/optimum/llm-perf-leaderboard}",
 @software{optimum-benchmark,
   author = {Ilyas Moutawwakil},
-  title = {A framework for benchmarking the performance of Transformers models},
+  title = {A framework for benchmarking the performance of Transformers models on different hardwares and backends},
 }
-"""
+"""
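The commit itself only edits these constants; how they are consumed is not shown here. A hypothetical usage sketch, assuming the leaderboard app exposes the citation through a collapsible Gradio section (the accordion title and textbox settings below are illustrative, not taken from the repository):

# Hypothetical wiring of the citation constants into a Gradio app (not part of this commit).
import gradio as gr

from src.assets.text_content import CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT

with gr.Blocks() as demo:
    with gr.Accordion("Citation", open=False):
        # A read-only textbox lets visitors copy the BibTeX snippet.
        citation_box = gr.Textbox(
            value=CITATION_BUTTON_TEXT,
            label=CITATION_BUTTON_LABEL,
            lines=10,
            interactive=False,
        )

demo.launch()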