Spaces: Running on CPU Upgrade
Alina Lozovskaya committed
Commit 3d76e98 · 1 Parent(s): 8c39d2c

Update config

Files changed:
- yourbench_space/app.py (+13 -103)
- yourbench_space/config.py (+19 -61)
yourbench_space/app.py
CHANGED
@@ -33,18 +33,23 @@ def prepare_task(oauth_token: gr.OAuthToken | None, model_token: str):
     manager.start_process(custom_env=new_env)
 
 
-def update_hf_org_dropdown(oauth_token: gr.OAuthToken | None)
+def update_hf_org_dropdown(oauth_token: gr.OAuthToken | None):
     if oauth_token is None:
         print(
             "Please, deploy this on Spaces and log in to view the list of available organizations"
         )
-        return
-    user_info = whoami(oauth_token.token)
+        return gr.Dropdown([], label="Organization")
 
-
-
-
-
+    try:
+        user_info = whoami(oauth_token.token)
+        org_names = [org["name"] for org in user_info.get("orgs", [])]
+        user_name = user_info.get("name", "Unknown User")
+        org_names.insert(0, user_name)
+        return gr.Dropdown(org_names, value=user_name, label="Organization")
+
+    except Exception as e:
+        print(f"Error retrieving user info: {e}")
+        return gr.Dropdown([], label="Organization")
 
 
 config_output = gr.Code(label="Generated Config", language="yaml")
@@ -60,74 +65,6 @@ base_url = gr.Textbox(
     info="Use a custom API base URL for Hugging Face Inference Endpoints",
 )
 
-
-def make_models(model_name=None):
-    if model_name is None:
-        model_name = DEFAULT_MODEL
-
-    ingestion_model = gr.Dropdown(
-        label="Model for ingestion",
-        choices=AVAILABLE_MODELS,
-        value=model_name,
-        interactive=False,
-        allow_custom_value=True,
-    )
-    summarization_model = gr.Dropdown(
-        label="Model for summarization",
-        choices=AVAILABLE_MODELS,
-        value=model_name,
-        interactive=False,
-        allow_custom_value=True,
-    )
-    single_shot_question_generation_model = gr.Dropdown(
-        label="Model for single shot question generation",
-        choices=AVAILABLE_MODELS,
-        value=model_name,
-        interactive=False,
-        allow_custom_value=True,
-    )
-    multi_hop_question_generation_model = gr.Dropdown(
-        label="Model for multi hop question generation",
-        choices=AVAILABLE_MODELS,
-        value=model_name,
-        interactive=False,
-        allow_custom_value=True,
-    )
-    answer_generation_model = gr.Dropdown(
-        label="Model for answer generation",
-        choices=AVAILABLE_MODELS,
-        value=model_name,
-        interactive=False,
-        allow_custom_value=True,
-    )
-    judge_answers_model = gr.Dropdown(
-        label="Model for answer judging",
-        choices=AVAILABLE_MODELS,
-        value=model_name,
-        interactive=False,
-        allow_custom_value=True,
-    )
-
-    return [
-        ingestion_model,
-        summarization_model,
-        single_shot_question_generation_model,
-        multi_hop_question_generation_model,
-        answer_generation_model,
-        judge_answers_model,
-    ]
-
-
-(
-    ingestion_model,
-    summarization_model,
-    single_shot_question_generation_model,
-    multi_hop_question_generation_model,
-    answer_generation_model,
-    judge_answers_model,
-) = make_models()
-
-
 with gr.Blocks() as app:
     gr.Markdown("## YourBench Configuration")
     with gr.Row():
@@ -155,19 +92,6 @@ with gr.Blocks() as app:
 
     with gr.Accordion("Model"):
         model_name.render()
-        # TODO handle this better
-        model_name.change(
-            make_models,
-            inputs=[model_name],
-            outputs=[
-                ingestion_model,
-                summarization_model,
-                single_shot_question_generation_model,
-                multi_hop_question_generation_model,
-                answer_generation_model,
-                judge_answers_model,
-            ],
-        )
 
     provider = gr.Radio(
         ["huggingface", "openrouter", "openai"],
@@ -187,32 +111,18 @@ with gr.Blocks() as app:
         [8, 16, 32], value=16, label="Max Concurrent Requests"
     )
 
-    with gr.Accordion("Stages"):
-        ingestion_model.render()
-        summarization_model.render()
-        single_shot_question_generation_model.render()
-        multi_hop_question_generation_model.render()
-        answer_generation_model.render()
-        judge_answers_model.render()
-
     preview_button = gr.Button("Generate New Config")
     preview_button.click(
        generate_base_config,
        inputs=[
            hf_org_dropdown,
+           hf_dataset_prefix,
            model_name,
            provider,
            base_url,
            model_api_key,
            max_concurrent_requests,
-           hf_dataset_prefix,
            private_dataset,
-           ingestion_model,
-           summarization_model,
-           single_shot_question_generation_model,
-           multi_hop_question_generation_model,
-           answer_generation_model,
-           judge_answers_model,
        ],
        outputs=config_output,
    )
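The refactored callback now returns a gr.Dropdown in every branch (logged out, success, and error), so it can be bound directly to the organization dropdown. Below is a minimal sketch of that wiring, assuming the surrounding Blocks layout; the binding itself is not shown in this diff, and the component name is a hypothetical placeholder. Gradio auto-injects gr.OAuthToken-typed parameters on Spaces, so the callback needs no explicit inputs.

import gradio as gr

with gr.Blocks() as app:
    # Hypothetical placeholder; the real component is defined elsewhere in app.py
    hf_org_dropdown = gr.Dropdown([], label="Organization")
    # On page load, replace the empty dropdown with the user's organizations;
    # update_hf_org_dropdown returns a replacement gr.Dropdown in all branches.
    app.load(update_hf_org_dropdown, inputs=None, outputs=hf_org_dropdown)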
yourbench_space/config.py
CHANGED
@@ -1,31 +1,24 @@
-
 import yaml
 from yourbench_space.utils import CONFIG_PATH
 
-
-
 def generate_base_config(
     hf_org,
+    hf_dataset_name,
     model_name,
     provider,
     base_url,
     model_api_key,
     max_concurrent_requests,
-    hf_dataset_prefix,
     private_dataset,
-    ingestion_model,
-    summarization_model,
-    single_shot_question_generation_model,
-    multi_hop_question_generation_model,
-    answer_generation_model,
-    judge_answers_model,
 ):
     config = {
         "hf_configuration": {
             "token": "$HF_TOKEN",
             "private": private_dataset,
             "hf_organization": hf_org,
+            "hf_dataset_name": hf_dataset_name,
         },
+        "local_dataset_dir": "results/",
         "model_list": [
             {
                 "model_name": model_name,
@@ -36,38 +29,25 @@ def generate_base_config(
             }
         ],
         "model_roles": {
-
-
-
-
-
-
-            "answer_generation",
-            "judge_answers",
-        ]
+            "ingestion": [model_name],
+            "summarization": [model_name],
+            "single_shot_question_generation": [model_name],
+            "multi_hop_question_generation": [model_name],
+            "answer_generation": [model_name],
+            "judge_answers": [model_name],
         },
-        "inference_config": {"max_concurrent_requests": 16},
         "pipeline": {
             "ingestion": {
-                "source_documents_dir": "/app/
-                "output_dir": "/app/ingested",
+                "source_documents_dir": "/app/example/raw",
+                "output_dir": "/app/example/ingested",
                 "run": True,
             },
             "upload_ingest_to_hub": {
-                "source_documents_dir": "/app/ingested",
-                "hub_dataset_name": f"{hf_dataset_prefix}_ingested_documents",
-                "run": True,
-            },
-            "summarization": {
-                "source_dataset_name": f"{hf_dataset_prefix}_ingested_documents",
-                "output_dataset_name": f"{hf_dataset_prefix}_summaries",
-                "concat_existing_dataset": False,
+                "source_documents_dir": "/app/example/ingested",
                 "run": True,
             },
+            "summarization": {"run": True},
             "chunking": {
-                "source_dataset_name": f"{hf_dataset_prefix}_summaries",
-                "output_dataset_name": f"{hf_dataset_prefix}_chunked_documents",
-                "concat_existing_dataset": False,
                 "chunking_configuration": {
                     "l_min_tokens": 64,
                     "l_max_tokens": 128,
@@ -78,50 +58,28 @@ def generate_base_config(
                 "run": True,
             },
             "single_shot_question_generation": {
-                "source_dataset_name": f"{hf_dataset_prefix}_chunked_documents",
-                "output_dataset_name": f"{hf_dataset_prefix}_single_shot_questions",
                 "diversification_seed": "24 year old adult",
-                "concat_existing_dataset": False,
-                "run": True,
-            },
-            "multi_hop_question_generation": {
-                "source_dataset_name": f"{hf_dataset_prefix}_chunked_documents",
-                "output_dataset_name": f"{hf_dataset_prefix}_multi_hop_questions",
-                "concat_existing_dataset": False,
                 "run": True,
             },
+            "multi_hop_question_generation": {"run": True},
             "answer_generation": {
-                "
-                "
-                "concat_existing_dataset": False,
+                "question_type": "single_shot",
+                "run": True,
                 "strategies": [
-                    {
-                        "name": "zeroshot",
-                        "prompt": "ZEROSHOT_QA_USER_PROMPT",
-                        "model_name": model_name,
-                    },
-                    {
-                        "name": "gold",
-                        "prompt": "GOLD_QA_USER_PROMPT",
-                        "model_name": model_name,
-                    },
+                    {"name": "zeroshot", "prompt": "ZEROSHOT_QA_USER_PROMPT", "model_name": model_name},
+                    {"name": "gold", "prompt": "GOLD_QA_USER_PROMPT", "model_name": model_name},
                 ],
-                "run": True,
             },
             "judge_answers": {
-                "
-                "output_judged_dataset_name": f"{hf_dataset_prefix}_judged_comparisons",
-                "concat_existing_dataset": False,
+                "run": True,
                 "comparing_strategies": [["zeroshot", "gold"]],
                 "chunk_column_index": 0,
                 "random_seed": 42,
-                "run": True,
             },
         },
     }
     return yaml.dump(config, sort_keys=False)
 
-
 def save_config(yaml_text):
     with open(CONFIG_PATH, "w") as file:
         file.write(yaml_text)
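For reference, a minimal sketch of calling the updated generator with its new eight-argument signature; the argument values here are illustrative, not from this commit:

from yourbench_space.config import generate_base_config

yaml_text = generate_base_config(
    hf_org="my-org",                           # hypothetical organization
    hf_dataset_name="yourbench_demo",          # hypothetical dataset name
    model_name="meta-llama/Llama-3.3-70B-Instruct",  # hypothetical model id
    provider="huggingface",
    base_url=None,
    model_api_key="$MODEL_API_KEY",
    max_concurrent_requests=16,
    private_dataset=True,
)
print(yaml_text)  # YAML string produced by yaml.dump(config, sort_keys=False)

The emitted YAML now carries hf_dataset_name under hf_configuration, a top-level local_dataset_dir, and a model_roles map that assigns the single selected model to all six pipeline stages, replacing the per-stage model dropdowns removed from app.py.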