Update app.py
app.py CHANGED
@@ -972,9 +972,10 @@ def start_answer_generation(model_choice: str):
     model_map = {
         "Llama 3.1 8B": "meta-llama/Llama-3.1-8B-Instruct",
         "Llama 3.3 70B": "meta-llama/Llama-3.3-70B-Instruct",
+        "Llama shallow": "tokyotech-llm/Llama-3.3-Swallow-70B-Instruct-v0.4",
         "Mistral 7B": "mistralai/Mistral-7B-Instruct-v0.3",
         "Qwen 2.5": "Qwen/Qwen-2.5-Omni-7B",
-        "Qwen 2.5 instruct": "Qwen/Qwen2.5-14B-Instruct-1M",
+        #"Qwen 2.5 instruct": "Qwen/Qwen2.5-14B-Instruct-1M",
         "Qwen 3": "Qwen/Qwen3-32B"

     }
@@ -1130,7 +1131,7 @@ with gr.Blocks(title="Intelligent Agent with Media Processing") as demo:

     with gr.Row():
         model_choice = gr.Dropdown(
-            choices=["Llama 3.1 8B", "Llama 3.3 70B", "
+            choices=["Llama 3.1 8B", "Llama 3.3 70B", "Llama shallow", "Mistral 7B", "Qwen 2.5", "Qwen 3"],
             value="Llama 3.1 8B",
             label="Select Model"
         )
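For reference, a minimal, self-contained sketch of how the selected dropdown label is presumably resolved to a Hugging Face model ID inside start_answer_generation. Only the model_map entries and the function's signature come from the diff above; the helper name resolve_model_id and the fallback to the dropdown's default value "Llama 3.1 8B" are illustrative assumptions, not code from app.py.

def resolve_model_id(model_choice: str) -> str:
    # Dropdown label -> Hugging Face model ID (entries taken from the diff above).
    model_map = {
        "Llama 3.1 8B": "meta-llama/Llama-3.1-8B-Instruct",
        "Llama 3.3 70B": "meta-llama/Llama-3.3-70B-Instruct",
        "Llama shallow": "tokyotech-llm/Llama-3.3-Swallow-70B-Instruct-v0.4",
        "Mistral 7B": "mistralai/Mistral-7B-Instruct-v0.3",
        "Qwen 2.5": "Qwen/Qwen-2.5-Omni-7B",
        "Qwen 3": "Qwen/Qwen3-32B",
    }
    # Assumption: fall back to the default dropdown value if an unknown label arrives.
    return model_map.get(model_choice, model_map["Llama 3.1 8B"])

print(resolve_model_id("Llama shallow"))  # tokyotech-llm/Llama-3.3-Swallow-70B-Instruct-v0.4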