Update app.py
--- a/app.py
+++ b/app.py
@@ -33,8 +33,9 @@ except ImportError:
     def get_available_providers(): return ["DummyProvider"]
     def get_models_for_provider(p): return ["dummy-model"]
     def get_default_model_for_provider(p): return "dummy-model"
+    # The dummy function already accepts the api_key argument ('a')
     def generate_stream(p, m, a, msgs):
-        yield "
+        yield f"Using dummy model. API Key provided: {'Yes' if a else 'No'}. This is a dummy response as local modules were not found."
     def build_logic_create_space(*args, **kwargs): return "Error: build_logic not found."
     def build_logic_get_api_token(key): return (key or os.getenv("HF_TOKEN"), None)
     def build_logic_whoami(token): return {"name": "dummy_user"}
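These stubs are the `except ImportError:` fallback that lets the app launch even when its local helper modules are absent; every real function gets a dummy twin with the same signature. A minimal sketch of the pattern (the module name `ai_providers` is an assumption; only `generate_stream`'s signature appears in the diff):

    import os

    try:
        from ai_providers import generate_stream  # real implementation (name assumed)
    except ImportError:
        # Dummy twin: same signature, canned output, so the UI stays usable.
        def generate_stream(p, m, a, msgs):
            yield f"Using dummy model. API Key provided: {'Yes' if a else 'No'}."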
@@ -319,18 +320,18 @@ def generate_and_stage_changes(ai_response_content, current_files_state, hf_owner_name, hf_repo_name):
     """
     changeset = []
     current_files_dict = {f["filename"]: f for f in current_files_state if not f.get("is_structure_block")}
-
+
     # 1. Parse proposed files from AI response
     parsing_result = _parse_chat_stream_logic(ai_response_content, existing_files_state=current_files_state)
     proposed_files = parsing_result.get("parsed_code_blocks", [])
-
+
     # 2. Parse HF_ACTION commands from AI response
     action_pattern = re.compile(r"### HF_ACTION:\s*(?P<command_line>[^\n]+)")
     for match in action_pattern.finditer(ai_response_content):
         cmd_parts = shlex.split(match.group("command_line").strip())
         if not cmd_parts: continue
         command, args = cmd_parts[0].upper(), cmd_parts[1:]
-
+
         # Add actions to the changeset
         if command == "DELETE_FILE" and args:
             changeset.append({"type": "DELETE_FILE", "path": args[0]})
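For context, the `HF_ACTION` directives this hunk scans for are single lines in the AI's reply, tokenized with `shlex` so quoted arguments survive splitting. A standalone sketch of that step (the directive names come from the diff; the sample reply text is invented):

    import re
    import shlex

    ai_response_content = """Here is the plan.
    ### HF_ACTION: DELETE_FILE old_app.py
    ### HF_ACTION: CREATE_SPACE demo-user/new-space --sdk gradio --private true
    """

    action_pattern = re.compile(r"### HF_ACTION:\s*(?P<command_line>[^\n]+)")
    for match in action_pattern.finditer(ai_response_content):
        cmd_parts = shlex.split(match.group("command_line").strip())
        command, args = cmd_parts[0].upper(), cmd_parts[1:]
        print(command, args)
        # DELETE_FILE ['old_app.py']
        # CREATE_SPACE ['demo-user/new-space', '--sdk', 'gradio', '--private', 'true']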
@@ -345,24 +346,24 @@ def generate_and_stage_changes(ai_response_content, current_files_state, hf_owner_name, hf_repo_name):
            if '--sdk' in args: sdk = args[args.index('--sdk') + 1]
            if '--private' in args: private = args[args.index('--private') + 1].lower() == 'true'
            changeset.append({"type": "CREATE_SPACE", "repo_id": repo_id, "sdk": sdk, "private": private})
-
+
     # 3. Compare proposed files with current files to determine CREATE/UPDATE
     for file_block in proposed_files:
         if file_block.get("is_structure_block"): continue
-
+
         filename = file_block["filename"]
         if filename not in current_files_dict:
             changeset.append({"type": "CREATE_FILE", "path": filename, "content": file_block["code"], "lang": file_block["language"]})
         elif file_block["code"] != current_files_dict[filename]["code"]:
             changeset.append({"type": "UPDATE_FILE", "path": filename, "content": file_block["code"], "lang": file_block["language"]})
-
+
     # 4. Format the changeset into a human-readable Markdown string
     if not changeset:
         return [], "The AI did not propose any specific changes to files or the space.", parsing_result

     md_summary = ["### Proposed Changes Plan\n"]
     md_summary.append("The AI has proposed the following changes. Please review and confirm.")
-
+
     for change in changeset:
         if change["type"] == "CREATE_FILE":
             md_summary.append(f"- **Create File:** `{change['path']}`")
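Step 3 reduces to a dictionary lookup plus a string comparison: unknown filenames become CREATE_FILE entries, changed contents become UPDATE_FILE entries, and identical files produce nothing. A self-contained sketch with toy data shaped like the diff's file blocks:

    current_files_dict = {
        "app.py": {"filename": "app.py", "code": "print('v1')"},
    }
    proposed_files = [
        {"filename": "app.py", "code": "print('v2')", "language": "python"},
        {"filename": "README.md", "code": "# Demo", "language": "markdown"},
    ]

    changeset = []
    for file_block in proposed_files:
        filename = file_block["filename"]
        if filename not in current_files_dict:
            changeset.append({"type": "CREATE_FILE", "path": filename})
        elif file_block["code"] != current_files_dict[filename]["code"]:
            changeset.append({"type": "UPDATE_FILE", "path": filename})

    print(changeset)
    # [{'type': 'UPDATE_FILE', 'path': 'app.py'}, {'type': 'CREATE_FILE', 'path': 'README.md'}]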
@@ -376,15 +377,15 @@ def generate_and_stage_changes(ai_response_content, current_files_state, hf_owner_name, hf_repo_name):
             md_summary.append(f"- **Set Privacy:** Set `{change['repo_id']}` to `private={change['private']}`")
         elif change["type"] == "DELETE_SPACE":
             md_summary.append(f"- **DELETE ENTIRE SPACE:** `{change['owner']}/{change['space_name']}` **(DESTRUCTIVE ACTION)**")
-
+
     return changeset, "\n".join(md_summary), parsing_result

 # --- Gradio Event Handlers ---

-def handle_chat_submit(user_message, chat_history, hf_api_key_input, provider_select, model_select, system_prompt, hf_owner_name, hf_repo_name):
+def handle_chat_submit(user_message, chat_history, hf_api_key_input, provider_api_key_input, provider_select, model_select, system_prompt, hf_owner_name, hf_repo_name):
     global parsed_code_blocks_state_cache
     _chat_msg_in, _chat_hist = "", list(chat_history)
-
+
     # UI updates for streaming
     yield (
         _chat_msg_in, _chat_hist, "Initializing...",
@@ -406,7 +407,7 @@ def handle_chat_submit(user_message, chat_history, hf_api_key_input, provider_select, model_select, system_prompt, hf_owner_name, hf_repo_name):
         gr.update(), gr.update(), gr.update(),
         [], gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
     )
-
+
     # Prepare context for the AI
     current_sys_prompt = system_prompt.strip() or DEFAULT_SYSTEM_PROMPT
     export_result = _export_selected_logic(None, f"{hf_owner_name}/{hf_repo_name}", parsed_code_blocks_state_cache)
@@ -416,7 +417,9 @@ def handle_chat_submit(user_message, chat_history, hf_api_key_input, provider_select, model_select, system_prompt, hf_owner_name, hf_repo_name):

     try:
         full_bot_response_content = ""
-
+        # Pass the provider API key from the UI to the generation logic
+        streamer = generate_stream(provider_select, model_select, provider_api_key_input, api_msgs)
+        for chunk in streamer:
             if chunk is None: continue
             if isinstance(chunk, str) and (chunk.startswith("Error:") or chunk.startswith("API HTTP Error")):
                 full_bot_response_content = chunk; break
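The heart of this hunk is ordinary generator consumption: `generate_stream` yields string chunks, which the handler accumulates into `full_bot_response_content`, bailing out early on error-prefixed chunks. A sketch of the accumulation loop, assuming the stream yields plain strings:

    def generate_stream_stub(p, m, a, msgs):
        # Stand-in for the real streaming call; yields plain string chunks.
        yield "Hello, "
        yield "world."

    full_bot_response_content = ""
    for chunk in generate_stream_stub("DummyProvider", "dummy-model", None, []):
        if chunk is None:
            continue
        if isinstance(chunk, str) and chunk.startswith("Error:"):
            full_bot_response_content = chunk
            break
        full_bot_response_content += chunk

    print(full_bot_response_content)  # "Hello, world."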
@@ -433,10 +436,10 @@ def handle_chat_submit(user_message, chat_history, hf_api_key_input, provider_select, model_select, system_prompt, hf_owner_name, hf_repo_name):
         yield (_chat_msg_in, _chat_hist, _status, gr.update(), gr.update(), gr.update(), [], gr.update(), gr.update(), gr.update(), gr.update())
         return

-    #
+    # Instead of applying, generate and stage changes
     _status = "Stream complete. Generating change plan..."
     yield (_chat_msg_in, _chat_hist, _status, gr.update(), gr.update(), gr.update(), [], gr.update(), gr.update(), gr.update(), gr.update())
-
+
     staged_changeset, summary_md, parsing_res = generate_and_stage_changes(full_bot_response_content, parsed_code_blocks_state_cache, hf_owner_name, hf_repo_name)

     if parsing_res["error_message"]:
@@ -479,7 +482,7 @@ def handle_confirm_changes(hf_api_key, owner_name, space_name, changeset):
        return "No changes to apply.", gr.update(), gr.update(), gr.update(), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)

    status_messages = []
-
+
    # Handle space creation first, as other ops might depend on it
    create_space_op = next((c for c in changeset if c['type'] == 'CREATE_SPACE'), None)
    if create_space_op:
@@ -489,12 +492,12 @@ def handle_confirm_changes(hf_api_key, owner_name, space_name, changeset):
        # We need to pass the full markdown for creation. Let's build it from the plan.
        # This is a simplification; a more robust solution would pass the planned files directly.
        # For now, we assume the AI provides file content for the new space.
-
+
        planned_files_md = [f"# Space: {create_space_op['repo_id']}"]
        for change in changeset:
            if change['type'] in ['CREATE_FILE', 'UPDATE_FILE']:
                planned_files_md.append(f"### File: {change['path']}\n{bbb}{change.get('lang', 'plaintext')}\n{change['content']}\n{bbb}")
-
+
        markdown_for_creation = "\n\n".join(planned_files_md)

        result = build_logic_create_space(
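`bbb` is not defined anywhere in this diff; from its use it is presumably a module-level constant holding a triple-backtick fence, so the f-string can emit fenced code blocks without quoting headaches. A hedged sketch of the assembly step under that assumption:

    bbb = chr(96) * 3  # assumed: the ``` fence kept in a variable to dodge escaping

    change = {"path": "app.py", "content": "print('hi')", "lang": "python"}
    planned_files_md = ["# Space: demo-user/new-space"]
    planned_files_md.append(
        f"### File: {change['path']}\n{bbb}{change.get('lang', 'plaintext')}\n{change['content']}\n{bbb}"
    )
    markdown_for_creation = "\n\n".join(planned_files_md)
    print(markdown_for_creation)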
@@ -543,10 +546,10 @@ def handle_confirm_changes(hf_api_key, owner_name, space_name, changeset):
            parsed_code_blocks_state_cache = [] # Clear everything
        except Exception as e:
            status_messages.append(f"Error applying {change['type']} for {change.get('path', '')}: {e}")
-
+
    final_status = " | ".join(status_messages)
    _formatted, _detected, _download = _generate_ui_outputs_from_cache(owner_name, space_name)
-
+
    # Hide the confirmation UI and clear the state
    return final_status, _formatted, _detected, _download, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), []

@@ -664,7 +667,7 @@ def handle_commit_file_changes(hf_api_key_ui, ui_space_name_part, ui_owner_name_
 def handle_delete_file(hf_api_key_ui, ui_space_name_part, ui_owner_name_part, file_to_delete_path):
     if not file_to_delete_path:
         return "No file selected to delete.", gr.update(), "", "", "plaintext", gr.update(), gr.update(), gr.update()
-
+
     status_msg = build_logic_delete_space_file(hf_api_key_ui, ui_space_name_part, ui_owner_name_part, file_to_delete_path)
     file_list, _ = list_space_files_for_browsing(hf_api_key_ui, ui_space_name_part, ui_owner_name_part)
     global parsed_code_blocks_state_cache
@@ -676,7 +679,7 @@ def handle_delete_file(hf_api_key_ui, ui_space_name_part, ui_owner_name_part, file_to_delete_path):
 def handle_refresh_space_status(hf_api_key_ui, ui_owner_name, ui_space_name):
     if not ui_owner_name or not ui_space_name:
         return "Owner and Space Name must be provided to get status."
-
+
     status, err = get_space_runtime_status(hf_api_key_ui, ui_space_name, ui_owner_name)
     if err: return f"**Error:** {err}"
     if not status: return "Could not retrieve status."
@@ -706,9 +709,9 @@ body { background: linear-gradient(to bottom right, #2c3e50, #34495e); color: #e

 # --- Gradio UI Definition ---
 with gr.Blocks(theme=custom_theme, css=custom_css) as demo:
-    #
+    # State to hold the plan
     changeset_state = gr.State([])
-
+
     gr.Markdown("# AI-Powered Hugging Face Space Builder")
     gr.Markdown("Use an AI assistant to create, modify, build, and manage your Hugging Face Spaces directly from this interface.")

@@ -723,6 +726,8 @@ with gr.Blocks(theme=custom_theme, css=custom_css) as demo:
         with gr.Accordion("AI Model Settings", open=True):
             provider_select = gr.Dropdown(label="AI Provider", choices=get_available_providers(), value=get_default_model_for_provider(get_available_providers()[0] if get_available_providers() else 'Groq'))
             model_select = gr.Dropdown(label="AI Model", choices=[])
+            # --- NEW UI ELEMENT ---
+            provider_api_key_input = gr.Textbox(label="Model Provider API Key (Optional)", type="password", placeholder="sk_... (overrides backend settings)")
             system_prompt_input = gr.Textbox(label="System Prompt", lines=10, value=DEFAULT_SYSTEM_PROMPT, elem_id="system-prompt")

         with gr.Column(scale=2):
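The new password Textbox hands the raw UI value straight to `generate_stream`. A common companion pattern, mirroring what `build_logic_get_api_token` already does for the HF token, is to fall back to a backend setting when the field is left blank; a sketch, with the environment-variable name being illustrative rather than from the diff:

    import os

    def resolve_provider_key(ui_key):
        # Prefer the key typed into the UI; otherwise fall back to the
        # backend configuration. "PROVIDER_API_KEY" is an assumed name.
        return ui_key or os.getenv("PROVIDER_API_KEY")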
@@ -733,7 +738,7 @@ with gr.Blocks(theme=custom_theme, css=custom_css) as demo:
             send_chat_button = gr.Button("Send", variant="primary", scale=1)
             status_output = gr.Textbox(label="Last Action Status", interactive=False, value="Ready.")

-            #
+            # Confirmation Accordion
             with gr.Accordion("Proposed Changes (Pending Confirmation)", visible=False) as confirm_accordion:
                 changeset_display = gr.Markdown("No changes proposed.")
                 with gr.Row():
@@ -776,7 +781,8 @@ with gr.Blocks(theme=custom_theme, css=custom_css) as demo:
    # --- Event Listeners ---
    provider_select.change(update_models_dropdown, inputs=provider_select, outputs=model_select)

-    chat_inputs = [chat_message_input, chatbot_display, hf_api_key_input, provider_select, model_select, system_prompt_input, owner_name_input, space_name_input]
+    # --- UPDATED chat_inputs LIST ---
+    chat_inputs = [chat_message_input, chatbot_display, hf_api_key_input, provider_api_key_input, provider_select, model_select, system_prompt_input, owner_name_input, space_name_input]
    chat_outputs = [
        chat_message_input, chatbot_display, status_output,
        detected_files_preview, formatted_space_output_display, download_button,
@@ -785,7 +791,7 @@ with gr.Blocks(theme=custom_theme, css=custom_css) as demo:
    send_chat_button.click(handle_chat_submit, inputs=chat_inputs, outputs=chat_outputs)
    chat_message_input.submit(handle_chat_submit, inputs=chat_inputs, outputs=chat_outputs)

-    #
+    # Confirmation Button Listeners
    confirm_inputs = [hf_api_key_input, owner_name_input, space_name_input, changeset_state]
    confirm_outputs = [
        status_output, formatted_space_output_display, detected_files_preview, download_button,
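The wiring here is positional: `chat_inputs` must list components in the exact order of `handle_chat_submit`'s parameters, and every tuple the handler yields must match `chat_outputs` slot for slot, which is why adding `provider_api_key_input` touches the signature, the inputs list, and nothing else. A minimal illustration of that Gradio convention with toy components, not the app's:

    import gradio as gr

    def echo(message, history):
        # Parameter order matches the inputs list below.
        history = history + [(message, message.upper())]
        # Return order matches the outputs list below.
        return "", history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        msg = gr.Textbox()
        msg.submit(echo, inputs=[msg, chatbot], outputs=[msg, chatbot])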