Update main_app.py

main_app.py CHANGED: +656 −338
Changed hunks in the previous version (removed lines are marked -; deleted lines in this view are cut off where they end abruptly):

@@ -19,7 +19,10 @@ with app.setup:
- from cloudant_helper_functions import
@@ -28,41 +31,42 @@ with app.setup:
- with open(file_path,
- intro_text = get_markdown_content(
- intro_marimo = get_markdown_content(
- intro_notebook = get_markdown_content(
- intro_comparison = get_markdown_content(
- intro = mo.carousel(
- os.environ[
- os.makedirs(
- tempfile.tempdir =
@@ -114,6 +118,7 @@ def _(client_instantiation_form, os):
@@ -126,12 +131,13 @@ def _(mo):
- "CA": "https://ca-tor.ml.cloud.ibm.com"
- mo.md(
@@ -139,13 +145,32 @@ def _(mo):
- wx_region
@@ -165,13 +190,12 @@ def _(
- wx_credentials = Credentials(
-     url=wx_url,
-     api_key=wx_api_key
- )
- project_client = APIClient(
@@ -190,7 +214,6 @@ def _(
@@ -224,6 +247,7 @@ def _():
@@ -233,20 +257,76 @@ def _(cloudant, cloudant_db):
- variant_selectors_list = [
- # variant_selectors_list
- return (variant_selectors_list,)
- def _(mo):
- template_variant = mo.ui.dropdown(
@@ -257,19 +337,27 @@ def _(cloudant, cloudant_db, template_variant):
- template =
- client_stack = mo.hstack(
- client_section = mo.md(
@@ -284,7 +372,8 @@ def _(client_stack, mo):
@@ -293,9 +382,11 @@ def _(mo, sc_m, schema_editors):
- "Schema Definition": mo.md(
@@ -303,7 +394,8 @@ def _(mo, sc_m, schema_editors):
- function_section = mo.md(
@@ -340,13 +432,15 @@ def _(fm, function_editor, mo, sc_tabs):
- upload_section = mo.md(
@@ -365,13 +459,15 @@ def _(mo, selection_table, upload_func):
- deployment_section = mo.md(
@@ -402,13 +498,15 @@ def _(deploy_fnc, deployment_definition, hw_selection_table, mo):
- purging_section = mo.md(
@@ -419,7 +517,8 @@ def _(mo, purge_tabs):
@@ -433,7 +532,8 @@ def _(
- packages_section = mo.md(
@@ -464,7 +564,8 @@ def _(
@@ -472,7 +573,7 @@ def _(client_section, mo):
- )
@@ -481,7 +582,7 @@ def _(client_section, mo):
- )
@@ -490,7 +591,7 @@ def _(function_section, mo):
- )
@@ -499,7 +600,7 @@ def _(mo, packages_section):
- )
@@ -508,7 +609,7 @@ def _(mo, upload_section):
- )
@@ -517,25 +618,32 @@ def _(deployment_section, mo):
- )
- def _(mo, template_variant):
-     mo.md(
-     editor
@@ -546,7 +654,7 @@ def _(ast, function_editor, mo, os):
- code = function_editor.value[
@@ -577,14 +685,14 @@ def _(ast, function_editor, mo, os):
- def _():
@@ -597,12 +705,17 @@ def _():
- return (yaml_templates,)
- def _(mo, yaml_templates):
- yaml_template = mo.ui.dropdown(
@@ -610,19 +723,21 @@ def _(mo, yaml_templates):
- temp_file = tempfile.NamedTemporaryFile(suffix=
- with open(temp_file.name,
- def _(mo, yaml_template):
- pkg_types = {"Conda Yaml":"conda_yml","Custom user library":"custom_library"}
-     mo.md(
@@ -633,43 +748,63 @@ def _(mo, yaml_template):
- yml_editor
- check_packages =(
- package_list
@@ -679,9 +814,9 @@ def _(mo):
- packages = check_packages.value[
- verification_list = [item.strip() for item in packages.split(
- full_list_return = check_packages.value[
@@ -698,7 +833,11 @@ def _(
- pkg_analysis = analyze_software_spec(
@@ -727,16 +866,19 @@ def _(client, create_yaml_tempfile, package_meta, uuid):
- pack_name = package_meta.value[
- client.package_extensions.ConfigurationMetaNames.TYPE: package_meta.value[
- yaml_file_path = create_yaml_tempfile(package_meta.value[
- pe_metadata = {
- }
@@ -747,8 +889,7 @@ def _(client, pe_metadata, yaml_file_path):
- meta_props=pe_metadata,
- file_path=yaml_file_path
@@ -760,7 +901,9 @@ def _(client, pe_metadata, yaml_file_path):
- def analyze_software_spec(
@@ -776,18 +919,16 @@ def _():
- result = {
-     "present": {},
-     "not_present": {},
-     "version_mismatch": {}
- }
- included_packages = sw_spec_response["entity"]["software_specification"][
@@ -812,14 +953,20 @@ def _():
- result["version_mismatch"][lib_name] = [
- req_libs_names = [
@@ -832,13 +979,14 @@ def _():
- result["sw_packages"] = {
@@ -893,23 +1041,31 @@ def _():
- rows.append(
- df =
- packages = set(
@@ -919,7 +1075,9 @@ def _():
- version = analysis_result["version_mismatch"][package][
@@ -927,37 +1085,57 @@ def _():
- rows.append(
- df =
- def _(
- ss_name = package_meta.value[
- client.software_specifications.ConfigurationMetaNames.DESCRIPTION: package_meta.value[
@@ -983,8 +1161,9 @@ def _(mo):
- def _(mo, client):
@@ -1002,13 +1181,15 @@ def _(client, mo, pd, specs_df):
- (specs_df[
- (
- derived_specs = specs_df[
-     (specs_df['TYPE'] == 'derived')
- ]
@@ -1024,7 +1205,7 @@ def _(client, mo, pd, specs_df):
- "autoai-rag_rt24.1-py3.11": "AutoAI RAG"
@@ -1043,42 +1224,51 @@ def _(client, mo, pd, specs_df):
- supported_specs[
- lambda x:
- supported_specs = supported_specs.sort_values(
- supported_specs = supported_specs.drop(columns=[
- if
- supported_specs = supported_specs.drop(columns=[
- supported_specs[
- initial_selection=[
- columns=["ID", "VALUE"]
- initial_selection=[0]
@@ -1093,7 +1283,7 @@ def _(mo):
- set_selected_sw_spec(selection_table.value[
@@ -1106,7 +1296,7 @@ def _(mo):
- set_selected_hw_spec(hw_selection_table.value[
@@ -1126,6 +1316,7 @@ def _(
@@ -1134,11 +1325,23 @@ def _(
- uploaded_function_name = mo.ui.text(
@@ -1147,9 +1350,9 @@ def _(
- full_width=True
@@ -1157,125 +1360,167 @@ def _(
- func_metadata=mo.hstack(
- schema_metadata=mo.hstack(
- fm = mo.vstack(
- gap=2
- sc_m = mo.vstack(
- gap=2
- def _(json, mo, template_variant):
- input_schema_editor = mo.ui.code_editor(
- input_schema_editor = mo.ui.code_editor(
- },
@@ -1302,8 +1547,10 @@ def _(
- client.repository.FunctionMetaNames.NAME: f"{uploaded_function_name.value}"
@@ -1313,26 +1560,34 @@ def _(
- function_meta[client.repository.FunctionMetaNames.DESCRIPTION] =
- function_meta[
- function_meta[
- function_meta[
- function_meta[
@@ -1348,7 +1603,7 @@ def _(
- code_to_deploy = function_editor.value[
@@ -1372,14 +1627,16 @@ def _(
- os.chdir(
- func_details = client.repository.store_function(
- os.chdir(
@@ -1389,8 +1646,8 @@ def _(
- with open(file_path,
- with gzip.open(gz_path,
@@ -1407,13 +1664,13 @@ def _(
- upload_status =
- tooltip="Click to upload function to watsonx.ai"
@@ -1422,18 +1679,19 @@ def _(
- # Upload your function
- artifact_id = upload_result[
- upload_func = mo.vstack(
-     upload_button,
@@ -1457,12 +1715,30 @@ def _(client, mo, pd, upload_button, uuid):
- "XXS",
- "XS
@@ -1473,10 +1749,10 @@ def _(client, mo, pd, upload_button, uuid):
- result_df[
- result_df = result_df.sort_values(
@@ -1495,26 +1771,33 @@ def _(client, mo, pd, upload_button, uuid):
- wrapped_columns=[
- deployment_name = mo.ui.text(
- columns=["ID", "VALUE"]
- initial_selection=[0]
@@ -1547,18 +1830,24 @@ def _(
- if
- client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {
- client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {
@@ -1566,11 +1855,15 @@ def _(
- print(
- print(
@@ -1584,7 +1877,7 @@ def _(
- deployment_status =
@@ -1593,19 +1886,18 @@ def _(
- tooltip="Click to deploy function to watsonx.ai"
- deployment_definition = mo.hstack(
-     deployment_type,
- ], justify="space-around")
- deployment_definition = mo.hstack(
-     "No Deployment Type Selected",
@@ -1615,10 +1907,9 @@ def _(
- deploy_fnc = mo.vstack(
-     deploy_button,
- ], justify="space-around", align="center")
@@ -1630,17 +1921,19 @@ def _(client, mo, pd, sys):
- columns_to_drop = [
- dep_list = df[
@@ -1648,25 +1941,29 @@ def _(client, mo, pd, sys):
- data_asset_list = df[
- columns_to_drop = [
- repository_list = df[
@@ -1674,96 +1971,95 @@ def _(client, mo, pd, sys):
- pkg_ext_id_list = df[
- derived_sws_list_df = sws_list_df[
-     (sws_list_df['TYPE'] == 'derived')
- ]
- columns_to_drop = [
- sws_id_list = df[
- def delete_with_progress(
- remove_on_exit=True
- error_msg =
-     deployment_ids,
-     lambda id: client.deployments.delete(id),
-     "deployments"
-     data_asset_ids,
-     lambda id: client.data_assets.delete(id),
-     "data assets"
-     repository_ids,
-     lambda id: client.repository.delete(id),
-     "repository items"
-     pkg_ids,
-     "package extensions"
-     sws_ids,
-     "software specifications"
@@ -1817,7 +2113,6 @@ def _(
@@ -1835,10 +2130,16 @@ def _(
- deployments_purge_stack = mo.hstack(
- deployments_purge_tab = mo.vstack(
@@ -1850,10 +2151,16 @@ def _(
- repository_purge_stack = mo.hstack(
- repository_purge_tab = mo.vstack(
@@ -1865,17 +2172,25 @@ def _(
- data_assets_purge_stack = mo.hstack(
- data_assets_purge_tab = mo.vstack(
- sws_purge_stack_results = mo.vstack(
@@ -1889,8 +2204,12 @@ def _(
- pkg_ext_purge_stack = mo.hstack(
@@ -1907,13 +2226,13 @@ def _(
- "Purge Deployments": deployments_purge_tab,
- }
@@ -2058,7 +2377,6 @@ def _(
      kind="danger",
  )
  ### Package Extensions Purge
  get_pkg_ext_button = mo.ui.button(
      label="Get Package Extensions Dataframe",
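The update replaces the hard-coded code template with documents pulled from Cloudant via cloudant_helper_functions, keeping an inline fallback_template for when nothing comes back. A minimal, self-contained sketch of that selection flow; the stubbed fetch function and the {"docs": [...]} shape are assumptions inferred from how cloudant_retrieve_documents is called in the listing below, not the helper's documented API:

```python
# Sketch only: stands in for cloudant_retrieve_documents(client=..., fields=["category"], ...)
def fetch_templates_stub(category):
    # Pretend Cloudant returned no matching documents for this category
    return {"docs": []}

# Trimmed-down stand-in for the notebook's inline fallback_template
fallback_template = {
    "function_name": "deployable_function",
    "dependencies": ["<library_to_add>"],
}

retrieved = fetch_templates_stub("base_template")
# Same fallback rule as the new cell: first retrieved doc, otherwise the inline template
template = retrieved["docs"][0] if retrieved and retrieved["docs"] else fallback_template
print(template["function_name"])  # -> deployable_function
```

The full new-side listing follows.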
19 |
from ibm_watsonx_ai import APIClient, Credentials
|
20 |
from ibmcloudant.cloudant_v1 import CloudantV1, Document
|
21 |
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
|
22 |
+
from cloudant_helper_functions import (
|
23 |
+
cloudant_retrieve_documents,
|
24 |
+
ensure_database_exists,
|
25 |
+
)
|
26 |
import marimo as mo
|
27 |
import pandas as pd
|
28 |
import requests
|
|
|
31 |
load_dotenv()
|
32 |
|
33 |
|
|
|
34 |
@app.function
|
35 |
def get_markdown_content(file_path):
|
36 |
+
with open(file_path, "r", encoding="utf-8") as file:
|
37 |
content = file.read()
|
38 |
return content
|
39 |
|
40 |
|
41 |
@app.cell
|
42 |
def _(mo, get_markdown_content):
|
43 |
+
intro_text = get_markdown_content("intro_markdown/intro.md")
|
44 |
+
intro_marimo = get_markdown_content("intro_markdown/intro_marimo.md")
|
45 |
+
intro_notebook = get_markdown_content("intro_markdown/intro_notebook.md")
|
46 |
+
intro_comparison = get_markdown_content("intro_markdown/intro_comparison.md")
|
47 |
+
|
48 |
+
intro = mo.carousel(
|
49 |
+
[
|
50 |
+
mo.md(f"{intro_text}"),
|
51 |
+
mo.md(f"{intro_marimo}"),
|
52 |
+
mo.md(f"{intro_notebook}"),
|
53 |
+
mo.md(f"{intro_comparison}"),
|
54 |
+
]
|
55 |
+
)
|
56 |
+
|
57 |
+
mo.accordion({"## Notebook Introduction": intro})
|
58 |
|
59 |
|
60 |
@app.cell
|
61 |
def _(os):
|
62 |
# Set explicit temporary directory
|
63 |
+
os.environ["TMPDIR"] = "/tmp/notebook_functions"
|
64 |
|
65 |
# Create the directory if it doesn't exist
|
66 |
+
os.makedirs("/tmp/notebook_functions", exist_ok=True)
|
67 |
|
68 |
# Make sure Python's tempfile module also uses this directory
|
69 |
+
tempfile.tempdir = "/tmp/notebook_functions"
|
70 |
|
71 |
def setup_task_credentials(client):
|
72 |
# Get existing task credentials
|
|
|
118 |
wx_url = None
|
119 |
return client_setup, project_id, space_id, wx_api_key, wx_url
|
120 |
|
121 |
+
|
122 |
@app.cell
|
123 |
def _(mo):
|
124 |
### Credentials for the watsonx.ai SDK client
|
|
|
131 |
"GB": "https://eu-gb.ml.cloud.ibm.com",
|
132 |
"JP": "https://jp-tok.ml.cloud.ibm.com",
|
133 |
"AU": "https://au-syd.ml.cloud.ibm.com",
|
134 |
+
"CA": "https://ca-tor.ml.cloud.ibm.com",
|
135 |
}
|
136 |
|
137 |
# Create a form with multiple elements
|
138 |
client_instantiation_form = (
|
139 |
+
mo.md(
|
140 |
+
"""
|
141 |
###**watsonx.ai credentials:**
|
142 |
|
143 |
{wx_region}
|
|
|
145 |
{wx_api_key}
|
146 |
|
147 |
{space_id}
|
148 |
+
"""
|
149 |
+
)
|
150 |
+
.style(max_height="300px", overflow="auto", border_color="blue")
|
151 |
.batch(
|
152 |
+
wx_region=mo.ui.dropdown(
|
153 |
+
regions,
|
154 |
+
label="Select your watsonx.ai region:",
|
155 |
+
value="US",
|
156 |
+
searchable=True,
|
157 |
+
),
|
158 |
+
wx_api_key=mo.ui.text(
|
159 |
+
placeholder="Add your IBM Cloud api-key...",
|
160 |
+
label="IBM Cloud Api-key:",
|
161 |
+
kind="password",
|
162 |
+
),
|
163 |
+
project_id=mo.ui.text(
|
164 |
+
placeholder="Add your watsonx.ai project_id...",
|
165 |
+
label="Project_ID:",
|
166 |
+
kind="text",
|
167 |
+
),
|
168 |
+
space_id=mo.ui.text(
|
169 |
+
placeholder="Add your watsonx.ai space_id...",
|
170 |
+
label="Space_ID:",
|
171 |
+
kind="text",
|
172 |
+
),
|
173 |
+
)
|
174 |
.form(show_clear_button=True, bordered=False)
|
175 |
)
|
176 |
|
|
|
190 |
):
|
191 |
### Instantiate the watsonx.ai client
|
192 |
if client_setup:
|
193 |
+
wx_credentials = Credentials(url=wx_url, api_key=wx_api_key)
|
|
|
|
|
|
|
194 |
|
195 |
if project_id:
|
196 |
+
project_client = APIClient(
|
197 |
+
credentials=wx_credentials, project_id=project_id
|
198 |
+
)
|
199 |
else:
|
200 |
project_client = None
|
201 |
|
|
|
214 |
task_credentials_details = setup_task_credentials(project_client)
|
215 |
else:
|
216 |
task_credentials_details = None
|
|
|
217 |
|
218 |
else:
|
219 |
wx_credentials = None
|
|
|
247 |
os.environ["REQUESTS_CA_BUNDLE"] = certifi.where()
|
248 |
return (cloudant,)
|
249 |
|
250 |
+
|
251 |
@app.cell
|
252 |
def _(cloudant, cloudant_db):
|
253 |
variant_selectors = cloudant_retrieve_documents(
|
|
|
257 |
fields=["category"],
|
258 |
)
|
259 |
|
260 |
+
variant_selectors_list = [item["category"] for item in variant_selectors["docs"]]
|
261 |
+
return variant_selectors_list
|
262 |
+
|
|
|
|
|
263 |
|
264 |
@app.cell
|
265 |
+
def _(mo, variant_selectors_list):
|
266 |
template_variants = variant_selectors_list
|
267 |
+
template_variant = mo.ui.dropdown(
|
268 |
+
template_variants, label="Code Template:", value=template_variants[0]
|
269 |
+
)
|
270 |
return (template_variant,)
|
271 |
|
272 |
+
|
273 |
@app.cell
|
274 |
def _(cloudant, cloudant_db, template_variant):
|
275 |
+
fallback_template = {
|
276 |
+
"function_name": "deployable_function",
|
277 |
+
"function_code": """def your_function_name():
|
278 |
+
import subprocess
|
279 |
+
subprocess.check_output('pip install gensim', shell=True)
|
280 |
+
import gensim
|
281 |
+
def score(input_data):
|
282 |
+
message_from_input_payload = payload.get("input_data")[0].get("values")[0][0]
|
283 |
+
response_message = "Received message - {0}".format(message_from_input_payload)
|
284 |
+
# Score using the pre-defined model
|
285 |
+
score_response = {
|
286 |
+
'predictions': [{'fields': ['Response_message_field', 'installed_lib_version'],
|
287 |
+
'values': [[response_message, gensim.__version__]]
|
288 |
+
}]
|
289 |
+
}
|
290 |
+
return score_response
|
291 |
+
return score
|
292 |
+
score = your_function_name()""",
|
293 |
+
"use_cases": ["add_tags"],
|
294 |
+
"input_schema": [
|
295 |
+
{
|
296 |
+
"id": "1",
|
297 |
+
"type": "struct",
|
298 |
+
"fields": [
|
299 |
+
{
|
300 |
+
"name": "<variable name 1>",
|
301 |
+
"type": "string",
|
302 |
+
"nullable": False,
|
303 |
+
"metadata": {},
|
304 |
+
},
|
305 |
+
{
|
306 |
+
"name": "<variable name 2>",
|
307 |
+
"type": "string",
|
308 |
+
"nullable": False,
|
309 |
+
"metadata": {},
|
310 |
+
},
|
311 |
+
],
|
312 |
+
}
|
313 |
+
],
|
314 |
+
"output_schema": [
|
315 |
+
{
|
316 |
+
"id": "1",
|
317 |
+
"type": "struct",
|
318 |
+
"fields": [
|
319 |
+
{
|
320 |
+
"name": "<output return name>",
|
321 |
+
"type": "string",
|
322 |
+
"nullable": False,
|
323 |
+
"metadata": {},
|
324 |
+
}
|
325 |
+
],
|
326 |
+
}
|
327 |
+
],
|
328 |
+
"dependencies": ["<library_to_add>"],
|
329 |
+
}
|
330 |
retrieved_template = (
|
331 |
cloudant_retrieve_documents(
|
332 |
client=cloudant,
|
|
|
337 |
if template_variant.value
|
338 |
else None
|
339 |
)
|
340 |
+
template = (
|
341 |
+
retrieved_template["docs"][0] if retrieved_template else fallback_template
|
342 |
+
)
|
343 |
return (template,)
|
344 |
|
345 |
+
|
346 |
@app.cell
|
347 |
def _(client_callout_kind, client_instantiation_form, mo, template_variant):
|
348 |
client_callout = mo.callout(template_variant, kind=client_callout_kind)
|
349 |
+
client_stack = mo.hstack(
|
350 |
+
[client_instantiation_form, client_callout],
|
351 |
+
align="center",
|
352 |
+
justify="space-around",
|
353 |
+
)
|
354 |
return (client_stack,)
|
355 |
|
356 |
|
357 |
@app.cell
|
358 |
def _(client_stack, mo):
|
359 |
+
client_section = mo.md(
|
360 |
+
f"""
|
361 |
###**Instantiate your watsonx.ai client:**
|
362 |
|
363 |
1. Select a region from the dropdown menu
|
|
|
372 |
|
373 |
{client_stack}
|
374 |
|
375 |
+
"""
|
376 |
+
)
|
377 |
return (client_section,)
|
378 |
|
379 |
|
|
|
382 |
sc_tabs = mo.ui.tabs(
|
383 |
{
|
384 |
"Schema Option Selection": sc_m,
|
385 |
+
"Schema Definition": mo.md(
|
386 |
+
f"""
|
387 |
####**Edit the schema definitions you selected in the previous tab.**<br>
|
388 |
+
{schema_editors}"""
|
389 |
+
),
|
390 |
}
|
391 |
)
|
392 |
return (sc_tabs,)
|
|
|
394 |
|
395 |
@app.cell
|
396 |
def _(fm, function_editor, mo, sc_tabs):
|
397 |
+
function_section = mo.md(
|
398 |
+
f"""###**Create your function from the template:**
|
399 |
|
400 |
1. Use the code editor window to create a function to deploy
|
401 |
<br>
|
|
|
432 |
|
433 |
{fm}
|
434 |
|
435 |
+
"""
|
436 |
+
)
|
437 |
return (function_section,)
|
438 |
|
439 |
|
440 |
@app.cell
|
441 |
def _(mo, selection_table, upload_func):
|
442 |
+
upload_section = mo.md(
|
443 |
+
f"""
|
444 |
###**Review and Upload your function**
|
445 |
|
446 |
1. Review the function metadata specs JSON
|
|
|
459 |
|
460 |
{upload_func}
|
461 |
|
462 |
+
"""
|
463 |
+
)
|
464 |
return (upload_section,)
|
465 |
|
466 |
|
467 |
@app.cell
|
468 |
def _(deploy_fnc, deployment_definition, hw_selection_table, mo):
|
469 |
+
deployment_section = mo.md(
|
470 |
+
f"""
|
471 |
###**Deploy your function:**
|
472 |
|
473 |
1. Select a hardware specification (vCPUs/GB) that you want your function deployed on
|
|
|
498 |
|
499 |
{deploy_fnc}
|
500 |
|
501 |
+
"""
|
502 |
+
)
|
503 |
return (deployment_section,)
|
504 |
|
505 |
|
506 |
@app.cell
|
507 |
def _(mo, purge_tabs):
|
508 |
+
purging_section = mo.md(
|
509 |
+
f"""
|
510 |
###**Helper Purge Functions:**
|
511 |
|
512 |
These functions help you retrieve, select and delete deployments, data assets or repository assets (functions, models, etc.) that you have in the deployment space. This is meant to support fast cleanup.
|
|
|
517 |
|
518 |
{purge_tabs}
|
519 |
|
520 |
+
"""
|
521 |
+
)
|
522 |
|
523 |
return (purging_section,)
|
524 |
|
|
|
532 |
ss_asset_response,
|
533 |
yaml_template,
|
534 |
):
|
535 |
+
packages_section = mo.md(
|
536 |
+
f"""
|
537 |
###**If needed - Create a custom software-spec with added python packages**
|
538 |
|
539 |
1. Check to see if the python library you want to use is already available inside watsonx.ai's runtime environment base specs for deployed functions by adding them as a comma separated list, e.g. - plotly, ibm-watsonx-ai==1.3.6, etc. into the text area.
|
|
|
564 |
```json
|
565 |
{json.dumps(ss_asset_response, indent=2)}
|
566 |
```
|
567 |
+
"""
|
568 |
+
)
|
569 |
return (packages_section,)
|
570 |
|
571 |
|
|
|
573 |
def _(client_section, mo):
|
574 |
ui_accordion_section_1 = mo.accordion(
|
575 |
{"Section 1: **watsonx.ai Credentials**": client_section}
|
576 |
+
)
|
577 |
ui_accordion_section_1
|
578 |
return
|
579 |
|
|
|
582 |
def _(function_section, mo):
|
583 |
ui_accordion_section_2 = mo.accordion(
|
584 |
{"Section 2: **Function Creation**": function_section}
|
585 |
+
)
|
586 |
ui_accordion_section_2
|
587 |
return
|
588 |
|
|
|
591 |
def _(mo, packages_section):
|
592 |
ui_accordion_section_3 = mo.accordion(
|
593 |
{"Section 3: **Create a Package Extension (Optional)**": packages_section}
|
594 |
+
)
|
595 |
ui_accordion_section_3
|
596 |
return
|
597 |
|
|
|
600 |
def _(mo, upload_section):
|
601 |
ui_accordion_section_4 = mo.accordion(
|
602 |
{"Section 4: **Function Upload**": upload_section}
|
603 |
+
)
|
604 |
ui_accordion_section_4
|
605 |
return
|
606 |
|
|
|
609 |
def _(deployment_section, mo):
|
610 |
ui_accordion_section_5 = mo.accordion(
|
611 |
{"Section 5: **Function Deployment**": deployment_section}
|
612 |
+
)
|
613 |
ui_accordion_section_5
|
614 |
return
|
615 |
|
|
|
618 |
def _(mo, purging_section):
|
619 |
ui_accordion_section_6 = mo.accordion(
|
620 |
{"Section 6: **Helper Functions**": purging_section}
|
621 |
+
)
|
622 |
ui_accordion_section_6
|
623 |
return
|
624 |
|
625 |
|
626 |
@app.cell
|
627 |
+
def _(mo, template, template_variant):
|
628 |
function_editor = (
|
629 |
+
mo.md(
|
630 |
+
"""
|
631 |
#### **Create your function by editing the template:**
|
632 |
|
633 |
{editor}
|
634 |
|
635 |
+
"""
|
636 |
+
)
|
637 |
.batch(
|
638 |
+
editor=mo.ui.code_editor(
|
639 |
+
value=template["function_code"],
|
640 |
+
language="python",
|
641 |
+
min_height=200,
|
642 |
+
theme="dark",
|
643 |
+
)
|
644 |
)
|
645 |
.form(show_clear_button=True, bordered=False)
|
646 |
+
)
|
647 |
|
648 |
# function_editor
|
649 |
return (function_editor,)
|
|
|
654 |
function_name = None
|
655 |
if function_editor.value:
|
656 |
# Get the edited code from the function editor
|
657 |
+
code = function_editor.value["editor"]
|
658 |
# Extract function name using AST without executing the code
|
659 |
|
660 |
try:
|
|
|
685 |
|
686 |
|
687 |
@app.cell
|
688 |
+
def _(template):
|
689 |
dependencies = template["dependencies"]
|
690 |
yaml_template = f"""dependencies:
|
691 |
- pip
|
692 |
- pip:
|
693 |
{'\n'.join(f' - {dep}' for dep in dependencies)}
|
694 |
"""
|
695 |
+
|
696 |
yaml_templates = {
|
697 |
"empty": """dependencies:
|
698 |
- pip
|
|
|
705 |
dependency_pack_name = template["function_name"] + "_dependencies"
|
706 |
# After the yaml_template definition, add:
|
707 |
yaml_templates[dependency_pack_name] = yaml_template
|
708 |
+
return (yaml_templates, dependency_pack_name)
|
709 |
|
710 |
|
711 |
@app.cell
|
712 |
+
def _(mo, yaml_templates, dependency_pack_name):
|
713 |
+
yaml_template = mo.ui.dropdown(
|
714 |
+
yaml_templates,
|
715 |
+
searchable=True,
|
716 |
+
label="**Select a template:**",
|
717 |
+
value=dependency_pack_name,
|
718 |
+
)
|
719 |
return (yaml_template,)
|
720 |
|
721 |
|
|
|
723 |
def _(tempfile):
|
724 |
def create_yaml_tempfile(yaml_editor_value):
|
725 |
"""Creates temporary YAML file and returns its path"""
|
726 |
+
temp_file = tempfile.NamedTemporaryFile(suffix=".yaml", delete=False)
|
727 |
+
with open(temp_file.name, "w") as f:
|
728 |
f.write(str(yaml_editor_value))
|
729 |
return temp_file.name
|
730 |
+
|
731 |
return (create_yaml_tempfile,)
|
732 |
|
733 |
|
734 |
@app.cell
|
735 |
+
def _(mo, yaml_template, dependency_pack_name):
|
736 |
+
pkg_types = {"Conda Yaml": "conda_yml", "Custom user library": "custom_library"}
|
737 |
|
738 |
package_meta = (
|
739 |
+
mo.md(
|
740 |
+
"""**Create your Conda YAML by editing the template:**
|
741 |
|
742 |
{yml_editor}
|
743 |
|
|
|
748 |
{software_spec_name}
|
749 |
|
750 |
{software_spec_description}
|
751 |
+
"""
|
752 |
+
)
|
753 |
.batch(
|
754 |
+
yml_editor=mo.ui.code_editor(
|
755 |
+
value=yaml_template.value, language="yaml", min_height=100, theme="dark"
|
756 |
+
),
|
757 |
+
package_name=mo.ui.text(
|
758 |
+
placeholder="Python Package for...",
|
759 |
+
label="Package Extension Name:",
|
760 |
+
kind="text",
|
761 |
+
value=f"Custom Python Package: {dependency_pack_name}",
|
762 |
+
),
|
763 |
+
software_spec_name=mo.ui.text(
|
764 |
+
placeholder="Software Spec Name",
|
765 |
+
label="Custom Software Spec Name:",
|
766 |
+
kind="text",
|
767 |
+
value=f"Extended Python Function Software Spec - {dependency_pack_name}",
|
768 |
+
),
|
769 |
+
package_description=mo.ui.text_area(
|
770 |
+
placeholder="Write a description for your package.",
|
771 |
+
label="Package Description:",
|
772 |
+
value=" ",
|
773 |
+
),
|
774 |
+
software_spec_description=mo.ui.text_area(
|
775 |
+
placeholder="Write a description for your software spec.",
|
776 |
+
label="Software Spec Description:",
|
777 |
+
value=" ",
|
778 |
+
),
|
779 |
+
package_type=mo.ui.dropdown(
|
780 |
+
pkg_types, label="Select your package type:", value="Conda Yaml"
|
781 |
+
),
|
782 |
)
|
783 |
.form(show_clear_button=True, bordered=False)
|
784 |
+
)
|
785 |
return (package_meta,)
|
786 |
|
787 |
|
788 |
@app.cell
|
789 |
def _(mo):
|
790 |
+
check_packages = (
|
791 |
+
mo.md(
|
792 |
+
"""
|
793 |
**Check if a package you want to use is in the base software_specification already:**
|
794 |
|
795 |
{package_list}
|
796 |
|
797 |
{return_full_list}
|
798 |
+
"""
|
799 |
+
)
|
800 |
.batch(
|
801 |
+
package_list=mo.ui.text_area(
|
802 |
+
placeholder="Add packages as a comma separated list (with or without versions)."
|
803 |
+
),
|
804 |
+
return_full_list=mo.ui.checkbox(
|
805 |
+
value=False,
|
806 |
+
label="Return a full list of packages in the base software specification.",
|
807 |
+
),
|
808 |
)
|
809 |
.form(show_clear_button=True, bordered=False)
|
810 |
)
|
|
|
814 |
@app.cell
|
815 |
def _(check_packages):
|
816 |
if check_packages.value is not None:
|
817 |
+
packages = check_packages.value["package_list"]
|
818 |
+
verification_list = [item.strip() for item in packages.split(",")]
|
819 |
+
full_list_return = check_packages.value["return_full_list"]
|
820 |
else:
|
821 |
packages = None
|
822 |
verification_list = None
|
|
|
833 |
visualize_software_spec,
|
834 |
):
|
835 |
if verification_list is not None:
|
836 |
+
pkg_analysis = analyze_software_spec(
|
837 |
+
base_software_spec,
|
838 |
+
verification_list,
|
839 |
+
return_full_sw_package_list=full_list_return,
|
840 |
+
)
|
841 |
package_df = visualize_software_spec(pkg_analysis, verification_list)
|
842 |
else:
|
843 |
pkg_analysis = None
|
|
|
def _(client, create_yaml_tempfile, package_meta, uuid):
    if package_meta.value is not None and client is not None:
        pack_suffix = str(uuid.uuid4())[:4]
        pack_name = package_meta.value["package_name"]
        pe_metadata = {
            client.package_extensions.ConfigurationMetaNames.NAME: f"{pack_name}_{pack_suffix}",
            client.package_extensions.ConfigurationMetaNames.TYPE: package_meta.value[
                "package_type"
            ],
            client.package_extensions.ConfigurationMetaNames.DESCRIPTION: package_meta.value[
                "package_description"
            ],
        }
        yaml_file_path = create_yaml_tempfile(package_meta.value["yml_editor"])
    else:
        pe_metadata = {}
        yaml_file_path = None

    return pe_metadata, yaml_file_path

# ...

    if yaml_file_path is not None:
        ### Stores the package extension
        pe_asset_details = client.package_extensions.store(
            meta_props=pe_metadata, file_path=yaml_file_path
        )
        package_id = pe_asset_details["metadata"]["asset_id"]
    else:

    # ...
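# Sketch under assumptions (the real helper lives elsewhere in this notebook): all that
# `create_yaml_tempfile` needs to provide for the `client.package_extensions.store(...)`
# call above is to persist the editor's YAML text to disk and hand back the file path.
import tempfile


def create_yaml_tempfile_sketch(yaml_text: str) -> str:
    """Write YAML text to a temporary .yaml file and return its path."""
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".yaml", delete=False, encoding="utf-8"
    ) as tmp_file:
        tmp_file.write(yaml_text)
        return tmp_file.name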
@app.cell
def _():
    ### Helper function for checking if a python library is in the standard software spec.
    def analyze_software_spec(
        sw_spec_response, required_libraries, return_full_sw_package_list=False
    ):
        """
        Analyzes a software specification against a list of required libraries.

        ...

            - present: Dict of libraries that are present with matching versions
            - sw_packages: (Optional) Complete dict of all packages in the software spec
        """
        result = {"present": {}, "not_present": {}, "version_mismatch": {}}

        # Extract all packages from the software specification
        sw_packages = {}

        try:
            # Extract packages from included_packages in the software specification
            included_packages = sw_spec_response["entity"]["software_specification"][
                "software_configuration"
            ]["included_packages"]

            # Create a dictionary of all packages in the software specification
            for package in included_packages:

        # ...

                result["not_present"][lib_name] = None
            elif lib_version is not None and lib_version != sw_packages[lib_name]:
                # Check version mismatch
                result["version_mismatch"][lib_name] = [
                    sw_packages[lib_name],
                    lib_version,
                ]
            else:
                # Library is present with matching version (or no specific version required)
                result["present"][lib_name] = sw_packages[lib_name]

        if return_full_sw_package_list:
            # Extract just the library names from required_libraries
            req_libs_names = [
                lib.split("==")[0].strip() if "==" in lib else lib.strip()
                for lib in required_libraries
            ]

            def sort_key(pkg_name):
                if pkg_name in result["not_present"]:

            # ...

                    return (3, pkg_name)  # All other packages last

            # Sort sw_packages using the custom sorting key
            result["sw_packages"] = {
                k: sw_packages[k] for k in sorted(sw_packages.keys(), key=sort_key)
            }

            # Add missing packages to the top of sw_packages
            for pkg in result["not_present"]:
                result["sw_packages"] = {pkg: None, **result["sw_packages"]}

        return result

    def visualize_software_spec(analysis_result, required_libraries=None):
        # ...
                    status = "Other"
                    priority = 3  # Lowest priority

                rows.append(
                    {
                        "Package": package,
                        "Version": version if version is not None else "Not Present",
                        "Status": status,
                        "_priority": priority,  # Temporary field for sorting
                    }
                )

            df = pd.DataFrame(rows)

            # Sort by priority and then package name
            df = (
                df.sort_values(by=["_priority", "Package"])
                .drop("_priority", axis=1)
                .reset_index(drop=True)
            )

        else:
            # Only use the packages mentioned in required_libraries
            packages = set(
                list(analysis_result.get("not_present", {}).keys())
                + list(analysis_result.get("version_mismatch", {}).keys())
                + list(analysis_result.get("present", {}).keys())
            )

            # Create dataframe rows
            rows = []

            # ...

                    status = "❌ Missing"
                    priority = 0  # Top priority
                elif package in analysis_result.get("version_mismatch", {}):
                    version = analysis_result["version_mismatch"][package][
                        0
                    ]  # sw_spec version
                    status = "⚠️ Version Mismatch"
                    priority = 1  # Second priority
                else:

                # ...

                    status = "✅ Present"
                    priority = 2  # Third priority

                rows.append(
                    {
                        "Package": package,
                        "Version": version,
                        "Status": status,
                        "_priority": priority,  # Temporary field for sorting
                    }
                )

            df = pd.DataFrame(rows)

            # Sort by priority and then package name
            df = (
                df.sort_values(by=["_priority", "Package"])
                .drop("_priority", axis=1)
                .reset_index(drop=True)
            )

        return df

    return analyze_software_spec, visualize_software_spec
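# Illustrative usage (not in the diff) of the two helpers returned above. The response
# shape mirrors what the parser walks (entity -> software_specification ->
# software_configuration -> included_packages); the assumption that each entry carries
# "name" and "version" keys, and the packages/versions themselves, are invented for the
# example.
_example_sw_spec_response = {
    "entity": {
        "software_specification": {
            "software_configuration": {
                "included_packages": [
                    {"name": "pandas", "version": "2.1.4"},
                    {"name": "requests", "version": "2.32.3"},
                ]
            }
        }
    }
}
# analyze_software_spec(_example_sw_spec_response, ["pandas==2.1.4", "polars"]) would
# report pandas under "present" and polars under "not_present"; feeding the result to
# visualize_software_spec(...) yields the sorted status DataFrame rendered later on.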
@app.cell
def _(
    base_sw_spec_id,
    client,
    get_selection_table_status,
    set_selection_table_status,
    package_id,
    package_meta,
    uuid,
):
    if package_id is not None:
        ### Creates a custom software specification based on the standard python function spec_id - "45f12dfe-aa78-5b8d-9f38-0ee223c47309"
        ss_suffix = str(uuid.uuid4())[:4]
        ss_name = package_meta.value["software_spec_name"]

        ss_metadata = {
            client.software_specifications.ConfigurationMetaNames.NAME: f"{ss_name}_{ss_suffix}",
            client.software_specifications.ConfigurationMetaNames.DESCRIPTION: package_meta.value[
                "software_spec_description"
            ],
            client.software_specifications.ConfigurationMetaNames.BASE_SOFTWARE_SPECIFICATION: {
                "guid": base_sw_spec_id
            },
            client.software_specifications.ConfigurationMetaNames.PACKAGE_EXTENSIONS: [
                {"guid": package_id}
            ],
        }

        ss_asset_details = client.software_specifications.store(meta_props=ss_metadata)

        current_status = get_selection_table_status()

# ...

    get_selection_table_status, set_selection_table_status = mo.state(None)
    return get_selection_table_status, set_selection_table_status
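# Hedged aside (assumption, not part of the diff): downstream cells typically need the
# id of the derived software spec created above rather than its full details. One way to
# look it up, given the name built in ss_metadata, is the SDK's name-based lookup:
#
#     new_ss_id = client.software_specifications.get_id_by_name(f"{ss_name}_{ss_suffix}")
#
# The cell above keeps the full store() response in `ss_asset_details` instead.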
@app.cell
def _(mo, client, get_selection_table_status):
    if client:
        # First, get all specs data once
        specs_df = client.software_specifications.list()

# ...

    if client:
        # Filter the specs into two groups
        base_specs = specs_df[
            (specs_df["STATE"] == "supported")
            & (
                specs_df["NAME"].isin(
                    ["runtime-24.1-py3.11", "runtime-24.1-py3.11-cuda"]
                )
            )
        ]

        derived_specs = specs_df[(specs_df["TYPE"] == "derived")]

        # Concatenate with base specs first, then derived specs
        supported_specs = pd.concat([base_specs, derived_specs]).reset_index(drop=True)

        # ...

            "runtime-24.1-py3.11-cuda": "CUDA-enabled (GPU) Python Runtime",
            "runtime-24.1-r4.3": "R Runtime 4.3",
            "spark-mllib_3.4": "Apache Spark 3.4",
            "autoai-rag_rt24.1-py3.11": "AutoAI RAG",
        }

        # Define the preferred order for items to appear at the top

        # ...

        ]

        # Create a new column for sorting
        supported_specs["SORT_ORDER"] = supported_specs["NAME"].apply(
            lambda x: (
                preferred_order.index(x)
                if x in preferred_order
                else len(preferred_order)
            )
        )

        # Sort the DataFrame by the new column
        supported_specs = supported_specs.sort_values("SORT_ORDER").reset_index(
            drop=True
        )

        # Drop the sorting column as it's no longer needed
        supported_specs = supported_specs.drop(columns=["SORT_ORDER"])

        # Drop the REPLACEMENT column if it exists and add NOTES column
        if "REPLACEMENT" in supported_specs.columns:
            supported_specs = supported_specs.drop(columns=["REPLACEMENT"])

        # Add NOTES column with framework information
        supported_specs["NOTES"] = (
            supported_specs["NAME"].map(framework_mapping).fillna("Other")
        )

        # Create a table with single-row selection
        selection_table = mo.ui.table(
            supported_specs,
            selection="single",  # Only allow selecting one row
            label="#### **Select a supported software_spec runtime for your function asset** (For Python Functions select - *'runtime-24.1-py3.11'* ):",
            initial_selection=[
                0
            ],  # Now selecting the first row, which should be runtime-24.1-py3.11
            page_size=6,
        )
    else:
        sel_df = pd.DataFrame(
            data=[["ID", "Activate client."]], columns=["ID", "VALUE"]
        )

        selection_table = mo.ui.table(
            sel_df,
            selection="single",  # Only allow selecting one row
            label="You haven't activated the client",
            initial_selection=[0],
        )

    return (selection_table,)

# ...
@app.cell
def _(selection_table, set_selected_sw_spec):
    if selection_table.value is not None:
        set_selected_sw_spec(selection_table.value["ID"].iloc[0])
    return

# ...

@app.cell
def _(hw_selection_table, set_selected_hw_spec):
    if hw_selection_table.value is not None:
        set_selected_hw_spec(hw_selection_table.value["ID"].iloc[0])
    return

# ...
    output_schema_checkbox,
    selection_table,
    template_variant,
    template,
):
    if selection_table.value is not None:
        # Create the input fields

    # ...

    else:
        fnc_nm = "custom_python_function"

    uploaded_function_name = mo.ui.text(
        placeholder="<Must be the same as the name in editor>",
        label="Function Name:",
        kind="text",
        value=f"{fnc_nm}",
        full_width=False,
    )
    tags_editor = mo.ui.array(
        [
            mo.ui.text(placeholder="Metadata Tags...", value=use_case)
            for use_case in template["use_cases"]
        ]
        + [
            mo.ui.text(placeholder="Metadata Tags...")
            for _ in range(max(0, 4 - len(template["use_cases"])))
        ],
        label="Optional Metadata Tags",
    )
    software_spec = get_selected_sw_spec()

    # ...

        label="Description",
        max_length=256,
        rows=5,
        full_width=True,
    )

    return (
        description_input,
        software_spec,

        # ...

        uploaded_function_name,
    )
@app.cell
def _(mo, selection_table, description_input, uploaded_function_name, tags_editor):
    if selection_table.value is not None:
        func_metadata = mo.hstack(
            [
                description_input,
                mo.hstack(
                    [
                        uploaded_function_name,
                        tags_editor,
                    ],
                    justify="start",
                    gap=1,
                    align="start",
                    wrap=True,
                ),
            ],
            widths=[0.6, 0.4],
            gap=2.75,
        )
    return func_metadata


@app.cell
def _(mo, selection_table, input_schema_checkbox, output_schema_checkbox):
    if selection_table.value is not None:
        schema_metadata = mo.hstack(
            [
                input_schema_checkbox,
                output_schema_checkbox,
            ],
            justify="center",
            gap=1,
            align="center",
            wrap=True,
        )
    return schema_metadata


@app.cell
def _(mo, selection_table, func_metadata):
    if selection_table.value is not None:
        fm = mo.vstack(
            [
                func_metadata,
            ],
            align="center",
            gap=2,
        )
    return fm


@app.cell
def _(mo, selection_table, schema_metadata):
    if selection_table.value is not None:
        sc_m = mo.vstack(
            [
                schema_metadata,
                mo.md(
                    "**Make sure to select the checkbox options before filling in descriptions and tags or they will reset.**"
                ),
            ],
            align="center",
            gap=2,
        )
    return sc_m
@app.cell
def _(json, mo, template_variant, template):
    if template["input_schema"]:
        input_schema = template["input_schema"]
    else:
        input_schema = [
            {
                "id": "1",
                "type": "struct",
                "fields": [
                    {
                        "name": "<variable name 1>",
                        "type": "string",
                        "nullable": False,
                        "metadata": {},
                    },
                    {
                        "name": "<variable name 2>",
                        "type": "string",
                        "nullable": False,
                        "metadata": {},
                    },
                ],
            }
        ]

    if template["output_schema"]:
        output_schema = template["output_schema"]
    else:
        output_schema = [
            {
                "id": "1",
                "type": "struct",
                "fields": [
                    {
                        "name": "<output return name>",
                        "type": "string",
                        "nullable": False,
                        "metadata": {},
                    }
                ],
            }
        ]

    return input_schema, output_schema

    input_schema_editor = mo.ui.code_editor(
        value=json.dumps(input_schema, indent=4),
        language="python",
        min_height=100,
        theme="dark",
    )
    output_schema_editor = mo.ui.code_editor(
        value=json.dumps(output_schema, indent=4),
        language="python",
        min_height=100,
        theme="dark",
    )

    # schema_editors
    return input_schema_editor, output_schema_editor


@app.cell
def _(mo, input_schema, output_schema):
    input_schema_editor = mo.ui.code_editor(
        value=json.dumps(input_schema, indent=4),
        language="python",
        min_height=100,
        theme="dark",
    )
    output_schema_editor = mo.ui.code_editor(
        value=json.dumps(output_schema, indent=4),
        language="python",
        min_height=100,
        theme="dark",
    )
    return input_schema_editor, output_schema_editor
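# Concrete illustration (assumption, not from the diff): what the placeholder schemas
# above look like once filled in for a function that takes a single "prompt" string and
# returns a "response" string. The field names are invented for the example.
example_input_schema = [
    {
        "id": "1",
        "type": "struct",
        "fields": [
            {"name": "prompt", "type": "string", "nullable": False, "metadata": {}},
        ],
    }
]
example_output_schema = [
    {
        "id": "1",
        "type": "struct",
        "fields": [
            {"name": "response", "type": "string", "nullable": False, "metadata": {}},
        ],
    }
]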
@app.cell
def _(mo, input_schema_editor, output_schema_editor):
    schema_editors = mo.accordion(
        {
            """**Input Schema Metadata Editor**""": input_schema_editor,
            """**Output Schema Metadata Editor**""": output_schema_editor,
        },
        multiple=True,
    )
    return schema_editors


@app.cell
def _(
    ast,

    # ...

    if software_spec and client is not None:
        # Start with the base required fields
        function_meta = {
            client.repository.FunctionMetaNames.NAME: f"{uploaded_function_name.value}"
            or "your_function_name",
            client.repository.FunctionMetaNames.SOFTWARE_SPEC_ID: software_spec
            or "45f12dfe-aa78-5b8d-9f38-0ee223c47309",
        }

        # Add optional fields if they exist

        # ...

        if filtered_tags:  # Only add if there are non-empty tags
            function_meta[client.repository.FunctionMetaNames.TAGS] = filtered_tags

        if description_input.value:
            function_meta[client.repository.FunctionMetaNames.DESCRIPTION] = (
                description_input.value
            )

        # Add input schema if checkbox is checked
        if input_schema_checkbox.value:
            try:
                function_meta[
                    client.repository.FunctionMetaNames.INPUT_DATA_SCHEMAS
                ] = json.loads(input_schema_editor.value)
            except json.JSONDecodeError:
                # If JSON parsing fails, try Python literal evaluation as fallback
                function_meta[
                    client.repository.FunctionMetaNames.INPUT_DATA_SCHEMAS
                ] = ast.literal_eval(input_schema_editor.value)

        # Add output schema if checkbox is checked
        if output_schema_checkbox.value:
            try:
                function_meta[
                    client.repository.FunctionMetaNames.OUTPUT_DATA_SCHEMAS
                ] = json.loads(output_schema_editor.value)
            except json.JSONDecodeError:
                # If JSON parsing fails, try Python literal evaluation as fallback
                function_meta[
                    client.repository.FunctionMetaNames.OUTPUT_DATA_SCHEMAS
                ] = ast.literal_eval(output_schema_editor.value)

    def upload_function(function_meta, use_function_object=False):
        """

        ...

        try:
            # Create temp file from the code in the editor
            code_to_deploy = function_editor.value["editor"]
            # This function is defined elsewhere in the notebook
            func_name = uploaded_function_name.value or "your_function_name"
            # Ensure function_meta has the correct function name

            # ...

                function_object = getattr(module, func_name)

                # Change to /tmp directory before calling IBM Watson SDK functions
                os.chdir("/tmp/notebook_functions")

                # Upload the function object
                mo.md(f"Uploading function object: {func_name}")
                func_details = client.repository.store_function(
                    function_object, function_meta
                )
            else:
                # Change to /tmp directory before calling IBM Watson SDK functions
                os.chdir("/tmp/notebook_functions")

                # Create a zip file of the Python module
                import gzip

                # ...

                gz_path = f"{save_dir}/{func_name}.py.gz"

                # Create gzip file
                with open(file_path, "rb") as f_in:
                    with gzip.open(gz_path, "wb") as f_out:
                        shutil.copyfileobj(f_in, f_out)

                # Upload using the gzipped file path

            # ...

            # Always change back to the original directory, even if an exception occurs
            os.chdir(original_dir)

    upload_status = mo.state("No uploads yet")

    upload_button = mo.ui.button(
        label="Upload Function",
        on_click=lambda _: upload_function(function_meta, use_function_object=False),
        kind="success",
        tooltip="Click to upload function to watsonx.ai",
    )

    # function_meta

    # ...
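# Minimal sketch (assumption, not the author's function) of the deployable-function
# shape that `client.repository.store_function` expects when a function object is
# uploaded: an outer closure that returns an inner scorer working on the
# {"input_data": [{"fields": [...], "values": [...]}]} payload convention.
def example_deployable_function():
    def score(payload):
        # Echo the incoming rows back as a predictions payload.
        fields = payload["input_data"][0]["fields"]
        values = payload["input_data"][0]["values"]
        echoed = [dict(zip(fields, row)) for row in values]
        return {
            "predictions": [
                {"fields": ["echo"], "values": [[str(item)] for item in echoed]}
            ]
        }

    return score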
@app.cell
def _(get_upload_status, mo, upload_button):
    # Upload your function
    if upload_button.value:
        try:
            upload_result = upload_button.value
            artifact_id = upload_result["metadata"]["id"]
        except Exception as e:
            mo.md(f"Error: {str(e)}")

    upload_func = mo.vstack(
        [upload_button, mo.md(f"**Status:** {get_upload_status()}")],
        justify="space-around",
        align="center",
    )
    return artifact_id, upload_func

# ...
        def get_sort_key(name):
            # Create a custom ordering list
            custom_order = [
                "XXS",
                "XS",
                "S",
                "M",
                "L",
                "XL",
                "XS-Spark",
                "S-Spark",
                "M-Spark",
                "L-Spark",
                "XL-Spark",
                "K80",
                "K80x2",
                "K80x4",
                "V100",
                "V100x2",
                "WXaaS-XS",
                "WXaaS-S",
                "WXaaS-M",
                "WXaaS-L",
                "WXaaS-XL",
                "Default Spark",
                "Notebook Default Spark",
                "ML",
            ]

            # If name is in the custom order list, use its index

            # ...

            return (1, name)

        # Add a temporary column for sorting
        result_df["sort_key"] = result_df["NAME"].apply(get_sort_key)

        # Sort the dataframe and drop the temporary column
        result_df = result_df.sort_values("sort_key").drop("sort_key", axis=1)

        # Reset the index
        result_df = result_df.reset_index(drop=True)

        # ...

            label="#### **Select a supported hardware_specification for your deployment** *(Default: 'XS' - 1vCPU_4GB Ram)*",
            initial_selection=[1],
            page_size=6,
            wrapped_columns=["DESCRIPTION"],
        )

        deployment_type = mo.ui.radio(
            options={
                "Function": "Online (Function Endpoint)",
                "Runnable Job": "Batch (Runnable Jobs)",
            },
            value="Function",
            label="Select the Type of Deployment:",
            inline=True,
        )
        uuid_suffix = str(uuid.uuid4())[:4]

        deployment_name = mo.ui.text(
            value=f"deployed_func_{uuid_suffix}",
            label="Deployment Name:",
            placeholder="<Must be completely unique>",
        )
    else:
        hw_df = pd.DataFrame(data=[["ID", "Activate client."]], columns=["ID", "VALUE"])

        hw_selection_table = mo.ui.table(
            hw_df,
            selection="single",  # Only allow selecting one row
            label="You haven't activated the client",
            initial_selection=[0],
        )

    return deployment_name, deployment_type, hw_selection_table

# ...
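# Hedged note (not part of the diff): an "Online (Function Endpoint)" deployment is
# invoked synchronously, while a "Batch (Runnable Jobs)" deployment is driven by jobs.
# A batch run would typically be started with something like:
#
#     job = client.deployments.create_job(
#         deployment_id,
#         {client.deployments.ScoringMetaNames.INPUT_DATA: [{"fields": [...], "values": [...]}]},
#     )
#
# The meta props shown here are illustrative; this app only creates the deployments.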
            print("Error: No artifact ID provided. Please upload a function first.")
            return None

        if (
            deployment_type.value == "Online (Function Endpoint)"
        ):  # Changed from "Online (Function Endpoint)"
            deployment_props = {
                client.deployments.ConfigurationMetaNames.NAME: deployment_name.value,
                client.deployments.ConfigurationMetaNames.ONLINE: {},
                client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {
                    "id": selected_hw_config
                },
                client.deployments.ConfigurationMetaNames.SERVING_NAME: deployment_name.value,
            }
        else:  # "Runnable Job" instead of "Batch (Runnable Jobs)"
            deployment_props = {
                client.deployments.ConfigurationMetaNames.NAME: deployment_name.value,
                client.deployments.ConfigurationMetaNames.BATCH: {},
                client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {
                    "id": selected_hw_config
                },
                # batch does not use serving names
            }

        # ...

            print(deployment_props)
            # First, get the asset details to confirm it exists
            asset_details = client.repository.get_details(artifact_id)
            print(
                f"Asset found: {asset_details['metadata']['name']} with ID: {asset_details['metadata']['id']}"
            )

            # Create the deployment
            deployed_function = client.deployments.create(artifact_id, deployment_props)
            print(
                f"Creating deployment from Asset: {artifact_id} with deployment properties {str(deployment_props)}"
            )
            return deployed_function
        except Exception as e:
            print(f"Deployment error: {str(e)}")

        # ...

        deployment_info = client.deployments.get_details(deployment_id)
        return deployment_info

    deployment_status = mo.state("No deployments yet")

    if hw_selection_table.value is not None:
        selected_hw_config = get_selected_hw_spec()

    # ...

        label="Deploy Function",
        on_click=lambda _: deploy_function(artifact_id, deployment_type),
        kind="success",
        tooltip="Click to deploy function to watsonx.ai",
    )

    if client and upload_button.value:
        deployment_definition = mo.hstack(
            [deployment_type, deployment_name], justify="space-around"
        )
    else:
        deployment_definition = mo.hstack(
            ["No Deployment Type Selected", "No Deployment Name Provided"],
            justify="space-around",
        )

    # deployment_definition
    return deploy_button, deployment_definition

# ...

def _(deploy_button, deployment_definition, mo):
    _ = deployment_definition

    deploy_fnc = mo.vstack(
        [deploy_button, deploy_button.value], justify="space-around", align="center"
    )

    return (deploy_fnc,)

# ...
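# Illustrative follow-up (assumption, not part of the diff): once the online deployment
# above is live, it can be invoked from the same client with a payload in the
# fields/values convention. The field name and value below are placeholders.
example_scoring_payload = {
    "input_data": [
        {
            "fields": ["prompt"],
            "values": [["Hello from the deployed function"]],
        }
    ]
}
# response = client.deployments.score(deployment_id, example_scoring_payload)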
    def get_deployment_list():
        dep_df = client.deployments.list()
        dep_df = pd.DataFrame(dep_df)

        columns_to_drop = [
            col for col in dep_df.columns if "STATE" in col or "REPLACEMENT" in col
        ]
        if columns_to_drop:
            dep_df = dep_df.drop(columns=columns_to_drop)
        return dep_df

    def get_deployment_ids(df):
        dep_list = df["ID"].tolist()
        return dep_list

    # ----

    def get_data_assets_list():
        data_a_df = client.data_assets.list()

        # ...

        return data_a_df

    def get_data_asset_ids(df):
        data_asset_list = df["ASSET_ID"].tolist()
        return data_asset_list

    # ----

    def get_repository_list():
        rep_list_df = client.repository.list()
        rep_list_df = pd.DataFrame(rep_list_df)

        columns_to_drop = [
            col
            for col in ["SPEC_STATE", "SPEC_REPLACEMENT"]
            if col in rep_list_df.columns
        ]
        if columns_to_drop:
            rep_list_df = rep_list_df.drop(columns=columns_to_drop)
        return rep_list_df

    def get_repository_ids(df):
        repository_list = df["ID"].tolist()
        return repository_list

    # ----

    def get_pkg_ext_list():
        pkg_ext_list_df = client.package_extensions.list()

        # ...

        return pkg_ext_list_df

    def get_pkg_ext_ids(df):
        pkg_ext_id_list = df["ASSET_ID"].tolist()
        return pkg_ext_id_list

    # ----

    def get_sws_list():
        sws_list_df = client.software_specifications.list()
        # Filter to only include derived types
        derived_sws_list_df = sws_list_df[(sws_list_df["TYPE"] == "derived")]
        # Reset the index and prepare final dataframe
        sws_list_df = pd.DataFrame(derived_sws_list_df).reset_index(drop=True)
        # Drop STATE and REPLACEMENT columns if they exist
        columns_to_drop = [
            col for col in ["STATE", "REPLACEMENT"] if col in sws_list_df.columns
        ]
        if columns_to_drop:
            sws_list_df = sws_list_df.drop(columns=columns_to_drop)
        return sws_list_df

    def get_sws_ids(df):
        sws_id_list = df["ID"].tolist()
        return sws_id_list

    # ----

    def delete_with_progress(
        ids_list, delete_function, item_type="items", display_errors=True
    ):
        errors = []

        with mo.status.progress_bar(
            total=len(ids_list) or 1,
            title=f"Purging {item_type}",
            subtitle=f"Deleting {item_type}...",
            completion_title="Purge Complete",
            completion_subtitle=f"Successfully deleted {len(ids_list) - len(errors)} {item_type}",
            remove_on_exit=True,
        ) as progress:
            for item_id in ids_list:
                try:
                    delete_function(item_id)
                except Exception as e:
                    error_msg = (
                        f"Error deleting {item_type} with ID {item_id}: {str(e)}"
                    )
                    if display_errors:
                        print(error_msg)
                    errors.append((item_id, str(e)))
                finally:
                    progress.update(increment=1)

        if errors and display_errors:
            with mo.redirect_stderr():
                sys.stderr.write("\nErrors encountered during deletion:\n")
                for item_id, error in errors:
                    sys.stderr.write(f"  - ID {item_id}: {error}\n")

        return f"Deleted {len(ids_list) - len(errors)} {item_type} successfully"

    # Use with existing deletion functions
    def delete_deployments(deployment_ids):
        return delete_with_progress(
            deployment_ids, lambda id: client.deployments.delete(id), "deployments"
        )

    def delete_data_assets(data_asset_ids):
        return delete_with_progress(
            data_asset_ids, lambda id: client.data_assets.delete(id), "data assets"
        )

    def delete_repository_items(repository_ids):
        return delete_with_progress(
            repository_ids, lambda id: client.repository.delete(id), "repository items"
        )

    def delete_pkg_ext_items(pkg_ids):
        return delete_with_progress(
            pkg_ids,
            lambda id: client.package_extensions.delete(id),
            "package extensions",
        )

    def delete_sws_items(sws_ids):
        return delete_with_progress(
            sws_ids,
            lambda id: client.software_specifications.delete(id),
            "software specifications",
        )

    return (
        delete_data_assets,
        delete_deployments,

# ...
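# Illustrative composition (not in the diff) of the helpers defined above: list the
# assets, pull out their ids, then delete them behind the progress bar. The same pattern
# applies to data assets, repository items, package extensions and software specs.
def purge_all_deployments_sketch():
    deployments_df = get_deployment_list()
    return delete_deployments(get_deployment_ids(deployments_df))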
    else:
        pkg_ext_table = mo.md("No Table Loaded")

    return (
        data_assets_table,
        deployments_table,

# ...

    mo,
    purge_deployments,
):
    deployments_purge_stack = mo.hstack(
        [get_deployments_button, get_deployment_id_list, purge_deployments]
    )
    deployments_purge_stack_results = mo.vstack(
        [deployments_table, get_deployment_id_list.value, purge_deployments.value]
    )

    deployments_purge_tab = mo.vstack(
        [deployments_purge_stack, deployments_purge_stack_results]
    )
    return (deployments_purge_tab,)

# ...
    purge_repository,
    repository_table,
):
    repository_purge_stack = mo.hstack(
        [get_repository_button, get_repository_id_list, purge_repository]
    )
    repository_purge_stack_results = mo.vstack(
        [repository_table, get_repository_id_list.value, purge_repository.value]
    )

    repository_purge_tab = mo.vstack(
        [repository_purge_stack, repository_purge_stack_results]
    )
    return (repository_purge_tab,)

# ...

    mo,
    purge_data_assets,
):
    data_assets_purge_stack = mo.hstack(
        [get_data_assets_button, get_data_asset_id_list, purge_data_assets]
    )
    data_assets_purge_stack_results = mo.vstack(
        [data_assets_table, get_data_asset_id_list.value, purge_data_assets.value]
    )

    data_assets_purge_tab = mo.vstack(
        [data_assets_purge_stack, data_assets_purge_stack_results]
    )
    return (data_assets_purge_tab,)


@app.cell
def _(get_sws_button, get_sws_id_list, mo, purge_sws, sws_table):
    sws_purge_stack = mo.hstack([get_sws_button, get_sws_id_list, purge_sws])
    sws_purge_stack_results = mo.vstack(
        [sws_table, get_sws_id_list.value, purge_sws.value]
    )

    sws_purge_stack_tab = mo.vstack([sws_purge_stack, sws_purge_stack_results])
    return (sws_purge_stack_tab,)

# ...

    pkg_ext_table,
    purge_pkg_ext,
):
    pkg_ext_purge_stack = mo.hstack(
        [get_pkg_ext_button, get_pkg_ext_id_list, purge_pkg_ext]
    )
    pkg_ext_purge_stack_results = mo.vstack(
        [pkg_ext_table, get_pkg_ext_id_list.value, purge_pkg_ext.value]
    )

    pkg_ext_purge_tab = mo.vstack([pkg_ext_purge_stack, pkg_ext_purge_stack_results])
    return (pkg_ext_purge_tab,)

# ...

):
    purge_tabs = mo.ui.tabs(
        {
            "Purge Deployments": deployments_purge_tab,
            "Purge Repository Assets": repository_purge_tab,
            "Purge Data Assets": data_assets_purge_tab,
            "Purge Software Specifications": sws_purge_stack_tab,
            "Purge Package Extensions": pkg_ext_purge_tab,
        },
        lazy=False,
    )

    return (purge_tabs,)

# ...

        kind="danger",
    )

    ### Package Extensions Purge
    get_pkg_ext_button = mo.ui.button(
        label="Get Package Extensions Dataframe",