Update src/aibom-generator/api.py

src/aibom-generator/api.py (+76 -48)
@@ -455,16 +455,18 @@ def create_comprehensive_completeness_score(aibom=None):
             return calculate_completeness_score(aibom, validate=True, use_best_practices=True)
         except Exception as e:
             logger.error(f"Error calculating completeness score: {str(e)}")
-
+            raise ValueError(f"Failed to calculate completeness score: {str(e)}")
+    else:
+        raise ValueError("AIBOM object not provided or scoring function not available.")
     # Otherwise, return a default comprehensive structure
     return {
-        "total_score":
+        "total_score": 0,  # Default score for better UI display
         "section_scores": {
-            "required_fields":
-            "metadata":
-            "component_basic":
-            "component_model_card":
-            "external_references":
+            "required_fields": 0,
+            "metadata": 0,
+            "component_basic": 0,
+            "component_model_card": 0,
+            "external_references": 0
         },
         "max_scores": {
             "required_fields": 20,
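The hunk above makes create_comprehensive_completeness_score raise a ValueError when scoring fails or when no AIBOM is supplied, while still defining a zero-filled default structure. A minimal sketch of how a call site could absorb the new exception (the names aibom, logger and the fallback behaviour here are assumed for illustration, not taken verbatim from api.py):

    try:
        completeness_score = create_comprehensive_completeness_score(aibom)
    except ValueError as exc:
        # Scoring is best-effort for the UI; log and continue without a score.
        logger.error(f"Completeness scoring failed: {exc}")
        completeness_score = None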
@@ -475,53 +477,53 @@ def create_comprehensive_completeness_score(aibom=None):
         },
         "field_checklist": {
             # Required fields
-            "bomFormat": "
-            "specVersion": "
-            "serialNumber": "
-            "version": "
-            "metadata.timestamp": "
-            "metadata.tools": "
-            "metadata.authors": "
-            "metadata.component": "
+            "bomFormat": "n/a ★★★",
+            "specVersion": "n/a ★★★",
+            "serialNumber": "n/a ★★★",
+            "version": "n/a ★★★",
+            "metadata.timestamp": "n/a ★★",
+            "metadata.tools": "n/a ★★",
+            "metadata.authors": "n/a ★★",
+            "metadata.component": "n/a ★★",

             # Component basic info
-            "component.type": "
-            "component.name": "
-            "component.bom-ref": "
-            "component.purl": "
-            "component.description": "
-            "component.licenses": "
+            "component.type": "n/a ★★",
+            "component.name": "n/a ★★★",
+            "component.bom-ref": "n/a ★★",
+            "component.purl": "n/a ★★",
+            "component.description": "n/a ★★",
+            "component.licenses": "n/a ★★",

             # Model card
-            "modelCard.modelParameters": "
-            "modelCard.quantitativeAnalysis": "
-            "modelCard.considerations": "
+            "modelCard.modelParameters": "n/a ★★",
+            "modelCard.quantitativeAnalysis": "n/a ★★",
+            "modelCard.considerations": "n/a ★★",

             # External references
-            "externalReferences": "
+            "externalReferences": "n/a ★",

             # Additional fields from FIELD_CLASSIFICATION
-            "name": "
-            "downloadLocation": "
-            "primaryPurpose": "
-            "suppliedBy": "
-            "energyConsumption": "
-            "hyperparameter": "
-            "limitation": "
-            "safetyRiskAssessment": "
-            "typeOfModel": "
-            "modelExplainability": "
-            "standardCompliance": "
-            "domain": "
-            "energyQuantity": "
-            "energyUnit": "
-            "informationAboutTraining": "
-            "informationAboutApplication": "
-            "metric": "
-            "metricDecisionThreshold": "
-            "modelDataPreprocessing": "
-            "autonomyType": "
-            "useSensitivePersonalInformation": "
+            "name": "n/a ★★★",
+            "downloadLocation": "n/a ★★★",
+            "primaryPurpose": "n/a ★★★",
+            "suppliedBy": "n/a ★★★",
+            "energyConsumption": "n/a ★★",
+            "hyperparameter": "n/a ★★",
+            "limitation": "n/a ★★",
+            "safetyRiskAssessment": "n/a ★★",
+            "typeOfModel": "n/a ★★",
+            "modelExplainability": "n/a ★",
+            "standardCompliance": "n/a ★",
+            "domain": "n/a ★",
+            "energyQuantity": "n/a ★",
+            "energyUnit": "n/a ★",
+            "informationAboutTraining": "n/a ★",
+            "informationAboutApplication": "n/a ★",
+            "metric": "n/a ★",
+            "metricDecisionThreshold": "n/a ★",
+            "modelDataPreprocessing": "n/a ★",
+            "autonomyType": "n/a ★",
+            "useSensitivePersonalInformation": "n/a ★"
         },
         "field_tiers": {
             # Required fields
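Each default field_checklist entry above pairs an "n/a" status with a tier marker (the symbols did not survive rendering in this diff view and are shown here as one to three stars, matching the three-tier pattern in the original). A small illustrative sketch of splitting such an entry back into its parts; the helper name and the "<status> <tier symbols>" format assumption are hypothetical:

    def split_checklist_entry(entry: str):
        # "n/a ★★★" -> ("n/a", 3); assumes "<status> <tier symbols>" formatting.
        status, _, symbols = entry.partition(" ")
        return status, len(symbols)

    print(split_checklist_entry("n/a ★★★"))  # ('n/a', 3)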
@@ -697,7 +699,6 @@ async def generate_form(
         enhancement_report = generator.get_enhancement_report()

         # Save AIBOM to file, use industry term ai_sbom in file name
-        # Corrected: Removed unnecessary backslashes around '/' and '_'
         # Save AIBOM to file using normalized ID
         filename = f"{normalized_model_id.replace('/', '_')}_ai_sbom.json"
         filepath = os.path.join(OUTPUT_DIR, filename)
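The filename logic above flattens the model ID so it is safe as a file name. A self-contained sketch of the transformation; the example model ID and output directory are hypothetical, and api.py defines its own OUTPUT_DIR:

    import os

    OUTPUT_DIR = "/tmp/aibom_output"                 # hypothetical stand-in
    normalized_model_id = "meta-llama/Llama-3.1-8B"  # hypothetical example ID

    filename = f"{normalized_model_id.replace('/', '_')}_ai_sbom.json"
    filepath = os.path.join(OUTPUT_DIR, filename)
    print(filepath)  # /tmp/aibom_output/meta-llama_Llama-3.1-8B_ai_sbom.json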
@@ -766,6 +767,7 @@ async def generate_form(
         except Exception as e:
             logger.error(f"Completeness score error from generator: {str(e)}")

+
         # If completeness_score is None or doesn't have field_checklist, use comprehensive one
         if completeness_score is None or not isinstance(completeness_score, dict) or 'field_checklist' not in completeness_score:
             logger.info("Using comprehensive completeness_score with field_checklist")
@@ -818,6 +820,32 @@ async def generate_form(
             "external_references": 10
         }

+        # DEBUG: Check for undefined values before template rendering
+        print("DEBUG: Checking completeness_score for undefined values:")
+        if completeness_score and 'section_scores' in completeness_score:
+            for key, value in completeness_score['section_scores'].items():
+                print(f"  {key}: {value} (type: {type(value)})")
+        else:
+            print("  No section_scores found in completeness_score")
+
+        # DEBUG: Template data check
+        print("DEBUG: Template data check:")
+        if completeness_score:
+            print(f"  completeness_score keys: {list(completeness_score.keys())}")
+            if 'category_details' in completeness_score:
+                print(f"  category_details exists: {list(completeness_score['category_details'].keys())}")
+                # Check each category
+                for category in ['required_fields', 'metadata', 'component_basic', 'component_model_card', 'external_references']:
+                    if category in completeness_score['category_details']:
+                        details = completeness_score['category_details'][category]
+                        print(f"    {category}: present={details.get('present_fields')}, total={details.get('total_fields')}, percentage={details.get('percentage')}")
+                    else:
+                        print(f"    {category}: MISSING from category_details")
+            else:
+                print("  category_details: NOT FOUND in completeness_score!")
+        else:
+            print("  completeness_score: IS NONE!")
+
         # Render the template with all necessary data, with normalized model ID
         return templates.TemplateResponse(
             "result.html",
@@ -1111,7 +1139,7 @@ async def get_model_score(
     # Round section scores for better readability
     for section, value in score["section_scores"].items():
         if isinstance(value, float) and not value.is_integer():
-            score["section_scores"][section] = round(value, 1)
+            score["section_scores"][section] = round(float(value), 1) if value is not None and value != "Undefined" else 0.0

     # Return score information
    return {
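The replacement line above guards the rounding against missing values and the literal string "Undefined" before converting to float. A standalone sketch of the same expression applied to a hypothetical mix of section scores (in api.py it only runs when the value is already a non-integer float):

    # Hypothetical sample data; the real values come from score["section_scores"] in api.py.
    section_scores = {"required_fields": 18.3333, "metadata": None, "component_basic": "Undefined"}

    for section, value in section_scores.items():
        section_scores[section] = round(float(value), 1) if value is not None and value != "Undefined" else 0.0

    print(section_scores)  # {'required_fields': 18.3, 'metadata': 0.0, 'component_basic': 0.0}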