Commit 77bbaeb (verified) · a1c00l committed · 1 parent: 550ed76

Upload api.py

Files changed (1):
  1. src/aibom-generator/api.py (+136 −151)
src/aibom-generator/api.py CHANGED
@@ -19,11 +19,28 @@ from starlette.middleware.base import BaseHTTPMiddleware
 from huggingface_hub import HfApi
 from huggingface_hub.utils import RepositoryNotFoundError  # For specific error handling
 
-
 # Configure logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
+# Registry-driven field classification imports
+try:
+    from src.aibom_generator.field_registry_manager import (
+        get_field_registry_manager,
+        generate_field_classification,
+        get_configurable_scoring_weights
+    )
+    REGISTRY_MANAGER = get_field_registry_manager()
+    FIELD_CLASSIFICATION = generate_field_classification()
+    SCORING_WEIGHTS = get_configurable_scoring_weights()
+    REGISTRY_AVAILABLE = True
+    logger.info(f"✅ Registry-driven API: {len(FIELD_CLASSIFICATION)} fields loaded")
+except ImportError as e:
+    REGISTRY_AVAILABLE = False
+    FIELD_CLASSIFICATION = {}
+    SCORING_WEIGHTS = {}
+    logger.warning(f"⚠️ Registry not available for API: {e}")
+
 # Define directories and constants
 templates_dir = "templates"
 OUTPUT_DIR = "/tmp/aibom_output"
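The `except ImportError` arm keeps the API bootable when the registry package is absent. For local testing without that package, a stub exposing the same three entry points can stand in. The return shapes below are assumptions inferred from how `FIELD_CLASSIFICATION` is consumed later in this diff (`classification["category"]`, `classification["tier"]`), not the real module's contract:

    # field_registry_manager_stub.py -- hypothetical stand-in for local testing only.
    # The real module is src/aibom_generator/field_registry_manager.py; shapes are inferred.

    def get_field_registry_manager():
        """Opaque manager handle; the API only checks that it is not None."""
        return object()

    def generate_field_classification():
        # Each field appears to map to a tier and a scoring category (see usage below).
        return {
            "bomFormat": {"tier": "critical", "category": "required_fields"},
            "metadata.timestamp": {"tier": "important", "category": "metadata"},
        }

    def get_configurable_scoring_weights():
        # Shape is a guess; this diff never inspects SCORING_WEIGHTS directly.
        return {"critical": 4.0, "important": 2.0, "supplementary": 1.0}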
@@ -418,7 +435,7 @@ def import_utils():
 
     # Try from src
     try:
-        from src.aibom_generator.utils import calculate_completeness_score
+        from src.aibom_generator.utils import calculate_completeness_score
         logger.info("Imported src.aibom_generator.utils.calculate_completeness_score")
         return calculate_completeness_score
     except ImportError:
@@ -442,25 +459,60 @@ def import_utils():
 # Try to import the calculate_completeness_score function
 calculate_completeness_score = import_utils()
 
-# Helper function to create a comprehensive completeness_score with field_checklist
-def create_comprehensive_completeness_score(aibom=None):
-    """
-    Create a comprehensive completeness_score object with all required attributes.
-    If aibom is provided and calculate_completeness_score is available, use it to calculate the score.
-    Otherwise, return a default score structure.
-    """
-    # If we have the calculate_completeness_score function and an AIBOM, use it
-    if calculate_completeness_score and aibom:
-        try:
-            return calculate_completeness_score(aibom, validate=True, use_best_practices=True)
-        except Exception as e:
-            logger.error(f"Error calculating completeness score: {str(e)}")
-            raise ValueError(f"Failed to calculate completeness score: {str(e)}")
-    else:
-        raise ValueError("AIBOM object not provided or scoring function not available.")
-    # Otherwise, return a default comprehensive structure
+# Verify registry integration status
+if REGISTRY_AVAILABLE:
+    logger.info("✅ API fully integrated with registry system")
+else:
+    logger.warning("⚠️ API using fallback mode - registry not available")
+
+
+def get_tier_points(tier):
+    """Get points for a field tier."""
+    tier_points = {
+        "critical": 4.0,
+        "important": 2.0,
+        "supplementary": 1.0
+    }
+    return tier_points.get(tier, 1.0)
+
+def create_registry_driven_fallback():
+    """Create fallback score using registry configuration."""
+    if not REGISTRY_AVAILABLE:
+        return create_hardcoded_fallback()
+
+    categories = {}
+    field_checklist = {}
+    max_scores = {}
+
+    # Get categories and scores from registry
+    for field_name, classification in FIELD_CLASSIFICATION.items():
+        category = classification["category"]
+        tier = classification["tier"]
+
+        # Initialize category if not exists
+        if category not in categories:
+            categories[category] = {"total": 0, "present": 0}
+            max_scores[category] = 0
+
+        categories[category]["total"] += 1
+        max_scores[category] += get_tier_points(tier)
+
+        # Add to field checklist with registry-based tier
+        tier_stars = {"critical": "★★★", "important": "★★", "supplementary": "★"}
+        field_checklist[field_name] = f"n/a {tier_stars.get(tier, '★')}"
+
+    return {
+        "total_score": 0,
+        "section_scores": {cat: 0 for cat in categories.keys()},
+        "max_scores": max_scores,
+        "field_checklist": field_checklist,
+        "category_details": categories
+    }
+
+def create_hardcoded_fallback():
+    """Fallback to original hardcoded structure when registry unavailable."""
     return {
-        "total_score": 0,  # Default score for better UI display
+        "total_score": 0,
         "section_scores": {
             "required_fields": 0,
             "metadata": 0,
@@ -476,141 +528,38 @@ def create_comprehensive_completeness_score(aibom=None):
             "external_references": 10
         },
         "field_checklist": {
-            # Required fields
             "bomFormat": "n/a ★★★",
             "specVersion": "n/a ★★★",
             "serialNumber": "n/a ★★★",
             "version": "n/a ★★★",
-            "metadata.timestamp": "n/a ★★",
-            "metadata.tools": "n/a ★★",
-            "metadata.authors": "n/a ★★",
-            "metadata.component": "n/a ★★",
-
-            # Component basic info
-            "component.type": "n/a ★★",
-            "component.name": "n/a ★★★",
-            "component.bom-ref": "n/a ★★",
-            "component.purl": "n/a ★★",
-            "component.description": "n/a ★★",
-            "component.licenses": "n/a ★★",
-
-            # Model card
-            "modelCard.modelParameters": "n/a ★★",
-            "modelCard.quantitativeAnalysis": "n/a ★★",
-            "modelCard.considerations": "n/a ★★",
-
-            # External references
-            "externalReferences": "n/a ★",
-
-            # Additional fields from FIELD_CLASSIFICATION
             "name": "n/a ★★★",
-            "downloadLocation": "n/a ★★★",
-            "primaryPurpose": "n/a ★★★",
-            "suppliedBy": "n/a ★★★",
-            "energyConsumption": "n/a ★★",
-            "hyperparameter": "n/a ★★",
-            "limitation": "n/a ★★",
-            "safetyRiskAssessment": "n/a ★★",
-            "typeOfModel": "n/a ★★",
-            "modelExplainability": "n/a ★",
-            "standardCompliance": "n/a ★",
-            "domain": "n/a ★",
-            "energyQuantity": "n/a ★",
-            "energyUnit": "n/a ★",
-            "informationAboutTraining": "n/a ★",
-            "informationAboutApplication": "n/a ★",
-            "metric": "n/a ★",
-            "metricDecisionThreshold": "n/a ★",
-            "modelDataPreprocessing": "n/a ★",
-            "autonomyType": "n/a ★",
-            "useSensitivePersonalInformation": "n/a ★"
-        },
-        "field_tiers": {
-            # Required fields
-            "bomFormat": "critical",
-            "specVersion": "critical",
-            "serialNumber": "critical",
-            "version": "critical",
-            "metadata.timestamp": "important",
-            "metadata.tools": "important",
-            "metadata.authors": "important",
-            "metadata.component": "important",
-
-            # Component basic info
-            "component.type": "important",
-            "component.name": "critical",
-            "component.bom-ref": "important",
-            "component.purl": "important",
-            "component.description": "important",
-            "component.licenses": "important",
-
-            # Model card
-            "modelCard.modelParameters": "important",
-            "modelCard.quantitativeAnalysis": "important",
-            "modelCard.considerations": "important",
-
-            # External references
-            "externalReferences": "supplementary",
-
-            # Additional fields from FIELD_CLASSIFICATION
-            "name": "critical",
-            "downloadLocation": "critical",
-            "primaryPurpose": "critical",
-            "suppliedBy": "critical",
-            "energyConsumption": "important",
-            "hyperparameter": "important",
-            "limitation": "important",
-            "safetyRiskAssessment": "important",
-            "typeOfModel": "important",
-            "modelExplainability": "supplementary",
-            "standardCompliance": "supplementary",
-            "domain": "supplementary",
-            "energyQuantity": "supplementary",
-            "energyUnit": "supplementary",
-            "informationAboutTraining": "supplementary",
-            "informationAboutApplication": "supplementary",
-            "metric": "supplementary",
-            "metricDecisionThreshold": "supplementary",
-            "modelDataPreprocessing": "supplementary",
-            "autonomyType": "supplementary",
-            "useSensitivePersonalInformation": "supplementary"
-        },
-        "missing_fields": {
-            "critical": [],
-            "important": ["modelCard.quantitativeAnalysis", "energyConsumption", "safetyRiskAssessment"],
-            "supplementary": ["modelExplainability", "standardCompliance", "energyQuantity", "energyUnit",
-                              "metric", "metricDecisionThreshold", "modelDataPreprocessing",
-                              "autonomyType", "useSensitivePersonalInformation"]
-        },
-        "completeness_profile": {
-            "name": "standard",
-            "description": "Comprehensive fields for proper documentation",
-            "satisfied": True
-        },
-        "penalty_applied": False,
-        "penalty_reason": None,
-        "recommendations": [
-            {
-                "priority": "medium",
-                "field": "modelCard.quantitativeAnalysis",
-                "message": "Missing important field: modelCard.quantitativeAnalysis",
-                "recommendation": "Add quantitative analysis information to the model card"
-            },
-            {
-                "priority": "medium",
-                "field": "energyConsumption",
-                "message": "Missing important field: energyConsumption - helpful for environmental impact assessment",
-                "recommendation": "Consider documenting energy consumption metrics for better transparency"
-            },
-            {
-                "priority": "medium",
-                "field": "safetyRiskAssessment",
-                "message": "Missing important field: safetyRiskAssessment",
-                "recommendation": "Add safety risk assessment information to improve documentation"
-            }
-        ]
+            "downloadLocation": "n/a ★★★"
+        }
     }
 
+# Helper function to create a comprehensive completeness_score with field_checklist
+def create_comprehensive_completeness_score(aibom=None):
+    """
+    Create a comprehensive completeness_score object with all required attributes.
+    Uses registry-driven field classification when available.
+    """
+    # If we have the calculate_completeness_score function and an AIBOM, use it
+    if calculate_completeness_score and aibom:
+        try:
+            return calculate_completeness_score(aibom, validate=True, use_best_practices=True)
+        except Exception as e:
+            logger.error(f"Error calculating completeness score: {str(e)}")
+            # Fall through to registry-driven fallback
+
+    # Use registry-driven fallback
+    if REGISTRY_AVAILABLE:
+        logger.info("Using registry-driven completeness score fallback")
+        return create_registry_driven_fallback()
+    else:
+        logger.warning("Using hardcoded completeness score fallback")
+        return create_hardcoded_fallback()
+
+
 @app.post("/generate", response_class=HTMLResponse)
 async def generate_form(
     request: Request,
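With this restructuring, `create_comprehensive_completeness_score` no longer raises when scoring is impossible; it degrades through three outcomes. A sketch of what callers see (assuming `aibom` is a document from the generator):

    # 1. Normal path: real scoring via calculate_completeness_score(aibom, ...)
    # 2. Scoring raised but registry loaded: create_registry_driven_fallback()
    # 3. No registry either: create_hardcoded_fallback()
    score = create_comprehensive_completeness_score(aibom)
    # All three paths return at least "total_score", "section_scores" and "field_checklist".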
@@ -834,8 +783,13 @@ async def generate_form(
     print(f" completeness_score keys: {list(completeness_score.keys())}")
     if 'category_details' in completeness_score:
         print(f" category_details exists: {list(completeness_score['category_details'].keys())}")
-        # Check each category
-        for category in ['required_fields', 'metadata', 'component_basic', 'component_model_card', 'external_references']:
+        # Use registry-driven categories when available
+        if REGISTRY_AVAILABLE:
+            categories = set(classification["category"] for classification in FIELD_CLASSIFICATION.values())
+        else:
+            categories = ['required_fields', 'metadata', 'component_basic', 'component_model_card', 'external_references']
+
+        for category in categories:
             if category in completeness_score['category_details']:
                 details = completeness_score['category_details'][category]
                 print(f" {category}: present={details.get('present_fields')}, total={details.get('total_fields')}, percentage={details.get('percentage')}")
@@ -1022,7 +976,11 @@ async def api_generate_with_report(request: GenerateRequest):
     )
 
     # Calculate completeness score
-    completeness_score = calculate_completeness_score(aibom, validate=True, use_best_practices=request.use_best_practices)
+    try:
+        completeness_score = calculate_completeness_score(aibom, validate=True, use_best_practices=True)
+    except Exception as e:
+        logger.error(f"Failed completeness scoring for {normalized_model_id}: {str(e)}")
+        raise HTTPException(status_code=500, detail=f"Error calculating score: {str(e)}")
 
     # Round only section_scores that aren't already rounded
     for section, score in completeness_score["section_scores"].items():
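For API clients, a scoring failure now surfaces as an explicit 500 with a JSON `detail` field (FastAPI's standard `HTTPException` envelope) instead of an unhandled exception. A hypothetical client sketch; the route path and payload field are placeholders, since they are defined elsewhere in this file:

    import requests

    # Route and payload names are illustrative; see the GenerateRequest model in api.py.
    resp = requests.post("http://localhost:8000/api/generate", json={"model_id": "org/model"})
    if resp.status_code == 500:
        print("Scoring failed:", resp.json()["detail"])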
@@ -1230,3 +1188,30 @@ if __name__ == "__main__":
     if not HF_TOKEN:
         print("Warning: HF_TOKEN environment variable not set. SBOM count will show N/A and logging will be skipped.")
     uvicorn.run(app, host="0.0.0.0", port=8000)
+
+
+@app.get("/api/registry/status")
+async def get_registry_status():
+    """Get current registry configuration status for debugging."""
+    if REGISTRY_AVAILABLE:
+        categories = {}
+        for field_name, classification in FIELD_CLASSIFICATION.items():
+            category = classification["category"]
+            if category not in categories:
+                categories[category] = 0
+            categories[category] += 1
+
+        return {
+            "registry_available": True,
+            "total_fields": len(FIELD_CLASSIFICATION),
+            "categories": list(categories.keys()),
+            "field_count_by_category": categories,
+            "registry_manager_loaded": REGISTRY_MANAGER is not None
+        }
+    else:
+        return {
+            "registry_available": False,
+            "fallback_mode": True,
+            "message": "Using hardcoded field definitions",
+            "total_fields": 6  # Hardcoded fallback count
+        }
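The new status endpoint makes the registry/fallback split observable at runtime. Exercising it once the app is up (host and port as configured in the `uvicorn.run` call above):

    import requests

    status = requests.get("http://localhost:8000/api/registry/status").json()
    if status["registry_available"]:
        print(status["total_fields"], status["field_count_by_category"])
    else:
        print(status["message"])  # "Using hardcoded field definitions"

The hardcoded `total_fields: 6` matches the six-entry checklist in `create_hardcoded_fallback`. Note that the route is declared below the `if __name__ == "__main__":` block: served via `uvicorn api:app` the decorator still runs at import time as usual, but launching `python api.py` directly would register it only after `uvicorn.run()` returns.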