a1c00l committed on
Commit
d9ffa28
·
verified ·
1 Parent(s): 0c757a9

Update src/aibom_generator/generator.py

Browse files
Files changed (1) hide show
  1. src/aibom_generator/generator.py +231 -160
src/aibom_generator/generator.py CHANGED
@@ -1,7 +1,7 @@
1
  import json
2
  import uuid
3
  import datetime
4
- from typing import Dict, Optional, Any
5
 
6
  from huggingface_hub import HfApi, ModelCard
7
  from .utils import calculate_completeness_score
@@ -19,6 +19,7 @@ class AIBOMGenerator:
19
  self.inference_model_url = inference_model_url
20
  self.use_inference = use_inference
21
  self.cache_dir = cache_dir
 
22
 
23
  def generate_aibom(
24
  self,
@@ -26,82 +27,125 @@ class AIBOMGenerator:
26
  output_file: Optional[str] = None,
27
  include_inference: Optional[bool] = None,
28
  ) -> Dict[str, Any]:
29
- use_inference = include_inference if include_inference is not None else self.use_inference
30
- model_info = self._fetch_model_info(model_id)
31
- model_card = self._fetch_model_card(model_id)
32
-
33
- # Store original metadata before any AI enhancement
34
- original_metadata = self._extract_structured_metadata(model_id, model_info, model_card)
35
-
36
- # Create initial AIBOM with original metadata
37
- original_aibom = self._create_aibom_structure(model_id, original_metadata)
38
-
39
- # Calculate initial score
40
- original_score = calculate_completeness_score(original_aibom)
41
-
42
- # Final metadata starts with original metadata
43
- final_metadata = original_metadata.copy()
44
-
45
- # Apply AI enhancement if requested
46
- ai_enhanced = False
47
- ai_model_name = None
48
-
49
- if use_inference and self.inference_model_url:
50
- try:
51
- # Extract additional metadata using AI
52
- enhanced_metadata = self._extract_unstructured_metadata(model_card, model_id)
53
-
54
- # If we got enhanced metadata, merge it with original
55
- if enhanced_metadata:
56
- ai_enhanced = True
57
- ai_model_name = "BERT-base-uncased" # Will be replaced with actual model name
58
 
59
- # Merge enhanced metadata with original (enhanced takes precedence)
60
- for key, value in enhanced_metadata.items():
61
- if value is not None and (key not in final_metadata or not final_metadata[key]):
62
- final_metadata[key] = value
63
- except Exception as e:
64
- print(f"Error during AI enhancement: {e}")
65
- # Continue with original metadata if enhancement fails
66
-
67
- # Create final AIBOM with potentially enhanced metadata
68
- aibom = self._create_aibom_structure(model_id, final_metadata)
69
-
70
- # Calculate final score
71
- final_score = calculate_completeness_score(aibom)
72
-
73
- # Add score and enhancement info to metadata properties
74
- if "metadata" in aibom and "properties" not in aibom["metadata"]:
75
- aibom["metadata"]["properties"] = []
76
-
77
- if "metadata" in aibom and "properties" in aibom["metadata"]:
78
- # Add score information
79
- aibom["metadata"]["properties"].append({"name": "aibom:quality-score", "value": str(final_score["total_score"])})
80
- aibom["metadata"]["properties"].append({"name": "aibom:quality-breakdown", "value": json.dumps(final_score["section_scores"])})
81
- aibom["metadata"]["properties"].append({"name": "aibom:max-scores", "value": json.dumps(final_score["max_scores"])})
82
 
83
- # Add AI enhancement information
84
- if ai_enhanced:
85
- aibom["metadata"]["properties"].append({"name": "aibom:ai-enhanced", "value": "true"})
86
- aibom["metadata"]["properties"].append({"name": "aibom:ai-model", "value": ai_model_name})
87
- aibom["metadata"]["properties"].append({"name": "aibom:original-score", "value": str(original_score["total_score"])})
88
- aibom["metadata"]["properties"].append({"name": "aibom:score-improvement",
89
- "value": str(round(final_score["total_score"] - original_score["total_score"], 2))})
90
-
91
- if output_file:
92
- with open(output_file, 'w') as f:
93
- json.dump(aibom, f, indent=2)
94
-
95
- # Create enhancement report for UI display
96
- enhancement_report = {
97
- "ai_enhanced": ai_enhanced,
98
- "ai_model": ai_model_name if ai_enhanced else None,
99
- "original_score": original_score,
100
- "final_score": final_score,
101
- "improvement": round(final_score["total_score"] - original_score["total_score"], 2) if ai_enhanced else 0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
  }
103
 
104
- return aibom, enhancement_report
 
 
105
 
106
  def _fetch_model_info(self, model_id: str) -> Dict[str, Any]:
107
  try:
@@ -148,31 +192,37 @@ class AIBOMGenerator:
148
  metadata = {}
149
 
150
  if model_info:
151
- metadata.update({
152
- "name": model_info.modelId.split("/")[-1] if hasattr(model_info, "modelId") else model_id.split("/")[-1],
153
- "author": model_info.author if hasattr(model_info, "author") else None,
154
- "tags": model_info.tags if hasattr(model_info, "tags") else [],
155
- "pipeline_tag": model_info.pipeline_tag if hasattr(model_info, "pipeline_tag") else None,
156
- "downloads": model_info.downloads if hasattr(model_info, "downloads") else 0,
157
- "last_modified": model_info.lastModified if hasattr(model_info, "lastModified") else None,
158
- "commit": model_info.sha[:7] if hasattr(model_info, "sha") and model_info.sha else None,
159
- "commit_url": f"https://huggingface.co/{model_id}/commit/{model_info.sha}" if hasattr(model_info, "sha") and model_info.sha else None,
160
- })
 
 
 
161
 
162
- if model_card and model_card.data:
163
- card_data = model_card.data.to_dict() if hasattr(model_card.data, "to_dict") else {}
164
- metadata.update({
165
- "language": card_data.get("language"),
166
- "license": card_data.get("license"),
167
- "library_name": card_data.get("library_name"),
168
- "base_model": card_data.get("base_model"),
169
- "datasets": card_data.get("datasets"),
170
- "model_name": card_data.get("model_name"),
171
- "tags": card_data.get("tags", metadata.get("tags", [])),
172
- "description": card_data.get("model_summary", None)
173
- })
174
- if hasattr(model_card.data, "eval_results") and model_card.data.eval_results:
175
- metadata["eval_results"] = model_card.data.eval_results
 
 
 
176
 
177
  metadata["ai:type"] = "Transformer"
178
  metadata["ai:task"] = metadata.get("pipeline_tag", "Text Generation")
@@ -198,43 +248,46 @@ class AIBOMGenerator:
198
  # Since we can't install the required libraries due to space constraints,
199
  # we'll simulate the enhancement with a placeholder implementation
200
 
201
- if model_card and hasattr(model_card, "text"):
202
- card_text = model_card.text
203
-
204
- # Simulate BERT extraction with basic text analysis
205
- # In reality, this would be done with NLP models
206
-
207
- # Extract description if missing
208
- if card_text and "description" not in enhanced_metadata:
209
- # Take first paragraph that's longer than 20 chars as description
210
- paragraphs = [p.strip() for p in card_text.split('\n\n')]
211
- for p in paragraphs:
212
- if len(p) > 20 and not p.startswith('#'):
213
- enhanced_metadata["description"] = p
214
- break
215
-
216
- # Extract limitations if present
217
- if "limitations" not in enhanced_metadata:
218
- if "## Limitations" in card_text:
219
- limitations_section = card_text.split("## Limitations")[1].split("##")[0].strip()
220
- if limitations_section:
221
- enhanced_metadata["limitations"] = limitations_section
222
-
223
- # Extract ethical considerations if present
224
- if "ethical_considerations" not in enhanced_metadata:
225
- for heading in ["## Ethical Considerations", "## Ethics", "## Bias"]:
226
- if heading in card_text:
227
- section = card_text.split(heading)[1].split("##")[0].strip()
228
- if section:
229
- enhanced_metadata["ethical_considerations"] = section
230
  break
231
-
232
- # Extract risks if present
233
- if "risks" not in enhanced_metadata:
234
- if "## Risks" in card_text:
235
- risks_section = card_text.split("## Risks")[1].split("##")[0].strip()
236
- if risks_section:
237
- enhanced_metadata["risks"] = risks_section
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
238
 
239
  return enhanced_metadata
240
 
@@ -247,7 +300,7 @@ class AIBOMGenerator:
247
  }]
248
 
249
  authors = []
250
- if "author" in metadata and metadata["author"]:
251
  authors.append({
252
  "name": metadata["author"],
253
  "url": f"https://huggingface.co/{metadata['author']}"
@@ -255,16 +308,20 @@ class AIBOMGenerator:
255
 
256
  component = {
257
  "type": "machine-learning-model",
258
- "name": metadata.get("name", model_id.split("/")[-1]),
259
  "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}"
260
  }
261
 
262
  properties = []
263
- for key, value in metadata.items():
264
- if key not in ["name", "author", "license"] and value is not None:
265
- if isinstance(value, (list, dict)):
266
- value = json.dumps(value)
267
- properties.append({"name": key, "value": str(value)})
 
 
 
 
268
 
269
  metadata_section = {
270
  "timestamp": timestamp,
@@ -282,30 +339,30 @@ class AIBOMGenerator:
282
  def _create_component_section(self, model_id: str, metadata: Dict[str, Any]) -> Dict[str, Any]:
283
  # Create PURL with version information if commit is available
284
  purl = f"pkg:huggingface/{model_id.replace('/', '/')}"
285
- if "commit" in metadata:
286
  purl = f"{purl}@{metadata['commit']}"
287
 
288
  component = {
289
  "type": "machine-learning-model",
290
  "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}",
291
- "name": metadata.get("name", model_id.split("/")[-1]),
292
  "purl": purl
293
  }
294
 
295
- if "description" in metadata:
296
  component["description"] = metadata["description"]
297
 
298
- if "commit" in metadata:
299
  component["version"] = metadata["commit"]
300
 
301
- if "license" in metadata:
302
  component["licenses"] = [{"license": {"id": metadata["license"]}}]
303
 
304
  external_refs = [{
305
  "type": "website",
306
  "url": f"https://huggingface.co/{model_id}"
307
  }]
308
- if "commit_url" in metadata:
309
  external_refs.append({
310
  "type": "vcs",
311
  "url": metadata["commit_url"]
@@ -318,18 +375,32 @@ class AIBOMGenerator:
318
 
319
  def _create_model_card_section(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
320
  model_card_section = {}
321
- model_parameters = {k: metadata[k] for k in ["base_model", "library_name", "pipeline_tag"] if k in metadata}
322
- if model_parameters:
323
- model_card_section["modelParameters"] = model_parameters
324
-
325
- if "eval_results" in metadata:
326
- model_card_section["quantitativeAnalysis"] = {"performanceMetrics": metadata["eval_results"]}
327
-
328
- considerations = {}
329
- for k in ["limitations", "ethical_considerations", "bias", "risks"]:
330
- if k in metadata:
331
- considerations[k] = metadata[k]
332
- if considerations:
333
- model_card_section["considerations"] = considerations
334
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
335
  return model_card_section
 
1
  import json
2
  import uuid
3
  import datetime
4
+ from typing import Dict, Optional, Any, List
5
 
6
  from huggingface_hub import HfApi, ModelCard
7
  from .utils import calculate_completeness_score
 
19
  self.inference_model_url = inference_model_url
20
  self.use_inference = use_inference
21
  self.cache_dir = cache_dir
22
+ self.enhancement_report = None # Store enhancement report as instance variable
23
 
24
  def generate_aibom(
25
  self,
 
27
  output_file: Optional[str] = None,
28
  include_inference: Optional[bool] = None,
29
  ) -> Dict[str, Any]:
30
+ try:
31
+ use_inference = include_inference if include_inference is not None else self.use_inference
32
+ model_info = self._fetch_model_info(model_id)
33
+ model_card = self._fetch_model_card(model_id)
34
+
35
+ # Store original metadata before any AI enhancement
36
+ original_metadata = self._extract_structured_metadata(model_id, model_info, model_card)
37
+
38
+ # Create initial AIBOM with original metadata
39
+ original_aibom = self._create_aibom_structure(model_id, original_metadata)
40
+
41
+ # Calculate initial score
42
+ original_score = calculate_completeness_score(original_aibom)
43
+
44
+ # Final metadata starts with original metadata
45
+ final_metadata = original_metadata.copy() if original_metadata else {}
46
+
47
+ # Apply AI enhancement if requested
48
+ ai_enhanced = False
49
+ ai_model_name = None
50
+
51
+ if use_inference and self.inference_model_url:
52
+ try:
53
+ # Extract additional metadata using AI
54
+ enhanced_metadata = self._extract_unstructured_metadata(model_card, model_id)
 
 
 
 
55
 
56
+ # If we got enhanced metadata, merge it with original
57
+ if enhanced_metadata:
58
+ ai_enhanced = True
59
+ ai_model_name = "BERT-base-uncased" # Will be replaced with actual model name
60
+
61
+ # Merge enhanced metadata with original (enhanced takes precedence)
62
+ for key, value in enhanced_metadata.items():
63
+ if value is not None and (key not in final_metadata or not final_metadata[key]):
64
+ final_metadata[key] = value
65
+ except Exception as e:
66
+ print(f"Error during AI enhancement: {e}")
67
+ # Continue with original metadata if enhancement fails
68
+
69
+ # Create final AIBOM with potentially enhanced metadata
70
+ aibom = self._create_aibom_structure(model_id, final_metadata)
71
+
72
+ # Calculate final score
73
+ final_score = calculate_completeness_score(aibom)
 
 
 
 
 
74
 
75
+ # Add score and enhancement info to metadata properties
76
+ if "metadata" in aibom and "properties" not in aibom["metadata"]:
77
+ aibom["metadata"]["properties"] = []
78
+
79
+
80
+ if "metadata" in aibom and "properties" in aibom["metadata"]:
81
+ # Add score information
82
+ aibom["metadata"]["properties"].append({"name": "aibom:quality-score", "value": str(final_score["total_score"])})
83
+ aibom["metadata"]["properties"].append({"name": "aibom:quality-breakdown", "value": json.dumps(final_score["section_scores"])})
84
+ aibom["metadata"]["properties"].append({"name": "aibom:max-scores", "value": json.dumps(final_score["max_scores"])})
85
+
86
+ # Add AI enhancement information
87
+ if ai_enhanced:
88
+ aibom["metadata"]["properties"].append({"name": "aibom:ai-enhanced", "value": "true"})
89
+ aibom["metadata"]["properties"].append({"name": "aibom:ai-model", "value": ai_model_name})
90
+ aibom["metadata"]["properties"].append({"name": "aibom:original-score", "value": str(original_score["total_score"])})
91
+ aibom["metadata"]["properties"].append({"name": "aibom:score-improvement",
92
+ "value": str(round(final_score["total_score"] - original_score["total_score"], 2))})
93
+
94
+ if output_file:
95
+ with open(output_file, 'w') as f:
96
+ json.dump(aibom, f, indent=2)
97
+
98
+ # Create enhancement report for UI display and store as instance variable
99
+ self.enhancement_report = {
100
+ "ai_enhanced": ai_enhanced,
101
+ "ai_model": ai_model_name if ai_enhanced else None,
102
+ "original_score": original_score,
103
+ "final_score": final_score,
104
+ "improvement": round(final_score["total_score"] - original_score["total_score"], 2) if ai_enhanced else 0
105
+ }
106
+
107
+ # Return only the AIBOM to maintain compatibility with existing code
108
+ return aibom
109
+ except Exception as e:
110
+ print(f"Error generating AIBOM: {e}")
111
+ # Return a minimal valid AIBOM structure in case of error
112
+ return self._create_minimal_aibom(model_id)
113
+
114
+ def _create_minimal_aibom(self, model_id: str) -> Dict[str, Any]:
115
+ """Create a minimal valid AIBOM structure in case of errors"""
116
+ return {
117
+ "bomFormat": "CycloneDX",
118
+ "specVersion": "1.6",
119
+ "serialNumber": f"urn:uuid:{str(uuid.uuid4())}",
120
+ "version": 1,
121
+ "metadata": {
122
+ "timestamp": datetime.datetime.utcnow().isoformat() + "Z",
123
+ "tools": [{
124
+ "vendor": "Aetheris AI",
125
+ "name": "aetheris-aibom-generator",
126
+ "version": "0.1.0"
127
+ }],
128
+ "component": {
129
+ "type": "machine-learning-model",
130
+ "name": model_id.split("/")[-1],
131
+ "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}"
132
+ },
133
+ "properties": [
134
+ {"name": "aibom:error", "value": "Error generating complete AIBOM"}
135
+ ]
136
+ },
137
+ "components": [{
138
+ "type": "machine-learning-model",
139
+ "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}",
140
+ "name": model_id.split("/")[-1],
141
+ "purl": f"pkg:huggingface/{model_id.replace('/', '/')}"
142
+ }],
143
+ "dependencies": []
144
  }
145
 
146
+ def get_enhancement_report(self):
147
+ """Return the enhancement report from the last generate_aibom call"""
148
+ return self.enhancement_report
149
 
150
  def _fetch_model_info(self, model_id: str) -> Dict[str, Any]:
151
  try:
 
192
  metadata = {}
193
 
194
  if model_info:
195
+ try:
196
+ metadata.update({
197
+ "name": model_info.modelId.split("/")[-1] if hasattr(model_info, "modelId") else model_id.split("/")[-1],
198
+ "author": model_info.author if hasattr(model_info, "author") else None,
199
+ "tags": model_info.tags if hasattr(model_info, "tags") else [],
200
+ "pipeline_tag": model_info.pipeline_tag if hasattr(model_info, "pipeline_tag") else None,
201
+ "downloads": model_info.downloads if hasattr(model_info, "downloads") else 0,
202
+ "last_modified": model_info.lastModified if hasattr(model_info, "lastModified") else None,
203
+ "commit": model_info.sha[:7] if hasattr(model_info, "sha") and model_info.sha else None,
204
+ "commit_url": f"https://huggingface.co/{model_id}/commit/{model_info.sha}" if hasattr(model_info, "sha") and model_info.sha else None,
205
+ })
206
+ except Exception as e:
207
+ print(f"Error extracting model info metadata: {e}")
208
 
209
+ if model_card and hasattr(model_card, "data") and model_card.data:
210
+ try:
211
+ card_data = model_card.data.to_dict() if hasattr(model_card.data, "to_dict") else {}
212
+ metadata.update({
213
+ "language": card_data.get("language"),
214
+ "license": card_data.get("license"),
215
+ "library_name": card_data.get("library_name"),
216
+ "base_model": card_data.get("base_model"),
217
+ "datasets": card_data.get("datasets"),
218
+ "model_name": card_data.get("model_name"),
219
+ "tags": card_data.get("tags", metadata.get("tags", [])),
220
+ "description": card_data.get("model_summary", None)
221
+ })
222
+ if hasattr(model_card.data, "eval_results") and model_card.data.eval_results:
223
+ metadata["eval_results"] = model_card.data.eval_results
224
+ except Exception as e:
225
+ print(f"Error extracting model card metadata: {e}")
226
 
227
  metadata["ai:type"] = "Transformer"
228
  metadata["ai:task"] = metadata.get("pipeline_tag", "Text Generation")
 
248
  # Since we can't install the required libraries due to space constraints,
249
  # we'll simulate the enhancement with a placeholder implementation
250
 
251
+ if model_card and hasattr(model_card, "text") and model_card.text:
252
+ try:
253
+ card_text = model_card.text
254
+
255
+ # Simulate BERT extraction with basic text analysis
256
+ # In reality, this would be done with NLP models
257
+
258
+ # Extract description if missing
259
+ if card_text and "description" not in enhanced_metadata:
260
+ # Take first paragraph that's longer than 20 chars as description
261
+ paragraphs = [p.strip() for p in card_text.split('\n\n')]
262
+ for p in paragraphs:
263
+ if len(p) > 20 and not p.startswith('#'):
264
+ enhanced_metadata["description"] = p
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
265
  break
266
+
267
+ # Extract limitations if present
268
+ if "limitations" not in enhanced_metadata:
269
+ if "## Limitations" in card_text:
270
+ limitations_section = card_text.split("## Limitations")[1].split("##")[0].strip()
271
+ if limitations_section:
272
+ enhanced_metadata["limitations"] = limitations_section
273
+
274
+ # Extract ethical considerations if present
275
+ if "ethical_considerations" not in enhanced_metadata:
276
+ for heading in ["## Ethical Considerations", "## Ethics", "## Bias"]:
277
+ if heading in card_text:
278
+ section = card_text.split(heading)[1].split("##")[0].strip()
279
+ if section:
280
+ enhanced_metadata["ethical_considerations"] = section
281
+ break
282
+
283
+ # Extract risks if present
284
+ if "risks" not in enhanced_metadata:
285
+ if "## Risks" in card_text:
286
+ risks_section = card_text.split("## Risks")[1].split("##")[0].strip()
287
+ if risks_section:
288
+ enhanced_metadata["risks"] = risks_section
289
+ except Exception as e:
290
+ print(f"Error extracting unstructured metadata: {e}")
291
 
292
  return enhanced_metadata
293
 
 
300
  }]
301
 
302
  authors = []
303
+ if metadata and "author" in metadata and metadata["author"]:
304
  authors.append({
305
  "name": metadata["author"],
306
  "url": f"https://huggingface.co/{metadata['author']}"
 
308
 
309
  component = {
310
  "type": "machine-learning-model",
311
+ "name": metadata.get("name", model_id.split("/")[-1]) if metadata else model_id.split("/")[-1],
312
  "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}"
313
  }
314
 
315
  properties = []
316
+ if metadata:
317
+ for key, value in metadata.items():
318
+ if key not in ["name", "author", "license"] and value is not None:
319
+ try:
320
+ if isinstance(value, (list, dict)):
321
+ value = json.dumps(value)
322
+ properties.append({"name": key, "value": str(value)})
323
+ except Exception as e:
324
+ print(f"Error processing metadata property {key}: {e}")
325
 
326
  metadata_section = {
327
  "timestamp": timestamp,
 
339
  def _create_component_section(self, model_id: str, metadata: Dict[str, Any]) -> Dict[str, Any]:
340
  # Create PURL with version information if commit is available
341
  purl = f"pkg:huggingface/{model_id.replace('/', '/')}"
342
+ if metadata and "commit" in metadata:
343
  purl = f"{purl}@{metadata['commit']}"
344
 
345
  component = {
346
  "type": "machine-learning-model",
347
  "bom-ref": f"pkg:generic/{model_id.replace('/', '%2F')}",
348
+ "name": metadata.get("name", model_id.split("/")[-1]) if metadata else model_id.split("/")[-1],
349
  "purl": purl
350
  }
351
 
352
+ if metadata and "description" in metadata:
353
  component["description"] = metadata["description"]
354
 
355
+ if metadata and "commit" in metadata:
356
  component["version"] = metadata["commit"]
357
 
358
+ if metadata and "license" in metadata:
359
  component["licenses"] = [{"license": {"id": metadata["license"]}}]
360
 
361
  external_refs = [{
362
  "type": "website",
363
  "url": f"https://huggingface.co/{model_id}"
364
  }]
365
+ if metadata and "commit_url" in metadata:
366
  external_refs.append({
367
  "type": "vcs",
368
  "url": metadata["commit_url"]
 
375
 
376
  def _create_model_card_section(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
377
  model_card_section = {}
378
+
379
+ if not metadata:
380
+ return model_card_section
381
+
382
+ try:
383
+ # Safely extract model parameters
384
+ model_parameters = {}
385
+ for k in ["base_model", "library_name", "pipeline_tag"]:
386
+ if k in metadata:
387
+ model_parameters[k] = metadata[k]
388
+
389
+ if model_parameters:
390
+ model_card_section["modelParameters"] = model_parameters
391
+
392
+ # Safely extract evaluation results
393
+ if "eval_results" in metadata:
394
+ model_card_section["quantitativeAnalysis"] = {"performanceMetrics": metadata["eval_results"]}
395
+
396
+ # Safely extract considerations
397
+ considerations = {}
398
+ for k in ["limitations", "ethical_considerations", "bias", "risks"]:
399
+ if k in metadata:
400
+ considerations[k] = metadata[k]
401
+ if considerations:
402
+ model_card_section["considerations"] = considerations
403
+ except Exception as e:
404
+ print(f"Error creating model card section: {e}")
405
+
406
  return model_card_section