# genesis/biosecurity.py
import os
import requests
from typing import Dict, Any
from datetime import datetime
from .providers import pubmed_fallback_search, run_deepseek_summary
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
NCBI_API_KEY = os.getenv("NCBI_API_KEY")
NCBI_EMAIL = os.getenv("NCBI_EMAIL")

# Keyword buckets used by the heuristic risk scorer below.
RISK_KEYWORDS = {
    "low": ["harmless", "biosafety level 1", "safe", "non-pathogenic"],
    "medium": ["biosafety level 2", "BSL-2", "infectious", "containment"],
    "high": ["BSL-3", "BSL-4", "pandemic potential", "gain-of-function", "biosecurity concern"],
}


def score_biosecurity_risk(text: str) -> int:
    """Score risk based on keywords found in the AI report."""
    text_lower = text.lower()
    score = 0
    # Compare case-insensitively so keywords like "BSL-2" still match the lowercased text.
    for word in RISK_KEYWORDS["low"]:
        if word.lower() in text_lower:
            score += 10
    for word in RISK_KEYWORDS["medium"]:
        if word.lower() in text_lower:
            score += 25
    for word in RISK_KEYWORDS["high"]:
        if word.lower() in text_lower:
            score += 50
    return min(score, 100)  # cap at 100
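
# Illustrative example of the scoring heuristic: a report mentioning "BSL-2",
# "infectious", and "containment" hits three medium-tier keywords at 25 points
# each, so it scores 75 (totals above 100 are capped at 100).
# score_biosecurity_risk("A BSL-2 infectious agent requiring containment.")  # -> 75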


def run_biosecurity_scan(entity: str) -> Dict[str, Any]:
    """
    Run an AI-powered biosecurity risk scan for a given biological entity.
    Combines an AI assessment with PubMed literature citations.
    """
    try:
        # AI assessment
        prompt = f"""
        You are a synthetic biology & biosecurity expert.
        Assess the potential biosecurity risks of the following entity: {entity}.
        Classify its biosafety level, potential misuse, regulatory concerns, and safe handling guidelines.
        Respond with detailed analysis.
        """
        ai_report = run_deepseek_summary(prompt)

        # Risk score
        score = score_biosecurity_risk(ai_report)

        # PubMed citations
        citations = pubmed_fallback_search(entity, NCBI_API_KEY, NCBI_EMAIL)

        return {
            "entity": entity,
            "timestamp": datetime.utcnow().isoformat(),
            "risk_score": score,
            "report": ai_report,
            "citations": citations,
        }
    except Exception as e:
        return {
            "entity": entity,
            "timestamp": datetime.utcnow().isoformat(),
            "error": str(e),
            "risk_score": 0,
            "report": "Error running biosecurity scan.",
            "citations": [],
        }
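

# Minimal usage sketch. Assumes the credentials read above (NCBI_API_KEY, NCBI_EMAIL,
# plus whatever keys genesis.providers needs for the DeepSeek call) are set in the
# environment; the entity below is just an illustrative query.
if __name__ == "__main__":
    import json

    result = run_biosecurity_scan("Escherichia coli K-12")
    print(json.dumps(result, indent=2, default=str))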