# FhirFlame Environment Configuration

# =============================================================================
# API Keys (optional - the app works without them)
# =============================================================================

# Mistral API Configuration
MISTRAL_API_KEY=your_mistral_api_key_here

# HuggingFace Configuration  
HF_TOKEN=your_huggingface_token_here

# Modal Labs Configuration
MODAL_TOKEN_ID=your_modal_token_id_here
MODAL_TOKEN_SECRET=your_modal_token_secret_here
MODAL_ENDPOINT_URL=https://your-modal-app.modal.run
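# Tokens are issued from the Modal dashboard; if the Modal CLI is installed,
# it can also store them locally, e.g.:
#   modal token set --token-id <id> --token-secret <secret>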

# Ollama Configuration
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=codellama:13b-instruct
USE_REAL_OLLAMA=true
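# Optional sanity checks (assumes a local Ollama install on the default port above):
#   curl http://localhost:11434/api/tags          # lists locally available models
#   ollama pull codellama:13b-instruct            # downloads the model referenced above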

# =============================================================================
# Modal Labs GPU Pricing (USD per hour)
# Based on Modal's official pricing as of 2024
# =============================================================================

# GPU Hourly Rates
MODAL_A100_HOURLY_RATE=1.32
MODAL_T4_HOURLY_RATE=0.51
MODAL_L4_HOURLY_RATE=0.73
MODAL_CPU_HOURLY_RATE=0.048

# Modal Platform Fee (percentage markup)
MODAL_PLATFORM_FEE=15

# GPU Performance Estimates (characters per second)
MODAL_A100_CHARS_PER_SEC=2000
MODAL_T4_CHARS_PER_SEC=1200
MODAL_L4_CHARS_PER_SEC=800
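# Illustrative cost math (an assumption about how these values combine; the
# app's actual formula may differ): a 6,000-character document on an A100 at
# 2,000 chars/sec takes ~3 s, i.e. (3 / 3600) h * $1.32/h ≈ $0.0011, plus the
# 15% platform fee ≈ $0.0013 per document.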

# =============================================================================
# Cloud Provider Pricing
# =============================================================================

# HuggingFace Inference API (USD per 1K tokens)
HF_COST_PER_1K_TOKENS=0.06
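# Example (illustrative only): a 2,500-token request at $0.06 per 1K tokens ≈ $0.15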

# Ollama Local (free)
OLLAMA_COST_PER_REQUEST=0.0

# =============================================================================
# Processing Configuration
# =============================================================================

# Provider selection thresholds
AUTO_SELECT_MODAL_THRESHOLD=1500
AUTO_SELECT_BATCH_THRESHOLD=5
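# Likely interpretation (unverified assumption): documents longer than 1,500
# characters, or batches of more than 5 documents, are auto-routed to Modal.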

# Demo and Development
DEMO_MODE=false
USE_COST_OPTIMIZATION=true

# =============================================================================
# Monitoring and Observability (Optional)
# =============================================================================

# Langfuse Configuration
LANGFUSE_SECRET_KEY=your_langfuse_secret_key
LANGFUSE_PUBLIC_KEY=your_langfuse_public_key
LANGFUSE_HOST=https://cloud.langfuse.com

# =============================================================================
# Medical AI Configuration
# =============================================================================

# FHIR Validation
FHIR_VALIDATION_LEVEL=standard
ENABLE_FHIR_R4=true
ENABLE_FHIR_R5=true

# Medical Entity Extraction
EXTRACT_PATIENT_INFO=true
EXTRACT_CONDITIONS=true
EXTRACT_MEDICATIONS=true
EXTRACT_VITALS=true
EXTRACT_PROCEDURES=true