#!/usr/bin/env python3
"""
Verification script to show current model configuration
"""
import os
def show_model_config():
    """Show which model will be loaded and return its identifier.

    Reads the AI_MODEL environment variable (default
    "google/gemma-3n-E4B-it"). Any model name containing "GGUF" is
    overridden with the default, since GGUF checkpoints are not loadable
    by this code path. Also reports which loader stack will be used:
    AutoProcessor + Gemma3nForConditionalGeneration for Gemma 3n models,
    AutoTokenizer + AutoModelForCausalLM otherwise.

    Returns:
        str: the model identifier that will actually be loaded.
    """
    # NOTE(review): the original strings contained mojibake (mis-decoded
    # emoji, e.g. "π", "β"); they have been stripped. One such corrupted
    # character split a string literal across two lines, which was an
    # unterminated-f-string syntax error — fixed below.
    print("Model Configuration Analysis")
    print("=" * 50)

    # Resolve the configured model, falling back to the default.
    ai_model_env = os.environ.get("AI_MODEL", "google/gemma-3n-E4B-it")
    print(f"Environment variable AI_MODEL: {ai_model_env}")

    # GGUF checkpoints cannot be loaded here; force the default instead.
    if "GGUF" in ai_model_env:
        current_model = "google/gemma-3n-E4B-it"
        print(f"OVERRIDE: GGUF model detected, using: {current_model}")
        print(f"   Original: {ai_model_env}")
        print(f"   Fixed to: {current_model}")
    else:
        current_model = ai_model_env
        # BUG FIX: this print was an unterminated string in the original.
        print(f"Using: {current_model}")

    print(f"\nFinal model that will be loaded: {current_model}")

    # Gemma 3n models require a different loader stack than generic
    # causal-LM checkpoints.
    is_gemma_3n = "gemma-3n" in current_model.lower()
    print(f"Is Gemma 3n model: {is_gemma_3n}")
    if is_gemma_3n:
        print("Will use: AutoProcessor + Gemma3nForConditionalGeneration")
    else:
        print("Will use: AutoTokenizer + AutoModelForCausalLM")

    return current_model
# Allow running this module directly as a standalone diagnostic script.
if __name__ == "__main__":
    show_model_config()