#!/usr/bin/env python3
"""Verification script to show current model configuration."""
import os

# Fallback model used when AI_MODEL is unset, and the forced replacement
# when a (transformers-incompatible) GGUF model is requested.
DEFAULT_MODEL = "google/gemma-3n-E4B-it"


def show_model_config():
    """Print the model-selection logic step by step and return the final model id.

    Reads the AI_MODEL environment variable (defaulting to DEFAULT_MODEL),
    overrides any GGUF-format model with DEFAULT_MODEL, and reports which
    loader classes will be used for the resolved model.

    Returns:
        str: the model identifier that will actually be loaded.
    """
    print("šŸ” Model Configuration Analysis")
    print("=" * 50)

    # Check environment variable
    ai_model_env = os.environ.get("AI_MODEL", DEFAULT_MODEL)
    print(f"šŸ“ Environment variable AI_MODEL: {ai_model_env}")

    # Apply override logic. Match case-insensitively: GGUF identifiers appear
    # both uppercase ("...-GGUF") and lowercase ("model.gguf") in the wild.
    if "gguf" in ai_model_env.lower():
        current_model = DEFAULT_MODEL
        print(f"šŸ”„ OVERRIDE: GGUF model detected, using: {current_model}")
        print(f" Original: {ai_model_env}")
        print(f" Fixed to: {current_model}")
    else:
        current_model = ai_model_env
        print(f"āœ… Using: {current_model}")

    print(f"\nšŸŽÆ Final model that will be loaded: {current_model}")

    # Check if it's Gemma 3n (case-insensitive, consistent with the GGUF check)
    is_gemma_3n = "gemma-3n" in current_model.lower()
    print(f"šŸ¤– Is Gemma 3n model: {is_gemma_3n}")
    if is_gemma_3n:
        print("šŸ“š Will use: AutoProcessor + Gemma3nForConditionalGeneration")
    else:
        print("šŸ“š Will use: AutoTokenizer + AutoModelForCausalLM")

    return current_model


if __name__ == "__main__":
    show_model_config()