"""Verification script to show the current model configuration."""

import os


def show_model_config():
    """Show which model will be used."""
    print("🔍 Model Configuration Analysis")
    print("=" * 50)

    # Read the model name from the environment, with a sensible default.
    ai_model_env = os.environ.get("AI_MODEL", "google/gemma-3n-E4B-it")
    print(f"📋 Environment variable AI_MODEL: {ai_model_env}")

    # GGUF checkpoints are overridden with the default model, since the
    # class pairs reported below expect a standard Hugging Face checkpoint.
    if "GGUF" in ai_model_env:
        current_model = "google/gemma-3n-E4B-it"
        print(f"🔄 OVERRIDE: GGUF model detected, using: {current_model}")
        print(f"   Original: {ai_model_env}")
        print(f"   Fixed to: {current_model}")
    else:
        current_model = ai_model_env
        print(f"✅ Using: {current_model}")

    print(f"\n🎯 Final model that will be loaded: {current_model}")

    # Gemma 3n is multimodal, so it needs a different processor/model pair
    # than a plain causal language model.
    is_gemma_3n = "gemma-3n" in current_model.lower()
    print(f"🤖 Is Gemma 3n model: {is_gemma_3n}")

    if is_gemma_3n:
        print("📝 Will use: AutoProcessor + Gemma3nForConditionalGeneration")
    else:
        print("📝 Will use: AutoTokenizer + AutoModelForCausalLM")

    return current_model
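

# A minimal sketch of how the class pairs reported above could be used to
# load the detected model, assuming the Hugging Face `transformers` library
# is installed (Gemma 3n support shipped in transformers 4.53). The helper
# name `load_model` is illustrative and not part of the original script.
def load_model(current_model):
    """Load `current_model` with the classes show_model_config() reports."""
    if "gemma-3n" in current_model.lower():
        # Gemma 3n pairs AutoProcessor with its dedicated
        # conditional-generation class.
        from transformers import AutoProcessor, Gemma3nForConditionalGeneration

        processor = AutoProcessor.from_pretrained(current_model)
        model = Gemma3nForConditionalGeneration.from_pretrained(current_model)
        return processor, model

    # Text-only models go through the generic auto classes.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(current_model)
    model = AutoModelForCausalLM.from_pretrained(current_model)
    return tokenizer, model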


if __name__ == "__main__":
    show_model_config()
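
# Example usage from a shell, assuming this file is saved as
# verify_model_config.py (the filename and the alternate model name below
# are illustrative):
#
#   AI_MODEL="google/gemma-3n-E2B-it" python verify_model_config.py
#
# With AI_MODEL unset, the script falls back to google/gemma-3n-E4B-it.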