# Author: Yago Bolivar
# feat: add accessibility check for GPT4All library and model initialization
# Commit: ca88013
import sys
def check_gpt4all_accessibility(model_name="/Users/yagoairm2/Library/Application Support/nomic.ai/GPT4All/Meta-Llama-3-8B-Instruct.Q4_0.gguf"):
    """
    Check whether the gpt4all library is importable and can load a model.

    Performs three steps, printing progress and diagnostics for each:
      1. Import the ``gpt4all`` package.
      2. Initialize a ``GPT4All`` model (may trigger a download).
      3. Run a short test generation inside a chat session.

    Args:
        model_name: Model file name or absolute path handed to ``GPT4All()``.
            Defaults to a local Meta-Llama-3 ``.gguf`` path — TODO confirm
            this path exists on the target machine.

    Returns:
        bool: True only if all three steps succeed; False on any failure.
        Errors are printed, never raised.
    """
    print("--- GPT4All Accessibility Check ---")

    # Step 1: import check. Imported lazily inside the function so a
    # missing package produces a helpful message rather than crashing
    # at module import time.
    try:
        print("Step 1: Attempting to import GPT4All...")
        from gpt4all import GPT4All
        print("SUCCESS: GPT4All library imported successfully.\n")
    except ImportError:
        print("ERROR: GPT4All library not found.")
        print("Please install it by running: pip install gpt4all")
        print("-----------------------------------")
        return False
    except Exception as e:
        print(f"ERROR: An unexpected error occurred during import: {e}")
        print("-----------------------------------")
        return False

    # Step 2: model initialization. allow_download=True lets gpt4all fetch
    # the model if the file is not present locally.
    try:
        print(f"Step 2: Attempting to initialize GPT4All model: '{model_name}'...")
        print("This might take a moment if the model needs to be downloaded.")
        # You can specify a model_path if your models are stored in a custom location:
        # model = GPT4All(model_name, model_path="/path/to/your/models/")
        model = GPT4All(model_name, allow_download=True)  # allow_download defaults to True
        print(f"SUCCESS: GPT4All model '{model_name}' initialized successfully.")
        print("Model object:", model)

        # Step 3: quick generation smoke test to verify the model actually runs.
        print("\nStep 3: Performing a quick generation test...")
        try:
            if hasattr(model, 'generate'):
                # Use a chat session context for compatibility across
                # gpt4all versions that expect one for generation.
                with model.chat_session():
                    response = model.generate("Hello, world!", max_tokens=10, temp=0.7)
                print(f"Test generation successful. Response (first few tokens): {response[:50]}...")
            else:
                print("Model does not have a direct 'generate' method in this context. Skipping generation test.")
            print("SUCCESS: GPT4All seems to be working correctly.")
            print("-----------------------------------")
            return True
        except Exception as e:
            print(f"ERROR: Failed during test generation with model '{model_name}': {e}")
            print("This could be due to model compatibility or an issue with the generation process.")
            print("-----------------------------------")
            # Initialization worked but generation failed; reported as
            # an overall failure so callers get a single clear signal.
            return False
    except Exception as e:
        print(f"ERROR: Failed to initialize GPT4All model '{model_name}': {e}")
        print("This could be due to various reasons:")
        print("  - The model name is incorrect or not available for download.")
        print("  - You don't have an internet connection to download the model.")
        print("  - Issues with model file integrity if previously downloaded.")
        print("  - Insufficient disk space or permissions.")
        print("  - Underlying C++ library issues (check gpt4all installation).")
        print("Please check the model name and your internet connection.")
        print("You can find available models at https://gpt4all.io/index.html")
        print("-----------------------------------")
        return False
if __name__ == "__main__":
    # Run the accessibility check and report an overall verdict.
    check_passed = check_gpt4all_accessibility()
    if check_passed:
        print("\nGPT4All is accessible and a model was loaded successfully.")
    else:
        print("\nGPT4All accessibility check failed. Please review the error messages above.")