Coool2 committed · Commit 3b3d057 · 1 Parent(s): cd23647

Update agent.py

Files changed (1): agent.py (+3 -5)
agent.py CHANGED
@@ -26,7 +26,6 @@ from llama_index.core.schema import ImageNode, TextNode
 # LlamaIndex specialized imports
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from llama_index.llms.huggingface import HuggingFaceLLM
-from llama_index.multi_modal_llms.huggingface import HuggingFaceMultiModal
 from llama_index.readers.assemblyai import AssemblyAIAudioTranscriptReader
 from llama_index.readers.json import JSONReader
 from llama_index.readers.web import BeautifulSoupWebReader
@@ -119,10 +118,9 @@ def initialize_models(use_api_mode=False):
     print("Initializing models in non-API mode with local models...")

     try :
-        # Fallback to regular HuggingFace LLM
-        proj_llm = HuggingFaceMultiModal(
-            model_name="mistralai/Pixtral-12B-2409",
-            tokenizer_name="mistralai/Pixtral-12B-2409",
+        proj_llm = HuggingFaceLLM(
+            model_name="google/gemma-3-12b-it",
+            tokenizer_name="google/gemma-3-12b-it",
             device_map="auto",
             max_new_tokens=16000,
             model_kwargs={"torch_dtype": "auto"},
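The commit swaps the multimodal Pixtral wrapper for a text-only HuggingFaceLLM in the local-model path of initialize_models, and drops the now-unused HuggingFaceMultiModal import. A minimal sketch of how the updated call likely reads after this change, assuming no keyword arguments beyond those visible in the diff context (the closing parenthesis and any further arguments are outside the hunk):

# Sketch of the updated local-model initialization after this commit.
# Only the arguments visible in the diff are shown; any remaining keyword
# arguments in agent.py are assumed unchanged and omitted here.
from llama_index.llms.huggingface import HuggingFaceLLM

proj_llm = HuggingFaceLLM(
    model_name="google/gemma-3-12b-it",      # replaces mistralai/Pixtral-12B-2409
    tokenizer_name="google/gemma-3-12b-it",
    device_map="auto",                       # let the HF loader place the weights
    max_new_tokens=16000,
    model_kwargs={"torch_dtype": "auto"},    # pick dtype from the checkpoint config
)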