Update app.py
app.py CHANGED
@@ -37,7 +37,7 @@ device = init_device()
 
 @st.cache_resource
 def load_model():
-    """Load model"""
+    """Load model with fallback options for quantization"""
     try:
         # Using your original base model
         base_model_id = "unsloth/llama-3.2-11b-vision-instruct-unsloth-bnb-4bit"
@@ -45,12 +45,22 @@ def load_model():
         # Load processor
         processor = AutoProcessor.from_pretrained(base_model_id)
 
-        #
-
-
-
-
-
+        # Try to load with 4-bit quantization first
+        try:
+            import bitsandbytes
+            model = AutoModelForCausalLM.from_pretrained(
+                base_model_id,
+                device_map="auto",
+                load_in_4bit=True,
+                torch_dtype=torch.float16
+            )
+        except ImportError:
+            st.warning("bitsandbytes not available. Falling back to float16 precision.")
+            model = AutoModelForCausalLM.from_pretrained(
+                base_model_id,
+                device_map="auto",
+                torch_dtype=torch.float16
+            )
 
         # Load adapter
         adapter_id = "saakshigupta/deepfake-explainer-1"