Update app.py

app.py CHANGED
@@ -35,7 +35,7 @@ import PyPDF2
 
 warnings.filterwarnings('ignore')
 
-print("🎮 Robot vision system initializing (Gemma3-
+print("🎮 Robot vision system initializing (Gemma3-4B GGUF Q4_K_M)...")
 
 ##############################################################################
 # Constant definitions
@@ -50,7 +50,7 @@ SERPHOUSE_API_KEY = os.getenv("SERPHOUSE_API_KEY", "")
 ##############################################################################
 llm = None
 model_loaded = False
-model_name = "Gemma3-
+model_name = "Gemma3-4B-GGUF-Q4_K_M"
 
 ##############################################################################
 # Memory management
@@ -215,34 +215,73 @@ def image_to_base64_data_uri(image: Union[np.ndarray, Image.Image]) -> str:
 ##############################################################################
 def download_model_files():
     """Download model files from the Hugging Face Hub"""
-            local_files_only=False
-        )
+    # Try several candidate repositories
+    model_repos = [
+        # First attempt: standard Gemma 3 4B GGUF
+        {
+            "repo": "Mungert/gemma-3-4b-it-gguf",
+            "model": "google_gemma-3-4b-it-q4_k_m.gguf",
+            "mmproj": "google_gemma-3-4b-it-mmproj-bf16.gguf"
+        },
+        # Second attempt: LM Studio build
+        {
+            "repo": "lmstudio-community/gemma-3-4b-it-GGUF",
+            "model": "gemma-3-4b-it-Q4_K_M.gguf",
+            "mmproj": "gemma-3-4b-it-mmproj-f16.gguf"
+        },
+        # Third attempt: unsloth build
+        {
+            "repo": "unsloth/gemma-3-4b-it-GGUF",
+            "model": "gemma-3-4b-it.Q4_K_M.gguf",
+            "mmproj": "gemma-3-4b-it.mmproj.gguf"
+        }
+    ]
 
+    for repo_info in model_repos:
+        try:
+            logger.info(f"Trying repository: {repo_info['repo']}")
+
+            # Download the main model
+            model_filename = repo_info["model"]
+            logger.info(f"Downloading model: {model_filename}")
+
+            model_path = hf_hub_download(
+                repo_id=repo_info["repo"],
+                filename=model_filename,
+                resume_download=True,
+                local_files_only=False
+            )
+
+            # Download the vision projection file
+            mmproj_filename = repo_info["mmproj"]
+            logger.info(f"Downloading vision model: {mmproj_filename}")
+
+            try:
+                mmproj_path = hf_hub_download(
+                    repo_id=repo_info["repo"],
+                    filename=mmproj_filename,
+                    resume_download=True,
+                    local_files_only=False
+                )
+            except:
+                # The mmproj file may not exist
+                logger.warning(f"Vision model not found: {mmproj_filename}")
+                logger.warning("Proceeding in text-only mode.")
+                mmproj_path = None
+
+            logger.info(f"✅ Model download succeeded!")
+            logger.info(f"Model path: {model_path}")
+            if mmproj_path:
+                logger.info(f"Vision path: {mmproj_path}")
+
+            return model_path, mmproj_path
+
+        except Exception as e:
+            logger.error(f"Repository {repo_info['repo']} attempt failed: {e}")
+            continue
 
+    # All attempts failed
+    raise Exception("No usable GGUF model found. Check your internet connection.")
 
 @spaces.GPU(duration=120)
 def load_model():
@@ -253,7 +292,7 @@ def load_model():
         return True
 
     try:
-        logger.info("Gemma3-
+        logger.info("Gemma3-4B GGUF Q4_K_M model loading started...")
         clear_cuda_cache()
 
         # Download model files
@@ -262,26 +301,40 @@ def load_model():
         # Check whether a GPU is available
         n_gpu_layers = -1 if torch.cuda.is_available() else 0
 
-        # Create the chat handler (vision support)
-        chat_handler =
+        # Create the chat handler (vision support - only when mmproj is present)
+        chat_handler = None
+        if mmproj_path:
+            try:
+                chat_handler = Llava16ChatHandler(
+                    clip_model_path=mmproj_path,
+                    verbose=False
+                )
+                logger.info("✅ Vision model loaded")
+            except Exception as e:
+                logger.warning(f"Vision model load failed, switching to text-only mode: {e}")
+                chat_handler = None
 
         # Load the model
-        model_path
+        llm_params = {
+            "model_path": model_path,
+            "n_ctx": 4096,                  # context size
+            "n_gpu_layers": n_gpu_layers,   # GPU layers
+            "n_threads": 8,                 # CPU threads
+            "verbose": False,
+            "seed": 42,
+        }
+
+        # Attach the chat handler when present
+        if chat_handler:
+            llm_params["chat_handler"] = chat_handler
+            llm_params["logits_all"] = True  # required for vision models
+
+        llm = Llama(**llm_params)
 
         model_loaded = True
-        logger.info(f"✅
+        logger.info(f"✅ Gemma3-4B model loading complete!")
+        if not chat_handler:
+            logger.warning("⚠️ Running in text-only mode; image analysis may be limited.")
         return True
 
     except Exception as e:
@@ -342,6 +395,31 @@ def analyze_image_for_robot(
         return "❌ Model loading failed"
 
     try:
+        # Warn when the vision model is missing
+        if not hasattr(llm, 'chat_handler') or llm.chat_handler is None:
+            logger.warning("Vision model not loaded; only text-based analysis is available.")
+
+            # Text-only analysis
+            system_prompt = f"""You are a robot vision system simulator.
+You cannot see the actual image, but you plan and analyze robot tasks based on the user's description.
+Task type: {task_type}"""
+
+            messages = [
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": f"[Image analysis request] {prompt}"}
+            ]
+
+            response = llm.create_chat_completion(
+                messages=messages,
+                max_tokens=max_new_tokens,
+                temperature=0.7,
+                top_p=0.9,
+                stream=False
+            )
+
+            result = response['choices'][0]['message']['content'].strip()
+            return f"⚠️ Text-only mode\n\n{result}"
+
         # Convert the image to base64
         image_uri = image_to_base64_data_uri(image)
 
@@ -537,14 +615,14 @@ with gr.Blocks(title="🤖 Robot Vision System (Gemma3-4B GGUF)", css=css) a
     gr.HTML("""
     <div class="robot-header">
         <h1>🤖 Robot Vision System</h1>
-        <h3>🎮 Gemma3-
+        <h3>🎮 Gemma3-4B GGUF Q4_K_M + 📷 Live Webcam + 🔍 Web Search</h3>
         <p>⚡ Faster, more efficient robot task analysis with a quantized model!</p>
     </div>
     """)
 
     gr.HTML("""
     <div class="model-info">
-        <strong>Model:</strong> Gemma3-
+        <strong>Model:</strong> Gemma3-4B Q4_K_M (2.5GB) | <strong>Memory usage:</strong> ~3-4GB VRAM
     </div>
     """)
 
@@ -851,7 +929,7 @@ with gr.Blocks(title="🤖 Robot Vision System (Gemma3-4B GGUF)", css=css) a
     )
 
 if __name__ == "__main__":
-    print("🚀 Robot vision system starting (Gemma3-
+    print("🚀 Robot vision system starting (Gemma3-4B GGUF Q4_K_M)...")
     demo.launch(
         server_name="0.0.0.0",
         server_port=7860,
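For reference, the repository-fallback download pattern introduced in download_model_files can be exercised on its own. The sketch below is a minimal, standalone version under the same assumptions as the diff (huggingface_hub installed; the candidate entry mirrors the first repository above); it is illustrative, not the app's exact code.

# Minimal sketch of the repo-fallback download, assuming huggingface_hub
# is installed. The candidate entry mirrors the first repository in the diff.
import logging
from huggingface_hub import hf_hub_download

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

MODEL_REPOS = [
    {
        "repo": "Mungert/gemma-3-4b-it-gguf",
        "model": "google_gemma-3-4b-it-q4_k_m.gguf",
        "mmproj": "google_gemma-3-4b-it-mmproj-bf16.gguf",
    },
]

def fetch_first_available(repos=MODEL_REPOS):
    """Return (model_path, mmproj_path) from the first repository that works."""
    for info in repos:
        try:
            model_path = hf_hub_download(repo_id=info["repo"], filename=info["model"])
            try:
                mmproj_path = hf_hub_download(repo_id=info["repo"], filename=info["mmproj"])
            except Exception:
                mmproj_path = None  # the vision projector is optional
            return model_path, mmproj_path
        except Exception as e:
            logger.error("Repository %s failed: %s", info["repo"], e)
    raise RuntimeError("No usable GGUF model found.")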
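The load path above attaches a Llava16ChatHandler only when an mmproj file was found; with a handler attached, llama-cpp-python's create_chat_completion accepts image_url content parts, which is how the base64 data URI produced by image_to_base64_data_uri is consumed. A minimal sketch under those assumptions follows; the placeholder paths and prompt text are hypothetical, not taken from the app.

# Minimal sketch of a vision-capable chat call with llama-cpp-python.
# model_path / mmproj_path stand in for the values returned by
# download_model_files(); the prompt text is a hypothetical example.
from llama_cpp import Llama
from llama_cpp.llama_chat_format import Llava16ChatHandler

model_path = "gemma-3-4b-it-Q4_K_M.gguf"       # placeholder
mmproj_path = "gemma-3-4b-it-mmproj-f16.gguf"  # placeholder
image_uri = "data:image/png;base64,..."        # e.g. from image_to_base64_data_uri

handler = Llava16ChatHandler(clip_model_path=mmproj_path, verbose=False)
llm = Llama(
    model_path=model_path,
    chat_handler=handler,
    n_ctx=4096,
    logits_all=True,  # needed so the handler can read image-token logits
)

response = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a robot vision assistant."},
        {"role": "user", "content": [
            {"type": "image_url", "image_url": {"url": image_uri}},
            {"type": "text", "text": "List the objects the robot could grasp."},
        ]},
    ],
    max_tokens=512,
    temperature=0.7,
)
print(response["choices"][0]["message"]["content"])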