openfree committed (verified)
Commit 70cb9de · Parent: ec6517e

Delete app-backup.py

Files changed (1):
  1. app-backup.py +0 -730

app-backup.py DELETED
@@ -1,730 +0,0 @@
- #!/usr/bin/env python
-
- import os
- import re
- import tempfile
- import gc  # garbage collector
- from collections.abc import Iterator
- from threading import Thread
- import json
- import requests
- import cv2
- import gradio as gr
- import spaces
- import torch
- from loguru import logger
- from PIL import Image
- from transformers import AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer
-
- # CSV/TXT analysis
- import pandas as pd
- # PDF text extraction
- import PyPDF2
-
- ##############################################################################
- # Memory cleanup function
- ##############################################################################
- def clear_cuda_cache():
-     """Clear the CUDA cache explicitly."""
-     if torch.cuda.is_available():
-         torch.cuda.empty_cache()
-         gc.collect()
-
- ##############################################################################
- # SERPHouse API key from environment variable
- ##############################################################################
- SERPHOUSE_API_KEY = os.getenv("SERPHOUSE_API_KEY", "")
-
- ##############################################################################
- # Simple keyword extraction function
- ##############################################################################
- def extract_keywords(text: str, top_k: int = 5) -> str:
-     """
-     Strip non-alphanumeric/non-Korean characters, then return the first
-     top_k whitespace-separated tokens.
-     """
-     text = re.sub(r"[^a-zA-Z0-9가-힣\s]", "", text)
-     tokens = text.split()
-     key_tokens = tokens[:top_k]
-     return " ".join(key_tokens)
-
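For illustration, a quick check of the extractor above (example input and output are mine, not from the original file):

    # Punctuation is stripped, then the first top_k tokens are kept.
    print(extract_keywords("Analyze this X-ray image for hidden weapons, please!"))
    # -> "Analyze this Xray image for"  (the hyphen in "X-ray" is removed)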
- ##############################################################################
- # SerpHouse Live endpoint call
- ##############################################################################
- def do_web_search(query: str) -> str:
-     """
-     Return the top 20 'organic' results formatted as Markdown text.
-     """
-     try:
-         url = "https://api.serphouse.com/serp/live"
-
-         # Simplify parameters with a basic GET request and limit results to 20
-         params = {
-             "q": query,
-             "domain": "google.com",
-             "serp_type": "web",  # Basic web search
-             "device": "desktop",
-             "lang": "en",
-             "num": "20"  # Request max 20 results
-         }
-
-         headers = {
-             "Authorization": f"Bearer {SERPHOUSE_API_KEY}"
-         }
-
-         logger.info(f"SerpHouse API call... query: {query}")
-         logger.info(f"Request URL: {url} - params: {params}")
-
-         # GET request
-         response = requests.get(url, headers=headers, params=params, timeout=60)
-         response.raise_for_status()
-
-         logger.info(f"SerpHouse API response status: {response.status_code}")
-         data = response.json()
-
-         # Handle various response structures
-         results = data.get("results", {})
-         organic = None
-
-         # Possible response structure 1
-         if isinstance(results, dict) and "organic" in results:
-             organic = results["organic"]
-
-         # Possible response structure 2 (nested results)
-         elif isinstance(results, dict) and "results" in results:
-             if isinstance(results["results"], dict) and "organic" in results["results"]:
-                 organic = results["results"]["organic"]
-
-         # Possible response structure 3 (top-level organic)
-         elif "organic" in data:
-             organic = data["organic"]
-
-         if not organic:
-             logger.warning("No organic results found in response.")
-             logger.debug(f"Response structure: {list(data.keys())}")
-             if isinstance(results, dict):
-                 logger.debug(f"results structure: {list(results.keys())}")
-             return "No web search results found or unexpected API response structure."
-
-         # Limit results and optimize context length
-         max_results = min(20, len(organic))
-         limited_organic = organic[:max_results]
-
-         # Format results for better readability
-         summary_lines = []
-         for idx, item in enumerate(limited_organic, start=1):
-             title = item.get("title", "No title")
-             link = item.get("link", "#")
-             snippet = item.get("snippet", "No description")
-             displayed_link = item.get("displayed_link", link)
-
-             # Markdown format
-             summary_lines.append(
-                 f"### Result {idx}: {title}\n\n"
-                 f"{snippet}\n\n"
-                 f"**Source**: [{displayed_link}]({link})\n\n"
-                 f"---\n"
-             )
-
-         # Add simple instructions for the model
-         instructions = """
- # X-RAY Security Scanning Reference Results
- Use this information to enhance your analysis.
- """
-
-         search_results = instructions + "\n".join(summary_lines)
-         logger.info(f"Processed {len(limited_organic)} search results")
-         return search_results
-
-     except Exception as e:
-         logger.error(f"Web search failed: {e}")
-         return f"Web search failed: {str(e)}"
-
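A sketch of the three payload shapes the branching above can parse; the field layout is inferred from the code, not from SerpHouse documentation:

    hit = {"title": "t", "link": "https://example.com", "snippet": "s"}
    shape_1 = {"results": {"organic": [hit]}}               # structure 1
    shape_2 = {"results": {"results": {"organic": [hit]}}}  # structure 2 (nested)
    shape_3 = {"organic": [hit]}                            # structure 3 (top-level)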
-
- ##############################################################################
- # Model/Processor loading
- ##############################################################################
- MAX_CONTENT_CHARS = 2000
- MAX_INPUT_LENGTH = 2096  # Max input token limit
- model_id = os.getenv("MODEL_ID", "VIDraft/Gemma-3-R1984-4B")
-
- processor = AutoProcessor.from_pretrained(model_id, padding_side="left")
- model = Gemma3ForConditionalGeneration.from_pretrained(
-     model_id,
-     device_map="auto",
-     torch_dtype=torch.bfloat16,
-     attn_implementation="eager"  # Change to "flash_attention_2" if available
- )
- MAX_NUM_IMAGES = int(os.getenv("MAX_NUM_IMAGES", "5"))
-
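The comment on attn_implementation suggests switching when FlashAttention is available; a minimal sketch of choosing it automatically (my assumption, not part of the original file):

    import importlib.util

    # Use FlashAttention 2 only if the flash-attn package is importable.
    attn_impl = "flash_attention_2" if importlib.util.find_spec("flash_attn") else "eager"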
- ##############################################################################
- # CSV, TXT, PDF analysis functions
- ##############################################################################
- def analyze_csv_file(path: str) -> str:
-     """
-     Convert a CSV file to a string. Truncate if too long.
-     """
-     try:
-         df = pd.read_csv(path)
-         if df.shape[0] > 50 or df.shape[1] > 10:
-             df = df.iloc[:50, :10]
-         df_str = df.to_string()
-         if len(df_str) > MAX_CONTENT_CHARS:
-             df_str = df_str[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
-         return f"**[CSV File: {os.path.basename(path)}]**\n\n{df_str}"
-     except Exception as e:
-         return f"Failed to read CSV ({os.path.basename(path)}): {str(e)}"
-
-
- def analyze_txt_file(path: str) -> str:
-     """
-     Read a TXT file. Truncate if too long.
-     """
-     try:
-         with open(path, "r", encoding="utf-8") as f:
-             text = f.read()
-         if len(text) > MAX_CONTENT_CHARS:
-             text = text[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
-         return f"**[TXT File: {os.path.basename(path)}]**\n\n{text}"
-     except Exception as e:
-         return f"Failed to read TXT ({os.path.basename(path)}): {str(e)}"
-
-
- def pdf_to_markdown(pdf_path: str) -> str:
-     """
-     Convert PDF text to Markdown, extracting text page by page.
-     """
-     text_chunks = []
-     try:
-         with open(pdf_path, "rb") as f:
-             reader = PyPDF2.PdfReader(f)
-             max_pages = min(5, len(reader.pages))
-             for page_num in range(max_pages):
-                 page = reader.pages[page_num]
-                 page_text = page.extract_text() or ""
-                 page_text = page_text.strip()
-                 if page_text:
-                     if len(page_text) > MAX_CONTENT_CHARS // max_pages:
-                         page_text = page_text[:MAX_CONTENT_CHARS // max_pages] + "...(truncated)"
-                     text_chunks.append(f"## Page {page_num+1}\n\n{page_text}\n")
-             if len(reader.pages) > max_pages:
-                 text_chunks.append(f"\n...(Showing {max_pages} of {len(reader.pages)} pages)...")
-     except Exception as e:
-         return f"Failed to read PDF ({os.path.basename(pdf_path)}): {str(e)}"
-
-     full_text = "\n".join(text_chunks)
-     if len(full_text) > MAX_CONTENT_CHARS:
-         full_text = full_text[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
-
-     return f"**[PDF File: {os.path.basename(pdf_path)}]**\n\n{full_text}"
-
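For illustration, the three analyzers could be driven by one extension-based dispatcher, mirroring the routing process_new_user_message performs inline further down (this helper is hypothetical, not part of the original file):

    def describe_document(path: str) -> str:
        # Route by extension, matching the types is_document_file accepts.
        if path.lower().endswith(".csv"):
            return analyze_csv_file(path)
        if path.lower().endswith(".txt"):
            return analyze_txt_file(path)
        if path.lower().endswith(".pdf"):
            return pdf_to_markdown(path)
        return f"Unsupported file type: {path}"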
- ##############################################################################
- # Image/Video upload limit check
- ##############################################################################
- def count_files_in_new_message(paths: list[str]) -> tuple[int, int]:
-     image_count = 0
-     video_count = 0
-     for path in paths:
-         if path.endswith(".mp4"):
-             video_count += 1
-         elif re.search(r"\.(png|jpg|jpeg|gif|webp)$", path, re.IGNORECASE):
-             image_count += 1
-     return image_count, video_count
-
-
- def count_files_in_history(history: list[dict]) -> tuple[int, int]:
-     image_count = 0
-     video_count = 0
-     for item in history:
-         if item["role"] != "user" or isinstance(item["content"], str):
-             continue
-         if isinstance(item["content"], list) and len(item["content"]) > 0:
-             file_path = item["content"][0]
-             if isinstance(file_path, str):
-                 if file_path.endswith(".mp4"):
-                     video_count += 1
-                 elif re.search(r"\.(png|jpg|jpeg|gif|webp)$", file_path, re.IGNORECASE):
-                     image_count += 1
-     return image_count, video_count
-
-
- def validate_media_constraints(message: dict, history: list[dict]) -> bool:
-     media_files = []
-     for f in message["files"]:
-         if re.search(r"\.(png|jpg|jpeg|gif|webp)$", f, re.IGNORECASE) or f.endswith(".mp4"):
-             media_files.append(f)
-
-     new_image_count, new_video_count = count_files_in_new_message(media_files)
-     history_image_count, history_video_count = count_files_in_history(history)
-     image_count = history_image_count + new_image_count
-     video_count = history_video_count + new_video_count
-
-     if video_count > 1:
-         gr.Warning("Only one video is supported.")
-         return False
-     if video_count == 1:
-         if image_count > 0:
-             gr.Warning("Mixing images and videos is not allowed.")
-             return False
-         if "<image>" in message["text"]:
-             gr.Warning("Using <image> tags with video files is not supported.")
-             return False
-     if video_count == 0 and image_count > MAX_NUM_IMAGES:
-         gr.Warning(f"You can upload up to {MAX_NUM_IMAGES} images.")
-         return False
-
-     if "<image>" in message["text"]:
-         image_files = [f for f in message["files"] if re.search(r"\.(png|jpg|jpeg|gif|webp)$", f, re.IGNORECASE)]
-         image_tag_count = message["text"].count("<image>")
-         if image_tag_count != len(image_files):
-             gr.Warning("The number of <image> tags in the text does not match the number of image files.")
-             return False
-
-     return True
-
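A quick illustration of a message that passes these checks (message and history shapes inferred from the functions above; file names hypothetical):

    msg = {"text": "Scan <image> and <image> for threats.",
           "files": ["bag1.png", "bag2.jpg"]}
    # Two images, no video, and two <image> tags matching two image files.
    assert validate_media_constraints(msg, history=[]) is True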
- ##############################################################################
- # Video processing - with temp file tracking
- ##############################################################################
- def downsample_video(video_path: str) -> list[tuple[Image.Image, float]]:
-     vidcap = cv2.VideoCapture(video_path)
-     fps = vidcap.get(cv2.CAP_PROP_FPS)
-     total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
-     # Step at least one second, and at least a tenth of the clip, per sample
-     frame_interval = max(int(fps), int(total_frames / 10))
-     frames = []
-
-     for i in range(0, total_frames, frame_interval):
-         vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
-         success, image = vidcap.read()
-         if success:
-             image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-             # Resize image to half its original dimensions
-             image = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
-             pil_image = Image.fromarray(image)
-             timestamp = round(i / fps, 2)
-             frames.append((pil_image, timestamp))
-             if len(frames) >= 5:
-                 break
-
-     vidcap.release()
-     return frames
-
-
- def process_video(video_path: str) -> tuple[list[dict], list[str]]:
-     content = []
-     temp_files = []  # List for tracking temp files
-
-     frames = downsample_video(video_path)
-     for frame in frames:
-         pil_image, timestamp = frame
-         with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
-             pil_image.save(temp_file.name)
-             temp_files.append(temp_file.name)  # Track for deletion later
-             content.append({"type": "text", "text": f"Frame {timestamp}:"})
-             content.append({"type": "image", "url": temp_file.name})
-
-     return content, temp_files
-
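Worked example of the sampling arithmetic above (numbers are mine): for a 30 fps clip with 900 frames (30 s), frame_interval = max(30, 90) = 90, so frames are grabbed at indices 0, 90, 180, 270 and 360 (one every 3 s) before the five-frame cap stops the loop.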
- ##############################################################################
- # Interleaved <image> processing
- ##############################################################################
- def process_interleaved_images(message: dict) -> list[dict]:
-     parts = re.split(r"(<image>)", message["text"])
-     content = []
-     image_index = 0
-
-     image_files = [f for f in message["files"] if re.search(r"\.(png|jpg|jpeg|gif|webp)$", f, re.IGNORECASE)]
-
-     for part in parts:
-         if part == "<image>" and image_index < len(image_files):
-             content.append({"type": "image", "url": image_files[image_index]})
-             image_index += 1
-         elif part.strip():
-             content.append({"type": "text", "text": part.strip()})
-         else:
-             # Keep whitespace-only fragments as plain text
-             if isinstance(part, str) and part != "<image>":
-                 content.append({"type": "text", "text": part})
-     return content
-
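For illustration, how re.split keeps the <image> delimiters and the parts interleave (file names hypothetical):

    msg = {"text": "Compare <image> with <image>.", "files": ["a.png", "b.png"]}
    process_interleaved_images(msg)
    # -> [{'type': 'text', 'text': 'Compare'},
    #     {'type': 'image', 'url': 'a.png'},
    #     {'type': 'text', 'text': 'with'},
    #     {'type': 'image', 'url': 'b.png'},
    #     {'type': 'text', 'text': '.'}]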
- ##############################################################################
- # PDF + CSV + TXT + Image/Video
- ##############################################################################
- def is_image_file(file_path: str) -> bool:
-     return bool(re.search(r"\.(png|jpg|jpeg|gif|webp)$", file_path, re.IGNORECASE))
-
- def is_video_file(file_path: str) -> bool:
-     return file_path.endswith(".mp4")
-
- def is_document_file(file_path: str) -> bool:
-     return (
-         file_path.lower().endswith(".pdf")
-         or file_path.lower().endswith(".csv")
-         or file_path.lower().endswith(".txt")
-     )
-
-
- def process_new_user_message(message: dict) -> tuple[list[dict], list[str]]:
-     temp_files = []  # List for tracking temp files
-
-     if not message["files"]:
-         return [{"type": "text", "text": message["text"]}], temp_files
-
-     video_files = [f for f in message["files"] if is_video_file(f)]
-     image_files = [f for f in message["files"] if is_image_file(f)]
-     csv_files = [f for f in message["files"] if f.lower().endswith(".csv")]
-     txt_files = [f for f in message["files"] if f.lower().endswith(".txt")]
-     pdf_files = [f for f in message["files"] if f.lower().endswith(".pdf")]
-
-     content_list = [{"type": "text", "text": message["text"]}]
-
-     for csv_path in csv_files:
-         csv_analysis = analyze_csv_file(csv_path)
-         content_list.append({"type": "text", "text": csv_analysis})
-
-     for txt_path in txt_files:
-         txt_analysis = analyze_txt_file(txt_path)
-         content_list.append({"type": "text", "text": txt_analysis})
-
-     for pdf_path in pdf_files:
-         pdf_markdown = pdf_to_markdown(pdf_path)
-         content_list.append({"type": "text", "text": pdf_markdown})
-
-     if video_files:
-         video_content, video_temp_files = process_video(video_files[0])
-         content_list += video_content
-         temp_files.extend(video_temp_files)
-         return content_list, temp_files
-
-     if "<image>" in message["text"] and image_files:
-         interleaved_content = process_interleaved_images({"text": message["text"], "files": image_files})
-         if content_list and content_list[0]["type"] == "text":
-             content_list = content_list[1:]  # Drop the raw text; it is interleaved now
-         return interleaved_content + content_list, temp_files
-     else:
-         for img_path in image_files:
-             content_list.append({"type": "image", "url": img_path})
-
-     return content_list, temp_files
-
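A sketch of the resulting content list for a mixed upload (paths hypothetical; ordering follows the code above):

    msg = {"text": "Inspect this scan.", "files": ["scan.png", "report.pdf"]}
    content, tmp = process_new_user_message(msg)
    # content: [{'type': 'text', 'text': 'Inspect this scan.'},
    #           {'type': 'text', 'text': '**[PDF File: report.pdf]**\n\n...'},
    #           {'type': 'image', 'url': 'scan.png'}]
    # tmp: []  (temp files are only created for video frames)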
- ##############################################################################
- # history -> LLM message conversion
- ##############################################################################
- def process_history(history: list[dict]) -> list[dict]:
-     messages = []
-     current_user_content: list[dict] = []
-     for item in history:
-         if item["role"] == "assistant":
-             if current_user_content:
-                 messages.append({"role": "user", "content": current_user_content})
-                 current_user_content = []
-             messages.append({"role": "assistant", "content": [{"type": "text", "text": item["content"]}]})
-         else:
-             content = item["content"]
-             if isinstance(content, str):
-                 current_user_content.append({"type": "text", "text": content})
-             elif isinstance(content, list) and len(content) > 0:
-                 file_path = content[0]
-                 if is_image_file(file_path):
-                     current_user_content.append({"type": "image", "url": file_path})
-                 else:
-                     current_user_content.append({"type": "text", "text": f"[File: {os.path.basename(file_path)}]"})
-
-     if current_user_content:
-         messages.append({"role": "user", "content": current_user_content})
-
-     return messages
-
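For illustration, how a Gradio "messages" history is folded into model messages (content values are mine):

    history = [
        {"role": "user", "content": "What is in this bag?"},
        {"role": "user", "content": ["bag.png"]},
        {"role": "assistant", "content": "I see one laptop."},
    ]
    process_history(history)
    # -> [{'role': 'user', 'content': [{'type': 'text', 'text': 'What is in this bag?'},
    #                                  {'type': 'image', 'url': 'bag.png'}]},
    #     {'role': 'assistant', 'content': [{'type': 'text', 'text': 'I see one laptop.'}]}]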
- ##############################################################################
- # Model generation function with OOM catch
- ##############################################################################
- def _model_gen_with_oom_catch(**kwargs):
-     """
-     Catch OutOfMemoryError in a separate thread.
-     """
-     try:
-         model.generate(**kwargs)
-     except torch.cuda.OutOfMemoryError:
-         raise RuntimeError(
-             "[OutOfMemoryError] GPU memory insufficient. "
-             "Please reduce Max New Tokens or prompt length."
-         )
-     finally:
-         # Clear cache after generation
-         clear_cuda_cache()
-
-
- ##############################################################################
- # Main inference function (with auto web search)
- ##############################################################################
- @spaces.GPU(duration=120)
- def run(
-     message: dict,
-     history: list[dict],
-     system_prompt: str = "",
-     max_new_tokens: int = 512,
-     use_web_search: bool = False,
-     web_search_query: str = "",
- ) -> Iterator[str]:
-
-     if not validate_media_constraints(message, history):
-         yield ""
-         return
-
-     temp_files = []  # For tracking temp files
-
-     try:
-         combined_system_msg = ""
-
-         # Used internally only (hidden from UI)
-         if system_prompt.strip():
-             combined_system_msg += f"[System Prompt]\n{system_prompt.strip()}\n\n"
-
-         if use_web_search:
-             user_text = message["text"]
-             ws_query = extract_keywords(user_text, top_k=5)
-             if ws_query.strip():
-                 logger.info(f"[Auto WebSearch Keyword] {ws_query!r}")
-                 ws_result = do_web_search(ws_query)
-                 combined_system_msg += f"[X-RAY Security Reference Data]\n{ws_result}\n\n"
-             else:
-                 combined_system_msg += "[No valid keywords found, skipping WebSearch]\n\n"
-
-         messages = []
-         if combined_system_msg.strip():
-             messages.append({
-                 "role": "system",
-                 "content": [{"type": "text", "text": combined_system_msg.strip()}],
-             })
-
-         messages.extend(process_history(history))
-
-         user_content, user_temp_files = process_new_user_message(message)
-         temp_files.extend(user_temp_files)  # Track temp files
-
-         for item in user_content:
-             if item["type"] == "text" and len(item["text"]) > MAX_CONTENT_CHARS:
-                 item["text"] = item["text"][:MAX_CONTENT_CHARS] + "\n...(truncated)..."
-         messages.append({"role": "user", "content": user_content})
-
-         inputs = processor.apply_chat_template(
-             messages,
-             add_generation_prompt=True,
-             tokenize=True,
-             return_dict=True,
-             return_tensors="pt",
-         ).to(device=model.device, dtype=torch.bfloat16)
-
-         # Limit input token count
-         if inputs.input_ids.shape[1] > MAX_INPUT_LENGTH:
-             inputs.input_ids = inputs.input_ids[:, -MAX_INPUT_LENGTH:]
-             if 'attention_mask' in inputs:
-                 inputs.attention_mask = inputs.attention_mask[:, -MAX_INPUT_LENGTH:]
-
-         streamer = TextIteratorStreamer(processor, timeout=30.0, skip_prompt=True, skip_special_tokens=True)
-         gen_kwargs = dict(
-             inputs,
-             streamer=streamer,
-             max_new_tokens=max_new_tokens,
-         )
-
-         t = Thread(target=_model_gen_with_oom_catch, kwargs=gen_kwargs)
-         t.start()
-
-         output = ""
-         for new_text in streamer:
-             output += new_text
-             yield output
-
-     except Exception as e:
-         logger.error(f"Error in run: {str(e)}")
-         yield f"Error occurred: {str(e)}"
-
-     finally:
-         # Delete temp files
-         for temp_file in temp_files:
-             try:
-                 if os.path.exists(temp_file):
-                     os.unlink(temp_file)
-                     logger.info(f"Deleted temp file: {temp_file}")
-             except Exception as e:
-                 logger.warning(f"Failed to delete temp file {temp_file}: {e}")
-
-         # Explicit memory cleanup
-         try:
-             del inputs, streamer
-         except NameError:
-             pass
-
-         clear_cuda_cache()
-
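Since run is a generator, it can be exercised without the Gradio UI; a minimal sketch, assuming the model is loaded and a GPU is available (message contents are mine):

    msg = {"text": "Describe the scan.", "files": []}
    for partial in run(msg, history=[], system_prompt="Answer briefly.", max_new_tokens=64):
        print(partial)  # each yield is the cumulative output so far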
- ##############################################################################
- # X-RAY security scanning examples
- ##############################################################################
- examples = [
-     [
-         {
-             "text": "Begin identifying and analyzing risk factors in the uploaded X-ray images.",
-             "files": [""],
-         }
-     ],
- ]
-
- ##############################################################################
- # Gradio UI (Blocks) layout
- ##############################################################################
- css = """
- .gradio-container {
-     background: white;
-     padding: 30px 40px;
-     margin: 20px auto;
-     width: 100% !important;
-     max-width: none !important;
- }
- .fillable {
-     width: 100% !important;
-     max-width: 100% !important;
- }
- body {
-     background: white;
-     margin: 0;
-     padding: 0;
-     font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
-     color: #333;
- }
- button, .btn {
-     background: transparent !important;
-     border: 1px solid #ddd;
-     color: #333;
-     padding: 12px 24px;
-     text-transform: uppercase;
-     font-weight: bold;
-     letter-spacing: 1px;
-     cursor: pointer;
- }
- button:hover, .btn:hover {
-     background: rgba(0, 0, 0, 0.05) !important;
- }
-
- h1, h2, h3 {
-     color: #333;
- }
-
- .multimodal-textbox, textarea, input {
-     background: rgba(255, 255, 255, 0.5) !important;
-     border: 1px solid #ddd;
-     color: #333;
- }
-
- .chatbox, .chatbot, .message {
-     background: transparent !important;
- }
-
- #examples_container, .examples-container {
-     margin: auto;
-     width: 90%;
-     background: transparent !important;
- }
- """
-
- title_html = """
- <h1 align="center" style="margin-bottom: 0.2em; font-size: 1.6em;">Gemma-3-R1984-4B-BEAM</h1>
- """
-
- with gr.Blocks(css=css, title="Gemma-3-R1984-4B-BEAM - X-RAY Security Scanner") as demo:
-     gr.Markdown(title_html)
-
-     # Display the web search option (while the system prompt and token slider remain hidden)
-     web_search_checkbox = gr.Checkbox(
-         label="Deep Research",
-         value=False
-     )
-
-     # X-RAY security scanning system prompt
-     system_prompt_box = gr.Textbox(
-         lines=3,
-         value="""You must always answer in Korean. You are an advanced X-RAY security scanning AI specialized in threat detection and aviation security. Your primary mission is to identify every potential security threat in X-RAY images with the highest possible accuracy.
-
- Detection priorities:
- 1. **Weapons**: firearms (pistols, rifles, etc.), knives/blades/sharp objects, self-defense and combat weapons
- 2. **Explosives**: bombs, detonators, explosive materials, suspicious electronic devices, wires connected to batteries
- 3. **Prohibited items**: scissors, high-capacity batteries, springs (possible weapon components), tools
- 4. **Liquids**: any liquid in a container of 100 ml or more (potential chemical threat)
- 5. **EOD components**: any part that could be assembled into an explosive device
-
- Analysis protocol:
- - Scan systematically from the top-left to the bottom-right
- - Report threat locations on a grid basis (e.g., "upper-left quadrant")
- - Classify threat severity:
-   - **HIGH**: immediate danger
-   - **MEDIUM**: carriage prohibited
-   - **LOW**: requires additional inspection
- - Use professional security terminology
- - Recommend an action for each detected threat
-
- ⚠️ Critical: never miss a potential threat. When in doubt, always request a manual inspection.""",
-         visible=False  # hidden from view
-     )
-
-     max_tokens_slider = gr.Slider(
-         label="Max New Tokens",
-         minimum=100,
-         maximum=8000,
-         step=50,
-         value=1000,
-         visible=False  # hidden from view
-     )
-
-     web_search_text = gr.Textbox(
-         lines=1,
-         label="Web Search Query",
-         placeholder="",
-         visible=False  # hidden from view
-     )
-
-     # Configure the chat interface
-     chat = gr.ChatInterface(
-         fn=run,
-         type="messages",
-         chatbot=gr.Chatbot(type="messages", scale=1, allow_tags=["image"]),
-         textbox=gr.MultimodalTextbox(
-             file_types=[
-                 ".webp", ".png", ".jpg", ".jpeg", ".gif",
-                 ".mp4", ".csv", ".txt", ".pdf"
-             ],
-             file_count="multiple",
-             autofocus=True
-         ),
-         multimodal=True,
-         # Order must match run()'s parameters after (message, history)
-         additional_inputs=[
-             system_prompt_box,
-             max_tokens_slider,
-             web_search_checkbox,
-             web_search_text,
-         ],
-         stop_btn=False,
-         title='<a href="https://discord.gg/openfreeai" target="_blank">https://discord.gg/openfreeai</a>',
-         # The examples parameter was removed
-         run_examples_on_click=False,
-         cache_examples=False,
-         css_paths=None,
-         delete_cache=(1800, 1800),
-     )
-
-
- if __name__ == "__main__":
-     # Run locally
-     demo.launch()