lucas-ventura committed on
Commit
36a1678
·
verified ·
1 Parent(s): 3b83a0e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -11
app.py CHANGED
@@ -11,11 +11,8 @@ from src.data.single_video import SingleVideo
11
  from src.data.utils_asr import PromptASR
12
  from src.models.llama_inference import inference
13
  from src.test.vidchapters import get_chapters
14
- from src.utils import RankedLogger
15
  from tools.download.models import download_model
16
 
17
- log = RankedLogger(__name__, rank_zero_only=True)
18
-
19
  # Set up proxies
20
  # from urllib.request import getproxies
21
  # proxies = getproxies()
@@ -37,7 +34,7 @@ def load_base_model():
37
  global base_model, tokenizer
38
 
39
  if base_model is None:
40
- log.info(f"Loading base model: {LLAMA_CKPT_PATH}")
41
  base_model = load_model_llamarecipes(
42
  model_name=LLAMA_CKPT_PATH,
43
  device_map="auto",
@@ -49,7 +46,7 @@ def load_base_model():
49
  tokenizer = AutoTokenizer.from_pretrained(LLAMA_CKPT_PATH)
50
  tokenizer.pad_token = tokenizer.eos_token
51
 
52
- log.info("Base model loaded successfully")
53
 
54
 
55
  class FastLlamaInference:
@@ -120,11 +117,11 @@ def load_peft(model_name: str = "asr-10k"):
120
 
121
  # Only load a new PEFT model if it's different from the current one
122
  if current_peft_model != model_name:
123
- log.info(f"Loading PEFT model: {model_name}")
124
  model_path = download_model(model_name)
125
 
126
  if not Path(model_path).exists():
127
- log.warning(f"PEFT model does not exist at {model_path}")
128
  return False
129
 
130
  # Apply the PEFT model to the base model
@@ -136,7 +133,7 @@ def load_peft(model_name: str = "asr-10k"):
136
  inference_model = FastLlamaInference(model=peft_model)
137
  current_peft_model = model_name
138
 
139
- log.info(f"PEFT model {model_name} loaded successfully")
140
  return True
141
 
142
  # Model already loaded
@@ -150,7 +147,7 @@ def download_from_url(url, output_path):
150
  try:
151
  import yt_dlp
152
  except ImportError:
153
- log.error("yt-dlp Python package is not installed")
154
  return (
155
  False,
156
  "yt-dlp Python package is not installed. Please install it with 'pip install yt-dlp'.",
@@ -178,7 +175,7 @@ def download_from_url(url, output_path):
178
  return True, None
179
  except Exception as e:
180
  error_msg = f"Error downloading video: {str(e)}"
181
- log.error(error_msg)
182
  return False, error_msg
183
 
184
 
@@ -318,4 +315,4 @@ with gr.Blocks(title="Chapter-Llama") as demo:
318
 
319
  if __name__ == "__main__":
320
  # Launch the Gradio app
321
- demo.launch()
 
11
  from src.data.utils_asr import PromptASR
12
  from src.models.llama_inference import inference
13
  from src.test.vidchapters import get_chapters
 
14
  from tools.download.models import download_model
15
 
 
 
16
  # Set up proxies
17
  # from urllib.request import getproxies
18
  # proxies = getproxies()
 
34
  global base_model, tokenizer
35
 
36
  if base_model is None:
37
+ print(f"Loading base model: {LLAMA_CKPT_PATH}")
38
  base_model = load_model_llamarecipes(
39
  model_name=LLAMA_CKPT_PATH,
40
  device_map="auto",
 
46
  tokenizer = AutoTokenizer.from_pretrained(LLAMA_CKPT_PATH)
47
  tokenizer.pad_token = tokenizer.eos_token
48
 
49
+ print("Base model loaded successfully")
50
 
51
 
52
  class FastLlamaInference:
 
117
 
118
  # Only load a new PEFT model if it's different from the current one
119
  if current_peft_model != model_name:
120
+ print(f"Loading PEFT model: {model_name}")
121
  model_path = download_model(model_name)
122
 
123
  if not Path(model_path).exists():
124
+ print(f"PEFT model does not exist at {model_path}")
125
  return False
126
 
127
  # Apply the PEFT model to the base model
 
133
  inference_model = FastLlamaInference(model=peft_model)
134
  current_peft_model = model_name
135
 
136
+ print(f"PEFT model {model_name} loaded successfully")
137
  return True
138
 
139
  # Model already loaded
 
147
  try:
148
  import yt_dlp
149
  except ImportError:
150
+ print("yt-dlp Python package is not installed")
151
  return (
152
  False,
153
  "yt-dlp Python package is not installed. Please install it with 'pip install yt-dlp'.",
 
175
  return True, None
176
  except Exception as e:
177
  error_msg = f"Error downloading video: {str(e)}"
178
+ print(error_msg)
179
  return False, error_msg
180
 
181
 
 
315
 
316
  if __name__ == "__main__":
317
  # Launch the Gradio app
318
+ demo.launch()