Update app.py
app.py
CHANGED
@@ -194,7 +194,8 @@ class VideoClassifier:
             except:
                 prompt1 = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
                 messages = [{"role": "user", "content": prompt1}]
-
+                stream = self.client.chat_completion(messages, max_tokens=100)
+                main_class = stream.choices[0].message.content.strip()
                 # output = ""
                 # for response in stream:
                 #     output += response['token'].text
@@ -204,12 +205,15 @@ class VideoClassifier:
 
             print(main_class)
             print("#######################################################")
-
-
-
-
-
-
+            try:
+                pattern = r"Main-class =>\s*(.+)"
+                match = re.search(pattern, main_class)
+                if match:
+                    main_class = match.group(1).strip()
+                else:
+                    main_class = None
+            except:
+                main_class=main_class
         else:
             prompt_text = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
             response = self.genai_model.generate_content(contents=prompt_text)
@@ -270,18 +274,20 @@ class VideoClassifier:
             except:
                 prompt2 = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
                 messages = [{"role": "user", "content": prompt1}]
-
+                stream = self.client.chat_completion(messages, max_tokens=100)
+                answer = stream.choices[0].message.content.strip()
 
             print("Preprocess Answer",answer)
 
-
-
-
-
-
-
-
-
+            try:
+                pattern = r"Sub-class =>\s*(.+)"
+                match = re.search(pattern, answer)
+                if match:
+                    sub_class = match.group(1).strip()
+                else:
+                    sub_class = None
+            except:
+                subclass=answer
         else:
             prompt_text2 = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
             response = self.genai_model.generate_content(contents=prompt_text2)
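Both new blocks in this commit follow the same pattern: take the chat reply (via self.client.chat_completion(messages, max_tokens=100), reading the text from choices[0].message.content), then pull the category out of the "Main-class =>" or "Sub-class =>" marker with a regex, keeping the raw reply if parsing throws. Below is a minimal standalone sketch of that extraction step; the extract_label helper and the sample reply string are illustrative only and not part of app.py.

import re

def extract_label(reply: str, key: str):
    """Return the text after e.g. 'Main-class =>' in a model reply, or None.

    Mirrors the regex added in this commit: the literal marker, optional
    whitespace, then everything up to the end of the line.
    """
    match = re.search(rf"{re.escape(key)} =>\s*(.+)", reply)
    return match.group(1).strip() if match else None

# Illustrative reply; in app.py this text comes from chat_completion.
reply = "Main-class => Education\nSub-class => Tutorials"
print(extract_label(reply, "Main-class"))  # Education
print(extract_label(reply, "Sub-class"))   # Tutorials

Returning None when the marker is missing matches the else branches in the diff, so callers can tell a failed parse apart from a real category name.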