Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,3 +1,5 @@
+
+
 import os
 import io
 import torch
@@ -7,6 +9,7 @@ import pdfplumber
 import ffmpeg  # ✅ Replaced moviepy with ffmpeg-python
 import librosa
 import soundfile as sf
+import subprocess
 from fastapi import FastAPI, UploadFile, File, HTTPException
 from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 from sentence_transformers import SentenceTransformer, util
@@ -18,12 +21,15 @@ os.environ['CUDA_VISIBLE_DEVICES'] = '0'
 # ✅ Ensure GPU is Used
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# ✅
+# ✅ Fix Spacy Installation (Prevent Permission Errors)
 try:
-    if not spacy.util.is_package("en_core_web_sm"):
-        spacy.cli.download("en_core_web_sm")
-
     nlp = spacy.load("en_core_web_sm")
+except OSError:
+    subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm", "--user"])
+    nlp = spacy.load("en_core_web_sm")
+
+# ✅ Load NLP Models
+try:
     summarizer = pipeline("summarization", model="nsi319/legal-pegasus", device=0 if torch.cuda.is_available() else -1)
     embedding_model = SentenceTransformer("all-mpnet-base-v2", device=device)
     ner_model = pipeline("ner", model="dslim/bert-base-NER", tokenizer="dslim/bert-base-NER", device=0 if torch.cuda.is_available() else -1)
@@ -153,6 +159,3 @@ async def analyze_video(file: UploadFile = File(...)):
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=7860)
 
-
-
-
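The spaCy change above is a load-first, download-on-failure bootstrap: the unconditional spacy.cli.download(...) call is replaced by attempting spacy.load("en_core_web_sm") and only downloading the model (with --user, so the install does not need write access to system site-packages) when that load raises OSError. A minimal standalone sketch of the same pattern follows; the helper name and the use of sys.executable instead of the literal "python" string are illustrative choices, not part of the commit:

import subprocess
import sys

import spacy


def load_spacy_model(name: str = "en_core_web_sm"):
    """Load a spaCy model; if it is not installed, download it to the user site and retry."""
    try:
        return spacy.load(name)
    except OSError:
        # OSError signals the model package is missing; install it with --user
        # (no write access to system site-packages needed), then retry the load once.
        subprocess.run(
            [sys.executable, "-m", "spacy", "download", name, "--user"],
            check=True,
        )
        return spacy.load(name)


nlp = load_spacy_model()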