main.py CHANGED
@@ -36,7 +36,10 @@ app.add_middleware(
 
 from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
 try:
+    interpreter = None
     interpreter = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
+    if interpreter is None:
+        print("\n\n interpreter is none \n\n")
     #interpreter_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
     #interpreter_processor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
     #interpreter_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
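Note: the added "if interpreter is None" check can never fire where it sits: pipeline() either returns a pipeline object or raises, so on failure control jumps to the except block before the check runs. A minimal sketch of the pattern this commit appears to be after (sentinel bound before the try, checked after it), using the same model id; the same reasoning applies to the summarizer and generator hunks below:

    from transformers import pipeline

    interpreter = None
    try:
        # pipeline() raises on failure; it never returns None
        interpreter = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
    except Exception as exp:
        print("[ERROR] Can't load Salesforce/blip-image-captioning-base")
        print(str(exp))

    if interpreter is None:
        print("interpreter is none")  # reached only if loading failed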
@@ -47,7 +50,10 @@ except Exception as exp:
 
 
 try:
+    summarizer = None
     summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
+    if summarizer is None:
+        print("\n\n summarizer is none \n\n")
 except Exception as exp:
     print("[ERROR] Can't load facebook/bart-large-cnn")
     print(str(exp))
@@ -63,7 +69,10 @@ except Exception as exp:
 
 
 try:
+    generator = None
     generator = pipeline("text-generation", model="deepseek-ai/deepseek-coder-1.3b-instruct")
+    if generator is None:
+        print("\n\n generator is none \n\n")
 except Exception as exp:
     print("[ERROR] Can't load deepseek-ai/deepseek-coder-1.3b-instruct")
     print(str(exp))
@@ -96,9 +105,11 @@ def index(req:Request):
 
 @app.post('/get')
 def g(f: str):
+    global generator
     return generator(f)[0]["generated_text"]
 @app.post('/gets')
 def g(f: str):
+    global summarizer
     return summarizer(f)[0]['summary_text']
 
 
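Note: Python's global statement is only needed to rebind a module-level name, not to read one, so "global generator" and "global summarizer" have no effect in these read-only handlers. Both routes also reuse the function name g, which works because FastAPI registers each route at decoration time, but distinct names are clearer. A hedged sketch with a None guard; the handler names generate and summarize are illustrative, not from the commit:

    @app.post('/get')
    def generate(f: str):
        # reading a module-level name needs no global statement
        if generator is None:
            return {"error": "text-generation pipeline failed to load"}
        return generator(f)[0]["generated_text"]

    @app.post('/gets')
    def summarize(f: str):
        if summarizer is None:
            return {"error": "summarization pipeline failed to load"}
        return summarizer(f)[0]["summary_text"]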
@@ -110,6 +121,7 @@ def caption(file:UploadFile=File(...)):
     if extension not in Supported_extensions:
         return {"error": "Unsupported file type"}
     image = Image.open(file.file)
+
     caption = interpreter(image)
     #pixel_values = interpreter_processor(images=image, return_tensors="pt").pixel_values
     #output_ids = interpreter_model.generate(pixel_values, max_length=16, num_beams=4)
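Note: an image-to-text pipeline returns a list of dicts keyed by "generated_text", so the caption variable above holds a list, not a string. Converting uploads to RGB is a common guard against palette or RGBA images. A minimal sketch, assuming the BLIP pipeline loaded in the first hunk:

    from PIL import Image

    image = Image.open(file.file).convert("RGB")  # normalize palette/RGBA uploads
    result = interpreter(image)                   # e.g. [{'generated_text': 'a cat on a couch'}]
    caption = result[0]["generated_text"]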