Update handler.py
handler.py  CHANGED  (+2 -2)
@@ -12,11 +12,11 @@ class EndpointHandler():
         )
         self.pipeline = pipeline("image-to-text", model=path, model_kwargs={"quantization_config": quantization_config})
 
-    def __call__(self, data: Dict[
+    def __call__(self, data: Dict[Any, Any]):
         totalarr = []
         inputs = data.pop("inputs", '')
         prompt_base = data.pop("prompt", "")
         image = Image.open(requests.get(inputs, stream=True).raw)
         prompt = f"USER: <image>\n{prompt_base}.Answer in one word\nASSISTANT:"
         outputs = self.pipeline(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
-        return (outputs[0]['generated_text']) # Extract the generated_text from the pipeline output
+        return (outputs[0]['generated_text']) # Extract the generated_text from the pipeline output
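For context, here is a minimal sketch of how the full handler.py might look around this hunk, assuming the standard Hugging Face Inference Endpoints custom-handler layout (an EndpointHandler class with __init__(path) and __call__(data)). Only the pipeline construction, the prompt template, and the __call__ body are visible in the diff; the import list and the 4-bit BitsAndBytesConfig settings are assumptions, since the hunk shows just the closing parenthesis of that config.

# Sketch only; settings marked "assumed" are not visible in the diff above.
from typing import Dict, Any

import requests
import torch
from PIL import Image
from transformers import pipeline, BitsAndBytesConfig


class EndpointHandler():
    def __init__(self, path=""):
        # Assumed 4-bit quantization config; only its closing ")" appears in the hunk.
        quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
        )
        self.pipeline = pipeline(
            "image-to-text",
            model=path,
            model_kwargs={"quantization_config": quantization_config},
        )

    def __call__(self, data: Dict[Any, Any]):
        totalarr = []  # present in the diff but unused in this hunk
        # "inputs" is expected to be an image URL; "prompt" is the user question.
        inputs = data.pop("inputs", "")
        prompt_base = data.pop("prompt", "")
        image = Image.open(requests.get(inputs, stream=True).raw)
        # LLaVA-style chat template asking for a one-word answer, as in the diff.
        prompt = f"USER: <image>\n{prompt_base}.Answer in one word\nASSISTANT:"
        outputs = self.pipeline(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
        return outputs[0]["generated_text"]  # Extract the generated_text from the pipeline output

When deployed, the endpoint would call this roughly as EndpointHandler(path)({"inputs": "<image-url>", "prompt": "What is shown here?"}) and return the generated text; the exact request schema depends on the Inference Endpoints setup and is not part of this commit.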