Update app.py
app.py CHANGED
@@ -144,36 +144,32 @@ def resize_image(image, max_size):
 
 
-def process_image(image_input
-    response = requests.post(url, headers=headers, data=json.dumps(payload))
-    if response.status_code == 200:
-        results = response.json()
-        return results["result"]
-    else:
-        return f"Error: {response.status_code}, {response.text}"
+def process_image(image_input):
+    # Initialize the Gradio client with the URL of the Gradio server
+    client = Client("https://adept-fuyu-8b-demo.hf.space/--replicas/pqjvl/")
+
+    # Check if the image input is a file path (str) or a PIL Image
+    if isinstance(image_input, str):
+        # Assuming it's a file path or a URL
+        image_path = image_input
+    else:
+        # Assuming it's a PIL Image, save it to a temporary file
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp_file:
+            image_input.save(tmp_file.name)
+            image_path = tmp_file.name
+
+    # Call the predict method of the client
+    result = client.predict(
+        image_path,  # File path or URL of the image
+        True,        # Additional parameter for the server (e.g., enable detailed captioning)
+        fn_index=2   # Function index if the server has multiple functions
+    )
+
+    # Clean up the temporary file if created
+    if not isinstance(image_input, str):
+        os.remove(image_path)
+
+    return result
 
 
 def query_vectara(text):
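For context, the new process_image relies on gradio_client's Client together with tempfile and os, none of which appear in this hunk; the sketch below shows how the same call could be exercised in isolation, assuming those imports already sit at the top of app.py. The Space URL and fn_index=2 are taken from the diff above; the example image path is a placeholder, not part of the commit.

# Standalone sketch (not part of the commit): calling the Fuyu-8B Space the
# same way the new process_image does. The imports are assumed here because
# the diff does not show the top of app.py.
import os
import tempfile

from gradio_client import Client
from PIL import Image

def caption_with_fuyu(image_input):
    # Same Space URL and function index as in the diff above.
    client = Client("https://adept-fuyu-8b-demo.hf.space/--replicas/pqjvl/")

    if isinstance(image_input, str):
        image_path = image_input  # already a file path or URL
    else:
        # PIL Image: write it to a temporary PNG so the client can upload it
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp_file:
            image_input.save(tmp_file.name)
            image_path = tmp_file.name

    result = client.predict(image_path, True, fn_index=2)

    if not isinstance(image_input, str):
        os.remove(image_path)  # remove the temporary file we created
    return result

if __name__ == "__main__":
    # "example.png" is a placeholder; point this at any local image.
    print(caption_with_fuyu(Image.open("example.png")))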
@@ -336,7 +332,7 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
 
     # Process text input
     if text_input is not None:
-        combined_text = "
+        combined_text = "The user asks the following to his health adviser: " + text_input
 
     # Process audio input
     if audio_input is not None:

@@ -344,15 +340,15 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, t
         print("Audio Text:", audio_text) # Debug print
         combined_text += "\n" + audio_text
 
-    # Check if only an image is provided without text
-    if image_input is not None and not combined_text.strip():
-        return "Error: Please provide text input along with the image.", "No hallucination evaluation"
-
     # Process image input
     if image_input is not None:
-        image_text = process_image(image_input
+        image_text = process_image(image_input) # Call process_image with only the image input
         print("Image Text:", image_text) # Debug print
        combined_text += "\n" + image_text
+
+    # Check if combined text is empty
+    if not combined_text.strip():
+        return "Error: Please provide some input (text, audio, or image).", "No hallucination evaluation"
 
     # Use the text to query Vectara
     vectara_response_json = query_vectara(combined_text)
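The net effect of the last two hunks is that the empty-input guard now runs after text, audio, and image have all been folded into combined_text, so an image-only submission is no longer rejected. A minimal sketch of that ordering follows; build_combined_text is an illustrative helper name, not a function in app.py.

# Illustrative only: mirrors the new ordering in process_and_query, where the
# emptiness check runs only after every input has been considered.
def build_combined_text(text_input=None, audio_text=None, image_text=None):
    combined_text = ""
    if text_input is not None:
        combined_text = "The user asks the following to his health adviser: " + text_input
    if audio_text is not None:
        combined_text += "\n" + audio_text
    if image_text is not None:
        combined_text += "\n" + image_text

    if not combined_text.strip():
        return "Error: Please provide some input (text, audio, or image)."
    return combined_text

# An image-only submission is now accepted:
# build_combined_text(image_text="A scanned lab report showing ...")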