Update app.py
app.py
CHANGED
@@ -586,48 +586,66 @@ HTML_TEMPLATE = """
 </html>
 """
 
+# ... (imports and initial setup) ...
+
 @app.route('/')
 def index():
     return render_template_string(HTML_TEMPLATE)
 
+# Add health check endpoint
+@app.route('/health', methods=['GET'])
+def health_check():
+    # A simple endpoint to check if the application is running
+    return jsonify({"status": "healthy"}), 200
+
 @app.route('/upload', methods=['POST'])
 def upload_file():
     try:
         if 'file' not in request.files:
             return jsonify({'success': False, 'error': 'No file provided'})
-
+
         file = request.files['file']
-
+
         if file.filename == '':
             return jsonify({'success': False, 'error': 'No file selected'})
-
+
         # Read file content
         file_content = file.read()
         file_io = io.BytesIO(file_content)
-
-        #
-
-
-
-
-
+
+        # --- CORRECTED FILE UPLOAD CALL ---
+        # Upload to Gemini File API using the correct method client.upload_file
+        # Pass the file content as a tuple (filename, file-like object, mime_type)
+        # This replaces the incorrect client.files.upload call
+        try:
+            document = client.upload_file(
+                file=(file.filename, file_io, 'application/pdf'),
+                display_name=file.filename # Optional: provide a display name
+            )
+            print(f"File uploaded successfully: {document.name}") # Log for debugging
+        except Exception as upload_error:
+            return jsonify({'success': False, 'error': f'Error uploading file to Gemini API: {str(upload_error)}'})
+        # --- END CORRECTED FILE UPLOAD CALL ---
+
         # Create cache with system instruction
         try:
             system_instruction = "You are an expert document analyzer. Provide detailed, accurate answers based on the uploaded document content. Always be helpful and thorough in your responses."
-
+
             # Use the correct model format as per documentation
             model = 'models/gemini-2.0-flash-001'
-
+
+            print(f"Attempting to create cache for file: {document.name}") # Log
             cache = client.caches.create(
                 model=model,
                 config=types.CreateCachedContentConfig(
-                    display_name='pdf document cache',
+                    display_name=f'pdf document cache: {file.filename}', # Use filename in display_name
                     system_instruction=system_instruction,
-                    contents=[document],
+                    contents=[document], # document is the File object returned by upload_file
                     ttl="3600s", # 1 hour TTL
                 )
             )
-
+            print(f"Cache created successfully: {cache.name}") # Log
+
             # Store cache info
             cache_id = str(uuid.uuid4())
             document_caches[cache_id] = {
@@ -635,25 +653,47 @@ def upload_file():
             'document_name': file.filename,
             'created_at': datetime.now().isoformat()
         }
-
+
+            # Get token count from cache metadata if available
+            token_count = 'Unknown'
+            if hasattr(cache, 'usage_metadata') and cache.usage_metadata:
+                token_count = getattr(cache.usage_metadata, 'cached_token_count', 'Unknown')
+
             return jsonify({
                 'success': True,
                 'cache_id': cache_id,
-                'token_count':
+                'token_count': token_count
             })
-
+
         except Exception as cache_error:
+            print(f"Cache creation failed: {str(cache_error)}") # Log the cache error
             # If caching fails due to small content, provide alternative approach
-
-
-
-
-
-
+            # Note: The exact error message might vary, checking substring is a bit fragile
+            # A better way might be to count tokens first, but requires API call
+            if "Cached content is too small" in str(cache_error) or "minimum" in str(cache_error).lower():
+                # Attempt to delete the uploaded file if caching failed (optional but good cleanup)
+                try:
+                    client.files.delete(document.name)
+                    print(f"Cleaned up uploaded file {document.name} after caching failure.")
+                except Exception as cleanup_error:
+                    print(f"Failed to clean up file {document.name}: {cleanup_error}")
+
+                return jsonify({
+                    'success': False,
+                    'error': 'PDF content is too small for caching. Please upload a larger document. Minimum token count varies by model, but is typically 1024+.',
+                    'suggestion': 'Try uploading a longer document or combine multiple documents.'
+                })
             else:
-
-
+                # Attempt to delete the uploaded file if caching failed
+                try:
+                    client.files.delete(document.name)
+                    print(f"Cleaned up uploaded file {document.name} after caching failure.")
+                except Exception as cleanup_error:
+                    print(f"Failed to clean up file {document.name}: {cleanup_error}")
+                raise cache_error # Re-raise other errors
+
     except Exception as e:
+        print(f"An unexpected error occurred during upload: {str(e)}") # Log general errors
         return jsonify({'success': False, 'error': str(e)})
 
 @app.route('/upload-url', methods=['POST'])
@@ -661,39 +701,56 @@ def upload_from_url():
     try:
         data = request.get_json()
         url = data.get('url')
-
+
         if not url:
             return jsonify({'success': False, 'error': 'No URL provided'})
-
+
         # Download file from URL
-
-
-
+        try:
+            response = httpx.get(url)
+            response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
+        except httpx.HTTPStatusError as e:
+            return jsonify({'success': False, 'error': f'HTTP error downloading file from URL: {e.response.status_code} - {e.response.text}'})
+        except httpx.RequestError as e:
+            return jsonify({'success': False, 'error': f'Error downloading file from URL: {e}'})
+
+
         file_io = io.BytesIO(response.content)
-
-        #
-
-
-
-
-
+
+        # --- CORRECTED FILE UPLOAD CALL ---
+        # Upload to Gemini File API using the correct method client.upload_file
+        # Pass the file content as a tuple (filename, file-like object, mime_type)
+        # Use a generic filename for the file-like object
+        try:
+            document = client.upload_file(
+                file=('downloaded_document.pdf', file_io, 'application/pdf'), # Use a placeholder filename
+                display_name=url # Use the URL as display name
+            )
+            print(f"File uploaded successfully: {document.name}") # Log
+        except Exception as upload_error:
+            return jsonify({'success': False, 'error': f'Error uploading file to Gemini API: {str(upload_error)}'})
+        # --- END CORRECTED FILE UPLOAD CALL ---
+
+
        # Create cache with system instruction
         try:
             system_instruction = "You are an expert document analyzer. Provide detailed, accurate answers based on the uploaded document content. Always be helpful and thorough in your responses."
-
+
             # Use the correct model format as per documentation
             model = 'models/gemini-2.0-flash-001'
-
+
+            print(f"Attempting to create cache for file: {document.name}") # Log
             cache = client.caches.create(
                 model=model,
                 config=types.CreateCachedContentConfig(
-                    display_name='pdf document cache',
+                    display_name=f'pdf document cache: {url}', # Use URL in display_name
                     system_instruction=system_instruction,
-                    contents=[document],
+                    contents=[document], # document is the File object returned by upload_file
                     ttl="3600s", # 1 hour TTL
                 )
             )
-
+            print(f"Cache created successfully: {cache.name}") # Log
+
             # Store cache info
             cache_id = str(uuid.uuid4())
             document_caches[cache_id] = {
@@ -701,95 +758,148 @@ def upload_from_url():
             'document_name': url,
             'created_at': datetime.now().isoformat()
         }
-
+
+            # Get token count from cache metadata if available
+            token_count = 'Unknown'
+            if hasattr(cache, 'usage_metadata') and cache.usage_metadata:
+                token_count = getattr(cache.usage_metadata, 'cached_token_count', 'Unknown')
+
+
             return jsonify({
                 'success': True,
                 'cache_id': cache_id,
-                'token_count':
+                'token_count': token_count
             })
-
+
         except Exception as cache_error:
+            print(f"Cache creation failed: {str(cache_error)}") # Log the cache error
             # If caching fails due to small content, provide alternative approach
-            if "Cached content is too small" in str(cache_error):
+            if "Cached content is too small" in str(cache_error) or "minimum" in str(cache_error).lower():
+                # Attempt to delete the uploaded file if caching failed (optional but good cleanup)
+                try:
+                    client.files.delete(document.name)
+                    print(f"Cleaned up uploaded file {document.name} after caching failure.")
+                except Exception as cleanup_error:
+                    print(f"Failed to clean up file {document.name}: {cleanup_error}")
+
                 return jsonify({
-                    'success': False,
-                    'error': 'PDF is too small for caching. Please upload a larger document
+                    'success': False,
+                    'error': 'PDF content is too small for caching. Please upload a larger document. Minimum token count varies by model, but is typically 1024+.',
                     'suggestion': 'Try uploading a longer document or combine multiple documents.'
                 })
             else:
-
-
+                # Attempt to delete the uploaded file if caching failed
+                try:
+                    client.files.delete(document.name)
+                    print(f"Cleaned up uploaded file {document.name} after caching failure.")
+                except Exception as cleanup_error:
+                    print(f"Failed to clean up file {document.name}: {cleanup_error}")
+                raise cache_error # Re-raise other errors
+
+
     except Exception as e:
+        print(f"An unexpected error occurred during URL upload: {str(e)}") # Log general errors
         return jsonify({'success': False, 'error': str(e)})
 
+# ... (ask_question, list_caches, delete_cache routes remain largely the same) ...
+
 @app.route('/ask', methods=['POST'])
 def ask_question():
     try:
         data = request.get_json()
         question = data.get('question')
         cache_id = data.get('cache_id')
-
+
         if not question or not cache_id:
             return jsonify({'success': False, 'error': 'Missing question or cache_id'})
-
+
         if cache_id not in document_caches:
-
-
+            # Check if the cache still exists in Gemini API if it's not in our local map
+            # This adds robustness if the server restarts or cache expires
+            try:
+                cache_info_api = client.caches.get(name=document_caches[cache_id]['cache_name']) # Need cache_name from stored info
+                # If get succeeds, update local cache (or handle this differently)
+                # For simplicity here, let's just fail if not in local map as it's in-memory
+                return jsonify({'success': False, 'error': 'Cache not found or expired. Please upload the document again.'})
+            except Exception as get_error:
+                # If get fails, it's definitely gone
+                if cache_id in document_caches: # Clean up local entry if API confirms deletion/expiry
+                    del document_caches[cache_id]
+                return jsonify({'success': False, 'error': 'Cache not found or expired. Please upload the document again.'})
+
+
         cache_info = document_caches[cache_id]
-
+
         # Generate response using cached content with correct model format
         response = client.models.generate_content(
             model='models/gemini-2.0-flash-001',
-            contents=question,
-
+            contents=question, # User's question
+            generation_config=types.GenerateContentConfig( # generation_config takes GenerateContentConfig
                 cached_content=cache_info['cache_name']
             )
         )
-
+
+        # Check if response has parts before accessing .text
+        answer = "Could not generate response."
+        if response and response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
+            answer = "".join(part.text for part in response.candidates[0].content.parts if hasattr(part, 'text'))
+        elif response and response.prompt_feedback and response.prompt_feedback.block_reason:
+            answer = f"Request blocked: {response.prompt_feedback.block_reason.name}"
+        else:
+            print(f"Unexpected response structure: {response}") # Log unexpected structure
+
+
         return jsonify({
             'success': True,
-            'answer':
+            'answer': answer
         })
-
-    except Exception as e:
-        return jsonify({'success': False, 'error': str(e)})
 
-@app.route('/caches', methods=['GET'])
-def list_caches():
-    try:
-        caches = []
-        for cache_id, cache_info in document_caches.items():
-            caches.append({
-                'cache_id': cache_id,
-                'document_name': cache_info['document_name'],
-                'created_at': cache_info['created_at']
-            })
-
-        return jsonify({'success': True, 'caches': caches})
-
     except Exception as e:
+        print(f"An error occurred during question asking: {str(e)}") # Log errors
         return jsonify({'success': False, 'error': str(e)})
 
+# ... (list_caches, delete_cache remain largely the same) ...
+
 @app.route('/cache/<cache_id>', methods=['DELETE'])
 def delete_cache(cache_id):
     try:
         if cache_id not in document_caches:
             return jsonify({'success': False, 'error': 'Cache not found'})
-
+
         cache_info = document_caches[cache_id]
-
+
         # Delete from Gemini API
-
-
+        try:
+            client.caches.delete(cache_info['cache_name'])
+            print(f"Gemini cache deleted: {cache_info['cache_name']}") # Log
+        except Exception as delete_error:
+            print(f"Error deleting Gemini cache {cache_info['cache_name']}: {delete_error}") # Log
+            # Decide if you want to fail if API deletion fails or just remove local entry
+            # For robustness, maybe log and still remove local entry? Or return error?
+            # Let's return an error for now.
+            return jsonify({'success': False, 'error': f'Failed to delete cache from API: {str(delete_error)}'})
+
         # Remove from local storage
         del document_caches[cache_id]
-
+        print(f"Local cache entry deleted for ID: {cache_id}") # Log
+
         return jsonify({'success': True, 'message': 'Cache deleted successfully'})
-
+
     except Exception as e:
+        print(f"An unexpected error occurred during cache deletion: {str(e)}") # Log
         return jsonify({'success': False, 'error': str(e)})
 
+
 if __name__ == '__main__':
     import os
+    # Ensure GOOGLE_API_KEY is set
+    if not os.getenv('GOOGLE_API_KEY'):
+        print("Error: GOOGLE_API_KEY environment variable not set.")
+        # exit(1) # Or handle appropriately
+        # For local testing with debug=True, you might pass it directly or ensure your .env is loaded
+        pass # Allow running without key for now if needed, but API calls will fail
+
     port = int(os.environ.get("PORT", 7860))
-
+    print(f"Starting Flask app on port {port}") # Log start
+    # In production, set debug=False
+    app.run(debug=True, host='0.0.0.0', port=port)
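A note on the upload call before the sketches below: the new comments present client.upload_file as the correct replacement for client.files.upload, but the surrounding calls (client.caches.create with types.CreateCachedContentConfig) match the google-genai SDK, whose documented upload entry point is client.files.upload; a standalone upload_file function belongs to the older google-generativeai package. A minimal sketch of upload plus cache creation against the google-genai surface, assuming client = genai.Client() and the same model string as in the diff; verify against the installed SDK before relying on it:

import io

from google import genai
from google.genai import types

client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment

def create_pdf_cache(data: bytes, name: str):
    # Upload the raw PDF bytes through the File API (google-genai spelling).
    document = client.files.upload(
        file=io.BytesIO(data),
        config=types.UploadFileConfig(display_name=name, mime_type='application/pdf'),
    )
    # Cache the uploaded document with the same system instruction and TTL as above.
    return client.caches.create(
        model='models/gemini-2.0-flash-001',
        config=types.CreateCachedContentConfig(
            display_name=f'pdf document cache: {name}',
            system_instruction='You are an expert document analyzer.',
            contents=[document],
            ttl='3600s',
        ),
    )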
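On the token count: getattr(cache.usage_metadata, 'cached_token_count', 'Unknown') quietly degrades to 'Unknown' whenever the attribute name does not match the installed SDK, and the google-genai types document total_token_count on cached-content usage metadata. A defensive reader that probes both spellings (the field names are assumptions to check against the SDK in use):

def cache_token_count(cache):
    # Probe both field spellings; fall back to 'Unknown' instead of raising.
    meta = getattr(cache, 'usage_metadata', None)
    if meta is None:
        return 'Unknown'
    for field in ('total_token_count', 'cached_token_count'):
        value = getattr(meta, field, None)
        if value is not None:
            return value
    return 'Unknown'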
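For /upload-url, the httpx error handling is sound, but nothing verifies that the URL actually returned a PDF before the bytes are uploaded with a hard-coded application/pdf MIME type. A small hardening sketch; the Content-Type check is a heuristic, and follow_redirects and timeout are standard httpx keyword arguments:

import httpx

def fetch_pdf(url: str, timeout: float = 30.0) -> bytes:
    # Follow redirects and fail fast on HTTP errors.
    response = httpx.get(url, follow_redirects=True, timeout=timeout)
    response.raise_for_status()
    # Accept the payload only if it plausibly is a PDF.
    content_type = response.headers.get('content-type', '')
    if 'application/pdf' not in content_type and not response.content.startswith(b'%PDF'):
        raise ValueError(f'URL did not return a PDF (Content-Type: {content_type!r})')
    return response.content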
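Two things in the /ask route are worth flagging. First, the cache-miss branch indexes document_caches[cache_id] inside the very block that has just established the key is absent, so the client.caches.get probe raises KeyError before it can run; with a purely in-memory map there is nothing to look up remotely, and the branch can simply return. Second, generation_config= is the older google-generativeai spelling; google-genai's client.models.generate_content documents the keyword as config=. A sketch of the corrected call path under those assumptions:

if cache_id not in document_caches:
    # The in-memory map is the only source of cache names, so a miss is final.
    return jsonify({'success': False,
                    'error': 'Cache not found or expired. Please upload the document again.'})

cache_info = document_caches[cache_id]
response = client.models.generate_content(
    model='models/gemini-2.0-flash-001',
    contents=question,
    config=types.GenerateContentConfig(cached_content=cache_info['cache_name']),
)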
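The cleanup calls pass resource names positionally (client.files.delete(document.name), client.caches.delete(cache_info['cache_name'])). The google-genai delete methods are documented with a name= keyword, and if the installed version declares them keyword-only the positional form raises TypeError; the explicit spelling is safe either way:

client.files.delete(name=document.name)
client.caches.delete(name=cache_info['cache_name'])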
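Finally, the new /health route can be exercised without starting a server by using Flask's built-in test client (app here is the Flask instance from app.py):

# Quick smoke test for the health endpoint.
with app.test_client() as test_client:
    response = test_client.get('/health')
    assert response.status_code == 200
    assert response.get_json() == {'status': 'healthy'}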