DineshJ96 committed on
Commit
c4036ac
·
1 Parent(s): 0274229

Add application file

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  import os
3
  from PyPDF2 import PdfReader
4
- from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer # Added AutoModelForSeq2SeqLM, AutoTokenizer
5
  import math
6
  import tempfile
7
  import nltk
@@ -12,7 +12,7 @@ import nltk
12
  # --- Configuration ---
13
  QG_MODEL_NAME = "valhalla/t5-small-e2e-qg"
14
  # Define the local path where the model will be cached inside the Docker container
15
- LOCAL_MODEL_CACHE_DIR = "/app/model_cache" # Added local model cache directory
16
 
17
  CHUNK_SIZE_FOR_QG = 700 # Target characters per text chunk for LLM processing
18
  OVERLAP_PERCENTAGE = 0.15 # 15% overlap between chunks for context preservation
@@ -764,4 +764,5 @@ iface = gr.Interface(
764
 
765
  # Launch the Gradio app
766
  if __name__ == "__main__":
767
- iface.launch(share = True)
 
 
1
  import gradio as gr
2
  import os
3
  from PyPDF2 import PdfReader
4
+ from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
5
  import math
6
  import tempfile
7
  import nltk
 
12
  # --- Configuration ---
13
  QG_MODEL_NAME = "valhalla/t5-small-e2e-qg"
14
  # Define the local path where the model will be cached inside the Docker container
15
+ LOCAL_MODEL_CACHE_DIR = "/app/model_cache"
16
 
17
  CHUNK_SIZE_FOR_QG = 700 # Target characters per text chunk for LLM processing
18
  OVERLAP_PERCENTAGE = 0.15 # 15% overlap between chunks for context preservation
 
764
 
765
  # Launch the Gradio app
766
  if __name__ == "__main__":
767
+ # Set share=False to avoid potential hangs with public URL generation
768
+ iface.launch(share=False)