barunsaha committed
Commit 2e8f41d (unverified)
2 Parent(s): 9897233 5032c92

Merge pull request #111 from barun-saha/ollama

Fix missing page range slider in the offline mode
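Context for the fix: judging from the hunk around line 225 below, the page-range slider used to be created only inside the online-providers branch of the sidebar, so it never rendered when the app ran against Ollama with RUN_IN_OFFLINE_MODE set; this commit moves the widget up to the "with st.sidebar:" level so it appears in both modes. A minimal, self-contained sketch of that placement (the provider widgets, the MAX_ALLOWED_PAGES value, and the offline flag are illustrative stand-ins, not the real app.py code):

    import streamlit as st

    RUN_IN_OFFLINE_MODE = True   # stand-in; the real app derives this from the environment
    MAX_ALLOWED_PAGES = 50       # stand-in for GlobalConfig.MAX_ALLOWED_PAGES

    with st.sidebar:
        if RUN_IN_OFFLINE_MODE:
            # Offline mode: only an Ollama model name is requested.
            llm_provider_to_use = st.text_input('Ollama model to use:')
        else:
            # Online mode: provider selection, API key, Azure settings, etc.
            llm_provider_to_use = st.selectbox('LLM provider:', ['provider-a', 'provider-b'])

        # Creating the slider after the if/else (rather than inside the online branch)
        # makes it render in offline mode as well.
        page_range_slider = st.slider(
            'Specify a page range for the uploaded PDF file (if any):',
            1, MAX_ALLOWED_PAGES,
            [1, MAX_ALLOWED_PAGES]
        )
        st.session_state['page_range_slider'] = page_range_slider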

Files changed (1)
  1. app.py +35 -28
app.py CHANGED
@@ -62,7 +62,7 @@ def _get_prompt_template(is_refinement: bool) -> str:
 
 def are_all_inputs_valid(
         user_prompt: str,
-        selected_provider: str,
+        provider: str,
         selected_model: str,
         user_key: str,
         azure_deployment_url: str = '',
@@ -73,7 +73,7 @@ def are_all_inputs_valid(
     Validate user input and LLM selection.
 
     :param user_prompt: The prompt.
-    :param selected_provider: The LLM provider.
+    :param provider: The LLM provider.
     :param selected_model: Name of the model.
     :param user_key: User-provided API key.
     :param azure_deployment_url: Azure OpenAI deployment URL.
@@ -91,12 +91,12 @@ def are_all_inputs_valid(
         )
         return False
 
-    if not selected_provider or not selected_model:
+    if not provider or not selected_model:
         handle_error('No valid LLM provider and/or model name found!', False)
         return False
 
     if not llm_helper.is_valid_llm_provider_model(
-            selected_provider, selected_model, user_key,
+            provider, selected_model, user_key,
             azure_endpoint_name, azure_deployment_url, azure_api_version
     ):
         handle_error(
@@ -186,7 +186,7 @@ with st.sidebar:
         env_key_name = GlobalConfig.PROVIDER_ENV_KEYS.get(selected_provider)
         default_api_key = os.getenv(env_key_name, "") if env_key_name else ""
 
-        # Always sync session state to env value if needed (auto-fill on provider change)
+        # Always sync session state to env value if needed (autofill on provider change)
         if default_api_key and st.session_state.get('api_key_input', None) != default_api_key:
             st.session_state['api_key_input'] = default_api_key
 
@@ -222,10 +222,13 @@ with st.sidebar:
                 value='2024-05-01-preview',
             )
 
-        # Make slider with initial values
-        page_range_slider = st.slider('7: Specify a page range for the PDF file:',
-            1, GlobalConfig.MAX_ALLOWED_PAGES, [1, GlobalConfig.MAX_ALLOWED_PAGES])
-        st.session_state['page_range_slider'] = page_range_slider
+    # Make slider with initial values
+    page_range_slider = st.slider(
+        'Specify a page range for the uploaded PDF file (if any):',
+        1, GlobalConfig.MAX_ALLOWED_PAGES,
+        [1, GlobalConfig.MAX_ALLOWED_PAGES]
+    )
+    st.session_state['page_range_slider'] = page_range_slider
 
 
 def build_ui():
@@ -292,36 +295,40 @@ def set_up_chat_ui():
         if prompt['files']:
             # Store uploaded pdf in session state
             uploaded_pdf = prompt['files'][0]
-            st.session_state['pdf_file'] = uploaded_pdf
+            st.session_state['pdf_file'] = uploaded_pdf
             # Apparently, Streamlit stores uploaded files in memory and clears on browser close
             # https://docs.streamlit.io/knowledge-base/using-streamlit/where-file-uploader-store-when-deleted
 
-        # Check if pdf file is uploaded
+        # Check if pdf file is uploaded
         # (we can use the same file if the user doesn't upload a new one)
-        if 'pdf_file' in st.session_state:
-            # Get validated page range
-            st.session_state['start_page'], st.session_state['end_page'] = filem.validate_page_range(
-                st.session_state['pdf_file'],
-                st.session_state['start_page'],
-                st.session_state['end_page']
-            )
+        if 'pdf_file' in st.session_state:
+            # Get validated page range
+            (
+                st.session_state['start_page'],
+                st.session_state['end_page']
+            ) = filem.validate_page_range(
+                st.session_state['pdf_file'],
+                st.session_state['start_page'],
+                st.session_state['end_page']
+            )
             # Show sidebar text for page selection and file name
             with st.sidebar:
                 if st.session_state['end_page'] is None:  # If the PDF has only one page
-                    st.text('Extracting page %d in %s' % (
-                        st.session_state['start_page'], st.session_state['pdf_file'].name
-                    ))
+                    st.text(
+                        f'Extracting page {st.session_state["start_page"]} in'
+                        f' {st.session_state["pdf_file"].name}'
+                    )
                 else:
-                    st.text('Extracting pages %d to %d in %s' % (
-                        st.session_state['start_page'], st.session_state['end_page'], st.session_state['pdf_file'].name
-                    ))
+                    st.text(
+                        f'Extracting pages {st.session_state["start_page"]} to'
+                        f' {st.session_state["end_page"]} in {st.session_state["pdf_file"].name}'
+                    )
 
             # Get pdf contents
             st.session_state[ADDITIONAL_INFO] = filem.get_pdf_contents(
-                st.session_state['pdf_file'],
-                (st.session_state['start_page'],
-                st.session_state['end_page'])
-            )
+                st.session_state['pdf_file'],
+                (st.session_state['start_page'], st.session_state['end_page'])
+            )
         provider, llm_name = llm_helper.get_provider_model(
             llm_provider_to_use,
             use_ollama=RUN_IN_OFFLINE_MODE
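The validate_page_range() and get_pdf_contents() helpers live in the file-manager module and are not part of this diff. For orientation only, here is a plausible sketch of what the page-range validation step does: clamping the slider values to the pages actually present and signalling a single-page document with end_page=None, which matches the st.session_state['end_page'] is None check above. This is an assumption about the helper's behaviour, not the project's actual implementation, and pypdf is used purely for illustration.

    from typing import Optional, Tuple

    from pypdf import PdfReader


    def validate_page_range(
            pdf_file,
            start_page: Optional[int],
            end_page: Optional[int],
    ) -> Tuple[int, Optional[int]]:
        # Hypothetical sketch: clamp the requested range to the PDF's real page count.
        n_pages = len(PdfReader(pdf_file).pages)

        if n_pages == 1:
            # Single-page document: end_page stays None, as checked in the sidebar text above.
            return 1, None

        start = min(max(1, start_page or 1), n_pages)
        end = min(end_page or n_pages, n_pages)
        return start, max(start, end)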