Shreyas094 committed
Commit 532ed96 · verified · Parent(s): c3aa982

Update app.py

Files changed (1)
  1. app.py +34 -9
app.py CHANGED
@@ -114,15 +114,40 @@ def generate_chunked_response(prompt, model, max_tokens=1000, num_calls=3, tempe
     clean_response = clean_response.replace("Using the following context:", "").strip()
     clean_response = clean_response.replace("Using the following context from the PDF documents:", "").strip()

-    # Remove duplicate sentences
-    sentences = clean_response.split('. ')
-    unique_sentences = []
-    for sentence in sentences:
-        if sentence not in unique_sentences:
-            unique_sentences.append(sentence)
+    # Split the response into main content and sources
+    parts = re.split(r'\n\s*Sources:\s*\n', clean_response, flags=re.IGNORECASE, maxsplit=1)
+    main_content = parts[0].strip()
+    sources = parts[1].strip() if len(parts) > 1 else ""
+
+    # Process main content
+    paragraphs = main_content.split('\n\n')
+    unique_paragraphs = []
+    for paragraph in paragraphs:
+        if paragraph not in unique_paragraphs:
+            unique_sentences = []
+            sentences = paragraph.split('. ')
+            for sentence in sentences:
+                if sentence not in unique_sentences:
+                    unique_sentences.append(sentence)
+            unique_paragraphs.append('. '.join(unique_sentences))

-    final_response = '. '.join(unique_sentences)
+    final_content = '\n\n'.join(unique_paragraphs)

+    # Process sources
+    if sources:
+        source_lines = sources.split('\n')
+        unique_sources = []
+        for line in source_lines:
+            if line.strip() and line not in unique_sources:
+                unique_sources.append(line)
+        final_sources = '\n'.join(unique_sources)
+        final_response = f"{final_content}\n\nSources:\n{final_sources}"
+    else:
+        final_response = final_content
+
+    # Remove any content after the sources
+    final_response = re.sub(r'(Sources:.*?)(?:\n\n|\Z).*', r'\1', final_response, flags=re.DOTALL)
+
     print(f"Final clean response: {final_response[:100]}...")
     return final_response

@@ -249,7 +274,7 @@ with gr.Blocks() as demo:
     with gr.Row():
         model_dropdown = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[1])
         temperature_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature")
-        num_calls_slider = gr.Slider(minimum=1, maximum=5, value=3, step=1, label="Number of API Calls")
+        num_calls_slider = gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls")

     with gr.Row():
         submit_btn = gr.Button("Send")

@@ -353,4 +378,4 @@ with gr.Blocks() as demo:
     """
     )
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)
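
For reference, below is a minimal standalone sketch of the response-cleanup behaviour this commit adds to generate_chunked_response. The function name clean_and_dedupe and the sample strings are illustrative only (they do not appear in app.py); the body mirrors the added lines above: split off the "Sources:" block, drop repeated paragraphs and repeated sentences within a paragraph, then de-duplicate the source lines.

    import re

    def clean_and_dedupe(clean_response: str) -> str:
        # Hypothetical helper mirroring the logic added in this commit.
        # Split the response into main content and a trailing "Sources:" block.
        parts = re.split(r'\n\s*Sources:\s*\n', clean_response, flags=re.IGNORECASE, maxsplit=1)
        main_content = parts[0].strip()
        sources = parts[1].strip() if len(parts) > 1 else ""

        # Drop repeated paragraphs, and repeated sentences inside each paragraph.
        unique_paragraphs = []
        for paragraph in main_content.split('\n\n'):
            if paragraph not in unique_paragraphs:
                unique_sentences = []
                for sentence in paragraph.split('. '):
                    if sentence not in unique_sentences:
                        unique_sentences.append(sentence)
                unique_paragraphs.append('. '.join(unique_sentences))
        final_content = '\n\n'.join(unique_paragraphs)

        # Drop repeated source lines and re-attach the Sources block.
        if sources:
            unique_sources = []
            for line in sources.split('\n'):
                if line.strip() and line not in unique_sources:
                    unique_sources.append(line)
            return f"{final_content}\n\nSources:\n" + '\n'.join(unique_sources)
        return final_content

    sample = (
        "LLMs can hallucinate facts. Retrieval helps ground answers.\n\n"
        "LLMs can hallucinate facts. Retrieval helps ground answers.\n\n"
        "Sources:\nreport.pdf, page 3\nreport.pdf, page 3\nsurvey.pdf, page 12"
    )
    print(clean_and_dedupe(sample))
    # LLMs can hallucinate facts. Retrieval helps ground answers.
    #
    # Sources:
    # report.pdf, page 3
    # survey.pdf, page 12

The committed code additionally runs a final re.sub(r'(Sources:.*?)(?:\n\n|\Z).*', r'\1', final_response, flags=re.DOTALL) pass to trim anything trailing the Sources block; the sketch omits that guard for brevity.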