daniel-wojahn committed
Commit ce093f9 · Parent: 1da7d18

feat(app): improve UI and backend robustness per user feedback

Files changed (2):
  1. app.py +4 -4
  2. pipeline/llm_service.py +2 -3
app.py CHANGED

@@ -19,7 +19,7 @@ def main_interface():
     with gr.Blocks(
         theme=tibetan_theme,
         title="Tibetan Text Metrics Web App",
-        css=tibetan_theme.get_css_string(),
+        css=tibetan_theme.get_css_string() + ".metric-description { padding: 1.5rem !important; }"
     ) as demo:
         gr.Markdown(
             """# Tibetan Text Metrics Web App
@@ -59,7 +59,7 @@ def main_interface():
         semantic_toggle_radio = gr.Radio(
             label="Compute semantic similarity? (Experimental)",
             choices=["Yes", "No"],
-            value="Yes",
+            value="No",
             info="Semantic similarity will be time-consuming. Choose 'No' to speed up analysis if these metrics are not required.",
             elem_id="semantic-radio-group",
         )
@@ -122,7 +122,7 @@ def main_interface():
         with gr.Row():
             with gr.Column():
                 output_analysis = gr.Markdown(
-                    "## AI Analysis\n*The AI will analyze your text similarities and provide insights into patterns and relationships. Make sure to set up your OpenRouter API key for this feature.*",
+                    "## AI Analysis\n*The AI will analyze your text similarities and provide insights into patterns and relationships.*",
                     elem_classes="gr-markdown"
                 )

@@ -239,7 +239,7 @@ Each segment is represented as a vector of these TF-IDF scores, and the cosine s
     This chart displays the number of words in each segment of your texts after tokenization.
     """)
         elif metric_key in metric_tooltips:
-            gr.Markdown(value=metric_tooltips[metric_key])
+            gr.Markdown(value=metric_tooltips[metric_key], elem_classes="metric-description")
         else:
             gr.Markdown(value=f"### {metric_key}\nDescription not found.")
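Note on the app.py changes: the first and last hunks work as a pair. The CSS rule appended to the theme stylesheet defines a .metric-description class, and the metric tooltip markdown opts into it via elem_classes; the other two hunks simply change defaults (semantic similarity now defaults to "No", and the AI Analysis placeholder no longer mentions the OpenRouter API key). A minimal, self-contained sketch of that Gradio pattern, with the project's tibetan_theme stubbed out (base_css and the sample markdown are illustrative only):

    # Sketch only, not the app's actual code: pairing a custom CSS rule with
    # elem_classes so that one kind of component gets targeted styling.
    import gradio as gr

    base_css = ""  # stand-in for tibetan_theme.get_css_string()
    custom_css = base_css + ".metric-description { padding: 1.5rem !important; }"

    with gr.Blocks(css=custom_css, title="Tibetan Text Metrics Web App") as demo:
        # The class in elem_classes must match the selector above, so only
        # components tagged this way receive the extra padding.
        gr.Markdown("### Sample metric\nDescription text.", elem_classes="metric-description")

    demo.launch()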
pipeline/llm_service.py CHANGED

@@ -552,10 +552,9 @@ Your analysis will be performed using the `{model_name}` model. Provide a concis
     # Count how many expected content markers we find
     content_matches = sum(1 for term in expected_content if term.lower() in response.lower())

-    # If we find fewer than 3 expected content markers, it's likely not a good analysis
+    # If we find fewer than 3 expected content markers, log a warning
     if content_matches < 3:
         logger.warning(f"LLM response missing expected content sections (found {content_matches}/6)")
-        raise ValueError("Response does not contain expected analysis sections")

     # Check for text names from the dataset
     # Extract text names from the Text Pair column
@@ -580,7 +579,7 @@ Your analysis will be performed using the `{model_name}` model. Provide a concis
     response = f"<div class='llm-analysis'>\n{response}\n</div>"

     # Format the response into a markdown block
-    formatted_response = f"""## AI-Powered Analysis (Model: {model_name})"""
+    formatted_response = f"""## AI-Powered Analysis (Model: {model_name})\n\n{response}"""

     return formatted_response
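Note on the pipeline/llm_service.py changes: a response that lacks the expected content markers is now only logged rather than rejected with a ValueError, and the second hunk fixes the formatter, which previously dropped the response body and returned just the heading. A simplified sketch of the resulting flow (the marker list, function name, and signature are illustrative, not the module's actual definitions):

    # Simplified sketch of the post-change behaviour in llm_service.py.
    import logging

    logger = logging.getLogger(__name__)

    # Illustrative markers; the real expected_content list lives in the module.
    EXPECTED_CONTENT = ["similarity", "pattern", "difference", "segment", "text pair", "conclusion"]

    def format_analysis(response: str, model_name: str) -> str:
        """Validate loosely, then wrap the LLM response for display."""
        content_matches = sum(1 for term in EXPECTED_CONTENT if term.lower() in response.lower())

        # A sparse response is now logged but no longer rejected, so the user
        # still sees whatever analysis the model returned.
        if content_matches < 3:
            logger.warning(
                f"LLM response missing expected content sections (found {content_matches}/{len(EXPECTED_CONTENT)})"
            )

        response = f"<div class='llm-analysis'>\n{response}\n</div>"

        # Including {response} here is the actual fix: the old f-string
        # returned only the heading.
        return f"## AI-Powered Analysis (Model: {model_name})\n\n{response}"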