Spaces:
Sleeping
Sleeping
File size: 3,892 Bytes
7bf325d e66f533 8e34de3 f533950 aefef4e f533950 e66f533 f533950 36f7a03 e66f533 36f7a03 8e34de3 36f7a03 e66f533 8e34de3 9a806ac e66f533 9a806ac e66f533 9a806ac e66f533 9a806ac e66f533 9a806ac e66f533 9a806ac e66f533 bb4ec07 da60688 9a806ac e66f533 d143368 e66f533 f533950 11b02dc e66f533 c435293 e66f533 8e34de3 36f7a03 e66f533 36f7a03 fe68698 e66f533 f533950 e66f533 f533950 8e34de3 a800293 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 |
import gradio as gr
import logging
from data_handler import download_nltk_resources
from analysis_runner import run_analysis
from visualization_handler import create_visualization_components
from ui.dataset_input import create_dataset_input, load_example_dataset
from ui.analysis_screen import create_analysis_screen
# Set up module-level logging for the app (INFO level, timestamped format).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('gradio_app')

# Import the process_analysis_request function.
# Prefer the improved handler when that module is installed; otherwise fall
# back to the original implementation shipped in ui.analysis_screen.
try:
    from improved_analysis_handler import process_analysis_request
    logger.info("Using improved analysis handler")
except ImportError:
    logger.info("Using original analysis handler")
    from ui.analysis_screen import process_analysis_request
def create_app():
    """
    Create a streamlined Gradio app for dataset input and analysis.

    The app has two tabs: a "Dataset Input" tab where the user supplies one
    prompt plus two model responses, and an "Analysis" tab that runs the
    selected analyses over the stored dataset and renders visualizations.

    Returns:
        gr.Blocks: The Gradio application
    """
    with gr.Blocks(title="LLM Response Comparator") as app:
        # Application state to share data between tabs
        dataset_state = gr.State({})
        analysis_results_state = gr.State({})

        # Dataset Input Tab
        with gr.Tab("Dataset Input"):
            (dataset_inputs, example_dropdown, load_example_btn, create_btn,
             prompt, response1, model1, response2, model2) = create_dataset_input()

            # Status indicator to show when a dataset has been created
            dataset_status = gr.Markdown("*No dataset loaded*")

            # Populate all input fields from a bundled example dataset
            load_example_btn.click(
                fn=load_example_dataset,
                inputs=[example_dropdown],
                outputs=[prompt, response1, model1, response2, model2]
            )

            def create_dataset(p, r1, m1, r2, m2):
                """Build the two-entry dataset dict saved into dataset_state.

                Returns (dataset, status_markdown). An empty dict signals
                failure so the Analysis tab has nothing to run on.
                """
                if not p or not r1 or not r2:
                    return {}, "❌ **Error:** Please fill in at least the prompt and both responses"
                dataset = {
                    "entries": [
                        {"prompt": p, "response": r1, "model": m1 or "Model 1"},
                        {"prompt": p, "response": r2, "model": m2 or "Model 2"}
                    ]
                }
                return dataset, "✅ **Dataset created successfully!** You can now go to the Analysis tab"

            # Save dataset to state and update the status indicator
            create_btn.click(
                fn=create_dataset,
                inputs=[prompt, response1, model1, response2, model2],
                outputs=[dataset_state, dataset_status]
            )

        # Analysis Tab
        with gr.Tab("Analysis"):
            # Positional unpack of the analysis controls returned by
            # create_analysis_screen (see ui.analysis_screen).
            analysis_components = create_analysis_screen()
            analysis_options = analysis_components[0]
            analysis_params = analysis_components[1]
            run_analysis_btn = analysis_components[2]
            analysis_output = analysis_components[3]
            ngram_n = analysis_components[4]
            topic_count = analysis_components[5]
            # BUG FIX: the original wired an undefined name `ngram_top` into
            # the click inputs, raising NameError during app construction.
            # Use the 7th component when create_analysis_screen provides it;
            # otherwise fall back to a constant default of top-10 n-grams.
            ngram_top = (analysis_components[6]
                         if len(analysis_components) > 6 else gr.State(10))

            # Create visualization components (the click outputs)
            visualization_components = create_visualization_components()

            # Connect the run button to the analysis function
            run_analysis_btn.click(
                fn=run_analysis,
                inputs=[dataset_state, analysis_options, ngram_n, ngram_top, topic_count],
                outputs=visualization_components
            )

    return app
if __name__ == "__main__":
    # Download required NLTK resources before launching the app
    download_nltk_resources()
    logger.info("Starting LLM Response Comparator application")
    logger.info("===== Application Startup =====")
    # Create and launch the application.
    # BUG FIX: removed the stray trailing "|" after app.launch() (a
    # copy/scrape artifact) which made this line a SyntaxError.
    app = create_app()
    app.launch()