import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error


def get_plots(min_alpha: float, max_alpha: float):
    """Fit Ridge models over a log-spaced alpha range and plot the results.

    Parameters
    ----------
    min_alpha : float
        Exponent of 10 giving the smallest regularization strength
        (i.e. alphas start at 10**min_alpha).
    max_alpha : float
        Exponent of 10 giving the largest regularization strength.

    Returns
    -------
    matplotlib.figure.Figure
        Two side-by-side panels on a log-scaled alpha axis:
        coefficient paths (left) and coefficient-recovery error (right).
    """
    clf = Ridge()
    # Small synthetic regression problem with known true coefficients `w`,
    # so the error of the *estimated coefficients* can be measured directly.
    X, y, w = make_regression(
        n_samples=10, n_features=10, coef=True, random_state=1, bias=3.5
    )

    coefs = []
    errors = []
    alphas = np.logspace(min_alpha, max_alpha, 200)

    # Train the model with different regularisation strengths.
    for a in alphas:
        clf.set_params(alpha=a)
        clf.fit(X, y)
        coefs.append(clf.coef_)
        # MSE between fitted and true coefficients (not prediction error).
        errors.append(mean_squared_error(clf.coef_, w))

    # Display results.
    fig, ax = plt.subplots(1, 2, figsize=(20, 6))

    ax[0].plot(alphas, coefs)
    ax[0].set_xscale("log")
    ax[0].set_xlabel("alpha")
    ax[0].set_ylabel("weights")
    ax[0].set_title("Ridge coefficients as a function of the regularization")

    ax[1].plot(alphas, errors)
    ax[1].set_xscale("log")
    ax[1].set_xlabel("alpha")
    ax[1].set_ylabel("error")
    ax[1].set_title("Coefficient error as a function of the regularization")

    fig.tight_layout()
    return fig


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            # Fixed wording: the sliders set *values* (exponents), not models.
            gr.Markdown(
                "Choose the range of alpha values to plot."
                + " The values you input for alpha are for the exponents of 10,"
                + " so a value of -6 means 10^-6."
            )
            min_alpha = gr.Slider(
                minimum=-10,
                maximum=10,
                step=0.5,
                value=-6,
                label="Minimum Alpha Exponent",
            )
            max_alpha = gr.Slider(
                minimum=-10,
                maximum=10,
                step=0.5,
                value=6,
                label="Maximum Alpha Exponent",
            )
        with gr.Column(scale=4):
            plots = gr.Plot()

    # Re-render whenever either slider moves, and once on page load.
    min_alpha.change(
        get_plots,
        [min_alpha, max_alpha],
        plots,
        queue=False,
    )
    max_alpha.change(
        get_plots,
        [min_alpha, max_alpha],
        plots,
        queue=False,
    )
    demo.load(
        get_plots,
        [min_alpha, max_alpha],
        plots,
        queue=False,
    )


if __name__ == "__main__":
    demo.launch()