# Notebook by Shamima — commit 1c5e06c: "add noise and decrease train size"
# -*- coding: utf-8 -*-
"""Gradio-regression.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1qmfhcPafAIfczazACroyAYyRohdQbklK
"""
import numpy as np
import matplotlib.pyplot as plt
import gradio as gr
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
# Fixed seed so the synthetic dataset (and therefore the plots) is reproducible.
np.random.seed(2)
# 100 samples drawn uniformly from [0, 2), shaped (100, 1) for sklearn.
X = 2 * np.random.rand(100, 1)
# Linear target y = 4 + 3x plus uniform noise in [0, 1).
# NOTE(review): np.random.rand gives uniform noise; if Gaussian noise was
# intended, np.random.randn would be the usual choice — confirm intent.
y = 4 + 3 * X + np.random.rand(100, 1)
# test_size=0.6 deliberately keeps only 40% of the data for training
# (per the commit message: "decrease train size").
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.6, random_state=42)
def build_model(alpha):
    """Fit a Ridge regressor on the training split.

    Parameters
    ----------
    alpha : float
        Regularization strength passed straight to ``sklearn.linear_model.Ridge``.

    Returns
    -------
    Ridge
        The fitted estimator.
    """
    model = Ridge(alpha=alpha)
    model.fit(X_train, y_train)
    return model
def predict(alpha):
    """Fit a Ridge model with the given alpha and plot its fit.

    Parameters
    ----------
    alpha : float
        Regularization strength for the Ridge regressor.

    Returns
    -------
    matplotlib.figure.Figure
        Scatter of training/test points with the fitted regression line.
    """
    ridge_reg = build_model(alpha)
    # Sort the test inputs so the line is drawn left-to-right; with unsorted
    # x values plt.plot would zigzag (it only looked straight here because
    # the model is linear, so all segments happen to be collinear).
    order = np.argsort(X_test, axis=0).ravel()
    x_line = X_test[order]
    preds = ridge_reg.predict(x_line)
    fig = plt.figure()
    plt.scatter(X_train, y_train, color="yellowgreen", marker=".", label="training data")
    plt.scatter(X_test, y_test, color="gold", marker=".", label="test data")
    plt.plot(x_line, preds, color="cornflowerblue",
             linewidth=2,
             label="Ridge regressor")
    plt.ylabel("Y")
    plt.xlabel("X")
    plt.legend(loc="upper left")
    # Return the Figure object (the original returned the plt module and left
    # `fig` unused) so gr.Plot renders this specific figure and pyplot's
    # global state doesn't leak between Gradio sessions.
    return fig
# --- Gradio UI wiring ---
# The top-level gr.Slider/gr.Plot API is gradio 3+, where the initial value
# keyword is `value=`; `default=` was the gradio 2.x keyword and raises a
# TypeError on modern gradio versions.
inputs = gr.Slider(0, 20, value=1, label='alpha')
outputs = gr.Plot(show_label=True)
title = "Effect of regularization using Ridge regression"
description = "Alpha is the regularization parameter which basically restricts model. The idea is that using regularization the model even if performs poorly on the training data, it would provide a better fit for generalizing data. Try out yourself by increasing or decreasing the value of alpha."
# Launch blocks here and serves the demo; runs as a script entry point.
gr.Interface(fn=predict, inputs=inputs, outputs=outputs, title=title, description=description).launch()