File size: 1,662 Bytes
cba16b9
 
 
 
 
 
 
 
 
 
 
d43dc97
cba16b9
 
 
1c5e06c
 
cba16b9
 
1c5e06c
cba16b9
 
 
 
 
 
 
 
 
 
ce30601
 
 
 
 
cba16b9
 
2ab0f3e
cba16b9
 
d60dc04
ea98520
8cbbdae
 
 
cba16b9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
# -*- coding: utf-8 -*-
"""Gradio-regression.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1qmfhcPafAIfczazACroyAYyRohdQbklK
"""
import numpy as np
import matplotlib.pyplot as plt
import gradio as gr
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
# Seed NumPy's global RNG so the synthetic dataset is identical on every run.
np.random.seed(2)

# Synthetic 1-D regression data, shape (100, 1) each:
# y = 4 + 3*X plus uniform noise drawn from [0, 1).
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.rand(100, 1)

# Deterministic split; NOTE(review): test_size=0.6 keeps only 40% for
# training — presumably intentional here, to make regularization effects
# visible on a small training set.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.6, random_state=42)

def build_model(alpha):
  """Fit a Ridge regressor on the module-level training split.

  Args:
    alpha: Regularization strength forwarded to sklearn's Ridge.

  Returns:
    The fitted Ridge estimator (Ridge.fit returns the estimator itself).
  """
  return Ridge(alpha=alpha).fit(X_train, y_train)

def predict(alpha):
  """Fit a Ridge model for the given alpha and plot its fit on the data.

  Args:
    alpha: Regularization strength for the Ridge regressor.

  Returns:
    The matplotlib Figure holding the scatter/line plot. Returning the
    explicit Figure (instead of the stateful ``plt`` module, as before)
    keeps each Gradio invocation isolated from figures created by
    earlier calls.
  """
  ridge_reg = build_model(alpha)
  preds = ridge_reg.predict(X_test)
  # Create a dedicated figure; the pyplot calls below draw into it because
  # it is the current figure.
  fig = plt.figure()
  plt.scatter(X_train, y_train, color="yellowgreen", marker=".", label="training data")
  plt.scatter(X_test, y_test, color="gold", marker=".", label="test data")
  plt.plot(X_test, preds, color="cornflowerblue",
    linewidth=2,
    label="Ridge  regressor")
  plt.ylabel("Y")
  plt.xlabel("X")
  plt.legend(loc="upper left")
  # Bug fix: return the Figure object, not the plt module (fig was
  # previously created but never used).
  return fig

# Gradio 3+ components (top-level gr.Slider / gr.Plot). The initial slider
# value is set with `value=`; the pre-3.0 keyword `default=` raises a
# TypeError on modern Gradio.
inputs = gr.Slider(0, 20, value=1, label='alpha')
outputs = gr.Plot(show_label=True)
title = "Effect of regularization using Ridge regression"
description = "Alpha is the regularization parameter which basically restricts model. The idea is that using regularization the model even if performs poorly on the training data, it would provide a better fit for generalizing data. Try out yourself by increasing or decreasing the value of alpha."
# Wire the slider -> predict -> plot pipeline and start the web UI.
gr.Interface(fn = predict, inputs = inputs, outputs = outputs, title = title, description = description).launch()