NTaylor committed
Commit 43b5430 · 1 Parent(s): b0661c4

Added n_samples and n_features as slider options

Files changed (1)
  1. app.py +61 -31
app.py CHANGED
@@ -31,24 +31,53 @@ X = iris.data
  y = iris.target
  target_names = iris.target_names
 
- # fit PCA
- pca = PCA(n_components=2)
- X_r = pca.fit(X).transform(X)
-
- # fit LDA
- lda = LinearDiscriminantAnalysis(n_components=2)
- X_r2 = lda.fit(X, y).transform(X)
-
- # Percentage of variance explained for each components
- print(
- "explained variance ratio (first two components): %s"
- % str(pca.explained_variance_ratio_)
- )
-
- # save models using skop
-
 
- def plot_lda_pca():
 
  # fig = plt.figure(1, facecolor="w", figsize=(5,5))
  fig, axes = plt.subplots(2,1, sharey= False, sharex=False, figsize = (8,6))
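Note on the block removed above: it fixed n_components=2 for both PCA and LDA. Once n_components becomes a function parameter (as in the new plot_lda_pca further down), it helps to remember that scikit-learn caps LDA's n_components at min(n_classes - 1, n_features), which is 2 for the three-class Iris data. A minimal standalone sketch of that cap, separate from app.py:

    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.decomposition import PCA
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    iris = load_iris()
    X, y = iris.data, iris.target

    n_components = 2
    # LDA keeps at most min(n_classes - 1, n_features) components (2 for Iris)
    lda_components = min(n_components, len(np.unique(y)) - 1, X.shape[1])

    X_pca = PCA(n_components=n_components).fit_transform(X)
    X_lda = LinearDiscriminantAnalysis(n_components=lda_components).fit_transform(X, y)
    print(X_pca.shape, X_lda.shape)  # (150, 2) (150, 2)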
@@ -77,27 +106,28 @@ def plot_lda_pca():
  title = "2-D projection of Iris dataset using LDA and PCA"
  with gr.Blocks(title=title) as demo:
  gr.Markdown(f"# {title}")
- gr.Markdown(" This example shows how one can use Prinicipal Components Analysis (PCA) and Linear Discriminant Analysis (LDA) to cluster the Iris dataset based on provided features. <br>"
- " PCA applied to this data identifies the combination of attributes (principal components, or directions in the feature space) that account for the most variance in the data. Here we plot the different samples on the 2 first principal components. <br>"
- " <br>"
-
  " For further details please see the sklearn docs:"
  )
 
  gr.Markdown(" **[Demo is based on sklearn docs found here](https://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.html#sphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py)** <br>")
 
- gr.Markdown(" **Dataset** : The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour and Virginica) with 4 attributes: sepal length, sepal width, petal length and petal width. . <br>")
-
- # with gr.Row():
- # n_samples = gr.Slider(value=100, minimum=10, maximum=1000, step=10, label="n_samples")
- # n_components = gr.Slider(value=2, minimum=1, maximum=20, step=1, label="n_components")
- # n_features = gr.Slider(value=5, minimum=5, maximum=25, step=1, label="n_features")
-
-
- # options for n_components
 
  btn = gr.Button(value="Run")
- btn.click(plot_lda_pca, outputs= gr.Plot(label='PCA vs LDA clustering') ) #
 
 
  demo.launch()
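The removed description above notes that PCA picks the directions that account for the most variance, and both the old and the new code print pca.explained_variance_ratio_. A small standalone check of what that ratio is (each component's variance divided by the total variance of the original features); this snippet is illustrative and not part of app.py:

    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.decomposition import PCA

    X = load_iris().data
    pca = PCA(n_components=2).fit(X)

    # explained_variance_ratio_ equals each component's variance divided by the
    # total variance of the original features (both computed with ddof=1)
    total_var = np.var(X, axis=0, ddof=1).sum()
    print(pca.explained_variance_ratio_)
    print(pca.explained_variance_ / total_var)  # should match the line above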
 
  y = iris.target
  target_names = iris.target_names
 
+ def plot_lda_pca(n_samples=100, n_features=4, n_components=2):
+     # n_samples and n_features come first so they line up with the positional
+     # inputs=[n_samples, n_features] passed by btn.click below
+     '''
+     Function to plot LDA and PCA clustering.
+
+     Parameters
+     ----------
+     n_samples : int, default=100
+         Number of Iris samples to use.
+     n_features : int, default=4
+         Number of Iris features to use.
+     n_components : int, default=2
+         Number of components to keep.
+
+     Returns
+     -------
+     fig : matplotlib.figure.Figure
+         Figure object.
+     '''
+
+     # take a subset of the Iris data; read iris.data / iris.target directly so
+     # the module-level X and y are not shadowed before they are used
+     X = iris.data[:n_samples, :n_features]
+     y = iris.target[:n_samples]
+
+     # fit PCA
+     pca = PCA(n_components=n_components)
+     X_r = pca.fit(X).transform(X)
+     print(f"shape of X_r: {X_r.shape}")
+
+     # fit LDA
+     lda = LinearDiscriminantAnalysis(n_components=n_components)
+     X_r2 = lda.fit(X, y).transform(X)
+     print(f"shape of X_r2: {X_r2.shape}")
+
+     # keep only the first two components for the 2-D plots
+     X_r = X_r[:, :2]
+     X_r2 = X_r2[:, :2]
+     print(f"shape of X_r after: {X_r.shape}")
+     print(f"shape of X_r2 after: {X_r2.shape}")
+
+     # Percentage of variance explained for each component
+     print(
+         "explained variance ratio (first two components): %s"
+         % str(pca.explained_variance_ratio_)
+     )
 
  # fig = plt.figure(1, facecolor="w", figsize=(5,5))
  fig, axes = plt.subplots(2,1, sharey= False, sharex=False, figsize = (8,6))
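Since the docstring above says the function returns a matplotlib figure, it can be smoke-tested outside Gradio. A hypothetical quick check, run in the same module as app.py; the output path is an assumption, not part of this commit:

    # keyword arguments avoid depending on the parameter order
    fig = plot_lda_pca(n_samples=120, n_features=3)
    fig.savefig("pca_vs_lda_subset.png")  # assumed file name, purely illustrative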
 
  title = "2-D projection of Iris dataset using LDA and PCA"
  with gr.Blocks(title=title) as demo:
  gr.Markdown(f"# {title}")
+ gr.Markdown(" This example shows how one can use Principal Components Analysis (PCA) and Linear Discriminant Analysis (LDA) to project the Iris dataset onto two dimensions. <br>"
+     " PCA identifies the directions in feature space that account for the most variance in the data, while LDA finds the directions that best separate the three classes. <br>"
+     " The number of samples (n_samples) determines how many Iris data points are used. <br>"
+     " The number of features (n_features) determines how many of the four Iris features are used. <br>"
  " For further details please see the sklearn docs:"
  )
 
  gr.Markdown(" **[Demo is based on sklearn docs found here](https://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.html#sphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py)** <br>")
 
+ gr.Markdown(" **Dataset** : The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour and Virginica) with 4 attributes: sepal length, sepal width, petal length and petal width. <br>")
+ gr.Markdown(" Using fewer samples or fewer features changes how well the two projections separate the three classes. <br>"
+     )
+ # set max samples
+ max_samples = len(iris.data)
+ with gr.Row():
+     n_samples = gr.Slider(value=100, minimum=2, maximum=max_samples, step=1, label="n_samples")
+     n_features = gr.Slider(value=4, minimum=2, maximum=4, step=1, label="n_features")
 
+
  btn = gr.Button(value="Run")
+ btn.click(plot_lda_pca, inputs=[n_samples, n_features], outputs=gr.Plot(label='PCA vs LDA clustering'))
 
  demo.launch()
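The sliders above are handed to btn.click as inputs=[n_samples, n_features], and Gradio passes their values to the callback positionally, in list order, which is why the new plot_lda_pca signature lists n_samples and n_features first. A minimal, self-contained sketch of that wiring pattern; the callback and labels here are illustrative and not the app's code:

    import gradio as gr

    def describe(n_samples, n_features):
        # values arrive in the same order as the inputs list below
        return f"using {n_samples} samples and {n_features} features"

    with gr.Blocks() as demo:
        n_samples = gr.Slider(minimum=2, maximum=150, value=100, step=1, label="n_samples")
        n_features = gr.Slider(minimum=2, maximum=4, value=4, step=1, label="n_features")
        out = gr.Textbox(label="settings")
        gr.Button("Run").click(describe, inputs=[n_samples, n_features], outputs=out)

    demo.launch()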