markdown | code | output | license | path | repo_name
---|---|---|---|---|---|
1-part latent predictive model
|
class LaPred1P(nn.Module):
def __init__(self, latent_features, input_features=None, timesteps=None,
data=None, bias=True):
super().__init__()
if data:
input_features = input_features or data.n_vars
timesteps = timesteps or data.n_timesteps
elif input_features is None or timesteps is None:
raise ValueError('You must either provide data or both input '
'features and timesteps.')
self.latent_features = latent_features
self.input_features = input_features
self.timesteps = timesteps
self.encoder = nn.Linear(input_features, latent_features, bias=bias)
self.predictor = nn.Linear(latent_features, timesteps*latent_features, bias=bias)
def forward(self, x):
code = self.encoder(x['input'])
prediction = self.predictor(code).\
reshape(self.timesteps, self.latent_features)
return prediction
hmm = patches.data.HiddenMarkovModel(cbm.to_array(), cbm.latent_array()[:, [0]])
lapred1p = LaPred1P(1, data=hmm, bias=False)
lapred1p(hmm[0])
hmm[0]['future_latent_values']
lapred1p = LaPred1P(1, data=hmm, bias=False)
optimizer = optim.Adam(lapred1p.parameters())
criterion = nn.MSELoss()
running_loss = 0
loss_traj = []
angles = []
for epoch in tqdm(range(10)):
for i, data in enumerate(hmm):
if i<len(hmm):
if i % 10 == 0:
est = list(lapred1p.parameters())[0].detach().numpy()
angles.append(np.matmul(ideal, est.T)/np.sqrt(np.matmul(est, est.T)))
optimizer.zero_grad()
prediction = lapred1p(data)
loss = criterion(prediction, data['future_latent_values'])
loss.backward()
optimizer.step()
running_loss += loss
if i % 50 == 49:
loss_traj.append(running_loss.detach().numpy()/50)
running_loss = 0
list(lapred1p.parameters())
(gg.ggplot(
lazytools.array_to_dataframe(
np.array(loss_traj)
),
gg.aes(x='dim0', y='array')
) +
gg.geom_smooth(method='loess'))
np.concatenate(angles, axis=1).shape
(gg.ggplot(
lazytools.array_to_dataframe(
np.concatenate(angles, axis=1)
),
gg.aes(x='dim1', y='array', color='dim0', group='dim0')
) +
gg.geom_line())
|
_____no_output_____
|
MIT
|
notebooks/03-framing-models.ipynb
|
sflippl/patches
|
2-part latent predictive model
|
class LaPred2P(nn.Module):
def __init__(self, latent_features, input_features=None, timesteps=None,
data=None, bias=True):
super().__init__()
if data:
input_features = input_features or data.n_vars
timesteps = timesteps or data.n_timesteps
elif input_features is None or timesteps is None:
raise ValueError('You must either provide data or both input '
'features and timesteps.')
self.latent_features = latent_features
self.input_features = input_features
self.timesteps = timesteps
self.encoder = nn.Linear(input_features, latent_features, bias=bias)
self.predictor = nn.Linear(latent_features, timesteps*latent_features, bias=bias)
def forward(self, x):
code = self.encoder(x['input'])
prediction = self.predictor(x['latent_values']).\
reshape(self.timesteps, self.latent_features)
return {
'latent_values': code,
'latent_prediction': prediction
}
lapred2p = LaPred2P(1, data=hmm, bias=False)
optimizer = optim.Adam(lapred2p.parameters())
criterion = nn.MSELoss()
loss_traj = []
angles = []
running_loss = 0
for epoch in tqdm(range(10)):
for i, data in enumerate(hmm):
if i<len(hmm):
if i % 10 == 0:
est = list(lapred2p.parameters())[0].detach().numpy()
angles.append(np.matmul(ideal, est.T)/np.sqrt(np.matmul(est, est.T)))
optimizer.zero_grad()
prediction = lapred2p(data)
loss = criterion(prediction['latent_prediction'], data['future_latent_values']) + \
criterion(prediction['latent_values'], data['latent_values'])
loss.backward()
optimizer.step()
running_loss += loss
if i % 50 == 49:
loss_traj.append(running_loss.detach().numpy()/50)
running_loss = 0
list(lapred2p.parameters())[0]
(gg.ggplot(
lazytools.array_to_dataframe(
np.array(loss_traj)
),
gg.aes(x='dim0', y='array')
) +
gg.geom_smooth(method='loess'))
(gg.ggplot(
lazytools.array_to_dataframe(
np.concatenate(angles, axis=1)
),
gg.aes(x='dim1', y='array', color='dim0', group='dim0')
) +
gg.geom_line())
|
_____no_output_____
|
MIT
|
notebooks/03-framing-models.ipynb
|
sflippl/patches
|
Contrastive predictive model
|
cts = patches.data.Contrastive1DTimeSeries(data=cbm.to_array())
ce = patches.networks.LinearScaffold(latent_features=1, data=cts)
criterion = patches.losses.ContrastiveLoss(loss=nn.MSELoss())
optimizer = optim.Adam(ce.parameters())
angles = []
loss_traj = []
running_loss = 0
for epoch in tqdm(range(10)):
for i, data in enumerate(cts):
if i<len(cts):
if i % 10 == 0:
est = list(ce.parameters())[0].detach().numpy()
angles.append(np.matmul(ideal, est.T)/np.sqrt(np.matmul(est, est.T)))
optimizer.zero_grad()
code = ce(data)
loss = criterion(code)
loss.backward()
optimizer.step()
running_loss += loss
if i % 50 == 49:
loss_traj.append(running_loss.detach().numpy()/50)
running_loss = 0
(gg.ggplot(
lazytools.array_to_dataframe(
np.array(loss_traj)
),
gg.aes(x='dim0', y='array')
) +
gg.geom_smooth(method='loess'))
(gg.ggplot(
lazytools.array_to_dataframe(
np.concatenate(angles, axis=1)
),
gg.aes(x='dim1', y='array', color='dim0', group='dim0')
) +
gg.geom_line())
list(ce.parameters())
|
_____no_output_____
|
MIT
|
notebooks/03-framing-models.ipynb
|
sflippl/patches
|
Sampling bias
|
def moving_average(array):
"""Moving average over axis 0."""
cumsum = array.cumsum(axis=0)
length = cumsum.shape[0]
rng = np.arange(1, length+1)
if cumsum.ndim>1:
rng = rng.reshape(length, 1).repeat(cumsum.shape[1], 1)
return cumsum/rng
exposure = moving_average(np.abs(cbm.to_array()))
(gg.ggplot(lazytools.array_to_dataframe(exposure), gg.aes(x='dim0', group='dim1', y='array')) +
gg.geom_line(alpha=0.2) +
gg.scale_x_log10())
coherence = moving_average(1-2*(cbm.latent_array()[:-1,:]!=cbm.latent_array()[1:,:]))
lazytools.array_to_dataframe(coherence)['dim1'].astype(str)
(gg.ggplot(lazytools.array_to_dataframe(coherence),
gg.aes(x='dim0', color='dim1', y='array', group='dim1')) +
gg.geom_line() +
gg.scale_x_log10())
coherence = moving_average(1-2*(cbm.latent_array()[:-2,:]!=cbm.latent_array()[2:,:]))
(gg.ggplot(lazytools.array_to_dataframe(coherence),
gg.aes(x='dim0', color='dim1', y='array', group='dim1')) +
gg.geom_line() +
gg.scale_x_log10())
dfs = []
for t in tqdm(range(1, 10)):
for pos_1 in range(10):
for pos_2 in range(10):
pos_subset = (cbm.to_array()[:-t,pos_1]!=0) & (cbm.to_array()[t:,pos_2]!=0)
tmp_coherence = moving_average(
1-2*(cbm.to_array()[:-t][pos_subset,pos_1]!=cbm.to_array()[t:][pos_subset,pos_2])
)
tmp_df = lazytools.array_to_dataframe(tmp_coherence)
tmp_df['pos_1'] = np.array(pos_1)
tmp_df['pos_2'] = np.array(pos_2)
tmp_df['t'] = np.array(t)
tmp_df['n'] = len(tmp_df)
dfs.append(tmp_df)
df = pd.concat(dfs)
df['dim0'] = (df['dim0']+1)/df['n']
df['coherent'] = (df['pos_1'] <= 4) & (df['pos_2']<= 4)
df['group'] = df['pos_1'].astype(str)+df['pos_2'].astype(str)
(gg.ggplot(df, gg.aes(x='dim0', y='array', group='group', color='coherent')) +
gg.geom_line(alpha=0.2) +
gg.facet_wrap('t') +
gg.scale_x_log10())
(gg.ggplot(df[(df['dim0']==1)], gg.aes(x='array', fill='coherent')) +
gg.geom_histogram(position='identity', alpha=.8) +
gg.facet_wrap('t'))
help(gg.labs)
str(cbm)
cbm.width
|
_____no_output_____
|
MIT
|
notebooks/03-framing-models.ipynb
|
sflippl/patches
|
**Data Visualization** Estimated time needed: **30** minutes In this lab, you will learn how to visualize and interpret data. Objectives: * Import Libraries * Lab Exercises * Identifying duplicates * Plotting Scatterplots * Plotting Boxplots *** Import Libraries All libraries required for this lab are listed below. The libraries pre-installed on Skills Network Labs are commented out. If you run this notebook in a different environment, e.g. your desktop, you may need to uncomment and install certain libraries.
|
# !pip install pandas
# !pip install numpy
# !pip install matplotlib
# !pip install seaborn
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Import the libraries we need for the lab
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Read in the csv file from the url using the pandas `read_csv` function
|
ratings_url = 'https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ST0151EN-SkillsNetwork/labs/teachingratings.csv'
ratings_df = pd.read_csv(ratings_url)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Lab Exercises Identify all duplicate cases using prof. Using all observations, find the average and standard deviation for age. Repeat the analysis by first filtering the data set to include one observation for each instructor, with the total number of observations restricted to 94. Identify all duplicate cases using the prof variable - find the unique values of the prof variable
|
ratings_df.prof.unique()
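# Hedged addition (not part of the original lab): to see the duplicated rows themselves,
# pandas' duplicated() marks every occurrence of a repeated instructor.
ratings_df[ratings_df.duplicated(subset=['prof'], keep=False)]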
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Print out the number of unique values in the prof variable
|
ratings_df.prof.nunique()
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Using all observations, find the average and standard deviation for age
|
ratings_df['age'].mean()
ratings_df['age'].std()
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Repeat the analysis by first filtering the data set to include one observation for each instructor, with the total number of observations restricted to 94. > First we drop duplicates using prof as a subset and assign the result to a new dataframe called no_duplicates_ratings_df
|
no_duplicates_ratings_df = ratings_df.drop_duplicates(subset =['prof'])
no_duplicates_ratings_df.head()
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
> Use the new dataset to get the mean and standard deviation of age
|
no_duplicates_ratings_df['age'].mean()
no_duplicates_ratings_df['age'].std()
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Using a bar chart, demonstrate if instructors teaching lower-division courses receive higher average teaching evaluations.
|
ratings_df.head()
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Find the average teaching evaluation in both groups of upper and lower-division
|
division_eval = ratings_df.groupby('division')[['eval']].mean().reset_index()
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Plot the barplot using the seaborn library
|
sns.set(style="whitegrid")
ax = sns.barplot(x="division", y="eval", data=division_eval)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Plot the relationship between age and teaching evaluation scores. Create a scatterplot with the scatterplot function in the seaborn library
|
ax = sns.scatterplot(x='age', y='eval', data=ratings_df)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Using gender-differentiated scatter plots, plot the relationship between age and teaching evaluation scores. Create a scatterplot with the scatterplot function in the seaborn library; this time, add the hue argument
|
ax = sns.scatterplot(x='age', y='eval', hue='gender',
data=ratings_df)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Create a box plot for beauty scores differentiated by credits. We use the boxplot() function from the seaborn library
|
ax = sns.boxplot(x='credits', y='beauty', data=ratings_df)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
What is the number of courses taught by gender? We use the catplot() function from the seaborn library
|
sns.catplot(x='gender', kind='count', data=ratings_df)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Create a group histogram of courses taught by gender and tenure. We will add the hue = 'tenure' argument
|
sns.catplot(x='gender', hue = 'tenure', kind='count', data=ratings_df)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Add division as another factor to the above histogram. We add another argument named row and use the division variable as the row
|
sns.catplot(x='gender', hue = 'tenure', row = 'division',
kind='count', data=ratings_df,
height = 3, aspect = 2)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Create a scatterplot of age and evaluation scores, differentiated by gender and tenure. Use the relplot() function for complex scatter plots
|
sns.relplot(x="age", y="eval", hue="gender",
row="tenure",
data=ratings_df, height = 3, aspect = 2)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Create a distribution plot of teaching evaluation scores. We use the distplot() function from the seaborn library and set kde = False because we don't need the curve
|
ax = sns.distplot(ratings_df['eval'], kde = False)
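# Note (our addition): distplot is deprecated in newer seaborn releases (see the warning below);
# sns.histplot(ratings_df['eval']) is the suggested axes-level replacement.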
|
D:\anaconda3\lib\site-packages\seaborn\distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Create a distribution plot of teaching evaluation score with gender as a factor
|
## use the distplot function from the seaborn library
sns.distplot(ratings_df[ratings_df['gender'] == 'female']['eval'], color='green', kde=False)
sns.distplot(ratings_df[ratings_df['gender'] == 'male']['eval'], color="orange", kde=False)
plt.show()
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Create a box plot - age of the instructor by gender
|
ax = sns.boxplot(x="gender", y="age", data=ratings_df)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Compare age along with tenure and gender
|
ax = sns.boxplot(x="tenure", y="age", hue="gender",
data=ratings_df)
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Practice Questions Question 1: Create a distribution plot of beauty scores with Native English speaker as a factor. * Make the color of the native English speakers' plot orange and the non-native English speakers' plot blue
|
## insert code
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Double-click **here** for the solution.<!-- The answer is below:sns.distplot(ratings_df[ratings_df['native'] == 'yes']['beauty'], color="orange", kde=False) sns.distplot(ratings_df[ratings_df['native'] == 'no']['beauty'], color="blue", kde=False) plt.show()--> Question 2: Create a Horizontal box plot of the age of the instructors by visible minority
|
## insert code
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Double-click **here** for a hint.<!-- The hint is below:Remember that the positions of the argument determine whether it will be vertical or horizontal--> Double-click **here** for the solution.<!-- The answer is below:ax = sns.boxplot(x="age", y="minority", data=ratings_df)--> Question 3: Create a group histogram of tenure by minority and add the gender factor
|
## insert code
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
Double-click **here** for the solution.<!-- The answer is below:sns.catplot(x='tenure', hue = 'minority', row = 'gender', kind='count', data=ratings_df, height = 3, aspect = 2)--> Question 4: Create a boxplot of the age variable
|
## insert code
|
_____no_output_____
|
MIT
|
IBM-statistics/2-Visualizing_Data.ipynb
|
brndnaxr/brndnaxr-micro-projects
|
An analysis of the dataset presented in [this technical comment](https://arxiv.org/abs/2004.06601), but with our quality cuts applied. As a response to our paper [Dessert et al. _Science_ 2020](https://science.sciencemag.org/content/367/6485/1465) (DRS20), we received [a technical comment](https://arxiv.org/abs/2004.06601) (BMRS). BMRS performed a simplified version of our analysis on a partially overlapping dataset using 17 Ms of MOS observations spanning 20$^\circ$ to 35$^\circ$ from the Galactic Center. They assumed a single power-law background with additional lines at 3.1, 3.3, 3.7, and 3.9 keV, and claim a 4$\sigma$ detection of a line at 3.48 keV using an energy window of 3-4 keV. However, it is important to note that the BMRS analysis does not apply any (stated) quality cuts to their dataset. On the other hand, as detailed in DRS20, we selected low-background or blank-sky observations, so the data is much cleaner. In our formal response to the technical comment, we repeat this analysis on the 8.5 Ms of the BMRS dataset that pass our quality cuts. In this notebook, we show this data and analysis in detail. Many of the details will follow the procedure used in the notebook `DRS20_mos_stacked`. For a pedagogical introduction to the analysis here, we refer to that notebook. If you use the data in this example in a publication, please cite Dessert et al. _Science_ 2020. **Please direct any questions to [email protected].**
|
# Import required modules
%matplotlib inline
%load_ext autoreload
%autoreload 2
import sys,os
import numpy as np
from scipy.stats import chi2 as chi2_scipy
from scipy.optimize import dual_annealing
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from matplotlib import rc
from matplotlib import rcParams
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rcParams['text.usetex'] = True
rcParams['text.latex.unicode'] = True
|
_____no_output_____
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
**NB**: In this notebook, we minimize with `scipy` so that it is easy to run for the interested reader. For scientific analysis, we recommend [Minuit](https://iminuit.readthedocs.io/en/latest/) as a minimizer. In our paper, we used Minuit. Define signal line energy. By default we will look for an anomalous line at 3.48 keV, as defined by the EUXL parameter below, denoting the energy of the unidentified X-ray line. Lines at different energies can be searched for by changing this parameter accordingly (for example to 3.55 keV as in the previous notebook). We start with 3.48 keV as this is the fiducial line energy in BMRS. We note that 3.48 keV is the energy where the weakest limit is obtained, although on the clean data we will not find any evidence for a feature there.
|
EUXL = 3.48 # [keV]
|
_____no_output_____
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
**NB:** changing EUXL will of course vary the results below, and values in the surrounding discussion will not necessarily be reflective. Load in the data and models. First we will load in the data products that we will use in the analysis. These include the stacked MOS data, associated energy bins, and uncertainties. We will use data from two regions of interest (ROIs): - **Signal Region (SR)**: 20-35 degrees from the Galactic Center; this was the fiducial ROI in BMRS (DRS20 instead used 5-45); - **Background Region (BR)**: 60-90 degrees from the Galactic Center, a useful region for studying the background as it contains less dark matter. We also load the appropriately averaged D-factors for these two ROIs for our fiducial NFW profile, along with the respective exposure times.
|
## Signal Region (20-35 degrees)
data = np.load("../data/data_mos_boyarsky_ROI_our_cuts.npy") # [cts/s/keV]
data_yerrs = np.load("../data/data_yerrs_mos_boyarsky_ROI_our_cuts.npy") # [cts/s/keV]
QPB = np.load("../data/QPB_mos_boyarsky_ROI_our_cuts.npy") # [cts/s/keV]
# Exposure time
Exp = 8.49e6 # [s]
# D-factor averaged over the signal ROI
D_signal = 4.4e28 # [keV/cm^2]
## Background Region (60-90 degrees)
# Data and associated errors
data_bkg = np.load("../data/data_mos_bkg_ROI.npy") # [cts/s/keV]
data_yerrs_bkg = np.load("../data/data_yerrs_mos_bkg_ROI.npy") # [cts/s/keV]
# Exposure time
Exp_bkg = 67.64e6 # [s]
# D-factor averaged over the background ROI
D_bkg = 1.91e28 # [keV/cm^2]
## Energy binning appropriate for both the signal and background
Energies=np.load("../data/mos_energies.npy") # [keV]
|
_____no_output_____
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
Load in the Models. Next we use the models that will be used in fitting the above data. There is a sequence of models corresponding to physical line fluxes at the energies specified by `Es_line`. That is, `mod_UXL` gives the detector counts as a function of energy after forward modeling a physical line at EUXL keV with a flux of 1 cts/cm$^2$/s/sr.
|
# Load the forward-modeled lines and energies
mods = np.load("../data/mos_mods.npy")
Es_line = np.load("../data/mos_mods_line_energies.npy")
# Load the detector response
det_res = np.load("../data/mos_det_res.npy")
arg_UXL = np.argmin((Es_line-EUXL)**2)
mod_UXL = mods[arg_UXL]
print "The energy of our "+str(EUXL)+" keV line example will be: "+str(Es_line[arg_UXL])+" keV"
# How to go from flux to sin^2(2\theta)
def return_sin_theta_lim(E_line,flux,D_factor):
"""
D_factor [keV/cm^2]
flux [cts/cm^2/s/sr]
E_line [keV] (dark matter mass is twice this value)
returns: associated sin^2(2theta)
"""
DMmass = 2.*E_line
res = (4.*np.pi*DMmass/D_factor)/1.361e-22*(1/DMmass)**5*flux
return res
|
The energy of our 3.48 keV line example will be: 3.4824707846410687 keV
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
Visualize the data. We plot the data in the signal region, where the dashed vertical line denotes the location of the putative signal line. Note in particular that the flux is similar to that in Fig. 2 of DRS20, indicating that the included observations are low-background.
|
fig = plt.figure(figsize=(10,8))
plt.errorbar(Energies,data,yerr=data_yerrs,xerr=(Energies[1]-Energies[0])/2.,
color="black",label="data",marker="o", fmt='none',capsize=4)
plt.axvline(EUXL,color="black",linestyle="dashed")
plt.xlim(EUXL-0.25,EUXL+0.25)
plt.ylim(7.9e-2,0.1)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"SR Flux [cts/s/keV]",fontsize=22)
plt.show()
|
/sw/lsa/centos7/python-anaconda2/2019.03/lib/python2.7/site-packages/matplotlib/font_manager.py:1331: UserWarning: findfont: Font family [u'serif'] not found. Falling back to DejaVu Sans
(prop.get_family(), self.defaultFamily[fontext]))
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
Statistical analysis. Now, let's perform a rigorous statistical analysis, using profile likelihood. As we operate in the large counts limit for the stacked data, we can perform a simple $\chi^2$ analysis rather than a full joint likelihood analysis as used by default in Dessert et al. 2020.
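For reference, a compact summary of the statistic implemented in the `chi2` class below (our paraphrase, with $d_i$ the stacked data in energy bin $i$, $\sigma_i$ its uncertainty, $b_i(\theta)$ the null/background model with nuisance parameters $\theta$, $s_i$ the forward-modeled line template, and $A$ the signal normalization):

\begin{equation}
\chi^2(A,\theta) = \sum_i \frac{\left[d_i - b_i(\theta) - A\,s_i\right]^2}{\sigma_i^2},
\end{equation}

with the 95% one-sided upper limit on $A$ set where the profiled $\Delta\chi^2(A) = \min_\theta \chi^2(A,\theta) - \chi^2_{\rm min}$ crosses 2.71, as used in the limit-setting cells below.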
|
## Define the functions we will use
class chi2:
""" A set offunctions for calculation the chisq associated with different hypotheses
"""
def __init__(self,ens,dat,err,null_mod,sig_template):
self._ens = ens
self._dat = dat
self._err = err
self._null_mod = null_mod
self._sig_template = sig_template
self._A_sig = 0.0
def chi2(self,x):
null_mod = self._null_mod(self._ens,x[1:])
sig_mod = self._sig_template*x[0]
return np.sum((self._dat - null_mod - sig_mod)**2/self._err**2)
def chi2_null(self,x):
null_mod = self._null_mod(self._ens,x)
return np.sum((self._dat - null_mod)**2/self._err**2)
def chi2_fixed_signal(self,x):
null_mod = self._null_mod(self._ens,x)
sig_mod = self._sig_template*self._A_sig
return np.sum((self._dat - null_mod - sig_mod)**2/self._err**2)
def fix_signal_strength(self,A_sig):
self._A_sig = A_sig
|
_____no_output_____
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
Fit within $E_{\rm UXL} \pm 0.25$ keV. First, we will fit the models from $[E_{\rm UXL}-0.25,\,E_{\rm UXL}+0.25]$ keV. Later in this notebook, we broaden this range to 3.0 to 4.0 keV. For the default $E_{\rm UXL} = 3.48$ keV, this corresponds to $3.23~{\rm keV} < E < 3.73~{\rm keV}$. To begin with, then, let's reduce the dataset to this restricted range.
|
whs_reduced = np.where((Energies >= EUXL-0.25) & (Energies <= EUXL+0.25))[0]
Energies_reduced = Energies[whs_reduced]
data_reduced = data[whs_reduced]
data_yerrs_reduced = data_yerrs[whs_reduced]
data_bkg_reduced = data_bkg[whs_reduced]
data_yerrs_bkg_reduced = data_yerrs_bkg[whs_reduced]
mod_UXL_reduced = mod_UXL[whs_reduced]
|
_____no_output_____
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
Let's fit this data with the background-only hypothesis and consider the quality of the fit. A polynomial background model. Here we model the continuum background as a quadratic. In addition, we add degrees of freedom associated with the possible background lines at 3.3 keV and 3.7 keV.
|
arg_3p3 = np.argmin((Es_line-3.32)**2)
mod_3p3 = mods[arg_3p3]
arg_3p7 = np.argmin((Es_line-3.68)**2)
mod_3p7 = mods[arg_3p7]
def mod_poly_two_lines(ens,x):
"An extended background model to include two additional lines"
A, B, C, S1, S2 = x
return A+B*ens + C*ens**2 + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_poly_two_lines,mod_UXL_reduced)
mn_null_line = minimize(chi2_instance.chi2_null,np.array([0.282,-0.098, 0.011,0.1,0.1]),method='Nelder-Mead')
mn_line = minimize(chi2_instance.chi2,np.array([1.e-2,mn_null_line.x[0],mn_null_line.x[1],mn_null_line.x[2],mn_null_line.x[3],mn_null_line.x[4]]),method='Nelder-Mead',options={'fatol':1e-10,'xatol':1e-10,'adaptive':True})
print "The Delta chi^2 between signal and null model is:", mn_null_line.fun - mn_line.fun
print "The chi^2/DOF of the null-model fit is:", mn_null_line.fun/(len(whs_reduced)-5.)
print "Expected 68% containment for the chi^2/DOF:", np.array(chi2_scipy.interval(0.68,len(whs_reduced)-5.))/float(len(whs_reduced)-5.)
print "Expected 99% containment for the chi^2/DOF:", np.array(chi2_scipy.interval(0.99,len(whs_reduced)-5.))/float(len(whs_reduced)-5.)
|
The Delta chi^2 between signal and null model is: 0.18286814612878288
The chi^2/DOF of the null-model fit is: 0.9413439893438815
Expected 68% containment for the chi^2/DOF: [0.85614219 1.14370943]
Expected 99% containment for the chi^2/DOF: [0.66578577 1.41312157]
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
The null model is a good fit to the data, and the best-fit signal strength is still consistent with zero at 1$\sigma$. Next we plot the best-fit signal and background models; in particular, we see the model is almost identical in the two cases, emphasizing the lack of preference for a new emission line at 3.48 keV in this dataset.
|
fig = plt.figure(figsize=(10,8))
plt.errorbar(Energies,data,yerr=data_yerrs,xerr=(Energies[1]-Energies[0])/2.,
color="black",label="data",marker="o", fmt='none',capsize=4)
plt.plot(Energies_reduced,mod_poly_two_lines(Energies_reduced,mn_null_line.x),'k-',label =r"Null model")
plt.plot(Energies_reduced,mod_poly_two_lines(Energies_reduced,mn_line.x[1:])+mn_line.x[0]*mod_UXL_reduced,
'r-',label =r"Signal model")
plt.axvline(EUXL,color="black",linestyle="dashed")
plt.xlim(EUXL-0.25,EUXL+0.25)
plt.ylim(0.08,0.1)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"SR Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
|
_____no_output_____
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
Finally let's compute the associated limit via profile likelihood.
|
A_sig_array = np.linspace(mn_line.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn_line.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead',
options={'fatol':1e-10,'xatol':1e-10,'adaptive':True})
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
|
The 95% upper limit on the signal flux is 0.02664201119758925 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 2.382479159553265e-11
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
Power law background model Now let's try a power law for the continuum background model (along with the two lines) as done in BMRS. Given that the stacked data is the sum of power laws, we would not expect the stacked data to be a power law itself, although in our relatively clean dataset we find it to be a reasonable description.
|
def mod_power_two_lines(ens,x):
"An extended background model to include two additional lines"
A, n, S1, S2 = x
return A*ens**n + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_power_two_lines,mod_UXL_reduced)
mn_null_line = minimize(chi2_instance.chi2_null,np.array([0.18244131, -0.58714693, 0.02237754, 0.01157593]),method='Nelder-Mead')
mn_line = minimize(chi2_instance.chi2,np.array([1.e-2,mn_null_line.x[0],mn_null_line.x[1],mn_null_line.x[2],mn_null_line.x[3]]),method='Nelder-Mead',options={'fatol':1e-10,'xatol':1e-10,'adaptive':True})
print "The Delta chi^2 between signal and null model is:", mn_null_line.fun - mn_line.fun
print "The chi^2/DOF of the null-model fit is:", mn_null_line.fun/(len(whs_reduced)-4.)
fig = plt.figure(figsize=(10,8))
plt.errorbar(Energies,data,yerr=data_yerrs,xerr=(Energies[1]-Energies[0])/2.,
color="black",label="data",marker="o", fmt='none',capsize=4)
plt.plot(Energies_reduced,mod_power_two_lines(Energies_reduced,mn_null_line.x),'k-',label =r"Null model")
plt.plot(Energies_reduced,mod_power_two_lines(Energies_reduced,mn_line.x[1:])+mn_line.x[0]*mod_UXL_reduced,
'r-',label =r"Signal model")
plt.axvline(EUXL,color="black",linestyle="dashed")
plt.xlim(EUXL-0.25,EUXL+0.25)
plt.ylim(0.08,0.1)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"SR Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
A_sig_array = np.linspace(mn_line.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn_line.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead',
options={'fatol':1e-10,'xatol':1e-10,'adaptive':True})
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
|
The 95% upper limit on the signal flux is 0.020575238409308062 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 1.8399540616307525e-11
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
The power law continuum background does not substantively change the results: we still find no evidence for a line. Note this is the same procedure as in BMRS's test color-coded red in their Fig. 1 and Tab. 1. In that analysis, they find marginal 1.3$\sigma$ evidence for a line, although on our cleaner dataset we found no evidence. **NB:** As an aside, BMRS also perform an analysis, color-coded green in their Fig. 1 and Tab. 1, in which they fix the 3.3 keV and 3.7 keV emission lines to their best-fit fluxes in the fit. They claim that DRS20, in our Supplementary Material Sec. 2.7, also fixed the fluxes of these lines. This statement is incorrect. Departing from the narrow window. We now fit the same dataset over the 3-4 keV range. Our procedure is as follows. Firstly, we update the dataset. Then we will define a new background model incorporating the additional lines at 3.1 and 3.9 keV. Finally we repeat our default $\chi^2$ fit procedure. Note that we continue to use a power law continuum background model here. As such, the following analysis is a repetition of the BMRS magenta color-coded analysis on this reduced and low-background dataset. In that magenta analysis, they claim a 4.0$\sigma$ detection of a line at 3.48 keV. Let us see what we obtain when we include only the observations passing our quality cuts.
|
whs_reduced = np.where((Energies >= 3.0) & (Energies <= 4.0))[0]
Energies_reduced = Energies[whs_reduced]
data_reduced = data[whs_reduced]
data_yerrs_reduced = data_yerrs[whs_reduced]
data_bkg_reduced = data_bkg[whs_reduced]
data_yerrs_bkg_reduced = data_yerrs_bkg[whs_reduced]
mod_UXL_reduced = mod_UXL[whs_reduced]
arg_3p1 = np.argmin((Es_line-3.12)**2)
mod_3p1 = mods[arg_3p1]
arg_3p9 = np.argmin((Es_line-3.90)**2)
mod_3p9 = mods[arg_3p9]
arg_3p7 = np.argmin((Es_line-3.68)**2)
mod_3p7 = mods[arg_3p7]
arg_3p3 = np.argmin((Es_line-3.32)**2)
mod_3p3 = mods[arg_3p3]
def mod_power_four_lines(ens,x):
A, n,S1, S2, S3, S4 = x
return A*ens**n + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]+ S3*mod_3p1[whs_reduced] + S4*mod_3p9[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_power_four_lines,mod_UXL_reduced)
x0 = np.array([0.18088868 ,-0.58201284 , 0.02472505 , 0.01364361 , 0.08959867,
0.03220519])
bounds = np.array([[1e-6,5],[-3,0],[0,0.5],[0,0.5],[0,0.5],[0,0.5]])
mn_null = dual_annealing(chi2_instance.chi2_null,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=500)
boundss = np.array([[-0.5,0.5],[1e-6,5],[-3,0],[0,0.1],[0,0.1],[0,0.1],[0,0.2]])
x0s=np.array([1.e-2,mn_null.x[0],mn_null.x[1],mn_null.x[2],mn_null.x[3],mn_null.x[4],mn_null.x[5]])
mn = dual_annealing(chi2_instance.chi2,x0=x0s,bounds=boundss,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=500)
print "Best fit background parameters:", mn_null.x
print "Best fit signal+background parameters:", mn.x
print "The Delta chi^2 between signal and null model is:", mn_null.fun - mn.fun
print "The chi^2/DOF of the null-model fit is:", mn_null.fun/(len(whs_reduced)-6.)
print "NB: the best-fit signal strength in this case is", mn.x[0], "cts/cm$^2$/s/sr"
|
Best fit background parameters: [ 0.1807753 -0.58187775 0.02547398 0.01436228 0.09052193 0.03304785]
Best fit signal+background parameters: [ 0.0015216 0.18110231 -0.58372105 0.02608541 0.0154532 0.0911047
0.03434406]
The Delta chi^2 between signal and null model is: 0.03379302143113705
The chi^2/DOF of the null-model fit is: 0.9791145496670692
NB: the best-fit signal strength in this case is 0.0015216044839330481 cts/cm$^2$/s/sr
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
We find no evidence for a 3.5 keV line when we expand the energy window. Although the best-fit signal strength is positive, the $\Delta \chi^2 \sim 0.03$, which is of entirely negligible significance. Let's have a look at the best-fit signal and background models in this case. There are subtle differences between the two, but no excess appears at 3.48 keV. Additionally, we define a fixed signal to plot on top of the data for illustration. The default signal parameters here correspond to a 2$\sigma$ downward fluctuation in the signal reported in [Cappelluti et al. ApJ 2018](https://iopscience.iop.org/article/10.3847/1538-4357/aaaa68/meta) from observations of the Chandra Deep Fields. Note that even taking the conservative downward fluctuation, it is not a good fit to the data. This plot appears in our response to BMRS.
|
flux_ill = 4.8e-11 / return_sin_theta_lim(EUXL,1.,D_signal)
print "Flux [cts/cm^2/s/sr] and sin^(2theta) for illustration: ", flux_ill, return_sin_theta_lim(EUXL,flux_ill,D_signal)
chi2_instance.fix_signal_strength(flux_ill)
mn_f = dual_annealing(chi2_instance.chi2_fixed_signal,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=500)
print "Delta chi^2 between fixed signal and null:", mn_null.fun-mn_f.fun
def avg_data(data,n):
return np.mean(data.reshape(-1, n), axis=1)
fig = plt.figure(figsize=(10,8))
plt.errorbar(avg_data(Energies,6),avg_data(data,6),yerr=np.sqrt(6*avg_data(data_yerrs**2,6))/6.,xerr=6*(Energies[1]-Energies[0])/2.,
color="black",marker="o", fmt='none',capsize=4)
plt.plot(Energies_reduced,mod_power_four_lines(Energies_reduced,mn_null.x),
'k-',label =r"Null P.L. model")
plt.plot(Energies_reduced,mod_power_four_lines(Energies_reduced,mn.x[1:])+mn.x[0]*mod_UXL_reduced,
'r-',label =r"Best fit signal model")
plt.plot(Energies_reduced,mod_power_four_lines(Energies_reduced,mn_f.x)+chi2_instance._A_sig*mod_UXL_reduced,
'r--',label =r"$\sin^2(2\theta) = 4.8 \times 10^{-11}$")
plt.xlim(3,4)
plt.ylim(0.08,0.1)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
|
_____no_output_____
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
**NB:** In the plot above we averaged the data solely for presentation purposes; no averaging was performed in the analysis. Finally, we compute the limit in this case using the by-now-familiar procedure.
|
A_sig_array = np.linspace(mn.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead')
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
|
The 95% upper limit on the signal flux is 0.015232665842012591 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 1.362191038952712e-11
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
Now with a polynomial background. Here we repeat the earlier analysis but with a polynomial background model, as used in the stacked analysis in DRS20 Supplementary Material Sec. 2.9.
|
whs_reduced = np.where((Energies >= 3.0) & (Energies <= 4.0))[0]
Energies_reduced = Energies[whs_reduced]
data_reduced = data[whs_reduced]
data_yerrs_reduced = data_yerrs[whs_reduced]
data_bkg_reduced = data_bkg[whs_reduced]
data_yerrs_bkg_reduced = data_yerrs_bkg[whs_reduced]
mod_UXL_reduced = mod_UXL[whs_reduced]
arg_3p1 = np.argmin((Es_line-3.12)**2) #3.12 #should really be 3.128
mod_3p1 = mods[arg_3p1]
arg_3p9 = np.argmin((Es_line-3.90)**2)
mod_3p9 = mods[arg_3p9]
arg_3p7 = np.argmin((Es_line-3.68)**2)
mod_3p7 = mods[arg_3p7]
arg_3p3 = np.argmin((Es_line-3.32)**2)
mod_3p3 = mods[arg_3p3]
def mod_poly_four_lines(ens,x):
A, B, C,S1, S2, S3, S4 = x
return A+B*ens + C*ens**2 + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]+ S3*mod_3p1[whs_reduced] + S4*mod_3p9[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_poly_four_lines,mod_UXL_reduced)
x0 = np.array([ 0.2015824 , -0.05098609 , 0.0052141 , 0.02854594 , 0.01742288,
0.08976637 , 0.029351 ])
bounds = np.array([[-1,1],[-0.5,0.5],[-0.1,0.1],[0,0.2],[0,0.2],[0,0.2],[0,0.2]])
mn_null = dual_annealing(chi2_instance.chi2_null,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=3000)
boundss = np.array([[-0.5,0.5],[-1,1],[-0.5,0.5],[-0.1,0.1],[0,0.2],[0,0.2],[0,0.2],[0,0.2]])
x0s=np.array([1.e-2,mn_null.x[0],mn_null.x[1],mn_null.x[2],mn_null.x[3],mn_null.x[4],mn_null.x[5],mn_null.x[6]])
mn = dual_annealing(chi2_instance.chi2,x0=x0s,bounds=boundss,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=3000)
print "Best fit background parameters:", mn_null.x
print "Best fit signal+background parameters:", mn.x
print "The Delta chi^2 between signal and null model is:", mn_null.fun - mn.fun
print "The chi^2/DOF of the null-model fit is:", mn_null.fun/(len(whs_reduced)-7.)
print "NB: the best-fit signal strength in this case is:", mn.x[0], "cts/cm$^2$/s/sr"
fig = plt.figure(figsize=(10,8))
plt.errorbar(avg_data(Energies,6),avg_data(data,6),yerr=np.sqrt(6*avg_data(data_yerrs**2,6))/6.,xerr=6*(Energies[1]-Energies[0])/2.,
color="black",marker="o", fmt='none',capsize=4)
plt.plot(Energies_reduced,mod_poly_four_lines(Energies_reduced,mn_null.x),
'k-',label =r"Null P.L. model")
plt.plot(Energies_reduced,mod_poly_four_lines(Energies_reduced,mn.x[1:])+mn.x[0]*mod_UXL_reduced,
'r-',label =r"Best fit signal model")
plt.xlim(3,4)
plt.ylim(0.08,0.1)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
A_sig_array = np.linspace(mn.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead',
options={'fatol':1e-10,'xatol':1e-10,'adaptive':True})
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
|
The 95% upper limit on the signal flux is 0.02781422393515111 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 2.487305045147695e-11
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
This change to the background continuum model does not change any conclusions. The 3.5 keV line is in tension with these limits. Subtract the background data. Now, we subtract off the data taken far away from the Galactic Center. We use a folded power law for the background continuum under the assumption that the residual flux in the signal region should be astrophysical.
|
# A folded powerlaw function
def folded_PL(A,n):
mod_F = np.matmul(det_res,A*Energies**n)
return mod_F
def mod_folded_power_four_lines(ens,x):
A, n,S1, S2, S3, S4 = x
return folded_PL(A,n)[whs_reduced] + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]+ S3*mod_3p1[whs_reduced] + S4*mod_3p9[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced- data_bkg[whs_reduced],np.sqrt(data_yerrs_reduced**2+data_yerrs_bkg_reduced**2),mod_folded_power_four_lines,mod_UXL_reduced)
x0 = np.array([1.80533176e-02, -5.18514882e-01, 9.80776897e-03, 1.45353856e-04, 6.39560515e-02, 1.84053386e-02])
bounds = np.array([[0.0,0.1],[-2,0],[0,0.1],[0,0.2],[0,0.2],[0,0.2]])
mn_null = dual_annealing(chi2_instance.chi2_null,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
boundss = np.array([[-0.5,0.5],[0.0,0.1],[-2,0],[0,0.1],[0,0.2],[0,0.2],[0,0.2]])
x0s=np.array([1.e-2,mn_null.x[0],mn_null.x[1],mn_null.x[2],mn_null.x[3],mn_null.x[4],mn_null.x[5]])
mn = dual_annealing(chi2_instance.chi2,x0=x0s,bounds=boundss,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
print "Best fit background parameters:", mn_null.x
print "Best fit signal+background parameters:", mn.x
print "The Delta chi^2 between signal and null model is:", mn_null.fun - mn.fun
print "The chi^2/DOF of the null-model fit is:", mn_null.fun/(len(whs_reduced)-6.)
print "NB: the best-fit signal strength in this case is:", mn.x[0], "cts/cm$^2$/s/sr"
fig = plt.figure(figsize=(10,6))
plt.errorbar(avg_data(Energies,6),avg_data(data-data_bkg,6),yerr=np.sqrt(6*avg_data(data_yerrs**2+data_yerrs_bkg**2,6))/6.,xerr=6*(Energies[1]-Energies[0])/2.,
color="black",marker="o", fmt='none',capsize=4) #label="data"
plt.plot(Energies_reduced,mod_folded_power_four_lines(Energies_reduced,mn_null.x),
'k-',label =r"Null model")
plt.plot(Energies_reduced,mod_folded_power_four_lines(Energies_reduced,mn.x[1:])+mn.x[0]*mod_UXL_reduced,
'r-',label =r"Best fit signal model")
plt.xlim(3,4)
plt.ylim(0.006,0.015)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"SR Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
A_sig_array = np.linspace(mn.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead')
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal-D_bkg)
|
The 95% upper limit on the signal flux is 0.01567112720512729 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 2.476370769990894e-11
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
In this version of the analysis, too, we see no evidence for a 3.5 keV line, and we obtain limits comparable to those from the stacked analyses in the previous sections. Include the Quiescent Particle Background (QPB). Now we will do a joint likelihood including the QPB data. The QPB data is complicated because the data is correlated from observation to observation. Thus, summing the data leads to correlated uncertainties. To account for this, we will estimate the uncertainties on the QPB data in a data-driven way by fixing the normalization of the $\chi^2$ function such that the power law gives the expected $\chi^2/{\rm DOF}$. We note that this is just an approximation, which is not necessary within the context of the full joint likelihood framework.
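Concretely, the rescaling implemented in `chi2_QPB` below can be written as (our summary; $\hat{x}$ denotes the best-fit power-law parameters and $N$ the number of QPB energy bins in the window):

\begin{equation}
\chi^2_{\rm QPB}(x) = \frac{\chi^2_{\rm unnorm}(x)}{\chi^2_{\rm unnorm}(\hat{x})}\,(N-2),
\end{equation}

so that the best-fit power law attains the expected $\chi^2$ for $N-2$ degrees of freedom.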
|
# We are going to fix a powerlaw to the QPB data and then renormalize the chi^2 function
def PL(A,n,ens):
return A*ens**n
def chi2_QPB_UN(x):
A,n = x
mod = PL(A,n,Energies_reduced)
return np.sum((mod-QPB[whs_reduced])**2)
mn_QPB = minimize(chi2_QPB_UN,[0.084,-0.20],method="Nelder-Mead")
bf_QPB=mn_QPB.x
chi2_not_reduced = chi2_QPB_UN(bf_QPB)
# The function below has the expected normalization
chi2_QPB = lambda x: chi2_QPB_UN(x)/chi2_not_reduced*((len(QPB[whs_reduced])-2.))
fig = plt.figure(figsize=(10,8))
plt.scatter(Energies_reduced,QPB[whs_reduced],marker="o",color="black")
plt.plot(Energies_reduced,PL(bf_QPB[0],bf_QPB[1],Energies_reduced),'r-',label="best-fit P.L.")
plt.xlim(3,4)
plt.ylim(0.04,0.065)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"QPB [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.show()
def mod_2power_four_lines(ens,x):
AQPB, nQPB,A, n,S1, S2, S3, S4 = x
return PL(AQPB,nQPB,ens)+ folded_PL(A,n)[whs_reduced] + S1*mod_3p3[whs_reduced] + S2*mod_3p7[whs_reduced]+ S3*mod_3p1[whs_reduced] + S4*mod_3p9[whs_reduced]
chi2_instance = chi2(Energies_reduced,data_reduced,data_yerrs_reduced,mod_2power_four_lines,mod_UXL_reduced)
x0 = np.array([0.07377512 ,-0.28001362 , 0.15844243, -1.07912658 , 0.02877547,
0.01134023 , 0.08755627 , 0.03134949])
bounds = np.array([[0.75*bf_QPB[0],1.25*bf_QPB[0]],[-1,0],[0.0001,2.0],[-3,0],[0,0.1],[0,0.1],[0,0.1],[0,0.1]])
# Below is the joint likelihood for the null model
def joint_chi2(x):
return chi2_QPB(x[:2])+chi2_instance.chi2_null(x)
mn_null = dual_annealing(joint_chi2,x0=x0,bounds=bounds,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
# Below is the joint likelihood for the signal model
def joint_chi2_sig(x):
return chi2_QPB(x[1:3])+chi2_instance.chi2(x)
boundss = np.array([[-0.5,0.5],[0.5*bf_QPB[0],2*bf_QPB[0]],[-1,0],[0.0001,2.0],[-3,0],[0,0.1],[0,0.1],[0,0.1],[0,0.1]])
x0s=np.array([1.e-2,mn_null.x[0],mn_null.x[1],mn_null.x[2],mn_null.x[3],mn_null.x[4],mn_null.x[5],mn_null.x[6],mn_null.x[7]])
mn = dual_annealing(joint_chi2_sig,x0=x0s,bounds=boundss,local_search_options={"method": "Nelder-Mead"},seed=1234,maxiter=1000)
print "The Delta chi^2 between signal and null model is:", mn_null.fun - mn.fun
print "NB: the best-fit signal strength in this case is:", mn.x[0], "cts/cm$^2$/s/sr"
fig = plt.figure(figsize=(10,8))
plt.errorbar(avg_data(Energies,6),avg_data(data,6),yerr=np.sqrt(6*avg_data(data_yerrs**2,6))/6.,xerr=6*(Energies[1]-Energies[0])/2.,
color="black",marker="o", fmt='none',capsize=4) #label="data"
plt.plot(Energies_reduced,mod_2power_four_lines(Energies_reduced,mn.x[1:])+mn.x[0]*mod_UXL_reduced,
'r-',label =r"Best fit signal model")
x0 = np.array([bf_QPB[0],bf_QPB[1], 0.064218, -0.4306988 , 0.02542355 , 0.01451921 , 0.09027154, 0.03331636])
plt.plot(Energies_reduced,mod_2power_four_lines(Energies_reduced,mn_null.x),
'k-',label =r"Null P.L. model")
plt.xlim(3,4)
plt.ylim(0.08,0.1)
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.xlabel(r"$E$ [keV]",fontsize=22)
plt.ylabel(r"Flux [cts/s/keV]",fontsize=22)
plt.legend(fontsize=22)
plt.show()
A_sig_array = np.linspace(mn.x[0],0.05,100)
chi2_sig_array = np.zeros(len(A_sig_array))
bf = mn.x[1:]
for i in range(len(A_sig_array)):
chi2_instance.fix_signal_strength(A_sig_array[i])
mn_profile = minimize(chi2_instance.chi2_fixed_signal,bf,method='Nelder-Mead')
bf = mn_profile.x
chi2_sig_array[i] = mn_profile.fun
amin = np.argmin((chi2_sig_array-chi2_sig_array[0] - 2.71)**2)
limit_signal_strength = A_sig_array[amin]
print "The 95% upper limit on the signal flux is", limit_signal_strength, "cts/cm^2/s/sr"
print "This corresponds to a limit on sin^2(2theta) of", return_sin_theta_lim(EUXL,limit_signal_strength,D_signal)
|
The 95% upper limit on the signal flux is 0.019016670961038363 cts/cm^2/s/sr
This corresponds to a limit on sin^2(2theta) of 1.700578155032655e-11
|
MIT
|
Jupyter/BMRS20_mos_our_cuts.ipynb
|
bsafdi/BlankSkyfor3p5
|
Multi-ConvNet Sentiment Classifier In this notebook, we concatenate the outputs of *multiple, parallel convolutional layers* to classify IMDB movie reviews by their sentiment. Load dependencies
|
import tensorflow
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model # new!
from tensorflow.keras.layers import Input, concatenate # new!
from tensorflow.keras.layers import Dense, Dropout, Embedding, SpatialDropout1D, Conv1D, GlobalMaxPooling1D
from tensorflow.keras.callbacks import ModelCheckpoint
import os
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
|
_____no_output_____
|
MIT
|
notebooks/multi_convnet_sentiment_classifier.ipynb
|
ewbolme/DLTFpT
|
Set hyperparameters
|
# output directory name:
output_dir = 'model_output/multiconv'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000
max_review_length = 400
pad_type = trunc_type = 'pre'
drop_embed = 0.2
# convolutional layer architecture:
n_conv_1 = n_conv_2 = n_conv_3 = 256
k_conv_1 = 3
k_conv_2 = 2
k_conv_3 = 4
# dense layer architecture:
n_dense = 256
dropout = 0.2
|
_____no_output_____
|
MIT
|
notebooks/multi_convnet_sentiment_classifier.ipynb
|
ewbolme/DLTFpT
|
Load data
|
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words)
|
_____no_output_____
|
MIT
|
notebooks/multi_convnet_sentiment_classifier.ipynb
|
ewbolme/DLTFpT
|
Preprocess data
|
x_train = pad_sequences(x_train, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
|
_____no_output_____
|
MIT
|
notebooks/multi_convnet_sentiment_classifier.ipynb
|
ewbolme/DLTFpT
|
Design neural network architecture
|
input_layer = Input(shape=(max_review_length,),
dtype='int16', name='input')
# embedding:
embedding_layer = Embedding(n_unique_words, n_dim,
name='embedding')(input_layer)
drop_embed_layer = SpatialDropout1D(drop_embed,
name='drop_embed')(embedding_layer)
# three parallel convolutional streams:
conv_1 = Conv1D(n_conv_1, k_conv_1,
activation='relu', name='conv_1')(drop_embed_layer)
maxp_1 = GlobalMaxPooling1D(name='maxp_1')(conv_1)
conv_2 = Conv1D(n_conv_2, k_conv_2,
activation='relu', name='conv_2')(drop_embed_layer)
maxp_2 = GlobalMaxPooling1D(name='maxp_2')(conv_2)
conv_3 = Conv1D(n_conv_3, k_conv_3,
activation='relu', name='conv_3')(drop_embed_layer)
maxp_3 = GlobalMaxPooling1D(name='maxp_3')(conv_3)
# concatenate the activations from the three streams:
concat = concatenate([maxp_1, maxp_2, maxp_3])
# dense hidden layers:
dense_layer = Dense(n_dense,
activation='relu', name='dense')(concat)
drop_dense_layer = Dropout(dropout, name='drop_dense')(dense_layer)
dense_2 = Dense(int(n_dense/4),
activation='relu', name='dense_2')(drop_dense_layer)
dropout_2 = Dropout(dropout, name='drop_dense_2')(dense_2)
# sigmoid output layer:
predictions = Dense(1, activation='sigmoid', name='output')(dropout_2)
# create model:
model = Model(input_layer, predictions)
model.summary()
|
_____no_output_____
|
MIT
|
notebooks/multi_convnet_sentiment_classifier.ipynb
|
ewbolme/DLTFpT
|
Configure model
|
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
|
_____no_output_____
|
MIT
|
notebooks/multi_convnet_sentiment_classifier.ipynb
|
ewbolme/DLTFpT
|
Train!
|
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_valid, y_valid), callbacks=[modelcheckpoint])
|
_____no_output_____
|
MIT
|
notebooks/multi_convnet_sentiment_classifier.ipynb
|
ewbolme/DLTFpT
|
Evaluate
|
model.load_weights(output_dir+"/weights.02.hdf5")
y_hat = model.predict(x_valid)
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
"{:0.2f}".format(roc_auc_score(y_valid, y_hat)*100.0)
|
_____no_output_____
|
MIT
|
notebooks/multi_convnet_sentiment_classifier.ipynb
|
ewbolme/DLTFpT
|
Factory Planning. Level: Intermediate. Objective and Prerequisites: This model and Factory Planning II are both examples of production planning problems. In production planning problems, one must choose which products to produce, how much of each to produce, and which resources to use, in order to maximize profit or minimize cost while satisfying a series of constraints. These problems are common across a wide range of manufacturing settings. What You Will Learn: In this particular example, we will model and solve a production mix problem: in each period, we can manufacture a range of products. Each product requires different amounts of time on different machines and yields a different profit. The goal is to create an optimal multi-period production plan that maximizes profit. Some machines are unavailable in certain periods due to maintenance. Due to market limitations, there is an upper bound on the monthly sales of each product, and storage capacity is also limited. In Factory Planning II, we’ll add more complexity to this example; the month in which each machine is down for maintenance will be chosen as a part of the optimized plan. More information on this type of model can be found in example 3 of the fifth edition of Model Building in Mathematical Programming by H. P. Williams on pages 255-256 and 300-302. This modeling example is at the intermediate level, where we assume that you know Python and are familiar with the Gurobi Python API. In addition, you should have some knowledge about building mathematical optimization models. **Note:** You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=CommercialDataScience) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=AcademicDataScience) as an *academic user*. --- Problem Description: A factory makes seven products (Prod 1 to Prod 7) using a range of machines including: - Four grinders - Two vertical drills - Three horizontal drills - One borer - One planer. Each product has a defined profit contribution per unit sold (defined as the sales price per unit minus the cost of raw materials). In addition, the manufacturing of each product requires a certain amount of time on each machine (in hours). The contribution and manufacturing time values are shown below. A dash indicates that the manufacturing process for the given product does not require that machine.| | PROD1 | PROD2 | PROD3 | PROD4 | PROD5 | PROD6 | PROD7 || --- | --- | --- | --- | --- | --- | --- | --- || Profit | 10 | 6 | 8 | 4 | 11 | 9 | 3 || Grinding | 0.5 | 0.7 | - | - | 0.3 | 0.2 | 0.5 || Vertical Drilling | 0.1 | 0.2 | - | 0.3 | - | 0.6 | - || Horizontal Drilling | 0.2 | - | 0.8 | - | - | - | 0.6 || Boring | 0.05 | 0.03 | - | 0.07 | 0.1 | - | 0.08 || Planing | - | - | 0.01 | - | 0.05 | - | 0.05 |In each of the six months covered by this model, one or more of the machines is scheduled to be down for maintenance and as a result will not be available to use for production that month. The maintenance schedule is as follows:| Month | Machine || --- | --- || January | One grinder || February | Two horizontal drills || March | One borer || April | One vertical drill || May | One grinder and one vertical drill || June | One horizontal drill |There are limitations on how many of each product can be sold in a given month. 
These limits are shown below:| Month | PROD1 | PROD2 | PROD3 | PROD4 | PROD5 | PROD6 | PROD7 || --- | --- | --- | --- | --- | --- | --- | --- || January | 500 | 1000 | 300 | 300 | 800 | 200 | 100 || February | 600 | 500 | 200 | 0 | 400 | 300 | 150 || March | 300 | 600 | 0 | 0 | 500 | 400 | 100 || April | 200 | 300 | 400 | 500 | 200 | 0 | 100 || May | 0 | 100 | 500 | 100 | 1000 | 300 | 0 || June | 500 | 500 | 100 | 300 | 1100 | 500 | 60 |Up to 100 units of each product may be stored in inventory at a cost of $\$0.50$ per unit per month. At the start of January, there is no product inventory. However, by the end of June, there should be 50 units of each product in inventory.The factory produces products six days a week using two eight-hour shifts per day. It may be assumed that each month consists of 24 working days. Also, for the purposes of this model, there are no production sequencing issues that need to be taken into account.What should the production plan look like? Also, is it possible to recommend any price increases and determine the value of acquiring any new machines?This problem is based on a larger model built for the Cornish engineering company of Holman Brothers.--- Model Formulation Sets and Indices$t \in \text{Months}=\{\text{Jan},\text{Feb},\text{Mar},\text{Apr},\text{May},\text{Jun}\}$: Set of months.$p \in \text{Products}=\{1,2,\dots,7\}$: Set of products.$m \in \text{Machines}=\{\text{Grinder},\text{VertDrill},\text{horiDrill},\text{Borer},\text{Planer}\}$: Set of machines. Parameters$\text{hours_per_month} \in \mathbb{R}^+$: Time (in hours/month) available at any machine on a monthly basis. It results from multiplying the number of working days (24 days) by the number of shifts per day (2) by the duration of a shift (8 hours).$\text{max_inventory} \in \mathbb{N}$: Maximum number of units of a single product type that can be stored in inventory at any given month.$\text{holding_cost} \in \mathbb{R}^+$: Monthly cost (in USD/unit/month) of keeping in inventory a unit of any product type.$\text{store_target} \in \mathbb{N}$: Number of units of each product type to keep in inventory at the end of the planning horizon.$\text{profit}_p \in \mathbb{R}^+$: Profit (in USD/unit) of product $p$.$\text{installed}_m \in \mathbb{N}$: Number of machines of type $m$ installed in the factory.$\text{down}_{t,m} \in \mathbb{N}$: Number of machines of type $m$ scheduled for maintenance at month $t$.$\text{time_req}_{m,p} \in \mathbb{R}^+$: Time (in hours/unit) needed on machine $m$ to manufacture one unit of product $p$.$\text{max_sales}_{t,p} \in \mathbb{N}$: Maximum number of units of product $p$ that can be sold at month $t$. Decision Variables$\text{make}_{t,p} \in \mathbb{R}^+$: Number of units of product $p$ to manufacture at month $t$.$\text{store}_{t,p} \in [0, \text{max_inventory}] \subset \mathbb{R}^+$: Number of units of product $p$ to store at month $t$.$\text{sell}_{t,p} \in [0, \text{max_sales}_{t,p}] \subset \mathbb{R}^+$: Number of units of product $p$ to sell at month $t$.**Assumption:** We can produce fractional units. 
Objective Function- **Profit:** Maximize the total profit (in USD) of the planning horizon.\begin{equation}\text{Maximize} \quad Z = \sum_{t \in \text{Months}}\sum_{p \in \text{Products}}(\text{profit}_p*\text{make}_{t,p} - \text{holding_cost}*\text{store}_{t,p})\tag{0}\end{equation} Constraints- **Initial Balance:** For each product $p$, the number of units produced should be equal to the number of units sold plus the number stored (in units of product).\begin{equation}\text{make}_{\text{Jan},p} = \text{sell}_{\text{Jan},p} + \text{store}_{\text{Jan},p} \quad \forall p \in \text{Products}\tag{1}\end{equation}- **Balance:** For each product $p$, the number of units produced in month $t$ and the ones previously stored should be equal to the number of units sold and stored in that month (in units of product).\begin{equation}\text{store}_{t-1,p} + \text{make}_{t,p} = \text{sell}_{t,p} + \text{store}_{t,p} \quad \forall (t,p) \in \text{Months} \setminus \{\text{Jan}\} \times \text{Products}\tag{2}\end{equation}- **Inventory Target:** The number of units of product $p$ kept in inventory at the end of the planning horizon should hit the target (in units of product).\begin{equation}\text{store}_{\text{Jun},p} = \text{store_target} \quad \forall p \in \text{Products}\tag{3}\end{equation}- **Machine Capacity:** Total time used to manufacture any product at machine type $m$ cannot exceed its monthly capacity (in hours).\begin{equation}\sum_{p \in \text{Products}}\text{time_req}_{m,p}*\text{make}_{t,p} \leq \text{hours_per_month}*(\text{installed}_m - \text{down}_{t,m}) \quad \forall (t,m) \in \text{Months} \times \text{Machines}\tag{4}\end{equation}--- Python ImplementationWe import the Gurobi Python Module and other Python libraries.
|
import gurobipy as gp
import numpy as np
import pandas as pd
from gurobipy import GRB
# tested with Python 3.7.0 & Gurobi 9.0
|
_____no_output_____
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
Input DataWe define all the input data of the model.
|
# Parameters
products = ["Prod1", "Prod2", "Prod3", "Prod4", "Prod5", "Prod6", "Prod7"]
machines = ["grinder", "vertDrill", "horiDrill", "borer", "planer"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun"]
profit = {"Prod1":10, "Prod2":6, "Prod3":8, "Prod4":4, "Prod5":11, "Prod6":9, "Prod7":3}
time_req = {
"grinder": { "Prod1": 0.5, "Prod2": 0.7, "Prod5": 0.3,
"Prod6": 0.2, "Prod7": 0.5 },
"vertDrill": { "Prod1": 0.1, "Prod2": 0.2, "Prod4": 0.3,
"Prod6": 0.6 },
"horiDrill": { "Prod1": 0.2, "Prod3": 0.8, "Prod7": 0.6 },
"borer": { "Prod1": 0.05,"Prod2": 0.03,"Prod4": 0.07,
"Prod5": 0.1, "Prod7": 0.08 },
"planer": { "Prod3": 0.01,"Prod5": 0.05,"Prod7": 0.05 }
}
# number of machines down
down = {("Jan","grinder"): 1, ("Feb", "horiDrill"): 2, ("Mar", "borer"): 1,
("Apr", "vertDrill"): 1, ("May", "grinder"): 1, ("May", "vertDrill"): 1,
("Jun", "planer"): 1, ("Jun", "horiDrill"): 1}
# number of each machine available
installed = {"grinder":4, "vertDrill":2, "horiDrill":3, "borer":1, "planer":1}
# market limitation of sells
max_sales = {
("Jan", "Prod1") : 500,
("Jan", "Prod2") : 1000,
("Jan", "Prod3") : 300,
("Jan", "Prod4") : 300,
("Jan", "Prod5") : 800,
("Jan", "Prod6") : 200,
("Jan", "Prod7") : 100,
("Feb", "Prod1") : 600,
("Feb", "Prod2") : 500,
("Feb", "Prod3") : 200,
("Feb", "Prod4") : 0,
("Feb", "Prod5") : 400,
("Feb", "Prod6") : 300,
("Feb", "Prod7") : 150,
("Mar", "Prod1") : 300,
("Mar", "Prod2") : 600,
("Mar", "Prod3") : 0,
("Mar", "Prod4") : 0,
("Mar", "Prod5") : 500,
("Mar", "Prod6") : 400,
("Mar", "Prod7") : 100,
("Apr", "Prod1") : 200,
("Apr", "Prod2") : 300,
("Apr", "Prod3") : 400,
("Apr", "Prod4") : 500,
("Apr", "Prod5") : 200,
("Apr", "Prod6") : 0,
("Apr", "Prod7") : 100,
("May", "Prod1") : 0,
("May", "Prod2") : 100,
("May", "Prod3") : 500,
("May", "Prod4") : 100,
("May", "Prod5") : 1000,
("May", "Prod6") : 300,
("May", "Prod7") : 0,
("Jun", "Prod1") : 500,
("Jun", "Prod2") : 500,
("Jun", "Prod3") : 100,
("Jun", "Prod4") : 300,
("Jun", "Prod5") : 1100,
("Jun", "Prod6") : 500,
("Jun", "Prod7") : 60,
}
holding_cost = 0.5
max_inventory = 100
store_target = 50
hours_per_month = 2*8*24
|
_____no_output_____
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
Model DeploymentWe create a model and the variables. For each product (seven kinds of products) and each time period (month), we will create variables for the amount of which products get manufactured, held, and sold. In each month, there is an upper limit on the amount of each product that can be sold. This is due to market limitations.
|
factory = gp.Model('Factory Planning I')
make = factory.addVars(months, products, name="Make") # quantity manufactured
store = factory.addVars(months, products, ub=max_inventory, name="Store") # quantity stored
sell = factory.addVars(months, products, ub=max_sales, name="Sell") # quantity sold
|
Using license file c:\gurobi\gurobi.lic
Set parameter TokenServer to value SANTOS-SURFACE-
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
Next, we insert the constraints. The balance constraints ensure that the amount of product that is in storage in the prior month plus the amount that gets manufactured equals the amount that is sold and held for each product in the current month. This ensures that all products in the model are manufactured in some month. The initial storage is empty.
|
#1. Initial Balance
Balance0 = factory.addConstrs((make[months[0], product] == sell[months[0], product]
+ store[months[0], product] for product in products), name="Initial_Balance")
#2. Balance
Balance = factory.addConstrs((store[months[months.index(month) -1], product] +
make[month, product] == sell[month, product] + store[month, product]
for product in products for month in months
if month != months[0]), name="Balance")
|
_____no_output_____
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
The Inventory Target constraints force that at the end of the last month the storage contains the specified amount of each product.
|
#3. Inventory Target
TargetInv = factory.addConstrs((store[months[-1], product] == store_target for product in products), name="End_Balance")
|
_____no_output_____
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
The capacity constraints ensure that, for each month, the time all products require on a certain kind of machine is less than or equal to the available hours for that type of machine in that month multiplied by the number of available machines in that period. Each product requires some machine hours on different machines. Each machine is down in one or more months due to maintenance, so the number and type of available machines varies per month. There can be multiple machines per machine type.
|
#4. Machine Capacity
MachineCap = factory.addConstrs((gp.quicksum(time_req[machine][product] * make[month, product]
for product in time_req[machine])
<= hours_per_month * (installed[machine] - down.get((month, machine), 0))
for machine in machines for month in months),
name = "Capacity")
|
_____no_output_____
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
The objective is to maximize the profit of the company, which consists of the profit for each product minus the cost of storing the unsold products. This can be stated as:
|
#0. Objective Function
obj = gp.quicksum(profit[product] * sell[month, product] - holding_cost * store[month, product]
for month in months for product in products)
factory.setObjective(obj, GRB.MAXIMIZE)
|
_____no_output_____
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
Next, we start the optimization and Gurobi finds the optimal solution.
|
factory.optimize()
|
Gurobi Optimizer version 9.0.0 build v9.0.0rc2 (win64)
Optimize a model with 79 rows, 126 columns and 288 nonzeros
Model fingerprint: 0xead11e9d
Coefficient statistics:
Matrix range [1e-02, 1e+00]
Objective range [5e-01, 1e+01]
Bounds range [6e+01, 1e+03]
RHS range [5e+01, 2e+03]
Presolve removed 74 rows and 110 columns
Presolve time: 0.01s
Presolved: 5 rows, 16 columns, 21 nonzeros
Iteration Objective Primal Inf. Dual Inf. Time
0 1.2466500e+05 3.640000e+02 0.000000e+00 0s
2 9.3715179e+04 0.000000e+00 0.000000e+00 0s
Solved in 2 iterations and 0.01 seconds
Optimal objective 9.371517857e+04
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
--- AnalysisThe result of the optimization model shows that the maximum profit we can achieve is $\$93,715.18$.Let's see the solution that achieves that optimal result. Production PlanThis plan determines the amount of each product to make at each period of the planning horizon. For example, in February we make 700 units of product Prod1.
|
rows = months.copy()
columns = products.copy()
make_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for month, product in make.keys():
if (abs(make[month, product].x) > 1e-6):
make_plan.loc[month, product] = np.round(make[month, product].x, 1)
make_plan
|
_____no_output_____
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
Sales PlanThis plan defines the amount of each product to sell at each period of the planning horizon. For example, in February we sell 600 units of product Prod1.
|
rows = months.copy()
columns = products.copy()
sell_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for month, product in sell.keys():
if (abs(sell[month, product].x) > 1e-6):
sell_plan.loc[month, product] = np.round(sell[month, product].x, 1)
sell_plan
|
_____no_output_____
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
Inventory PlanThis plan reflects the amount of product in inventory at the end of each period of the planning horizon. For example, at the end of February we have 100 units of Prod1 in inventory.
|
rows = months.copy()
columns = products.copy()
store_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for month, product in store.keys():
if (abs(store[month, product].x) > 1e-6):
store_plan.loc[month, product] = np.round(store[month, product].x, 1)
store_plan
|
_____no_output_____
|
Apache-2.0
|
documents/Intermediate/FactoryPlanning1&2/factory_planning_1.ipynb
|
biancaitian/gurobi-official-examples
|
Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
|
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on="school_name")
school_data_complete
|
_____no_output_____
|
MIT
|
PyCitySchools_starter.ipynb
|
pratixashah/Pandas_City_School
|
District Summary* Calculate the total number of schools* Calculate the total number of students* Calculate the total budget* Calculate the average math score * Calculate the average reading score* Calculate the percentage of students with a passing math score (70 or greater)* Calculate the percentage of students with a passing reading score (70 or greater)* Calculate the percentage of students who passed math **and** reading (% Overall Passing)* Create a dataframe to hold the above results* Optional: give the displayed data cleaner formatting
|
total_school = len(school_data_complete["school_name"].unique())
total_student = sum(school_data_complete["size"].unique())
student_pass_math = school_data_complete.loc[school_data_complete["math_score"] >= 70]
student_pass_reading = school_data_complete.loc[school_data_complete["reading_score"] >= 70]
student_pass_math_and_reading = school_data_complete.loc[(school_data_complete["math_score"] >= 70) & (school_data_complete["reading_score"] >= 70)]
student_pass_math_percentage = student_pass_math["Student ID"].count()/total_student
student_pass_reading_percentage = student_pass_reading["Student ID"].count()/total_student
student_pass_math_and_reading_percentage = student_pass_math_and_reading["Student ID"].count()/total_student
district_summary_df = pd.DataFrame({
"Total Schools": [total_school],
"Total Students": [f"{total_student:,}"],
"Total Budget": [f"${sum(school_data_complete['budget'].unique()):,}"],
"Average Math Score":[f"${school_data_complete['math_score'].mean():.2f}"],
"Average Reading Score":[f"${school_data_complete['reading_score'].mean():.2f}"],
"% Passing Math":[f"{student_pass_math_percentage:.2%}"],
"% Passing Reading":[f"{student_pass_reading_percentage:.2%}"],
"% Overall Passing":[f"{student_pass_math_and_reading_percentage:.2%}"]
})
district_summary_df
|
_____no_output_____
|
MIT
|
PyCitySchools_starter.ipynb
|
pratixashah/Pandas_City_School
|
School Summary * Create an overview table that summarizes key metrics about each school, including: * School Name * School Type * Total Students * Total School Budget * Per Student Budget * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * % Overall Passing (The percentage of students that passed math **and** reading.) * Create a dataframe to hold the above results
|
group_by_school_data = school_data_complete.groupby(["school_name","type"])
group_by_school_pass_math = student_pass_math.groupby(["school_name","type"])
group_by_school_pass_reading = student_pass_reading.groupby(["school_name","type"])
group_by_school_pass_math_and_reading = student_pass_math_and_reading.groupby(["school_name","type"])
school_summary_df = pd.DataFrame({
"Total Students": group_by_school_data["school_name"].count(),
"Total School Budget": group_by_school_data['budget'].mean(),
"Per Student Budget": group_by_school_data["budget"].mean()/group_by_school_data["school_name"].count(),
"Average Math Score": group_by_school_data["math_score"].mean(),
"Average Reading Score": group_by_school_data["reading_score"].mean(),
"% Passing Math": group_by_school_pass_math["school_name"].count()/group_by_school_data["school_name"].count(),
"% Passing Reading": group_by_school_pass_reading["school_name"].count()/group_by_school_data["school_name"].count(),
"% Overall Passing": group_by_school_pass_math_and_reading["school_name"].count()/group_by_school_data["school_name"].count()
})
school_summary = school_summary_df.copy()
school_summary_df["Total School Budget"] = school_summary_df["Total School Budget"].map("${:,}".format)
school_summary_df["Per Student Budget"] = school_summary_df["Per Student Budget"].map("${:,.0f}".format)
school_summary_df["Average Math Score"] = school_summary_df["Average Math Score"].map("{:,.2f}".format)
school_summary_df["Average Reading Score"] = school_summary_df["Average Reading Score"].map("{:,.2f}".format)
school_summary_df["% Passing Math"] = school_summary_df["% Passing Math"].map("{:.2%}".format)
school_summary_df["% Passing Reading"] = school_summary_df["% Passing Reading"].map("{:,.2%}".format)
school_summary_df["% Overall Passing"] = school_summary_df["% Overall Passing"].map("{:,.2%}".format)
school_summary_df
|
_____no_output_____
|
MIT
|
PyCitySchools_starter.ipynb
|
pratixashah/Pandas_City_School
|
Top Performing Schools (By % Overall Passing) * Sort and display the top five performing schools by % overall passing.
|
group_by_school_data_sorted = school_summary_df.sort_values("% Overall Passing", ascending=False)
group_by_school_data_sorted.head(5)
|
_____no_output_____
|
MIT
|
PyCitySchools_starter.ipynb
|
pratixashah/Pandas_City_School
|
Bottom Performing Schools (By % Overall Passing) * Sort and display the five worst-performing schools by % overall passing.
|
group_by_school_data_sorted = school_summary_df.sort_values("% Overall Passing")
group_by_school_data_sorted.head(5)
|
_____no_output_____
|
MIT
|
PyCitySchools_starter.ipynb
|
pratixashah/Pandas_City_School
|
Math Scores by Grade * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school. * Create a pandas series for each grade. Hint: use a conditional statement. * Group each series by school * Combine the series into a dataframe * Optional: give the displayed data cleaner formatting
|
# math_score_by_grade = school_data_complete.groupby(["school_name","grade"])
# math_score_by_grade["math_score"].mean()
math_score_by_grade_9 = school_data_complete.loc[school_data_complete["grade"] == "9th"]
math_score_by_grade_9_school = math_score_by_grade_9.groupby("school_name")
math_score_by_grade_10 = school_data_complete.loc[school_data_complete["grade"] == "10th"]
math_score_by_grade_10_school = math_score_by_grade_10.groupby("school_name")
math_score_by_grade_11 = school_data_complete.loc[school_data_complete["grade"] == "11th"]
math_score_by_grade_11_school = math_score_by_grade_11.groupby("school_name")
math_score_by_grade_12 = school_data_complete.loc[school_data_complete["grade"] == "12th"]
math_score_by_grade_12_school = math_score_by_grade_12.groupby("school_name")
math_score_by_grade_df = pd.DataFrame({
"9th": math_score_by_grade_9_school["math_score"].mean().map("{:.2f}".format),
"10th":math_score_by_grade_10_school["math_score"].mean().map("{:.2f}".format),
"11th": math_score_by_grade_11_school["math_score"].mean().map("{:.2f}".format),
"12th":math_score_by_grade_12_school["math_score"].mean().map("{:.2f}".format)
})
math_score_by_grade_df
|
_____no_output_____
|
MIT
|
PyCitySchools_starter.ipynb
|
pratixashah/Pandas_City_School
|
Reading Score by Grade * Perform the same operations as above for reading scores
|
# reading_score_by_grade = school_data_complete.groupby(["school_name","grade"])
# reading_score_by_grade["reading_score"].mean()
reading_score_by_grade_9 = school_data_complete.loc[school_data_complete["grade"] == "9th"]
reading_score_by_grade_9_school = reading_score_by_grade_9.groupby("school_name")
reading_score_by_grade_10 = school_data_complete.loc[school_data_complete["grade"] == "10th"]
reading_score_by_grade_10_school = reading_score_by_grade_10.groupby("school_name")
reading_score_by_grade_11 = school_data_complete.loc[school_data_complete["grade"] == "11th"]
reading_score_by_grade_11_school = reading_score_by_grade_11.groupby("school_name")
reading_score_by_grade_12 = school_data_complete.loc[school_data_complete["grade"] == "12th"]
reading_score_by_grade_12_school = reading_score_by_grade_12.groupby("school_name")
reading_score_by_grade_df = pd.DataFrame({
"9th": reading_score_by_grade_9_school["reading_score"].mean().map("{:.2f}".format),
"10th":reading_score_by_grade_10_school["reading_score"].mean().map("{:.2f}".format),
"11th":reading_score_by_grade_11_school["reading_score"].mean().map("{:.2f}".format),
"12th":reading_score_by_grade_12_school["reading_score"].mean().map("{:.2f}".format)
})
reading_score_by_grade_df
|
_____no_output_____
|
MIT
|
PyCitySchools_starter.ipynb
|
pratixashah/Pandas_City_School
|
Scores by School Spending * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following: * Average Math Score * Average Reading Score * % Passing Math * % Passing Reading * Overall Passing Rate (Average of the above two)
|
score_by_spending = school_summary.copy()
bins=[0,585,630,645,680]
labels=["<$585","$585-630","$630-645","$645-680"]
score_by_spending["Spending Ranges (Per Student)"] = pd.cut(score_by_spending["Per Student Budget"],bins,labels=labels, include_lowest=True)
score_by_spending
score_by_spending_group = score_by_spending.groupby(["Spending Ranges (Per Student)"])
score_by_spending_df = pd.DataFrame({
"Average Math Score":map("{:.2f}".format,score_by_spending_group["Average Math Score"].mean()),
"Average Reading Score":map("{:.2f}".format,score_by_spending_group["Average Reading Score"].mean()),
"% Passing Math": map("{:.2%}".format,score_by_spending_group["% Passing Math"].mean()),
"% Passing Reading": map("{:.2%}".format,score_by_spending_group["% Passing Reading"].mean()),
"% Overall Passing":map("{:.2%}".format, score_by_spending_group["% Overall Passing"].mean())
})
score_by_spending_df
|
_____no_output_____
|
MIT
|
PyCitySchools_starter.ipynb
|
pratixashah/Pandas_City_School
|
Scores by School Size * Perform the same operations as above, based on school size.
|
score_by_school_size = school_summary.copy()
bins=[0,1000,2000,5000]
labels = ["Small (<1000)","Medium (1000-2000)","Large (2000-5000)"]
score_by_school_size["School Type"] = pd.cut(score_by_school_size["Total Students"],bins,labels=labels)
score_by_school_size_group = score_by_school_size.groupby(["School Type"])
score_by_school_size_df = pd.DataFrame({
"Average Math Score":map("{:.2f}".format,score_by_school_size_group["Average Math Score"].mean()),
"Average Reading Score":map("{:.2f}".format,score_by_school_size_group["Average Reading Score"].mean()),
"% Passing Math": map("{:.2%}".format,score_by_school_size_group["% Passing Math"].mean()),
"% Passing Reading": map("{:.2%}".format,score_by_school_size_group["% Passing Reading"].mean()),
"% Overall Passing":map("{:.2%}".format, score_by_school_size_group["% Overall Passing"].mean())
})
score_by_school_size_df
|
_____no_output_____
|
MIT
|
PyCitySchools_starter.ipynb
|
pratixashah/Pandas_City_School
|
Scores by School Type * Perform the same operations as above, based on school type
|
score_by_school_type = school_summary.copy()
score_by_school_type_group = score_by_school_type.groupby(["type"])
score_by_school_type_df = pd.DataFrame({
"Average Math Score":map("{:.2f}".format,score_by_school_type_group["Average Math Score"].mean()),
"Average Reading Score":map("{:.2f}".format,score_by_school_type_group["Average Reading Score"].mean()),
"% Passing Math": map("{:.2%}".format,score_by_school_type_group["% Passing Math"].mean()),
"% Passing Reading": map("{:.2%}".format,score_by_school_type_group["% Passing Reading"].mean()),
"% Overall Passing":map("{:.2%}".format, score_by_school_type_group["% Overall Passing"].mean())
})
score_by_school_type_df
|
_____no_output_____
|
MIT
|
PyCitySchools_starter.ipynb
|
pratixashah/Pandas_City_School
|
Lesson 2: Computer Vision Fundamentals Submission, Markus Schwickert, 2018-02-22 Photos
|
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import os
%matplotlib inline
#reading in an image
k1=0 # select here which of the images in the directory you want to process (0-5)
test_images=os.listdir("test_images/")
print ('test_images/'+test_images[k1])
image = mpimg.imread('test_images/'+test_images[k1])
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# Grab the x and y size and make a copy of the image
ysize = image.shape[0]
xsize = image.shape[1]
# Note: always make a copy rather than simply using "="
color_select = np.copy(image)
# Define our color selection criteria
# Note: if you run this code, you'll find these are not sensible values!!
# But you'll get a chance to play with them soon in a quiz
red_threshold = 180
green_threshold = 180
blue_threshold = 100
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
# Identify pixels below the threshold
thresholds = (image[:,:,0] < rgb_threshold[0]) \
| (image[:,:,1] < rgb_threshold[1]) \
| (image[:,:,2] < rgb_threshold[2])
color_select[thresholds] = [0,0,0]
# Display the image
plt.imshow(color_select)
plt.show()
gray = cv2.cvtColor(color_select, cv2.COLOR_RGB2GRAY) #grayscale conversion
plt.imshow(gray, cmap='gray')
# Define a polygon region of interest
# Keep in mind the origin (x=0, y=0) is in the upper left in image processing
left_bottom = [0, ysize]
right_bottom = [xsize, ysize]
fp1 = [450, 320]
fp2 = [490, 320]
mask = np.zeros_like(gray)
ignore_mask_color = 255
# This time we are defining a four sided polygon to mask
vertices = np.array([[left_bottom, fp1, fp2, right_bottom]], dtype=np.int32)
cv2.fillPoly(mask, vertices, ignore_mask_color)
grayROI = cv2.bitwise_and(gray, mask)
# Display the image
plt.imshow(grayROI, cmap='gray')
# Canny edge detection
# Define a kernel size for Gaussian smoothing / blurring
# Note: this step is optional as cv2.Canny() applies a 5x5 Gaussian internally
kernel_size = 5
blur_gray = cv2.GaussianBlur(grayROI,(kernel_size, kernel_size), 0)
# Define parameters for Canny and run it
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
# Display the image
plt.imshow(edges, cmap='Greys_r')
# Hough Transformation
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = 1
theta = np.pi/180
threshold = 1
min_line_length = 16
max_line_gap = 20
line_image = np.copy(image)*0 #creating a blank to draw lines on
# Run Hough on edge detected image
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
min_line_length, max_line_gap)
# Iterate over the output "lines" and draw lines on the blank
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)
# Create a "color" binary image to combine with line image
#color_edges = np.dstack((edges, edges, edges))
# Draw the lines on the edge image
print (test_images[k1])
combo = cv2.addWeighted(image, 0.8, line_image, 1, 0)
plt.imshow(combo)
mpimg.imsave('MS_images/'+test_images[k1], combo)
|
_____no_output_____
|
MIT
|
L2_computer_vision.ipynb
|
schwickert/auto
|
Estimating $\pi$ by Sampling PointsBy Evgenia "Jenny" Nitishinskaya and Delaney Granizo-MackenzieNotebook released under the Creative Commons Attribution 4.0 License.---A stochastic way to estimate the value of $\pi$ is to sample points from a square area. Some of the points will fall within the area of the circle defined by $x^2 + y^2 = 1$; we count what percentage of all points fall within this area, which allows us to estimate the area of the circle and therefore $\pi$.
|
# Import libraries
import math
import numpy as np
import matplotlib.pyplot as plt
in_circle = 0
outside_circle = 0
n = 10 ** 4
# Draw many random points
X = np.random.rand(n)
Y = np.random.rand(n)
for i in range(n):
if X[i]**2 + Y[i]**2 > 1:
outside_circle += 1
else:
in_circle += 1
area_of_quarter_circle = float(in_circle)/(in_circle + outside_circle)
pi_estimate = area_of_circle = area_of_quarter_circle * 4
pi_estimate
|
_____no_output_____
|
CC-BY-4.0
|
presentations/How To - Estimate Pi.ipynb
|
johnmathews/quant1
|
We can visualize the process to see how it works.
|
# Plot a circle for reference
circle1=plt.Circle((0,0),1,color='r', fill=False, lw=2)
fig = plt.gcf()
fig.gca().add_artist(circle1)
# Set the axis limits so the circle doesn't look skewed
plt.xlim((0, 1.8))
plt.ylim((0, 1.2))
plt.scatter(X, Y)
|
_____no_output_____
|
CC-BY-4.0
|
presentations/How To - Estimate Pi.ipynb
|
johnmathews/quant1
|
Finally, let's see how our estimate gets better as we increase $n$. We'll do this by computing the estimate for $\pi$ at each step and plotting that estimate to see how it converges.
|
in_circle = 0
outside_circle = 0
n = 10 ** 3
# Draw many random points
X = np.random.rand(n)
Y = np.random.rand(n)
# Make a new array
pi = np.ndarray(n)
for i in range(n):
if X[i]**2 + Y[i]**2 > 1:
outside_circle += 1
else:
in_circle += 1
area_of_quarter_circle = float(in_circle)/(in_circle + outside_circle)
pi_estimate = area_of_circle = area_of_quarter_circle * 4
pi[i] = pi_estimate
plt.plot(range(n), pi)
plt.xlabel('n')
plt.ylabel('pi estimate')
plt.plot(range(n), [math.pi] * n)
|
_____no_output_____
|
CC-BY-4.0
|
presentations/How To - Estimate Pi.ipynb
|
johnmathews/quant1
|
Training a recommender system on a standalone datasetTrain a recommender system with fastai using a standalone dataset- This notebook ingests the Amazon reviews dataset (https://www.kaggle.com/saurav9786/amazon-product-reviews)
|
# imports for notebook boilerplate
!pip install -Uqq fastbook
import fastbook
from fastbook import *
from fastai.collab import *
# set up the notebook for fast.ai
fastbook.setup_book()
modifier = 'apr13'
|
_____no_output_____
|
MIT
|
ch5/training_recommender_systems_on_standalone_dataset.ipynb
|
Vega95/Deep-Learning-with-fastai-Cookbook
|
Ingest the dataset- define the path object- define a dataframe to contain the dataset
|
# ingest the standalone dataset
# this step assumes you have completed the steps in "Getting Ready"
# in section "Training a recommender system on a standalone dataset" of Chapter 5
path = URLs.path('amazon_reviews')
# examine the directory structure
path.ls()
# ingest the dataset into a Pandas dataframe
df = pd.read_csv(path/'ratings_Electronics.csv',header = None)
# add the column names described in https://www.kaggle.com/saurav9786/amazon-product-reviews
df.columns = ['userID','productID','rating','timestamp']
|
_____no_output_____
|
MIT
|
ch5/training_recommender_systems_on_standalone_dataset.ipynb
|
Vega95/Deep-Learning-with-fastai-Cookbook
|
Examine the dataset
|
# examine the first few records in the dataframe
df.head()
# get the number of records in the dataset
df.shape
# get the count of unique values in each column of the dataset
df.nunique()
# count the number of missing values in each column of the dataset
df.isnull().sum()
df['rating'].nunique()
%%time
# defined a CollabDataLoaders object
dls = CollabDataLoaders.from_df(df, bs=64)
dls.show_batch()
|
_____no_output_____
|
MIT
|
ch5/training_recommender_systems_on_standalone_dataset.ipynb
|
Vega95/Deep-Learning-with-fastai-Cookbook
|
Define and train the model
|
%%time
# define the model
learn = collab_learner(dls, y_range=[0, 5.0])
%%time
# train the model
learn.fit_one_cycle(1)
|
_____no_output_____
|
MIT
|
ch5/training_recommender_systems_on_standalone_dataset.ipynb
|
Vega95/Deep-Learning-with-fastai-Cookbook
|
Exercise the trained model- define a dataframe containing test data- apply the trained model to the dataframe
|
# set values for test dataframe
scoring_columns = ['userID','productID']
test_df = pd.DataFrame(columns=scoring_columns)
test_df.at[0,'userID'] = 'A2NYK9KWFMJV4Y'
test_df.at[0,'productID'] = 'B008ABOJKS'
test_df.at[1,'userID'] = 'A29ZTEO6EKSRDV'
test_df.at[1,'productID'] = 'B006202R44'
test_df.head()
dl = learn.dls.test_dl(test_df)
learn.get_preds(dl=dl)
learn.summary()
# save the model - first save the current path
keep_path = learn.path
learn.path
learn.path = Path('/notebooks/temp')
learn.model_dir
learn.save('recomm_'+modifier)
learn.path = keep_path
|
_____no_output_____
|
MIT
|
ch5/training_recommender_systems_on_standalone_dataset.ipynb
|
Vega95/Deep-Learning-with-fastai-Cookbook
|
Good review of numpy https://www.youtube.com/watch?v=GB9ByFAIAH4 Numpy library - Remember to do pip install numpy Numpy provides support for math and logical operations on arrays https://www.tutorialspoint.com/numpy/index.htm It supports many more data types than python https://www.tutorialspoint.com/numpy/numpy_data_types.htm Only a single data type is allowed in any particular array
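As a quick sketch of that single-dtype rule (the array and variable names below are only for illustration): mixing a float into an integer list, or doing float arithmetic on an integer array, makes NumPy upcast everything to one common dtype.

```python
import numpy as np

ints = np.array([1, 2, 3])
print(ints.dtype)              # an integer dtype, e.g. int64

mixed = np.array([1, 2.5, 3])  # one float forces the whole array to float
print(mixed.dtype)             # float64

promoted = ints + 0.5          # float arithmetic upcasts the result array
print(promoted.dtype, promoted)
```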
|
a = np.array([1,2,3,4])
print(id(a))
print(type(a))
b = np.array(a)
print(f'b = {id(b)}')
a = a + 1
a
|
140321654298944
<class 'numpy.ndarray'>
b = 140321654357568
|
MIT
|
04-Linear_Regresion_Python/NumpyIntro-Answers.ipynb
|
Chilefase/MIT-1.001
|
# arange vs linspace - both generate a numpy array of numbers
import numpy as np
np.linspace(0,10,5) # specifies No. of values with 0 and 10 being first and last
np.arange(0, 10, 5) # specifies step size=5 starting at 0 up to but NOT including last
x = np.linspace(0,10,11) # generate 10 numbers
x = x + 1 # operates on all elements of the array
type(x)
# generate points and use function to transform them
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0,10,0.1)
y = np.sin(x)
plt.plot(x,y)
import numpy as np
import matplotlib.pyplot as plt
a = np.random.choice(np.linspace(0,10,10),100)
plt.hist(a,bins=np.arange(0,11,1))
np.linspace(0,10,11)
plt.hist(a,bins=np.arange(0,11,1),density=True)
# Use bins 1/2 wide - what does this plot mean?
plt.hist(a,bins=np.arange(0,11,0.5),density=True)
# Data as sampling from an unseen population
# Choose at random from 1 through 10
import numpy as np
import matplotlib.pyplot as plt
a = np.random.choice(np.arange(0,10),100)
a = np.random.random(100)*10.0
a
|
_____no_output_____
|
MIT
|
04-Linear_Regresion_Python/NumpyIntro-Answers.ipynb
|
Chilefase/MIT-1.001
|
|
Normal Distribution $\text{the normal distribution is given by} \\$$$f(z)=\frac{1}{\sqrt{2 \pi}}e^{-\frac{(z)^2}{2}} $$$\text{This can be rewritten in term of the mean and variance} \\$$$f(x)=\frac{1}{\sigma \sqrt{2 \pi}}e^{-\frac{(x- \mu)^2}{2 \sigma^2}}$$The random variable $X$ described by the PDF is a normal variable that follows a normal distribution with mean $\mu$ and variance $\sigma^2$.$\text{Normal distribution notation is} \\$$$X \sim N(\mu,\sigma^2) \\$$The total area under the PDF curve equals 1.
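As a quick numerical sanity check of that last claim (a minimal sketch; the grid bounds and step size are arbitrary choices), we can evaluate the PDF on a fine grid and sum the rectangle areas:

```python
import numpy as np

mu, sigma = 0.0, 1.0
x = np.linspace(-6, 6, 2001)
pdf = np.exp(-(x - mu)**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi))

# Riemann-sum approximation of the area under the curve; should be very close to 1
dx = x[1] - x[0]
print((pdf * dx).sum())
```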
|
# Normal Data
a = np.random.normal(10,2,10)
plt.hist(a,bins=np.arange(5,16,1),density=True)
plt.scatter(np.arange(5,15,1),a)
plt.plot(a)
plt.hist(a,bins=np.arange(5,16,0.1), density=True)
plt.hist(a,bins=np.arange(5,16,1))
import numpy as np
import matplotlib.pyplot as plt
a = np.random.normal(0,2,200)
plt.hist(a, bins=np.arange(-5,5,1))
|
_____no_output_____
|
MIT
|
04-Linear_Regresion_Python/NumpyIntro-Answers.ipynb
|
Chilefase/MIT-1.001
|
Mean and Variance$$\mu = \frac{\sum(x)}{N}$$$$\sigma^{2} =\sum{\frac{(x - \mu)^{2}}{N} }$$
|
# IN CLASS - Generate a Population and calculate its mean and variance
import matplotlib.pyplot as plt
Npoints = 10
p = np.random.normal(0,10,Npoints*100)
def myMean(sample):
    N = len(sample)
    total = 0
    for x in sample:
        total = total + x
    return total/N
pmean = myMean(p)
print(f'mean= {pmean}')
def myVar(sample, mean):
    tsample = sample - mean
    var = sum(tsample * tsample)/len(sample)
    return var
pvar = myVar(p, pmean)
print(f'Variance = {pvar}')
print(f'realVar = {np.var(p)}')  # compare against NumPy's built-in variance
import numpy as np
import scipy as scipy
import matplotlib.pyplot as plt
from scipy.stats import norm
plt.style.use('ggplot')
fig, ax = plt.subplots()
x= np.arange(34,40,0.01)
y = np.random.normal(x)
lines = ax.plot(x, norm.pdf(x,loc=37,scale=1))
ax.set_ylim(0,0.45) # range
ax.set_xlabel('x',fontsize=20) # set x label
ax.set_ylabel('pdf(x)',fontsize=20,rotation=90) # set y label
ax.xaxis.set_label_coords(0.55, -0.05) # x label coordinate
ax.yaxis.set_label_coords(-0.1, 0.5) # y label coordinate
px=np.arange(36,37,0.1)
plt.fill_between(px,norm.pdf(px,loc=37,scale=1),color='r',alpha=0.5)
plt.show()
a = np.random.normal(10,1,20)
a
|
_____no_output_____
|
MIT
|
04-Linear_Regresion_Python/NumpyIntro-Answers.ipynb
|
Chilefase/MIT-1.001
|
Calculate the mean and subtract the mean from each data value
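A minimal sketch of that step (the sample is generated the same way as in the plotting cell that follows, but the variable names here are only illustrative):

```python
import numpy as np

y = np.random.normal(loc=10, scale=2, size=20)
y_mean = y.mean()                # the sample mean
deviations = y - y_mean          # subtract the mean from each data value

print(y_mean)
print(deviations.mean())         # ~0 by construction
print((deviations**2).mean())    # average squared deviation = variance
```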
|
from matplotlib import collections as matcoll
Npoints = 20
x = np.arange(0,Npoints)
y = np.random.normal(loc=10, scale=2, size=Npoints )
lines = []
for i in range(Npoints):
pair=[(x[i],0), (x[i], y[i])]
lines.append(pair)
linecoll = matcoll.LineCollection(lines)
fig, ax = plt.subplots()
ax.add_collection(linecoll)
plt.scatter(x,y, marker='o', color='blue')
plt.xticks(x)
plt.ylim(0,40)
plt.show()
ylim=(0,10)
|
_____no_output_____
|
MIT
|
04-Linear_Regresion_Python/NumpyIntro-Answers.ipynb
|
Chilefase/MIT-1.001
|
Numpy 2D Arrays
|
## Multi-Dimensional Arrays
# <img src='multiArray.png' width = 500>
import numpy as np
# Numpy 2_D Arrays
a = [0,1,2]
b = [3,4,5]
c = [6,7,8]
z = [a,
b,
c]
a = np.arange(0,9)
z = a.reshape(3,3)
z
z[2,2]
z[0:3:2,0:3:2]
## Exercise - Produce an 8x8 checkerboard of 1s and 0s
import numpy as np
import seaborn as sns
from matplotlib.colors import ListedColormap as lc
Z = np.zeros((8,8),dtype=int)
Z[1::2,::2] = 1
Z[::2,1::2] = 1
print(Z)
sns.heatmap(Z, annot=True,linewidths=5,cbar=False)
import seaborn as sns
sns.heatmap(Z, annot=True,linewidths=5,cbar=False)
# IN CLASS - use the above formula to plot the normal distribution over x = -4 to 4
# take mean = 0, and sigma = 1
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-4,4,100)
y = (np.exp(-(x*x)/2))/np.sqrt(2*np.pi)
plt.plot(x,y)
import scipy.integrate as integrate
result = integrate.quad(lambda x: np.exp(-(x*x)/2)/np.sqrt(2*np.pi), -5, 5)
result
|
_____no_output_____
|
MIT
|
04-Linear_Regresion_Python/NumpyIntro-Answers.ipynb
|
Chilefase/MIT-1.001
|
Practical Data Science in Python Unsupervised Learning: Classifying Spotify Tracks by Genre with $k$-Means ClusteringAuthors: Matthew Finney, Paulina Toro Isaza Run this First! (Function Definitions)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_palette('Set1')
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from IPython.display import Audio, Image, clear_output
rs = 123
np.random.seed(rs)
def pca_plot(df, classes=None):
# Scale data for PCA
scaled_df = StandardScaler().fit_transform(df)
# Fit the PCA and extract the first two components
pca_results = PCA().fit_transform(scaled_df)
pca1_scores = pca_results[:,0]
pca2_scores = pca_results[:,1]
# Sort the legend labels
if classes is None:
hue_order = None
n_classes = 0
elif str(classes[0]).isnumeric():
classes = ['Cluster {}'.format(x) for x in classes]
hue_order = sorted(np.unique(classes))
n_classes = np.max(np.unique(classes).shape)
else:
hue_order = sorted(np.unique(classes))
n_classes = np.max(np.unique(classes).shape)
# Plot the first two principal components
plt.figure(figsize=(8.5,8.5))
plt.grid()
sns.scatterplot(pca1_scores, pca2_scores, s=50, hue=classes,
hue_order=hue_order, palette='Set1')
plt.xlabel("Principal Component {}".format(1))
plt.ylabel("Principal Component {}".format(2))
plt.title('Principal Component Plot')
plt.show()
def tracklist_player(track_list, df, header="Track Player"):
action = ''
for track in track_list:
print('{}\nTrack Name: {}\nArtist Name(s): {}'.format(header, df.loc[track,'name'],df.loc[track,'artist']))
try:
display(Image(df.loc[track,'cover_url'], format='jpeg', height=150))
except:
print('No cover art available')
try:
display(Audio(df.loc[track,'preview_url']+'.mp3', autoplay=True))
except:
print('No audio preview available')
print('Press <Enter> for the next track or q then <Enter> to quit: ')
action = input()
clear_output()
if action=='q':
break
print('No more clusters. Goodbye!')
def play_cluster_tracks(track_df, cluster_column="best_cluster"):
for cluster in sorted(track_df[cluster_column].unique()):
# Get the tracks in the cluster, and shuffle them for variety
tracks_list = track_df[track_df[cluster_column] == cluster].index.values
np.random.shuffle(tracks_list)
# Instantiate a tracklist player
tracklist_player(tracks_list, df=track_df, header='{}'.format(cluster))
# Load Track DataFrame
path = 'https://raw.githubusercontent.com/MattFinney/practical_data_science_in_python/main/spotify_track_data.csv'
tracks_df = pd.read_csv(path)
# Columns from the track dataframe which are relevant for our analysis
audio_feature_cols = ['danceability', 'energy', 'key', 'loudness', 'mode',
'speechiness', 'acousticness', 'instrumentalness',
'liveness', 'valence', 'tempo', 'duration_ms',
'time_signature']
# Show the first five rows of our dataframe
tracks_df.head()
|
/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
|
MIT
|
Session_2_Practical_Data_Science.ipynb
|
MattFinney/practical_data_science_in_python
|
Recap from Session 1 In our earlier session, we started working with a dataset of Spotify tracks. We explored the variables in the dataset, and determined that audio features - like danceability, accousticness, and tempo - vary across the songs in our dataset and might help us to thoughtfully group the tracks into different playlists. We then used Principal Component Analysis (PCA), a dimensionality reduction technique, to visualize the variation in songs.We'll pick up where we left off, with the PCA plot from last time. If you're just joining us for Session 2, don't fret! Attending Session 1 is NOT a prerequisite to learn and have fun in Session 2 today!
|
# Plot the principal component analysis results
pca_plot(tracks_df[audio_feature_cols])
|
_____no_output_____
|
MIT
|
Session_2_Practical_Data_Science.ipynb
|
MattFinney/practical_data_science_in_python
|
Today: Classification using $k$-Means Clustering Our Principal Component Analysis in the first session helped us to visualize the variation of track audio features in just two dimensions. Looking at the scatterplot of the first two principal components above, we can see that there are a few different groups of tracks. But how do we mathematically separate the tracks into these meaningful groups?One way to separate the tracks into meaningful groups based on similar audio features is to use clustering. Clustering is a machine learning technique that is very powerful for identifying patterns in unlabeled data where the ground truth is not known. What is $k$-Means Clustering? $k$-Means Clustering is one of the most popular clustering algorithms. The algorithm assigns each data point to a cluster using four main steps. **Step 1: Initialize the Clusters**\Based on the user's desired number of clusters $k$, the algorithm randomly chooses a centroid for each cluster. In this example, we choose a $k=3$, therefore the algorithm randomly picks 3 centroids. **Step 2: Assign Each Data Point**\The algorithm assigns each point to the closest centroid to get $k$ initial clusters. **Step 3: Recompute the Cluster Centers**\For every cluster, the algorithm recomputes the centroid by taking the average of all points in the cluster. The changes in centroids are shown below by arrows. **Step 4: Reassign the Points**\Since the centroids change, the algorithm then re-assigns the points to the closest centroid. The image below shows the new clusters after re-assignment. The algorithm repeats the calculation of centroids and assignment of points until points stop changing clusters. When clustering large datasets, you stop the algorithm before reaching convergence, using other criteria instead.*Note: Some content in this section was [adapted](https://creativecommons.org/licenses/by/4.0/) from Google's free [Clustering in Machine Learning](https://developers.google.com/machine-learning/clustering) course. The course is a great resource if you want to explore clustering in more detail!* Cluster the Spotify Tracks using their Audio Features Now, we will use the `sklearn.cluster.KMeans` Python library to apply the $k$-means algorithm to our `tracks_df` data. Based on our visual inspection of the PCA plot, let's start with a guess k=3 to get 3 clusters.
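To make the four steps concrete, here is a minimal from-scratch sketch in NumPy (an illustration only, with made-up synthetic data; the analysis below uses `sklearn.cluster.KMeans`):

```python
import numpy as np

def simple_kmeans(X, k, n_iter=100, seed=123):
    rng = np.random.default_rng(seed)
    # Step 1: initialize the clusters by picking k distinct points as centroids
    centroids = X[rng.choice(len(X), size=k, replace=False)]
    for _ in range(n_iter):
        # Step 2: assign each point to its closest centroid
        dists = np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2)
        labels = dists.argmin(axis=1)
        # Step 3: recompute each centroid as the mean of its assigned points
        new_centroids = np.array([
            X[labels == j].mean(axis=0) if np.any(labels == j) else centroids[j]
            for j in range(k)
        ])
        # Step 4: repeat until the centroids stop moving (points stop switching)
        if np.allclose(new_centroids, centroids):
            break
        centroids = new_centroids
    return labels, centroids

# Tiny synthetic example: three well-separated 2-D blobs
rng = np.random.default_rng(0)
X = np.vstack([rng.normal(loc=c, scale=0.5, size=(50, 2)) for c in ([0, 0], [5, 5], [0, 5])])
labels, centroids = simple_kmeans(X, k=3)
print(np.round(centroids, 2))
```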
|
initial_k = 3
# Scale the data, so that the units of features don't impact feature importance
scaled_df = StandardScaler().fit_transform(tracks_df[audio_feature_cols])
# Cluster the data using the k means algorithm
initial_cluster_results = KMeans(n_clusters=initial_k, n_init=25, random_state=rs).fit(scaled_df)
|
_____no_output_____
|
MIT
|
Session_2_Practical_Data_Science.ipynb
|
MattFinney/practical_data_science_in_python
|
Now, let's print the cluster results. Notice that we're given a cluster number for each observation in our data set. This number is the id of the cluster assigned to each track.
|
# Print the cluster results
print(initial_cluster_results.labels_)
|
_____no_output_____
|
MIT
|
Session_2_Practical_Data_Science.ipynb
|
MattFinney/practical_data_science_in_python
|
And let's save the cluster results in our `tracks_df` dataframe as a column named `initial_cluster` so we can access them later.
|
# Save the cluster labels in our dataframe
tracks_df['initial_cluster'] = ['Cluster ' + str(i) for i in initial_cluster_results.labels_]
|
_____no_output_____
|
MIT
|
Session_2_Practical_Data_Science.ipynb
|
MattFinney/practical_data_science_in_python
|
Let's plot the PCA plot and color each observation based on the assigned cluster to visualize our $k$-means results.
|
# Show a PCA plot of the clusters
pca_plot(tracks_df[audio_feature_cols], classes=tracks_df['initial_cluster'])
|
_____no_output_____
|
MIT
|
Session_2_Practical_Data_Science.ipynb
|
MattFinney/practical_data_science_in_python
|
Does it look like our $k$-means algorithm correctly separated the tracks into clusters? Does each color map to a distinct group of points? How do our clusters of songs differ? One way we can evaluate our clusters is by looking at how the distribution of each data feature varies by cluster. In our case, let's check to see if tracks in the different clusters tend to have different values of energy, loudness, or speechiness.
|
# Plot the distribution of audio features by cluster
g = sns.pairplot(tracks_df, hue="initial_cluster",
vars=['danceability', 'energy', 'loudness', 'speechiness', 'tempo'],
hue_order=sorted(tracks_df.initial_cluster.unique()), palette='Set1')
g.fig.suptitle('Distribution of Audio Features by Cluster', y=1.05)
plt.show()
|
_____no_output_____
|
MIT
|
Session_2_Practical_Data_Science.ipynb
|
MattFinney/practical_data_science_in_python
|
Experiment with different values of $k$ Use the slider to select different values of $k$, then run the cell below to see how the choice of the number of clusters affects our results.
|
trial_k = 10 #@param {type:"slider", min:1, max:10, step:1}
# Cluster the data using the k means algorithm
trial_cluster_results = KMeans(n_clusters=trial_k, n_init=25, random_state=rs).fit(scaled_df)
# Save the cluster labels in our dataframe
tracks_df['trial_cluster'] = ['Cluster ' + str(i) for i in trial_cluster_results.labels_]
# Show a PCA plot of the clusters
pca_plot(tracks_df[audio_feature_cols], classes=tracks_df['trial_cluster'])
# Plot the distribution of audio features by cluster
g = sns.pairplot(tracks_df, hue="trial_cluster",
vars=['danceability', 'energy', 'loudness', 'speechiness', 'tempo'],
hue_order=sorted(tracks_df.trial_cluster.unique()), palette='Set1')
g.fig.suptitle('Distribution of Audio Features by Cluster', y=1.05)
plt.show()
|
_____no_output_____
|
MIT
|
Session_2_Practical_Data_Science.ipynb
|
MattFinney/practical_data_science_in_python
|
Which value of $k$ works best for our data? You may have noticed that the $k$-means algorithm requires you to choose $k$ and decide the number of clusters before you run the algorithm. But how do we know which value of $k$ is the best fit for our data? One approach is to track the total distance from points to their cluster centroid as we increase the number of clusters, $k$. Usually, the total distance decreases as we increase $k$, but we reach a value of $k$ where increasing $k$ only marginally decreases the total distance. An elbow plot helps us to find that value of $k$; it's the value of $k$ where the slope of the line in the elbow plot crosses the threshold of slope $=-1$. When you plot distance vs $k$, this point often looks like an "elbow".Let's build an elbow plot to select the value of $k$ that will give us the highest quality clusters that best explain the variation in our data.
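One simple way to operationalize that slope rule is sketched below; `pick_elbow_k` is a hypothetical helper (not part of scikit-learn), and the distances passed to it here are made-up numbers standing in for the `scores` list computed in the next cell:

```python
import numpy as np

def pick_elbow_k(k_list, scores):
    """Pick the k where the normalized elbow curve first gets shallower than slope -1."""
    k = np.asarray(k_list, dtype=float)
    s = np.asarray(scores, dtype=float)
    # Rescale both axes to [0, 1] so a slope threshold of -1 is meaningful
    k_scaled = (k - k.min()) / (k.max() - k.min())
    s_scaled = (s - s.min()) / (s.max() - s.min())
    slopes = np.diff(s_scaled) / np.diff(k_scaled)
    shallow = np.where(slopes > -1)[0]   # segments flatter than the threshold
    return int(k[shallow[0]]) if len(shallow) else int(k[-1])

# Made-up total distances for k = 1..10; prints 3 for this fake curve
print(pick_elbow_k(range(1, 11), [1000, 520, 300, 220, 190, 175, 165, 158, 152, 148]))
```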
|
# Calculate the Total Distance for each value of k between 1 and 10
scores = []
k_list = np.arange(1, 11)
for i in k_list:
fit_k = KMeans(n_clusters=i, n_init=5, random_state=rs).fit(scaled_df)
scores.append(fit_k.inertia_)
# Plot this in an elbow plot
plt.figure(figsize=(11,8.5))
sns.lineplot(x=k_list, y=scores)
plt.xlabel('Number of clusters $k$')
plt.ylabel('Total Point to Centroid Distance')
plt.grid()
plt.title('The Elbow Method showing the optimal $k$')
plt.show()
|
_____no_output_____
|
MIT
|
Session_2_Practical_Data_Science.ipynb
|
MattFinney/practical_data_science_in_python
|