path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
18141020/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
melb_predictors = data.drop(['Price'], axis=1)
X = melb_predictors.select_dtypes(exclude=['object'])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Fit a small random forest on the training split and return the
    mean absolute error of its predictions on the validation split.

    Uses 10 trees and a fixed random_state so repeated calls on the same
    data give the same score.
    """
    forest = RandomForestRegressor(n_estimators=10, random_state=0)
    forest.fit(X_train, y_train)
    return mean_absolute_error(y_valid, forest.predict(X_valid))
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
print('MAE from Approach 2 (Imputation):')
print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid)) | code |
18141020/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
melb_predictors = data.drop(['Price'], axis=1)
X = melb_predictors.select_dtypes(exclude=['object'])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Return the validation-set mean absolute error of a small random forest.

    Fits a RandomForestRegressor (10 trees, fixed random_state=0 for
    reproducibility) on (X_train, y_train) and scores its predictions
    against y_valid.
    """
    model = RandomForestRegressor(n_estimators=10, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
for col in cols_with_missing:
X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()
X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns
missing_val_count_by_column = X_train.isnull().sum()
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
X = data.drop(['Price'], axis=1)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_train.head() | code |
18141020/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
melb_predictors = data.drop(['Price'], axis=1)
X = melb_predictors.select_dtypes(exclude=['object'])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Return the validation-set mean absolute error of a small random forest.

    Trains a 10-tree RandomForestRegressor (random_state=0 so results are
    repeatable) on the training split and evaluates on the validation split.
    """
    model = RandomForestRegressor(n_estimators=10, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
for col in cols_with_missing:
X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()
X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns
missing_val_count_by_column = X_train.isnull().sum()
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
X = data.drop(['Price'], axis=1)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
s = X_train.dtypes == 'object'
object_cols = list(s[s].index)
print('Categorical variables:')
print(object_cols) | code |
18141020/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
melb_predictors = data.drop(['Price'], axis=1)
X = melb_predictors.select_dtypes(exclude=['object'])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Return the MAE of a 10-tree random forest on the validation split.

    random_state=0 pins the forest's randomness so scores are comparable
    across the different missing-value strategies tried in this notebook.
    """
    model = RandomForestRegressor(n_estimators=10, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
for col in cols_with_missing:
X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()
X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
X = data.drop(['Price'], axis=1)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy() | code |
18141020/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
melb_predictors = data.drop(['Price'], axis=1)
X = melb_predictors.select_dtypes(exclude=['object'])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Return the mean absolute error on (X_valid, y_valid).

    The model is a deliberately small RandomForestRegressor (10 estimators,
    random_state=0) so the scoring is fast and deterministic.
    """
    model = RandomForestRegressor(n_estimators=10, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
for col in cols_with_missing:
X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()
X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns
print(X_train.shape)
missing_val_count_by_column = X_train.isnull().sum()
print(missing_val_count_by_column[missing_val_count_by_column > 0]) | code |
33122764/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly.express as px
import plotly.express as px
import plotly.express as px
import plotly.express as px
import plotly.graph_objects as go
import re
import requests
symptoms = {'symptom': ['Fever', 'Dry cough', 'Fatigue', 'Sputum production', 'Shortness of breath', 'Muscle pain', 'Sore throat', 'Headache', 'Chills', 'Nausea or vomiting', 'Nasal congestion', 'Diarrhoea', 'Haemoptysis', 'Conjunctival congestion'], 'percentage': [87.9, 67.7, 38.1, 33.4, 18.6, 14.8, 13.9, 13.6, 11.4, 5.0, 4.8, 3.7, 0.9, 0.8]}
symptoms = pd.DataFrame(data=symptoms, index=range(14))
fig = px.pie(symptoms,
values="percentage",
names="symptom",
title="Symtoms of Coronavirus",
template="seaborn")
fig.update_traces(rotation=90, pull=0.05, textinfo="value+percent+label")
fig.show()
link = 'https://api.covid19india.org/csv/latest/state_wise.csv'
df = pd.read_csv(link)
df = df.drop(df.index[0])
Date = df['Last_Updated_Time'].values.tolist()
State = df['State'].values.tolist()
Confirmed = df['Confirmed'].values.tolist()
Recovered = df['Recovered'].values.tolist()
Active = df['Active'].values.tolist()
Deaths = df['Deaths'].values.tolist()
import plotly.graph_objects as go
fig = go.Figure(data=[go.Bar(name='Active', x=State, y=Active), go.Bar(name='Recovered', x=State, y=Recovered), go.Bar(name='Deaths', x=State, y=Deaths)])
fig.update_layout(autosize=False, width=950, height=700, margin=dict(l=50, r=50, b=100, t=100, pad=4), paper_bgcolor='LightSteelBlue', title='Statewise Covid19 Case on ' + str(Date[0][0:10]) + ' Last Update at' + str(Date[0][10:]))
fig.update_layout(barmode='stack')
fig = px.pie(df, values=Confirmed, names=State, title='Statewise Confirmed Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Active, names=State, title='Statewise Active Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Deaths, names=State, title='Statewise Deaths Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Recovered, names=State, title='Statewise Recovered Case')
#fig.update_traces(rotation=60, pull=0.01)
fig.show()
import requests
import re
link2 = 'https://api.covid19india.org/data.json'
r = requests.get(link2)
india_Data = r.json()
india_Confirmed = []
india_Recovered = []
india_Deseased = []
timeStamp = []
# Flatten the national time series into parallel lists. The cumulative
# counts arrive as comma-grouped strings (hence the re.sub(',', '', ...)
# before int()); the date is kept as-is for the x-axis.
for index in range(len(india_Data['cases_time_series'])):
    india_Confirmed.append(int(re.sub(',', '', india_Data['cases_time_series'][index]['totalconfirmed'])))
    india_Recovered.append(int(re.sub(',', '', india_Data['cases_time_series'][index]['totalrecovered'])))
    india_Deseased.append(int(re.sub(',', '', india_Data['cases_time_series'][index]['totaldeceased'])))
    timeStamp.append(india_Data['cases_time_series'][index]['date'])
fig = go.Figure()
fig = fig.add_trace(go.Scatter(x=timeStamp, y=india_Confirmed, mode='lines+markers', name='Confirmed Cases'))
fig = fig.add_trace(go.Scatter(x=timeStamp, y=india_Recovered, mode='lines+markers', name='Recoverd Patients'))
fig = fig.add_trace(go.Scatter(x=timeStamp, y=india_Deseased, mode='lines+markers', name='Deseased Patients'))
fig = fig.update_layout(title='India COVID-19 cases on ' + str(india_Data['cases_time_series'][-1]['date']) + '2020', xaxis_title='Date', yaxis_title='Cases')
fig.show() | code |
33122764/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd
import plotly.express as px
import plotly.express as px
import plotly.express as px
import plotly.express as px
import plotly.graph_objects as go
import plotly.graph_objects as go
import re
import requests
symptoms = {'symptom': ['Fever', 'Dry cough', 'Fatigue', 'Sputum production', 'Shortness of breath', 'Muscle pain', 'Sore throat', 'Headache', 'Chills', 'Nausea or vomiting', 'Nasal congestion', 'Diarrhoea', 'Haemoptysis', 'Conjunctival congestion'], 'percentage': [87.9, 67.7, 38.1, 33.4, 18.6, 14.8, 13.9, 13.6, 11.4, 5.0, 4.8, 3.7, 0.9, 0.8]}
symptoms = pd.DataFrame(data=symptoms, index=range(14))
fig = px.pie(symptoms,
values="percentage",
names="symptom",
title="Symtoms of Coronavirus",
template="seaborn")
fig.update_traces(rotation=90, pull=0.05, textinfo="value+percent+label")
fig.show()
link = 'https://api.covid19india.org/csv/latest/state_wise.csv'
df = pd.read_csv(link)
df = df.drop(df.index[0])
Date = df['Last_Updated_Time'].values.tolist()
State = df['State'].values.tolist()
Confirmed = df['Confirmed'].values.tolist()
Recovered = df['Recovered'].values.tolist()
Active = df['Active'].values.tolist()
Deaths = df['Deaths'].values.tolist()
import plotly.graph_objects as go
fig = go.Figure(data=[go.Bar(name='Active', x=State, y=Active), go.Bar(name='Recovered', x=State, y=Recovered), go.Bar(name='Deaths', x=State, y=Deaths)])
fig.update_layout(autosize=False, width=950, height=700, margin=dict(l=50, r=50, b=100, t=100, pad=4), paper_bgcolor='LightSteelBlue', title='Statewise Covid19 Case on ' + str(Date[0][0:10]) + ' Last Update at' + str(Date[0][10:]))
fig.update_layout(barmode='stack')
fig = px.pie(df, values=Confirmed, names=State, title='Statewise Confirmed Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Active, names=State, title='Statewise Active Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Deaths, names=State, title='Statewise Deaths Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Recovered, names=State, title='Statewise Recovered Case')
#fig.update_traces(rotation=60, pull=0.01)
fig.show()
import requests
import re
link2 = 'https://api.covid19india.org/data.json'
r = requests.get(link2)
india_Data = r.json()
india_Confirmed = []
india_Recovered = []
india_Deseased = []
timeStamp = []
for index in range(len(india_Data['cases_time_series'])):
india_Confirmed.append(int(re.sub(',', '', india_Data['cases_time_series'][index]['totalconfirmed'])))
india_Recovered.append(int(re.sub(',', '', india_Data['cases_time_series'][index]['totalrecovered'])))
india_Deseased.append(int(re.sub(',', '', india_Data['cases_time_series'][index]['totaldeceased'])))
timeStamp.append(india_Data['cases_time_series'][index]['date'])
fig = go.Figure()
fig = fig.add_trace(go.Scatter(x=timeStamp, y=india_Confirmed, mode='lines+markers', name='Confirmed Cases'))
fig = fig.add_trace(go.Scatter(x=timeStamp, y=india_Recovered, mode='lines+markers', name='Recoverd Patients'))
fig = fig.add_trace(go.Scatter(x=timeStamp, y=india_Deseased, mode='lines+markers', name='Deseased Patients'))
fig = fig.update_layout(title='India COVID-19 cases on ' + str(india_Data['cases_time_series'][-1]['date']) + '2020', xaxis_title='Date', yaxis_title='Cases')
link3 = 'https://api.covid19india.org/v2/state_district_wise.json'
r = requests.get(link3)
states_Data = r.json()
telangana = 27
district = []
district_Confirmed = []
district_Recovered = []
district_Deseased = []
district_Active = []
for index in range(len(states_Data[telangana]['districtData'])):
district.append(str(re.sub(',', '', states_Data[telangana]['districtData'][index]['district'])))
district_Confirmed.append(int(states_Data[telangana]['districtData'][index]['confirmed']))
district_Recovered.append(int(states_Data[telangana]['districtData'][index]['recovered']))
district_Deseased.append(int(states_Data[telangana]['districtData'][index]['deceased']))
district_Active.append(int(states_Data[telangana]['districtData'][index]['active']))
import plotly.graph_objects as go
fig = go.Figure(data=[go.Bar(name='Active', x=district, y=district_Active), go.Bar(name='Recovered', x=district, y=district_Recovered), go.Bar(name='Deaths', x=district, y=district_Deseased)])
fig.update_layout(autosize=False, width=950, height=700, margin=dict(l=50, r=50, b=100, t=100, pad=4), paper_bgcolor='LightSteelBlue', title='Statewise Covid19 Case on ' + str(Date[0][0:10]))
fig.update_layout(barmode='stack')
fig.show() | code |
33122764/cell_23 | [
"text_html_output_1.png"
] | import requests
import requests
import re
link2 = 'https://api.covid19india.org/data.json'
r = requests.get(link2)
india_Data = r.json()
link3 = 'https://api.covid19india.org/v2/state_district_wise.json'
r = requests.get(link3)
states_Data = r.json()
for i in range(len(states_Data[:])):
print(states_Data[i]['state'], '>>>', i) | code |
33122764/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly.express as px
symptoms = {'symptom': ['Fever', 'Dry cough', 'Fatigue', 'Sputum production', 'Shortness of breath', 'Muscle pain', 'Sore throat', 'Headache', 'Chills', 'Nausea or vomiting', 'Nasal congestion', 'Diarrhoea', 'Haemoptysis', 'Conjunctival congestion'], 'percentage': [87.9, 67.7, 38.1, 33.4, 18.6, 14.8, 13.9, 13.6, 11.4, 5.0, 4.8, 3.7, 0.9, 0.8]}
symptoms = pd.DataFrame(data=symptoms, index=range(14))
fig = px.pie(symptoms, values='percentage', names='symptom', title='Symtoms of Coronavirus', template='seaborn')
fig.update_traces(rotation=90, pull=0.05, textinfo='value+percent+label')
fig.show() | code |
33122764/cell_2 | [
"text_html_output_1.png"
] | import IPython
import IPython
IPython.display.HTML('<div class="flourish-embed flourish-bar-chart-race" data-src="visualisation/1977187" data-url="https://flo.uri.sh/visualisation/1977187/embed"><script src="https://public.flourish.studio/resources/embed.js"></script></div>') | code |
33122764/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly.express as px
import plotly.express as px
import plotly.express as px
import plotly.express as px
import plotly.graph_objects as go
symptoms = {'symptom': ['Fever', 'Dry cough', 'Fatigue', 'Sputum production', 'Shortness of breath', 'Muscle pain', 'Sore throat', 'Headache', 'Chills', 'Nausea or vomiting', 'Nasal congestion', 'Diarrhoea', 'Haemoptysis', 'Conjunctival congestion'], 'percentage': [87.9, 67.7, 38.1, 33.4, 18.6, 14.8, 13.9, 13.6, 11.4, 5.0, 4.8, 3.7, 0.9, 0.8]}
symptoms = pd.DataFrame(data=symptoms, index=range(14))
fig = px.pie(symptoms,
values="percentage",
names="symptom",
title="Symtoms of Coronavirus",
template="seaborn")
fig.update_traces(rotation=90, pull=0.05, textinfo="value+percent+label")
fig.show()
link = 'https://api.covid19india.org/csv/latest/state_wise.csv'
df = pd.read_csv(link)
df = df.drop(df.index[0])
Date = df['Last_Updated_Time'].values.tolist()
State = df['State'].values.tolist()
Confirmed = df['Confirmed'].values.tolist()
Recovered = df['Recovered'].values.tolist()
Active = df['Active'].values.tolist()
Deaths = df['Deaths'].values.tolist()
import plotly.graph_objects as go
fig = go.Figure(data=[go.Bar(name='Active', x=State, y=Active), go.Bar(name='Recovered', x=State, y=Recovered), go.Bar(name='Deaths', x=State, y=Deaths)])
fig.update_layout(autosize=False, width=950, height=700, margin=dict(l=50, r=50, b=100, t=100, pad=4), paper_bgcolor='LightSteelBlue', title='Statewise Covid19 Case on ' + str(Date[0][0:10]) + ' Last Update at' + str(Date[0][10:]))
fig.update_layout(barmode='stack')
fig = px.pie(df, values=Confirmed, names=State, title='Statewise Confirmed Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Active, names=State, title='Statewise Active Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Deaths, names=State, title='Statewise Deaths Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Recovered, names=State, title='Statewise Recovered Case')
fig.show() | code |
33122764/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly.express as px
import plotly.express as px
import plotly.express as px
import plotly.graph_objects as go
symptoms = {'symptom': ['Fever', 'Dry cough', 'Fatigue', 'Sputum production', 'Shortness of breath', 'Muscle pain', 'Sore throat', 'Headache', 'Chills', 'Nausea or vomiting', 'Nasal congestion', 'Diarrhoea', 'Haemoptysis', 'Conjunctival congestion'], 'percentage': [87.9, 67.7, 38.1, 33.4, 18.6, 14.8, 13.9, 13.6, 11.4, 5.0, 4.8, 3.7, 0.9, 0.8]}
symptoms = pd.DataFrame(data=symptoms, index=range(14))
fig = px.pie(symptoms,
values="percentage",
names="symptom",
title="Symtoms of Coronavirus",
template="seaborn")
fig.update_traces(rotation=90, pull=0.05, textinfo="value+percent+label")
fig.show()
link = 'https://api.covid19india.org/csv/latest/state_wise.csv'
df = pd.read_csv(link)
df = df.drop(df.index[0])
Date = df['Last_Updated_Time'].values.tolist()
State = df['State'].values.tolist()
Confirmed = df['Confirmed'].values.tolist()
Recovered = df['Recovered'].values.tolist()
Active = df['Active'].values.tolist()
Deaths = df['Deaths'].values.tolist()
import plotly.graph_objects as go
fig = go.Figure(data=[go.Bar(name='Active', x=State, y=Active), go.Bar(name='Recovered', x=State, y=Recovered), go.Bar(name='Deaths', x=State, y=Deaths)])
fig.update_layout(autosize=False, width=950, height=700, margin=dict(l=50, r=50, b=100, t=100, pad=4), paper_bgcolor='LightSteelBlue', title='Statewise Covid19 Case on ' + str(Date[0][0:10]) + ' Last Update at' + str(Date[0][10:]))
fig.update_layout(barmode='stack')
fig = px.pie(df, values=Confirmed, names=State, title='Statewise Confirmed Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Active, names=State, title='Statewise Active Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Deaths, names=State, title='Statewise Deaths Case')
fig.show() | code |
33122764/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly.express as px
import plotly.express as px
import plotly.graph_objects as go
symptoms = {'symptom': ['Fever', 'Dry cough', 'Fatigue', 'Sputum production', 'Shortness of breath', 'Muscle pain', 'Sore throat', 'Headache', 'Chills', 'Nausea or vomiting', 'Nasal congestion', 'Diarrhoea', 'Haemoptysis', 'Conjunctival congestion'], 'percentage': [87.9, 67.7, 38.1, 33.4, 18.6, 14.8, 13.9, 13.6, 11.4, 5.0, 4.8, 3.7, 0.9, 0.8]}
symptoms = pd.DataFrame(data=symptoms, index=range(14))
fig = px.pie(symptoms,
values="percentage",
names="symptom",
title="Symtoms of Coronavirus",
template="seaborn")
fig.update_traces(rotation=90, pull=0.05, textinfo="value+percent+label")
fig.show()
link = 'https://api.covid19india.org/csv/latest/state_wise.csv'
df = pd.read_csv(link)
df = df.drop(df.index[0])
Date = df['Last_Updated_Time'].values.tolist()
State = df['State'].values.tolist()
Confirmed = df['Confirmed'].values.tolist()
Recovered = df['Recovered'].values.tolist()
Active = df['Active'].values.tolist()
Deaths = df['Deaths'].values.tolist()
import plotly.graph_objects as go
fig = go.Figure(data=[go.Bar(name='Active', x=State, y=Active), go.Bar(name='Recovered', x=State, y=Recovered), go.Bar(name='Deaths', x=State, y=Deaths)])
fig.update_layout(autosize=False, width=950, height=700, margin=dict(l=50, r=50, b=100, t=100, pad=4), paper_bgcolor='LightSteelBlue', title='Statewise Covid19 Case on ' + str(Date[0][0:10]) + ' Last Update at' + str(Date[0][10:]))
fig.update_layout(barmode='stack')
fig = px.pie(df, values=Confirmed, names=State, title='Statewise Confirmed Case')
fig.show()
import plotly.express as px
fig = px.pie(df, values=Active, names=State, title='Statewise Active Case')
fig.show() | code |
33122764/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
symptoms = {'symptom': ['Fever', 'Dry cough', 'Fatigue', 'Sputum production', 'Shortness of breath', 'Muscle pain', 'Sore throat', 'Headache', 'Chills', 'Nausea or vomiting', 'Nasal congestion', 'Diarrhoea', 'Haemoptysis', 'Conjunctival congestion'], 'percentage': [87.9, 67.7, 38.1, 33.4, 18.6, 14.8, 13.9, 13.6, 11.4, 5.0, 4.8, 3.7, 0.9, 0.8]}
symptoms = pd.DataFrame(data=symptoms, index=range(14))
fig = px.pie(symptoms,
values="percentage",
names="symptom",
title="Symtoms of Coronavirus",
template="seaborn")
fig.update_traces(rotation=90, pull=0.05, textinfo="value+percent+label")
fig.show()
link = 'https://api.covid19india.org/csv/latest/state_wise.csv'
df = pd.read_csv(link)
df = df.drop(df.index[0])
Date = df['Last_Updated_Time'].values.tolist()
State = df['State'].values.tolist()
Confirmed = df['Confirmed'].values.tolist()
Recovered = df['Recovered'].values.tolist()
Active = df['Active'].values.tolist()
Deaths = df['Deaths'].values.tolist()
import plotly.graph_objects as go
fig = go.Figure(data=[go.Bar(name='Active', x=State, y=Active), go.Bar(name='Recovered', x=State, y=Recovered), go.Bar(name='Deaths', x=State, y=Deaths)])
fig.update_layout(autosize=False, width=950, height=700, margin=dict(l=50, r=50, b=100, t=100, pad=4), paper_bgcolor='LightSteelBlue', title='Statewise Covid19 Case on ' + str(Date[0][0:10]) + ' Last Update at' + str(Date[0][10:]))
fig.update_layout(barmode='stack')
fig.show() | code |
33122764/cell_12 | [
"text_html_output_2.png"
] | import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
symptoms = {'symptom': ['Fever', 'Dry cough', 'Fatigue', 'Sputum production', 'Shortness of breath', 'Muscle pain', 'Sore throat', 'Headache', 'Chills', 'Nausea or vomiting', 'Nasal congestion', 'Diarrhoea', 'Haemoptysis', 'Conjunctival congestion'], 'percentage': [87.9, 67.7, 38.1, 33.4, 18.6, 14.8, 13.9, 13.6, 11.4, 5.0, 4.8, 3.7, 0.9, 0.8]}
symptoms = pd.DataFrame(data=symptoms, index=range(14))
fig = px.pie(symptoms,
values="percentage",
names="symptom",
title="Symtoms of Coronavirus",
template="seaborn")
fig.update_traces(rotation=90, pull=0.05, textinfo="value+percent+label")
fig.show()
link = 'https://api.covid19india.org/csv/latest/state_wise.csv'
df = pd.read_csv(link)
df = df.drop(df.index[0])
Date = df['Last_Updated_Time'].values.tolist()
State = df['State'].values.tolist()
Confirmed = df['Confirmed'].values.tolist()
Recovered = df['Recovered'].values.tolist()
Active = df['Active'].values.tolist()
Deaths = df['Deaths'].values.tolist()
import plotly.graph_objects as go
fig = go.Figure(data=[go.Bar(name='Active', x=State, y=Active), go.Bar(name='Recovered', x=State, y=Recovered), go.Bar(name='Deaths', x=State, y=Deaths)])
fig.update_layout(autosize=False, width=950, height=700, margin=dict(l=50, r=50, b=100, t=100, pad=4), paper_bgcolor='LightSteelBlue', title='Statewise Covid19 Case on ' + str(Date[0][0:10]) + ' Last Update at' + str(Date[0][10:]))
fig.update_layout(barmode='stack')
fig = px.pie(df, values=Confirmed, names=State, title='Statewise Confirmed Case')
fig.show() | code |
105179005/cell_13 | [
"text_plain_output_1.png"
] | a = "data anlytic's"
a.find('data')
a.split()
a = 'd24%343cbdcjh'
a.isalnum()
a = 'i am learning python'
l = len(a)
r = ' '
while l > 0:
r = r + a[l - 1]
l = l - 1
print(r) | code |
105179005/cell_9 | [
"text_plain_output_1.png"
] | b = ' we_are_doing_a_good_progress '
c = b.split('_')
b = b.strip()
c = b.lstrip()
b
b = 'dfffdsf'
b.isalpha() | code |
105179005/cell_4 | [
"text_plain_output_1.png"
] | a = "data anlytic's"
print(a[-3])
print(a[-8:-1])
print(a[0:9:2])
a.find('data')
a.split() | code |
105179005/cell_6 | [
"text_plain_output_1.png"
] | b = ' we_are_doing_a_good_progress '
c = b.split('_')
print(c[5])
b = b.strip()
c = b.lstrip()
b | code |
105179005/cell_2 | [
"text_plain_output_1.png"
] | a = "data anlytic's"
print(a) | code |
105179005/cell_11 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | b = ' we_are_doing_a_good_progress '
c = b.split('_')
b = b.strip()
c = b.lstrip()
b
c = 'adnan k'
c.islower()
c = c.upper()
c
c = c.capitalize()
c | code |
105179005/cell_8 | [
"text_plain_output_1.png"
] | a = "data anlytic's"
a.find('data')
a.split()
a = 'd24%343cbdcjh'
a.isalnum() | code |
105179005/cell_10 | [
"text_plain_output_1.png"
] | b = ' we_are_doing_a_good_progress '
c = b.split('_')
b = b.strip()
c = b.lstrip()
b
c = 'adnan k'
c.islower()
c = c.upper()
c | code |
105179005/cell_12 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | b = ' we_are_doing_a_good_progress '
c = b.split('_')
b = b.strip()
c = b.lstrip()
b
c = 'adnan k'
c.islower()
c = c.upper()
c
c = c.capitalize()
c
c = c.title()
c | code |
88081842/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf['sal'] = xdf['sal'].astype('float64')
xdf.isna().sum()
conversionVal = {'₹': 0.013, '$': 1, '£': 1.36, 'AFN': 0.011}
conversionVal
xdf['salary_usd'] = 0
for key, value in conversionVal.items():
for x, xRow in xdf.iterrows():
if key in xRow['currency']:
salVal = xRow['sal']
salVal = round(int(salVal) * value, 2)
xdf.at[x, 'salary_usd'] = salVal
xdf | code |
88081842/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf[xdf['currency'] == 'A'] | code |
88081842/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf[xdf['salary'].str.contains('\\$')].head() | code |
88081842/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf['sal'] = xdf['sal'].astype('float64')
xdf.isna().sum()
conversionVal = {'₹': 0.013, '$': 1, '£': 1.36, 'AFN': 0.011}
conversionVal
xdf['salary_usd'] = 0
for key, value in conversionVal.items():
for x, xRow in xdf.iterrows():
if key in xRow['currency']:
salVal = xRow['sal']
salVal = round(int(salVal) * value, 2)
xdf.at[x, 'salary_usd'] = salVal
xdf
xdf[xdf.index == 3876] | code |
88081842/cell_20 | [
"text_html_output_1.png"
] | conversionVal = {'₹': 0.013, '$': 1, '£': 1.36, 'AFN': 0.011}
conversionVal | code |
88081842/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns | code |
88081842/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
88081842/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf.head() | code |
88081842/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf['sal'] = xdf['sal'].astype('float64')
xdf.isna().sum() | code |
88081842/cell_28 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf['sal'] = xdf['sal'].astype('float64')
xdf.isna().sum()
conversionVal = {'₹': 0.013, '$': 1, '£': 1.36, 'AFN': 0.011}
conversionVal
xdf['salary_usd'] = 0
for key, value in conversionVal.items():
for x, xRow in xdf.iterrows():
if key in xRow['currency']:
salVal = xRow['sal']
salVal = round(int(salVal) * value, 2)
xdf.at[x, 'salary_usd'] = salVal
xdf
multiplier_per_year = {'yr': 1, 'mo': 12, 'hr': 2064}
multiplier_per_year
xdf[xdf.index == 3876]
xdf['salary_usd_yearly'] = 0
for key, value in multiplier_per_year.items():
xdf['salary_usd_yearly'] = xdf.apply(lambda row: row['salary_usd'] * value if row['duration'] == key else row['salary_usd_yearly'] * 1, axis=1)
xdf[xdf.index == 3876] | code |
88081842/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf.info() | code |
88081842/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf['currency'] = xdf['currency'].str.replace('A', 'AFN')
xdf['sal'] = xdf['sal'].str.replace('FN', '').str.strip()
xdf[xdf['currency'].str.contains('A')] | code |
88081842/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf.info() | code |
88081842/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
print('Dataset Shape: ', df.shape, '\n--------------------------------')
df.head() | code |
88081842/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf['sal'] = xdf['sal'].astype('float64')
xdf.info() | code |
88081842/cell_24 | [
"text_plain_output_1.png"
] | multiplier_per_year = {'yr': 1, 'mo': 12, 'hr': 2064}
multiplier_per_year | code |
88081842/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf['sal'] = xdf['sal'].astype('float64')
xdf.isna().sum()
conversionVal = {'₹': 0.013, '$': 1, '£': 1.36, 'AFN': 0.011}
conversionVal
xdf['salary_usd'] = 0
for key, value in conversionVal.items():
for x, xRow in xdf.iterrows():
if key in xRow['currency']:
salVal = xRow['sal']
salVal = round(int(salVal) * value, 2)
xdf.at[x, 'salary_usd'] = salVal
xdf
xdf['duration'].unique() | code |
88081842/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf['sal'] = xdf['sal'].astype('float64')
xdf.isna().sum()
conversionVal = {'₹': 0.013, '$': 1, '£': 1.36, 'AFN': 0.011}
conversionVal
xdf['salary_usd'] = 0
for key, value in conversionVal.items():
for x, xRow in xdf.iterrows():
if key in xRow['currency']:
salVal = xRow['sal']
salVal = round(int(salVal) * value, 2)
xdf.at[x, 'salary_usd'] = salVal
xdf
multiplier_per_year = {'yr': 1, 'mo': 12, 'hr': 2064}
multiplier_per_year
xdf[xdf.index == 3876]
xdf['salary_usd_yearly'] = 0
for key, value in multiplier_per_year.items():
xdf['salary_usd_yearly'] = xdf.apply(lambda row: row['salary_usd'] * value if row['duration'] == key else row['salary_usd_yearly'] * 1, axis=1)
xdf.head() | code |
88081842/cell_12 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns
xdf = df.rename(columns={'Company Name': 'company_name', 'Job Title': 'job_title', 'Salaries Reported': 'sal_reported', 'Location': 'location', 'Salary': 'salary'})
xdf.columns
xdf['currency'] = xdf['salary'].str.slice(start=0, stop=1)
xdf['duration'] = xdf['salary'].str.split('/', expand=True)[1]
xdf['sal'] = xdf['salary'].str.split('/', expand=True)[0].str.slice(start=1).str.replace(',', '')
xdf.head() | code |
88081842/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/analytics-industry-salaries-2022-india/Salary Dataset.csv')
df.columns | code |
18146356/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_test.describe() | code |
18146356/cell_34 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import missingno as msno
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
f,ax=plt.subplots(1,2,figsize=(16,6))
df_train['Survived'].value_counts().plot.pie(explode=[0,0.1],autopct='%1.1f%%',ax=ax[0],shadow=False)
ax[0].set_title('Survived')
ax[0].set_ylabel('')
sns.countplot('Survived',data=df_train,ax=ax[1])
ax[1].set_title('Survived')
plt.show()
df_train.groupby(['Sex', 'Survived'])['Survived'].count()
f,ax=plt.subplots(1,2,figsize=(14,4))
df_train[['Sex','Survived']].groupby(['Sex']).mean().plot.bar(ax=ax[0])
ax[0].set_title('Survived vs Sex')
sns.countplot('Sex',hue='Survived',data=df_train,ax=ax[1])
ax[1].set_title('Sex:Survived vs Dead')
plt.show()
pd.crosstab(df_train.Pclass, df_train.Survived, margins=True)
f, ax = plt.subplots(1, 2, figsize=(16, 8))
df_train['Pclass'].value_counts().plot.bar(color=['black', 'silver', 'yellow'], ax=ax[0])
ax[0].set_title('Number Of Passengers By Pclass')
ax[0].set_ylabel('Count')
sns.countplot('Pclass', hue='Survived', data=df_train, ax=ax[1])
ax[1].set_title('Pclass:Survived vs Dead')
plt.show() | code |
18146356/cell_30 | [
"text_plain_output_1.png"
] | import missingno as msno
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
df_train.groupby(['Sex', 'Survived'])['Survived'].count()
pd.crosstab(df_train.Pclass, df_train.Survived, margins=True) | code |
18146356/cell_20 | [
"text_plain_output_1.png"
] | import missingno as msno
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
df_train.groupby(['Sex', 'Survived'])['Survived'].count() | code |
18146356/cell_39 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import missingno as msno
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
f,ax=plt.subplots(1,2,figsize=(16,6))
df_train['Survived'].value_counts().plot.pie(explode=[0,0.1],autopct='%1.1f%%',ax=ax[0],shadow=False)
ax[0].set_title('Survived')
ax[0].set_ylabel('')
sns.countplot('Survived',data=df_train,ax=ax[1])
ax[1].set_title('Survived')
plt.show()
df_train.groupby(['Sex', 'Survived'])['Survived'].count()
f,ax=plt.subplots(1,2,figsize=(14,4))
df_train[['Sex','Survived']].groupby(['Sex']).mean().plot.bar(ax=ax[0])
ax[0].set_title('Survived vs Sex')
sns.countplot('Sex',hue='Survived',data=df_train,ax=ax[1])
ax[1].set_title('Sex:Survived vs Dead')
plt.show()
pd.crosstab(df_train.Pclass, df_train.Survived, margins=True)
f,ax=plt.subplots(1,2,figsize=(16,8))
df_train['Pclass'].value_counts().plot.bar(color=['black','silver','yellow'],ax=ax[0])
ax[0].set_title('Number Of Passengers By Pclass')
ax[0].set_ylabel('Count')
sns.countplot('Pclass',hue='Survived',data=df_train,ax=ax[1])
ax[1].set_title('Pclass:Survived vs Dead')
plt.show()
pd.crosstab([df_train.Sex, df_train.Survived], df_train.Pclass, margins=True)
sns.factorplot('Pclass', 'Survived', hue='Sex', data=df_train)
plt.show() | code |
18146356/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import missingno as msno
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
df_train.groupby(['Sex', 'Survived'])['Survived'].count()
df_train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=True).count() | code |
18146356/cell_7 | [
"image_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.describe() | code |
18146356/cell_32 | [
"image_output_1.png"
] | import missingno as msno
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
df_train.groupby(['Sex', 'Survived'])['Survived'].count()
pd.crosstab(df_train.Pclass, df_train.Survived, margins=True)
df_train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=True).mean().sort_values(by='Survived', ascending=False).plot.bar() | code |
18146356/cell_28 | [
"image_output_1.png"
] | import missingno as msno
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
df_train.groupby(['Sex', 'Survived'])['Survived'].count()
df_train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=True).sum() | code |
18146356/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.info() | code |
18146356/cell_15 | [
"text_html_output_1.png"
] | import missingno as msno
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
msno.bar(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.2, 0.5, 0.2)) | code |
18146356/cell_38 | [
"text_html_output_1.png"
] | import missingno as msno
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
df_train.groupby(['Sex', 'Survived'])['Survived'].count()
pd.crosstab(df_train.Pclass, df_train.Survived, margins=True)
pd.crosstab([df_train.Sex, df_train.Survived], df_train.Pclass, margins=True) | code |
18146356/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import missingno as msno
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
f, ax = plt.subplots(1, 2, figsize=(16, 6))
df_train['Survived'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=False)
ax[0].set_title('Survived')
ax[0].set_ylabel('')
sns.countplot('Survived', data=df_train, ax=ax[1])
ax[1].set_title('Survived')
plt.show() | code |
18146356/cell_14 | [
"text_plain_output_1.png"
] | import missingno as msno
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2)) | code |
18146356/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import missingno as msno
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum()
msno.matrix(df=df_train.iloc[:, :], figsize=(7, 5), color=(0.5, 0.1, 0.2))
f,ax=plt.subplots(1,2,figsize=(16,6))
df_train['Survived'].value_counts().plot.pie(explode=[0,0.1],autopct='%1.1f%%',ax=ax[0],shadow=False)
ax[0].set_title('Survived')
ax[0].set_ylabel('')
sns.countplot('Survived',data=df_train,ax=ax[1])
ax[1].set_title('Survived')
plt.show()
df_train.groupby(['Sex', 'Survived'])['Survived'].count()
f, ax = plt.subplots(1, 2, figsize=(14, 4))
df_train[['Sex', 'Survived']].groupby(['Sex']).mean().plot.bar(ax=ax[0])
ax[0].set_title('Survived vs Sex')
sns.countplot('Sex', hue='Survived', data=df_train, ax=ax[1])
ax[1].set_title('Sex:Survived vs Dead')
plt.show() | code |
18146356/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_test.info() | code |
18146356/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.isnull().sum() | code |
18146356/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.head() | code |
50210546/cell_21 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp'
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1
df = pd.merge(df, df1, on='LINE_ITEM_NAME', how='right')
val = df[df['Actual_eCPM'] == '-'].index
df['Actual_eCPM'].iloc[val] = 0
df['Actual_eCPM'] = df['Actual_eCPM'].astype(str).astype('float64')
df['Actual_Revenue'] = 0
Total_Impr = df['Impressions'].sum()
df['Actual_Revenue'] = Total_Impr * df['Actual_eCPM']
df.dtypes
df1 = df[df['amp_or_non_amp'] == 'Amp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
df2 = df[df['amp_or_non_amp'] == 'Nonmp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
frames = [df1, df2]
dff = pd.concat(frames, axis=0, ignore_index=True) | code |
50210546/cell_4 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1 | code |
50210546/cell_30 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm
import statsmodels.api as sm
import statsmodels.api as sm
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp'
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1
df = pd.merge(df, df1, on='LINE_ITEM_NAME', how='right')
val = df[df['Actual_eCPM'] == '-'].index
df['Actual_eCPM'].iloc[val] = 0
df['Actual_eCPM'] = df['Actual_eCPM'].astype(str).astype('float64')
df['Actual_Revenue'] = 0
Total_Impr = df['Impressions'].sum()
df['Actual_Revenue'] = Total_Impr * df['Actual_eCPM']
df.dtypes
df1 = df[df['amp_or_non_amp'] == 'Amp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
df2 = df[df['amp_or_non_amp'] == 'Nonmp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
frames = [df1, df2]
dff = pd.concat(frames, axis=0, ignore_index=True)
df = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df
data = df
data.columns
data = df
data = data.reindex(columns=['DAY', 'Tags_served', 'Impressions', 'Clicks', 'CTR', 'Revenue', 'eCPM', 'AD_UNIT_NAME', 'ORDER_NAME', 'ADVERTISER_NAME', 'LINE_ITEM_NAME', 'DATE'])
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(15, 20))
ax = axes.flatten()
for i, val in enumerate(data.columns.values[:7]):
sm.qqplot(data[val], fit=True, line='q', ax=ax[i])
ax[i].legend([val])
plt.show() | code |
50210546/cell_33 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm
import statsmodels.api as sm
import statsmodels.api as sm
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp'
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1
df = pd.merge(df, df1, on='LINE_ITEM_NAME', how='right')
val = df[df['Actual_eCPM'] == '-'].index
df['Actual_eCPM'].iloc[val] = 0
df['Actual_eCPM'] = df['Actual_eCPM'].astype(str).astype('float64')
df['Actual_Revenue'] = 0
Total_Impr = df['Impressions'].sum()
df['Actual_Revenue'] = Total_Impr * df['Actual_eCPM']
df.dtypes
df1 = df[df['amp_or_non_amp'] == 'Amp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
df2 = df[df['amp_or_non_amp'] == 'Nonmp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
frames = [df1, df2]
dff = pd.concat(frames, axis=0, ignore_index=True)
df = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df
data = df
data.columns
data = df
data = data.reindex(columns=['DAY', 'Tags_served', 'Impressions', 'Clicks', 'CTR', 'Revenue', 'eCPM', 'AD_UNIT_NAME', 'ORDER_NAME', 'ADVERTISER_NAME', 'LINE_ITEM_NAME', 'DATE'])
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(15,20))
ax= axes.flatten()
for i, val in enumerate(data.columns.values[:7]):
sm.qqplot(data[val], fit = True, line='q', ax=ax[i])
ax[i].legend([val])
plt.show()
data.dtypes | code |
50210546/cell_26 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp'
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1
df = pd.merge(df, df1, on='LINE_ITEM_NAME', how='right')
val = df[df['Actual_eCPM'] == '-'].index
df['Actual_eCPM'].iloc[val] = 0
df['Actual_eCPM'] = df['Actual_eCPM'].astype(str).astype('float64')
df['Actual_Revenue'] = 0
Total_Impr = df['Impressions'].sum()
df['Actual_Revenue'] = Total_Impr * df['Actual_eCPM']
df.dtypes
df1 = df[df['amp_or_non_amp'] == 'Amp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
df2 = df[df['amp_or_non_amp'] == 'Nonmp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
frames = [df1, df2]
dff = pd.concat(frames, axis=0, ignore_index=True)
df = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df | code |
50210546/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import pandas
import os
import gc
import pylab
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr, probplot, norm, shapiro
import statsmodels.api as sm
pal = sns.color_palette()
pd.set_option('display.max_columns', 50)
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import plotly.tools as tls
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score, f1_score
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
from sklearn import linear_model
import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn.model_selection import train_test_split
style.use('fivethirtyeight')
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
import statsmodels.api as sm
from statsmodels.formula.api import ols
import scipy.stats as stats
from bioinfokit.analys import stat
from scipy.stats import chi2_contingency
import io
import re
import nltk
from nltk.corpus import stopwords
import string
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import random
from wordcloud import WordCloud, STOPWORDS
from textblob import TextBlob
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50210546/cell_1 | [
"text_plain_output_1.png"
] | !pip install bioinfokit | code |
50210546/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback' | code |
50210546/cell_18 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp'
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1
df = pd.merge(df, df1, on='LINE_ITEM_NAME', how='right')
val = df[df['Actual_eCPM'] == '-'].index
df['Actual_eCPM'].iloc[val] = 0
df['Actual_eCPM'] = df['Actual_eCPM'].astype(str).astype('float64')
df['Actual_Revenue'] = 0
Total_Impr = df['Impressions'].sum()
df['Actual_Revenue'] = Total_Impr * df['Actual_eCPM']
df.dtypes
df | code |
50210546/cell_32 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm
import statsmodels.api as sm
import statsmodels.api as sm
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp'
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1
df = pd.merge(df, df1, on='LINE_ITEM_NAME', how='right')
val = df[df['Actual_eCPM'] == '-'].index
df['Actual_eCPM'].iloc[val] = 0
df['Actual_eCPM'] = df['Actual_eCPM'].astype(str).astype('float64')
df['Actual_Revenue'] = 0
Total_Impr = df['Impressions'].sum()
df['Actual_Revenue'] = Total_Impr * df['Actual_eCPM']
df.dtypes
df1 = df[df['amp_or_non_amp'] == 'Amp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
df2 = df[df['amp_or_non_amp'] == 'Nonmp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
frames = [df1, df2]
dff = pd.concat(frames, axis=0, ignore_index=True)
df = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df
data = df
data.columns
data = df
data = data.reindex(columns=['DAY', 'Tags_served', 'Impressions', 'Clicks', 'CTR', 'Revenue', 'eCPM', 'AD_UNIT_NAME', 'ORDER_NAME', 'ADVERTISER_NAME', 'LINE_ITEM_NAME', 'DATE'])
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(15,20))
ax= axes.flatten()
for i, val in enumerate(data.columns.values[:7]):
sm.qqplot(data[val], fit = True, line='q', ax=ax[i])
ax[i].legend([val])
plt.show()
print(data.shape) | code |
50210546/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df[df['position'] != 'top'][df['position'] != 'bottom'][df['position'] != 'middle'][df['position'] != 'leaderboard'][df['position'] != 'passback'] | code |
50210546/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1 | code |
50210546/cell_16 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp'
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1
df = pd.merge(df, df1, on='LINE_ITEM_NAME', how='right')
val = df[df['Actual_eCPM'] == '-'].index
df['Actual_eCPM'].iloc[val] = 0
df['Actual_eCPM'] = df['Actual_eCPM'].astype(str).astype('float64')
df['Actual_Revenue'] = 0
Total_Impr = df['Impressions'].sum()
df['Actual_Revenue'] = Total_Impr * df['Actual_eCPM'] | code |
50210546/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp'
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1
df = pd.merge(df, df1, on='LINE_ITEM_NAME', how='right')
val = df[df['Actual_eCPM'] == '-'].index
df['Actual_eCPM'].iloc[val] = 0
df['Actual_eCPM'] = df['Actual_eCPM'].astype(str).astype('float64')
df['Actual_Revenue'] = 0
Total_Impr = df['Impressions'].sum()
df['Actual_Revenue'] = Total_Impr * df['Actual_eCPM']
df.dtypes | code |
50210546/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp'
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1
df = pd.merge(df, df1, on='LINE_ITEM_NAME', how='right')
val = df[df['Actual_eCPM'] == '-'].index
df['Actual_eCPM'].iloc[val] = 0
df['Actual_eCPM'] = df['Actual_eCPM'].astype(str).astype('float64')
df['Actual_Revenue'] = 0
Total_Impr = df['Impressions'].sum()
df['Actual_Revenue'] = Total_Impr * df['Actual_eCPM']
df.dtypes
df1 = df[df['amp_or_non_amp'] == 'Amp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
df2 = df[df['amp_or_non_amp'] == 'Nonmp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
frames = [df1, df2]
dff = pd.concat(frames, axis=0, ignore_index=True)
dff | code |
50210546/cell_10 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp' | code |
50210546/cell_27 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df['position'] = np.nan
list = ['top']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'top'
list = ['bottom']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'bottom'
list = ['middle']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'middle'
list = ['leaderboard']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'leaderboard'
list = ['passback']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['position'].iloc[my_ind] = 'passback'
df['story'] = np.nan
df['story'] = df.AD_UNIT_NAME.str.split('_', expand=True)[2]
df['amp_or_non_amp'] = 'Nonamp'
list = ['amp']
val = df['AD_UNIT_NAME'].apply(lambda x: any([k in x for k in list]))
my_ind = [i for i in val.index if val[i]]
df['amp_or_non_amp'].iloc[my_ind] = 'Amp'
df1 = pd.read_csv('/kaggle/input/dash-assignment/Actual_eCPM.csv')
df1
df = pd.merge(df, df1, on='LINE_ITEM_NAME', how='right')
val = df[df['Actual_eCPM'] == '-'].index
df['Actual_eCPM'].iloc[val] = 0
df['Actual_eCPM'] = df['Actual_eCPM'].astype(str).astype('float64')
df['Actual_Revenue'] = 0
Total_Impr = df['Impressions'].sum()
df['Actual_Revenue'] = Total_Impr * df['Actual_eCPM']
df.dtypes
df1 = df[df['amp_or_non_amp'] == 'Amp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
df2 = df[df['amp_or_non_amp'] == 'Nonmp'][df['eCPM'] > 77][df['Revenue'] > 455]['position']
frames = [df1, df2]
dff = pd.concat(frames, axis=0, ignore_index=True)
df = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df
data = df
data.columns | code |
50210546/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_excel('/kaggle/input/dash-assignment/DFP.xlsx', 'Data')
df1
df = df1
df.head(3) | code |
73096186/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr() | code |
73096186/cell_30 | [
"text_plain_output_1.png"
] | from category_encoders import OneHotEncoder, OrdinalEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.metrics import roc_curve, plot_roc_curve, mean_absolute_error, mean_squared_error, accuracy_score
from xgboost import XGBRegressor
y_pred = [y_train.mean()] * len(y_train)
mean_baseline_pred = y_train.mean()
baseline_mae = mean_absolute_error(y_train, y_pred)
baseline_rmse = mean_squared_error(y_train, y_pred, squared=False)
onehot = OneHotEncoder(use_cat_names=True)
onehot_fit = onehot.fit(X_train)
XT_train = onehot.transform(X_train)
XT_val = onehot.transform(X_val)
simp = SimpleImputer(strategy='mean')
simp_fit = simp.fit(XT_train)
XT_train = simp.transform(XT_train)
XT_val = simp.transform(XT_val)
model_lr = LinearRegression()
model_r = Ridge()
model_r.fit(XT_train, y_train)
model_lr.fit(XT_train, y_train)
def check_metrics(model):
pass
model = [model_r, model_lr]
for m in model:
check_metrics(m)
ordinal = OrdinalEncoder()
ordinal_fit = ordinal.fit(X_train)
XT_train = ordinal.transform(X_train)
XT_val = ordinal.transform(X_val)
simp = SimpleImputer(strategy='mean')
simp_fit = simp.fit(XT_train)
XT_train = simp.transform(XT_train)
XT_val = simp.transform(XT_val)
model_rfr = RandomForestRegressor()
model_xgbr = XGBRegressor()
model_rfr.fit(XT_train, y_train)
model_xgbr.fit(XT_train, y_train)
def check_metrics(model):
print(model)
print('===================================================================')
print('Training MAE:', mean_absolute_error(y_train, model.predict(XT_train)))
print('-------------------------------------------------------------------')
print('Validation MAE:', mean_absolute_error(y_val, model.predict(XT_val)))
print('-------------------------------------------------------------------')
print('Validation R2 score:', model.score(XT_val, y_val))
print('===================================================================')
model = [model_xgbr, model_rfr]
for m in model:
check_metrics(m) | code |
73096186/cell_33 | [
"text_plain_output_1.png"
] | from category_encoders import OneHotEncoder, OrdinalEncoder
from sklearn.impute import SimpleImputer
from sklearn.metrics import roc_curve, plot_roc_curve, mean_absolute_error, mean_squared_error, accuracy_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, RandomizedSearchCV
from sklearn.pipeline import make_pipeline
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr()
correlations = df.corr(method='pearson')
def wrangle(filepath):
'''
,,,,,,,
(\\-"""-/) / | | / \\ 0 0 // \\_o_// /\\ /
/` `\\ | \\,/
/ \\ |
\\ ( ) / |
/ \\_)-(_/ \\ |
| /_____\\ | /
\\ \\ N.C / / /
\\ '.___.' / /
.' \\-=-/ '.
/ /` `\\ (//./ \\.\\)
`"` `"`
'''
df = pd.read_csv(filepath, parse_dates=['time'], index_col='time')
df.columns = df.columns.str.replace(' ', '_').str.replace('-', '_')
df.index = pd.to_datetime(df.index, utc=True)
df.drop(columns=['generation_fossil_oil_shale', 'generation_fossil_coal_derived_gas', 'generation_fossil_peat', 'generation_geothermal', 'generation_hydro_pumped_storage_aggregated', 'generation_marine', 'generation_wind_offshore', 'forecast_wind_offshore_eday_ahead', 'price_day_ahead', 'total_load_forecast', 'forecast_wind_onshore_day_ahead', 'forecast_solar_day_ahead'], inplace=True)
df = df.drop(pd.Timestamp('2014-12-31 23:00:00+00:00'))
df = df.sort_index()
condition_winter = (df.index.month >= 1) & (df.index.month <= 3)
condtion_spring = (df.index.month >= 4) & (df.index.month <= 6)
condition_summer = (df.index.month >= 7) & (df.index.month <= 9)
condition_automn = (df.index.month >= 10) @ (df.index.month <= 12)
df['season'] = np.where(condition_winter, 'winter', np.where(condtion_spring, 'spring', np.where(condition_summer, 'summer', np.where(condition_automn, 'automn', np.nan))))
return df
df = wrangle('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
y_pred = [y_train.mean()] * len(y_train)
mean_baseline_pred = y_train.mean()
baseline_mae = mean_absolute_error(y_train, y_pred)
baseline_rmse = mean_squared_error(y_train, y_pred, squared=False)
pipe_rs_xgb = make_pipeline(OrdinalEncoder(), SimpleImputer(), XGBRegressor(random_state=42, n_jobs=-1))
paramajama = {'simpleimputer__strategy': ['meadian', 'mean'], 'xgbregressor__max_depth': range(5, 35, 5), 'xgbregressor__learning_rate': np.arange(0.2, 1, 0.1), 'xgbregressor__booster': ['gbtree', 'gblinear', 'dart'], 'xgbregressor__min_child_weight': range(1, 10, 1), 'xgbregressor__gamma': np.arange(0, 1, 0.1), 'xgbregressor__max_delta_step': np.arange(0, 1, 0.1), 'xgbregressor__subsample': np.arange(0.5, 1, 0.1)}
model_rs_xgbr = RandomizedSearchCV(pipe_rs_xgb, param_distributions=paramajama, n_iter=20, n_jobs=-1)
model_rs_xgbr.fit(X_train, y_train)
print('Training MAE:', mean_absolute_error(y_train, model_rs_xgbr.predict(X_train)))
print('-------------------------------------------------------------------')
print('Validation MAE:', mean_absolute_error(y_val, model_rs_xgbr.predict(X_val)))
print('-------------------------------------------------------------------')
print('R2 score:', model_rs_xgbr.score(X_val, y_val))
print('===================================================================')
model_rs_xgbr.best_params_ | code |
73096186/cell_44 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from category_encoders import OneHotEncoder, OrdinalEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.metrics import roc_curve, plot_roc_curve, mean_absolute_error, mean_squared_error, accuracy_score
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import shap
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr()
correlations = df.corr(method='pearson')
def wrangle(filepath):
'''
,,,,,,,
(\\-"""-/) / | | / \\ 0 0 // \\_o_// /\\ /
/` `\\ | \\,/
/ \\ |
\\ ( ) / |
/ \\_)-(_/ \\ |
| /_____\\ | /
\\ \\ N.C / / /
\\ '.___.' / /
.' \\-=-/ '.
/ /` `\\ (//./ \\.\\)
`"` `"`
'''
df = pd.read_csv(filepath, parse_dates=['time'], index_col='time')
df.columns = df.columns.str.replace(' ', '_').str.replace('-', '_')
df.index = pd.to_datetime(df.index, utc=True)
df.drop(columns=['generation_fossil_oil_shale', 'generation_fossil_coal_derived_gas', 'generation_fossil_peat', 'generation_geothermal', 'generation_hydro_pumped_storage_aggregated', 'generation_marine', 'generation_wind_offshore', 'forecast_wind_offshore_eday_ahead', 'price_day_ahead', 'total_load_forecast', 'forecast_wind_onshore_day_ahead', 'forecast_solar_day_ahead'], inplace=True)
df = df.drop(pd.Timestamp('2014-12-31 23:00:00+00:00'))
df = df.sort_index()
condition_winter = (df.index.month >= 1) & (df.index.month <= 3)
condtion_spring = (df.index.month >= 4) & (df.index.month <= 6)
condition_summer = (df.index.month >= 7) & (df.index.month <= 9)
condition_automn = (df.index.month >= 10) @ (df.index.month <= 12)
df['season'] = np.where(condition_winter, 'winter', np.where(condtion_spring, 'spring', np.where(condition_summer, 'summer', np.where(condition_automn, 'automn', np.nan))))
return df
df = wrangle('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
y_pred = [y_train.mean()] * len(y_train)
mean_baseline_pred = y_train.mean()
baseline_mae = mean_absolute_error(y_train, y_pred)
baseline_rmse = mean_squared_error(y_train, y_pred, squared=False)
onehot = OneHotEncoder(use_cat_names=True)
onehot_fit = onehot.fit(X_train)
XT_train = onehot.transform(X_train)
XT_val = onehot.transform(X_val)
simp = SimpleImputer(strategy='mean')
simp_fit = simp.fit(XT_train)
XT_train = simp.transform(XT_train)
XT_val = simp.transform(XT_val)
model_lr = LinearRegression()
model_r = Ridge()
model_r.fit(XT_train, y_train)
model_lr.fit(XT_train, y_train)
def check_metrics(model):
pass
model = [model_r, model_lr]
for m in model:
check_metrics(m)
ordinal = OrdinalEncoder()
ordinal_fit = ordinal.fit(X_train)
XT_train = ordinal.transform(X_train)
XT_val = ordinal.transform(X_val)
simp = SimpleImputer(strategy='mean')
simp_fit = simp.fit(XT_train)
XT_train = simp.transform(XT_train)
XT_val = simp.transform(XT_val)
model_rfr = RandomForestRegressor()
model_xgbr = XGBRegressor()
model_rfr.fit(XT_train, y_train)
model_xgbr.fit(XT_train, y_train)
def check_metrics(model):
pass
model = [model_xgbr, model_rfr]
for m in model:
check_metrics(m)
samp = pd.DataFrame(XT_val, columns=ordinal_fit.get_feature_names())
explainer = shap.TreeExplainer(model_xgbr)
shap_values = explainer(samp.head(1))
shap.plots.waterfall(shap_values[0]) | code |
73096186/cell_6 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | import shap
import seaborn as sns
import plotly.express as px
import matplotlib.pyplot as plt
import eli5
from eli5.sklearn import PermutationImportance
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 50)
from xgboost import XGBRegressor
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import permutation_importance
from sklearn.linear_model import Ridge, LinearRegression
from category_encoders import OneHotEncoder, OrdinalEncoder
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, RandomizedSearchCV
from sklearn.model_selection import train_test_split, cross_val_score, validation_curve, GridSearchCV
from sklearn.metrics import roc_curve, plot_roc_curve, mean_absolute_error, mean_squared_error, accuracy_score
import warnings
warnings.filterwarnings('ignore') | code |
73096186/cell_29 | [
"text_html_output_2.png"
] | from category_encoders import OneHotEncoder, OrdinalEncoder
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.metrics import roc_curve, plot_roc_curve, mean_absolute_error, mean_squared_error, accuracy_score
y_pred = [y_train.mean()] * len(y_train)
mean_baseline_pred = y_train.mean()
baseline_mae = mean_absolute_error(y_train, y_pred)
baseline_rmse = mean_squared_error(y_train, y_pred, squared=False)
onehot = OneHotEncoder(use_cat_names=True)
onehot_fit = onehot.fit(X_train)
XT_train = onehot.transform(X_train)
XT_val = onehot.transform(X_val)
simp = SimpleImputer(strategy='mean')
simp_fit = simp.fit(XT_train)
XT_train = simp.transform(XT_train)
XT_val = simp.transform(XT_val)
model_lr = LinearRegression()
model_r = Ridge()
model_r.fit(XT_train, y_train)
model_lr.fit(XT_train, y_train)
def check_metrics(model):
print(model)
print('===================================================================')
print('Training MAE:', mean_absolute_error(y_train, model.predict(XT_train)))
print('-------------------------------------------------------------------')
print('Validation MAE:', mean_absolute_error(y_val, model.predict(XT_val)))
print('-------------------------------------------------------------------')
print('Validation R2 score:', model.score(XT_val, y_val))
print('===================================================================')
model = [model_r, model_lr]
for m in model:
check_metrics(m) | code |
73096186/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
df.info() | code |
73096186/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73096186/cell_45 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from category_encoders import OneHotEncoder, OrdinalEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.metrics import roc_curve, plot_roc_curve, mean_absolute_error, mean_squared_error, accuracy_score
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import shap
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr()
correlations = df.corr(method='pearson')
def wrangle(filepath):
'''
,,,,,,,
(\\-"""-/) / | | / \\ 0 0 // \\_o_// /\\ /
/` `\\ | \\,/
/ \\ |
\\ ( ) / |
/ \\_)-(_/ \\ |
| /_____\\ | /
\\ \\ N.C / / /
\\ '.___.' / /
.' \\-=-/ '.
/ /` `\\ (//./ \\.\\)
`"` `"`
'''
df = pd.read_csv(filepath, parse_dates=['time'], index_col='time')
df.columns = df.columns.str.replace(' ', '_').str.replace('-', '_')
df.index = pd.to_datetime(df.index, utc=True)
df.drop(columns=['generation_fossil_oil_shale', 'generation_fossil_coal_derived_gas', 'generation_fossil_peat', 'generation_geothermal', 'generation_hydro_pumped_storage_aggregated', 'generation_marine', 'generation_wind_offshore', 'forecast_wind_offshore_eday_ahead', 'price_day_ahead', 'total_load_forecast', 'forecast_wind_onshore_day_ahead', 'forecast_solar_day_ahead'], inplace=True)
df = df.drop(pd.Timestamp('2014-12-31 23:00:00+00:00'))
df = df.sort_index()
condition_winter = (df.index.month >= 1) & (df.index.month <= 3)
condtion_spring = (df.index.month >= 4) & (df.index.month <= 6)
condition_summer = (df.index.month >= 7) & (df.index.month <= 9)
condition_automn = (df.index.month >= 10) @ (df.index.month <= 12)
df['season'] = np.where(condition_winter, 'winter', np.where(condtion_spring, 'spring', np.where(condition_summer, 'summer', np.where(condition_automn, 'automn', np.nan))))
return df
df = wrangle('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
y_pred = [y_train.mean()] * len(y_train)
mean_baseline_pred = y_train.mean()
baseline_mae = mean_absolute_error(y_train, y_pred)
baseline_rmse = mean_squared_error(y_train, y_pred, squared=False)
onehot = OneHotEncoder(use_cat_names=True)
onehot_fit = onehot.fit(X_train)
XT_train = onehot.transform(X_train)
XT_val = onehot.transform(X_val)
simp = SimpleImputer(strategy='mean')
simp_fit = simp.fit(XT_train)
XT_train = simp.transform(XT_train)
XT_val = simp.transform(XT_val)
model_lr = LinearRegression()
model_r = Ridge()
model_r.fit(XT_train, y_train)
model_lr.fit(XT_train, y_train)
def check_metrics(model):
pass
model = [model_r, model_lr]
for m in model:
check_metrics(m)
ordinal = OrdinalEncoder()
ordinal_fit = ordinal.fit(X_train)
XT_train = ordinal.transform(X_train)
XT_val = ordinal.transform(X_val)
simp = SimpleImputer(strategy='mean')
simp_fit = simp.fit(XT_train)
XT_train = simp.transform(XT_train)
XT_val = simp.transform(XT_val)
model_rfr = RandomForestRegressor()
model_xgbr = XGBRegressor()
model_rfr.fit(XT_train, y_train)
model_xgbr.fit(XT_train, y_train)
def check_metrics(model):
pass
model = [model_xgbr, model_rfr]
for m in model:
check_metrics(m)
samp = pd.DataFrame(XT_val, columns=ordinal_fit.get_feature_names())
explainer = shap.TreeExplainer(model_xgbr)
shap_values = explainer()
shap.plots.waterfall(shap_values[0])
explainer = shap.TreeExplainer(model_xgbr)
shap_values = explainer.shap_values(samp.head(1))
shap.initjs()
shap.force_plot(base_value=explainer.expected_value, shap_values=shap_values, features=samp.head(1)) | code |
73096186/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
df.head(5) | code |
73096186/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr()
correlations = df.corr(method='pearson')
plt.figure(figsize=(15, 12.5))
sns.heatmap(df.corr(), annot=True, cmap='Blues', linewidth=0.9)
plt.show() | code |
73096186/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr()
correlations = df.corr(method='pearson')
sns.pairplot(df) | code |
73096186/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr()
correlations = df.corr(method='pearson')
plt.figure(figsize=(15, 10))
sns.histplot(df, x='price actual') | code |
73096186/cell_46 | [
"image_output_1.png"
] | from category_encoders import OneHotEncoder, OrdinalEncoder
from eli5.sklearn import PermutationImportance
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.metrics import roc_curve, plot_roc_curve, mean_absolute_error, mean_squared_error, accuracy_score
from xgboost import XGBRegressor
import eli5
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import shap
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr()
correlations = df.corr(method='pearson')
def wrangle(filepath):
'''
,,,,,,,
(\\-"""-/) / | | / \\ 0 0 // \\_o_// /\\ /
/` `\\ | \\,/
/ \\ |
\\ ( ) / |
/ \\_)-(_/ \\ |
| /_____\\ | /
\\ \\ N.C / / /
\\ '.___.' / /
.' \\-=-/ '.
/ /` `\\ (//./ \\.\\)
`"` `"`
'''
df = pd.read_csv(filepath, parse_dates=['time'], index_col='time')
df.columns = df.columns.str.replace(' ', '_').str.replace('-', '_')
df.index = pd.to_datetime(df.index, utc=True)
df.drop(columns=['generation_fossil_oil_shale', 'generation_fossil_coal_derived_gas', 'generation_fossil_peat', 'generation_geothermal', 'generation_hydro_pumped_storage_aggregated', 'generation_marine', 'generation_wind_offshore', 'forecast_wind_offshore_eday_ahead', 'price_day_ahead', 'total_load_forecast', 'forecast_wind_onshore_day_ahead', 'forecast_solar_day_ahead'], inplace=True)
df = df.drop(pd.Timestamp('2014-12-31 23:00:00+00:00'))
df = df.sort_index()
condition_winter = (df.index.month >= 1) & (df.index.month <= 3)
condtion_spring = (df.index.month >= 4) & (df.index.month <= 6)
condition_summer = (df.index.month >= 7) & (df.index.month <= 9)
condition_automn = (df.index.month >= 10) @ (df.index.month <= 12)
df['season'] = np.where(condition_winter, 'winter', np.where(condtion_spring, 'spring', np.where(condition_summer, 'summer', np.where(condition_automn, 'automn', np.nan))))
return df
df = wrangle('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
y_pred = [y_train.mean()] * len(y_train)
mean_baseline_pred = y_train.mean()
baseline_mae = mean_absolute_error(y_train, y_pred)
baseline_rmse = mean_squared_error(y_train, y_pred, squared=False)
onehot = OneHotEncoder(use_cat_names=True)
onehot_fit = onehot.fit(X_train)
XT_train = onehot.transform(X_train)
XT_val = onehot.transform(X_val)
simp = SimpleImputer(strategy='mean')
simp_fit = simp.fit(XT_train)
XT_train = simp.transform(XT_train)
XT_val = simp.transform(XT_val)
model_lr = LinearRegression()
model_r = Ridge()
model_r.fit(XT_train, y_train)
model_lr.fit(XT_train, y_train)
def check_metrics(model):
pass
model = [model_r, model_lr]
for m in model:
check_metrics(m)
ordinal = OrdinalEncoder()
ordinal_fit = ordinal.fit(X_train)
XT_train = ordinal.transform(X_train)
XT_val = ordinal.transform(X_val)
simp = SimpleImputer(strategy='mean')
simp_fit = simp.fit(XT_train)
XT_train = simp.transform(XT_train)
XT_val = simp.transform(XT_val)
model_rfr = RandomForestRegressor()
model_xgbr = XGBRegressor()
model_rfr.fit(XT_train, y_train)
model_xgbr.fit(XT_train, y_train)
def check_metrics(model):
pass
model = [model_xgbr, model_rfr]
for m in model:
check_metrics(m)
samp = pd.DataFrame(XT_val, columns=ordinal_fit.get_feature_names())
explainer = shap.TreeExplainer(model_xgbr)
shap_values = explainer()
shap.plots.waterfall(shap_values[0])
explainer = shap.TreeExplainer(model_xgbr)
shap_values = explainer.shap_values()
shap.initjs()
perm = PermutationImportance(model_xgbr, random_state=42).fit(XT_val, y_val)
eli5.show_weights(perm, feature_names=samp.columns.tolist()) | code |
73096186/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr()
correlations = df.corr(method='pearson')
print(correlations['price actual'].sort_values(ascending=False).to_string()) | code |
73096186/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr()
correlations = df.corr(method='pearson')
def wrangle(filepath):
'''
,,,,,,,
(\\-"""-/) / | | / \\ 0 0 // \\_o_// /\\ /
/` `\\ | \\,/
/ \\ |
\\ ( ) / |
/ \\_)-(_/ \\ |
| /_____\\ | /
\\ \\ N.C / / /
\\ '.___.' / /
.' \\-=-/ '.
/ /` `\\ (//./ \\.\\)
`"` `"`
'''
df = pd.read_csv(filepath, parse_dates=['time'], index_col='time')
df.columns = df.columns.str.replace(' ', '_').str.replace('-', '_')
df.index = pd.to_datetime(df.index, utc=True)
df.drop(columns=['generation_fossil_oil_shale', 'generation_fossil_coal_derived_gas', 'generation_fossil_peat', 'generation_geothermal', 'generation_hydro_pumped_storage_aggregated', 'generation_marine', 'generation_wind_offshore', 'forecast_wind_offshore_eday_ahead', 'price_day_ahead', 'total_load_forecast', 'forecast_wind_onshore_day_ahead', 'forecast_solar_day_ahead'], inplace=True)
df = df.drop(pd.Timestamp('2014-12-31 23:00:00+00:00'))
df = df.sort_index()
condition_winter = (df.index.month >= 1) & (df.index.month <= 3)
condtion_spring = (df.index.month >= 4) & (df.index.month <= 6)
condition_summer = (df.index.month >= 7) & (df.index.month <= 9)
condition_automn = (df.index.month >= 10) @ (df.index.month <= 12)
df['season'] = np.where(condition_winter, 'winter', np.where(condtion_spring, 'spring', np.where(condition_summer, 'summer', np.where(condition_automn, 'automn', np.nan))))
return df
df = wrangle('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
fig = px.scatter(df, x='total_load_actual', y='price_actual', facet_col='season', opacity=0.1, title='Price Per KW Hour Compaired To Total Energy Genereated Per Season', animation_frame=df.index.year)
fig.update_traces(marker=dict(size=12, line=dict(width=2, color='darkslateblue')), selector=dict(mode='markers')) | code |
73096186/cell_27 | [
"image_output_1.png"
] | from sklearn.metrics import roc_curve, plot_roc_curve, mean_absolute_error, mean_squared_error, accuracy_score
y_pred = [y_train.mean()] * len(y_train)
mean_baseline_pred = y_train.mean()
baseline_mae = mean_absolute_error(y_train, y_pred)
baseline_rmse = mean_squared_error(y_train, y_pred, squared=False)
print('Mean Price Per KW/h Baseline Pred:', mean_baseline_pred)
print('-------------------------------------------------------------------')
print('Baseline Mae:', baseline_mae)
print('-------------------------------------------------------------------')
print('Baseline RMSE:', baseline_rmse) | code |
73096186/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2) | code |
73096186/cell_36 | [
"text_plain_output_1.png"
] | from category_encoders import OneHotEncoder, OrdinalEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import roc_curve, plot_roc_curve, mean_absolute_error, mean_squared_error, accuracy_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, RandomizedSearchCV
from sklearn.pipeline import make_pipeline
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
round(df.isnull().sum() / len(df) * 100, 2)
df.corr()
correlations = df.corr(method='pearson')
def wrangle(filepath):
'''
,,,,,,,
(\\-"""-/) / | | / \\ 0 0 // \\_o_// /\\ /
/` `\\ | \\,/
/ \\ |
\\ ( ) / |
/ \\_)-(_/ \\ |
| /_____\\ | /
\\ \\ N.C / / /
\\ '.___.' / /
.' \\-=-/ '.
/ /` `\\ (//./ \\.\\)
`"` `"`
'''
df = pd.read_csv(filepath, parse_dates=['time'], index_col='time')
df.columns = df.columns.str.replace(' ', '_').str.replace('-', '_')
df.index = pd.to_datetime(df.index, utc=True)
df.drop(columns=['generation_fossil_oil_shale', 'generation_fossil_coal_derived_gas', 'generation_fossil_peat', 'generation_geothermal', 'generation_hydro_pumped_storage_aggregated', 'generation_marine', 'generation_wind_offshore', 'forecast_wind_offshore_eday_ahead', 'price_day_ahead', 'total_load_forecast', 'forecast_wind_onshore_day_ahead', 'forecast_solar_day_ahead'], inplace=True)
df = df.drop(pd.Timestamp('2014-12-31 23:00:00+00:00'))
df = df.sort_index()
condition_winter = (df.index.month >= 1) & (df.index.month <= 3)
condtion_spring = (df.index.month >= 4) & (df.index.month <= 6)
condition_summer = (df.index.month >= 7) & (df.index.month <= 9)
condition_automn = (df.index.month >= 10) @ (df.index.month <= 12)
df['season'] = np.where(condition_winter, 'winter', np.where(condtion_spring, 'spring', np.where(condition_summer, 'summer', np.where(condition_automn, 'automn', np.nan))))
return df
df = wrangle('../input/energy-consumption-generation-prices-and-weather/energy_dataset.csv')
y_pred = [y_train.mean()] * len(y_train)
mean_baseline_pred = y_train.mean()
baseline_mae = mean_absolute_error(y_train, y_pred)
baseline_rmse = mean_squared_error(y_train, y_pred, squared=False)
pipe_rs_xgb = make_pipeline(OrdinalEncoder(), SimpleImputer(), XGBRegressor(random_state=42, n_jobs=-1))
paramajama = {'simpleimputer__strategy': ['meadian', 'mean'], 'xgbregressor__max_depth': range(5, 35, 5), 'xgbregressor__learning_rate': np.arange(0.2, 1, 0.1), 'xgbregressor__booster': ['gbtree', 'gblinear', 'dart'], 'xgbregressor__min_child_weight': range(1, 10, 1), 'xgbregressor__gamma': np.arange(0, 1, 0.1), 'xgbregressor__max_delta_step': np.arange(0, 1, 0.1), 'xgbregressor__subsample': np.arange(0.5, 1, 0.1)}
model_rs_xgbr = RandomizedSearchCV(pipe_rs_xgb, param_distributions=paramajama, n_iter=20, n_jobs=-1)
model_rs_xgbr.fit(X_train, y_train)
model_rs_xgbr.best_params_
pipe_rs_rfr = make_pipeline(OrdinalEncoder(), SimpleImputer(), RandomForestRegressor(random_state=42, n_jobs=-1))
pramajams = {'simpleimputer__strategy': ['mean', 'meadian'], 'randomforestregressor__max_depth': range(5, 35, 5), 'randomforestregressor__n_estimators': range(25, 200, 10), 'randomforestregressor__max_samples': np.arange(0.2, 1, 0.1), 'randomforestregressor__max_features': ['sqrt', 'log2'], 'randomforestregressor__min_samples_split': np.arange(2, 5, 1)}
model_rs_rfr = RandomizedSearchCV(pipe_rs_rfr, param_distributions=pramajams, n_iter=20, n_jobs=-1)
model_rs_rfr.fit(X_train, y_train)
print('Training MAE:', mean_absolute_error(y_train, model_rs_rfr.predict(X_train)))
print('-------------------------------------------------------------------')
print('Validation MAE:', mean_absolute_error(y_val, model_rs_rfr.predict(X_val)))
print('-------------------------------------------------------------------')
print('R2 score:', model_rs_rfr.score(X_val, y_val))
print('===================================================================')
model_rs_rfr.best_params_ | code |
74070897/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.