path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
74061207/cell_6 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/sales-store-product-details/Salesstore.csv')
df.isna().sum()
plot_1 = sns.histplot(data=df, x='Ship_Mode')
plt.show()
plot_2 = sns.histplot(data=df, x='Order_Priority')
plt.show()
plot_3 = sns.histplot(data=df, x='Customer_Segment')
plt.show()
plot_4 = sns.histplot(data=df, x='Product_Category')
plt.show()
plot_5 = sns.histplot(data=df, x='Product_Container')
plt.show() | code |
74061207/cell_7 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/sales-store-product-details/Salesstore.csv')
df.isna().sum()
plot_1=sns.histplot(data=df, x='Ship_Mode')
plt.show()
plot_2=sns.histplot(data=df, x='Order_Priority')
plt.show()
plot_3=sns.histplot(data=df, x='Customer_Segment')
plt.show()
plot_4=sns.histplot(data=df, x='Product_Category')
plt.show()
plot_5=sns.histplot(data=df, x='Product_Container')
plt.show()
plot_6 = sns.barplot(data=df, x='Order_Priority', y='Profit', hue='Ship_Mode')
plt.show()
plot_7 = sns.barplot(data=df, x='Region', y='Profit', hue='Ship_Mode')
plt.xticks(rotation=45)
plt.show()
plot_8 = sns.barplot(data=df, x='Region', y='Sales', hue='Ship_Mode')
plt.xticks(rotation=45)
plt.show()
plot_9 = sns.barplot(data=df, x='Region', y='Profit', hue='Customer_Segment')
plt.xticks(rotation=45)
plt.show()
plot_10 = sns.barplot(data=df, x='Region', y='Profit', hue='Product_Category')
plt.xticks(rotation=45)
plt.show()
plot_11 = sns.lineplot(data=df, x='Order_Quantity', y='Sales')
plt.show()
plot_12 = sns.lmplot(data=df, x='Order_Quantity', y='Profit')
plt.show() | code |
74061207/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/sales-store-product-details/Salesstore.csv')
df.isna().sum()
plot_1=sns.histplot(data=df, x='Ship_Mode')
plt.show()
plot_2=sns.histplot(data=df, x='Order_Priority')
plt.show()
plot_3=sns.histplot(data=df, x='Customer_Segment')
plt.show()
plot_4=sns.histplot(data=df, x='Product_Category')
plt.show()
plot_5=sns.histplot(data=df, x='Product_Container')
plt.show()
plot_6=sns.barplot(data=df,x='Order_Priority',y='Profit',hue='Ship_Mode')
plt.show()
plot_7=sns.barplot(data=df,x='Region',y='Profit',hue='Ship_Mode')
plt.xticks(rotation=45)
plt.show()
plot_8=sns.barplot(data=df,x='Region',y='Sales',hue='Ship_Mode')
plt.xticks(rotation=45)
plt.show()
plot_9=sns.barplot(data=df,x='Region',y='Profit',hue='Customer_Segment')
plt.xticks(rotation=45)
plt.show()
plot_10=sns.barplot(data=df,x='Region',y='Profit',hue='Product_Category')
plt.xticks(rotation=45)
plt.show()
plot_11=sns.lineplot(data=df,x='Order_Quantity',y='Sales')
plt.show()
plot_12=sns.lmplot(data=df,x='Order_Quantity',y='Profit')
plt.show()
plot_14 = sns.barplot(data=df, x='Product_Category', y='Profit', hue='Product_Container')
plt.show() | code |
74061207/cell_3 | [
"image_output_5.png",
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/sales-store-product-details/Salesstore.csv')
print(df.shape)
df.head() | code |
74061207/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/sales-store-product-details/Salesstore.csv')
df.isna().sum()
print(df.Order_Priority.unique(), df.Ship_Mode.unique(), df.Region.unique(), df.Customer_Segment.unique(), df.Product_Category.unique(), df.Product_Container.unique()) | code |
74061207/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/sales-store-product-details/Salesstore.csv')
df.isna().sum()
plot_1=sns.histplot(data=df, x='Ship_Mode')
plt.show()
plot_2=sns.histplot(data=df, x='Order_Priority')
plt.show()
plot_3=sns.histplot(data=df, x='Customer_Segment')
plt.show()
plot_4=sns.histplot(data=df, x='Product_Category')
plt.show()
plot_5=sns.histplot(data=df, x='Product_Container')
plt.show()
plot_6=sns.barplot(data=df,x='Order_Priority',y='Profit',hue='Ship_Mode')
plt.show()
plot_7=sns.barplot(data=df,x='Region',y='Profit',hue='Ship_Mode')
plt.xticks(rotation=45)
plt.show()
plot_8=sns.barplot(data=df,x='Region',y='Sales',hue='Ship_Mode')
plt.xticks(rotation=45)
plt.show()
plot_9=sns.barplot(data=df,x='Region',y='Profit',hue='Customer_Segment')
plt.xticks(rotation=45)
plt.show()
plot_10=sns.barplot(data=df,x='Region',y='Profit',hue='Product_Category')
plt.xticks(rotation=45)
plt.show()
plot_11=sns.lineplot(data=df,x='Order_Quantity',y='Sales')
plt.show()
plot_12=sns.lmplot(data=df,x='Order_Quantity',y='Profit')
plt.show()
plot_14=sns.barplot(data=df,x='Product_Category',y='Profit',hue='Product_Container')
plt.show()
plot_11=sns.regplot(data=df,x='Sales',y='Profit')
plt.show()
# Linear relationship between the profits and sales
corrMatt = df[['Order_ID', 'Order_Priority', 'Order_Quantity', 'Sales', 'Ship_Mode', 'Region', 'Customer_Segment', 'Product_Category', 'Product_Sub-Category', 'Product_Name', 'Product_Container', 'Profit']].corr()
mask = np.array(corrMatt)
mask[np.tril_indices_from(mask)] = False
fig, ax = plt.subplots()
fig.set_size_inches(20, 10)
sns.heatmap(corrMatt, mask=mask, vmax=0.8, square=True, annot=True) | code |
50224445/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
fraud_data = data[data['isFraud'] == 1]
fraud_data.head() | code |
50224445/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
data[data['isFraud'] == 1]['type'].unique() | code |
50224445/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
100 * data['isFraud'].value_counts() / len(data) | code |
50224445/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
data.info() | code |
50224445/cell_30 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
fraud_data = data[data['isFraud'] == 1]
safe_data = data[data['isFraud'] == 0]
sampled_data = safe_data.sample(n=len(fraud_data))
df = pd.concat([fraud_data, sampled_data])
100 * df.isFraud.value_counts() / len(df)
df.info() | code |
50224445/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
fraud_data = data[data['isFraud'] == 1]
safe_data = data[data['isFraud'] == 0]
sampled_data = safe_data.sample(n=len(fraud_data))
df = pd.concat([fraud_data, sampled_data])
100 * df.isFraud.value_counts() / len(df) | code |
50224445/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
data['type'].unique() | code |
50224445/cell_50 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train) | code |
50224445/cell_52 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score, classification_report, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
from sklearn.metrics import accuracy_score, f1_score, classification_report, confusion_matrix
preds = rf_model.predict(X_test)
print('classification_report')
print(classification_report(y_test, preds)) | code |
50224445/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50224445/cell_32 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
fraud_data = data[data['isFraud'] == 1]
safe_data = data[data['isFraud'] == 0]
sampled_data = safe_data.sample(n=len(fraud_data))
df = pd.concat([fraud_data, sampled_data])
100 * df.isFraud.value_counts() / len(df)
df['type'].unique() | code |
50224445/cell_51 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score, classification_report, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
from sklearn.metrics import accuracy_score, f1_score, classification_report, confusion_matrix
preds = rf_model.predict(X_test)
print('Accuracy Score :', accuracy_score(y_test, preds))
print('F1-score :', f1_score(y_test, preds)) | code |
50224445/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
data['isFraud'].value_counts() | code |
50224445/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
data['isFlaggedFraud'].value_counts() | code |
50224445/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
fraud_data = data[data['isFraud'] == 1]
safe_data = data[data['isFraud'] == 0]
sampled_data = safe_data.sample(n=len(fraud_data))
df = pd.concat([fraud_data, sampled_data])
100 * df.isFraud.value_counts() / len(df)
df.head() | code |
50224445/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/paysim1/PS_20174392719_1491204439457_log.csv')
fraud_data = data[data['isFraud'] == 1]
len(fraud_data) | code |
50224445/cell_53 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score, classification_report, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
from sklearn.metrics import accuracy_score, f1_score, classification_report, confusion_matrix
preds = rf_model.predict(X_test)
print('Confusion_matrix : ')
print(confusion_matrix(y_test, preds)) | code |
2042925/cell_9 | [
"text_html_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
data = pd.read_csv('../input/shot_logs.csv')
playerIDList = list(data['player_id'].unique())
defenderIDList = list(data['CLOSEST_DEFENDER_PLAYER_ID'].unique())
attackerIDList = list(data['player_id'].unique())
playerData = pd.DataFrame(index=playerIDList, columns=['player', 'made', 'missed', 'fg_percentage', 'made_against', 'missed_against', 'fg_percentage_against'])
for defenderID in defenderIDList:
name = data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['CLOSEST_DEFENDER'].iloc[0]
made = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[defenderID, 'player'] = name
playerData.at[defenderID, 'made_against'] = made
playerData.at[defenderID, 'missed_against'] = missed
playerData.at[defenderID, 'fg_percentage_against'] = percentage
for attackerID in attackerIDList:
made = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[attackerID, 'made'] = made
playerData.at[attackerID, 'missed'] = missed
playerData.at[attackerID, 'fg_percentage'] = percentage
newPlayerData = playerData.sort_values('fg_percentage_against')
newPlayerData2 = newPlayerData.drop(newPlayerData[newPlayerData.missed_against < 200].index)
ESPNRankTop30 = ['James, LeBron', 'Paul, Chris', 'Davis, Anthony', 'Westbrook, Russell', 'Griffin, Blake', 'Curry, Stephen', 'Love, Kevin', 'Durant, Kevin', 'Harden, James', 'Howard, Dwight', 'Anthony, Carmelo', 'Noah, Joakim', 'Aldridge, LaMarcus', 'Gasol, Marc', 'Parker, Tony', 'Lillard, Damian', 'Nowitzki, Dirk', 'Wall, John', 'Cousins, DeMarcus', 'Bosh, Chris', 'Duncan, Tim', 'Jefferson, Al', 'Irving, Kyrie', 'Leonard, Kawhi', 'Ibaka, Serge', 'Horford, Al', 'Dragic, Goran', 'Rose, Derrick', 'Lowry, Kyle', 'Drummond, Andre']
newPlayerData3 = newPlayerData[newPlayerData['player'].isin(ESPNRankTop30)]
newPlayerData3 = newPlayerData3.sort_values('player')
newPlayerData3['ranking'] = 0
for i in range(len(ESPNRankTop30)):
newPlayerData3.loc[newPlayerData3['player'] == ESPNRankTop30[i], 'ranking'] = str(i + 1)
newPlayerData3 | code |
2042925/cell_4 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
data = pd.read_csv('../input/shot_logs.csv')
playerIDList = list(data['player_id'].unique())
defenderIDList = list(data['CLOSEST_DEFENDER_PLAYER_ID'].unique())
attackerIDList = list(data['player_id'].unique())
playerData = pd.DataFrame(index=playerIDList, columns=['player', 'made', 'missed', 'fg_percentage', 'made_against', 'missed_against', 'fg_percentage_against'])
for defenderID in defenderIDList:
name = data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['CLOSEST_DEFENDER'].iloc[0]
made = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[defenderID, 'player'] = name
playerData.at[defenderID, 'made_against'] = made
playerData.at[defenderID, 'missed_against'] = missed
playerData.at[defenderID, 'fg_percentage_against'] = percentage
for attackerID in attackerIDList:
made = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[attackerID, 'made'] = made
playerData.at[attackerID, 'missed'] = missed
playerData.at[attackerID, 'fg_percentage'] = percentage
newPlayerData = playerData.sort_values('fg_percentage_against')
newPlayerData2 = newPlayerData.drop(newPlayerData[newPlayerData.missed_against < 200].index) | code |
2042925/cell_20 | [
"text_html_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
data = pd.read_csv('../input/shot_logs.csv')
playerIDList = list(data['player_id'].unique())
defenderIDList = list(data['CLOSEST_DEFENDER_PLAYER_ID'].unique())
attackerIDList = list(data['player_id'].unique())
playerData = pd.DataFrame(index=playerIDList, columns=['player', 'made', 'missed', 'fg_percentage', 'made_against', 'missed_against', 'fg_percentage_against'])
for defenderID in defenderIDList:
name = data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['CLOSEST_DEFENDER'].iloc[0]
made = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[defenderID, 'player'] = name
playerData.at[defenderID, 'made_against'] = made
playerData.at[defenderID, 'missed_against'] = missed
playerData.at[defenderID, 'fg_percentage_against'] = percentage
for attackerID in attackerIDList:
made = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[attackerID, 'made'] = made
playerData.at[attackerID, 'missed'] = missed
playerData.at[attackerID, 'fg_percentage'] = percentage
newPlayerData = playerData.sort_values('fg_percentage_against')
newPlayerData2 = newPlayerData.drop(newPlayerData[newPlayerData.missed_against < 200].index)
ESPNRankTop30 = ['James, LeBron', 'Paul, Chris', 'Davis, Anthony', 'Westbrook, Russell', 'Griffin, Blake', 'Curry, Stephen', 'Love, Kevin', 'Durant, Kevin', 'Harden, James', 'Howard, Dwight', 'Anthony, Carmelo', 'Noah, Joakim', 'Aldridge, LaMarcus', 'Gasol, Marc', 'Parker, Tony', 'Lillard, Damian', 'Nowitzki, Dirk', 'Wall, John', 'Cousins, DeMarcus', 'Bosh, Chris', 'Duncan, Tim', 'Jefferson, Al', 'Irving, Kyrie', 'Leonard, Kawhi', 'Ibaka, Serge', 'Horford, Al', 'Dragic, Goran', 'Rose, Derrick', 'Lowry, Kyle', 'Drummond, Andre']
newPlayerData3 = newPlayerData[newPlayerData['player'].isin(ESPNRankTop30)]
newPlayerData3 = newPlayerData3.sort_values('player')
newPlayerData3['ranking'] = 0
for i in range(len(ESPNRankTop30)):
newPlayerData3.loc[newPlayerData3['player'] == ESPNRankTop30[i], 'ranking'] = str(i + 1)
line = Scatter(x=[0, 1], y=[0, 1], marker=dict(size=1, color='rgba(200, 200, 200, .5)'), name='Line of Neutrality')
trace1 = Scatter(x=newPlayerData2['fg_percentage'], y=newPlayerData2['fg_percentage_against'], mode='markers', marker=dict(size=10, color='rgba(132, 123, 255, .9)', line=dict(width=2)), name='League', text=newPlayerData2['player'])
trace2 = Scatter(x=newPlayerData3['fg_percentage'], y=newPlayerData3['fg_percentage_against'], mode='markers', marker=dict(size=10, color='rgba(255, 123, 132, .9)', line=dict(width=2)), name='#NBARank Top 30', text=newPlayerData3['player'] + ' (#' + newPlayerData3['ranking'] + ')')
data = [line, trace1, trace2]
layout = Layout(hovermode='closest', annotations=Annotations([Annotation(x=0.5004254919715793, y=-0.16191064079952971, showarrow=False, text='Made Field Goal %', xref='paper', yref='paper'), Annotation(x=-0.05944728761514841, y=0.4714285714285711, showarrow=False, text='Allowed Field Goal %', textangle=-90, xref='paper', yref='paper')]), autosize=True, margin=Margin(b=100), title='Made Vs. Allowed FG%', xaxis=XAxis(autorange=False, range=[0.35, 0.72], type='linear'), yaxis=YAxis(autorange=False, range=[0.35, 0.55], type='linear'))
graph = Figure(data=data, layout=layout)
data = pd.read_csv('../input/shot_logs.csv')
defenderIDList = list(data['CLOSEST_DEFENDER_PLAYER_ID'].unique())
attackerIDList = list(data['player_id'].unique())
playerData = pd.DataFrame(index=playerIDList, columns=['player', 'made', 'missed', 'fg_percentage', 'fg_distance'])
for attackerID in attackerIDList:
name = data[data['player_id'] == attackerID]['player_name'].iloc[0]
spacePos = name.find(' ')
firstname = name[0].upper() + name[1:spacePos]
lastname = name[spacePos + 1].upper() + name[spacePos + 2:]
name = firstname + ' ' + lastname
made = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
averageDist = np.mean(data[data['player_id'] == attackerID]['SHOT_DIST'])
playerData.at[attackerID, 'player'] = name
playerData.at[attackerID, 'made'] = made
playerData.at[attackerID, 'missed'] = missed
playerData.at[attackerID, 'fg_percentage'] = percentage
playerData.at[attackerID, 'fg_distance'] = averageDist
newPlayerData = playerData.sort_values('fg_distance', ascending=False)
newPlayerData2 = newPlayerData.drop(newPlayerData[newPlayerData.made < 200].index)
newPlayerData2
import plotly
import plotly.plotly as py
from plotly.graph_objs import *
from ipywidgets import widgets
from IPython.display import display, clear_output, Image
from plotly.graph_objs import *
from plotly.widgets import GraphWidget
ESPNRankTop30 = ['Lebron James', 'Chris Paul', 'Anthony Davis', 'Russell Westbrook', 'Blake Griffin', 'Stephen Curry', 'Kevin Love', 'Kevin Durant', 'James Harden', 'Dwight Howard', 'Carmelo Anthony', 'Joakim Noah', 'Lamarcus Aldridge', 'Marc Gasol', 'Tony Parker', 'Damian Lillard', 'Dirk Nowtizski', 'John Wall', 'Demarcus Cousins', 'Chris Bosh', 'Tim Duncan', 'Al Jefferson', 'Kyrie Irving', 'Kawhi Leonard', 'Serge Ibaka', 'Al Horford', 'Goran Dragic', 'Derrick Rose', 'Kyle Lowry', 'Andre Drummond']
trace1 = Scatter(x=newPlayerData2['fg_distance'], y=newPlayerData2['fg_percentage'], mode='markers', marker=dict(size=newPlayerData2['made'] / 20, color='rgba(132, 123, 255, .9)', line=dict(width=2)), name='League', text=newPlayerData2['player'])
newPlayerData3 = newPlayerData2[newPlayerData2.player.isin(ESPNRankTop30)]
trace2 = Scatter(x=newPlayerData3['fg_distance'], y=newPlayerData3['fg_percentage'], mode='markers', marker=dict(size=newPlayerData3['made'] / 20, color='rgba(255, 123, 132, .9)', line=dict(width=2)), name='#NBARank Top 30', text=newPlayerData3['player'])
data = [trace1, trace2]
layout = Layout(hovermode='closest', annotations=Annotations([Annotation(x=0.5004254919715793, y=-0.16191064079952971, showarrow=False, text='Average Shot Distance (Feet)', xref='paper', yref='paper'), Annotation(x=-0.06944728761514841, y=0.4714285714285711, showarrow=False, text='Field Goal %', textangle=-90, xref='paper', yref='paper')]), autosize=True, margin=Margin(b=100), title="Comparing Players' FG% and Average Shot Distance (Minimum 200 Made Shots)")
graph = Figure(data=data, layout=layout)
data = pd.read_csv('../input/shot_logs.csv')
attackerIDList = list(data['player_id'].unique())
playerIDList = []
for ID in attackerIDList:
for period in range(1, 4):
playerIDList.append(ID + period / 10)
playerData = pd.DataFrame(index=playerIDList, columns=['player', 'period', 'made', 'missed', 'fg_percentage'])
for attackerID in attackerIDList:
name = data[data['player_id'] == attackerID]['player_name'].iloc[0]
spacePos = name.find(' ')
firstname = name[0].upper() + name[1:spacePos]
lastname = name[spacePos + 1].upper() + name[spacePos + 2:]
name = firstname + ' ' + lastname
for period in range(1, 5):
made = np.sum(np.logical_and(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'made', data[data['player_id'] == attackerID]['PERIOD'] == period))
missed = np.sum(np.logical_and(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'missed', data[data['player_id'] == attackerID]['PERIOD'] == period))
percentage = made / (made + missed)
playerData.at[attackerID + period / 10, 'player'] = name
playerData.at[attackerID + period / 10, 'period'] = period
playerData.at[attackerID + period / 10, 'made'] = made
playerData.at[attackerID + period / 10, 'missed'] = missed
playerData.at[attackerID + period / 10, 'fg_percentage'] = percentage
newPlayerData = playerData.sort_values('player', ascending=True)
inelligibleNames = newPlayerData[newPlayerData.made < 50]['player']
inelligibleNames = inelligibleNames.unique()
newPlayerData2 = newPlayerData[~newPlayerData.player.isin(inelligibleNames)]
newPlayerData2 | code |
2042925/cell_6 | [
"text_html_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
data = pd.read_csv('../input/shot_logs.csv')
playerIDList = list(data['player_id'].unique())
defenderIDList = list(data['CLOSEST_DEFENDER_PLAYER_ID'].unique())
attackerIDList = list(data['player_id'].unique())
playerData = pd.DataFrame(index=playerIDList, columns=['player', 'made', 'missed', 'fg_percentage', 'made_against', 'missed_against', 'fg_percentage_against'])
for defenderID in defenderIDList:
name = data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['CLOSEST_DEFENDER'].iloc[0]
made = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[defenderID, 'player'] = name
playerData.at[defenderID, 'made_against'] = made
playerData.at[defenderID, 'missed_against'] = missed
playerData.at[defenderID, 'fg_percentage_against'] = percentage
for attackerID in attackerIDList:
made = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[attackerID, 'made'] = made
playerData.at[attackerID, 'missed'] = missed
playerData.at[attackerID, 'fg_percentage'] = percentage
newPlayerData = playerData.sort_values('fg_percentage_against')
newPlayerData2 = newPlayerData.drop(newPlayerData[newPlayerData.missed_against < 200].index)
newPlayerData2 | code |
2042925/cell_16 | [
"text_html_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
data = pd.read_csv('../input/shot_logs.csv')
playerIDList = list(data['player_id'].unique())
defenderIDList = list(data['CLOSEST_DEFENDER_PLAYER_ID'].unique())
attackerIDList = list(data['player_id'].unique())
playerData = pd.DataFrame(index=playerIDList, columns=['player', 'made', 'missed', 'fg_percentage', 'made_against', 'missed_against', 'fg_percentage_against'])
for defenderID in defenderIDList:
name = data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['CLOSEST_DEFENDER'].iloc[0]
made = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[defenderID, 'player'] = name
playerData.at[defenderID, 'made_against'] = made
playerData.at[defenderID, 'missed_against'] = missed
playerData.at[defenderID, 'fg_percentage_against'] = percentage
for attackerID in attackerIDList:
made = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[attackerID, 'made'] = made
playerData.at[attackerID, 'missed'] = missed
playerData.at[attackerID, 'fg_percentage'] = percentage
newPlayerData = playerData.sort_values('fg_percentage_against')
newPlayerData2 = newPlayerData.drop(newPlayerData[newPlayerData.missed_against < 200].index)
ESPNRankTop30 = ['James, LeBron', 'Paul, Chris', 'Davis, Anthony', 'Westbrook, Russell', 'Griffin, Blake', 'Curry, Stephen', 'Love, Kevin', 'Durant, Kevin', 'Harden, James', 'Howard, Dwight', 'Anthony, Carmelo', 'Noah, Joakim', 'Aldridge, LaMarcus', 'Gasol, Marc', 'Parker, Tony', 'Lillard, Damian', 'Nowitzki, Dirk', 'Wall, John', 'Cousins, DeMarcus', 'Bosh, Chris', 'Duncan, Tim', 'Jefferson, Al', 'Irving, Kyrie', 'Leonard, Kawhi', 'Ibaka, Serge', 'Horford, Al', 'Dragic, Goran', 'Rose, Derrick', 'Lowry, Kyle', 'Drummond, Andre']
newPlayerData3 = newPlayerData[newPlayerData['player'].isin(ESPNRankTop30)]
newPlayerData3 = newPlayerData3.sort_values('player')
newPlayerData3['ranking'] = 0
for i in range(len(ESPNRankTop30)):
newPlayerData3.loc[newPlayerData3['player'] == ESPNRankTop30[i], 'ranking'] = str(i + 1)
line = Scatter(x=[0, 1], y=[0, 1], marker=dict(size=1, color='rgba(200, 200, 200, .5)'), name='Line of Neutrality')
trace1 = Scatter(x=newPlayerData2['fg_percentage'], y=newPlayerData2['fg_percentage_against'], mode='markers', marker=dict(size=10, color='rgba(132, 123, 255, .9)', line=dict(width=2)), name='League', text=newPlayerData2['player'])
trace2 = Scatter(x=newPlayerData3['fg_percentage'], y=newPlayerData3['fg_percentage_against'], mode='markers', marker=dict(size=10, color='rgba(255, 123, 132, .9)', line=dict(width=2)), name='#NBARank Top 30', text=newPlayerData3['player'] + ' (#' + newPlayerData3['ranking'] + ')')
data = [line, trace1, trace2]
layout = Layout(hovermode='closest', annotations=Annotations([Annotation(x=0.5004254919715793, y=-0.16191064079952971, showarrow=False, text='Made Field Goal %', xref='paper', yref='paper'), Annotation(x=-0.05944728761514841, y=0.4714285714285711, showarrow=False, text='Allowed Field Goal %', textangle=-90, xref='paper', yref='paper')]), autosize=True, margin=Margin(b=100), title='Made Vs. Allowed FG%', xaxis=XAxis(autorange=False, range=[0.35, 0.72], type='linear'), yaxis=YAxis(autorange=False, range=[0.35, 0.55], type='linear'))
graph = Figure(data=data, layout=layout)
data = pd.read_csv('../input/shot_logs.csv')
defenderIDList = list(data['CLOSEST_DEFENDER_PLAYER_ID'].unique())
attackerIDList = list(data['player_id'].unique())
playerData = pd.DataFrame(index=playerIDList, columns=['player', 'made', 'missed', 'fg_percentage', 'fg_distance'])
for attackerID in attackerIDList:
name = data[data['player_id'] == attackerID]['player_name'].iloc[0]
spacePos = name.find(' ')
firstname = name[0].upper() + name[1:spacePos]
lastname = name[spacePos + 1].upper() + name[spacePos + 2:]
name = firstname + ' ' + lastname
made = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
averageDist = np.mean(data[data['player_id'] == attackerID]['SHOT_DIST'])
playerData.at[attackerID, 'player'] = name
playerData.at[attackerID, 'made'] = made
playerData.at[attackerID, 'missed'] = missed
playerData.at[attackerID, 'fg_percentage'] = percentage
playerData.at[attackerID, 'fg_distance'] = averageDist
newPlayerData = playerData.sort_values('fg_distance', ascending=False)
newPlayerData2 = newPlayerData.drop(newPlayerData[newPlayerData.made < 200].index)
newPlayerData2
import plotly
import plotly.plotly as py
from plotly.graph_objs import *
from ipywidgets import widgets
from IPython.display import display, clear_output, Image
from plotly.graph_objs import *
from plotly.widgets import GraphWidget
ESPNRankTop30 = ['Lebron James', 'Chris Paul', 'Anthony Davis', 'Russell Westbrook', 'Blake Griffin', 'Stephen Curry', 'Kevin Love', 'Kevin Durant', 'James Harden', 'Dwight Howard', 'Carmelo Anthony', 'Joakim Noah', 'Lamarcus Aldridge', 'Marc Gasol', 'Tony Parker', 'Damian Lillard', 'Dirk Nowtizski', 'John Wall', 'Demarcus Cousins', 'Chris Bosh', 'Tim Duncan', 'Al Jefferson', 'Kyrie Irving', 'Kawhi Leonard', 'Serge Ibaka', 'Al Horford', 'Goran Dragic', 'Derrick Rose', 'Kyle Lowry', 'Andre Drummond']
trace1 = Scatter(x=newPlayerData2['fg_distance'], y=newPlayerData2['fg_percentage'], mode='markers', marker=dict(size=newPlayerData2['made'] / 20, color='rgba(132, 123, 255, .9)', line=dict(width=2)), name='League', text=newPlayerData2['player'])
newPlayerData3 = newPlayerData2[newPlayerData2.player.isin(ESPNRankTop30)]
trace2 = Scatter(x=newPlayerData3['fg_distance'], y=newPlayerData3['fg_percentage'], mode='markers', marker=dict(size=newPlayerData3['made'] / 20, color='rgba(255, 123, 132, .9)', line=dict(width=2)), name='#NBARank Top 30', text=newPlayerData3['player'])
data = [trace1, trace2]
layout = Layout(hovermode='closest', annotations=Annotations([Annotation(x=0.5004254919715793, y=-0.16191064079952971, showarrow=False, text='Average Shot Distance (Feet)', xref='paper', yref='paper'), Annotation(x=-0.06944728761514841, y=0.4714285714285711, showarrow=False, text='Field Goal %', textangle=-90, xref='paper', yref='paper')]), autosize=True, margin=Margin(b=100), title="Comparing Players' FG% and Average Shot Distance (Minimum 200 Made Shots)")
graph = Figure(data=data, layout=layout)
iplot(graph) | code |
2042925/cell_14 | [
"text_html_output_1.png"
] | from plotly.offline import iplot, init_notebook_mode
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
import plotly.graph_objs as go
from plotly import tools
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
data = pd.read_csv('../input/shot_logs.csv')
playerIDList = list(data['player_id'].unique())
defenderIDList = list(data['CLOSEST_DEFENDER_PLAYER_ID'].unique())
attackerIDList = list(data['player_id'].unique())
playerData = pd.DataFrame(index=playerIDList, columns=['player', 'made', 'missed', 'fg_percentage', 'made_against', 'missed_against', 'fg_percentage_against'])
for defenderID in defenderIDList:
name = data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['CLOSEST_DEFENDER'].iloc[0]
made = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['CLOSEST_DEFENDER_PLAYER_ID'] == defenderID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[defenderID, 'player'] = name
playerData.at[defenderID, 'made_against'] = made
playerData.at[defenderID, 'missed_against'] = missed
playerData.at[defenderID, 'fg_percentage_against'] = percentage
for attackerID in attackerIDList:
made = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
playerData.at[attackerID, 'made'] = made
playerData.at[attackerID, 'missed'] = missed
playerData.at[attackerID, 'fg_percentage'] = percentage
newPlayerData = playerData.sort_values('fg_percentage_against')
newPlayerData2 = newPlayerData.drop(newPlayerData[newPlayerData.missed_against < 200].index)
ESPNRankTop30 = ['James, LeBron', 'Paul, Chris', 'Davis, Anthony', 'Westbrook, Russell', 'Griffin, Blake', 'Curry, Stephen', 'Love, Kevin', 'Durant, Kevin', 'Harden, James', 'Howard, Dwight', 'Anthony, Carmelo', 'Noah, Joakim', 'Aldridge, LaMarcus', 'Gasol, Marc', 'Parker, Tony', 'Lillard, Damian', 'Nowitzki, Dirk', 'Wall, John', 'Cousins, DeMarcus', 'Bosh, Chris', 'Duncan, Tim', 'Jefferson, Al', 'Irving, Kyrie', 'Leonard, Kawhi', 'Ibaka, Serge', 'Horford, Al', 'Dragic, Goran', 'Rose, Derrick', 'Lowry, Kyle', 'Drummond, Andre']
newPlayerData3 = newPlayerData[newPlayerData['player'].isin(ESPNRankTop30)]
newPlayerData3 = newPlayerData3.sort_values('player')
newPlayerData3['ranking'] = 0
for i in range(len(ESPNRankTop30)):
newPlayerData3.loc[newPlayerData3['player'] == ESPNRankTop30[i], 'ranking'] = str(i + 1)
line = Scatter(x=[0, 1], y=[0, 1], marker=dict(size=1, color='rgba(200, 200, 200, .5)'), name='Line of Neutrality')
trace1 = Scatter(x=newPlayerData2['fg_percentage'], y=newPlayerData2['fg_percentage_against'], mode='markers', marker=dict(size=10, color='rgba(132, 123, 255, .9)', line=dict(width=2)), name='League', text=newPlayerData2['player'])
trace2 = Scatter(x=newPlayerData3['fg_percentage'], y=newPlayerData3['fg_percentage_against'], mode='markers', marker=dict(size=10, color='rgba(255, 123, 132, .9)', line=dict(width=2)), name='#NBARank Top 30', text=newPlayerData3['player'] + ' (#' + newPlayerData3['ranking'] + ')')
data = [line, trace1, trace2]
layout = Layout(hovermode='closest', annotations=Annotations([Annotation(x=0.5004254919715793, y=-0.16191064079952971, showarrow=False, text='Made Field Goal %', xref='paper', yref='paper'), Annotation(x=-0.05944728761514841, y=0.4714285714285711, showarrow=False, text='Allowed Field Goal %', textangle=-90, xref='paper', yref='paper')]), autosize=True, margin=Margin(b=100), title='Made Vs. Allowed FG%', xaxis=XAxis(autorange=False, range=[0.35, 0.72], type='linear'), yaxis=YAxis(autorange=False, range=[0.35, 0.55], type='linear'))
graph = Figure(data=data, layout=layout)
data = pd.read_csv('../input/shot_logs.csv')
defenderIDList = list(data['CLOSEST_DEFENDER_PLAYER_ID'].unique())
attackerIDList = list(data['player_id'].unique())
playerData = pd.DataFrame(index=playerIDList, columns=['player', 'made', 'missed', 'fg_percentage', 'fg_distance'])
for attackerID in attackerIDList:
name = data[data['player_id'] == attackerID]['player_name'].iloc[0]
spacePos = name.find(' ')
firstname = name[0].upper() + name[1:spacePos]
lastname = name[spacePos + 1].upper() + name[spacePos + 2:]
name = firstname + ' ' + lastname
made = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'made')
missed = np.sum(data[data['player_id'] == attackerID]['SHOT_RESULT'] == 'missed')
percentage = made / (made + missed)
averageDist = np.mean(data[data['player_id'] == attackerID]['SHOT_DIST'])
playerData.at[attackerID, 'player'] = name
playerData.at[attackerID, 'made'] = made
playerData.at[attackerID, 'missed'] = missed
playerData.at[attackerID, 'fg_percentage'] = percentage
playerData.at[attackerID, 'fg_distance'] = averageDist
newPlayerData = playerData.sort_values('fg_distance', ascending=False)
newPlayerData2 = newPlayerData.drop(newPlayerData[newPlayerData.made < 200].index)
newPlayerData2 | code |
74056226/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
def percent_missing(df: pd.DataFrame):
totalCells = np.product(df.shape)
missingCount = df.isnull().sum()
totalMissing = missingCount.sum()
return
def every_column_percent_missing(df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_db = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing})
missing_value_db.sort_values('percent_missing', inplace=True)
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
pass
def plot_dist(df: pd.DataFrame, column: str):
pass
def plot_count(df: pd.DataFrame, column: str) -> None:
pass
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
pass
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
def bar_plot(x, y, title, palette_len, xlim = None, ylim = None,
xticklabels = None, yticklabels = None,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
xlabel = None, ylabel = None, figsize = (10, 4),
axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.barplot(x = x, y = y, edgecolor = 'black', ax = ax,
palette = reversed(sns.color_palette("viridis", len(palette_len))))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklabels, fontfamily = 'serif')
ax.set_yticklabels(yticklabels, fontfamily = 'serif')
plt.xlabel(xlabel, fontfamily = 'serif')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def line_plot(data, y, title, color,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (10, 4), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.lineplot(x = range(len(data[y])), y = data[y], dashes = False,
color = color, linewidth = .5)
ax.xaxis.set_major_locator(plt.MaxNLocator(20))
ax.set_xticks([])
plt.xticks(rotation = 90)
plt.xlabel('')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def corr_plot(data,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (15, 11), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title('Correlations (Pearson)', size = 15, fontweight = 'bold', fontfamily = 'serif')
mask = np.triu(np.ones_like(data.corr(), dtype = bool))
sns.heatmap(round(data.corr(), 2), mask = mask, cmap = 'viridis', annot = True)
plt.show()
def columns_viz(data, color):
for i in range(len(data.columns)):
line_plot(data = data, y = data.columns[i],
color = color,
title = '{} dynamics'.format(data.columns[i]),
bottom_visible = False, figsize = (10, 2))
districts_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv")
products_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv")
eng_path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info.isnull().sum()
districts_cleaned = districts_info.dropna()
districts_cleaned.duplicated().sum()
list(districts_cleaned.columns.values) | code |
74056226/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
def percent_missing(df: pd.DataFrame):
totalCells = np.product(df.shape)
missingCount = df.isnull().sum()
totalMissing = missingCount.sum()
return
def every_column_percent_missing(df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_db = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing})
missing_value_db.sort_values('percent_missing', inplace=True)
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
pass
def plot_dist(df: pd.DataFrame, column: str):
pass
def plot_count(df: pd.DataFrame, column: str) -> None:
pass
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
pass
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
def bar_plot(x, y, title, palette_len, xlim = None, ylim = None,
xticklabels = None, yticklabels = None,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
xlabel = None, ylabel = None, figsize = (10, 4),
axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.barplot(x = x, y = y, edgecolor = 'black', ax = ax,
palette = reversed(sns.color_palette("viridis", len(palette_len))))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklabels, fontfamily = 'serif')
ax.set_yticklabels(yticklabels, fontfamily = 'serif')
plt.xlabel(xlabel, fontfamily = 'serif')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def line_plot(data, y, title, color,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (10, 4), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.lineplot(x = range(len(data[y])), y = data[y], dashes = False,
color = color, linewidth = .5)
ax.xaxis.set_major_locator(plt.MaxNLocator(20))
ax.set_xticks([])
plt.xticks(rotation = 90)
plt.xlabel('')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def corr_plot(data,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (15, 11), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title('Correlations (Pearson)', size = 15, fontweight = 'bold', fontfamily = 'serif')
mask = np.triu(np.ones_like(data.corr(), dtype = bool))
sns.heatmap(round(data.corr(), 2), mask = mask, cmap = 'viridis', annot = True)
plt.show()
def columns_viz(data, color):
for i in range(len(data.columns)):
line_plot(data = data, y = data.columns[i],
color = color,
title = '{} dynamics'.format(data.columns[i]),
bottom_visible = False, figsize = (10, 2))
districts_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv")
products_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv")
eng_path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info.isnull().sum() | code |
74056226/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
def percent_missing(df: pd.DataFrame):
totalCells = np.product(df.shape)
missingCount = df.isnull().sum()
totalMissing = missingCount.sum()
return
def every_column_percent_missing(df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_db = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing})
missing_value_db.sort_values('percent_missing', inplace=True)
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
pass
def plot_dist(df: pd.DataFrame, column: str):
pass
def plot_count(df: pd.DataFrame, column: str) -> None:
pass
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
pass
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
def bar_plot(x, y, title, palette_len, xlim = None, ylim = None,
xticklabels = None, yticklabels = None,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
xlabel = None, ylabel = None, figsize = (10, 4),
axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.barplot(x = x, y = y, edgecolor = 'black', ax = ax,
palette = reversed(sns.color_palette("viridis", len(palette_len))))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklabels, fontfamily = 'serif')
ax.set_yticklabels(yticklabels, fontfamily = 'serif')
plt.xlabel(xlabel, fontfamily = 'serif')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def line_plot(data, y, title, color,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (10, 4), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.lineplot(x = range(len(data[y])), y = data[y], dashes = False,
color = color, linewidth = .5)
ax.xaxis.set_major_locator(plt.MaxNLocator(20))
ax.set_xticks([])
plt.xticks(rotation = 90)
plt.xlabel('')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def corr_plot(data,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (15, 11), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title('Correlations (Pearson)', size = 15, fontweight = 'bold', fontfamily = 'serif')
mask = np.triu(np.ones_like(data.corr(), dtype = bool))
sns.heatmap(round(data.corr(), 2), mask = mask, cmap = 'viridis', annot = True)
plt.show()
def columns_viz(data, color):
for i in range(len(data.columns)):
line_plot(data = data, y = data.columns[i],
color = color,
title = '{} dynamics'.format(data.columns[i]),
bottom_visible = False, figsize = (10, 2))
districts_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv")
products_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv")
eng_path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info.isnull().sum()
districts_cleaned = districts_info.dropna()
districts_cleaned.duplicated().sum() | code |
74056226/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
def percent_missing(df: pd.DataFrame):
totalCells = np.product(df.shape)
missingCount = df.isnull().sum()
totalMissing = missingCount.sum()
return
def every_column_percent_missing(df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_db = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing})
missing_value_db.sort_values('percent_missing', inplace=True)
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
pass
def plot_dist(df: pd.DataFrame, column: str):
pass
def plot_count(df: pd.DataFrame, column: str) -> None:
pass
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
pass
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
def bar_plot(x, y, title, palette_len, xlim = None, ylim = None,
xticklabels = None, yticklabels = None,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
xlabel = None, ylabel = None, figsize = (10, 4),
axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.barplot(x = x, y = y, edgecolor = 'black', ax = ax,
palette = reversed(sns.color_palette("viridis", len(palette_len))))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklabels, fontfamily = 'serif')
ax.set_yticklabels(yticklabels, fontfamily = 'serif')
plt.xlabel(xlabel, fontfamily = 'serif')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def line_plot(data, y, title, color,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (10, 4), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.lineplot(x = range(len(data[y])), y = data[y], dashes = False,
color = color, linewidth = .5)
ax.xaxis.set_major_locator(plt.MaxNLocator(20))
ax.set_xticks([])
plt.xticks(rotation = 90)
plt.xlabel('')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def corr_plot(data,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (15, 11), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title('Correlations (Pearson)', size = 15, fontweight = 'bold', fontfamily = 'serif')
mask = np.triu(np.ones_like(data.corr(), dtype = bool))
sns.heatmap(round(data.corr(), 2), mask = mask, cmap = 'viridis', annot = True)
plt.show()
def columns_viz(data, color):
for i in range(len(data.columns)):
line_plot(data = data, y = data.columns[i],
color = color,
title = '{} dynamics'.format(data.columns[i]),
bottom_visible = False, figsize = (10, 2))
districts_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv")
products_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv")
eng_path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info.head() | code |
74056226/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
def percent_missing(df: pd.DataFrame):
totalCells = np.product(df.shape)
missingCount = df.isnull().sum()
totalMissing = missingCount.sum()
return
def every_column_percent_missing(df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_db = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing})
missing_value_db.sort_values('percent_missing', inplace=True)
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
pass
def plot_dist(df: pd.DataFrame, column: str):
pass
def plot_count(df: pd.DataFrame, column: str) -> None:
pass
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
pass
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
def bar_plot(x, y, title, palette_len, xlim = None, ylim = None,
xticklabels = None, yticklabels = None,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
xlabel = None, ylabel = None, figsize = (10, 4),
axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.barplot(x = x, y = y, edgecolor = 'black', ax = ax,
palette = reversed(sns.color_palette("viridis", len(palette_len))))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklabels, fontfamily = 'serif')
ax.set_yticklabels(yticklabels, fontfamily = 'serif')
plt.xlabel(xlabel, fontfamily = 'serif')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def line_plot(data, y, title, color,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (10, 4), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.lineplot(x = range(len(data[y])), y = data[y], dashes = False,
color = color, linewidth = .5)
ax.xaxis.set_major_locator(plt.MaxNLocator(20))
ax.set_xticks([])
plt.xticks(rotation = 90)
plt.xlabel('')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def corr_plot(data,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (15, 11), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title('Correlations (Pearson)', size = 15, fontweight = 'bold', fontfamily = 'serif')
mask = np.triu(np.ones_like(data.corr(), dtype = bool))
sns.heatmap(round(data.corr(), 2), mask = mask, cmap = 'viridis', annot = True)
plt.show()
def columns_viz(data, color):
for i in range(len(data.columns)):
line_plot(data = data, y = data.columns[i],
color = color,
title = '{} dynamics'.format(data.columns[i]),
bottom_visible = False, figsize = (10, 2))
districts_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv")
products_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv")
eng_path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info.isnull().sum()
districts_cleaned = districts_info.dropna()
percent_missing(districts_cleaned) | code |
74056226/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
74056226/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
def percent_missing(df: pd.DataFrame):
totalCells = np.product(df.shape)
missingCount = df.isnull().sum()
totalMissing = missingCount.sum()
return
def every_column_percent_missing(df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_db = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing})
missing_value_db.sort_values('percent_missing', inplace=True)
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
pass
def plot_dist(df: pd.DataFrame, column: str):
pass
def plot_count(df: pd.DataFrame, column: str) -> None:
pass
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
pass
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
def bar_plot(x, y, title, palette_len, xlim = None, ylim = None,
             xticklabels = None, yticklabels = None,
             top_visible = False, right_visible = False,
             bottom_visible = True, left_visible = False,
             xlabel = None, ylabel = None, figsize = (10, 4),
             axis_grid = 'y'):
    """Draw a styled seaborn bar chart and show it.

    x, y        : data forwarded straight to sns.barplot.
    palette_len : only its length is used, to decide how many viridis
                  colors to generate for the palette.
    xticklabels / yticklabels : tick-label sequences.  NOTE(review): the
                  defaults are None, but ax.set_xticklabels(None) raises in
                  matplotlib, so callers appear to be expected to always
                  pass these — confirm against call sites.
    *_visible   : per-side spine visibility toggles.
    axis_grid   : which axis gets the dashed grid ('x', 'y' or 'both').
    """
    fig, ax = plt.subplots(figsize = figsize)
    plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
    # Color every spine black, then hide the sides the caller turned off.
    for i in ['top', 'right', 'bottom', 'left']:
        ax.spines[i].set_color('black')
    ax.spines['top'].set_visible(top_visible)
    ax.spines['right'].set_visible(right_visible)
    ax.spines['bottom'].set_visible(bottom_visible)
    ax.spines['left'].set_visible(left_visible)
    # reversed() flips the palette so the darkest color comes first.
    sns.barplot(x = x, y = y, edgecolor = 'black', ax = ax,
                palette = reversed(sns.color_palette("viridis", len(palette_len))))
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xticklabels(xticklabels, fontfamily = 'serif')
    ax.set_yticklabels(yticklabels, fontfamily = 'serif')
    plt.xlabel(xlabel, fontfamily = 'serif')
    plt.ylabel(ylabel, fontfamily = 'serif')
    ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
    plt.show()
def line_plot(data, y, title, color,
              top_visible = False, right_visible = False,
              bottom_visible = True, left_visible = False,
              ylabel = None, figsize = (10, 4), axis_grid = 'y'):
    """Plot column *y* of *data* as a thin line over its positional index.

    data      : DataFrame-like; data[y] is the series plotted.
    *_visible : per-side spine visibility toggles.
    axis_grid : which axis gets the dashed grid ('x', 'y' or 'both').
    """
    fig, ax = plt.subplots(figsize = figsize)
    plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
    # Color every spine black, then hide the sides the caller turned off.
    for i in ['top', 'right', 'bottom', 'left']:
        ax.spines[i].set_color('black')
    ax.spines['top'].set_visible(top_visible)
    ax.spines['right'].set_visible(right_visible)
    ax.spines['bottom'].set_visible(bottom_visible)
    ax.spines['left'].set_visible(left_visible)
    # X is just 0..n-1, not the frame's own index.
    sns.lineplot(x = range(len(data[y])), y = data[y], dashes = False,
                 color = color, linewidth = .5)
    # Configure up to 20 major ticks, then hide them entirely — the x axis
    # is intentionally blank (see set_xticks([]) and the empty xlabel).
    ax.xaxis.set_major_locator(plt.MaxNLocator(20))
    ax.set_xticks([])
    plt.xticks(rotation = 90)
    plt.xlabel('')
    plt.ylabel(ylabel, fontfamily = 'serif')
    ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
    plt.show()
def corr_plot(data,
              top_visible = False, right_visible = False,
              bottom_visible = True, left_visible = False,
              ylabel = None, figsize = (15, 11), axis_grid = 'y'):
    """Show a lower-triangle Pearson-correlation heatmap of *data*.

    NOTE(review): the *_visible, ylabel and axis_grid parameters (and the
    created ax) are accepted but never used here; data.corr() is also
    computed twice.  Kept as-is to preserve the shared signature with the
    other plot helpers.
    """
    fig, ax = plt.subplots(figsize = figsize)
    plt.title('Correlations (Pearson)', size = 15, fontweight = 'bold', fontfamily = 'serif')
    # Mask the upper triangle (including the diagonal) so each pair is
    # shown only once.
    mask = np.triu(np.ones_like(data.corr(), dtype = bool))
    sns.heatmap(round(data.corr(), 2), mask = mask, cmap = 'viridis', annot = True)
    plt.show()
def columns_viz(data, color):
    """Draw one small line_plot per column of *data*, all in *color*."""
    for column in data.columns:
        line_plot(
            data=data,
            y=column,
            color=color,
            title='{} dynamics'.format(column),
            bottom_visible=False,
            figsize=(10, 2),
        )
districts_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv")
products_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv")
eng_path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info.isnull().sum()
districts_cleaned = districts_info.dropna()
every_column_percent_missing(districts_cleaned) | code |
74056226/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
def percent_missing(df: pd.DataFrame):
totalCells = np.product(df.shape)
missingCount = df.isnull().sum()
totalMissing = missingCount.sum()
return
def every_column_percent_missing(df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_db = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing})
missing_value_db.sort_values('percent_missing', inplace=True)
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
pass
def plot_dist(df: pd.DataFrame, column: str):
pass
def plot_count(df: pd.DataFrame, column: str) -> None:
pass
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
pass
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
def bar_plot(x, y, title, palette_len, xlim = None, ylim = None,
xticklabels = None, yticklabels = None,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
xlabel = None, ylabel = None, figsize = (10, 4),
axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.barplot(x = x, y = y, edgecolor = 'black', ax = ax,
palette = reversed(sns.color_palette("viridis", len(palette_len))))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklabels, fontfamily = 'serif')
ax.set_yticklabels(yticklabels, fontfamily = 'serif')
plt.xlabel(xlabel, fontfamily = 'serif')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def line_plot(data, y, title, color,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (10, 4), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.lineplot(x = range(len(data[y])), y = data[y], dashes = False,
color = color, linewidth = .5)
ax.xaxis.set_major_locator(plt.MaxNLocator(20))
ax.set_xticks([])
plt.xticks(rotation = 90)
plt.xlabel('')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def corr_plot(data,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (15, 11), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title('Correlations (Pearson)', size = 15, fontweight = 'bold', fontfamily = 'serif')
mask = np.triu(np.ones_like(data.corr(), dtype = bool))
sns.heatmap(round(data.corr(), 2), mask = mask, cmap = 'viridis', annot = True)
plt.show()
def columns_viz(data, color):
for i in range(len(data.columns)):
line_plot(data = data, y = data.columns[i],
color = color,
title = '{} dynamics'.format(data.columns[i]),
bottom_visible = False, figsize = (10, 2))
districts_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv")
products_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv")
eng_path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info.isnull().sum()
every_column_percent_missing(districts_info) | code |
74056226/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
def percent_missing(df: pd.DataFrame):
totalCells = np.product(df.shape)
missingCount = df.isnull().sum()
totalMissing = missingCount.sum()
return
def every_column_percent_missing(df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_db = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing})
missing_value_db.sort_values('percent_missing', inplace=True)
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
pass
def plot_dist(df: pd.DataFrame, column: str):
pass
def plot_count(df: pd.DataFrame, column: str) -> None:
pass
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
pass
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
def bar_plot(x, y, title, palette_len, xlim = None, ylim = None,
xticklabels = None, yticklabels = None,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
xlabel = None, ylabel = None, figsize = (10, 4),
axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.barplot(x = x, y = y, edgecolor = 'black', ax = ax,
palette = reversed(sns.color_palette("viridis", len(palette_len))))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklabels, fontfamily = 'serif')
ax.set_yticklabels(yticklabels, fontfamily = 'serif')
plt.xlabel(xlabel, fontfamily = 'serif')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def line_plot(data, y, title, color,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (10, 4), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.lineplot(x = range(len(data[y])), y = data[y], dashes = False,
color = color, linewidth = .5)
ax.xaxis.set_major_locator(plt.MaxNLocator(20))
ax.set_xticks([])
plt.xticks(rotation = 90)
plt.xlabel('')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def corr_plot(data,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (15, 11), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title('Correlations (Pearson)', size = 15, fontweight = 'bold', fontfamily = 'serif')
mask = np.triu(np.ones_like(data.corr(), dtype = bool))
sns.heatmap(round(data.corr(), 2), mask = mask, cmap = 'viridis', annot = True)
plt.show()
def columns_viz(data, color):
for i in range(len(data.columns)):
line_plot(data = data, y = data.columns[i],
color = color,
title = '{} dynamics'.format(data.columns[i]),
bottom_visible = False, figsize = (10, 2))
districts_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv")
products_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv")
eng_path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info.isnull().sum()
districts_cleaned = districts_info.dropna()
print(f' There are {districts_cleaned.shape[0]} rows and {districts_cleaned.shape[1]} columns') | code |
74056226/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
def percent_missing(df: pd.DataFrame):
totalCells = np.product(df.shape)
missingCount = df.isnull().sum()
totalMissing = missingCount.sum()
return
def every_column_percent_missing(df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_db = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing})
missing_value_db.sort_values('percent_missing', inplace=True)
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
pass
def plot_dist(df: pd.DataFrame, column: str):
pass
def plot_count(df: pd.DataFrame, column: str) -> None:
pass
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
pass
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
def bar_plot(x, y, title, palette_len, xlim = None, ylim = None,
xticklabels = None, yticklabels = None,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
xlabel = None, ylabel = None, figsize = (10, 4),
axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.barplot(x = x, y = y, edgecolor = 'black', ax = ax,
palette = reversed(sns.color_palette("viridis", len(palette_len))))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklabels, fontfamily = 'serif')
ax.set_yticklabels(yticklabels, fontfamily = 'serif')
plt.xlabel(xlabel, fontfamily = 'serif')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def line_plot(data, y, title, color,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (10, 4), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.lineplot(x = range(len(data[y])), y = data[y], dashes = False,
color = color, linewidth = .5)
ax.xaxis.set_major_locator(plt.MaxNLocator(20))
ax.set_xticks([])
plt.xticks(rotation = 90)
plt.xlabel('')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def corr_plot(data,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (15, 11), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title('Correlations (Pearson)', size = 15, fontweight = 'bold', fontfamily = 'serif')
mask = np.triu(np.ones_like(data.corr(), dtype = bool))
sns.heatmap(round(data.corr(), 2), mask = mask, cmap = 'viridis', annot = True)
plt.show()
def columns_viz(data, color):
for i in range(len(data.columns)):
line_plot(data = data, y = data.columns[i],
color = color,
title = '{} dynamics'.format(data.columns[i]),
bottom_visible = False, figsize = (10, 2))
districts_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv")
products_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv")
eng_path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info.isnull().sum()
percent_missing(districts_info) | code |
74056226/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
def percent_missing(df: pd.DataFrame):
totalCells = np.product(df.shape)
missingCount = df.isnull().sum()
totalMissing = missingCount.sum()
return
def every_column_percent_missing(df):
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_db = pd.DataFrame({'column_name': df.columns, 'percent_missing': percent_missing})
missing_value_db.sort_values('percent_missing', inplace=True)
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
pass
def plot_dist(df: pd.DataFrame, column: str):
pass
def plot_count(df: pd.DataFrame, column: str) -> None:
pass
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
pass
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
plt.xticks(rotation=75, fontsize=14)
plt.yticks(fontsize=14)
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
def bar_plot(x, y, title, palette_len, xlim = None, ylim = None,
xticklabels = None, yticklabels = None,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
xlabel = None, ylabel = None, figsize = (10, 4),
axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.barplot(x = x, y = y, edgecolor = 'black', ax = ax,
palette = reversed(sns.color_palette("viridis", len(palette_len))))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklabels, fontfamily = 'serif')
ax.set_yticklabels(yticklabels, fontfamily = 'serif')
plt.xlabel(xlabel, fontfamily = 'serif')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def line_plot(data, y, title, color,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (10, 4), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title(title, size = 15, fontweight = 'bold', fontfamily = 'serif')
for i in ['top', 'right', 'bottom', 'left']:
ax.spines[i].set_color('black')
ax.spines['top'].set_visible(top_visible)
ax.spines['right'].set_visible(right_visible)
ax.spines['bottom'].set_visible(bottom_visible)
ax.spines['left'].set_visible(left_visible)
sns.lineplot(x = range(len(data[y])), y = data[y], dashes = False,
color = color, linewidth = .5)
ax.xaxis.set_major_locator(plt.MaxNLocator(20))
ax.set_xticks([])
plt.xticks(rotation = 90)
plt.xlabel('')
plt.ylabel(ylabel, fontfamily = 'serif')
ax.grid(axis = axis_grid, linestyle = '--', alpha = 0.9)
plt.show()
def corr_plot(data,
top_visible = False, right_visible = False,
bottom_visible = True, left_visible = False,
ylabel = None, figsize = (15, 11), axis_grid = 'y'):
fig, ax = plt.subplots(figsize = figsize)
plt.title('Correlations (Pearson)', size = 15, fontweight = 'bold', fontfamily = 'serif')
mask = np.triu(np.ones_like(data.corr(), dtype = bool))
sns.heatmap(round(data.corr(), 2), mask = mask, cmap = 'viridis', annot = True)
plt.show()
def columns_viz(data, color):
for i in range(len(data.columns)):
line_plot(data = data, y = data.columns[i],
color = color,
title = '{} dynamics'.format(data.columns[i]),
bottom_visible = False, figsize = (10, 2))
districts_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv")
products_info = pd.read_csv("../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv")
eng_path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info.describe() | code |
130005876/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from datetime import datetime
from usgs_scraper import extra_data_scraper
import pandas as pd
import re
import requests
import sys
"""
Get all the monitoring locations for a state from the USGS Water Services API.
Input:
The state we want data from (Arizona, New York, etc.)
Output:
A CSV of all monitoring locations, including:
- the agency monitoring them (usually USGS)
- monitoring location ID number
- name of monitoring location
- latitude
- longitude
- lat/long type
- county
- hydrologic unit
- drainage area (square miles)
- datum of gage (AKA elevation)
Example CSV:
agency
Data:
USGS National Water Dashboard:
https://dashboard.waterdata.usgs.gov/app/nwd/en/?aoi=default
USGS Water Services API:
https://waterdata.usgs.gov/nwis/rt
USGS Water Services API URL builder tool:
https://waterservices.usgs.gov/rest/IV-Test-Tool.html
"""
import csv
import numpy as np
import pandas as pd
import re
import requests
import sys
from datetime import datetime
from usgs_scraper import extra_data_scraper
def add_location_details(row):
    """
    Scrape per-site metadata from the USGS website and merge it into *row*.

    Fills in: latitude, longitude, lat/long type, county, hydrologic unit,
    drainage area (square miles), datum of gage (elevation) and datum type,
    all keyed off row['location_id'].
    """
    scraped = extra_data_scraper(row['location_id'])
    # Copy each scraped field onto the row under the same key.
    for field in ('latitude', 'longitude', 'lat_long_type', 'county',
                  'hydrologic_unit', 'drainage_area', 'datum_of_gage',
                  'datum_type'):
        row[field] = scraped[field]
    return row
def main(arg):
    """Download all monitoring locations for one state from the USGS water
    services API, enrich each with scraped site details, and write a
    timestamped CSV.

    arg : state code as used by the API (e.g. 'az', 'ny').
    """
    state = arg
    print(f'Getting USGS streamgage data for state: {state}')
    timestamp = datetime.now().strftime('%Y%m%d')
    outfile = f'usgs_streamgages_{state}_{timestamp}.csv'
    data_type = 'rdb'
    # BUG FIX: the query string previously contained the mangled text
    # '¶meterCd' (an HTML-entity corruption of '&parameterCd'), so the
    # discharge/gage-height parameter filter never reached the API.
    water_api_url = (f'https://nwis.waterservices.usgs.gov/nwis/iv/'
                     f'?format={data_type}&stateCd={state}'
                     f'&parameterCd=00060,00065&siteStatus=all')
    response = requests.get(water_api_url)
    lines = response.text.split('\n')
    # Skip the leading comment header of the RDB payload.
    # TODO(review): confirm the header is always exactly 17 lines.
    lines = lines[17:]
    # One '#  AGENCY  ID  NAME' comment line per site, terminated by a
    # '# ---' divider line.  Compile once instead of per iteration.
    site_pattern = re.compile(r'# ([a-zA-Z]+) ([0-9]+) ([A-Za-z0-9\.\,\s\-\@\(\)]+)')
    monitoring_locations = []
    for line in lines:
        if line[:5] == '# ---':
            break
        extraction = site_pattern.search(line)
        if extraction is None:
            # Tolerate unexpected lines instead of crashing on .group()
            # of a None match (the original raised AttributeError here).
            continue
        monitoring_locations.append({
            'agency': extraction.group(1),
            'location_id': extraction.group(2),
            'name': extraction.group(3),
        })
    df = pd.DataFrame(monitoring_locations)
    df = df.apply(add_location_details, axis=1)
    print(df)
    df.to_csv(outfile)
if __name__ == '__main__':
    # BUG FIX: sys.argv[1] raises IndexError when no argument is given,
    # which made the 'az' default branch unreachable; test the argument
    # count instead.
    if len(sys.argv) > 1 and sys.argv[1]:
        main(sys.argv[1])
    else:
        main('az')  # default state
104120001/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
nyra_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
nyra_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
nyra_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
track_id = 'SAR'
race_date = '2019-07-24'
race_number = 5
target_tracking = nyra_tracking.query('track_id == @track_id & race_date == @race_date & race_number == @race_number').sort_values('trakus_index')
target_tracking | code |
104120001/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
!pip install pymap3d
import pymap3d as pm
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
nyra_tracking = pd.read_csv("/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv")
nyra_start = pd.read_csv("/kaggle/input/big-data-derby-2022/nyra_start_table.csv")
nyra_race = pd.read_csv("/kaggle/input/big-data-derby-2022/nyra_race_table.csv") | code |
104120001/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_html_output_1.png",
"text_plain_output_1.png"
] | from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pymap3d as pm
nyra_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
nyra_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
nyra_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
track_id = 'SAR'
race_date = '2019-07-24'
race_number = 5
target_tracking = nyra_tracking.query('track_id == @track_id & race_date == @race_date & race_number == @race_number').sort_values('trakus_index')
target_tracking
# Sampling interval between consecutive trakus_index frames, in seconds
# (presumably the Trakus system records at 4 Hz — TODO confirm).
dtime = 0.25
# Track elevation in meters, per venue.
# NOTE(review): `elevation` is left undefined for any other track_id and
# the loop below would then raise NameError.
if track_id == 'AQU':
    elevation = 3
elif track_id == 'BEL':
    elevation = 20
elif track_id == 'SAR':
    elevation = 93
target_trackings = []
# Per horse (program_number): convert lat/lon(+elevation) to ECEF meters,
# then derive speed from consecutive positions.
for number, group in target_tracking.groupby('program_number'):
    ecef = np.array(pm.geodetic2ecef(group['latitude'].values, group['longitude'].values, np.array([elevation] * len(group)))).T
    # Euclidean distance between consecutive samples (m) / dtime (s),
    # times 3.6 to convert m/s -> km/h.
    v_ecef = np.sqrt(np.sum(np.diff(ecef, axis=0) ** 2, axis=1)) * 3.6 / dtime
    # Elapsed race time; trakus_index starts at 1, so t starts at 0.
    group['time'] = group['trakus_index'] * dtime - dtime
    # First sample has no predecessor, so prepend speed 0.
    group['speed'] = np.insert(v_ecef, 0, 0)
    target_trackings.append(group)
target_tracking = pd.concat(target_trackings)
target_tracking
poly = [[43.071833, -73.770353], [43.071685, -73.770699], [43.07247, -73.77113], [43.072486, -73.77086]]
df_poly = pd.DataFrame(poly, columns=['Lat', 'Lon'], dtype=float)
polygon = Polygon([tuple(x) for x in df_poly[['Lat', 'Lon']].to_numpy()])
target_tracking['Within'] = target_tracking.apply(lambda target_tracking: polygon.contains(Point(target_tracking['latitude'], target_tracking['longitude'])), axis=1)
new_df = target_tracking.query('Within == True')
new_df = new_df[new_df.trakus_index > 100]
new_df.sort_values(by=['trakus_index'], inplace=True)
new_df.drop_duplicates(subset='program_number', keep='first', inplace=True)
def cross(row):
    """2-D cross product of the reference line segment with the vector
    from the segment start to the row's GPS position.

    The sign tells which side of the directed line (lat1, lon1) ->
    (lat2, lon2) the point lies on; zero means exactly on the line.
    """
    lat1, lon1 = 43.071816, -73.77039
    lat2, lon2 = 43.07248, -73.770883
    d_lat = lat2 - lat1
    d_lon = lon2 - lon1
    return d_lon * (row['latitude'] - lat1) - d_lat * (row['longitude'] - lon1)
target_tracking['crossed'] = target_tracking.apply(cross, axis=1)
df2 = target_tracking[(target_tracking['Within'] == True) & (target_tracking['crossed'] > 0)]
df2.drop_duplicates(subset='program_number', keep='last', inplace=True)
df2.sort_values(by=['trakus_index', 'crossed'], ascending=[True, False], inplace=True)
print('Results')
print('Track:', track_id, 'Date:', race_date, 'Race:', race_number)
df2 | code |
104120001/cell_5 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pymap3d as pm
nyra_tracking = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_tracking_table.csv')
nyra_start = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
nyra_race = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_race_table.csv')
track_id = 'SAR'
race_date = '2019-07-24'
race_number = 5
target_tracking = nyra_tracking.query('track_id == @track_id & race_date == @race_date & race_number == @race_number').sort_values('trakus_index')
target_tracking
dtime = 0.25
if track_id == 'AQU':
elevation = 3
elif track_id == 'BEL':
elevation = 20
elif track_id == 'SAR':
elevation = 93
target_trackings = []
for number, group in target_tracking.groupby('program_number'):
ecef = np.array(pm.geodetic2ecef(group['latitude'].values, group['longitude'].values, np.array([elevation] * len(group)))).T
v_ecef = np.sqrt(np.sum(np.diff(ecef, axis=0) ** 2, axis=1)) * 3.6 / dtime
group['time'] = group['trakus_index'] * dtime - dtime
group['speed'] = np.insert(v_ecef, 0, 0)
target_trackings.append(group)
print('No.', number, ' Mean speed : {:.2f} km/h'.format(np.mean(v_ecef)))
target_tracking = pd.concat(target_trackings)
target_tracking | code |
1009451/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.models import Sequential
from keras.models import model_from_json
from keras.optimizers import SGD
import cv2
import cv2
import glob
import glob
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import os
import os
import pickle
import random
import random
labels = [1, 2, 3]
count = 0
for l in labels:
train_files = ['../input/train/Type_' + str(l) + '/' + f for f in os.listdir('../input/train/Type_' + str(l) + '/')]
random_file = random.choice(train_files)
im = cv2.imread(random_file)
plt.axis('off')
count += 1
img_rows = 224
img_cols = 224
def get_im_cv2(path, img_rows, img_cols, color_type=3):
    """Load the image at *path* and resize it to img_rows x img_cols.

    color_type 1 loads grayscale (cv2 flag 0); color_type 3 loads the
    default 3-channel BGR image.  Any other value leaves the image
    unset and raises NameError, as in the original.
    """
    if color_type == 1:
        loaded = cv2.imread(path, 0)
    elif color_type == 3:
        loaded = cv2.imread(path)
    # cv2.resize takes (width, height), hence (img_cols, img_rows).
    return cv2.resize(loaded, (img_cols, img_rows))
def load_train(img_rows, img_cols, color_type=3):
    """Read every training JPEG under ../input/train/Type_{1,2,3}/ and
    return (images, labels), where each label is the type number 1-3."""
    images = []
    labels = []
    for type_number in range(1, 4):
        pattern = os.path.join('..', 'input', 'train',
                               'Type_' + str(type_number), '*.jpg')
        for file_path in glob.glob(pattern):
            images.append(get_im_cv2(file_path, img_rows, img_cols, color_type))
            labels.append(type_number)
    return (images, labels)
X_train, y_train = load_train(64, 64, 3)
def cache_data(data, path):
    """Pickle *data* to *path*.

    Silently does nothing when the parent directory of *path* does not
    exist (preserves the original best-effort behaviour).
    """
    if os.path.isdir(os.path.dirname(path)):
        # 'with' guarantees the handle closes even if pickle.dump raises,
        # unlike the original explicit open()/close() pair.
        with open(path, 'wb') as file:
            pickle.dump(data, file)
def restore_data(path):
    """Unpickle and return the object stored at *path*.

    Returns an empty dict when the file does not exist (original
    behaviour), so a cache miss reads as "no data".
    """
    data = dict()
    if os.path.isfile(path):
        # 'with' guarantees the handle closes even if pickle.load raises.
        with open(path, 'rb') as file:
            data = pickle.load(file)
    return data
def save_model(model):
    """Persist a Keras model under ./cache/: architecture as JSON plus
    weights as HDF5.  Creates the cache directory on demand."""
    json_string = model.to_json()
    if not os.path.isdir('cache'):
        os.mkdir('cache')
    # NOTE(review): the handle returned by open() is never closed
    # explicitly; this relies on CPython's reference counting.
    open(os.path.join('cache', 'architecture.json'), 'w').write(json_string)
    model.save_weights(os.path.join('cache', 'model_weights.h5'), overwrite=True)
def read_model():
model = model_from_json(open(os.path.join('cache', 'architecture.json')).read())
model.load_weights(os.path.join('cache', 'model_weights.h5'))
return model
def create_model(img_rows, img_cols, color_type=3):
nb_classes = 3
nb_filters = 8
nb_pool = 2
nb_conv = 2
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(color_type, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
sgd = SGD(lr=0.01, decay=0, momentum=0, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
return model
nb_epoch = 3
batch_size = 16
model = create_model_v1(img_rows, img_cols, color_type_global)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1) | code |
1009451/cell_4 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import cv2
import cv2
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import os
import os
import random
import random
labels = [1, 2, 3]
count = 0
for l in labels:
train_files = ['../input/train/Type_' + str(l) + '/' + f for f in os.listdir('../input/train/Type_' + str(l) + '/')]
random_file = random.choice(train_files)
im = cv2.imread(random_file)
print('{} : {}'.format(random_file, im.shape))
plt.subplot(1, 4, count + 1).set_title(labels[l])
plt.imshow(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
plt.axis('off')
count += 1 | code |
1009451/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import numpy as np
np.random.seed(2016)
import os
import glob
import cv2
import math
import pickle
import datetime
import pandas as pd
import statistics
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import model_from_json
from sklearn.metrics import log_loss
from scipy.misc import imread, imresize | code |
1009451/cell_5 | [
"text_plain_output_1.png"
] | import cv2
import cv2
import glob
import glob
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import os
import os
import random
import random
labels = [1, 2, 3]
count = 0
for l in labels:
train_files = ['../input/train/Type_' + str(l) + '/' + f for f in os.listdir('../input/train/Type_' + str(l) + '/')]
random_file = random.choice(train_files)
im = cv2.imread(random_file)
plt.axis('off')
count += 1
img_rows = 224
img_cols = 224
def get_im_cv2(path, img_rows, img_cols, color_type=3):
if color_type == 1:
img = cv2.imread(path, 0)
elif color_type == 3:
img = cv2.imread(path)
resized = cv2.resize(img, (img_cols, img_rows))
return resized
def load_train(img_rows, img_cols, color_type=3):
X_train = []
y_train = []
print('Read train images')
for j in range(1, 4):
print('Load folder Type_{}'.format(j))
path = os.path.join('..', 'input', 'train', 'Type_' + str(j), '*.jpg')
files = glob.glob(path)
for fl in files:
flbase = os.path.basename(fl)
img = get_im_cv2(fl, img_rows, img_cols, color_type)
X_train.append(img)
y_train.append(j)
return (X_train, y_train)
X_train, y_train = load_train(64, 64, 3) | code |
34128064/cell_13 | [
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/youtube-new/USvideos.csv')
cnt_video_per_category = df.groupby(["category_id"]).count().reset_index()
cnt_video_per_category = cnt_video_per_category.loc[:,['category_id','video_id']]
df_1 = pd.merge(cnt_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
df_1 = df_1.sort_values(by='video_id', ascending = False)
df_1["Proportion"] = round((df_1["video_id"]/sum(df_1["video_id"]) * 100),2)
print(df_1)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
ax = sns.barplot(x="category_name",y="video_id", data=df_1)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#this section is used to check the likes, dislike, and comment rate
#first we need to create 3 new variable
df["likes_rate"] = df["likes"] /df["views"] * 100
df["dislikes_rate"] = df["dislikes"] / df["views"] * 100
df["comment_rate"] = df["comment_count"] / df["views"] * 100
#grouping the likes rate per category
cnt_likes_per_video_per_category = df.groupby("category_id").mean().reset_index()
cnt_likes_per_video_per_category = cnt_likes_per_video_per_category.loc[:,['category_id','likes_rate','dislikes_rate','comment_rate']]
#left join to get the category name
df_2 = pd.merge(cnt_likes_per_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
print(df_2)
#likes rate
df_2 = df_2.sort_values(by='likes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("likes rate")
ax = sns.barplot(x="category_name",y="likes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#dislikes rate
df_2 = df_2.sort_values(by='dislikes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("dislikes rate")
ax = sns.barplot(x="category_name",y="dislikes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#comments rate
df_2 = df_2.sort_values(by='comment_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("comments rate")
ax = sns.barplot(x="category_name",y="comment_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['trending_date', 'title', 'channel_title', 'publish_time', 'tags', 'views', 'likes', 'dislikes', 'comment_count', 'video_error_or_removed']]
df.isnull().any()
train['video_error_or_removed'].fillna('M', inplace=True)
train.isnull().any()
train['video_error_or_removed'].interpolate(inplace=True)
train.isnull().any()
train['video_error_or_removed'].replace('M', 2, inplace=True)
train['video_error_or_removed'].replace('D', 1, inplace=True)
train['video_error_or_removed'].replace('A', 0, inplace=True)
sns.heatmap(train.corr(), cmap='coolwarm', annot=True) | code |
34128064/cell_9 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/youtube-new/USvideos.csv')
cnt_video_per_category = df.groupby(["category_id"]).count().reset_index()
cnt_video_per_category = cnt_video_per_category.loc[:,['category_id','video_id']]
df_1 = pd.merge(cnt_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
df_1 = df_1.sort_values(by='video_id', ascending = False)
df_1["Proportion"] = round((df_1["video_id"]/sum(df_1["video_id"]) * 100),2)
print(df_1)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
ax = sns.barplot(x="category_name",y="video_id", data=df_1)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#this section is used to check the likes, dislike, and comment rate
#first we need to create 3 new variable
df["likes_rate"] = df["likes"] /df["views"] * 100
df["dislikes_rate"] = df["dislikes"] / df["views"] * 100
df["comment_rate"] = df["comment_count"] / df["views"] * 100
#grouping the likes rate per category
cnt_likes_per_video_per_category = df.groupby("category_id").mean().reset_index()
cnt_likes_per_video_per_category = cnt_likes_per_video_per_category.loc[:,['category_id','likes_rate','dislikes_rate','comment_rate']]
#left join to get the category name
df_2 = pd.merge(cnt_likes_per_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
print(df_2)
#likes rate
df_2 = df_2.sort_values(by='likes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("likes rate")
ax = sns.barplot(x="category_name",y="likes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#dislikes rate
df_2 = df_2.sort_values(by='dislikes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("dislikes rate")
ax = sns.barplot(x="category_name",y="dislikes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#comments rate
df_2 = df_2.sort_values(by='comment_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("comments rate")
ax = sns.barplot(x="category_name",y="comment_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['trending_date', 'title', 'channel_title', 'publish_time', 'tags', 'views', 'likes', 'dislikes', 'comment_count', 'video_error_or_removed']]
df.isnull().any() | code |
34128064/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/youtube-new/USvideos.csv')
print(df.columns) | code |
34128064/cell_6 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/youtube-new/USvideos.csv')
cnt_video_per_category = df.groupby(['category_id']).count().reset_index()
cnt_video_per_category = cnt_video_per_category.loc[:, ['category_id', 'video_id']]
df_1 = pd.merge(cnt_video_per_category, category_df, left_on='category_id', right_on='category_id', how='left')
df_1 = df_1.sort_values(by='video_id', ascending=False)
df_1['Proportion'] = round(df_1['video_id'] / sum(df_1['video_id']) * 100, 2)
print(df_1)
sns.set(style='whitegrid')
plt.figure(figsize=(11, 10))
ax = sns.barplot(x='category_name', y='video_id', data=df_1)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
plt.tight_layout()
plt.show() | code |
34128064/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/youtube-new/USvideos.csv')
cnt_video_per_category = df.groupby(["category_id"]).count().reset_index()
cnt_video_per_category = cnt_video_per_category.loc[:,['category_id','video_id']]
df_1 = pd.merge(cnt_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
df_1 = df_1.sort_values(by='video_id', ascending = False)
df_1["Proportion"] = round((df_1["video_id"]/sum(df_1["video_id"]) * 100),2)
print(df_1)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
ax = sns.barplot(x="category_name",y="video_id", data=df_1)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#this section is used to check the likes, dislike, and comment rate
#first we need to create 3 new variable
df["likes_rate"] = df["likes"] /df["views"] * 100
df["dislikes_rate"] = df["dislikes"] / df["views"] * 100
df["comment_rate"] = df["comment_count"] / df["views"] * 100
#grouping the likes rate per category
cnt_likes_per_video_per_category = df.groupby("category_id").mean().reset_index()
cnt_likes_per_video_per_category = cnt_likes_per_video_per_category.loc[:,['category_id','likes_rate','dislikes_rate','comment_rate']]
#left join to get the category name
df_2 = pd.merge(cnt_likes_per_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
print(df_2)
#likes rate
df_2 = df_2.sort_values(by='likes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("likes rate")
ax = sns.barplot(x="category_name",y="likes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#dislikes rate
df_2 = df_2.sort_values(by='dislikes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("dislikes rate")
ax = sns.barplot(x="category_name",y="dislikes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#comments rate
df_2 = df_2.sort_values(by='comment_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("comments rate")
ax = sns.barplot(x="category_name",y="comment_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['trending_date', 'title', 'channel_title', 'publish_time', 'tags', 'views', 'likes', 'dislikes', 'comment_count', 'video_error_or_removed']]
df.isnull().any()
train['video_error_or_removed'].fillna('M', inplace=True)
train.isnull().any()
train['video_error_or_removed'].interpolate(inplace=True)
train.isnull().any() | code |
34128064/cell_7 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/youtube-new/USvideos.csv')
cnt_video_per_category = df.groupby(["category_id"]).count().reset_index()
cnt_video_per_category = cnt_video_per_category.loc[:,['category_id','video_id']]
df_1 = pd.merge(cnt_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
df_1 = df_1.sort_values(by='video_id', ascending = False)
df_1["Proportion"] = round((df_1["video_id"]/sum(df_1["video_id"]) * 100),2)
print(df_1)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
ax = sns.barplot(x="category_name",y="video_id", data=df_1)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
df['likes_rate'] = df['likes'] / df['views'] * 100
df['dislikes_rate'] = df['dislikes'] / df['views'] * 100
df['comment_rate'] = df['comment_count'] / df['views'] * 100
cnt_likes_per_video_per_category = df.groupby('category_id').mean().reset_index()
cnt_likes_per_video_per_category = cnt_likes_per_video_per_category.loc[:, ['category_id', 'likes_rate', 'dislikes_rate', 'comment_rate']]
df_2 = pd.merge(cnt_likes_per_video_per_category, category_df, left_on='category_id', right_on='category_id', how='left')
print(df_2)
df_2 = df_2.sort_values(by='likes_rate', ascending=False)
sns.set(style='whitegrid')
plt.figure(figsize=(11, 10))
plt.title('likes rate')
ax = sns.barplot(x='category_name', y='likes_rate', data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
plt.tight_layout()
plt.show()
df_2 = df_2.sort_values(by='dislikes_rate', ascending=False)
sns.set(style='whitegrid')
plt.figure(figsize=(11, 10))
plt.title('dislikes rate')
ax = sns.barplot(x='category_name', y='dislikes_rate', data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
plt.tight_layout()
plt.show()
df_2 = df_2.sort_values(by='comment_rate', ascending=False)
sns.set(style='whitegrid')
plt.figure(figsize=(11, 10))
plt.title('comments rate')
ax = sns.barplot(x='category_name', y='comment_rate', data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
plt.tight_layout()
plt.show() | code |
34128064/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/youtube-new/USvideos.csv')
cnt_video_per_category = df.groupby(["category_id"]).count().reset_index()
cnt_video_per_category = cnt_video_per_category.loc[:,['category_id','video_id']]
df_1 = pd.merge(cnt_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
df_1 = df_1.sort_values(by='video_id', ascending = False)
df_1["Proportion"] = round((df_1["video_id"]/sum(df_1["video_id"]) * 100),2)
print(df_1)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
ax = sns.barplot(x="category_name",y="video_id", data=df_1)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#this section is used to check the likes, dislike, and comment rate
#first we need to create 3 new variable
df["likes_rate"] = df["likes"] /df["views"] * 100
df["dislikes_rate"] = df["dislikes"] / df["views"] * 100
df["comment_rate"] = df["comment_count"] / df["views"] * 100
#grouping the likes rate per category
cnt_likes_per_video_per_category = df.groupby("category_id").mean().reset_index()
cnt_likes_per_video_per_category = cnt_likes_per_video_per_category.loc[:,['category_id','likes_rate','dislikes_rate','comment_rate']]
#left join to get the category name
df_2 = pd.merge(cnt_likes_per_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
print(df_2)
#likes rate
df_2 = df_2.sort_values(by='likes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("likes rate")
ax = sns.barplot(x="category_name",y="likes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#dislikes rate
df_2 = df_2.sort_values(by='dislikes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("dislikes rate")
ax = sns.barplot(x="category_name",y="dislikes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#comments rate
df_2 = df_2.sort_values(by='comment_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("comments rate")
ax = sns.barplot(x="category_name",y="comment_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null | code |
34128064/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/youtube-new/USvideos.csv')
cnt_video_per_category = df.groupby(["category_id"]).count().reset_index()
cnt_video_per_category = cnt_video_per_category.loc[:,['category_id','video_id']]
df_1 = pd.merge(cnt_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
df_1 = df_1.sort_values(by='video_id', ascending = False)
df_1["Proportion"] = round((df_1["video_id"]/sum(df_1["video_id"]) * 100),2)
print(df_1)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
ax = sns.barplot(x="category_name",y="video_id", data=df_1)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#this section is used to check the likes, dislike, and comment rate
#first we need to create 3 new variable
df["likes_rate"] = df["likes"] /df["views"] * 100
df["dislikes_rate"] = df["dislikes"] / df["views"] * 100
df["comment_rate"] = df["comment_count"] / df["views"] * 100
#grouping the likes rate per category
cnt_likes_per_video_per_category = df.groupby("category_id").mean().reset_index()
cnt_likes_per_video_per_category = cnt_likes_per_video_per_category.loc[:,['category_id','likes_rate','dislikes_rate','comment_rate']]
#left join to get the category name
df_2 = pd.merge(cnt_likes_per_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
print(df_2)
#likes rate
df_2 = df_2.sort_values(by='likes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("likes rate")
ax = sns.barplot(x="category_name",y="likes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#dislikes rate
df_2 = df_2.sort_values(by='dislikes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("dislikes rate")
ax = sns.barplot(x="category_name",y="dislikes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#comments rate
df_2 = df_2.sort_values(by='comment_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("comments rate")
ax = sns.barplot(x="category_name",y="comment_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['trending_date', 'title', 'channel_title', 'publish_time', 'tags', 'views', 'likes', 'dislikes', 'comment_count', 'video_error_or_removed']]
df.isnull().any()
train['video_error_or_removed'].fillna('M', inplace=True)
train.isnull().any() | code |
34128064/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/youtube-new/USvideos.csv')
cnt_video_per_category = df.groupby(["category_id"]).count().reset_index()
cnt_video_per_category = cnt_video_per_category.loc[:,['category_id','video_id']]
df_1 = pd.merge(cnt_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
df_1 = df_1.sort_values(by='video_id', ascending = False)
df_1["Proportion"] = round((df_1["video_id"]/sum(df_1["video_id"]) * 100),2)
print(df_1)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
ax = sns.barplot(x="category_name",y="video_id", data=df_1)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#this section is used to check the likes, dislike, and comment rate
#first we need to create 3 new variable
df["likes_rate"] = df["likes"] /df["views"] * 100
df["dislikes_rate"] = df["dislikes"] / df["views"] * 100
df["comment_rate"] = df["comment_count"] / df["views"] * 100
#grouping the likes rate per category
cnt_likes_per_video_per_category = df.groupby("category_id").mean().reset_index()
cnt_likes_per_video_per_category = cnt_likes_per_video_per_category.loc[:,['category_id','likes_rate','dislikes_rate','comment_rate']]
#left join to get the category name
df_2 = pd.merge(cnt_likes_per_video_per_category,category_df,left_on='category_id',right_on='category_id',how='left')
print(df_2)
#likes rate
df_2 = df_2.sort_values(by='likes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("likes rate")
ax = sns.barplot(x="category_name",y="likes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#dislikes rate
df_2 = df_2.sort_values(by='dislikes_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("dislikes rate")
ax = sns.barplot(x="category_name",y="dislikes_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
#comments rate
df_2 = df_2.sort_values(by='comment_rate', ascending = False)
sns.set(style="whitegrid")
plt.figure(figsize=(11, 10))
plt.title("comments rate")
ax = sns.barplot(x="category_name",y="comment_rate", data=df_2)
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
plt.tight_layout()
plt.show()
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['trending_date', 'title', 'channel_title', 'publish_time', 'tags', 'views', 'likes', 'dislikes', 'comment_count', 'video_error_or_removed']]
df.isnull().any()
train['video_error_or_removed'].fillna('M', inplace=True)
train.isnull().any()
train['video_error_or_removed'].interpolate(inplace=True)
train.isnull().any()
train['video_error_or_removed'].replace('M', 2, inplace=True)
train['video_error_or_removed'].replace('D', 1, inplace=True)
train['video_error_or_removed'].replace('A', 0, inplace=True)
train.head() | code |
34128064/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/youtube-new/USvideos.csv')
df.head() | code |
105207443/cell_42 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
def pie_plot(df, cols_list, rows, cols):
fig, axes = plt.subplots(rows, cols)
for ax, col in zip(axes.ravel(), cols_list):
df[col].value_counts().plot(ax=ax, kind='pie', figsize=(15, 15), fontsize=10, autopct='%1.0f%%')
ax.set_title(str(col), fontsize = 12)
plt.show()
df_c = df.copy()
df_c = df_c.drop_duplicates()
df_c = pd.get_dummies(df_c, columns=['fueltype', 'aspiration', 'carbody', 'drivewheel', 'enginelocation', 'enginetype', 'fuelsystem'])
df_c = pd.get_dummies(df_c, columns=['CarName'])
def box_plot(num_cols):
for i in range(len(num_cols)):
if i == 16:
break
else:
l = num_cols[i]
def corr(x, y, **kwargs):
coef = np.corrcoef(x, y)[0][1]
label = '$\\rho$ = ' + str(round(coef, 2))
ax = plt.gca()
ax.annotate(label, xy=(0.3, 1), size=30, xycoords=ax.transAxes)
def scatter_features(l):
g = sns.PairGrid(df_c, y_vars='price', x_vars=df_c[l].columns, height=5)
g.map(plt.scatter, color='darkred', alpha=0.2)
g.map(corr)
plt.figure(figsize=(8, 8))
sns.heatmap(df.corr(), annot=True, cmap='Blues', fmt='.3f')
plt.show() | code |
105207443/cell_6 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.head() | code |
105207443/cell_29 | [
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
df_c = df.copy()
df_c = df_c.drop_duplicates()
df_c = pd.get_dummies(df_c, columns=['fueltype', 'aspiration', 'carbody', 'drivewheel', 'enginelocation', 'enginetype', 'fuelsystem'])
df_c = pd.get_dummies(df_c, columns=['CarName'])
df_c.head() | code |
105207443/cell_39 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
def pie_plot(df, cols_list, rows, cols):
fig, axes = plt.subplots(rows, cols)
for ax, col in zip(axes.ravel(), cols_list):
df[col].value_counts().plot(ax=ax, kind='pie', figsize=(15, 15), fontsize=10, autopct='%1.0f%%')
ax.set_title(str(col), fontsize = 12)
plt.show()
df_c = df.copy()
df_c = df_c.drop_duplicates()
df_c = pd.get_dummies(df_c, columns=['fueltype', 'aspiration', 'carbody', 'drivewheel', 'enginelocation', 'enginetype', 'fuelsystem'])
df_c = pd.get_dummies(df_c, columns=['CarName'])
def box_plot(num_cols):
for i in range(len(num_cols)):
if i == 16:
break
else:
l = num_cols[i]
def corr(x, y, **kwargs):
coef = np.corrcoef(x, y)[0][1]
label = '$\\rho$ = ' + str(round(coef, 2))
ax = plt.gca()
ax.annotate(label, xy=(0.3, 1), size=30, xycoords=ax.transAxes)
def scatter_features(l):
g = sns.PairGrid(df_c, y_vars='price', x_vars=df_c[l].columns, height=5)
g.map(plt.scatter, color='darkred', alpha=0.2)
g.map(corr)
scatter_features(['stroke', 'compressionratio', 'horsepower', 'peakrpm', 'citympg']) | code |
105207443/cell_41 | [
"image_output_1.png"
] | from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
df_c = df.copy()
df_c = df_c.drop_duplicates()
df_c = pd.get_dummies(df_c, columns=['fueltype', 'aspiration', 'carbody', 'drivewheel', 'enginelocation', 'enginetype', 'fuelsystem'])
df_c = pd.get_dummies(df_c, columns=['CarName'])
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif_df = df_c.loc[:, df_c.columns != 'price']
vif_data = pd.DataFrame()
vif_data['feature'] = vif_df.columns
vif_data['VIF'] = [variance_inflation_factor(vif_df.values, i) for i in range(len(vif_df.columns))]
vif_data.head(17) | code |
105207443/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
df_c = df.copy()
df_c = df_c.drop_duplicates()
print('Before dropping duplicates {} after dropping duplicates {}'.format(df.shape[0], df_c.shape[0])) | code |
105207443/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.info() | code |
105207443/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
def pie_plot(df, cols_list, rows, cols):
fig, axes = plt.subplots(rows, cols)
for ax, col in zip(axes.ravel(), cols_list):
df[col].value_counts().plot(ax=ax, kind='pie', figsize=(15, 15), fontsize=10, autopct='%1.0f%%')
ax.set_title(str(col), fontsize = 12)
plt.show()
df_c = df.copy()
df_c = df_c.drop_duplicates()
df_c = pd.get_dummies(df_c, columns=['fueltype', 'aspiration', 'carbody', 'drivewheel', 'enginelocation', 'enginetype', 'fuelsystem'])
df_c = pd.get_dummies(df_c, columns=['CarName'])
def box_plot(num_cols):
for i in range(len(num_cols)):
if i == 16:
break
else:
l = num_cols[i]
box_plot(['symboling', 'doornumber', 'wheelbase', 'carlength', 'carwidth', 'carheight', 'curbweight', 'cylindernumber', 'enginesize', 'boreratio', 'stroke', 'compressionratio', 'horsepower', 'peakrpm', 'citympg', 'highwaympg']) | code |
105207443/cell_8 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'}) | code |
105207443/cell_38 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
def pie_plot(df, cols_list, rows, cols):
fig, axes = plt.subplots(rows, cols)
for ax, col in zip(axes.ravel(), cols_list):
df[col].value_counts().plot(ax=ax, kind='pie', figsize=(15, 15), fontsize=10, autopct='%1.0f%%')
ax.set_title(str(col), fontsize = 12)
plt.show()
df_c = df.copy()
df_c = df_c.drop_duplicates()
df_c = pd.get_dummies(df_c, columns=['fueltype', 'aspiration', 'carbody', 'drivewheel', 'enginelocation', 'enginetype', 'fuelsystem'])
df_c = pd.get_dummies(df_c, columns=['CarName'])
def box_plot(num_cols):
for i in range(len(num_cols)):
if i == 16:
break
else:
l = num_cols[i]
def corr(x, y, **kwargs):
coef = np.corrcoef(x, y)[0][1]
label = '$\\rho$ = ' + str(round(coef, 2))
ax = plt.gca()
ax.annotate(label, xy=(0.3, 1), size=30, xycoords=ax.transAxes)
def scatter_features(l):
g = sns.PairGrid(df_c, y_vars='price', x_vars=df_c[l].columns, height=5)
g.map(plt.scatter, color='darkred', alpha=0.2)
g.map(corr)
scatter_features(['carheight', 'curbweight', 'cylindernumber', 'enginesize', 'boreratio']) | code |
105207443/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
df_c = df.copy()
df_c = df_c.drop_duplicates()
df_c = pd.get_dummies(df_c, columns=['fueltype', 'aspiration', 'carbody', 'drivewheel', 'enginelocation', 'enginetype', 'fuelsystem'])
df_c = pd.get_dummies(df_c, columns=['CarName'])
df_c.head() | code |
105207443/cell_46 | [
"image_output_1.png"
] | from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
df_c = df.copy()
df_c = df_c.drop_duplicates()
df_c = pd.get_dummies(df_c, columns=['fueltype', 'aspiration', 'carbody', 'drivewheel', 'enginelocation', 'enginetype', 'fuelsystem'])
df_c = pd.get_dummies(df_c, columns=['CarName'])
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif_df = df_c.loc[:, df_c.columns != 'price']
vif_data = pd.DataFrame()
vif_data['feature'] = vif_df.columns
vif_data['VIF'] = [variance_inflation_factor(vif_df.values, i) for i in range(len(vif_df.columns))]
df_num_clean = df_c[['symboling', 'doornumber', 'wheelbase', 'carlength', 'carwidth', 'carheight', 'curbweight', 'cylindernumber', 'enginesize', 'boreratio', 'stroke', 'compressionratio', 'horsepower', 'peakrpm', 'citympg', 'highwaympg']].copy()
df_num_clean.head() | code |
105207443/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
sns.heatmap(df.isnull()) | code |
105207443/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
df.hist(bins=200, figsize=[20, 10]) | code |
105207443/cell_37 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
def pie_plot(df, cols_list, rows, cols):
fig, axes = plt.subplots(rows, cols)
for ax, col in zip(axes.ravel(), cols_list):
df[col].value_counts().plot(ax=ax, kind='pie', figsize=(15, 15), fontsize=10, autopct='%1.0f%%')
ax.set_title(str(col), fontsize = 12)
plt.show()
df_c = df.copy()
df_c = df_c.drop_duplicates()
df_c = pd.get_dummies(df_c, columns=['fueltype', 'aspiration', 'carbody', 'drivewheel', 'enginelocation', 'enginetype', 'fuelsystem'])
df_c = pd.get_dummies(df_c, columns=['CarName'])
def box_plot(num_cols):
for i in range(len(num_cols)):
if i == 16:
break
else:
l = num_cols[i]
def corr(x, y, **kwargs):
coef = np.corrcoef(x, y)[0][1]
label = '$\\rho$ = ' + str(round(coef, 2))
ax = plt.gca()
ax.annotate(label, xy=(0.3, 1), size=30, xycoords=ax.transAxes)
def scatter_features(l):
g = sns.PairGrid(df_c, y_vars='price', x_vars=df_c[l].columns, height=5)
g.map(plt.scatter, color='darkred', alpha=0.2)
g.map(corr)
scatter_features(['symboling', 'doornumber', 'wheelbase', 'carlength', 'carwidth']) | code |
105207443/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv')
df.describe(include='all').style.background_gradient(cmap='Blues').set_properties(**{'font-family': 'Segoe UI'})
def pie_plot(df, cols_list, rows, cols):
fig, axes = plt.subplots(rows, cols)
for ax, col in zip(axes.ravel(), cols_list):
df[col].value_counts().plot(ax=ax, kind='pie', figsize=(15, 15), fontsize=10, autopct='%1.0f%%')
ax.set_title(str(col), fontsize = 12)
plt.show()
pie_plot(df, ['fueltype', 'aspiration', 'doornumber', 'cylindernumber', 'carbody', 'enginetype', 'fuelsystem', 'enginelocation', 'drivewheel'], 3, 3) | code |
72065687/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
test.shape
train.isnull().sum()
test.isnull().sum()
train.dropna(inplace=True)
test.dropna(inplace=True)
train.shape
test.shape
travel_dum = pd.get_dummies(train[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(train[['Class']], drop_first=True)
train = pd.concat([train, travel_dum, class_dum], axis=1)
travel_dum = pd.get_dummies(test[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(test[['Class']], drop_first=True)
test = pd.concat([test, travel_dum, class_dum], axis=1)
test.head() | code |
72065687/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape | code |
72065687/cell_25 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
test.shape
train.isnull().sum()
test.isnull().sum()
train.dropna(inplace=True)
test.dropna(inplace=True)
train.shape
test.shape
travel_dum = pd.get_dummies(train[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(train[['Class']], drop_first=True)
train = pd.concat([train, travel_dum, class_dum], axis=1)
travel_dum = pd.get_dummies(test[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(test[['Class']], drop_first=True)
test = pd.concat([test, travel_dum, class_dum], axis=1)
test.drop(['Type of Travel', 'Class'], axis=1, inplace=True)
test.head() | code |
72065687/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape | code |
72065687/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape
travel_dum = pd.get_dummies(train[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(train[['Class']], drop_first=True)
train = pd.concat([train, travel_dum, class_dum], axis=1)
train.drop(['Type of Travel', 'Class'], axis=1, inplace=True)
sns.histplot(x='Flight Distance', hue='satisfaction', data=train, kde=True, palette='dark') | code |
72065687/cell_30 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
plt.figure(figsize=(20, 20)) | code |
72065687/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape
travel_dum = pd.get_dummies(train[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(train[['Class']], drop_first=True)
train = pd.concat([train, travel_dum, class_dum], axis=1)
train.drop(['Type of Travel', 'Class'], axis=1, inplace=True)
sns.countplot(x='Customer Type', hue='satisfaction', data=train) | code |
72065687/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.info() | code |
72065687/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape
travel_dum = pd.get_dummies(train[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(train[['Class']], drop_first=True)
train = pd.concat([train, travel_dum, class_dum], axis=1)
train.head() | code |
72065687/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72065687/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum() | code |
72065687/cell_32 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape
travel_dum = pd.get_dummies(train[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(train[['Class']], drop_first=True)
train = pd.concat([train, travel_dum, class_dum], axis=1)
train.drop(['Type of Travel', 'Class'], axis=1, inplace=True)
sns.histplot(x='Age', hue='satisfaction', data=train, kde=True, palette='flare') | code |
72065687/cell_28 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape
travel_dum = pd.get_dummies(train[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(train[['Class']], drop_first=True)
train = pd.concat([train, travel_dum, class_dum], axis=1)
train.drop(['Type of Travel', 'Class'], axis=1, inplace=True)
train.info() | code |
72065687/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
test.shape
test.isnull().sum() | code |
72065687/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape
train.head() | code |
72065687/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape
train.info() | code |
72065687/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.head() | code |
72065687/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
test.shape
test.isnull().sum()
test.dropna(inplace=True)
test.shape
test.info() | code |
72065687/cell_31 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape
travel_dum = pd.get_dummies(train[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(train[['Class']], drop_first=True)
train = pd.concat([train, travel_dum, class_dum], axis=1)
train.drop(['Type of Travel', 'Class'], axis=1, inplace=True)
sns.countplot(x='Online boarding', hue='satisfaction', data=train, color='green') | code |
72065687/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape
travel_dum = pd.get_dummies(train[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(train[['Class']], drop_first=True)
train = pd.concat([train, travel_dum, class_dum], axis=1)
train.drop(['Type of Travel', 'Class'], axis=1, inplace=True)
train.head() | code |
72065687/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
test.shape
test.isnull().sum()
test.dropna(inplace=True)
test.shape | code |
72065687/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
train.shape
train.isnull().sum()
train.dropna(inplace=True)
train.shape
travel_dum = pd.get_dummies(train[['Type of Travel']], drop_first=True)
class_dum = pd.get_dummies(train[['Class']], drop_first=True)
train = pd.concat([train, travel_dum, class_dum], axis=1)
train.drop(['Type of Travel', 'Class'], axis=1, inplace=True)
train.head() | code |
72065687/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('/kaggle/input/airline-passenger-satisfaction/test.csv')
test.shape | code |
128010675/cell_30 | [
"text_html_output_1.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from datasets import load_dataset
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from torch.utils.data import DataLoader
from transformers import TrainingArguments, Trainer
from transformers import ViTForImageClassification
from transformers import ViTImageProcessor
import numpy as np
import torch
import torch
from datasets import load_dataset
train_ds = load_dataset('miladfa7/5-Flower-Types-Classification-Dataset')
train_ds = train_ds['train'].train_test_split(test_size=0.15)
train_data = train_ds['train']
test_data = train_ds['test']
label = list(set(train_data['label']))
id2label = {id: label for id, label in enumerate(label)}
label2id = {label: id for id, label in id2label.items()}
from transformers import ViTImageProcessor
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')
from torchvision.transforms import CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor
image_mean, image_std = (processor.image_mean, processor.image_std)
size = processor.size['height']
normalize = Normalize(mean=image_mean, std=image_std)
_train_transforms = Compose([Resize((size, size)), RandomHorizontalFlip(), ToTensor(), normalize])
_val_transforms = Compose([Resize((size, size)), ToTensor(), normalize])
def train_transforms(examples):
examples['pixel_values'] = [_train_transforms(image.convert('RGB')) for image in examples['image']]
return examples
def val_transforms(examples):
examples['pixel_values'] = [_val_transforms(image.convert('RGB')) for image in examples['image']]
return examples
train_data.set_transform(train_transforms)
test_data.set_transform(val_transforms)
from torch.utils.data import DataLoader
import torch
def collate_fn(examples):
pixel_values = torch.stack([example['pixel_values'] for example in examples])
labels = torch.tensor([label2id[example['label']] for example in examples])
return {'pixel_values': pixel_values, 'labels': labels}
train_dataloader = DataLoader(train_data, collate_fn=collate_fn, batch_size=4)
test_dataloader = DataLoader(test_data, collate_fn=collate_fn, batch_size=4)
from transformers import ViTForImageClassification
model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224-in21k', id2label=id2label, label2id=label2id)
from transformers import TrainingArguments, Trainer
metric_name = 'accuracy'
args = TrainingArguments('5-Flower-Types-Classification', save_strategy='epoch', evaluation_strategy='epoch', learning_rate=2e-05, per_device_train_batch_size=32, per_device_eval_batch_size=4, num_train_epochs=5, weight_decay=0.01, load_best_model_at_end=True, metric_for_best_model=metric_name, logging_dir='logs', remove_unused_columns=False)
from sklearn.metrics import accuracy_score
import numpy as np
def compute_metrics(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
return dict(accuracy=accuracy_score(predictions, labels))
import torch
trainer = Trainer(model, args, train_dataset=train_data, eval_dataset=test_data, data_collator=collate_fn, compute_metrics=compute_metrics, tokenizer=processor)
trainer.train()
outputs = trainer.predict(test_data)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
labels = ['Lilly', 'otus', 'Sunflower', 'Orchid', 'Tulip']
y_true = outputs.label_ids
y_pred = outputs.predictions.argmax(1)
cm = confusion_matrix(y_true, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=labels)
disp.plot(xticks_rotation=45) | code |
128010675/cell_6 | [
"text_plain_output_1.png"
] | from datasets import load_dataset
from datasets import load_dataset
train_ds = load_dataset('miladfa7/5-Flower-Types-Classification-Dataset')
train_ds = train_ds['train'].train_test_split(test_size=0.15)
train_data = train_ds['train']
test_data = train_ds['test']
train_data[52]['image'] | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.