path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1) |
---|---|---|---|
2025748/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2025748/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
print(len(train_set), 'train +', len(test_set), 'test') | code |
2025748/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
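# Bin median_income into discrete income categories (values above 5 are capped at 5)
# so the train/test split below can be stratified by income.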
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
strat_train_set.head() | code |
2025748/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
housing = strat_train_set.copy()
corr_matrix = housing.corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
housing['rooms_per_household'] = housing['total_rooms'] / housing['households']
housing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms']
housing['population_per_household'] = housing['population'] / housing['households']
corr_matrix = housing.corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
housing = strat_train_set.drop('median_house_value', axis=1)
housing_labels = strat_train_set['median_house_value'].copy()
median = housing['total_bedrooms'].median()
housing['total_bedrooms'] = housing['total_bedrooms'].fillna(median)
housing.info()
print(median) | code |
2025748/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
housing.info() | code |
2025748/cell_14 | [
"image_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
housing = strat_train_set.copy()
corr_matrix = housing.corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
housing['rooms_per_household'] = housing['total_rooms'] / housing['households']
housing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms']
housing['population_per_household'] = housing['population'] / housing['households']
corr_matrix = housing.corr()
corr_matrix['median_house_value'].sort_values(ascending=False) | code |
2025748/cell_10 | [
"text_html_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
housing = strat_train_set.copy()
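# Geographic scatter plot of the training set: marker size is proportional to population, color encodes median_house_value.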
housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4, s=housing['population'] / 100, label='population', c='median_house_value', cmap=plt.get_cmap('jet'), colorbar=True)
plt.legend() | code |
2025748/cell_12 | [
"text_plain_output_1.png"
from pandas.plotting import scatter_matrix
from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
housing = strat_train_set.copy()
corr_matrix = housing.corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
from pandas.plotting import scatter_matrix
attributes = ['median_house_value', 'median_income', 'total_rooms', 'housing_median_age']
scatter_matrix(housing[attributes], figsize=(12, 8)) | code |
2025748/cell_5 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
housing.describe() | code |
327301/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df['Age'].groupby(titanic_df['Survived']).mean() | code |
327301/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
327301/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
print('Number of records in titanic_df= {}'.format(len(titanic_df)))
print('Number of records in test_df = {}'.format(len(test_df)))
print(list(titanic_df.columns.values)) | code |
327301/cell_5 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
pd.crosstab(index=titanic_df['Survived'], columns=titanic_df['Sex']) | code |
34118808/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T
train_df.head().T
train_df = train_df.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test_df = test_df.drop(['Name', 'Ticket'], axis=1)
train_df.isnull().sum() | code |
34118808/cell_4 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
train_df.head().T | code |
34118808/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T
test_df.describe(include='all') | code |
34118808/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T | code |
34118808/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T
train_df.head().T
train_df = train_df.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test_df = test_df.drop(['Name', 'Ticket'], axis=1)
train_df.isnull().sum()
test_df.isnull().sum()
# Embarked variable
# Embarked has 2 missing values in the train set, so fill them with the mode.
train_df["Embarked"].fillna(train_df["Embarked"].mode()[0], inplace=True)
# verify that the nulls in Embarked are gone
train_df.isnull().sum()
# Plot the relation between Embarked and survival
fig, (axis1,axis2,axis3) = plt.subplots(1,3,figsize=(15,5))
sns.countplot(x='Embarked', data=train_df , ax = axis1 )
sns.countplot(x='Survived', hue="Embarked", data=train_df, order=[1,0] , ax = axis2)
combined = train_df[["Embarked", "Survived"]].groupby(['Embarked'],as_index=False).mean()
sns.barplot(x='Embarked', y='Survived', data=combined,order=['S','C','Q'],ax=axis3)
# convert the category variable to numeric by dummy variable method
train_df1 = pd.get_dummies(train_df, prefix ='Embark', columns = ['Embarked'])
test_df1 = pd.get_dummies(test_df, prefix ='Embark', columns = ['Embarked'])
train_df1.head().T
test_df1['Fare'].fillna(test_df1['Fare'].median(), inplace=True)
# Age
# Age has missing values in both train and test; replace them with the median.
test_df1["Age"].fillna(test_df1["Age"].median(), inplace=True)
train_df1["Age"].fillna(train_df1["Age"].median(), inplace=True)
# verify that the NaNs were replaced with the median
test_df1.isnull().sum()
# convert Age from float to int
train_df1['Age'] = train_df1['Age'].astype(int)
test_df1['Age'] = test_df1['Age'].astype(int)
# plot Age against survival
# average survival rate by age
fig, axis1 = plt.subplots(1,1,figsize=(18,4))
average_age = train_df1[["Age", "Survived"]].groupby(['Age'],as_index=False).mean()
sns.barplot(x='Age', y='Survived', data=average_age)
train_df1.drop('Cabin', axis=1, inplace=True)
test_df1.drop('Cabin', axis=1, inplace=True)
train_df1.head().T
train_df1['Family'] = train_df1['Parch'] + train_df1['SibSp']
train_df1.loc[train_df1['Family'] > 0, 'Family'] = 1
train_df1.loc[train_df1['Family'] == 0, 'Family'] = 0
test_df1['Family'] = test_df1['Parch'] + test_df1['SibSp']
test_df1.loc[test_df1['Family'] > 0, 'Family'] = 1
test_df1.loc[test_df1['Family'] == 0, 'Family'] = 0
train_df1 = train_df1.drop(['SibSp', 'Parch'], axis=1)
test_df1 = test_df1.drop(['SibSp', 'Parch'], axis=1)
fig, (axis1, axis2) = plt.subplots(1, 2, sharex=True, figsize=(10, 5))
sns.countplot(x='Family', data=train_df1, order=[1, 0], ax=axis1)
family_perc = train_df1[['Family', 'Survived']].groupby(['Family'], as_index=False).mean()
sns.barplot(x='Family', y='Survived', data=family_perc, order=[1, 0], ax=axis2)
axis1.set_xticklabels(['With Family', 'Alone'], rotation=0) | code |
34118808/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB | code |
34118808/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T
train_df.head().T
train_df = train_df.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test_df = test_df.drop(['Name', 'Ticket'], axis=1)
train_df.isnull().sum()
test_df.isnull().sum()
# Embarked variable
# Embarked has 2 missing values in the train set, so fill them with the mode.
train_df["Embarked"].fillna(train_df["Embarked"].mode()[0], inplace=True)
# verify that the nulls in Embarked are gone
train_df.isnull().sum()
# Plot the relation between Embarked and survival
fig, (axis1,axis2,axis3) = plt.subplots(1,3,figsize=(15,5))
sns.countplot(x='Embarked', data=train_df , ax = axis1 )
sns.countplot(x='Survived', hue="Embarked", data=train_df, order=[1,0] , ax = axis2)
combined = train_df[["Embarked", "Survived"]].groupby(['Embarked'],as_index=False).mean()
sns.barplot(x='Embarked', y='Survived', data=combined,order=['S','C','Q'],ax=axis3)
# convert the category variable to numeric by dummy variable method
train_df1 = pd.get_dummies(train_df, prefix ='Embark', columns = ['Embarked'])
test_df1 = pd.get_dummies(test_df, prefix ='Embark', columns = ['Embarked'])
train_df1.head().T
test_df1['Fare'].fillna(test_df1['Fare'].median(), inplace=True)
# Age
# Age has missing values in both train and test; replace them with the median.
test_df1["Age"].fillna(test_df1["Age"].median(), inplace=True)
train_df1["Age"].fillna(train_df1["Age"].median(), inplace=True)
# verify that the NaNs were replaced with the median
test_df1.isnull().sum()
# convert Age from float to int
train_df1['Age'] = train_df1['Age'].astype(int)
test_df1['Age'] = test_df1['Age'].astype(int)
# plot Age against survival
# average survival rate by age
fig, axis1 = plt.subplots(1,1,figsize=(18,4))
average_age = train_df1[["Age", "Survived"]].groupby(['Age'],as_index=False).mean()
sns.barplot(x='Age', y='Survived', data=average_age)
train_df1.drop('Cabin', axis=1, inplace=True)
test_df1.drop('Cabin', axis=1, inplace=True)
train_df1.head().T | code |
34118808/cell_8 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T
train_df.head().T
train_df = train_df.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test_df = test_df.drop(['Name', 'Ticket'], axis=1)
train_df.info() | code |
34118808/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T
train_df.head().T
train_df = train_df.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test_df = test_df.drop(['Name', 'Ticket'], axis=1)
train_df.isnull().sum()
test_df.isnull().sum()
# Embarked variable
# Embarked has 2 missing values in the train set, so fill them with the mode.
train_df["Embarked"].fillna(train_df["Embarked"].mode()[0], inplace=True)
# verify that the nulls in Embarked are gone
train_df.isnull().sum()
# Plot the relation between Embarked and survival
fig, (axis1,axis2,axis3) = plt.subplots(1,3,figsize=(15,5))
sns.countplot(x='Embarked', data=train_df , ax = axis1 )
sns.countplot(x='Survived', hue="Embarked", data=train_df, order=[1,0] , ax = axis2)
combined = train_df[["Embarked", "Survived"]].groupby(['Embarked'],as_index=False).mean()
sns.barplot(x='Embarked', y='Survived', data=combined,order=['S','C','Q'],ax=axis3)
# convert the category variable to numeric by dummy variable method
train_df1 = pd.get_dummies(train_df, prefix ='Embark', columns = ['Embarked'])
test_df1 = pd.get_dummies(test_df, prefix ='Embark', columns = ['Embarked'])
train_df1.head().T
sns.boxplot(x='Fare', data=train_df1)
test_df1['Fare'].fillna(test_df1['Fare'].median(), inplace=True) | code |
34118808/cell_16 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T
train_df.head().T
train_df = train_df.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test_df = test_df.drop(['Name', 'Ticket'], axis=1)
train_df.isnull().sum()
test_df.isnull().sum()
# Embarked variable
# Embarked has 2 missing values in the train set, so fill them with the mode.
train_df["Embarked"].fillna(train_df["Embarked"].mode()[0], inplace=True)
# verify that the nulls in Embarked are gone
train_df.isnull().sum()
# Plot the relation between Embarked and survival
fig, (axis1,axis2,axis3) = plt.subplots(1,3,figsize=(15,5))
sns.countplot(x='Embarked', data=train_df , ax = axis1 )
sns.countplot(x='Survived', hue="Embarked", data=train_df, order=[1,0] , ax = axis2)
combined = train_df[["Embarked", "Survived"]].groupby(['Embarked'],as_index=False).mean()
sns.barplot(x='Embarked', y='Survived', data=combined,order=['S','C','Q'],ax=axis3)
# convert the category variable to numeric by dummy variable method
train_df1 = pd.get_dummies(train_df, prefix ='Embark', columns = ['Embarked'])
test_df1 = pd.get_dummies(test_df, prefix ='Embark', columns = ['Embarked'])
train_df1.head().T
test_df1['Fare'].fillna(test_df1['Fare'].median(), inplace=True)
test_df1['Age'].fillna(test_df1['Age'].median(), inplace=True)
train_df1['Age'].fillna(train_df1['Age'].median(), inplace=True)
test_df1.isnull().sum()
train_df1['Age'] = train_df1['Age'].astype(int)
test_df1['Age'] = test_df1['Age'].astype(int)
fig, axis1 = plt.subplots(1, 1, figsize=(18, 4))
average_age = train_df1[['Age', 'Survived']].groupby(['Age'], as_index=False).mean()
sns.barplot(x='Age', y='Survived', data=average_age) | code |
34118808/cell_3 | [
"text_html_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T | code |
34118808/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T
train_df.head().T
train_df = train_df.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test_df = test_df.drop(['Name', 'Ticket'], axis=1)
train_df.isnull().sum()
test_df.isnull().sum()
train_df['Embarked'].fillna(train_df['Embarked'].mode()[0], inplace=True)
train_df.isnull().sum()
fig, (axis1, axis2, axis3) = plt.subplots(1, 3, figsize=(15, 5))
sns.countplot(x='Embarked', data=train_df, ax=axis1)
sns.countplot(x='Survived', hue='Embarked', data=train_df, order=[1, 0], ax=axis2)
combined = train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()
sns.barplot(x='Embarked', y='Survived', data=combined, order=['S', 'C', 'Q'], ax=axis3)
train_df1 = pd.get_dummies(train_df, prefix='Embark', columns=['Embarked'])
test_df1 = pd.get_dummies(test_df, prefix='Embark', columns=['Embarked'])
train_df1.head().T | code |
34118808/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T
train_df.head().T
train_df = train_df.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test_df = test_df.drop(['Name', 'Ticket'], axis=1)
test_df.isnull().sum() | code |
34118808/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_df.head().T
train_df.head().T
train_df = train_df.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
test_df = test_df.drop(['Name', 'Ticket'], axis=1)
train_df.isnull().sum()
train_df.hist(figsize=(12, 8))
plt.show() | code |
34118808/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
train_df.head().T
train_df.head().T
train_df.describe(include='all') | code |
18103228/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import os
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
Survials_By_Age = train_data.groupby('Age')['Survived'].sum().reset_index()
sns.lineplot(y='Survived', x='Age', data=Survials_By_Age)
Survials_By_Age_Segment = []
age_difference = 5
max_age = 70
for i in range(max_age // age_difference):
    mask = (Survials_By_Age['Age'] >= i * age_difference) & (Survials_By_Age['Age'] < (i + 1) * age_difference)
    s = Survials_By_Age.loc[mask, 'Survived'].sum()
Survials_By_Age_Segment.append(s)
print(Survials_By_Age_Segment) | code |
18103228/cell_1 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import os
print(os.listdir('../input'))
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv') | code |
18103228/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
import os
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
Survials_By_Age = train_data.groupby('Age')['Survived'].sum().reset_index()
Survials_By_Age_Segment = []
age_difference = 5
max_age = 70
for i in range(max_age // age_difference):
    mask = (Survials_By_Age['Age'] >= i * age_difference) & (Survials_By_Age['Age'] < (i + 1) * age_difference)
    s = Survials_By_Age.loc[mask, 'Survived'].sum()
Survials_By_Age_Segment.append(s)
boolean_Survivals = train_data['Survived'] == 1
Survivals = train_data[boolean_Survivals]
sns.barplot(y='title', x='average_rating', data=ayu) | code |
90119831/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')
missing_values_train = train.isna().any().sum()
missing_values_test = test.isna().any().sum()
duplicates_train = train.duplicated().sum()
duplicates_test = test.duplicated().sum()
def add_road_feature(df):
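    # A 'road' is identified by concatenating its x and y coordinates with the direction; the raw columns are then dropped.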
df['road'] = df['x'].astype(str) + df['y'].astype(str) + df['direction']
return df.drop(['x', 'y', 'direction'], axis=1)
train = add_road_feature(train)
test = add_road_feature(test)
le = LabelEncoder()
train['road'] = le.fit_transform(train['road'])
test['road'] = le.transform(test['road'])
group = ['road', 'weekday', 'hour', 'minute']
congestion = train.groupby(group).congestion
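# For every (road, weekday, hour, minute) group, the min/max/median congestion seen in train is merged back onto both train and test as extra features.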
def add_feature(feature, feature_name):
feature = feature.rename(columns={'congestion': feature_name})
return (train.merge(feature, on=group, how='left'), test.merge(feature, on=group, how='left'))
train, test = add_feature(pd.DataFrame(congestion.min().astype(int)).reset_index(), 'min')
train, test = add_feature(pd.DataFrame(congestion.max().astype(int)).reset_index(), 'max')
train, test = add_feature(pd.DataFrame(congestion.median().astype(int)).reset_index(), 'median')
train.drop(['month', 'day', 'weekday', 'hour', 'minute', 'time'], axis=1, inplace=True)
test.drop(['month', 'day', 'weekday', 'hour', 'minute', 'time'], axis=1, inplace=True)
train.head() | code |
90119831/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')
missing_values_train = train.isna().any().sum()
missing_values_test = test.isna().any().sum()
duplicates_train = train.duplicated().sum()
print('Duplicates in train data: {0}'.format(duplicates_train))
duplicates_test = test.duplicated().sum()
print('Duplicates in test data: {0}'.format(duplicates_test)) | code |
90119831/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')
print('Train data shape:', train.shape)
print('Test data shape:', test.shape) | code |
90119831/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')
missing_values_train = train.isna().any().sum()
missing_values_test = test.isna().any().sum()
duplicates_train = train.duplicated().sum()
duplicates_test = test.duplicated().sum()
def add_road_feature(df):
df['road'] = df['x'].astype(str) + df['y'].astype(str) + df['direction']
return df.drop(['x', 'y', 'direction'], axis=1)
train = add_road_feature(train)
test = add_road_feature(test)
le = LabelEncoder()
train['road'] = le.fit_transform(train['road'])
test['road'] = le.transform(test['road'])
group = ['road', 'weekday', 'hour', 'minute']
congestion = train.groupby(group).congestion
def add_feature(feature, feature_name):
feature = feature.rename(columns={'congestion': feature_name})
return (train.merge(feature, on=group, how='left'), test.merge(feature, on=group, how='left'))
train, test = add_feature(pd.DataFrame(congestion.min().astype(int)).reset_index(), 'min')
train, test = add_feature(pd.DataFrame(congestion.max().astype(int)).reset_index(), 'max')
train, test = add_feature(pd.DataFrame(congestion.median().astype(int)).reset_index(), 'median')
train.drop(['month', 'day', 'weekday', 'hour', 'minute', 'time'], axis=1, inplace=True)
test.drop(['month', 'day', 'weekday', 'hour', 'minute', 'time'], axis=1, inplace=True)
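# Downcast each numeric column to the smallest int/float dtype that covers its min/max, and cast non-numeric columns to 'category', to cut the DataFrames' memory usage.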
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024 ** 2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
elif c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024 ** 2
if verbose:
print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
reduce_mem_usage(train)
reduce_mem_usage(test) | code |
90119831/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')
train.describe() | code |
90119831/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')
missing_values_train = train.isna().any().sum()
print('Missing values in train data: {0}'.format(missing_values_train[missing_values_train > 0]))
missing_values_test = test.isna().any().sum()
print('Missing values in test data: {0}'.format(missing_values_test[missing_values_test > 0])) | code |
90119831/cell_7 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')
print('Columns: \n{0}'.format(list(train.columns))) | code |
90119831/cell_27 | [
"text_plain_output_1.png"
] | from catboost import CatBoostRegressor
from sklearn.preprocessing import LabelEncoder
import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')
missing_values_train = train.isna().any().sum()
missing_values_test = test.isna().any().sum()
duplicates_train = train.duplicated().sum()
duplicates_test = test.duplicated().sum()
def add_road_feature(df):
df['road'] = df['x'].astype(str) + df['y'].astype(str) + df['direction']
return df.drop(['x', 'y', 'direction'], axis=1)
train = add_road_feature(train)
test = add_road_feature(test)
le = LabelEncoder()
train['road'] = le.fit_transform(train['road'])
test['road'] = le.transform(test['road'])
group = ['road', 'weekday', 'hour', 'minute']
congestion = train.groupby(group).congestion
def add_feature(feature, feature_name):
feature = feature.rename(columns={'congestion': feature_name})
return (train.merge(feature, on=group, how='left'), test.merge(feature, on=group, how='left'))
train, test = add_feature(pd.DataFrame(congestion.min().astype(int)).reset_index(), 'min')
train, test = add_feature(pd.DataFrame(congestion.max().astype(int)).reset_index(), 'max')
train, test = add_feature(pd.DataFrame(congestion.median().astype(int)).reset_index(), 'median')
train.drop(['month', 'day', 'weekday', 'hour', 'minute', 'time'], axis=1, inplace=True)
test.drop(['month', 'day', 'weekday', 'hour', 'minute', 'time'], axis=1, inplace=True)
y = train.loc[:, 'congestion']
X = train.drop('congestion', axis=1)
test_X = test
model = CatBoostRegressor(silent=True)
model.fit(X, y)
train_predictions = pd.Series(model.predict(X), index=X.index)
test_predictions = pd.Series(model.predict(test_X), index=test_X.index)
sub['congestion'] = test_predictions.round().astype(int)
sub.to_csv('submission.csv', index=False)
sub | code |
90119831/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv', index_col='row_id', parse_dates=['time'])
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv', index_col='row_id', parse_dates=['time'])
sub = pd.read_csv('../input/tabular-playground-series-mar-2022/sample_submission.csv')
train.head() | code |
333806/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
import re
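# Keep only tokens that are neither handles (@) nor hashtags (#); strip non-letter characters and lowercase.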
fanboy_text = [re.sub('[^a-zA-Z]', ' ', j).lower() for i in fanboy_space_split for j in i if not '@' in j and (not '#' in j)]
about_text = [re.sub('[^a-zA-Z]', ' ', j).lower() for i in about_space_split for j in i if not '@' in j and (not '#' in j)]
from sklearn.feature_extraction.text import CountVectorizer
fc_vectorizer = CountVectorizer(stop_words='english', max_features=1000)
fanboy_counts = fc_vectorizer.fit_transform(fanboy_text)
ac_vectorizer = CountVectorizer(stop_words='english', max_features=1000)
about_counts = ac_vectorizer.fit_transform(about_text)
def print_top_words(model, feature_names, n_top_words):
    # print the top-weighted terms of each topic
    for topic_idx, topic in enumerate(model.components_):
        print('Topic #%d:' % topic_idx, ' '.join(feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]))
from sklearn.decomposition import NMF
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
fanboy_nmf = NMF(n_components=n_topics, random_state=1, alpha=0.1, l1_ratio=0.5).fit(fanboy_counts)
fanboy_feature_names = fc_vectorizer.get_feature_names()
print_top_words(fanboy_nmf, fanboy_feature_names, n_top_words) | code |
333806/cell_18 | [
"text_plain_output_1.png"
import matplotlib.pyplot
import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
fanboy_edges = [(k, j[1:]) for k, i in zip(fanboy_data['username'], fanboy_space_split) for j in i if '@' in j]
about_edges = [(k, j[1:]) for k, i in zip(about_data['username'], about_space_split) for j in i if '@' in j]
about_graph = nx.Graph()
fanboy_graph = nx.Graph()
about_graph.add_edges_from(about_edges)
fanboy_graph.add_edges_from(fanboy_edges)
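# Betweenness and closeness centrality are computed on the first connected component of the fanboy mention graph.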
fanboy_cc = (fanboy_graph.subgraph(c) for c in nx.connected_components(fanboy_graph))
bet_cen = nx.betweenness_centrality([i for i in fanboy_cc][0])
fanboy_cc = (fanboy_graph.subgraph(c) for c in nx.connected_components(fanboy_graph))
clo_cen = nx.closeness_centrality([i for i in fanboy_cc][0])
fig, ax = matplotlib.pyplot.subplots()
ax.scatter(list(clo_cen.values()), list(bet_cen.values()))
ax.set_ylim(0.04, 0.3)
ax.set_xlim(0.32, 0.45)
ax.set_xlabel('Closeness Centrality')
ax.set_ylabel('Betweenness Centrality')
ax.set_yscale('log')
for i, txt in enumerate(list(clo_cen.keys())):
ax.annotate(txt, (list(clo_cen.values())[i], list(bet_cen.values())[i])) | code |
333806/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
import re
fanboy_text = [re.sub('[^a-zA-Z]', ' ', j).lower() for i in fanboy_space_split for j in i if not '@' in j and (not '#' in j)]
about_text = [re.sub('[^a-zA-Z]', ' ', j).lower() for i in about_space_split for j in i if not '@' in j and (not '#' in j)]
from sklearn.feature_extraction.text import CountVectorizer
fc_vectorizer = CountVectorizer(stop_words='english', max_features=1000)
fanboy_counts = fc_vectorizer.fit_transform(fanboy_text)
ac_vectorizer = CountVectorizer(stop_words='english', max_features=1000)
about_counts = ac_vectorizer.fit_transform(about_text)
def print_top_words(model, feature_names, n_top_words):
    # print the top-weighted terms of each topic
    for topic_idx, topic in enumerate(model.components_):
        print('Topic #%d:' % topic_idx, ' '.join(feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]))
from sklearn.decomposition import NMF
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
fanboy_nmf = NMF(n_components=n_topics, random_state=1, alpha=0.1, l1_ratio=0.5).fit(fanboy_counts)
about_nmf = NMF(n_components=n_topics, random_state=1, alpha=0.1, l1_ratio=0.5).fit(about_counts)
about_feature_names = ac_vectorizer.get_feature_names()
print_top_words(about_nmf, about_feature_names, n_top_words) | code |
333806/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
print(len(set(fanboy_data['username'])) / len(set(fanboy_handles)), len(set(about_data['username'])) / len(set(about_handles))) | code |
333806/cell_3 | [
"image_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
from matplotlib import *
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
333806/cell_14 | [
"text_plain_output_1.png"
] | import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys()
fanboy_space_split = [str(i).split() for i in fanboy_data['tweets']]
fanboy_handles = [j for i in fanboy_space_split for j in i if '@' in j]
about_space_split = [str(i).split() for i in about_data['tweets']]
about_handles = [j for i in about_space_split for j in i if '@' in j]
fanboy_edges = [(k, j[1:]) for k, i in zip(fanboy_data['username'], fanboy_space_split) for j in i if '@' in j]
about_edges = [(k, j[1:]) for k, i in zip(about_data['username'], about_space_split) for j in i if '@' in j]
about_graph = nx.Graph()
fanboy_graph = nx.Graph()
about_graph.add_edges_from(about_edges)
fanboy_graph.add_edges_from(fanboy_edges)
print(1 / (float(fanboy_graph.order()) / float(fanboy_graph.size())))
print(1 / (float(about_graph.order()) / float(about_graph.size()))) | code |
333806/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
about_data = pd.read_csv('../input/AboutIsis.csv', encoding='ISO-8859-1')
fanboy_data = pd.read_csv('../input/IsisFanboy.csv', encoding='ISO-8859-1')
about_data.keys() | code |
72063178/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_district.isnull().sum()
df_district.columns
df_district.shape
df_district.dtypes
df_district = df_district.drop_duplicates()
plt.figure(figsize=(12, 10))
plt.xticks(rotation=60)
sns.countplot(df_district.state, edgecolor=sns.color_palette('dark', 3)) | code |
72063178/cell_25 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_district.isnull().sum()
df_district.columns
df_district.shape
df_district.dtypes
df_district = df_district.drop_duplicates()
plt.xticks(rotation=60)
fig, ax = plt.subplots(figsize=(12, 6))
fig.suptitle('Locale Distribution', size = 20)
explode = (0.04, 0.04, 0.04, 0.04)
labels = list(df_district.locale.value_counts().index)
sizes = df_district.locale.value_counts().values
ax.pie(sizes, explode=explode,startangle=60, labels=labels, shadow= True)
ax.add_artist(plt.Circle((0,0),0.4,fc='white'))
plt.show()
sns.countplot(data=df_district, x='pct_free/reduced')
plt.show() | code |
72063178/cell_23 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_district.isnull().sum()
df_district.columns
df_district.shape
df_district.dtypes
df_district = df_district.drop_duplicates()
plt.xticks(rotation=60)
fig, ax = plt.subplots(figsize=(12, 6))
fig.suptitle('Locale Distribution', size=20)
explode = (0.04, 0.04, 0.04, 0.04)
labels = list(df_district.locale.value_counts().index)
sizes = df_district.locale.value_counts().values
ax.pie(sizes, explode=explode, startangle=60, labels=labels, shadow=True)
ax.add_artist(plt.Circle((0, 0), 0.4, fc='white'))
plt.show() | code |
72063178/cell_28 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_district.isnull().sum()
df_district.columns
df_district.shape
df_district.dtypes
df_district = df_district.drop_duplicates()
plt.xticks(rotation=60)
fig, ax = plt.subplots(figsize=(12, 6))
fig.suptitle('Locale Distribution', size = 20)
explode = (0.04, 0.04, 0.04, 0.04)
labels = list(df_district.locale.value_counts().index)
sizes = df_district.locale.value_counts().values
ax.pie(sizes, explode=explode,startangle=60, labels=labels, shadow= True)
ax.add_artist(plt.Circle((0,0),0.4,fc='white'))
plt.show()
df_products = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
c1 = c2 = c3 = 0
for s in df_products['Sector(s)']:
if not pd.isnull(s):
s = s.split(';')
for i in range(len(s)):
sub = s[i].strip()
if sub == 'PreK-12':
c1 += 1
if sub == 'Higher Ed':
c2 += 1
if sub == 'Corporate':
c3 += 1
fig, ax = plt.subplots(figsize=(16, 8))
fig.suptitle('Sector Distribution', size=20)
explode = (0.05, 0.05, 0.05)
labels = ['PreK-12', 'Higher Ed', 'Corporate']
sizes = [c1, c2, c3]
ax.pie(sizes, explode=explode, startangle=60, labels=labels)
ax.add_artist(plt.Circle((0, 0), 0.4, fc='white'))
plt.show() | code |
72063178/cell_8 | [
"image_output_1.png"
] | import pandas as pd
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_district.head(5) | code |
72063178/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_district.isnull().sum()
df_district.columns
df_district.shape
df_district.dtypes | code |
72063178/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_district.isnull().sum()
df_district.columns
df_district.shape
df_district.dtypes
df_district = df_district.drop_duplicates()
plt.xticks(rotation=60)
fig, ax = plt.subplots(figsize=(12, 6))
fig.suptitle('Locale Distribution', size = 20)
explode = (0.04, 0.04, 0.04, 0.04)
labels = list(df_district.locale.value_counts().index)
sizes = df_district.locale.value_counts().values
ax.pie(sizes, explode=explode,startangle=60, labels=labels, shadow= True)
ax.add_artist(plt.Circle((0,0),0.4,fc='white'))
plt.show()
sns.countplot(data=df_district, x='pct_black/hispanic')
plt.show() | code |
72063178/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_district.isnull().sum()
df_district.columns
df_district.shape | code |
72063178/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_district.isnull().sum() | code |
72063178/cell_27 | [
"image_output_1.png"
] | import pandas as pd
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_products = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
df_products.head(5) | code |
72063178/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
df_district = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
df_district.isnull().sum()
df_district.columns | code |
129006184/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import warnings
import torch
import matplotlib.pyplot as plt
import numpy as np
import torchvision
from torchvision import datasets, transforms
import torch.nn as nn
import torch.optim as optim
import seaborn as sns
import warnings
warnings.filterwarnings('ignore') | code |
129006184/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import mlflow
get_ipython().system_raw('mlflow ui --port 5000 &')
mlflow.pytorch.autolog() | code |
129006184/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_1.png"
] | from pyngrok import ngrok
from pyngrok import ngrok
from getpass import getpass
ngrok.kill()
NGROK_AUTH_TOKEN = '2Padn9VzXvPy7nJXe6eAUTR3Dbd_6cXCwQeLNLwZDCWL5ypKs'
ngrok.set_auth_token(NGROK_AUTH_TOKEN)
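# Open an HTTPS tunnel (bind_tls=True) so the local MLflow UI on port 5000 is reachable at a public URL.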
ngrok_tunnel = ngrok.connect(addr='5000', proto='http', bind_tls=True)
print('MLflow Tracking UI:', ngrok_tunnel.public_url) | code |
129006184/cell_3 | [
"text_plain_output_1.png"
# Install the required packages to run MLflow
!pip install mlflow --quiet
!pip install pyngrok --quiet | code |
90116628/cell_3 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import requests
webpage_response = requests.get('https://bank.gov.ua/ua/markets/exchangerates')
webpage = webpage_response.content
soup = BeautifulSoup(webpage, 'html.parser')
soup.table | code |
90116628/cell_5 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests
webpage_response = requests.get('https://bank.gov.ua/ua/markets/exchangerates')
webpage = webpage_response.content
soup = BeautifulSoup(webpage, 'html.parser')
soup.table
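# Column labels in the NBU table: 'Офіційний курс' = official exchange rate, 'Код літерний' = alphabetic currency code.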
k = soup.find_all(attrs={'data-label': 'Офіційний курс'})
t1 = []
for i in k:
m = i.string
t1.append(float(m.replace(',', '.')))
k = soup.find_all(attrs={'data-label': 'Код літерний'})
t2 = []
for i in k:
t2.append(i.string)
t = {'ccy': t2, 'value': t1}
import pandas as pd
df = pd.DataFrame(data=t)
print(df) | code |
2037081/cell_9 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def sigmoid(z):
return 1.0 / (1 + np.exp(-z))
def z(theta, x):
assert theta.shape[1] == 1
assert theta.shape[0] == x.shape[1]
return np.dot(x, theta)
a = np.array([[1, 2], [3, 4]])
b = np.array([[4, 1], [2, 2]])
a = np.array([1, 1])
b = np.array([2, 3])
def hypothesis(theta, x):
return sigmoid(z(theta, x))
def cost(theta, x, y):
assert x.shape[1] == theta.shape[0]
assert x.shape[0] == y.shape[0]
assert y.shape[1] == 1
assert theta.shape[1] == 1
h = hypothesis(theta, x)
cost = -1 / len(x) * np.sum(np.dot(y.T, np.log(h)) + np.dot((1 - y).T, np.log(1 - h)))
return cost
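# One gradient-descent step; the vectorized gradient of the cross-entropy cost is x.T @ (h - y) / len(x).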
def gradient_descent(theta, x, y, learning_rate):
h = hypothesis(theta, x)
theta = theta - learning_rate / len(x) * np.dot(x.T, h - y)
return theta
def minimize(theta, x, y, iterations, learning_rate):
costs = []
for _ in range(iterations):
theta = gradient_descent(theta, x, y, learning_rate)
costs.append(cost(theta, x, y))
return (theta, costs)
mushroom_data = pd.read_csv('../input/mushrooms.csv').dropna()
mushroom_x = pd.get_dummies(mushroom_data.drop('class', axis=1))
mushroom_x['bias'] = 1
mushroom_x = mushroom_x.values
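# Binary target: 1 if the mushroom class is 'p' (poisonous), 0 otherwise.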
mushroom_y = (np.atleast_2d(mushroom_data['class']).T == 'p').astype(int)
x_train, x_test, y_train, y_test = train_test_split(mushroom_x, mushroom_y, train_size=0.85, test_size=0.15)
print('x_train, y_train')
print(x_train.shape, y_train.shape)
candidate = np.atleast_2d([np.random.uniform(-1, 1, 118)]).T
theta, costs = minimize(candidate, x_train, y_train, 1200, 1.2)
plt.plot(range(len(costs)), costs)
plt.show()
print(costs[-1])
predictions = x_test.dot(theta) > 0
len(list(filter(lambda x: x[0] == x[1], np.dstack((predictions, y_test))[:, 0]))) / len(predictions) | code |
2037081/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
def sigmoid(z):
return 1.0 / (1 + np.exp(-z))
def z(theta, x):
assert theta.shape[1] == 1
assert theta.shape[0] == x.shape[1]
return np.dot(x, theta)
a = np.array([[1, 2], [3, 4]])
b = np.array([[4, 1], [2, 2]])
print('a.T*b is:', np.dot(a.T, b))
print('a*b.T is:', np.dot(a, b.T))
a = np.array([1, 1])
b = np.array([2, 3])
print(a.shape)
print(b.T.shape)
print('a*b is: ', np.dot(a, b.T)) | code |
2037081/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split | code |
121149831/cell_9 | [
"image_output_1.png"
] | from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
X, y = fetch_openml('titanic', version=1, as_frame=True, return_X_y=True)
rng = np.random.RandomState(seed=42)
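# Add two uninformative random features (one categorical, one numeric) as a reference point for the importance measures below.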
X['random_cat'] = rng.randint(3, size=X.shape[0])
X['random_num'] = rng.randn(X.shape[0])
categorical_columns = ['pclass', 'sex', 'embarked', 'random_cat']
numerical_columns = ['age', 'sibsp', 'parch', 'fare', 'random_num']
X = X[categorical_columns + numerical_columns]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
categorical_encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
numerical_pipe = SimpleImputer(strategy='mean')
preprocessing = ColumnTransformer([('cat', categorical_encoder, categorical_columns), ('num', numerical_pipe, numerical_columns)], verbose_feature_names_out=True)
rf = Pipeline([('preprocess', preprocessing), ('classifier', RandomForestClassifier(random_state=42))])
rf.fit(X_train, y_train)
feature_names = categorical_columns + numerical_columns
mdi_importances = pd.Series(rf[-1].feature_importances_, index=feature_names).sort_values(ascending=True)
ax = mdi_importances.plot.barh()
ax.figure.tight_layout()
from sklearn.inspection import permutation_importance
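# Permutation importance: the drop in test accuracy when a single column is shuffled, repeated 10 times per feature (n_repeats=10).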
result = permutation_importance(rf, X_test, y_test, n_repeats=10, random_state=42, n_jobs=2)
sorted_importances_idx = result.importances_mean.argsort()
importances = pd.DataFrame(result.importances[sorted_importances_idx].T, columns=X.columns[sorted_importances_idx])
ax = importances.plot.box(vert=False, whis=10)
ax.set_title('Permutation Importances (test set)')
ax.axvline(x=0, color='k', linestyle='--')
ax.set_xlabel('Decrease in accuracy score')
ax.figure.tight_layout() | code |
121149831/cell_4 | [
"text_plain_output_1.png"
] | from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
import numpy as np # linear algebra
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
X, y = fetch_openml('titanic', version=1, as_frame=True, return_X_y=True)
rng = np.random.RandomState(seed=42)
X['random_cat'] = rng.randint(3, size=X.shape[0])
X['random_num'] = rng.randn(X.shape[0])
categorical_columns = ['pclass', 'sex', 'embarked', 'random_cat']
numerical_columns = ['age', 'sibsp', 'parch', 'fare', 'random_num']
X = X[categorical_columns + numerical_columns]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
categorical_encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
numerical_pipe = SimpleImputer(strategy='mean')
preprocessing = ColumnTransformer([('cat', categorical_encoder, categorical_columns), ('num', numerical_pipe, numerical_columns)], verbose_feature_names_out=True)
rf = Pipeline([('preprocess', preprocessing), ('classifier', RandomForestClassifier(random_state=42))])
rf.fit(X_train, y_train) | code |
121149831/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
import numpy as np # linear algebra
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
X, y = fetch_openml('titanic', version=1, as_frame=True, return_X_y=True)
rng = np.random.RandomState(seed=42)
X['random_cat'] = rng.randint(3, size=X.shape[0])
X['random_num'] = rng.randn(X.shape[0])
categorical_columns = ['pclass', 'sex', 'embarked', 'random_cat']
numerical_columns = ['age', 'sibsp', 'parch', 'fare', 'random_num']
X = X[categorical_columns + numerical_columns]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
categorical_encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
numerical_pipe = SimpleImputer(strategy='mean')
preprocessing = ColumnTransformer([('cat', categorical_encoder, categorical_columns), ('num', numerical_pipe, numerical_columns)], verbose_feature_names_out=True)
rf = Pipeline([('preprocess', preprocessing), ('classifier', RandomForestClassifier(random_state=42))])
rf.fit(X_train, y_train)
rf[0].output_indices_ | code |
121149831/cell_8 | [
"image_output_1.png"
] | from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
X, y = fetch_openml('titanic', version=1, as_frame=True, return_X_y=True)
rng = np.random.RandomState(seed=42)
X['random_cat'] = rng.randint(3, size=X.shape[0])
X['random_num'] = rng.randn(X.shape[0])
categorical_columns = ['pclass', 'sex', 'embarked', 'random_cat']
numerical_columns = ['age', 'sibsp', 'parch', 'fare', 'random_num']
X = X[categorical_columns + numerical_columns]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
categorical_encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
numerical_pipe = SimpleImputer(strategy='mean')
preprocessing = ColumnTransformer([('cat', categorical_encoder, categorical_columns), ('num', numerical_pipe, numerical_columns)], verbose_feature_names_out=True)
rf = Pipeline([('preprocess', preprocessing), ('classifier', RandomForestClassifier(random_state=42))])
rf.fit(X_train, y_train)
feature_names = categorical_columns + numerical_columns
mdi_importances = pd.Series(rf[-1].feature_importances_, index=feature_names).sort_values(ascending=True)
ax = mdi_importances.plot.barh()
ax.set_title('Random Forest Feature Importances (MDI)')
ax.figure.tight_layout() | code |
121149831/cell_5 | [
"text_plain_output_1.png"
] | from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
import numpy as np # linear algebra
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
X, y = fetch_openml('titanic', version=1, as_frame=True, return_X_y=True)
rng = np.random.RandomState(seed=42)
X['random_cat'] = rng.randint(3, size=X.shape[0])
X['random_num'] = rng.randn(X.shape[0])
categorical_columns = ['pclass', 'sex', 'embarked', 'random_cat']
numerical_columns = ['age', 'sibsp', 'parch', 'fare', 'random_num']
X = X[categorical_columns + numerical_columns]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
categorical_encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
numerical_pipe = SimpleImputer(strategy='mean')
preprocessing = ColumnTransformer([('cat', categorical_encoder, categorical_columns), ('num', numerical_pipe, numerical_columns)], verbose_feature_names_out=True)
rf = Pipeline([('preprocess', preprocessing), ('classifier', RandomForestClassifier(random_state=42))])
rf.fit(X_train, y_train)
print(f'RF train accuracy: {rf.score(X_train, y_train):.3f}')
print(f'RF test accuracy: {rf.score(X_test, y_test):.3f}') | code |
34144733/cell_2 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | !pip install hyperopt
!pip install geffnet | code |
34144733/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34144733/cell_10 | [
"text_plain_output_1.png"
] | from PIL import ImageOps, ImageEnhance
from abc import ABC, abstractmethod
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import geffnet
import numpy as np
import numpy as np # linear algebra
import pickle
import torch
import torch.nn as nn
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
import random
import torchvision.transforms as transforms
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
from PIL import ImageOps, ImageEnhance
from PIL import Image as I
class BaseTransform(ABC):
def __init__(self, prob, mag):
self.prob = prob
self.mag = mag
def __call__(self, img):
return transforms.RandomApply([self.transform], self.prob)(img)
def __repr__(self):
return '%s(prob=%.2f, magnitude=%.2f)' % (self.__class__.__name__, self.prob, self.mag)
@abstractmethod
def transform(self, img):
pass
class ShearXY(BaseTransform):
def transform(self, img):
degrees = self.mag * 360
t = transforms.RandomAffine(0, shear=degrees, resample=I.BILINEAR)
return t(img)
class TranslateXY(BaseTransform):
def transform(self, img):
translate = (self.mag, self.mag)
t = transforms.RandomAffine(0, translate=translate, resample=I.BILINEAR)
return t(img)
class Rotate(BaseTransform):
def transform(self, img):
degrees = self.mag * 360
t = transforms.RandomRotation(degrees, I.BILINEAR)
return t(img)
class AutoContrast(BaseTransform):
def transform(self, img):
cutoff = int(self.mag * 49)
return ImageOps.autocontrast(img, cutoff=cutoff)
class Invert(BaseTransform):
def transform(self, img):
return ImageOps.invert(img)
class Equalize(BaseTransform):
def transform(self, img):
return ImageOps.equalize(img)
class Solarize(BaseTransform):
def transform(self, img):
threshold = (1 - self.mag) * 255
return ImageOps.solarize(img, threshold)
class Posterize(BaseTransform):
def transform(self, img):
bits = int((1 - self.mag) * 8)
return ImageOps.posterize(img, bits=bits)
class Contrast(BaseTransform):
def transform(self, img):
factor = self.mag * 10
return ImageEnhance.Contrast(img).enhance(factor)
class Color(BaseTransform):
def transform(self, img):
factor = self.mag * 10
return ImageEnhance.Color(img).enhance(factor)
class Brightness(BaseTransform):
def transform(self, img):
factor = self.mag * 10
return ImageEnhance.Brightness(img).enhance(factor)
class Sharpness(BaseTransform):
def transform(self, img):
factor = self.mag * 10
return ImageEnhance.Sharpness(img).enhance(factor)
class Cutout(BaseTransform):
def transform(self, img):
n_holes = 1
length = 24 * self.mag
cutout_op = CutoutOp(n_holes=n_holes, length=length)
return cutout_op(img)
class CutoutOp(object):
"""
https://github.com/uoguelph-mlrg/Cutout
Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
w, h = img.size
mask = np.ones((h, w, 1), np.uint8)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h).astype(int)
y2 = np.clip(y + self.length // 2, 0, h).astype(int)
x1 = np.clip(x - self.length // 2, 0, w).astype(int)
x2 = np.clip(x + self.length // 2, 0, w).astype(int)
mask[y1:y2, x1:x2, :] = 0.0
img = mask * np.asarray(img).astype(np.uint8)
img = I.fromarray(mask * np.asarray(img))
return img
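# Illustrative usage sketch for CutoutOp (added here; not from the original notebook,
# and the file name is hypothetical):
#
#   cutout = CutoutOp(n_holes=1, length=16)
#   augmented = cutout(I.open('example.jpg'))  # PIL image with one 16x16 patch zeroed out
#
# Holes are clipped at the image borders, so patches near an edge can be smaller
# than length x length.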
class TestDataset(Data.Dataset):
def __init__(self, names, image_labels, transform):
super(TestDataset, self).__init__()
self.names = names
self.image_labels = image_labels
self.transform = transform
def __getitem__(self, index):
name = self.names[index]
if type(name) == list:
name = name[0]
label = self.image_labels[name]
image = I.open(name)
image = self.transform(image)
return (image, label)
def __len__(self):
return len(self.names)
DEFALUT_CANDIDATES = [ShearXY, TranslateXY, Rotate, AutoContrast, Invert, Equalize, Solarize, Posterize, Contrast, Color, Brightness, Sharpness, Cutout]
def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
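# Added comment: l2_norm rescales each vector along `axis` by its Euclidean norm,
# projecting it onto the unit sphere; e.g. [3., 4.] becomes [0.6, 0.8].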
class myNet(nn.Module):
def __init__(self):
super(myNet, self).__init__()
backbone = geffnet.efficientnet_b3(pretrained=True)
self.backbone = torch.nn.Sequential(backbone.conv_stem, backbone.bn1, backbone.act1, backbone.blocks, backbone.conv_head, backbone.bn2, backbone.act2, backbone.global_pool)
self.global_avgpool = torch.nn.AdaptiveAvgPool2d(1)
self.global_bn = nn.BatchNorm1d(1536)
self.global_bn.bias.requires_grad = False
self.local_conv = nn.Conv2d(1536, 512, 1)
self.local_bn = nn.BatchNorm2d(512)
self.local_bn.bias.requires_grad = False
self.fc = nn.Linear(1536, 20)
nn.init.kaiming_normal_(self.fc.weight, mode='fan_out')
nn.init.constant_(self.fc.bias, 0)
def forward(self, x):
x = self.backbone(x)
global_feat = self.global_avgpool(x)
global_feat = global_feat.view(global_feat.shape[0], -1)
global_feat = F.dropout(global_feat, p=0.2)
global_feat = self.global_bn(global_feat)
global_feat = l2_norm(global_feat)
local_feat = torch.mean(x, -1, keepdim=True)
local_feat = self.local_bn(self.local_conv(local_feat))
local_feat = local_feat.squeeze(-1).permute(0, 2, 1)
local_feat = l2_norm(local_feat, axis=-1)
out = self.fc(global_feat) * 16
return (global_feat, local_feat, out)
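# Added comment: forward() returns (1) an L2-normalised global embedding pooled over
# the whole feature map, (2) an L2-normalised local embedding with one 512-d vector
# per horizontal stripe of the feature map, and (3) the 20-way classification logits
# scaled by 16.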
import torch.nn as nn
import torch
class TripletLoss(nn.Module):
def __init__(self, margin=0.3):
super(TripletLoss, self).__init__()
self.margin = margin
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
def shortest_dist(self, dist_mat):
m, n = dist_mat.size()[:2]
dist = [[0 for _ in range(n)] for _ in range(m)]
for i in range(m):
for j in range(n):
if i == 0 and j == 0:
dist[i][j] = dist_mat[i, j]
elif i == 0 and j > 0:
dist[i][j] = dist[i][j - 1] + dist_mat[i, j]
elif i > 0 and j == 0:
dist[i][j] = dist[i - 1][j] + dist_mat[i, j]
else:
dist[i][j] = torch.min(dist[i - 1][j], dist[i][j - 1]) + dist_mat[i, j]
dist = dist[-1][-1]
return dist
'Distance matrix for local features'
def compute_local_dist(self, x, y):
M, m, d = x.size()
N, n, d = y.size()
x = x.contiguous().view(M * m, d)
y = y.contiguous().view(N * n, d)
dist_mat = self.comput_dist(x, y)
dist_mat = (torch.exp(dist_mat) - 1.0) / (torch.exp(dist_mat) + 1.0)
dist_mat = dist_mat.contiguous().view(M, m, N, n).permute(1, 3, 0, 2)
dist_mat = self.shortest_dist(dist_mat)
return dist_mat
'Distance matrix for global features'
def comput_dist(self, x, y):
m, n = (x.size(0), y.size(0))
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
dist.addmm_(x, y.t(), beta=1, alpha=-2)
dist = dist.clamp(min=1e-12).sqrt()
return dist
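# Added comment: comput_dist builds the pairwise Euclidean distance matrix from the
# expansion ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 * x_i . y_j; xx + yy supplies
# the squared norms, addmm_ subtracts twice the dot products, and clamp(min=1e-12)
# keeps the sqrt numerically safe.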
def hard_example_mining(self, dist_mat, labels, return_inds=False):
assert len(dist_mat.size()) == 2
assert dist_mat.size(0) == dist_mat.size(1)
N = dist_mat.size(0)
is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())
list_ap = []
list_an = []
for i in range(N):
list_ap.append(dist_mat[i][is_pos[i]].max().unsqueeze(0))
list_an.append(dist_mat[i][is_neg[i]].min().unsqueeze(0))
dist_ap = torch.cat(list_ap)
dist_an = torch.cat(list_an)
return (dist_ap, dist_an)
def forward(self, feat_type, feat, labels):
"""
:param feat_type: 'global' computes the triplet loss on global features, 'local' computes it on local features
:param feat: features produced by the network
:param labels: ground-truth labels
:return:
"""
if feat_type == 'global':
dist_mat = self.comput_dist(feat, feat)
else:
dist_mat = self.compute_local_dist(feat, feat)
dist_ap, dist_an = self.hard_example_mining(dist_mat, labels)
y = torch.ones_like(dist_an)
loss = self.ranking_loss(dist_an, dist_ap, y)
return loss
def one_hot_smooth_label(x, num_class, smooth=0.1):
num = x.shape[0]
labels = torch.zeros((num, num_class))
for i in range(num):
labels[i][x[i]] = 1
labels = (1 - (num_class - 1) / num_class * smooth) * labels + smooth / num_class
return labels
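# Worked example of the smoothing above (added for illustration): with num_class=20
# and smooth=0.1, the true class gets (1 - 19/20*0.1) + 0.1/20 = 0.91 and every other
# class gets 0.1/20 = 0.005, giving a soft target instead of a hard one-hot vector.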
class Criterion:
def __init__(self):
self.triplet_criterion = TripletLoss()
self.cls_criterion = nn.BCEWithLogitsLoss()
def __call__(self, global_feat, local_feat, cls_score, label):
global_loss = self.triplet_criterion('global', global_feat, label)
local_loss = self.triplet_criterion('local', local_feat, label)
label = one_hot_smooth_label(label, 20)
cls_loss = self.cls_criterion(cls_score, label)
return global_loss + local_loss + cls_loss
def validate_child(model, dl, criterion, transform):
device = torch.device('cuda:0')
model = model.to(device)
model.eval()
steps = len(dl)
total_metric = 0
total_loss = 0
dl.dataset.transform = transform
with torch.no_grad():
for images, labels in dl:
images = images.to(device)
global_feat, local_feat, logits = model(images)
global_feat = global_feat.to('cpu')
logits = logits.to('cpu')
local_feat = local_feat.to('cpu')
loss = criterion(global_feat, local_feat, logits, labels)
metric = accuracy(logits, labels)
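# Note: `accuracy` is assumed to be defined in another cell of the original notebook; its definition is not shown here.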
total_loss += loss
total_metric += metric
metric = total_metric / steps
loss = total_loss / steps
return (metric, loss)
def get_next_subpolicy(transform_candidates, op_per_subpolicy=2):
n_candidates = len(transform_candidates)
subpolicy = []
for i in range(op_per_subpolicy):
index = random.randrange(n_candidates)
prob = random.random()
mag = random.random()
subpolicy.append(transform_candidates[index](prob, mag))
subpolicy = transforms.Compose([*subpolicy, transforms.Resize([300, 300]), transforms.ToTensor()])
return subpolicy
def search_subpolicies_hyperopt(transform_candidates, child_model, dl, B, criterion):
def _objective(sampled):
subpolicy = [transform(prob, mag) for transform, prob, mag in sampled]
subpolicy = transforms.Compose([transforms.Resize([300, 300]), *subpolicy, transforms.ToTensor()])
val_res = validate_child(child_model, dl, criterion, subpolicy)
loss = val_res[1].cpu().detach().numpy()
return {'loss': loss, 'status': STATUS_OK}
space = [(hp.choice('transform1', transform_candidates), hp.uniform('prob1', 0, 1.0), hp.uniform('mag1', 0, 1.0)), (hp.choice('transform2', transform_candidates), hp.uniform('prob2', 0, 1.0), hp.uniform('mag2', 0, 1.0)), (hp.choice('transform3', transform_candidates), hp.uniform('prob3', 0, 1.0), hp.uniform('mag3', 0, 1.0))]
trials = Trials()
best = fmin(_objective, space=space, algo=tpe.suggest, max_evals=B, trials=trials)
subpolicies = []
for t in trials.trials:
vals = t['misc']['vals']
subpolicy = [transform_candidates[vals['transform1'][0]](vals['prob1'][0], vals['mag1'][0]), transform_candidates[vals['transform2'][0]](vals['prob2'][0], vals['mag2'][0]), transform_candidates[vals['transform3'][0]](vals['prob3'][0], vals['mag3'][0])]
subpolicy = transforms.Compose([transforms.RandomHorizontalFlip(), *subpolicy, transforms.ToTensor()])
subpolicies.append((subpolicy, t['result']['loss']))
return subpolicies
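# Added comment: the search above lets hyperopt's TPE sampler propose B candidate
# sub-policies, each a triple of (transform, probability, magnitude) operations; every
# candidate is scored by the validation loss of the frozen child model under that
# augmentation, and all trials are returned with their losses so the caller can keep
# the best N.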
def get_topn_subpolicies(subpolicies, N=10):
return sorted(subpolicies, key=lambda subpolicy: subpolicy[1])[:N]
def process_fn(child_model, Da_dl, T, transform_candidates, B, N):
transform = []
criterion = Criterion()
for i in range(T):
subpolicies = search_subpolicies_hyperopt(transform_candidates, child_model, Da_dl, B, criterion)
subpolicies = get_topn_subpolicies(subpolicies, N)
transform.extend([subpolicy[0] for subpolicy in subpolicies])
return transform
from tqdm import tqdm
def fast_auto_augment(model, Da_dl, B=300, T=2, N=10):
transform_list = []
transform_candidates = DEFALUT_CANDIDATES
transform = process_fn(model, Da_dl, T, transform_candidates, B, N)
transform_list.extend(transform)
return transform_list
import pickle
def main():
k = 0
with open('/kaggle/input/linshi/valid_dl{}.txt'.format(k), 'rb') as f:
valid_dl = pickle.load(f)
ds = valid_dl.dataset
new_valid_dl = Data.DataLoader(ds, batch_size=32, shuffle=True)
with open('/kaggle/input/linshi/model{}.txt'.format(k), 'rb') as f:
model = pickle.load(f)
transform_list = fast_auto_augment(model, new_valid_dl)
file = open('transform_list{}.txt'.format(k), 'wb+')
pickle.dump(transform_list, file)
file.close()
main() | code |
34121284/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import requests
import json
import pandas as pd
import time
import plotly
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89135088/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
percent_over.percent_completed_hs.value_counts()
percent_over.info() | code |
89135088/cell_9 | [
"image_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
kill.name.value_counts() | code |
89135088/cell_4 | [
"text_plain_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
percentage_people.info() | code |
89135088/cell_6 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
percentage_people.poverty_rate.value_counts()
percentage_people.poverty_rate.replace(['-'], 0.0, inplace=True)
percentage_people.poverty_rate = percentage_people.poverty_rate.astype(float)
area_list = list(percentage_people['Geographic Area'].unique())
area_poverty_ratio = []
for i in area_list:
x = percentage_people[percentage_people['Geographic Area'] == i]
area_poverty_rate = sum(x.poverty_rate) / len(x)
area_poverty_ratio.append(area_poverty_rate)
data = pd.DataFrame({'area_list': area_list, 'area_poverty_ratio': area_poverty_ratio})
new_index = data['area_poverty_ratio'].sort_values(ascending=False).index.values
sorted_data = data.reindex(new_index)
plt.figure(figsize=(15, 10))
sns.barplot(x=sorted_data['area_list'], y=sorted_data['area_poverty_ratio'])
plt.xticks(rotation=45)
plt.xlabel('States')
plt.ylabel('Poverty Rate')
plt.title('Poverty Rate Given States') | code
89135088/cell_11 | [
"text_plain_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
percent_over.head() | code |
89135088/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89135088/cell_7 | [
"text_plain_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
kill.head() | code |
89135088/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
kill.info() | code |
89135088/cell_15 | [
"text_plain_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
share_race.head() | code |
89135088/cell_16 | [
"text_plain_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
share_race.info() | code |
89135088/cell_3 | [
"text_plain_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
percentage_people.head() | code |
89135088/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
percentage_people.poverty_rate.value_counts()
percentage_people.poverty_rate.replace(['-'], 0.0, inplace=True)
percentage_people.poverty_rate = percentage_people.poverty_rate.astype(float)
area_list = list(percentage_people['Geographic Area'].unique())
area_poverty_ratio = []
for i in area_list:
x = percentage_people[percentage_people['Geographic Area'] == i]
area_poverty_rate = sum(x.poverty_rate) / len(x)
area_poverty_ratio.append(area_poverty_rate)
data = pd.DataFrame({'area_list': area_list, 'area_poverty_ratio': area_poverty_ratio})
new_index = data['area_poverty_ratio'].sort_values(ascending=False).index.values
sorted_data = data.reindex(new_index)
plt.xticks(rotation=45)
kill.name.value_counts()
separate = kill.name[kill.name != 'TK TK'].str.split()
a, b = zip(*separate)
name_list = a + b
name_count = Counter(name_list)
most_common_names = name_count.most_common(15)
x, y = zip(*most_common_names)
x, y = (list(x), list(y))
percent_over.percent_completed_hs.value_counts()
percent_over.percent_completed_hs.replace(['-'], 0.0, inplace=True)
percent_over.percent_completed_hs = percent_over.percent_completed_hs.astype(float)
area_list = list(percent_over['Geographic Area'].unique())
area_highschool = []
for i in area_list:
x = percent_over[percent_over['Geographic Area'] == i]
area_highschool_rate = sum(x.percent_completed_hs) / len(x)
area_highschool.append(area_highschool_rate)
data = pd.DataFrame({'area_list': area_list, 'area_highschool_ratio': area_highschool})
new_index = data['area_highschool_ratio'].sort_values(ascending=True).index.values
sorted_data2 = data.reindex(new_index)
plt.xticks(rotation=45)
share_race.replace(['-'], 0.0, inplace=True)
share_race.replace(['(X)'], 0.0, inplace=True)
share_race.loc[:, ['share_white', 'share_black', 'share_native_american', 'share_asian', 'share_hispanic']] = share_race.loc[:, ['share_white', 'share_black', 'share_native_american', 'share_asian', 'share_hispanic']].astype(float)
area_list = list(share_race['Geographic area'].unique())
share_white = []
share_black = []
share_native_american = []
share_asian = []
share_hispanic = []
for i in area_list:
x = share_race[share_race['Geographic area'] == i]
share_white.append(sum(x.share_white) / len(x))
share_native_american.append(sum(x.share_native_american) / len(x))
share_black.append(sum(x.share_black) / len(x))
share_asian.append(sum(x.share_asian) / len(x))
share_hispanic.append(sum(x.share_hispanic) / len(x))
f, ax = plt.subplots(figsize=(9, 15))
sns.barplot(x=share_white, y=area_list, color='green', alpha=0.5, label='White')
sns.barplot(x=share_black, y=area_list, color='blue', alpha=0.5, label='Black')
sns.barplot(x=share_native_american, y=area_list, color='cyan', alpha=0.5, label='Native American')
sns.barplot(x=share_asian, y=area_list, color='yellow', alpha=0.5, label='Asian')
sns.barplot(x=share_hispanic, y=area_list, color='red', alpha=0.5, label='Hispanic')
ax.legend(loc='lower right', frameon=True) | code |
89135088/cell_14 | [
"text_html_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
percentage_people.poverty_rate.value_counts()
percentage_people.poverty_rate.replace(['-'], 0.0, inplace=True)
percentage_people.poverty_rate = percentage_people.poverty_rate.astype(float)
area_list = list(percentage_people['Geographic Area'].unique())
area_poverty_ratio = []
for i in area_list:
x = percentage_people[percentage_people['Geographic Area'] == i]
area_poverty_rate = sum(x.poverty_rate) / len(x)
area_poverty_ratio.append(area_poverty_rate)
data = pd.DataFrame({'area_list': area_list, 'area_poverty_ratio': area_poverty_ratio})
new_index = data['area_poverty_ratio'].sort_values(ascending=False).index.values
sorted_data = data.reindex(new_index)
plt.xticks(rotation=45)
kill.name.value_counts()
separate = kill.name[kill.name != 'TK TK'].str.split()
a, b = zip(*separate)
name_list = a + b
name_count = Counter(name_list)
most_common_names = name_count.most_common(15)
x, y = zip(*most_common_names)
x, y = (list(x), list(y))
percent_over.percent_completed_hs.value_counts()
percent_over.percent_completed_hs.replace(['-'], 0.0, inplace=True)
percent_over.percent_completed_hs = percent_over.percent_completed_hs.astype(float)
area_list = list(percent_over['Geographic Area'].unique())
area_highschool = []
for i in area_list:
x = percent_over[percent_over['Geographic Area'] == i]
area_highschool_rate = sum(x.percent_completed_hs) / len(x)
area_highschool.append(area_highschool_rate)
data = pd.DataFrame({'area_list': area_list, 'area_highschool_ratio': area_highschool})
new_index = data['area_highschool_ratio'].sort_values(ascending=True).index.values
sorted_data2 = data.reindex(new_index)
plt.figure(figsize=(15, 10))
sns.barplot(x=sorted_data2['area_list'], y=sorted_data2['area_highschool_ratio'])
plt.xticks(rotation=45)
plt.xlabel('States')
plt.ylabel('High School Graduate Rate') | code
89135088/cell_10 | [
"text_html_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
percentage_people.poverty_rate.value_counts()
percentage_people.poverty_rate.replace(['-'], 0.0, inplace=True)
percentage_people.poverty_rate = percentage_people.poverty_rate.astype(float)
area_list = list(percentage_people['Geographic Area'].unique())
area_poverty_ratio = []
for i in area_list:
x = percentage_people[percentage_people['Geographic Area'] == i]
area_poverty_rate = sum(x.poverty_rate) / len(x)
area_poverty_ratio.append(area_poverty_rate)
data = pd.DataFrame({'area_list': area_list, 'area_poverty_ratio': area_poverty_ratio})
new_index = data['area_poverty_ratio'].sort_values(ascending=False).index.values
sorted_data = data.reindex(new_index)
plt.xticks(rotation=45)
kill.name.value_counts()
separate = kill.name[kill.name != 'TK TK'].str.split()
a, b = zip(*separate)
name_list = a + b
name_count = Counter(name_list)
most_common_names = name_count.most_common(15)
x, y = zip(*most_common_names)
x, y = (list(x), list(y))
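# Added comment: the block above drops the 'TK TK' placeholder, splits each victim's
# name into first and last tokens, pools them into one list, and uses
# collections.Counter to pick the 15 most frequent tokens; x holds the names and y
# their counts for the bar plot below.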
plt.figure(figsize=(15, 10))
sns.barplot(x=x, y=y, palette=sns.cubehelix_palette(len(x)))
plt.xlabel('Name or surname of killed people')
plt.ylabel('Frequency')
plt.title('15 most common names or surnames of killed people') | code
89135088/cell_12 | [
"text_plain_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
percent_over.percent_completed_hs.value_counts() | code |
89135088/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | median_house = pd.read_csv('../input/fatal-police-shootings-in-the-us/MedianHouseholdIncome2015.csv', encoding='windows-1252')
percent_over = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentOver25CompletedHighSchool.csv', encoding='windows-1252')
percentage_people = pd.read_csv('../input/fatal-police-shootings-in-the-us/PercentagePeopleBelowPovertyLevel.csv', encoding='windows-1252')
kill = pd.read_csv('../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv', encoding='windows-1252')
share_race = pd.read_csv('../input/fatal-police-shootings-in-the-us/ShareRaceByCity.csv', encoding='windows-1252')
percentage_people.poverty_rate.value_counts() | code |
17104067/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_total.plot(kind='bar', title='Party Spending - All Parties') | code
17104067/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
print(f'There are {nRow} rows and {nCol} columns') | code |
17104067/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
plt.title('Evolution of Party Spending', loc='center', fontsize=12, fontweight=0, color='black')
plt.xlabel('Year')
plt.ylabel('Spending')
plt.plot(cota_por_ano) | code |
17104067/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
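# Added comment: groupby('sgpartido') totals the expense values (vlrdocumento) per
# party, sorted in descending order, while groupby('numano') totals them per year;
# these two frames feed the plots in the following cells.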
cota_por_ano | code |
17104067/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_total | code |
17104067/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
cota_total2.plot(kind='pie', title='Top 5 Parties by Spending', subplots=True) | code
17104067/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
#partido_valor = pd.DataFrame()
#partido_valor['sgpartido'] = cota['sgpartido']
#partido_valor['vlrdocumento'] = cota['vlrdocumento']
#partido_valor['nulegislatura'] = cota['nulegislatura']
#cota['nulegislatura'].value_counts()
#cota.groupby(['nulegislatura']).sum()
cota_total = pd.DataFrame(cota.groupby(['sgpartido'])['vlrdocumento'].sum().sort_values(ascending=False))
cota_total2 = cota_total.head(5)
#cota.groupby('nulegislatura')['vlrdocumento'].sum().sort_values(ascending=False)
cota_por_ano = pd.DataFrame(cota.groupby('numano')['vlrdocumento'].sum())
#partido_valor.groupby(['sgpartido']).sum()
cota_por_ano
pd.DataFrame(cota.groupby(['numano', 'nummes'])['vlrdocumento'].sum()) | code |
17104067/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
cota = pd.read_csv('../input/cota_parlamentar_sp.csv', delimiter=';')
cota.dataframeName = 'cota_parlamentar_sp.csv'
nRow, nCol = cota.shape
cota.head(10) | code |