Dataset preview schema (per record):
  path              string, length 13 to 17 (format "<notebook_id>/cell_<n>")
  screenshot_names  sequence of strings, length 1 to 873
  code              string, length 0 to 40.4k
  cell_type         string, 1 distinct value ("code")
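For orientation, a minimal sketch of reading records with this schema. It assumes the rows are exported as a JSON Lines file; the file name cells.jsonl is hypothetical.

import json

# Hypothetical export of the preview above: one JSON object per line with the
# fields path, screenshot_names, code and cell_type.
with open('cells.jsonl') as f:
    for line in f:
        record = json.loads(line)
        # path looks like "<notebook_id>/cell_<n>"; code holds the raw cell source.
        print(record['path'], record['cell_type'], len(record['screenshot_names']))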
17116059/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

quartet = pd.read_csv('../input/quartet.csv', index_col='id')
df = pd.read_csv('../input/raw_lemonade_data.csv')
print(df.head())
print(df.tail())
code
17116059/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

quartet = pd.read_csv('../input/quartet.csv', index_col='id')
quartet.groupby('dataset').agg(['mean', 'std'])
code
72092898/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from scipy.spatial.distance import squareform, pdist
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

wine_data = pd.read_csv('../input/uci-wine-data/wine-clustering.csv')

def normalize_data(data):
    data_normalized = data.copy()
    for col in data.columns:
        data_normalized[col] = (data_normalized[col] - data_normalized[col].min()) / (data_normalized[col].max() - data_normalized[col].min())
    return data_normalized

wine_data_normalized = normalize_data(wine_data)

def get_dissimilarity(data):
    from scipy.spatial.distance import squareform, pdist
    similarity_matrix = pd.DataFrame(squareform(pdist(data, 'euclidean')))
    return similarity_matrix

similarity_matrix = get_dissimilarity(wine_data_normalized)
similarity_matrix

def get_avg_dissimilarity(data):
    avg_dissimilarity = np.zeros((data.shape[0], 1))
    for i in range(data.shape[0]):
        avg_dissimilarity[i] = data[i].mean()
    return avg_dissimilarity

avg_dissimilarity = get_avg_dissimilarity(similarity_matrix)
(avg_dissimilarity[:10], avg_dissimilarity.shape)

def form_m_clusters(data, avg_data):
    cluster_objects = []
    cluster = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            if data[i][j] < avg_data[i]:
                cluster.append(j)
        cluster_objects.append(cluster)
        cluster = []
    return cluster_objects

cluster_objects = form_m_clusters(similarity_matrix, avg_dissimilarity)
len(cluster_objects)

def remove_subset_clusters(cluster_objects):
    for i in range(len(cluster_objects)):
        for j in range(i + 1, len(cluster_objects)):
            if j < len(cluster_objects) and set(cluster_objects[j]).issubset(set(cluster_objects[i])):
                cluster_objects = np.delete(cluster_objects, j, axis=0)
                print('cluster', j, 'subset of cluster', i, 'deleted!')
    return cluster_objects
code
72092898/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

wine_data = pd.read_csv('../input/uci-wine-data/wine-clustering.csv')

def normalize_data(data):
    data_normalized = data.copy()
    for col in data.columns:
        print(col, 'max:', data[col].max(), 'min:', data[col].min())
        data_normalized[col] = (data_normalized[col] - data_normalized[col].min()) / (data_normalized[col].max() - data_normalized[col].min())
    return data_normalized

wine_data_normalized = normalize_data(wine_data)
wine_data_normalized.head()
code
72092898/cell_6
[ "text_plain_output_1.png" ]
from scipy.spatial.distance import squareform, pdist import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) wine_data = pd.read_csv('../input/uci-wine-data/wine-clustering.csv') def normalize_data(data): data_normalized = data.copy() for col in data.columns: data_normalized[col] = (data_normalized[col] - data_normalized[col].min()) / (data_normalized[col].max() - data_normalized[col].min()) return data_normalized wine_data_normalized = normalize_data(wine_data) def get_dissimilarity(data): from scipy.spatial.distance import squareform, pdist similarity_matrix = pd.DataFrame(squareform(pdist(data, 'euclidean'))) return similarity_matrix similarity_matrix = get_dissimilarity(wine_data_normalized) similarity_matrix def get_avg_dissimilarity(data): avg_dissimilarity = np.zeros((data.shape[0], 1)) for i in range(data.shape[0]): avg_dissimilarity[i] = data[i].mean() return avg_dissimilarity avg_dissimilarity = get_avg_dissimilarity(similarity_matrix) (avg_dissimilarity[:10], avg_dissimilarity.shape)
code
72092898/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

wine_data = pd.read_csv('../input/uci-wine-data/wine-clustering.csv')
wine_data.head()
code
72092898/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72092898/cell_7
[ "text_plain_output_1.png" ]
from scipy.spatial.distance import squareform, pdist import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) wine_data = pd.read_csv('../input/uci-wine-data/wine-clustering.csv') def normalize_data(data): data_normalized = data.copy() for col in data.columns: data_normalized[col] = (data_normalized[col] - data_normalized[col].min()) / (data_normalized[col].max() - data_normalized[col].min()) return data_normalized wine_data_normalized = normalize_data(wine_data) def get_dissimilarity(data): from scipy.spatial.distance import squareform, pdist similarity_matrix = pd.DataFrame(squareform(pdist(data, 'euclidean'))) return similarity_matrix similarity_matrix = get_dissimilarity(wine_data_normalized) similarity_matrix def get_avg_dissimilarity(data): avg_dissimilarity = np.zeros((data.shape[0], 1)) for i in range(data.shape[0]): avg_dissimilarity[i] = data[i].mean() return avg_dissimilarity avg_dissimilarity = get_avg_dissimilarity(similarity_matrix) (avg_dissimilarity[:10], avg_dissimilarity.shape) def form_m_clusters(data, avg_data): cluster_objects = [] cluster = [] for i in range(data.shape[0]): for j in range(data.shape[1]): if data[i][j] < avg_data[i]: cluster.append(j) cluster_objects.append(cluster) cluster = [] return cluster_objects cluster_objects = form_m_clusters(similarity_matrix, avg_dissimilarity) len(cluster_objects)
code
72092898/cell_8
[ "text_plain_output_1.png" ]
from scipy.spatial.distance import squareform, pdist import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) wine_data = pd.read_csv('../input/uci-wine-data/wine-clustering.csv') def normalize_data(data): data_normalized = data.copy() for col in data.columns: data_normalized[col] = (data_normalized[col] - data_normalized[col].min()) / (data_normalized[col].max() - data_normalized[col].min()) return data_normalized wine_data_normalized = normalize_data(wine_data) def get_dissimilarity(data): from scipy.spatial.distance import squareform, pdist similarity_matrix = pd.DataFrame(squareform(pdist(data, 'euclidean'))) return similarity_matrix similarity_matrix = get_dissimilarity(wine_data_normalized) similarity_matrix def get_avg_dissimilarity(data): avg_dissimilarity = np.zeros((data.shape[0], 1)) for i in range(data.shape[0]): avg_dissimilarity[i] = data[i].mean() return avg_dissimilarity avg_dissimilarity = get_avg_dissimilarity(similarity_matrix) (avg_dissimilarity[:10], avg_dissimilarity.shape) def form_m_clusters(data, avg_data): cluster_objects = [] cluster = [] for i in range(data.shape[0]): for j in range(data.shape[1]): if data[i][j] < avg_data[i]: cluster.append(j) cluster_objects.append(cluster) cluster = [] return cluster_objects cluster_objects = form_m_clusters(similarity_matrix, avg_dissimilarity) len(cluster_objects) for i in range(5): print('cluster', i, '(', max(cluster_objects[i]), ')', ': ', cluster_objects[i]) print('')
code
72092898/cell_3
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

wine_data = pd.read_csv('../input/uci-wine-data/wine-clustering.csv')
wine_data.info()
code
72092898/cell_5
[ "text_plain_output_1.png" ]
from scipy.spatial.distance import squareform, pdist
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

wine_data = pd.read_csv('../input/uci-wine-data/wine-clustering.csv')

def normalize_data(data):
    data_normalized = data.copy()
    for col in data.columns:
        data_normalized[col] = (data_normalized[col] - data_normalized[col].min()) / (data_normalized[col].max() - data_normalized[col].min())
    return data_normalized

wine_data_normalized = normalize_data(wine_data)

def get_dissimilarity(data):
    from scipy.spatial.distance import squareform, pdist
    similarity_matrix = pd.DataFrame(squareform(pdist(data, 'euclidean')))
    return similarity_matrix

similarity_matrix = get_dissimilarity(wine_data_normalized)
similarity_matrix
code
324878/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ test_df.describe()
code
324878/cell_9
[ "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ sns.countplot(x='Embarked', data=train_df)
code
324878/cell_25
[ "text_plain_output_1.png" ]
from sklearn.svm import SVC, LinearSVC import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ X_train = train_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values Y_train = train_df[['Survived']].values X_test = test_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values svc = SVC() svc.fit(X_train, Y_train) Y_pred_svm = svc.predict(X_test) svc.score(X_train, Y_train)
code
324878/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})

"""
Takeaways
Age is null for some rows
Cabin is almost always null
Embarked is null for only 2 rows
"""

train_df.info()
code
324878/cell_23
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ X_train = train_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values Y_train = train_df[['Survived']].values X_test = test_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values knn = KNeighborsClassifier(n_neighbors=3) knn.fit(X_train, Y_train) Y_pred_knn = knn.predict(X_test) knn.score(X_train, Y_train)
code
324878/cell_20
[ "text_html_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ train_df['Family']
code
324878/cell_6
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})

"""
Takeaways
Age is null for some rows
Cabin is almost always null
Embarked is null for only 2 rows
"""

"""
Takeaways
Fare is null for one row
Cabin null for most
Age null for some
"""

median_age = train_df['Age'].median()
train_df['Age'] = train_df['Age'].fillna(median_age)
train_df['Embarked'] = train_df['Embarked'].fillna('S')
test_df['Age'] = test_df['Age'].fillna(median_age)
test_df['Fare'] = test_df['Fare'].fillna(0)
test_df['Cabin'] = test_df['Cabin'].fillna('Missing')
train_df['Cabin'] = train_df['Cabin'].fillna('Missing')
code
324878/cell_26
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ X_train = train_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values Y_train = train_df[['Survived']].values X_test = test_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values logreg = LogisticRegression() logreg.fit(X_train, Y_train) Y_pred_log = logreg.predict(X_test) logreg.score(X_train, Y_train)
code
324878/cell_11
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ sns.countplot(x='Survived', hue='Pclass', data=train_df, order=[0, 1])
code
324878/cell_19
[ "text_html_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ train_df.info()
code
324878/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
324878/cell_7
[ "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ train_df.info() test_df.info()
code
324878/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})

"""
Takeaways
Age is null for some rows
Cabin is almost always null
Embarked is null for only 2 rows
"""

"""
Takeaways
Fare is null for one row
Cabin null for most
Age null for some
"""

train_df['Family'] = train_df['Parch'] + train_df['SibSp']
train_df['Family'].loc[train_df['Family'] > 0] = 1
train_df['Family'].loc[train_df['Family'] == 0] = 0
test_df['Family'] = test_df['Parch'] + test_df['SibSp']
test_df['Family'].loc[test_df['Family'] > 0] = 1
test_df['Family'].loc[test_df['Family'] == 0] = 0
code
324878/cell_28
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})

"""
Takeaways
Age is null for some rows
Cabin is almost always null
Embarked is null for only 2 rows
"""

"""
Takeaways
Fare is null for one row
Cabin null for most
Age null for some
"""

X_train = train_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values
Y_train = train_df[['Survived']].values
X_test = test_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values

random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)

pid = np.array(test_df['PassengerId']).astype(int)
my_solution = pd.DataFrame(Y_pred, pid, columns=['Survived'])
print(my_solution)
print(my_solution.shape)
my_solution.to_csv('titanic_solution_rf.csv', index_label=['PassengerId'])
code
324878/cell_8
[ "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ sns.factorplot('Embarked', 'Survived', data=train_df, size=4)
code
324878/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ train_df['Child'] = float(0) train_df['Child'][train_df['Age'] < 18] = 1 train_df['Child'][train_df['Age'] >= 18] = 0 test_df['Child'] = float(0) test_df['Child'][test_df['Age'] < 18] = 1 test_df['Child'][test_df['Age'] >= 18] = 0
code
324878/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ letters = 'ABCDEFGHIJKLM' train_df['CabinStartsWith'] = float(-1) train_df['CabinStartsWithLetter'] = '' train_df['CabinStartsWithLetter'][train_df['Cabin'] != 'Missing'] = train_df['Cabin'].str[0] train_df['CabinStartsWith'][train_df['CabinStartsWithLetter'] == 'A'] = 0 train_df['CabinStartsWith'][train_df['CabinStartsWithLetter'] == 'B'] = 1 train_df['CabinStartsWith'][train_df['CabinStartsWithLetter'] == 'C'] = 2 train_df['CabinStartsWith'][train_df['CabinStartsWithLetter'] == 'D'] = 3 train_df['CabinStartsWith'][train_df['CabinStartsWithLetter'] == 'E'] = 4 train_df['CabinStartsWith'][train_df['CabinStartsWithLetter'] == 'F'] = 5 train_df['CabinStartsWith'][train_df['CabinStartsWithLetter'] == 'G'] = 6 test_df['CabinStartsWith'] = -1 test_df['CabinStartsWithLetter'] = '' test_df['CabinStartsWithLetter'][test_df['Cabin'] != 'Missing'] = test_df['Cabin'].str[0] test_df['CabinStartsWith'][test_df['CabinStartsWithLetter'] == 'A'] = 0 test_df['CabinStartsWith'][test_df['CabinStartsWithLetter'] == 'B'] = 1 test_df['CabinStartsWith'][test_df['CabinStartsWithLetter'] == 'C'] = 2 test_df['CabinStartsWith'][test_df['CabinStartsWithLetter'] == 'D'] = 3 test_df['CabinStartsWith'][test_df['CabinStartsWithLetter'] == 'E'] = 4 test_df['CabinStartsWith'][test_df['CabinStartsWithLetter'] == 'F'] = 5 test_df['CabinStartsWith'][test_df['CabinStartsWithLetter'] == 'G'] = 6
code
324878/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
train_df.head()
code
324878/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ train_df['CabinStartsWith']
code
324878/cell_24
[ "application_vnd.jupyter.stderr_output_27.png", "application_vnd.jupyter.stderr_output_35.png", "application_vnd.jupyter.stderr_output_24.png", "application_vnd.jupyter.stderr_output_16.png", "application_vnd.jupyter.stderr_output_9.png", "application_vnd.jupyter.stderr_output_52.png", "application_vnd.jupyter.stderr_output_53.png", "application_vnd.jupyter.stderr_output_32.png", "application_vnd.jupyter.stderr_output_48.png", "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_11.png", "application_vnd.jupyter.stderr_output_18.png", "application_vnd.jupyter.stderr_output_38.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_26.png", "application_vnd.jupyter.stderr_output_6.png", "application_vnd.jupyter.stderr_output_31.png", "application_vnd.jupyter.stderr_output_33.png", "application_vnd.jupyter.stderr_output_25.png", "application_vnd.jupyter.stderr_output_12.png", "application_vnd.jupyter.stderr_output_8.png", "application_vnd.jupyter.stderr_output_10.png", "application_vnd.jupyter.stderr_output_23.png", "application_vnd.jupyter.stderr_output_34.png", "application_vnd.jupyter.stderr_output_19.png", "application_vnd.jupyter.stderr_output_44.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_42.png", "application_vnd.jupyter.stderr_output_5.png", "application_vnd.jupyter.stderr_output_30.png", "application_vnd.jupyter.stderr_output_15.png", "application_vnd.jupyter.stderr_output_17.png", "application_vnd.jupyter.stderr_output_28.png", "application_vnd.jupyter.stderr_output_46.png", "application_vnd.jupyter.stderr_output_41.png", "application_vnd.jupyter.stderr_output_20.png", "application_vnd.jupyter.stderr_output_49.png", "application_vnd.jupyter.stderr_output_47.png", "application_vnd.jupyter.stderr_output_36.png", "application_vnd.jupyter.stderr_output_57.png", "application_vnd.jupyter.stderr_output_22.png", "application_vnd.jupyter.stderr_output_56.png", "application_vnd.jupyter.stderr_output_50.png", "application_vnd.jupyter.stderr_output_29.png", "application_vnd.jupyter.stderr_output_1.png", "application_vnd.jupyter.stderr_output_51.png", "application_vnd.jupyter.stderr_output_45.png", "application_vnd.jupyter.stderr_output_14.png", "application_vnd.jupyter.stderr_output_39.png", "application_vnd.jupyter.stderr_output_21.png", "application_vnd.jupyter.stderr_output_43.png", "application_vnd.jupyter.stderr_output_54.png", "application_vnd.jupyter.stderr_output_55.png", "application_vnd.jupyter.stderr_output_40.png", "application_vnd.jupyter.stderr_output_37.png" ]
from sklearn.naive_bayes import GaussianNB import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ X_train = train_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values Y_train = train_df[['Survived']].values X_test = test_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values gaussian = GaussianNB() gaussian.fit(X_train, Y_train) Y_pred_nb = gaussian.predict(X_test) gaussian.score(X_train, Y_train)
code
324878/cell_14
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ test_df['Sex'][test_df['Sex'] == 'male'] = 0 test_df['Sex'][test_df['Sex'] == 'female'] = 1 test_df['Embarked'][test_df['Embarked'] == 'S'] = 0 test_df['Embarked'][test_df['Embarked'] == 'C'] = 1 test_df['Embarked'][test_df['Embarked'] == 'Q'] = 2 train_df['Sex'][train_df['Sex'] == 'male'] = 0 train_df['Sex'][train_df['Sex'] == 'female'] = 1 train_df['Embarked'][train_df['Embarked'] == 'S'] = 0 train_df['Embarked'][train_df['Embarked'] == 'C'] = 1 train_df['Embarked'][train_df['Embarked'] == 'Q'] = 2
code
324878/cell_22
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ X_train = train_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values Y_train = train_df[['Survived']].values X_test = test_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values random_forest = RandomForestClassifier(n_estimators=100) random_forest.fit(X_train, Y_train) Y_pred = random_forest.predict(X_test) random_forest.score(X_train, Y_train)
code
324878/cell_10
[ "text_html_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ sns.countplot(x='Survived', hue='Embarked', data=train_df, order=[0, 1])
code
324878/cell_27
[ "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.svm import SVC, LinearSVC import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ X_train = train_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values Y_train = train_df[['Survived']].values X_test = test_df[['Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked', 'Child', 'Family', 'CabinStartsWith']].values linreg = LinearSVC() linreg.fit(X_train, Y_train) Y_pred_lin = linreg.predict(X_test) linreg.score(X_train, Y_train)
code
324878/cell_12
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64}) test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64}) """ Takeaways Age is null for some rows Cabin is almost always null Embarked is null for only 2 rows """ """ Takeaways Fare is null for one row Cabin null for most Age null for some """ train_df.describe()
code
324878/cell_5
[ "text_plain_output_1.png" ]
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})

"""
Takeaways
Fare is null for one row
Cabin null for most
Age null for some
"""

test_df.info()
code
2000632/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))

import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Input, Dropout
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
code
2000632/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.layers import Dense, Input, Dropout
from keras.models import Sequential

model = Sequential()
model.add(Dense(X_train.shape[1], input_shape=(784,), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(y_train.shape[1], activation='softmax'))
model.compile(optimizer='adam', metrics=['accuracy'], loss='categorical_crossentropy')
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=5, batch_size=100)
code
89123618/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

given_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/train.csv')
test_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/test.csv')
given_data.columns
code
89123618/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

given_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/train.csv')
test_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/test.csv')
given_data.columns
given_data.isnull().sum()
code
89123618/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

given_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/train.csv')
test_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/test.csv')
given_data.columns
given_data.isnull().sum()
test_data.isnull().sum()
drop_cols = ['BMI', 'PatientID']
given_data.drop(columns=drop_cols, inplace=True)
test = test_data.drop(columns=drop_cols)
sns.countplot(given_data.PhysHlth)
code
89123618/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89123618/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

given_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/train.csv')
test_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/test.csv')
test_data.isnull().sum()
code
89123618/cell_16
[ "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier()
clf = clf.fit(X_train, y_train)
clf.score(X_train, y_train)
code
89123618/cell_17
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier()
clf = clf.fit(X_train, y_train)
clf.score(X_train, y_train)
clf.score(X_test, y_test)
code
89123618/cell_14
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

given_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/train.csv')
test_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/test.csv')
given_data.columns
given_data.isnull().sum()
test_data.isnull().sum()
drop_cols = ['BMI', 'PatientID']
given_data.drop(columns=drop_cols, inplace=True)
test = test_data.drop(columns=drop_cols)
y = given_data['target']
X = given_data.drop(columns=['target'])
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print(len(X_train), len(X_test))
code
89123618/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

given_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/train.csv')
test_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/test.csv')
given_data.columns
given_data.isnull().sum()
test_data.isnull().sum()
drop_cols = ['BMI', 'PatientID']
given_data.drop(columns=drop_cols, inplace=True)
test = test_data.drop(columns=drop_cols)
given_data.hist(figsize=(20, 18))
code
89123618/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

given_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/train.csv')
test_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/test.csv')
given_data.columns
given_data.isnull().sum()
test_data.isnull().sum()
drop_cols = ['BMI', 'PatientID']
given_data.drop(columns=drop_cols, inplace=True)
test = test_data.drop(columns=drop_cols)
sns.heatmap(given_data.corr(), annot=True)
code
89123618/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

given_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/train.csv')
test_data = pd.read_csv('/kaggle/input/ml-olympiad-good-health-and-well-being/test.csv')
given_data.columns
given_data.head()
code
1006492/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df_train = pd.read_csv('../input/train.csv')
df_store = pd.read_csv('../input/store.csv')
df_test = pd.read_csv('../input/test.csv')

fig, axis1 = plt.subplots(1, 1, figsize=(15, 4))
sns.countplot(x='Open', hue='DayOfWeek', data=df_train)
code
1006492/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') fig, (axis1) = plt.subplots(1,1,figsize=(15,4)) sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,) df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4])) df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7])) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(10,5)) average_monthly_sales.plot(legend=True,marker='o',title="Average Sales") average_daily_sales = df_train.groupby('Date')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_sales.plot(title="Average Daily Sales") average_daily_visits = df_train.groupby('Date')["Customers"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_visits.plot(title="Average Daily Visits") fig, (axis1,axis2) = plt.subplots(2,1,sharex=True,figsize=(15,8)) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() # plot average sales over time (year-month) ax1 = average_monthly_sales.plot(legend = False, ax = axis1, marker = 'o', title = "Avg. Monthly Sales") ax1.set_xticks(range(len(average_monthly_sales))) ax1.set_xticklabels(average_monthly_sales.index.tolist(), rotation=90) average_monthly_sales_change = df_train.groupby('Month')["Sales"].sum().pct_change() # plot precent change for sales over time(year-month) ax2 = average_monthly_sales_change.plot(legend = False, ax = axis2, marker = 'o', colormap = "summer", title = "% Change Monthly Sales") fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,4)) sns.barplot(x ='Month', y ='Sales', data = df_train, ax=axis1) sns.barplot(x ='Month', y ='Customers', data = df_train, ax=axis2) fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,4)) sns.barplot(x='DayOfWeek', y='Sales', data = df_train, order = [1,2,3,4,5,6,7], ax = axis1) sns.barplot(x='DayOfWeek', y='Customers', data = df_train, order = [1,2,3,4,5,6,7], ax = axis2) df_train.StateHoliday.unique() df_train['StateHoliday'] = df_train['StateHoliday'].replace(0, '0') df_train.StateHoliday.unique() sns.factorplot(x='Year', y='Sales', hue='StateHoliday', data=df_train, size=6, kind='bar', palette='muted')
code
1006492/cell_33
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') fig, (axis1) = plt.subplots(1,1,figsize=(15,4)) sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,) df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4])) df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7])) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(10,5)) average_monthly_sales.plot(legend=True,marker='o',title="Average Sales") average_daily_sales = df_train.groupby('Date')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_sales.plot(title="Average Daily Sales") average_daily_visits = df_train.groupby('Date')["Customers"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_visits.plot(title="Average Daily Visits") fig, (axis1,axis2) = plt.subplots(2,1,sharex=True,figsize=(15,8)) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() # plot average sales over time (year-month) ax1 = average_monthly_sales.plot(legend = False, ax = axis1, marker = 'o', title = "Avg. Monthly Sales") ax1.set_xticks(range(len(average_monthly_sales))) ax1.set_xticklabels(average_monthly_sales.index.tolist(), rotation=90) average_monthly_sales_change = df_train.groupby('Month')["Sales"].sum().pct_change() # plot precent change for sales over time(year-month) ax2 = average_monthly_sales_change.plot(legend = False, ax = axis2, marker = 'o', colormap = "summer", title = "% Change Monthly Sales") fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,4)) sns.barplot(x ='Month', y ='Sales', data = df_train, ax=axis1) sns.barplot(x ='Month', y ='Customers', data = df_train, ax=axis2) fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,4)) sns.barplot(x='DayOfWeek', y='Sales', data = df_train, order = [1,2,3,4,5,6,7], ax = axis1) sns.barplot(x='DayOfWeek', y='Customers', data = df_train, order = [1,2,3,4,5,6,7], ax = axis2) df_train.StateHoliday.unique() df_train['StateHoliday'] = df_train['StateHoliday'].replace(0, '0') df_train.StateHoliday.unique() df_train['HolidayBin'] = df_train['StateHoliday'].map({'0': 0, 'a': 1, 'b': 1, 'c': 1}) df_train.HolidayBin.unique() sns.factorplot(x='Month', y='Sales', hue='HolidayBin', data=df_train, size=6, kind='bar', palette='muted')
code
1006492/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') fig, (axis1) = plt.subplots(1,1,figsize=(15,4)) sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,) df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4])) df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7])) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(10,5)) average_monthly_sales.plot(legend=True,marker='o',title="Average Sales") average_daily_sales = df_train.groupby('Date')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_sales.plot(title="Average Daily Sales") average_daily_visits = df_train.groupby('Date')["Customers"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_visits.plot(title="Average Daily Visits") fig, (axis1, axis2) = plt.subplots(2, 1, sharex=True, figsize=(15, 8)) average_monthly_sales = df_train.groupby('Month')['Sales'].mean() ax1 = average_monthly_sales.plot(legend=False, ax=axis1, marker='o', title='Avg. Monthly Sales') ax1.set_xticks(range(len(average_monthly_sales))) ax1.set_xticklabels(average_monthly_sales.index.tolist(), rotation=90) average_monthly_sales_change = df_train.groupby('Month')['Sales'].sum().pct_change() ax2 = average_monthly_sales_change.plot(legend=False, ax=axis2, marker='o', colormap='summer', title='% Change Monthly Sales')
code
1006492/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') fig, (axis1) = plt.subplots(1,1,figsize=(15,4)) sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,) df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4])) df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7])) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(10,5)) average_monthly_sales.plot(legend=True,marker='o',title="Average Sales") average_daily_sales = df_train.groupby('Date')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_sales.plot(title="Average Daily Sales") average_daily_visits = df_train.groupby('Date')["Customers"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_visits.plot(title="Average Daily Visits") fig, (axis1,axis2) = plt.subplots(2,1,sharex=True,figsize=(15,8)) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() # plot average sales over time (year-month) ax1 = average_monthly_sales.plot(legend = False, ax = axis1, marker = 'o', title = "Avg. Monthly Sales") ax1.set_xticks(range(len(average_monthly_sales))) ax1.set_xticklabels(average_monthly_sales.index.tolist(), rotation=90) average_monthly_sales_change = df_train.groupby('Month')["Sales"].sum().pct_change() # plot precent change for sales over time(year-month) ax2 = average_monthly_sales_change.plot(legend = False, ax = axis2, marker = 'o', colormap = "summer", title = "% Change Monthly Sales") fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,4)) sns.barplot(x ='Month', y ='Sales', data = df_train, ax=axis1) sns.barplot(x ='Month', y ='Customers', data = df_train, ax=axis2) fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,4)) sns.barplot(x='DayOfWeek', y='Sales', data = df_train, order = [1,2,3,4,5,6,7], ax = axis1) sns.barplot(x='DayOfWeek', y='Customers', data = df_train, order = [1,2,3,4,5,6,7], ax = axis2) sns.factorplot(x='Year', y='Sales', hue='Promo', data=df_train, size=6, kind='box', palette='muted')
code
1006492/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_store = pd.read_csv('../input/store.csv')
df_test = pd.read_csv('../input/test.csv')

df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4]))
df_train.Year.head()
code
1006492/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_store = pd.read_csv('../input/store.csv')
df_test = pd.read_csv('../input/test.csv')
df_store.head()
code
1006492/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') fig, (axis1) = plt.subplots(1,1,figsize=(15,4)) sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,) df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4])) df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7])) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(10,5)) average_monthly_sales.plot(legend=True,marker='o',title="Average Sales") average_daily_sales = df_train.groupby('Date')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_sales.plot(title="Average Daily Sales") average_daily_visits = df_train.groupby('Date')['Customers'].mean() fig = plt.subplots(1, 1, sharex=True, figsize=(25, 8)) average_daily_visits.plot(title='Average Daily Visits')
code
1006492/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') fig, (axis1) = plt.subplots(1,1,figsize=(15,4)) sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,) df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4])) df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7])) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(10,5)) average_monthly_sales.plot(legend=True,marker='o',title="Average Sales") average_daily_sales = df_train.groupby('Date')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_sales.plot(title="Average Daily Sales") average_daily_visits = df_train.groupby('Date')["Customers"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_visits.plot(title="Average Daily Visits") fig, (axis1,axis2) = plt.subplots(2,1,sharex=True,figsize=(15,8)) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() # plot average sales over time (year-month) ax1 = average_monthly_sales.plot(legend = False, ax = axis1, marker = 'o', title = "Avg. Monthly Sales") ax1.set_xticks(range(len(average_monthly_sales))) ax1.set_xticklabels(average_monthly_sales.index.tolist(), rotation=90) average_monthly_sales_change = df_train.groupby('Month')["Sales"].sum().pct_change() # plot precent change for sales over time(year-month) ax2 = average_monthly_sales_change.plot(legend = False, ax = axis2, marker = 'o', colormap = "summer", title = "% Change Monthly Sales") df_train.StateHoliday.unique() df_train['StateHoliday'] = df_train['StateHoliday'].replace(0, '0') df_train.StateHoliday.unique()
code
1006492/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') fig, (axis1) = plt.subplots(1,1,figsize=(15,4)) sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,) df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4])) df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7])) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(10,5)) average_monthly_sales.plot(legend=True,marker='o',title="Average Sales") average_daily_sales = df_train.groupby('Date')['Sales'].mean() fig = plt.subplots(1, 1, sharex=True, figsize=(25, 8)) average_daily_sales.plot(title='Average Daily Sales')
code
1006492/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/train.csv')
df_store = pd.read_csv('../input/store.csv')
df_test = pd.read_csv('../input/test.csv')
code
1006492/cell_31
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') fig, (axis1) = plt.subplots(1,1,figsize=(15,4)) sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,) df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4])) df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7])) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(10,5)) average_monthly_sales.plot(legend=True,marker='o',title="Average Sales") average_daily_sales = df_train.groupby('Date')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_sales.plot(title="Average Daily Sales") average_daily_visits = df_train.groupby('Date')["Customers"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_visits.plot(title="Average Daily Visits") fig, (axis1,axis2) = plt.subplots(2,1,sharex=True,figsize=(15,8)) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() # plot average sales over time (year-month) ax1 = average_monthly_sales.plot(legend = False, ax = axis1, marker = 'o', title = "Avg. Monthly Sales") ax1.set_xticks(range(len(average_monthly_sales))) ax1.set_xticklabels(average_monthly_sales.index.tolist(), rotation=90) average_monthly_sales_change = df_train.groupby('Month')["Sales"].sum().pct_change() # plot precent change for sales over time(year-month) ax2 = average_monthly_sales_change.plot(legend = False, ax = axis2, marker = 'o', colormap = "summer", title = "% Change Monthly Sales") df_train.StateHoliday.unique() df_train['StateHoliday'] = df_train['StateHoliday'].replace(0, '0') df_train.StateHoliday.unique() df_train['HolidayBin'] = df_train['StateHoliday'].map({'0': 0, 'a': 1, 'b': 1, 'c': 1}) df_train.HolidayBin.unique()
code
1006492/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') fig, (axis1) = plt.subplots(1,1,figsize=(15,4)) sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,) df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4])) df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7])) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(10,5)) average_monthly_sales.plot(legend=True,marker='o',title="Average Sales") average_daily_sales = df_train.groupby('Date')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_sales.plot(title="Average Daily Sales") average_daily_visits = df_train.groupby('Date')["Customers"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_visits.plot(title="Average Daily Visits") fig, (axis1,axis2) = plt.subplots(2,1,sharex=True,figsize=(15,8)) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() # plot average sales over time (year-month) ax1 = average_monthly_sales.plot(legend = False, ax = axis1, marker = 'o', title = "Avg. Monthly Sales") ax1.set_xticks(range(len(average_monthly_sales))) ax1.set_xticklabels(average_monthly_sales.index.tolist(), rotation=90) average_monthly_sales_change = df_train.groupby('Month')["Sales"].sum().pct_change() # plot precent change for sales over time(year-month) ax2 = average_monthly_sales_change.plot(legend = False, ax = axis2, marker = 'o', colormap = "summer", title = "% Change Monthly Sales") fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,4)) sns.barplot(x ='Month', y ='Sales', data = df_train, ax=axis1) sns.barplot(x ='Month', y ='Customers', data = df_train, ax=axis2) fig, (axis1, axis2) = plt.subplots(1, 2, figsize=(15, 4)) sns.barplot(x='DayOfWeek', y='Sales', data=df_train, order=[1, 2, 3, 4, 5, 6, 7], ax=axis1) sns.barplot(x='DayOfWeek', y='Customers', data=df_train, order=[1, 2, 3, 4, 5, 6, 7], ax=axis2)
code
1006492/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df_train = pd.read_csv('../input/train.csv')
df_store = pd.read_csv('../input/store.csv')
df_test = pd.read_csv('../input/test.csv')

fig, (axis1) = plt.subplots(1, 1, figsize=(15, 4))
sns.countplot(x='Open', hue='DayOfWeek', data=df_train)

df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4]))
df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7]))

average_monthly_sales = df_train.groupby('Month')['Sales'].mean()
fig = plt.subplots(1, 1, sharex=True, figsize=(10, 5))
average_monthly_sales.plot(legend=True, marker='o', title='Average Sales')
code
1006492/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df_train = pd.read_csv('../input/train.csv') df_store = pd.read_csv('../input/store.csv') df_test = pd.read_csv('../input/test.csv') fig, (axis1) = plt.subplots(1,1,figsize=(15,4)) sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,) df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4])) df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7])) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(10,5)) average_monthly_sales.plot(legend=True,marker='o',title="Average Sales") average_daily_sales = df_train.groupby('Date')["Sales"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_sales.plot(title="Average Daily Sales") average_daily_visits = df_train.groupby('Date')["Customers"].mean() fig = plt.subplots(1,1,sharex=True,figsize=(25,8)) average_daily_visits.plot(title="Average Daily Visits") fig, (axis1,axis2) = plt.subplots(2,1,sharex=True,figsize=(15,8)) average_monthly_sales = df_train.groupby('Month')["Sales"].mean() # plot average sales over time (year-month) ax1 = average_monthly_sales.plot(legend = False, ax = axis1, marker = 'o', title = "Avg. Monthly Sales") ax1.set_xticks(range(len(average_monthly_sales))) ax1.set_xticklabels(average_monthly_sales.index.tolist(), rotation=90) average_monthly_sales_change = df_train.groupby('Month')["Sales"].sum().pct_change() # plot precent change for sales over time(year-month) ax2 = average_monthly_sales_change.plot(legend = False, ax = axis2, marker = 'o', colormap = "summer", title = "% Change Monthly Sales") fig, (axis1, axis2) = plt.subplots(1, 2, figsize=(15, 4)) sns.barplot(x='Month', y='Sales', data=df_train, ax=axis1) sns.barplot(x='Month', y='Customers', data=df_train, ax=axis2)
code
1006492/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_store = pd.read_csv('../input/store.csv')
df_test = pd.read_csv('../input/test.csv')
fig, (axis1) = plt.subplots(1,1,figsize=(15,4))
sns.countplot(x = 'Open', hue = 'DayOfWeek', data = df_train,)
df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4]))
df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7]))
average_monthly_sales = df_train.groupby('Month')["Sales"].mean()
fig = plt.subplots(1,1,sharex=True,figsize=(10,5))
average_monthly_sales.plot(legend=True,marker='o',title="Average Sales")
average_daily_sales = df_train.groupby('Date')["Sales"].mean()
fig = plt.subplots(1,1,sharex=True,figsize=(25,8))
average_daily_sales.plot(title="Average Daily Sales")
average_daily_visits = df_train.groupby('Date')["Customers"].mean()
fig = plt.subplots(1,1,sharex=True,figsize=(25,8))
average_daily_visits.plot(title="Average Daily Visits")
fig, (axis1,axis2) = plt.subplots(2,1,sharex=True,figsize=(15,8))
average_monthly_sales = df_train.groupby('Month')["Sales"].mean()
# plot average sales over time (year-month)
ax1 = average_monthly_sales.plot(legend = False, ax = axis1, marker = 'o', title = "Avg. Monthly Sales")
ax1.set_xticks(range(len(average_monthly_sales)))
ax1.set_xticklabels(average_monthly_sales.index.tolist(), rotation=90)
average_monthly_sales_change = df_train.groupby('Month')["Sales"].sum().pct_change()
# plot precent change for sales over time(year-month)
ax2 = average_monthly_sales_change.plot(legend = False, ax = axis2, marker = 'o', colormap = "summer", title = "% Change Monthly Sales")
df_train.StateHoliday.unique()
code
1006492/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_store = pd.read_csv('../input/store.csv')
df_test = pd.read_csv('../input/test.csv')
df_train['Year'] = df_train['Date'].apply(lambda x: int(x[:4]))
df_train['Month'] = df_train['Date'].apply(lambda x: int(x[5:7]))
df_train.Month.head()
code
1006492/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_store = pd.read_csv('../input/store.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.head()
code
332880/cell_4
[ "text_plain_output_1.png" ]
from Bio import pairwise2
alignments = pairwise2.align.globalxx('ACCGT', 'ACG')
for a in pairwise2.align.globalxx('ACCGT', 'ACG'):
    print(pairwise2.format_alignment(*a))
code
332880/cell_6
[ "text_plain_output_1.png" ]
from Bio import pairwise2
alignments = pairwise2.align.globalxx('ACCGT', 'ACG')
for a in pairwise2.align.globalmx('ACCGT', 'ACG', 2, -1):
    print(pairwise2.format_alignment(*a))
code
332880/cell_11
[ "text_plain_output_1.png" ]
from Bio import SeqIO
from Bio import SeqIO
count = 0
sequences = []
for seq_record in SeqIO.parse('../input/genome.fa', 'fasta'):
    if count < 6:
        sequences.append(seq_record)
        count = count + 1
chr2L = sequences[0].seq
chr2R = sequences[1].seq
chr3L = sequences[2].seq
chr3R = sequences[3].seq
chr4 = sequences[4].seq
chrM = sequences[5].seq
count = 0
mrna_sequences = []
for seq_record in SeqIO.parse('../input/mrna-genbank.fa', 'fasta'):
    if count < 6:
        mrna_sequences.append(seq_record)
        print('Id: ' + seq_record.id + ' \t ' + 'Length: ' + str('{:,d}'.format(len(seq_record))))
        print(repr(seq_record.seq) + '\n')
        count = count + 1
mRNA1 = mrna_sequences[0].seq
mRNA2 = mrna_sequences[1].seq
mRNA3 = mrna_sequences[2].seq
mRNA4 = mrna_sequences[3].seq
mRNA5 = mrna_sequences[4].seq
mRNA1 = sequences[5].seq
code
332880/cell_7
[ "text_plain_output_1.png" ]
from Bio import pairwise2
alignments = pairwise2.align.globalxx('ACCGT', 'ACG')
for a in pairwise2.align.globalms('ACCGT', 'ACG', 2, -1, -0.5, -0.1):
    print(pairwise2.format_alignment(*a))
code
332880/cell_8
[ "text_plain_output_1.png" ]
from Bio import pairwise2
alignments = pairwise2.align.globalxx('ACCGT', 'ACG')
from Bio.SubsMat import MatrixInfo as matlist
matrix = matlist.blosum62
for a in pairwise2.align.globaldx('KEVLA', 'EVL', matrix):
    print(pairwise2.format_alignment(*a))
code
332880/cell_10
[ "text_plain_output_1.png" ]
from Bio import SeqIO
from Bio import SeqIO
count = 0
sequences = []
for seq_record in SeqIO.parse('../input/genome.fa', 'fasta'):
    if count < 6:
        sequences.append(seq_record)
        print('Id: ' + seq_record.id + ' \t ' + 'Length: ' + str('{:,d}'.format(len(seq_record))))
        print(repr(seq_record.seq) + '\n')
        count = count + 1
chr2L = sequences[0].seq
chr2R = sequences[1].seq
chr3L = sequences[2].seq
chr3R = sequences[3].seq
chr4 = sequences[4].seq
chrM = sequences[5].seq
code
332880/cell_5
[ "text_plain_output_1.png" ]
from Bio import pairwise2
alignments = pairwise2.align.globalxx('ACCGT', 'ACG')
for a in pairwise2.align.localxx('ACCGT', 'ACG'):
    print(pairwise2.format_alignment(*a))
code
104115847/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd #Data manipulation
path = '../input/'
df = pd.read_csv(path + 'insurance.csv')
print('\nNumber of rows and columns in the data set: ', df.shape)
print('')
df.head()
code
18112276/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
train = train_df.drop('label', axis=1)
target = train_df['label']
sns.countplot(target)
code
18112276/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
train = train_df.drop('label', axis=1)
target = train_df['label']
train.head()
code
18112276/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
train_df.head()
code
18112276/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
18112276/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.head()
code
18112276/cell_3
[ "text_html_output_1.png" ]
import keras
import keras
from keras import utils
from keras import models
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras import optimizers
print('Keras version: {}'.format(keras.__version__))
code
18112276/cell_10
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
train = train_df.drop('label', axis=1)
target = train_df['label']
target.head()
code
18112276/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
train = train_df.drop('label', axis=1)
target = train_df['label']
sns.distplot(target)
code
1008978/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
import itertools
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment='center', color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()
def show_data(cm, title='Confusion matrix', print_res=0):
    tp = cm[1, 1]
    fn = cm[1, 0]
    fp = cm[0, 1]
    tn = cm[0, 0]
    return (tp / (tp + fp), tp / (tp + fn), fp / (fp + tn))
df = pd.read_csv('..input/creditcard.csv')
print(df.head(3))
y = np.array(df.Class.tolist())
df = df.drop('Class', 1)
df = df.drop('Time', 1)
df['Amount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1, 1))
X = np.array(df.as_matrix())
code
2017559/cell_13
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file = pd.read_csv('../input/train.csv')
test_file = pd.read_csv('../input/test.csv')
train_features = train_file[['Pclass', 'Age', 'Sex']].values
target = train_file['Survived'].values
model = DecisionTreeClassifier()
x_train, x_test, y_train, y_test = train_test_split(train_features, target)
model.fit(x_train, y_train)
y_predict = model.predict(x_test)
test_features = test_file[['Pclass', 'Age', 'Sex']].values
test_answer = model.predict(test_features)
PassengerId = np.array(test_file['PassengerId']).astype(int)
solution = pd.DataFrame(test_answer, PassengerId, columns=['Survived'])
solution.shape
code
2017559/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file = pd.read_csv('../input/train.csv')
test_file = pd.read_csv('../input/test.csv')
train_features = train_file[['Pclass', 'Age', 'Sex']].values
target = train_file['Survived'].values
model = DecisionTreeClassifier()
x_train, x_test, y_train, y_test = train_test_split(train_features, target)
model.fit(x_train, y_train)
y_predict = model.predict(x_test)
accuracy_score(y_test, y_predict)
code
2017559/cell_7
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
code
2017559/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_file = pd.read_csv('../input/train.csv')
test_file = pd.read_csv('../input/test.csv')
print(train_file.columns)
print(test_file.columns)
code
129021770/cell_9
[ "text_html_output_1.png" ]
from catboost import CatBoostClassifier
import pickle
import torch
test_X = pickle.load(open('/kaggle/input/embedder/test_embedding', 'rb'))
multilayerperceptron = torch.load('/kaggle/input/nlptrain/mlp.pt', map_location=torch.device('cpu'))
test_X_tensor = torch.tensor(test_X)
multilayerperceptronprediction_proba = multilayerperceptron(test_X_tensor).detach().numpy()
catboostclassifier = CatBoostClassifier().load_model('/kaggle/input/nlptrain/catboost_model.bin')
catboostclassifierprediction_proba = catboostclassifier.predict_proba(test_X)
prediction_proba = multilayerperceptronprediction_proba + catboostclassifierprediction_proba
prediction = prediction_proba.argmax(1)
((multilayerperceptronprediction_proba.argmax(1) == prediction).mean(), (catboostclassifierprediction_proba.argmax(1) == prediction).mean())
code
129021770/cell_4
[ "text_plain_output_1.png" ]
import pickle
import torch
test_X = pickle.load(open('/kaggle/input/embedder/test_embedding', 'rb'))
multilayerperceptron = torch.load('/kaggle/input/nlptrain/mlp.pt', map_location=torch.device('cpu'))
test_X_tensor = torch.tensor(test_X)
multilayerperceptronprediction_proba = multilayerperceptron(test_X_tensor).detach().numpy()
code
129021770/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import torch
import catboost
import pickle
from catboost import CatBoostClassifier
code
129021770/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from catboost import CatBoostClassifier
import pickle
import torch
test_X = pickle.load(open('/kaggle/input/embedder/test_embedding', 'rb'))
multilayerperceptron = torch.load('/kaggle/input/nlptrain/mlp.pt', map_location=torch.device('cpu'))
test_X_tensor = torch.tensor(test_X)
multilayerperceptronprediction_proba = multilayerperceptron(test_X_tensor).detach().numpy()
catboostclassifier = CatBoostClassifier().load_model('/kaggle/input/nlptrain/catboost_model.bin')
catboostclassifierprediction_proba = catboostclassifier.predict_proba(test_X)
prediction_proba = multilayerperceptronprediction_proba + catboostclassifierprediction_proba
prediction = prediction_proba.argmax(1)
prediction.mean()
code
129021770/cell_12
[ "text_plain_output_1.png" ]
from catboost import CatBoostClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle
import torch
test_X = pickle.load(open('/kaggle/input/embedder/test_embedding', 'rb'))
multilayerperceptron = torch.load('/kaggle/input/nlptrain/mlp.pt', map_location=torch.device('cpu'))
test_X_tensor = torch.tensor(test_X)
multilayerperceptronprediction_proba = multilayerperceptron(test_X_tensor).detach().numpy()
catboostclassifier = CatBoostClassifier().load_model('/kaggle/input/nlptrain/catboost_model.bin')
catboostclassifierprediction_proba = catboostclassifier.predict_proba(test_X)
prediction_proba = multilayerperceptronprediction_proba + catboostclassifierprediction_proba
prediction = prediction_proba.argmax(1)
prediction.mean()
submission = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
submission.target = prediction
submission.to_csv('submission.csv', index=False)
submission
code
105192343/cell_6
[ "text_plain_output_1.png" ]
sale_of_store1 = 123
sale_of_store2 = 456
differece_of_sale = sale_of_store1 + sale_of_store2
print('differece of sale', differece_of_sale)
code
105192343/cell_8
[ "text_plain_output_1.png" ]
average_coffee_sold = 128
no_of_branches = 56
total_coffee_sold = average_coffee_sold * no_of_branches
print('total coffee sold', total_coffee_sold)
code
105192343/cell_16
[ "text_plain_output_1.png" ]
r = 19
s = 45
t = r ** 45
print(t)
code
105192343/cell_3
[ "text_plain_output_1.png" ]
No_of_books_store1 = 100
No_of_books_store2 = 200
total_count_of_books = No_of_books_store1 + No_of_books_store2
print('total count of books is', total_count_of_books)
code
105192343/cell_14
[ "text_plain_output_1.png" ]
total_apple = 5890
no_of_people = 70
no_of_apple_to_each = total_apple / no_of_people
total_apple = 5890
no_of_people = 70
no_of_apple_reminded = total_apple % no_of_people
total_apple = 5890
no_of_people = 70
no_of_apple_to_each = total_apple // no_of_people
print('no of apple to each is', no_of_apple_to_each)
code
105192343/cell_10
[ "text_plain_output_1.png" ]
total_apple = 5890
no_of_people = 70
no_of_apple_to_each = total_apple / no_of_people
print('no of apple to each is', no_of_apple_to_each)
code