path: string (lengths 13 to 17)
screenshot_names: sequence (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string (1 class: code)
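Each record below pairs a notebook cell path with its rendered output images and the cell's source. As a minimal sketch of the record shape implied by the schema above (the loading mechanism is not specified here, so the record literal and the is_valid helper are hypothetical illustrations, not part of the dataset):

# Hypothetical record mirroring the schema above; field values are illustrative.
record = {
    'path': '128005164/cell_16',                      # '<kernel_id>/cell_<n>', 13-17 chars
    'screenshot_names': ['text_plain_output_1.png'],  # 1-873 output image names
    'code': 'import pandas as pd\n...',               # cell source, 0-40.4k chars
    'cell_type': 'code',                              # single class: every row is a code cell
}

def is_valid(rec):
    # Checks a record against the stated schema bounds.
    return (isinstance(rec['path'], str) and 13 <= len(rec['path']) <= 17
            and isinstance(rec['screenshot_names'], list)
            and 1 <= len(rec['screenshot_names']) <= 873
            and isinstance(rec['code'], str)
            and rec['cell_type'] == 'code')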
128005164/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
cat_columns = train.select_dtypes(exclude=['number']).columns.tolist()
cat_columns
train.head()
code
128005164/cell_24
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
cat_columns = train.select_dtypes(exclude=['number']).columns.tolist()
cat_columns
X = train.drop(['Survived', 'PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1)
y = train['Survived']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_pred_dt = dt.predict(X_test)
accuracy_score(y_test, y_pred_dt)
print(classification_report(y_test, y_pred_dt))
code
128005164/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
cat_columns = train.select_dtypes(exclude=['number']).columns.tolist()
cat_columns
code
128005164/cell_27
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
cat_columns = train.select_dtypes(exclude=['number']).columns.tolist()
cat_columns
X = train.drop(['Survived', 'PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1)
y = train['Survived']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
from sklearn.ensemble import RandomForestClassifier
rf_c = RandomForestClassifier(n_estimators=10, criterion='entropy')
rf_c.fit(X_train, y_train)
y_pred_rf_c = rf_c.predict(X_test)
accuracy_score(y_test, y_pred_rf_c)
print(classification_report(y_test, y_pred_rf_c))
code
128005164/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.info()
code
50238529/cell_25
[ "text_plain_output_1.png" ]
import lightgbm as lgb
import numpy as np
import pandas as pd
import shap
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
new_data = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Pclass': [1, 3], 'Age': [17, 20], 'FamilyMembers': [1, 0], 'Female': [1, 0]})
new_data
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)
predictions = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Log-odds': [3.362, -1.861]})
predictions['Probability of survival'] = np.exp(predictions['Log-odds']) / (1 + np.exp(predictions['Log-odds']))
predictions
lgbfit.predict(np.array(new_data[model_columns]))
alldata = pd.concat([train[model_columns], new_data[model_columns]]).reset_index(drop=True)
explainer = shap.TreeExplainer(lgbfit)
shap_values = explainer.shap_values(alldata)
print(explainer.expected_value)
print(np.exp(explainer.expected_value) / (1 + np.exp(explainer.expected_value)))
code
50238529/cell_34
[ "text_html_output_1.png" ]
import lightgbm as lgb
import numpy as np
import pandas as pd
import shap
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
new_data = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Pclass': [1, 3], 'Age': [17, 20], 'FamilyMembers': [1, 0], 'Female': [1, 0]})
new_data
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)
predictions = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Log-odds': [3.362, -1.861]})
predictions['Probability of survival'] = np.exp(predictions['Log-odds']) / (1 + np.exp(predictions['Log-odds']))
predictions
lgbfit.predict(np.array(new_data[model_columns]))
alldata = pd.concat([train[model_columns], new_data[model_columns]]).reset_index(drop=True)
explainer = shap.TreeExplainer(lgbfit)
shap_values = explainer.shap_values(alldata)
shap.initjs()
shap.dependence_plot('Pclass', shap_values[1], alldata[model_columns])
code
50238529/cell_23
[ "text_html_output_1.png" ]
import lightgbm as lgb
import numpy as np
import pandas as pd
import shap
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
new_data = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Pclass': [1, 3], 'Age': [17, 20], 'FamilyMembers': [1, 0], 'Female': [1, 0]})
new_data
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)
predictions = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Log-odds': [3.362, -1.861]})
predictions['Probability of survival'] = np.exp(predictions['Log-odds']) / (1 + np.exp(predictions['Log-odds']))
predictions
lgbfit.predict(np.array(new_data[model_columns]))
alldata = pd.concat([train[model_columns], new_data[model_columns]]).reset_index(drop=True)
explainer = shap.TreeExplainer(lgbfit)
shap_values = explainer.shap_values(alldata)
code
50238529/cell_29
[ "text_plain_output_1.png" ]
import lightgbm as lgb
import numpy as np
import pandas as pd
import shap
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
new_data = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Pclass': [1, 3], 'Age': [17, 20], 'FamilyMembers': [1, 0], 'Female': [1, 0]})
new_data
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)
predictions = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Log-odds': [3.362, -1.861]})
predictions['Probability of survival'] = np.exp(predictions['Log-odds']) / (1 + np.exp(predictions['Log-odds']))
predictions
lgbfit.predict(np.array(new_data[model_columns]))
alldata = pd.concat([train[model_columns], new_data[model_columns]]).reset_index(drop=True)
explainer = shap.TreeExplainer(lgbfit)
shap_values = explainer.shap_values(alldata)
shap.initjs()
shap.force_plot(explainer.expected_value[1], shap_values[1][892], alldata.iloc[892])
code
50238529/cell_7
[ "image_output_1.png" ]
import lightgbm as lgb
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)
code
50238529/cell_32
[ "text_html_output_2.png", "text_html_output_1.png" ]
import lightgbm as lgb
import numpy as np
import pandas as pd
import shap
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
new_data = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Pclass': [1, 3], 'Age': [17, 20], 'FamilyMembers': [1, 0], 'Female': [1, 0]})
new_data
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)
predictions = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Log-odds': [3.362, -1.861]})
predictions['Probability of survival'] = np.exp(predictions['Log-odds']) / (1 + np.exp(predictions['Log-odds']))
predictions
lgbfit.predict(np.array(new_data[model_columns]))
alldata = pd.concat([train[model_columns], new_data[model_columns]]).reset_index(drop=True)
explainer = shap.TreeExplainer(lgbfit)
shap_values = explainer.shap_values(alldata)
shap.initjs()
shap.force_plot(explainer.expected_value[1], shap_values[1], alldata[model_columns])
code
50238529/cell_35
[ "text_html_output_1.png" ]
import lightgbm as lgb
import numpy as np
import pandas as pd
import shap
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
new_data = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Pclass': [1, 3], 'Age': [17, 20], 'FamilyMembers': [1, 0], 'Female': [1, 0]})
new_data
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)
predictions = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Log-odds': [3.362, -1.861]})
predictions['Probability of survival'] = np.exp(predictions['Log-odds']) / (1 + np.exp(predictions['Log-odds']))
predictions
lgbfit.predict(np.array(new_data[model_columns]))
alldata = pd.concat([train[model_columns], new_data[model_columns]]).reset_index(drop=True)
explainer = shap.TreeExplainer(lgbfit)
shap_values = explainer.shap_values(alldata)
shap.initjs()
shap.dependence_plot('Female', shap_values[1], alldata[model_columns])
code
50238529/cell_10
[ "text_html_output_1.png" ]
import graphviz as graphviz
import lightgbm as lgb
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)

def get_a_graph(tree_index=0):
    gv1 = lgb.create_tree_digraph(lgbfit, tree_index=tree_index, show_info='data_percentage')
    gv1s = gv1.source.replace('Column_0', 'Female').replace('Column_1', 'Pclass').replace('Column_2', 'Age').replace('Column_3', 'FamilyMembers')
    graph = graphviz.Source(gv1s)
    return graph
get_a_graph(tree_index=0)
code
50238529/cell_27
[ "application_vnd.jupyter.stderr_output_1.png" ]
import lightgbm as lgb
import numpy as np
import pandas as pd
import shap
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
new_data = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Pclass': [1, 3], 'Age': [17, 20], 'FamilyMembers': [1, 0], 'Female': [1, 0]})
new_data
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)
predictions = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Log-odds': [3.362, -1.861]})
predictions['Probability of survival'] = np.exp(predictions['Log-odds']) / (1 + np.exp(predictions['Log-odds']))
predictions
lgbfit.predict(np.array(new_data[model_columns]))
alldata = pd.concat([train[model_columns], new_data[model_columns]]).reset_index(drop=True)
explainer = shap.TreeExplainer(lgbfit)
shap_values = explainer.shap_values(alldata)
shap.initjs()
shap.force_plot(explainer.expected_value[1], shap_values[1][891], alldata.iloc[891])
code
50238529/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import graphviz as graphviz
import lightgbm as lgb
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)

def get_a_graph(tree_index=0):
    gv1 = lgb.create_tree_digraph(lgbfit, tree_index=tree_index, show_info='data_percentage')
    gv1s = gv1.source.replace('Column_0', 'Female').replace('Column_1', 'Pclass').replace('Column_2', 'Age').replace('Column_3', 'FamilyMembers')
    graph = graphviz.Source(gv1s)
    return graph
get_a_graph(tree_index=1)
code
50238529/cell_5
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
new_data = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Pclass': [1, 3], 'Age': [17, 20], 'FamilyMembers': [1, 0], 'Female': [1, 0]})
new_data
code
50238529/cell_36
[ "image_output_1.png" ]
import lightgbm as lgb
import numpy as np
import pandas as pd
import shap
import numpy as np
import pandas as pd
import lightgbm as lgb
import graphviz as graphviz
import shap
train = pd.read_csv('../input/titanic/train.csv')
train['FamilyMembers'] = train['SibSp'] + train['Parch']
train['Female'] = train['Sex'].map({'male': 0, 'female': 1}).astype(np.int8)
train['Pclass_cat'] = (train['Pclass'] - 1).astype(np.int8)
train['Adult'] = (train['Age'] > 16).astype(np.int8)
train['Adult'].values[train['Age'].isna()] = 1
train['Age'] = train.groupby(['Pclass', 'Female'])['Age'].transform(lambda x: x.fillna(x.median()))
new_data = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Pclass': [1, 3], 'Age': [17, 20], 'FamilyMembers': [1, 0], 'Female': [1, 0]})
new_data
continuous_features = ['Pclass', 'Age', 'FamilyMembers']
discrete_features = ['Female']
model_columns = discrete_features + continuous_features
ids_of_categorical = [0]
X = np.array(train[model_columns])
y = np.array(train['Survived']).flatten()
dtrain = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'metric': 'binary_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'seed': 538, 'learning_rate': 1, 'num_leaves': 4, 'feature_fraction': 1.0, 'bagging_fraction': 1.0, 'bagging_freq': 15, 'min_child_samples': 5}
lgbfit = lgb.train(params, dtrain, categorical_feature=ids_of_categorical, verbose_eval=True, num_boost_round=3)
predictions = pd.DataFrame({'Person': ['Rose', 'Jack'], 'Log-odds': [3.362, -1.861]})
predictions['Probability of survival'] = np.exp(predictions['Log-odds']) / (1 + np.exp(predictions['Log-odds']))
predictions
lgbfit.predict(np.array(new_data[model_columns]))
alldata = pd.concat([train[model_columns], new_data[model_columns]]).reset_index(drop=True)
explainer = shap.TreeExplainer(lgbfit)
shap_values = explainer.shap_values(alldata)
shap.initjs()
shap.dependence_plot('Age', shap_values[1], alldata[model_columns])
code
90137326/cell_11
[ "text_plain_output_1.png" ]
from wikipedia.exceptions import PageError
import wikipedia
topic_list = ['Application Security', 'Backup Business Continuity and Recovery', 'Change Control', 'Communication Security', 'Cryptography', 'Encryption and Key Management', 'Data Security', 'Endpoint Security', 'General Security', 'Governance', 'Risk and Compliance', 'Human Centric Security', 'Identity and Access Management', 'Infrastructure and Virtualization Security', 'Mobile Security', 'Network Security', 'Physical and Facility Security', 'Security Operations Center', 'Forensics and Incident Response', 'Threat and Vulnerability Management', 'Web Security']
result = []
count = 0

def create_data(search_term):
    results_list = wikipedia.search(search_term, results=20)
    wiki_search_results = []
    for each_result in results_list:
        wiki_page_result = {}
        try:
            wiki_page_obj = wikipedia.page(each_result)
        except PageError:
            continue
        except wikipedia.DisambiguationError as e:
            s = 1
            p = wikipedia.page(s)
        wiki_page_result['title'] = wiki_page_obj.title
        wiki_page_result['content'] = wiki_page_obj.content
        wiki_page_result['url'] = wiki_page_obj.url
        wiki_page_result['links'] = wiki_page_obj.links
        wiki_search_results.append(wiki_page_result)
    return wiki_search_results

for index in topic_list:
    outcome = create_data(index)
    result.append(outcome)
    count += 1
print(len(result))
code
90137326/cell_19
[ "text_plain_output_1.png" ]
from wikipedia.exceptions import PageError
import pandas as pd
import wikipedia
topic_list = ['Application Security', 'Backup Business Continuity and Recovery', 'Change Control', 'Communication Security', 'Cryptography', 'Encryption and Key Management', 'Data Security', 'Endpoint Security', 'General Security', 'Governance', 'Risk and Compliance', 'Human Centric Security', 'Identity and Access Management', 'Infrastructure and Virtualization Security', 'Mobile Security', 'Network Security', 'Physical and Facility Security', 'Security Operations Center', 'Forensics and Incident Response', 'Threat and Vulnerability Management', 'Web Security']
result = []
count = 0

def create_data(search_term):
    results_list = wikipedia.search(search_term, results=20)
    wiki_search_results = []
    for each_result in results_list:
        wiki_page_result = {}
        try:
            wiki_page_obj = wikipedia.page(each_result)
        except PageError:
            continue
        except wikipedia.DisambiguationError as e:
            s = 1
            p = wikipedia.page(s)
        wiki_page_result['title'] = wiki_page_obj.title
        wiki_page_result['content'] = wiki_page_obj.content
        wiki_page_result['url'] = wiki_page_obj.url
        wiki_page_result['links'] = wiki_page_obj.links
        wiki_search_results.append(wiki_page_result)
    return wiki_search_results

for index in topic_list:
    outcome = create_data(index)
    result.append(outcome)
    count += 1
p = []
for i in result:
    for j in i:
        p.append(j)
final_df = pd.DataFrame(p)
final_df.shape
final_df.columns
final_df.title[0]
final_df.content[0]
code
90137326/cell_18
[ "text_plain_output_1.png" ]
from wikipedia.exceptions import PageError
import pandas as pd
import wikipedia
topic_list = ['Application Security', 'Backup Business Continuity and Recovery', 'Change Control', 'Communication Security', 'Cryptography', 'Encryption and Key Management', 'Data Security', 'Endpoint Security', 'General Security', 'Governance', 'Risk and Compliance', 'Human Centric Security', 'Identity and Access Management', 'Infrastructure and Virtualization Security', 'Mobile Security', 'Network Security', 'Physical and Facility Security', 'Security Operations Center', 'Forensics and Incident Response', 'Threat and Vulnerability Management', 'Web Security']
result = []
count = 0

def create_data(search_term):
    results_list = wikipedia.search(search_term, results=20)
    wiki_search_results = []
    for each_result in results_list:
        wiki_page_result = {}
        try:
            wiki_page_obj = wikipedia.page(each_result)
        except PageError:
            continue
        except wikipedia.DisambiguationError as e:
            s = 1
            p = wikipedia.page(s)
        wiki_page_result['title'] = wiki_page_obj.title
        wiki_page_result['content'] = wiki_page_obj.content
        wiki_page_result['url'] = wiki_page_obj.url
        wiki_page_result['links'] = wiki_page_obj.links
        wiki_search_results.append(wiki_page_result)
    return wiki_search_results

for index in topic_list:
    outcome = create_data(index)
    result.append(outcome)
    count += 1
p = []
for i in result:
    for j in i:
        p.append(j)
final_df = pd.DataFrame(p)
final_df.shape
final_df.columns
final_df.title[0]
code
90137326/cell_15
[ "text_plain_output_1.png" ]
from wikipedia.exceptions import PageError
import pandas as pd
import wikipedia
topic_list = ['Application Security', 'Backup Business Continuity and Recovery', 'Change Control', 'Communication Security', 'Cryptography', 'Encryption and Key Management', 'Data Security', 'Endpoint Security', 'General Security', 'Governance', 'Risk and Compliance', 'Human Centric Security', 'Identity and Access Management', 'Infrastructure and Virtualization Security', 'Mobile Security', 'Network Security', 'Physical and Facility Security', 'Security Operations Center', 'Forensics and Incident Response', 'Threat and Vulnerability Management', 'Web Security']
result = []
count = 0

def create_data(search_term):
    results_list = wikipedia.search(search_term, results=20)
    wiki_search_results = []
    for each_result in results_list:
        wiki_page_result = {}
        try:
            wiki_page_obj = wikipedia.page(each_result)
        except PageError:
            continue
        except wikipedia.DisambiguationError as e:
            s = 1
            p = wikipedia.page(s)
        wiki_page_result['title'] = wiki_page_obj.title
        wiki_page_result['content'] = wiki_page_obj.content
        wiki_page_result['url'] = wiki_page_obj.url
        wiki_page_result['links'] = wiki_page_obj.links
        wiki_search_results.append(wiki_page_result)
    return wiki_search_results

for index in topic_list:
    outcome = create_data(index)
    result.append(outcome)
    count += 1
p = []
for i in result:
    for j in i:
        p.append(j)
final_df = pd.DataFrame(p)
final_df.shape
code
90137326/cell_16
[ "text_plain_output_1.png" ]
from wikipedia.exceptions import PageError
import pandas as pd
import wikipedia
topic_list = ['Application Security', 'Backup Business Continuity and Recovery', 'Change Control', 'Communication Security', 'Cryptography', 'Encryption and Key Management', 'Data Security', 'Endpoint Security', 'General Security', 'Governance', 'Risk and Compliance', 'Human Centric Security', 'Identity and Access Management', 'Infrastructure and Virtualization Security', 'Mobile Security', 'Network Security', 'Physical and Facility Security', 'Security Operations Center', 'Forensics and Incident Response', 'Threat and Vulnerability Management', 'Web Security']
result = []
count = 0

def create_data(search_term):
    results_list = wikipedia.search(search_term, results=20)
    wiki_search_results = []
    for each_result in results_list:
        wiki_page_result = {}
        try:
            wiki_page_obj = wikipedia.page(each_result)
        except PageError:
            continue
        except wikipedia.DisambiguationError as e:
            s = 1
            p = wikipedia.page(s)
        wiki_page_result['title'] = wiki_page_obj.title
        wiki_page_result['content'] = wiki_page_obj.content
        wiki_page_result['url'] = wiki_page_obj.url
        wiki_page_result['links'] = wiki_page_obj.links
        wiki_search_results.append(wiki_page_result)
    return wiki_search_results

for index in topic_list:
    outcome = create_data(index)
    result.append(outcome)
    count += 1
p = []
for i in result:
    for j in i:
        p.append(j)
final_df = pd.DataFrame(p)
final_df.shape
final_df.columns
code
90137326/cell_17
[ "text_html_output_1.png" ]
from wikipedia.exceptions import PageError
import pandas as pd
import wikipedia
topic_list = ['Application Security', 'Backup Business Continuity and Recovery', 'Change Control', 'Communication Security', 'Cryptography', 'Encryption and Key Management', 'Data Security', 'Endpoint Security', 'General Security', 'Governance', 'Risk and Compliance', 'Human Centric Security', 'Identity and Access Management', 'Infrastructure and Virtualization Security', 'Mobile Security', 'Network Security', 'Physical and Facility Security', 'Security Operations Center', 'Forensics and Incident Response', 'Threat and Vulnerability Management', 'Web Security']
result = []
count = 0

def create_data(search_term):
    results_list = wikipedia.search(search_term, results=20)
    wiki_search_results = []
    for each_result in results_list:
        wiki_page_result = {}
        try:
            wiki_page_obj = wikipedia.page(each_result)
        except PageError:
            continue
        except wikipedia.DisambiguationError as e:
            s = 1
            p = wikipedia.page(s)
        wiki_page_result['title'] = wiki_page_obj.title
        wiki_page_result['content'] = wiki_page_obj.content
        wiki_page_result['url'] = wiki_page_obj.url
        wiki_page_result['links'] = wiki_page_obj.links
        wiki_search_results.append(wiki_page_result)
    return wiki_search_results

for index in topic_list:
    outcome = create_data(index)
    result.append(outcome)
    count += 1
p = []
for i in result:
    for j in i:
        p.append(j)
final_df = pd.DataFrame(p)
final_df.shape
final_df.columns
final_df
code
90137326/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from wikipedia.exceptions import PageError
import wikipedia
topic_list = ['Application Security', 'Backup Business Continuity and Recovery', 'Change Control', 'Communication Security', 'Cryptography', 'Encryption and Key Management', 'Data Security', 'Endpoint Security', 'General Security', 'Governance', 'Risk and Compliance', 'Human Centric Security', 'Identity and Access Management', 'Infrastructure and Virtualization Security', 'Mobile Security', 'Network Security', 'Physical and Facility Security', 'Security Operations Center', 'Forensics and Incident Response', 'Threat and Vulnerability Management', 'Web Security']
result = []
count = 0

def create_data(search_term):
    results_list = wikipedia.search(search_term, results=20)
    wiki_search_results = []
    for each_result in results_list:
        wiki_page_result = {}
        try:
            wiki_page_obj = wikipedia.page(each_result)
        except PageError:
            continue
        except wikipedia.DisambiguationError as e:
            s = 1
            p = wikipedia.page(s)
        wiki_page_result['title'] = wiki_page_obj.title
        wiki_page_result['content'] = wiki_page_obj.content
        wiki_page_result['url'] = wiki_page_obj.url
        wiki_page_result['links'] = wiki_page_obj.links
        wiki_search_results.append(wiki_page_result)
    return wiki_search_results

for index in topic_list:
    outcome = create_data(index)
    result.append(outcome)
    count += 1
code
128009779/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.isnull().sum()
work_year_values = df['work_year'].value_counts()
exp_level_values = df['experience_level'].value_counts()
empl_type_values = df['employment_type'].value_counts()
most_frequent_titles = df['job_title'].value_counts().sort_values(ascending=False)
plt.xticks(rotation=45)
sns.displot(df['salary_in_usd']).set(title='Distribution of the salaries *in US dollars*')
plt.show()
code
128009779/cell_4
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
code
128009779/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.isnull().sum()
work_year_values = df['work_year'].value_counts()
exp_level_values = df['experience_level'].value_counts()
empl_type_values = df['employment_type'].value_counts()
most_frequent_titles = df['job_title'].value_counts().sort_values(ascending=False)
plt.xticks(rotation=45)
salary_curr_values = df['salary_currency'].value_counts()
most_frequent_residences = df['employee_residence'].value_counts().sort_values(ascending=False)
most_frequent_residences[:10].plot(kind='bar')
plt.xticks(rotation=45)
plt.ylabel('Count')
plt.xlabel('Employee residence')
plt.title('Most frequent employees residences')
plt.show()
code
128009779/cell_6
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.info()
code
128009779/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.isnull().sum()
work_year_values = df['work_year'].value_counts()
exp_level_values = df['experience_level'].value_counts()
empl_type_values = df['employment_type'].value_counts()
most_frequent_titles = df['job_title'].value_counts().sort_values(ascending=False)
plt.xticks(rotation=45)
sns.displot(df['salary']).set(title='Distribution of the salaries')
plt.show()
code
128009779/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.isnull().sum()
work_year_values = df['work_year'].value_counts()
exp_level_values = df['experience_level'].value_counts()
empl_type_values = df['employment_type'].value_counts()
most_frequent_titles = df['job_title'].value_counts().sort_values(ascending=False)
most_frequent_titles[:10].plot(kind='bar')
plt.xticks(rotation=45)
plt.ylabel('Count')
plt.xlabel('Job title')
plt.title('Most frequent job titles')
plt.show()
code
128009779/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
sns.boxplot(df)
plt.title('Boxplot representation of the dataframe')
plt.show()
code
128009779/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.isnull().sum()
work_year_values = df['work_year'].value_counts()
plt.pie(work_year_values, labels=work_year_values.index, autopct='%.0f%%')
plt.title('Distribution of years of work')
plt.show()
code
128009779/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.isnull().sum()
work_year_values = df['work_year'].value_counts()
exp_level_values = df['experience_level'].value_counts()
plt.pie(exp_level_values, labels=exp_level_values.index, autopct='%.0f%%')
plt.title('Distribution of levels of experience')
plt.show()
code
128009779/cell_3
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
print(f'Shape of the dataframe: {df.shape}')
code
128009779/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.isnull().sum()
work_year_values = df['work_year'].value_counts()
exp_level_values = df['experience_level'].value_counts()
empl_type_values = df['employment_type'].value_counts()
plt.pie(empl_type_values, labels=empl_type_values.index, autopct='%.0f%%')
plt.title('Distribution of type of employment')
plt.show()
code
128009779/cell_22
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.isnull().sum()
work_year_values = df['work_year'].value_counts()
exp_level_values = df['experience_level'].value_counts()
empl_type_values = df['employment_type'].value_counts()
most_frequent_titles = df['job_title'].value_counts().sort_values(ascending=False)
plt.xticks(rotation=45)
salary_curr_values = df['salary_currency'].value_counts()
plt.pie(salary_curr_values, labels=salary_curr_values.index, autopct='%.0f%%')
plt.title('Distribution of the different currencies')
plt.show()
code
128009779/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df[df['salary'] > 30000000]
code
128009779/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.isnull().sum()
code
128009779/cell_5
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/ai-ml-data-salaries/salaries.csv')
df.sample(3)
df.describe()
code
50215202/cell_21
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from textblob import TextBlob
import matplotlib.dates as mdates
import matplotlib.pylab as plt
import nltk
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
array = {}
for c in dfpub['browsing_date']:
    if '1601' in c:
        continue
    today = c.split()[0]
    array[today] = 1 if today not in array else array[today] + 1
x = list(array.keys())
y = list(array.values())
df_plot = pd.DataFrame()
df_plot['x'] = x
df_plot['y'] = y
df_plot.index = x
ax = plt.gca()
ax.xaxis.set_major_locator(mdates.DayLocator(interval=13))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
plt.gcf().autofmt_xdate()
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(use_idf=False, norm='l1')
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
from textblob import TextBlob
import nltk
nltk.download('punkt')

def textblob_tokenizer(str_input):
    blob = TextBlob(str_input.lower())
    tokens = blob.words
    words = [token.stem() for token in tokens]
    return words
vec = CountVectorizer(tokenizer=textblob_tokenizer)
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
vec = TfidfVectorizer(tokenizer=textblob_tokenizer, stop_words='english', use_idf=True)
matrix = vec.fit_transform(titleCorpus)
df = pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
code
50215202/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    print(tags)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
code
50215202/cell_25
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from textblob import TextBlob
import matplotlib.dates as mdates
import matplotlib.pylab as plt
import nltk
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
array = {}
for c in dfpub['browsing_date']:
    if '1601' in c:
        continue
    today = c.split()[0]
    array[today] = 1 if today not in array else array[today] + 1
x = list(array.keys())
y = list(array.values())
df_plot = pd.DataFrame()
df_plot['x'] = x
df_plot['y'] = y
df_plot.index = x
ax = plt.gca()
ax.xaxis.set_major_locator(mdates.DayLocator(interval=13))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
plt.gcf().autofmt_xdate()
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(use_idf=False, norm='l1')
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
from textblob import TextBlob
import nltk
nltk.download('punkt')

def textblob_tokenizer(str_input):
    blob = TextBlob(str_input.lower())
    tokens = blob.words
    words = [token.stem() for token in tokens]
    return words
vec = CountVectorizer(tokenizer=textblob_tokenizer)
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
vec = TfidfVectorizer(tokenizer=textblob_tokenizer, stop_words='english', use_idf=True)
matrix = vec.fit_transform(titleCorpus)
df = pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
from sklearn.cluster import KMeans
number_of_clusters = 10
km = KMeans(n_clusters=number_of_clusters)
km.fit(matrix)
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vec.get_feature_names()
for i in range(number_of_clusters):
    top_ten_words = [terms[ind] for ind in order_centroids[i, :5]]
results = pd.DataFrame({'corpus': titleCorpus, 'category': km.labels_})
results.sort_values('category')
for k in results.sort_values('category').values:
    print(k[1], ' --- ', k[0])
code
50215202/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
dfpub
code
50215202/cell_23
[ "text_html_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from textblob import TextBlob
import matplotlib.dates as mdates
import matplotlib.pylab as plt
import nltk
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
array = {}
for c in dfpub['browsing_date']:
    if '1601' in c:
        continue
    today = c.split()[0]
    array[today] = 1 if today not in array else array[today] + 1
x = list(array.keys())
y = list(array.values())
df_plot = pd.DataFrame()
df_plot['x'] = x
df_plot['y'] = y
df_plot.index = x
ax = plt.gca()
ax.xaxis.set_major_locator(mdates.DayLocator(interval=13))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
plt.gcf().autofmt_xdate()
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(use_idf=False, norm='l1')
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
from textblob import TextBlob
import nltk
nltk.download('punkt')

def textblob_tokenizer(str_input):
    blob = TextBlob(str_input.lower())
    tokens = blob.words
    words = [token.stem() for token in tokens]
    return words
vec = CountVectorizer(tokenizer=textblob_tokenizer)
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
vec = TfidfVectorizer(tokenizer=textblob_tokenizer, stop_words='english', use_idf=True)
matrix = vec.fit_transform(titleCorpus)
df = pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
from sklearn.cluster import KMeans
number_of_clusters = 10
km = KMeans(n_clusters=number_of_clusters)
km.fit(matrix)
print('Top terms per cluster:')
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vec.get_feature_names()
for i in range(number_of_clusters):
    top_ten_words = [terms[ind] for ind in order_centroids[i, :5]]
    print('Cluster {}: {}'.format(i, ' '.join(top_ten_words)))
code
50215202/cell_20
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from textblob import TextBlob
import matplotlib.dates as mdates
import matplotlib.pylab as plt
import nltk
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
array = {}
for c in dfpub['browsing_date']:
    if '1601' in c:
        continue
    today = c.split()[0]
    array[today] = 1 if today not in array else array[today] + 1
x = list(array.keys())
y = list(array.values())
df_plot = pd.DataFrame()
df_plot['x'] = x
df_plot['y'] = y
df_plot.index = x
ax = plt.gca()
ax.xaxis.set_major_locator(mdates.DayLocator(interval=13))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
plt.gcf().autofmt_xdate()
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(use_idf=False, norm='l1')
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
from textblob import TextBlob
import nltk
nltk.download('punkt')

def textblob_tokenizer(str_input):
    blob = TextBlob(str_input.lower())
    tokens = blob.words
    words = [token.stem() for token in tokens]
    return words
vec = CountVectorizer(tokenizer=textblob_tokenizer)
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
code
50215202/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
dfpub['browsing_date']
code
50215202/cell_19
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.dates as mdates
import matplotlib.pylab as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
array = {}
for c in dfpub['browsing_date']:
    if '1601' in c:
        continue
    today = c.split()[0]
    array[today] = 1 if today not in array else array[today] + 1
x = list(array.keys())
y = list(array.values())
df_plot = pd.DataFrame()
df_plot['x'] = x
df_plot['y'] = y
df_plot.index = x
ax = plt.gca()
ax.xaxis.set_major_locator(mdates.DayLocator(interval=13))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
plt.gcf().autofmt_xdate()
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(use_idf=False, norm='l1')
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
code
50215202/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50215202/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
array = {}
for c in dfpub['browsing_date']:
    print(c)
    if '1601' in c:
        continue
    today = c.split()[0]
    array[today] = 1 if today not in array else array[today] + 1
code
50215202/cell_18
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(titleCorpus)
print('Count: {0}'.format(len(vectorizer.get_feature_names())))
vectorizer.get_feature_names()[1000:1030]
code
50215202/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
array = {}
for c in dfpub['browsing_date']:
    if '1601' in c:
        continue
    today = c.split()[0]
    array[today] = 1 if today not in array else array[today] + 1
x = list(array.keys())
y = list(array.values())
print(len(x))
print(len(y))
df_plot = pd.DataFrame()
df_plot['x'] = x
df_plot['y'] = y
df_plot.index = x
code
50215202/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
len(failedConvert)
code
50215202/cell_16
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
len(titleCorpus)
code
50215202/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
len(wordListCorpus)
code
50215202/cell_22
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from textblob import TextBlob
import matplotlib.dates as mdates
import matplotlib.pylab as plt
import nltk
import nltk
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
array = {}
for c in dfpub['browsing_date']:
    if '1601' in c:
        continue
    today = c.split()[0]
    array[today] = 1 if today not in array else array[today] + 1
x = list(array.keys())
y = list(array.values())
df_plot = pd.DataFrame()
df_plot['x'] = x
df_plot['y'] = y
df_plot.index = x
ax = plt.gca()
ax.xaxis.set_major_locator(mdates.DayLocator(interval=13))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
plt.gcf().autofmt_xdate()
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(use_idf=False, norm='l1')
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
from textblob import TextBlob
import nltk
nltk.download('punkt')

def textblob_tokenizer(str_input):
    blob = TextBlob(str_input.lower())
    tokens = blob.words
    words = [token.stem() for token in tokens]
    return words
vec = CountVectorizer(tokenizer=textblob_tokenizer)
matrix = vec.fit_transform(titleCorpus)
pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
vec = TfidfVectorizer(tokenizer=textblob_tokenizer, stop_words='english', use_idf=True)
matrix = vec.fit_transform(titleCorpus)
df = pd.DataFrame(matrix.toarray(), columns=vec.get_feature_names())
from sklearn.cluster import KMeans
number_of_clusters = 10
km = KMeans(n_clusters=number_of_clusters)
km.fit(matrix)
code
50215202/cell_10
[ "text_html_output_1.png" ]
import matplotlib.dates as mdates
import matplotlib.pylab as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
array = {}
for c in dfpub['browsing_date']:
    if '1601' in c:
        continue
    today = c.split()[0]
    array[today] = 1 if today not in array else array[today] + 1
x = list(array.keys())
y = list(array.values())
df_plot = pd.DataFrame()
df_plot['x'] = x
df_plot['y'] = y
df_plot.index = x
plt.figure(figsize=(15, 6))
plt.bar(pd.to_datetime(df_plot['x']), df_plot['y'])
ax = plt.gca()
ax.xaxis.set_major_locator(mdates.DayLocator(interval=13))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
plt.gcf().autofmt_xdate()
plt.show()
code
50215202/cell_27
[ "application_vnd.jupyter.stderr_output_1.png" ]
from gensim.models import word2vec
from nltk.corpus import stopwords
import io
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dfpub = pd.read_csv('/kaggle/input/academic-publications-and-journals/wiki_query_22_12_2020.csv', encoding='iso-8859-1')
array = {}
for c in dfpub['browsing_date']:
    if '1601' in c:
        continue
    today = c.split()[0]
    array[today] = 1 if today not in array else array[today] + 1
x = list(array.keys())
y = list(array.values())
df_plot = pd.DataFrame()
df_plot['x'] = x
df_plot['y'] = y
df_plot.index = x
wordListCorpus = []
titleCorpus = []
failedConvert = []
for row in dfpub['tags'].values:
    try:
        tags = row.split(',')
        title = ' '.join(row.split(','))
    except AttributeError:
        failedConvert.append(row)
        continue  # rows without tags are skipped, so stale values are never appended
    for k in tags:
        wordListCorpus.append(k)
    titleCorpus.append(title)
len(wordListCorpus)
from gensim.models import word2vec
from gensim.test.utils import common_texts, get_tmpfile
tokenized_sentences = [[j.lower() for j in st.split() if j not in stopwords.words('english')] for st in titleCorpus]
model = word2vec.Word2Vec(tokenized_sentences, min_count=1)
model.save('word2vec.model')
import io
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word in model.wv.vocab:
    out_m.write(word + '\n')
    out_v.write('\t'.join([str(x) for x in model[word]]) + '\n')
out_v.close()
out_m.close()
code
16154375/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
dfs = [train_df, potential_energy_df, mulliken_charges_df, scalar_coupling_contributions_df, magnetic_shielding_tensors_df, dipole_moments_df, structure_df, test_df]
names = ['train_df', 'potential_energy_df', 'mulliken_charges_df', 'scalar_coupling_contributions_df', 'magnetic_shielding_tensors_df', 'dipole_moments_df', 'structure_df', 'test_df']
def dispDF(df, name):
    pass
pd.set_option('display.expand_frame_repr', False)
for df, name in zip(dfs, names):
    dispDF(df, name)
dispDF(potential_energy_df, 'potential energy')
code
16154375/cell_4
[ "image_output_1.png" ]
import warnings
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
print('Libraries were loaded.')
code
16154375/cell_30
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
colors = sns.color_palette('cubehelix', 8)
sns.set()
subsample = 100
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
scatter_colors = sns.color_palette("husl", 85003)
# 3D scatter
ax.scatter(dipole_moments_df['X'][::subsample], dipole_moments_df['Y'][::subsample], dipole_moments_df['Z'][::subsample], s=30, alpha=0.5, c=scatter_colors[::subsample])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Dipole Moment')
distances = np.asarray([x**2 + y**2 + z**2 for x, y, z in zip(dipole_moments_df['X'],dipole_moments_df['Y'], dipole_moments_df['Z'])])
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax = ax.flatten()
# original distribution
sns.distplot(distances, color=colors[0], kde=False, norm_hist=False, ax=ax[0])
ax[0].set_xlabel('distance')
# in log
sns.distplot(np.log(distances + 0.00001), color=colors[0], kde=False, norm_hist=False, ax=ax[1])
ax[1].set_xlabel('log distance')
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
scatter_colors = sns.color_palette("husl", 29)
# 3D scatter
for i in range(29):
    xx = magnetic_shielding_tensors_df.loc[magnetic_shielding_tensors_df['atom_index']==i, 'XX']
    yy = magnetic_shielding_tensors_df.loc[magnetic_shielding_tensors_df['atom_index']==i, 'YY']
    zz = magnetic_shielding_tensors_df.loc[magnetic_shielding_tensors_df['atom_index']==i, 'ZZ']
    ax.scatter(xx[::subsample*100], yy[::subsample*100], zz[::subsample*100], s=30, alpha=0.5, c=scatter_colors[i])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Magnetic shielding tensors')
# potential energy
fig, ax = plt.subplots(2, 1, figsize=(12, 8))
ax = ax.flatten()
sns.distplot(potential_energy_df['potential_energy'], kde=False, color=colors[0], ax=ax[0])
ax[1].plot(np.arange(0, 85003, subsample*10), potential_energy_df['potential_energy'][::subsample*10], c=colors[0], alpha=0.5)
ax[1].set_xlabel('molecular name')
ax[1].set_ylabel('potential energy')
plt.tight_layout()
fig, ax = plt.subplots(6, 5, figsize=(12, 16))
ax = ax.flatten()
for i in range(29):
    sns.distplot(mulliken_charges_df.loc[mulliken_charges_df['atom_index'] == i, 'mulliken_charge'], kde=False, color=colors[2], ax=ax[i])
    ax[i].set_title('atom index ' + str(i))
    ax[i].set_xlabel('')
    ax[i].set_ylabel('')
plt.tight_layout()
code
16154375/cell_33
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
dfs = [train_df, potential_energy_df, mulliken_charges_df, scalar_coupling_contributions_df, magnetic_shielding_tensors_df, dipole_moments_df, structure_df, test_df]
names = ['train_df', 'potential_energy_df', 'mulliken_charges_df', 'scalar_coupling_contributions_df', 'magnetic_shielding_tensors_df', 'dipole_moments_df', 'structure_df', 'test_df']
def dispDF(df, name):
    pass
pd.set_option('display.expand_frame_repr', False)
for df, name in zip(dfs, names):
    dispDF(df, name)
dispDF(scalar_coupling_contributions_df, 'scalar coupling contributions')
code
16154375/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
dfs = [train_df, potential_energy_df, mulliken_charges_df, scalar_coupling_contributions_df, magnetic_shielding_tensors_df, dipole_moments_df, structure_df, test_df]
names = ['train_df', 'potential_energy_df', 'mulliken_charges_df', 'scalar_coupling_contributions_df', 'magnetic_shielding_tensors_df', 'dipole_moments_df', 'structure_df', 'test_df']
def dispDF(df, name):
    pass
pd.set_option('display.expand_frame_repr', False)
for df, name in zip(dfs, names):
    dispDF(df, name)
dispDF(magnetic_shielding_tensors_df, 'magnetic shielding tensors')
code
16154375/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
print('All the data were loaded.')
code
16154375/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
dfs = [train_df, potential_energy_df, mulliken_charges_df, scalar_coupling_contributions_df, magnetic_shielding_tensors_df, dipole_moments_df, structure_df, test_df]
names = ['train_df', 'potential_energy_df', 'mulliken_charges_df', 'scalar_coupling_contributions_df', 'magnetic_shielding_tensors_df', 'dipole_moments_df', 'structure_df', 'test_df']
def dispDF(df, name):
    pass
pd.set_option('display.expand_frame_repr', False)
for df, name in zip(dfs, names):
    dispDF(df, name)
dispDF(mulliken_charges_df, 'mulliken charges')
code
16154375/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
colors = sns.color_palette('cubehelix', 8)
sns.set()
subsample = 100
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
scatter_colors = sns.color_palette("husl", 85003)
# 3D scatter
ax.scatter(dipole_moments_df['X'][::subsample], dipole_moments_df['Y'][::subsample], dipole_moments_df['Z'][::subsample], s=30, alpha=0.5, c=scatter_colors[::subsample])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Dipole Moment')
distances = np.asarray([x**2 + y**2 + z**2 for x, y, z in zip(dipole_moments_df['X'],dipole_moments_df['Y'], dipole_moments_df['Z'])])
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax = ax.flatten()
# original distribution
sns.distplot(distances, color=colors[0], kde=False, norm_hist=False, ax=ax[0])
ax[0].set_xlabel('distance')
# in log
sns.distplot(np.log(distances + 0.00001), color=colors[0], kde=False, norm_hist=False, ax=ax[1])
ax[1].set_xlabel('log distance')
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
scatter_colors = sns.color_palette("husl", 29)
# 3D scatter
for i in range(29):
    xx = magnetic_shielding_tensors_df.loc[magnetic_shielding_tensors_df['atom_index']==i, 'XX']
    yy = magnetic_shielding_tensors_df.loc[magnetic_shielding_tensors_df['atom_index']==i, 'YY']
    zz = magnetic_shielding_tensors_df.loc[magnetic_shielding_tensors_df['atom_index']==i, 'ZZ']
    ax.scatter(xx[::subsample*100], yy[::subsample*100], zz[::subsample*100], s=30, alpha=0.5, c=scatter_colors[i])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Magnetic shielding tensors')
fig, ax = plt.subplots(2, 1, figsize=(12, 8))
ax = ax.flatten()
sns.distplot(potential_energy_df['potential_energy'], kde=False, color=colors[0], ax=ax[0])
ax[1].plot(np.arange(0, 85003, subsample * 10), potential_energy_df['potential_energy'][::subsample * 10], c=colors[0], alpha=0.5)
ax[1].set_xlabel('molecular name')
ax[1].set_ylabel('potential energy')
plt.tight_layout()
code
16154375/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
16154375/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
colors = sns.color_palette('cubehelix', 8)
sns.set()
subsample = 100
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
scatter_colors = sns.color_palette("husl", 85003)
# 3D scatter
ax.scatter(dipole_moments_df['X'][::subsample], dipole_moments_df['Y'][::subsample], dipole_moments_df['Z'][::subsample], s=30, alpha=0.5, c=scatter_colors[::subsample])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Dipole Moment')
distances = np.asarray([x**2 + y**2 + z**2 for x, y, z in zip(dipole_moments_df['X'],dipole_moments_df['Y'], dipole_moments_df['Z'])])
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax = ax.flatten()
# original distribution
sns.distplot(distances, color=colors[0], kde=False, norm_hist=False, ax=ax[0])
ax[0].set_xlabel('distance')
# in log
sns.distplot(np.log(distances + 0.00001), color=colors[0], kde=False, norm_hist=False, ax=ax[1])
ax[1].set_xlabel('log distance')
outliers_dipole_moment = [m for i, m in enumerate(dipole_moments_df['molecule_name']) if distances[i] > 100]
print('outliers (dipole moments): ' + str(len(outliers_dipole_moment)) + ' molecules')
print(str(outliers_dipole_moment))
code
16154375/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
dfs = [train_df, potential_energy_df, mulliken_charges_df, scalar_coupling_contributions_df, magnetic_shielding_tensors_df, dipole_moments_df, structure_df, test_df]
names = ['train_df', 'potential_energy_df', 'mulliken_charges_df', 'scalar_coupling_contributions_df', 'magnetic_shielding_tensors_df', 'dipole_moments_df', 'structure_df', 'test_df']
def dispDF(df, name):
    print('========== ' + name + ' ==========')
    print('SHAPE:')
    print(df.shape)
    print('HEAD:')
    print(df.head(5))
    print('DATA TYPE:')
    print(df.dtypes)
    print('UNIQUES:')
    print(df.nunique())
    print('======================================')
pd.set_option('display.expand_frame_repr', False)
for df, name in zip(dfs, names):
    dispDF(df, name)
code
16154375/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
colors = sns.color_palette('cubehelix', 8)
sns.set()
subsample = 100
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
scatter_colors = sns.color_palette("husl", 85003)
# 3D scatter
ax.scatter(dipole_moments_df['X'][::subsample], dipole_moments_df['Y'][::subsample], dipole_moments_df['Z'][::subsample], s=30, alpha=0.5, c=scatter_colors[::subsample])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Dipole Moment')
distances = np.asarray([x ** 2 + y ** 2 + z ** 2 for x, y, z in zip(dipole_moments_df['X'], dipole_moments_df['Y'], dipole_moments_df['Z'])])
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax = ax.flatten()
sns.distplot(distances, color=colors[0], kde=False, norm_hist=False, ax=ax[0])
ax[0].set_xlabel('distance')
sns.distplot(np.log(distances + 1e-05), color=colors[0], kde=False, norm_hist=False, ax=ax[1])
ax[1].set_xlabel('log distance')
code
16154375/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
colors = sns.color_palette('cubehelix', 8)
sns.set()
subsample = 100
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
scatter_colors = sns.color_palette('husl', 85003)
ax.scatter(dipole_moments_df['X'][::subsample], dipole_moments_df['Y'][::subsample], dipole_moments_df['Z'][::subsample], s=30, alpha=0.5, c=scatter_colors[::subsample])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Dipole Moment')
code
16154375/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
colors = sns.color_palette('cubehelix', 8)
sns.set()
subsample = 100
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
scatter_colors = sns.color_palette("husl", 85003)
# 3D scatter
ax.scatter(dipole_moments_df['X'][::subsample], dipole_moments_df['Y'][::subsample], dipole_moments_df['Z'][::subsample], s=30, alpha=0.5, c=scatter_colors[::subsample])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Dipole Moment')
distances = np.asarray([x**2 + y**2 + z**2 for x, y, z in zip(dipole_moments_df['X'],dipole_moments_df['Y'], dipole_moments_df['Z'])])
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax = ax.flatten()
# original distribution
sns.distplot(distances, color=colors[0], kde=False, norm_hist=False, ax=ax[0])
ax[0].set_xlabel('distance')
# in log
sns.distplot(np.log(distances + 0.00001), color=colors[0], kde=False, norm_hist=False, ax=ax[1])
ax[1].set_xlabel('log distance')
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
scatter_colors = sns.color_palette('husl', 29)
for i in range(29):
    xx = magnetic_shielding_tensors_df.loc[magnetic_shielding_tensors_df['atom_index'] == i, 'XX']
    yy = magnetic_shielding_tensors_df.loc[magnetic_shielding_tensors_df['atom_index'] == i, 'YY']
    zz = magnetic_shielding_tensors_df.loc[magnetic_shielding_tensors_df['atom_index'] == i, 'ZZ']
    ax.scatter(xx[::subsample * 100], yy[::subsample * 100], zz[::subsample * 100], s=30, alpha=0.5, c=scatter_colors[i])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Magnetic shielding tensors')
code
16154375/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
potential_energy_df = pd.read_csv('../input/potential_energy.csv')
mulliken_charges_df = pd.read_csv('../input/mulliken_charges.csv')
scalar_coupling_contributions_df = pd.read_csv('../input/scalar_coupling_contributions.csv')
magnetic_shielding_tensors_df = pd.read_csv('../input/magnetic_shielding_tensors.csv')
dipole_moments_df = pd.read_csv('../input/dipole_moments.csv')
structure_df = pd.read_csv('../input/structures.csv')
test_df = pd.read_csv('../input/test.csv')
dfs = [train_df, potential_energy_df, mulliken_charges_df, scalar_coupling_contributions_df, magnetic_shielding_tensors_df, dipole_moments_df, structure_df, test_df]
names = ['train_df', 'potential_energy_df', 'mulliken_charges_df', 'scalar_coupling_contributions_df', 'magnetic_shielding_tensors_df', 'dipole_moments_df', 'structure_df', 'test_df']
def dispDF(df, name):
    pass
pd.set_option('display.expand_frame_repr', False)
for df, name in zip(dfs, names):
    dispDF(df, name)
dispDF(dipole_moments_df, 'dipole moments')
code
50239726/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
test_data.isnull().sum()
code
50239726/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
map1 = sns.FacetGrid(train_data, col='Pclass', row='Sex')
map1.map_dataframe(sns.histplot, x='Survived')
code
50239726/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
print('Survival rate of adult males:', ((train_data['Survived'] == True) & (train_data['Sex'] == 'male') & (train_data['Age'] > 14)).sum() / (train_data['Sex'] == 'male').sum(), '%')
code
50239726/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
test_data.head()
code
50239726/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
print('Survival rate of females:', ((train_data['Survived'] == True) & (train_data['Sex'] == 'female')).sum() / (train_data['Sex'] == 'female').sum(), '%')
code
50239726/cell_1
[ "text_plain_output_1.png" ]
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
!pip install seaborn --upgrade
code
50239726/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
print('Survival rate of males:', ((train_data['Survived'] == True) & (train_data['Sex'] == 'male')).sum() / (train_data['Sex'] == 'male').sum(), '%')
code
50239726/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
print('Percentage survived (train_data):', (train_data['Survived'] == True).sum() * 100 / train_data.shape[0], '%')
code
50239726/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
print('Survival rate of females:', ((train_data['Survived'] == True) & (train_data['Sex'] == 'female')).sum() / (train_data['Sex'] == 'female').sum(), '%')
code
50239726/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
sns.countplot(x='Survived', hue='Pclass', data=train_data)
code
50239726/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
print('Total size of train_data:', train_data.shape)
print('Total size of test_data:', test_data.shape)
code
50239726/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.isnull().sum()
code
50239726/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.head(20)
code
309683/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from subprocess import check_output
comments = pd.read_csv('../input/comment.csv')
likes = pd.read_csv('../input/like.csv')
members = pd.read_csv('../input/member.csv')
posts = pd.read_csv('../input/post.csv')
likeResponse = pd.merge(likes.loc[likes['gid'] == 117291968282998], posts.loc[posts['gid'] == 117291968282998, ['pid', 'name']], left_on='pid', right_on='pid')
result = likeResponse.groupby(['name_y', 'name_x'])['response'].count()
finalResult = pd.DataFrame(result.index.values, columns=['NameCombo'])
finalResult['Weight'] = result.values
finalResult['From'] = finalResult['NameCombo'].map(lambda x: x[0])
finalResult['To'] = finalResult['NameCombo'].map(lambda x: x[1])
del finalResult['NameCombo']
g = nx.Graph()
g.add_edges_from([(row['From'], row['To']) for index, row in finalResult.iterrows()])
d = nx.degree(g)
spring_pos = nx.spring_layout(g)
plt.axis('off')
plt.clf()
g.number_of_nodes()
spring_pos = nx.spring_layout(g, scale=2)
nx.draw(g, spring_pos, with_labels=False, nodelist=d.keys(), node_size=[v * 5 for v in d.values()])
code
309683/cell_3
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from subprocess import check_output
comments = pd.read_csv('../input/comment.csv')
likes = pd.read_csv('../input/like.csv')
members = pd.read_csv('../input/member.csv')
posts = pd.read_csv('../input/post.csv')
likeResponse = pd.merge(likes.loc[likes['gid'] == 117291968282998], posts.loc[posts['gid'] == 117291968282998, ['pid', 'name']], left_on='pid', right_on='pid')
result = likeResponse.groupby(['name_y', 'name_x'])['response'].count()
finalResult = pd.DataFrame(result.index.values, columns=['NameCombo'])
finalResult['Weight'] = result.values
finalResult['From'] = finalResult['NameCombo'].map(lambda x: x[0])
finalResult['To'] = finalResult['NameCombo'].map(lambda x: x[1])
del finalResult['NameCombo']
g = nx.Graph()
plt.figure()
g.add_edges_from([(row['From'], row['To']) for index, row in finalResult.iterrows()])
d = nx.degree(g)
spring_pos = nx.spring_layout(g)
plt.axis('off')
nx.draw_networkx(g, spring_pos, with_labels=False, nodelist=d.keys(), node_size=[v * 10 for v in d.values()])
plt.savefig('LIKE_PLOT_GROUP1.png')
plt.clf()
code
309683/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from subprocess import check_output
comments = pd.read_csv('../input/comment.csv')
likes = pd.read_csv('../input/like.csv')
members = pd.read_csv('../input/member.csv')
posts = pd.read_csv('../input/post.csv')
likeResponse = pd.merge(likes.loc[likes['gid'] == 117291968282998], posts.loc[posts['gid'] == 117291968282998, ['pid', 'name']], left_on='pid', right_on='pid')
result = likeResponse.groupby(['name_y', 'name_x'])['response'].count()
finalResult = pd.DataFrame(result.index.values, columns=['NameCombo'])
finalResult['Weight'] = result.values
finalResult['From'] = finalResult['NameCombo'].map(lambda x: x[0])
finalResult['To'] = finalResult['NameCombo'].map(lambda x: x[1])
del finalResult['NameCombo']
g = nx.Graph()
g.add_edges_from([(row['From'], row['To']) for index, row in finalResult.iterrows()])
d = nx.degree(g)
spring_pos = nx.spring_layout(g)
plt.axis('off')
plt.clf()
f = open('g.json', 'w')
f.write('{"nodes":[')
str1 = ''
for i in finalResult['From'].unique():
    str1 += '{"name":"' + str(i) + '","group":' + str(1) + '},'
f.write(str1[:-1])
f.write('],"links":[')
str1 = ''
for i in range(len(finalResult)):
    str1 += '{"source":' + str(finalResult['From'][i]) + ',"target":' + str(finalResult['To'][i]) + ',"value":' + str(finalResult['Weight'][i]) + '},'
f.write(str1[:-1])
f.write(']}')
f.close()  # close the JSON file so the buffered output is flushed to disk
h1 = '\n<!DOCTYPE html>\n<meta charset="utf-8">\n<style>\n.link {stroke: #ccc;}\n.node text {pointer-events: none; font: 10px sans-serif;}\n</style>\n<body>\n<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"></script>\n<script>\nvar width = 800, height = 800;\nvar color = d3.scale.category20();\nvar force = d3.layout.force()\n .charge(-120)\n .linkDistance(80)\n .size([width, height]);\nvar svg = d3.select("body").append("svg")\n .attr("width", width)\n .attr("height", height);\nd3.json("g.json", function(error, graph) {\n if (error) throw error;\n\tforce.nodes(graph.nodes)\n\t .links(graph.links)\n\t .start();\n\tvar link = svg.selectAll(".link")\n\t .data(graph.links)\n\t .enter().append("line")\n\t .attr("class", "link")\n\t .style("stroke-width", function (d) {return Math.sqrt(d.value);});\n\tvar node = svg.selectAll(".node")\n\t .data(graph.nodes)\n\t .enter().append("g")\n\t .attr("class", "node")\n\t .call(force.drag);\n\tnode.append("circle")\n\t .attr("r", 8)\n\t .style("fill", function (d) {return color(d.group);})\n\tnode.append("text")\n\t .attr("dx", 10)\n\t .attr("dy", ".35em")\n\t .text(function(d) { return d.name });\n\tforce.on("tick", function () {\n\t link.attr("x1", function (d) {return d.source.x;})\n\t\t.attr("y1", function (d) {return d.source.y;})\n\t\t.attr("x2", function (d) {return d.target.x;})\n\t\t.attr("y2", function (d) {return d.target.y;});\n\t d3.selectAll("circle").attr("cx", function (d) {return d.x;})\n\t\t.attr("cy", function (d) {return d.y;});\n\t d3.selectAll("text").attr("x", function (d) {return d.x;})\n\t\t.attr("y", function (d) {return d.y;});\n });\n});\n</script>\n'
f = open('output.html', 'w')
f.write(h1)
f.close()  # close the HTML file as well
code
32068206/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
item_categories.describe()
code
32068206/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
print('Count of categories with count of items 100-1000 is', len(list(filter(lambda x: 100 <= x <= 1000, items['item_category_id'].value_counts(ascending=True)))))
code
32068206/cell_9
[ "text_html_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
items.info()
code
32068206/cell_57
[ "text_html_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
test.describe()
code
32068206/cell_56
[ "text_plain_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
test.info()
code
32068206/cell_34
[ "text_html_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
sales_train.info()
code
32068206/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
sales_train.head(5)
code
32068206/cell_44
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
sns.set(rc={'figure.figsize':(11.7,12)})
ax = sns.countplot(y = 'item_category_id', data = items, order = items['item_category_id'].value_counts(ascending=True).index)
sales_train.groupby('shop_id').mean()
sns.set(rc={'figure.figsize':(13,13)})
ax = sns.barplot(x=sales_train.groupby('shop_id').mean().index, y=sales_train.groupby('shop_id').mean()['item_cnt_day'], color="salmon")
sales_train.groupby('shop_id').sum()
sub_sales_df = sales_train.groupby('shop_id').sum()
sub_sales_df['index_shop'] = sub_sales_df.index
sub_sales_df = sub_sales_df.sort_values(['item_cnt_day']).reset_index(drop=True)
print('Count of prices overall:', len(sales_train))
print('Count of prices < 50000:', len(sales_train[sales_train['item_price'] < 50000]))
print('Count of prices 50000 <= x <= 250000:', len(sales_train) - len(sales_train[sales_train['item_price'] > 250000]) - len(sales_train[sales_train['item_price'] < 50000]))
print('Count of prices > 250000:', len(sales_train[sales_train['item_price'] > 250000]))
code
32068206/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
item_categories.info()
code
32068206/cell_55
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
test.head(5)
code
32068206/cell_39
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
sns.set(rc={'figure.figsize':(11.7,12)})
ax = sns.countplot(y = 'item_category_id', data = items, order = items['item_category_id'].value_counts(ascending=True).index)
sales_train.groupby('shop_id').mean()
sns.set(rc={'figure.figsize':(13,13)})
ax = sns.barplot(x=sales_train.groupby('shop_id').mean().index, y=sales_train.groupby('shop_id').mean()['item_cnt_day'], color="salmon")
sales_train.groupby('shop_id').sum()
code
32068206/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
item_categories = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
shops.head(5)
code