path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value)
---|---|---|---|
50245049/cell_21 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.feature_selection import SelectKBest, chi2
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
f, ax = plt.subplots(1, 2, figsize = (15, 7))
df['class'].value_counts().plot.bar(ax=ax[0])
df['class'].value_counts().plot.pie(ax=ax[1], autopct='%.2f%%')
from sklearn.decomposition import PCA
pca_2d = PCA(n_components=2)
pca_3d = PCA(n_components=3)
PCs_2d = pd.DataFrame(pca_2d.fit_transform(df.drop(['class'], axis=1)))
PCs_3d = pd.DataFrame(pca_3d.fit_transform(df.drop(['class'], axis=1)))
PCs_2d.columns = ['PC1_2d', 'PC2_2d']
PCs_3d.columns = ['PC1_3d', 'PC2_3d', 'PC3_3d']
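# NOTE (editor): the 3-D figure/axes used by ax.set_zlabel below come from an
# elided plotting cell (see cell_17 for the full 3-D scatter).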
from mpl_toolkits.mplot3d import Axes3D
x = PCs_3d.iloc[:, 1]
y = PCs_3d.iloc[:, 2]
c = df['class']
ax.set_zlabel('PC3')
x = df.drop(['class'], axis=1)
y = df['class']
from sklearn.feature_selection import SelectKBest, chi2
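# SelectKBest with chi2 keeps the k features most dependent on the target;
# chi2 requires non-negative inputs, which the label-encoded columns satisfy.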
sb = SelectKBest(chi2, k=5)
X_new = sb.fit_transform(x, y)
X_new.shape
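# get_support() returns a boolean mask over the original columns, used below to
# recover the names of the 5 selected features.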
mask = sb.get_support()
new_features = x.columns[mask]
new_features | code |
50245049/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
df.hist(figsize=(15, 15)) | code |
50245049/cell_25 | [
"text_html_output_1.png"
] | import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
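# NOTE (editor): tsne_model (the 2-D t-SNE embedding) and y (the class labels)
# are assumed to come from an earlier cell; cell_26 below shows their construction.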
tsne_data = np.vstack((tsne_model.T, y)).T
tsne_df = pd.DataFrame(data=tsne_data, columns=('Dimension 1', 'Dimension 2', 'Class'))
sns.FacetGrid(tsne_df, height=8, hue='Class').map(plt.scatter, 'Dimension 1', 'Dimension 2').add_legend() | code |
50245049/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
df.head() | code |
50245049/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
df.describe() | code |
50245049/cell_40 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
pca_2d = PCA(n_components=2)
pca_3d = PCA(n_components=3)
PCs_2d = pd.DataFrame(pca_2d.fit_transform(df.drop(['class'], axis=1)))
PCs_3d = pd.DataFrame(pca_3d.fit_transform(df.drop(['class'], axis=1)))
x = df.drop(['class'], axis=1)
y = df['class']
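# Editor's note: the train/test split is elided in this cell; a plausible
# reconstruction (the split parameters are assumptions):
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)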
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(random_state=0, max_depth=5)
dt.fit(x_train, y_train)
dt.score(x_train, y_train)
predictions = dt.predict(x_test)
rf = RandomForestClassifier(max_depth=5)
rf.fit(x_train, y_train)
rf.score(x_train, y_train)
predictions = rf.predict(x_test)
rf.score(x_test, y_test)
df.drop(['class'], axis=1)
rf.feature_importances_.shape
fi = pd.DataFrame({'feature': x.columns, 'importance': rf.feature_importances_}).sort_values(by='importance', ascending=False)
fi = fi.reset_index()
fi | code |
50245049/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(random_state=0, max_depth=5)
dt.fit(x_train, y_train)
dt.score(x_train, y_train) | code |
50245049/cell_39 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=5)
rf.fit(x_train, y_train)
rf.score(x_train, y_train)
predictions = rf.predict(x_test)
rf.score(x_test, y_test)
rf.feature_importances_.shape | code |
50245049/cell_26 | [
"image_output_1.png"
] | from sklearn.feature_selection import SelectKBest, chi2
from sklearn.manifold import TSNE
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
f, ax = plt.subplots(1, 2, figsize=(15, 7))
df['class'].value_counts().plot.bar(ax=ax[0])
df['class'].value_counts().plot.pie(ax=ax[1], autopct='%.2f%%')
pca_2d = PCA(n_components=2)
pca_3d = PCA(n_components=3)
PCs_2d = pd.DataFrame(pca_2d.fit_transform(df.drop(['class'], axis=1)))
PCs_3d = pd.DataFrame(pca_3d.fit_transform(df.drop(['class'], axis=1)))
PCs_2d.columns = ['PC1_2d', 'PC2_2d']
PCs_3d.columns = ['PC1_3d', 'PC2_3d', 'PC3_3d']
from mpl_toolkits.mplot3d import Axes3D
x = PCs_3d.iloc[:, 1]
y = PCs_3d.iloc[:, 2]
c = df['class']
ax.set_zlabel('PC3')
x = df.drop(['class'], axis=1)
y = df['class']
from sklearn.feature_selection import SelectKBest, chi2
sb = SelectKBest(chi2, k=5)
X_new = sb.fit_transform(x, y)
X_new.shape
mask = sb.get_support()
new_features = x.columns[mask]
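# t-SNE embeds the 22 encoded features into 2 dimensions for visualisation;
# random_state fixes the otherwise stochastic layout.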
model = TSNE(n_components=2, random_state=0)
tsne_model = model.fit_transform(x)
tsne_model[:, 0].T | code |
50245049/cell_2 | [
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50245049/cell_19 | [
"image_output_1.png"
] | from sklearn.feature_selection import SelectKBest, chi2
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
f, ax = plt.subplots(1, 2, figsize=(15, 7))
df['class'].value_counts().plot.bar(ax=ax[0])
df['class'].value_counts().plot.pie(ax=ax[1], autopct='%.2f%%')
pca_2d = PCA(n_components=2)
pca_3d = PCA(n_components=3)
PCs_2d = pd.DataFrame(pca_2d.fit_transform(df.drop(['class'], axis=1)))
PCs_3d = pd.DataFrame(pca_3d.fit_transform(df.drop(['class'], axis=1)))
PCs_2d.columns = ['PC1_2d', 'PC2_2d']
PCs_3d.columns = ['PC1_3d', 'PC2_3d', 'PC3_3d']
from mpl_toolkits.mplot3d import Axes3D
x = PCs_3d.iloc[:, 1]
y = PCs_3d.iloc[:, 2]
c = df['class']
ax.set_zlabel('PC3')
x = df.drop(['class'], axis=1)
y = df['class']
from sklearn.feature_selection import SelectKBest, chi2
sb = SelectKBest(chi2, k=5)
X_new = sb.fit_transform(x, y)
X_new.shape | code |
50245049/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
pca_2d = PCA(n_components=2)
pca_3d = PCA(n_components=3)
PCs_2d = pd.DataFrame(pca_2d.fit_transform(df.drop(['class'], axis=1)))
PCs_3d = pd.DataFrame(pca_3d.fit_transform(df.drop(['class'], axis=1)))
x = df.drop(['class'], axis=1)
y = df['class']
dt = DecisionTreeClassifier(random_state=0, max_depth=5)
dt.fit(x_train, y_train)
dt.score(x_train, y_train)
predictions = dt.predict(x_test)
print(dict(zip(x.columns, dt.feature_importances_))) | code |
50245049/cell_28 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(random_state=0, max_depth=5)
dt.fit(x_train, y_train) | code |
50245049/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
f, ax = plt.subplots(1, 2, figsize=(15, 7))
df['class'].value_counts().plot.bar(ax=ax[0])
df['class'].value_counts().plot.pie(ax=ax[1], autopct='%.2f%%') | code |
50245049/cell_16 | [
"text_html_output_1.png"
] | import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
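# NOTE (editor): despite the tsne_* names, this cell plots PCA components;
# PCs_3d and df are assumed to come from earlier cells.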
tsne_data = pd.DataFrame(PCs_3d)
tsne_data['class'] = df['class']
ax2 = tsne_data.plot.scatter(x='PC1_3d', y='PC3_3d', c='class', colormap='viridis') | code |
50245049/cell_38 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
pca_2d = PCA(n_components=2)
pca_3d = PCA(n_components=3)
PCs_2d = pd.DataFrame(pca_2d.fit_transform(df.drop(['class'], axis=1)))
PCs_3d = pd.DataFrame(pca_3d.fit_transform(df.drop(['class'], axis=1)))
x = df.drop(['class'], axis=1)
y = df['class']
dt = DecisionTreeClassifier(random_state=0, max_depth=5)
dt.fit(x_train, y_train)
dt.score(x_train, y_train)
predictions = dt.predict(x_test)
df.drop(['class'], axis=1) | code |
50245049/cell_17 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
f, ax = plt.subplots(1, 2, figsize=(15, 7))
df['class'].value_counts().plot.bar(ax=ax[0])
df['class'].value_counts().plot.pie(ax=ax[1], autopct='%.2f%%')
pca_2d = PCA(n_components=2)
pca_3d = PCA(n_components=3)
PCs_2d = pd.DataFrame(pca_2d.fit_transform(df.drop(['class'], axis=1)))
PCs_3d = pd.DataFrame(pca_3d.fit_transform(df.drop(['class'], axis=1)))
PCs_2d.columns = ['PC1_2d', 'PC2_2d']
PCs_3d.columns = ['PC1_3d', 'PC2_3d', 'PC3_3d']
from mpl_toolkits.mplot3d import Axes3D
x = PCs_3d.iloc[:, 1]
y = PCs_3d.iloc[:, 2]
c = df['class']
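# Editor's note: the 3-D axes creation is elided in this cell; a plausible
# reconstruction (the figure size is an assumption):
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111, projection='3d')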
ax.scatter(x, y, c=c, cmap='coolwarm')
plt.title('First 3 Principal Components')
ax.set_ylabel('PC2')
ax.set_xlabel('PC1')
ax.set_zlabel('PC3')
plt.legend() | code |
50245049/cell_35 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=5)
rf.fit(x_train, y_train)
rf.score(x_train, y_train) | code |
50245049/cell_31 | [
"text_plain_output_1.png"
] | from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(random_state=0, max_depth=5)
dt.fit(x_train, y_train)
dt.score(x_train, y_train)
predictions = dt.predict(x_test)
accuracy_score(y_test, predictions) | code |
50245049/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
sns.pairplot(data=df, hue='class') | code |
50245049/cell_37 | [
"text_plain_output_1.png"
] | print() | code |
50245049/cell_12 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
df.head() | code |
50245049/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/mushroom-classification/mushrooms.csv')
le = LabelEncoder()
df = df.apply(LabelEncoder().fit_transform)
df.head() | code |
50245049/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=5)
rf.fit(x_train, y_train)
rf.score(x_train, y_train)
predictions = rf.predict(x_test)
rf.score(x_test, y_test) | code |
16136430/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
_data = pd.read_csv('../input/zomato.csv')
data = _data.drop(columns=['url', 'address', 'phone'], axis=1)
columns = data.columns
splist = []
flat = []
cuisineList = data['cuisines'].dropna(axis=0, inplace=False)
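# Build a flat list of unique cuisines; each row holds a comma-separated string.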
for entry in cuisineList:
    splist = str(entry).split(', ')
    for item in splist:
        if item not in flat:
            flat.append(item)
flat | code |
16136430/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
_data = pd.read_csv('../input/zomato.csv')
_data.head() | code |
16136430/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
print(os.listdir('../input')) | code |
16136430/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
_data = pd.read_csv('../input/zomato.csv')
print('Original set of columns: {}'.format(_data.columns))
data = _data.drop(columns=['url', 'address', 'phone'], axis=1)
columns = data.columns
print('New columns : {}'.format(columns)) | code |
48164526/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
df.drop(['EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours'], axis='columns', inplace=True)
categorical_col = []
for column in df.columns:
if df[column].dtype == object and len(df[column].unique()) <= 50:
categorical_col.append(column)
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
df.hist(edgecolor='black', linewidth=1.2, figsize=(20, 20)) | code |
48164526/cell_9 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
df.drop(['EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours'], axis='columns', inplace=True)
categorical_col = []
for column in df.columns:
if df[column].dtype == object and len(df[column].unique()) <= 50:
categorical_col.append(column)
print(f'{column} : {df[column].unique()}')
print('====================================') | code |
48164526/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
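# NOTE (editor): the print/display statements of this helper appear to have been
# elided during extraction; as written it builds `clf_report` without emitting it.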
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
pred = clf.predict(X_train)
clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
elif train == False:
pred = clf.predict(X_test)
clf_report = pd.DataFrame(classification_report(y_test, pred, output_dict=True))
from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(random_state=42)
tree_clf.fit(X_train, y_train)
print_score(tree_clf, X_train, y_train, X_test, y_test, train=True)
print_score(tree_clf, X_train, y_train, X_test, y_test, train=False) | code |
48164526/cell_4 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df.head() | code |
48164526/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from IPython.display import Image
from sklearn.externals.six import StringIO
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
import matplotlib.pyplot as plt
import pandas as pd
import pydot
import seaborn as sns
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
df.drop(['EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours'], axis='columns', inplace=True)
categorical_col = []
for column in df.columns:
if df[column].dtype == object and len(df[column].unique()) <= 50:
categorical_col.append(column)
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
# Plotting how every feature correlate with the "target"
sns.set(font_scale=1.2)
plt.figure(figsize=(30, 30))
for i, column in enumerate(categorical_col, 1):
plt.subplot(3, 3, i)
g = sns.barplot(x=f"{column}", y='Attrition', data=df)
g.set_xticklabels(g.get_xticklabels(), rotation=90)
plt.ylabel('Attrition Count')
plt.xlabel(f'{column}')
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
pred = clf.predict(X_train)
clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
elif train == False:
pred = clf.predict(X_test)
clf_report = pd.DataFrame(classification_report(y_test, pred, output_dict=True))
from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(random_state=42)
tree_clf.fit(X_train, y_train)
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'), 'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4], 'min_samples_leaf': list(range(1, 20))}
tree_clf = DecisionTreeClassifier(random_state=42)
tree_cv = GridSearchCV(tree_clf, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
tree_clf = DecisionTreeClassifier(**best_params)
tree_clf.fit(X_train, y_train)
from IPython.display import Image
from sklearn.externals.six import StringIO
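# NOTE (editor): sklearn.externals.six was removed in scikit-learn 0.23; on newer
# versions use `from io import StringIO` instead.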
from sklearn.tree import export_graphviz
import pydot
features = list(df.columns)
features.remove('Attrition')
dot_data = StringIO()
export_graphviz(tree_clf, out_file=dot_data, feature_names=features, filled=True)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph[0].create_png()) | code |
48164526/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df.info() | code |
48164526/cell_29 | [
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
df.drop(['EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours'], axis='columns', inplace=True)
categorical_col = []
for column in df.columns:
if df[column].dtype == object and len(df[column].unique()) <= 50:
categorical_col.append(column)
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
# Plotting how every feature correlate with the "target"
sns.set(font_scale=1.2)
plt.figure(figsize=(30, 30))
for i, column in enumerate(categorical_col, 1):
plt.subplot(3, 3, i)
g = sns.barplot(x=f"{column}", y='Attrition', data=df)
g.set_xticklabels(g.get_xticklabels(), rotation=90)
plt.ylabel('Attrition Count')
plt.xlabel(f'{column}')
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
from IPython.display import Image
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
import pydot
features = list(df.columns)
features.remove('Attrition') | code |
48164526/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
df.describe() | code |
48164526/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
pred = clf.predict(X_train)
clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
elif train == False:
pred = clf.predict(X_test)
clf_report = pd.DataFrame(classification_report(y_test, pred, output_dict=True))
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(n_estimators=100)
rf_clf.fit(X_train, y_train)
print_score(rf_clf, X_train, y_train, X_test, y_test, train=True)
print_score(rf_clf, X_train, y_train, X_test, y_test, train=False) | code |
48164526/cell_38 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
pred = clf.predict(X_train)
clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
elif train == False:
pred = clf.predict(X_test)
clf_report = pd.DataFrame(classification_report(y_test, pred, output_dict=True))
from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(random_state=42)
tree_clf.fit(X_train, y_train)
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'), 'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4], 'min_samples_leaf': list(range(1, 20))}
tree_clf = DecisionTreeClassifier(random_state=42)
tree_cv = GridSearchCV(tree_clf, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
tree_clf = DecisionTreeClassifier(**best_params)
tree_clf.fit(X_train, y_train)
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(n_estimators=100)
rf_clf.fit(X_train, y_train)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}
rf_clf = RandomForestClassifier(random_state=42)
rf_cv = RandomizedSearchCV(estimator=rf_clf, param_distributions=random_grid, n_iter=100, cv=3, verbose=2, random_state=42, n_jobs=-1)
rf_cv.fit(X_train, y_train)
rf_best_params = rf_cv.best_params_
rf_clf = RandomForestClassifier(**rf_best_params)
rf_clf.fit(X_train, y_train)
n_estimators = [100, 500, 1000, 1500]
max_features = ['auto', 'sqrt']
max_depth = [2, 3, 5]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4, 10]
bootstrap = [True, False]
params_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}
rf_clf = RandomForestClassifier(random_state=42)
rf_cv = GridSearchCV(rf_clf, params_grid, scoring='accuracy', cv=3, verbose=2, n_jobs=-1)
rf_cv.fit(X_train, y_train)
best_params = rf_cv.best_params_
print(f'Best parameters: {best_params}')
rf_clf = RandomForestClassifier(**best_params)
rf_clf.fit(X_train, y_train)
print_score(rf_clf, X_train, y_train, X_test, y_test, train=True)
print_score(rf_clf, X_train, y_train, X_test, y_test, train=False) | code |
48164526/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
df.drop(['EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours'], axis='columns', inplace=True)
categorical_col = []
for column in df.columns:
if df[column].dtype == object and len(df[column].unique()) <= 50:
categorical_col.append(column)
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
# Plotting how every feature correlate with the "target"
sns.set(font_scale=1.2)
plt.figure(figsize=(30, 30))
for i, column in enumerate(categorical_col, 1):
plt.subplot(3, 3, i)
g = sns.barplot(x=f"{column}", y='Attrition', data=df)
g.set_xticklabels(g.get_xticklabels(), rotation=90)
plt.ylabel('Attrition Count')
plt.xlabel(f'{column}')
plt.figure(figsize=(30, 30))
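# NOTE (editor): pandas 2.0+ requires df.corr(numeric_only=True) when object
# columns are present; the older pandas used here dropped them silently.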
sns.heatmap(df.corr(), annot=True, cmap='RdYlGn', annot_kws={'size': 15}) | code |
48164526/cell_35 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import RandomizedSearchCV
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
pred = clf.predict(X_train)
clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
elif train == False:
pred = clf.predict(X_test)
clf_report = pd.DataFrame(classification_report(y_test, pred, output_dict=True))
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(n_estimators=100)
rf_clf.fit(X_train, y_train)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}
rf_clf = RandomForestClassifier(random_state=42)
rf_cv = RandomizedSearchCV(estimator=rf_clf, param_distributions=random_grid, n_iter=100, cv=3, verbose=2, random_state=42, n_jobs=-1)
rf_cv.fit(X_train, y_train)
rf_best_params = rf_cv.best_params_
print(f'Best parameters: {rf_best_params}')
rf_clf = RandomForestClassifier(**rf_best_params)
rf_clf.fit(X_train, y_train)
print_score(rf_clf, X_train, y_train, X_test, y_test, train=True)
print_score(rf_clf, X_train, y_train, X_test, y_test, train=False) | code |
48164526/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
df.drop(['EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours'], axis='columns', inplace=True)
categorical_col = []
for column in df.columns:
if df[column].dtype == object and len(df[column].unique()) <= 50:
categorical_col.append(column)
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts()
sns.set(font_scale=1.2)
plt.figure(figsize=(30, 30))
for i, column in enumerate(categorical_col, 1):
plt.subplot(3, 3, i)
g = sns.barplot(x=f'{column}', y='Attrition', data=df)
g.set_xticklabels(g.get_xticklabels(), rotation=90)
plt.ylabel('Attrition Count')
plt.xlabel(f'{column}') | code |
48164526/cell_27 | [
"image_output_1.png"
] | from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
pred = clf.predict(X_train)
clf_report = pd.DataFrame(classification_report(y_train, pred, output_dict=True))
elif train == False:
pred = clf.predict(X_test)
clf_report = pd.DataFrame(classification_report(y_test, pred, output_dict=True))
from sklearn.tree import DecisionTreeClassifier
tree_clf = DecisionTreeClassifier(random_state=42)
tree_clf.fit(X_train, y_train)
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
params = {'criterion': ('gini', 'entropy'), 'splitter': ('best', 'random'), 'max_depth': list(range(1, 20)), 'min_samples_split': [2, 3, 4], 'min_samples_leaf': list(range(1, 20))}
tree_clf = DecisionTreeClassifier(random_state=42)
tree_cv = GridSearchCV(tree_clf, params, scoring='accuracy', n_jobs=-1, verbose=1, cv=3)
tree_cv.fit(X_train, y_train)
best_params = tree_cv.best_params_
print(f'Best parameters: {best_params}')
tree_clf = DecisionTreeClassifier(**best_params)
tree_clf.fit(X_train, y_train)
print_score(tree_clf, X_train, y_train, X_test, y_test, train=True)
print_score(tree_clf, X_train, y_train, X_test, y_test, train=False) | code |
48164526/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
pd.set_option('display.float_format', '{:.2f}'.format)
df.drop(['EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours'], axis='columns', inplace=True)
categorical_col = []
for column in df.columns:
if df[column].dtype == object and len(df[column].unique()) <= 50:
categorical_col.append(column)
df['Attrition'] = df.Attrition.astype('category').cat.codes
df.Attrition.value_counts() | code |
1009348/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # combined plotting
df = pd.read_csv('../input/train.csv', index_col=0)
df_test = pd.read_csv('../input/test.csv', index_col=0)
df.dtypes
def cond_hists(df, plot_cols, grid_col):
for col in plot_cols:
grid1 = sns.FacetGrid(df, col=grid_col)
grid1.map(plt.hist, col, alpha=0.7)
return grid_col
df.Sex = df.Sex.map({'male': 0, 'female': 1})
df_test.Sex = df_test.Sex.map({'male': 0, 'female': 1})
plot_cols = ['Pclass', 'Age', 'Sex', 'Parch', 'SibSp']
df.corr() | code |
1009348/cell_6 | [
"image_output_5.png",
"image_output_4.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv', index_col=0)
df_test = pd.read_csv('../input/test.csv', index_col=0)
df.head(5) | code |
1009348/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv', index_col=0)
df_test = pd.read_csv('../input/test.csv', index_col=0)
df.dtypes | code |
1009348/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv', index_col=0)
df_test = pd.read_csv('../input/test.csv', index_col=0)
df.dtypes
df.Survived.hist() | code |
1009348/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # combined plotting
df = pd.read_csv('../input/train.csv', index_col=0)
df_test = pd.read_csv('../input/test.csv', index_col=0)
df.dtypes
def cond_hists(df, plot_cols, grid_col):
for col in plot_cols:
grid1 = sns.FacetGrid(df, col=grid_col)
grid1.map(plt.hist, col, alpha=0.7)
return grid_col
df.Sex = df.Sex.map({'male': 0, 'female': 1})
df_test.Sex = df_test.Sex.map({'male': 0, 'female': 1})
plot_cols = ['Pclass', 'Age', 'Sex', 'Parch', 'SibSp']
cond_hists(df, plot_cols, 'Survived') | code |
2004114/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
y = data.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF']
X = data[predicators]
from sklearn.tree import DecisionTreeRegressor
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
print('Making predictions for the following 5 houses:')
print(X.head())
print('The predictions are:')
print(housing_model.predict(X.head())) | code |
2004114/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
print(data.columns) | code |
2004114/cell_11 | [
"text_html_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
y = data.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF']
X = data[predicators]
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
predicted_Home_prices = housing_model.predict(X)
mean_absolute_error(y, predicted_Home_prices)
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)
housing_model = DecisionTreeRegressor()
housing_model.fit(train_X, train_y)
val_predictions = housing_model.predict(val_X)
print(mean_absolute_error(val_y, val_predictions)) | code |
2004114/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
import numpy as np
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_X = test[predictor_cols]
predict_prices = my_model.predict(test_X)
print(predict_prices) | code |
2004114/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
import numpy as np
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y) | code |
2004114/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.tree import DecisionTreeRegressor
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
y = data.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF']
X = data[predicators]
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y) | code |
2004114/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor()
forest_model.fit(train_X, train_y)
predict_vals = forest_model.predict(val_X)
print(mean_absolute_error(val_y, predict_vals)) | code |
2004114/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
col_interest = ['ScreenPorch', 'MoSold']
sa = data[col_interest]
sa.describe() | code |
2004114/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
model.fit(predictors_train, targ_train)
preds_val = model.predict(predictors_val)
mae = mean_absolute_error(targ_val, preds_val)
return mae
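# Sweep max_leaf_nodes to trade off under- vs over-fitting on validation MAE.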
for max_leaf_nodes in [5, 50, 500, 5000]:
my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y)
print('Max leaf nodes: %d \t\t Mean Absolute Error: %d' % (max_leaf_nodes, my_mae)) | code |
2004114/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
y = data.SalePrice
predicators = ['YearBuilt', 'YrSold', 'TotalBsmtSF']
X = data[predicators]
housing_model = DecisionTreeRegressor()
housing_model.fit(X, y)
predicted_Home_prices = housing_model.predict(X)
mean_absolute_error(y, predicted_Home_prices) | code |
73100727/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
train_data.isnull().sum()
train_drop_target = train_data.drop(['target'], axis=1)
y = train_data['target']
train_drop_target
y.head() | code |
73100727/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
train_data.head() | code |
73100727/cell_25 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OrdinalEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
train_data.isnull().sum()
train_drop_target = train_data.drop(['target'], axis=1)
y = train_data['target']
train_drop_target
cat_o = train_data.dtypes == 'object'
object_cols = list(cat_o[cat_o].index)
object_cols
X = train_drop_target.copy()
X_test = test_data.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(train_drop_target[object_cols])
X_test[object_cols] = ordinal_encoder.transform(test_data[object_cols])
X.head() | code |
73100727/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
train_data.isnull().sum()
train_drop_target = train_data.drop(['target'], axis=1)
y = train_data['target']
train_drop_target | code |
73100727/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
print(f' test_data : {test_data.shape}, \n train_data: {train_data.shape}') | code |
73100727/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OrdinalEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
train_data.isnull().sum()
train_drop_target = train_data.drop(['target'], axis=1)
y = train_data['target']
train_drop_target
cat_o = train_data.dtypes == 'object'
object_cols = list(cat_o[cat_o].index)
object_cols
X = train_drop_target.copy()
X_test = test_data.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(train_drop_target[object_cols])
X_test[object_cols] = ordinal_encoder.transform(test_data[object_cols])
X_test | code |
73100727/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73100727/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
train_data.isnull().sum()
def print_cat_columns(dataset):
cl = dataset.dtypes == 'object'
cat_vars = list(cl[cl].index)
print(f'\nCategorical variables Data-Set:')
print(cat_vars)
print_cat_columns(test_data)
print_cat_columns(train_data) | code |
73100727/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
def train_the_model(m_d, n, ran):
model = RandomForestRegressor(max_depth=m_d, n_estimators=n, random_state=ran, n_jobs=-1)
return model
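# NOTE (editor): the fit step appears elided in extraction; the model would need
# e.g. model.fit(X_train, y_train) before predicting on X_val below.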
ran = 0
n = 1100
m_d = 500
model = train_the_model(m_d, n, ran)
predict_0 = model.predict(X_val)
print('RMSE:', mean_squared_error(y_val, predict_0, squared=False)) | code |
73100727/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test_data.head() | code |
73100727/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
train_data.isnull().sum()
print(test_data.columns)
print('\n', train_data.columns) | code |
73100727/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
train_data.isnull().sum()
train_drop_target = train_data.drop(['target'], axis=1)
y = train_data['target']
train_drop_target
cat_o = train_data.dtypes == 'object'
object_cols = list(cat_o[cat_o].index)
object_cols | code |
73100727/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
test_data = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train_data = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
train_data.isnull().sum() | code |
2013071/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | train_xyz | code |
2013071/cell_25 | [
"text_plain_output_1.png"
] | from numpy.linalg import inv
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
if x[0] == 'atom':
pos_data.append([np.array(x[1:4], dtype=float), x[4]])
elif x[0] == 'lattice_vector':
lat_data.append(np.array(x[1:4], dtype=float))
return (pos_data, np.array(lat_data))
def length(v):
return np.linalg.norm(v)
def unit_vector(vector):
return vector / length(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def angle_deg_between(v1, v2):
return np.degrees(angle_between(v1, v2))
def get_lattice_constants(lattice_vectors):
lat_const_series = pd.Series()
for i in range(3):
lat_const_series['lattice_vector_' + str(i + 1) + '_ang'] = length(lattice_vectors[i])
lat_const_series['lattice_angle_alpha_degree'] = angle_deg_between(lattice_vectors[1], lattice_vectors[2])
lat_const_series['lattice_angle_beta_degree'] = angle_deg_between(lattice_vectors[2], lattice_vectors[0])
lat_const_series['lattice_angle_gamma_degree'] = angle_deg_between(lattice_vectors[0], lattice_vectors[1])
return lat_const_series
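# NOTE (editor): train_xyz and train_lat are assumed to come from the elided
# geometry-loading cell (see cell_20 below, which calls get_xyz_data).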
A = np.transpose(train_lat)
R = train_xyz[0][0]
from numpy.linalg import inv
B = inv(A)
print('The reciprocal lattice vectors:')
print(B) | code |
2013071/cell_34 | [
"text_plain_output_1.png"
] | from numpy.linalg import inv
import networkx as nx
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
if x[0] == 'atom':
pos_data.append([np.array(x[1:4], dtype=float), x[4]])
elif x[0] == 'lattice_vector':
lat_data.append(np.array(x[1:4], dtype=float))
return (pos_data, np.array(lat_data))
def length(v):
return np.linalg.norm(v)
def unit_vector(vector):
return vector / length(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def angle_deg_between(v1, v2):
return np.degrees(angle_between(v1, v2))
def get_lattice_constants(lattice_vectors):
lat_const_series = pd.Series()
for i in range(3):
lat_const_series['lattice_vector_' + str(i + 1) + '_ang'] = length(lattice_vectors[i])
lat_const_series['lattice_angle_alpha_degree'] = angle_deg_between(lattice_vectors[1], lattice_vectors[2])
lat_const_series['lattice_angle_beta_degree'] = angle_deg_between(lattice_vectors[2], lattice_vectors[0])
lat_const_series['lattice_angle_gamma_degree'] = angle_deg_between(lattice_vectors[0], lattice_vectors[1])
return lat_const_series
A = np.transpose(train_lat)
R = train_xyz[0][0]
from numpy.linalg import inv
B = inv(A)
r = np.matmul(B, R)
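# Minimum-image distances: for each atom pair, scan the 27 neighbouring unit
# cells (l, m, n in {-1, 0, 1}) and keep the shortest Cartesian separation.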
def get_distances(reduced_coords, amat):
natom = len(reduced_coords)
dists = np.array([np.inf] * natom ** 2).reshape(natom, natom)
for i in range(natom):
dists[i, i] = 0
for j in range(i):
rij = reduced_coords[i][0] - reduced_coords[j][0]
for l in range(-1, 2):
for m in range(-1, 2):
for n in range(-1, 2):
r = rij + np.array([l, m, n])
dists[i, j] = min(dists[i, j], length(np.matmul(amat, r)))
dists[j, i] = dists[i, j]
return dists
train_red = [[np.matmul(B, R), symbol] for R, symbol in train_xyz]
train_dist = get_distances(train_red, A)
train_dist
import networkx as nx
R_O = 1.35
R_Al = 0.535
R_Ga = 0.62
R_In = 0.8
R_ionic = {'O': R_O, 'Al': R_Al, 'Ga': R_Ga, 'In': R_In}
def get_crytal_graph(reduced_coords, dists):
natom = len(reduced_coords)
G = nx.Graph()
for i in range(natom):
symbol_i = reduced_coords[i][1]
for j in range(i):
symbol_j = reduced_coords[j][1]
if symbol_i == 'O' and symbol_j != 'O' or (symbol_i != 'O' and symbol_j == 'O'):
node_i = symbol_i + '_' + str(i)
node_j = symbol_j + '_' + str(j)
R_max = (R_ionic[symbol_i] + R_ionic[symbol_j]) * 1.2
if dists[i, j] < R_max:
G.add_edge(node_i, node_j)
return G
G = get_crytal_graph(train_red, train_dist)
print(G.number_of_nodes())
print(G.number_of_edges())
natom = len(train_red)
for i in range(natom):
symbol_i = train_red[i][1]
node_i = symbol_i + '_' + str(i)
print(node_i, list(G.neighbors(node_i))) | code |
2013071/cell_30 | [
"text_plain_output_1.png"
] | from numpy.linalg import inv
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
if x[0] == 'atom':
pos_data.append([np.array(x[1:4], dtype=float), x[4]])
elif x[0] == 'lattice_vector':
lat_data.append(np.array(x[1:4], dtype=float))
return (pos_data, np.array(lat_data))
def length(v):
return np.linalg.norm(v)
def unit_vector(vector):
return vector / length(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def angle_deg_between(v1, v2):
return np.degrees(angle_between(v1, v2))
def get_lattice_constants(lattice_vectors):
lat_const_series = pd.Series()
for i in range(3):
lat_const_series['lattice_vector_' + str(i + 1) + '_ang'] = length(lattice_vectors[i])
lat_const_series['lattice_angle_alpha_degree'] = angle_deg_between(lattice_vectors[1], lattice_vectors[2])
lat_const_series['lattice_angle_beta_degree'] = angle_deg_between(lattice_vectors[2], lattice_vectors[0])
lat_const_series['lattice_angle_gamma_degree'] = angle_deg_between(lattice_vectors[0], lattice_vectors[1])
return lat_const_series
A = np.transpose(train_lat)
R = train_xyz[0][0]
from numpy.linalg import inv
B = inv(A)
r = np.matmul(B, R)
def get_distances(reduced_coords, amat):
natom = len(reduced_coords)
dists = np.array([np.inf] * natom ** 2).reshape(natom, natom)
for i in range(natom):
dists[i, i] = 0
for j in range(i):
rij = reduced_coords[i][0] - reduced_coords[j][0]
for l in range(-1, 2):
for m in range(-1, 2):
for n in range(-1, 2):
r = rij + np.array([l, m, n])
dists[i, j] = min(dists[i, j], length(np.matmul(amat, r)))
dists[j, i] = dists[i, j]
return dists
train_red = [[np.matmul(B, R), symbol] for R, symbol in train_xyz]
train_dist = get_distances(train_red, A)
train_dist | code |
2013071/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
lattice_columns = ['lattice_vector_1_ang', 'lattice_vector_2_ang', 'lattice_vector_3_ang', 'lattice_angle_alpha_degree', 'lattice_angle_beta_degree', 'lattice_angle_gamma_degree']
df_train.loc[0, lattice_columns]
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
if x[0] == 'atom':
pos_data.append([np.array(x[1:4], dtype=float), x[4]])
elif x[0] == 'lattice_vector':
lat_data.append(np.array(x[1:4], dtype=float))
return (pos_data, np.array(lat_data))
idx = df_train.id.values[0]
fn = '../input/train/{}/geometry.xyz'.format(idx)
train_xyz, train_lat = get_xyz_data(fn)
idx = df_test.id.values[0]
fn = '../input/test/{}/geometry.xyz'.format(idx)
test_xyz, test_lat = get_xyz_data(fn)
df_test.loc[0, lattice_columns] | code |
2013071/cell_26 | [
"text_plain_output_1.png"
] | from numpy.linalg import inv
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
            if x[0] == 'atom':
                pos_data.append([np.array(x[1:4], dtype=float), x[4]])
            elif x[0] == 'lattice_vector':
                lat_data.append(np.array(x[1:4], dtype=float))
return (pos_data, np.array(lat_data))
def length(v):
return np.linalg.norm(v)
def unit_vector(vector):
return vector / length(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def angle_deg_between(v1, v2):
return np.degrees(angle_between(v1, v2))
def get_lattice_constants(lattice_vectors):
lat_const_series = pd.Series()
for i in range(3):
lat_const_series['lattice_vector_' + str(i + 1) + '_ang'] = length(lattice_vectors[i])
lat_const_series['lattice_angle_alpha_degree'] = angle_deg_between(lattice_vectors[1], lattice_vectors[2])
lat_const_series['lattice_angle_beta_degree'] = angle_deg_between(lattice_vectors[2], lattice_vectors[0])
lat_const_series['lattice_angle_gamma_degree'] = angle_deg_between(lattice_vectors[0], lattice_vectors[1])
return lat_const_series
A = np.transpose(train_lat)
R = train_xyz[0][0]
from numpy.linalg import inv
B = inv(A)
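# B = A^-1 maps Cartesian positions into the lattice basis (fractional coordinates).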
r = np.matmul(B, R)
print('The reduced coordinate vector:')
print(r) | code |
2013071/cell_11 | [
"text_plain_output_1.png"
] | train_lat | code |
2013071/cell_19 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
            if x[0] == 'atom':
                pos_data.append([np.array(x[1:4], dtype=float), x[4]])
            elif x[0] == 'lattice_vector':
                lat_data.append(np.array(x[1:4], dtype=float))
return (pos_data, np.array(lat_data))
def length(v):
return np.linalg.norm(v)
def unit_vector(vector):
return vector / length(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def angle_deg_between(v1, v2):
return np.degrees(angle_between(v1, v2))
def get_lattice_constants(lattice_vectors):
lat_const_series = pd.Series()
for i in range(3):
lat_const_series['lattice_vector_' + str(i + 1) + '_ang'] = length(lattice_vectors[i])
lat_const_series['lattice_angle_alpha_degree'] = angle_deg_between(lattice_vectors[1], lattice_vectors[2])
lat_const_series['lattice_angle_beta_degree'] = angle_deg_between(lattice_vectors[2], lattice_vectors[0])
lat_const_series['lattice_angle_gamma_degree'] = angle_deg_between(lattice_vectors[0], lattice_vectors[1])
return lat_const_series
get_lattice_constants(test_lat) | code |
2013071/cell_18 | [
"text_plain_output_1.png"
] | test_lat | code |
2013071/cell_17 | [
"text_plain_output_1.png"
] | test_xyz | code |
2013071/cell_31 | [
"text_plain_output_1.png"
] | from numpy.linalg import inv
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
            if x[0] == 'atom':
                pos_data.append([np.array(x[1:4], dtype=float), x[4]])
            elif x[0] == 'lattice_vector':
                lat_data.append(np.array(x[1:4], dtype=float))
return (pos_data, np.array(lat_data))
def length(v):
return np.linalg.norm(v)
def unit_vector(vector):
return vector / length(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def angle_deg_between(v1, v2):
return np.degrees(angle_between(v1, v2))
def get_lattice_constants(lattice_vectors):
lat_const_series = pd.Series()
for i in range(3):
lat_const_series['lattice_vector_' + str(i + 1) + '_ang'] = length(lattice_vectors[i])
lat_const_series['lattice_angle_alpha_degree'] = angle_deg_between(lattice_vectors[1], lattice_vectors[2])
lat_const_series['lattice_angle_beta_degree'] = angle_deg_between(lattice_vectors[2], lattice_vectors[0])
lat_const_series['lattice_angle_gamma_degree'] = angle_deg_between(lattice_vectors[0], lattice_vectors[1])
return lat_const_series
A = np.transpose(train_lat)
R = train_xyz[0][0]
from numpy.linalg import inv
B = inv(A)
r = np.matmul(B, R)
def get_distances(reduced_coords, amat):
natom = len(reduced_coords)
dists = np.array([np.inf] * natom ** 2).reshape(natom, natom)
for i in range(natom):
dists[i, i] = 0
for j in range(i):
rij = reduced_coords[i][0] - reduced_coords[j][0]
for l in range(-1, 2):
for m in range(-1, 2):
for n in range(-1, 2):
r = rij + np.array([l, m, n])
dists[i, j] = min(dists[i, j], length(np.matmul(amat, r)))
dists[j, i] = dists[i, j]
return dists
train_red = [[np.matmul(B, R), symbol] for R, symbol in train_xyz]
train_dist = get_distances(train_red, A)
train_dist
import seaborn as sns
sns.heatmap(train_dist) | code |
2013071/cell_24 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
            if x[0] == 'atom':
                pos_data.append([np.array(x[1:4], dtype=float), x[4]])
            elif x[0] == 'lattice_vector':
                lat_data.append(np.array(x[1:4], dtype=float))
return (pos_data, np.array(lat_data))
def length(v):
return np.linalg.norm(v)
def unit_vector(vector):
return vector / length(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def angle_deg_between(v1, v2):
return np.degrees(angle_between(v1, v2))
def get_lattice_constants(lattice_vectors):
lat_const_series = pd.Series()
for i in range(3):
lat_const_series['lattice_vector_' + str(i + 1) + '_ang'] = length(lattice_vectors[i])
lat_const_series['lattice_angle_alpha_degree'] = angle_deg_between(lattice_vectors[1], lattice_vectors[2])
lat_const_series['lattice_angle_beta_degree'] = angle_deg_between(lattice_vectors[2], lattice_vectors[0])
lat_const_series['lattice_angle_gamma_degree'] = angle_deg_between(lattice_vectors[0], lattice_vectors[1])
return lat_const_series
A = np.transpose(train_lat)
R = train_xyz[0][0]
print('The lattice vectors:')
print(A)
print('The position vector:')
print(R) | code |
2013071/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
            if x[0] == 'atom':
                pos_data.append([np.array(x[1:4], dtype=float), x[4]])
            elif x[0] == 'lattice_vector':
                lat_data.append(np.array(x[1:4], dtype=float))
return (pos_data, np.array(lat_data))
def length(v):
return np.linalg.norm(v)
def unit_vector(vector):
return vector / length(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def angle_deg_between(v1, v2):
return np.degrees(angle_between(v1, v2))
def get_lattice_constants(lattice_vectors):
lat_const_series = pd.Series()
for i in range(3):
lat_const_series['lattice_vector_' + str(i + 1) + '_ang'] = length(lattice_vectors[i])
lat_const_series['lattice_angle_alpha_degree'] = angle_deg_between(lattice_vectors[1], lattice_vectors[2])
lat_const_series['lattice_angle_beta_degree'] = angle_deg_between(lattice_vectors[2], lattice_vectors[0])
lat_const_series['lattice_angle_gamma_degree'] = angle_deg_between(lattice_vectors[0], lattice_vectors[1])
return lat_const_series
get_lattice_constants(train_lat) | code |
2013071/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
lattice_columns = ['lattice_vector_1_ang', 'lattice_vector_2_ang', 'lattice_vector_3_ang', 'lattice_angle_alpha_degree', 'lattice_angle_beta_degree', 'lattice_angle_gamma_degree']
df_train.loc[0, lattice_columns] | code |
2013071/cell_36 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from numpy.linalg import inv
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
            if x[0] == 'atom':
                pos_data.append([np.array(x[1:4], dtype=float), x[4]])
            elif x[0] == 'lattice_vector':
                lat_data.append(np.array(x[1:4], dtype=float))
return (pos_data, np.array(lat_data))
def length(v):
return np.linalg.norm(v)
def unit_vector(vector):
return vector / length(vector)
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def angle_deg_between(v1, v2):
return np.degrees(angle_between(v1, v2))
def get_lattice_constants(lattice_vectors):
lat_const_series = pd.Series()
for i in range(3):
lat_const_series['lattice_vector_' + str(i + 1) + '_ang'] = length(lattice_vectors[i])
lat_const_series['lattice_angle_alpha_degree'] = angle_deg_between(lattice_vectors[1], lattice_vectors[2])
lat_const_series['lattice_angle_beta_degree'] = angle_deg_between(lattice_vectors[2], lattice_vectors[0])
lat_const_series['lattice_angle_gamma_degree'] = angle_deg_between(lattice_vectors[0], lattice_vectors[1])
return lat_const_series
A = np.transpose(train_lat)
R = train_xyz[0][0]
from numpy.linalg import inv
B = inv(A)
r = np.matmul(B, R)
def get_distances(reduced_coords, amat):
natom = len(reduced_coords)
dists = np.array([np.inf] * natom ** 2).reshape(natom, natom)
for i in range(natom):
dists[i, i] = 0
for j in range(i):
rij = reduced_coords[i][0] - reduced_coords[j][0]
for l in range(-1, 2):
for m in range(-1, 2):
for n in range(-1, 2):
r = rij + np.array([l, m, n])
dists[i, j] = min(dists[i, j], length(np.matmul(amat, r)))
dists[j, i] = dists[i, j]
return dists
train_red = [[np.matmul(B, R), symbol] for R, symbol in train_xyz]
train_dist = get_distances(train_red, A)
train_dist
import networkx as nx
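# Ionic radii in angstroms; used below to set a per-pair bond-length cutoff.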
R_O = 1.35
R_Al = 0.535
R_Ga = 0.62
R_In = 0.8
R_ionic = {'O': R_O, 'Al': R_Al, 'Ga': R_Ga, 'In': R_In}
def get_crytal_graph(reduced_coords, dists):
natom = len(reduced_coords)
G = nx.Graph()
for i in range(natom):
symbol_i = reduced_coords[i][1]
for j in range(i):
symbol_j = reduced_coords[j][1]
            if (symbol_i == 'O' and symbol_j != 'O') or (symbol_i != 'O' and symbol_j == 'O'):
node_i = symbol_i + '_' + str(i)
node_j = symbol_j + '_' + str(j)
R_max = (R_ionic[symbol_i] + R_ionic[symbol_j]) * 1.2
if dists[i, j] < R_max:
G.add_edge(node_i, node_j)
return G
G = get_crytal_graph(train_red, train_dist)
natom = len(train_red)
for i in range(natom):
symbol_i = train_red[i][1]
node_i = symbol_i + '_' + str(i)
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
nx.draw_spring(G, with_labels=True, node_size=800, font_size=8) | code |
90137157/cell_17 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
from html import unescape
import csv
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import csv
import gc
from pathlib import Path
columns = ['tweetcreatedts', 'extractedts', 'userid', 'tweetid', 'text', 'language']
dataframe_collection = []
csvfile = '/kaggle/input/ukraine-russian-crisis-twitter-dataset-1-2-m-rows/UkraineCombinedTweetsDeduped_MAR14.csv.gzip'
df = pd.read_csv(csvfile, compression='gzip', index_col=0, encoding='utf-8', quoting=csv.QUOTE_ALL)
df = df[columns]
df = df[df['language'] == 'en']
df.reset_index(drop=True, inplace=True)
df.shape
import re
from bs4 import BeautifulSoup
from html import unescape
def remove_urls(x):
cleaned_string = re.sub('(https|http)?:\\/\\/(\\w|\\.|\\/|\\?|\\=|\\&|\\%)*\\b', '', str(x), flags=re.MULTILINE)
return cleaned_string
def unescape_stuff(x):
soup = BeautifulSoup(unescape(x), 'lxml')
return soup.text
def deEmojify(x):
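    # Strip emoji from the emoticon, symbols & pictographs, transport & map, and flag Unicode blocks.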
    regex_pattern = re.compile(pattern='[\U0001f600-\U0001f64f\U0001f300-\U0001f5ff\U0001f680-\U0001f6ff\U0001f1e0-\U0001f1ff]+', flags=re.UNICODE)
    return regex_pattern.sub('', x)
def remove_symbols(x):
cleaned_string = re.sub('[^a-zA-Z0-9]+', ' ', x)
return cleaned_string
def unify_whitespaces(x):
cleaned_string = re.sub(' +', ' ', x)
return cleaned_string
import swifter
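# swifter parallelizes pandas .apply across cores when it estimates that to be faster.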
df['text'] = df['text'].swifter.apply(remove_urls)
df['text'] = df['text'].swifter.apply(unescape_stuff)
df['text'] = df['text'].swifter.apply(deEmojify)
df['text'] = df['text'].swifter.apply(remove_symbols)
df['text'] = df['text'].swifter.apply(unify_whitespaces) | code |
90137157/cell_37 | [
"image_output_1.png"
] | import csv
import matplotlib.pyplot as plt # for wordclouds & charts
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import csv
import gc
from pathlib import Path
columns = ['tweetcreatedts', 'extractedts', 'userid', 'tweetid', 'text', 'language']
dataframe_collection = []
csvfile = '/kaggle/input/ukraine-russian-crisis-twitter-dataset-1-2-m-rows/UkraineCombinedTweetsDeduped_MAR14.csv.gzip'
df = pd.read_csv(csvfile, compression='gzip', index_col=0, encoding='utf-8', quoting=csv.QUOTE_ALL)
df = df[columns]
df = df[df['language'] == 'en']
df.reset_index(drop=True, inplace=True)
df.shape
tweet_string_list = df['bigram_text'].tolist()
tweet_string = ' '.join(tweet_string_list)
from wordcloud import WordCloud
wordcloud = WordCloud(width=2000, height=1334, random_state=1, background_color='black', colormap='Pastel1', max_words=75, collocations=False, normalize_plurals=False).generate(tweet_string)
# create the wordcloud
import matplotlib.pyplot as plt # for wordclouds & charts
from matplotlib.pyplot import figure
# Define a function to plot word cloud
def plot_cloud(wordcloud):
fig = plt.figure(figsize=(25, 17), dpi=80)
plt.tight_layout(pad=0)
plt.imshow(wordcloud)
plt.axis("off")
plt.box(False)
plt.show()
plt.close()
#Plot
plot_cloud(wordcloud)
tweet_string_list = df['trigram_text'].tolist()
tweet_string = ' '.join(tweet_string_list)
from wordcloud import WordCloud
wordcloud = WordCloud(width=2000, height=1334, random_state=1, background_color='black', colormap='Pastel1', max_words=50, collocations=False, normalize_plurals=False).generate(tweet_string)
plot_cloud(wordcloud) | code |
90137157/cell_5 | [
"image_output_1.png"
] | import csv
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import csv
import gc
from pathlib import Path
columns = ['tweetcreatedts', 'extractedts', 'userid', 'tweetid', 'text', 'language']
dataframe_collection = []
csvfile = '/kaggle/input/ukraine-russian-crisis-twitter-dataset-1-2-m-rows/UkraineCombinedTweetsDeduped_MAR14.csv.gzip'
df = pd.read_csv(csvfile, compression='gzip', index_col=0, encoding='utf-8', quoting=csv.QUOTE_ALL)
df = df[columns]
df = df[df['language'] == 'en']
df.reset_index(drop=True, inplace=True)
df.shape | code |
128011216/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import crosstab
from pyclustering.cluster.kmeans import kmeans
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.metrics import adjusted_rand_score
import numpy as np
import matplotlib.pyplot as plt
iris = load_iris()
X = iris['data']
y = iris['target']
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
X_scaled = X_pca
ax = plt.axes()
cor = ['blue', 'red', 'green']
for i in range(3):
idx = np.where(y == i)
ax.set_aspect('equal')
ax.set(title='Ground-truth')
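# initial_centers is assumed to be defined in an earlier cell of the notebook
# (e.g. via pyclustering's kmeans_plusplus_initializer).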
kmeans_instance = kmeans(X_scaled, initial_centers)
kmeans_instance.process()
centroids = np.array(kmeans_instance.get_centers())
kmeans_clusters = kmeans_instance.get_clusters()
kmeans_labels = np.zeros([X.shape[0]], dtype='int64')
ax = plt.axes()
for i in range(3):
kmeans_labels[kmeans_clusters[i]] = i
ax.scatter(X_scaled[kmeans_clusters[i], 0], X_scaled[kmeans_clusters[i], 1], color=cor[i], alpha=0.5, zorder=2)
ax.scatter(centroids[:, 0], centroids[:, 1], c='k', marker='*', zorder=3)
ax.set_aspect('equal')
ax.grid(visible=True, zorder=1)
ax.set(title='K-means clustering')
y_kmeans = np.zeros(150)
for i in range(3):
y_kmeans[kmeans_clusters[i]] = i
print(crosstab(y, y_kmeans))
print('ARI={ars:.2f}'.format(ars=adjusted_rand_score(y, kmeans_labels))) | code |
128011216/cell_2 | [
"image_output_1.png"
] | pip install pyclustering; | code |
128011216/cell_11 | [
"text_plain_output_1.png"
] | from pandas import crosstab
from pyclustering.cluster.kmeans import kmeans
from pyclustering.cluster.kmedians import kmedians
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.metrics import adjusted_rand_score
import numpy as np
import matplotlib.pyplot as plt
iris = load_iris()
X = iris['data']
y = iris['target']
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
X_scaled = X_pca
ax = plt.axes()
cor = ['blue', 'red', 'green']
for i in range(3):
idx = np.where(y == i)
ax.set_aspect('equal')
ax.set(title='Ground-truth')
kmeans_instance = kmeans(X_scaled, initial_centers)
kmeans_instance.process()
centroids = np.array(kmeans_instance.get_centers())
kmeans_clusters = kmeans_instance.get_clusters()
kmeans_labels = np.zeros([X.shape[0]], dtype='int64')
ax = plt.axes()
for i in range(3):
kmeans_labels[kmeans_clusters[i]] = i
ax.set_aspect('equal')
ax.set(title='K-means clustering')
y_kmeans = np.zeros(150)
for i in range(3):
y_kmeans[kmeans_clusters[i]] = i
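# k-medians repeats the experiment with coordinate-wise medians as centers,
# which are more robust to outliers than means.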
kmedians_instance = kmedians(X_scaled, initial_centers)
kmedians_instance.process()
medians = np.array(kmedians_instance.get_medians())
kmedians_clusters = kmedians_instance.get_clusters()
kmedians_labels = np.zeros([X.shape[0]], dtype='int64')
plt.figure()
ax = plt.axes()
for i in range(3):
    kmedians_labels[kmedians_clusters[i]] = i
ax.scatter(X_scaled[kmedians_clusters[i], 0], X_scaled[kmedians_clusters[i], 1], color=cor[i], alpha=0.5, zorder=2)
ax.scatter(medians[:, 0], medians[:, 1], c='k', marker='*', zorder=3)
ax.set_aspect('equal')
ax.grid(visible=True, zorder=1)
ax.set(title='K-medians clustering')
y_kmedians = np.zeros(150)
for i in range(3):
y_kmedians[kmedians_clusters[i]] = i
print(crosstab(y, y_kmedians))
print('ARI={ars:.2f}'.format(ars=adjusted_rand_score(y, kmedians_labels))) | code |
128011216/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
import numpy as np
import matplotlib.pyplot as plt
iris = load_iris()
X = iris['data']
y = iris['target']
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)
X_scaled = X_pca
ax = plt.axes()
cor = ['blue', 'red', 'green']
for i in range(3):
idx = np.where(y == i)
ax.scatter(X_scaled[idx, 0], X_scaled[idx, 1], color=cor[i], alpha=0.5, zorder=2)
ax.set_aspect('equal')
ax.grid(visible=True, zorder=1)
ax.set(title='Ground-truth') | code |
72113568/cell_9 | [
"image_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
pd.set_option('display.max_columns', 500)
import os
df = pd.read_csv('../input/wuzzuf-jobs/Wuzzuf_Jobs.csv')
fig , ax = plt.subplots (figsize = (18 , 6))
df.Title.value_counts().sort_values(ascending = False).reset_index().head(25).plot(kind = 'bar' ,
x = 'index' , ax = ax ,
alpha = 0.7 , color = 'grey' ,
width=0.4);
ax.grid(axis = 'y' , alpha=0.6)
for patch in ax.patches:
bl = patch.get_xy()
x = 0.5 * patch.get_width() + bl[0]
y = 1.02 * patch.get_height() + bl[1]
ax.text(x,y,"%d" %(patch.get_height()),
ha='center' , color = '#4a4a4a',
fontsize = 12, fontfamily='serif')
for s in ['top', 'left', 'right']:
ax.spines[s].set_visible(False)
plt.ylabel('Number of posted jobs' , fontsize = 14, fontfamily='serif');
plt.xlabel('Job Title' , fontsize = 14, fontfamily='serif');
plt.xticks(fontsize = 12 , rotation = 90 , fontfamily='serif');
plt.title('Most Job Titles Posted on Website ' ,fontsize = 16, fontfamily='serif' );
df['titel_adj'] = df.Title.apply(lambda x: 'Sales' if 'Sales' in x or 'Telesales' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Accountant' if 'Accountant' in x or 'Accounting' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Marketing' if 'Marketing' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'HR' if 'HR' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Graphic designer' if 'Graphic' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Bussiness Developer' if 'Bussiness Developer' in x else x)
fig , ax = plt.subplots (figsize = (18 , 5))
df.titel_adj.value_counts().sort_values(ascending = False).reset_index().head(25).plot(kind = 'bar' ,
x = 'index' , ax = ax,
alpha = 0.7 , color = 'grey' ,
width=0.4);
ax.grid(axis = 'y' , alpha=0.6)
for patch in ax.patches:
bl = patch.get_xy()
x = 0.5 * patch.get_width() + bl[0]
y = 1.02 * patch.get_height() + bl[1]
ax.text(x,y,"%d" %(patch.get_height()),
ha='center' , color = '#4a4a4a',
fontsize = 12, fontfamily='serif')
for s in ['top', 'left', 'right']:
ax.spines[s].set_visible(False)
plt.ylabel('Number of posted jobs' , fontsize = 14, fontfamily='serif');
plt.xlabel('Job Title' , fontsize = 14, fontfamily='serif');
plt.xticks(fontsize = 12 , rotation = 90 , fontfamily='serif');
plt.title('Most Job Titles Posted on Website ' ,fontsize = 16, fontfamily='serif' );
sales_skills = Counter()
df_Sales = df[df['titel_adj'] == 'Sales'].reset_index()
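# Tally the comma-separated skill tags across all Sales postings.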
for i in range(df_Sales.shape[0]):
for j in df_Sales.Skills[i].split(', '):
sales_skills[j] += 1
sales_skills = sorted(sales_skills.items(), key=lambda x: x[1], reverse=True)[0:25]
x, y = zip(*sales_skills)
fig, ax = plt.subplots(figsize=(18, 5))
plt.bar(x, y, alpha=0.7, color='grey', width=0.4)
ax.grid(axis='y', alpha=0.6)
for patch in ax.patches:
bl = patch.get_xy()
x = 0.5 * patch.get_width() + bl[0]
y = 1.02 * patch.get_height() + bl[1]
ax.text(x, y, '%d' % patch.get_height(), ha='center', color='#4a4a4a', fontsize=12, fontfamily='serif')
for s in ['top', 'left', 'right']:
ax.spines[s].set_visible(False)
plt.ylabel("Number of skill's Occurance", fontsize=14, fontfamily='serif')
plt.xlabel('Sales Skills', fontsize=14, fontfamily='serif')
plt.xticks(fontsize=12, rotation=90, fontfamily='serif')
plt.title('Most Important Skills for Sales ', fontsize=16, fontfamily='serif') | code |
72113568/cell_4 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
pd.set_option('display.max_columns', 500)
import os
df = pd.read_csv('../input/wuzzuf-jobs/Wuzzuf_Jobs.csv')
fig, ax = plt.subplots(figsize=(18, 6))
df.Title.value_counts().sort_values(ascending=False).reset_index().head(25).plot(kind='bar', x='index', ax=ax, alpha=0.7, color='grey', width=0.4)
ax.grid(axis='y', alpha=0.6)
for patch in ax.patches:
bl = patch.get_xy()
x = 0.5 * patch.get_width() + bl[0]
y = 1.02 * patch.get_height() + bl[1]
ax.text(x, y, '%d' % patch.get_height(), ha='center', color='#4a4a4a', fontsize=12, fontfamily='serif')
for s in ['top', 'left', 'right']:
ax.spines[s].set_visible(False)
plt.ylabel('Number of posted jobs', fontsize=14, fontfamily='serif')
plt.xlabel('Job Title', fontsize=14, fontfamily='serif')
plt.xticks(fontsize=12, rotation=90, fontfamily='serif')
plt.title('Most Job Titles Posted on Website ', fontsize=16, fontfamily='serif') | code |
72113568/cell_2 | [
"image_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
pd.set_option('display.max_columns', 500)
import os
df = pd.read_csv('../input/wuzzuf-jobs/Wuzzuf_Jobs.csv')
df.head() | code |
72113568/cell_11 | [
"text_html_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
pd.set_option('display.max_columns', 500)
import os
df = pd.read_csv('../input/wuzzuf-jobs/Wuzzuf_Jobs.csv')
fig , ax = plt.subplots (figsize = (18 , 6))
df.Title.value_counts().sort_values(ascending = False).reset_index().head(25).plot(kind = 'bar' ,
x = 'index' , ax = ax ,
alpha = 0.7 , color = 'grey' ,
width=0.4);
ax.grid(axis = 'y' , alpha=0.6)
for patch in ax.patches:
bl = patch.get_xy()
x = 0.5 * patch.get_width() + bl[0]
y = 1.02 * patch.get_height() + bl[1]
ax.text(x,y,"%d" %(patch.get_height()),
ha='center' , color = '#4a4a4a',
fontsize = 12, fontfamily='serif')
for s in ['top', 'left', 'right']:
ax.spines[s].set_visible(False)
plt.ylabel('Number of posted jobs' , fontsize = 14, fontfamily='serif');
plt.xlabel('Job Title' , fontsize = 14, fontfamily='serif');
plt.xticks(fontsize = 12 , rotation = 90 , fontfamily='serif');
plt.title('Most Job Titles Posted on Website ' ,fontsize = 16, fontfamily='serif' );
df['titel_adj'] = df.Title.apply(lambda x: 'Sales' if 'Sales' in x or 'Telesales' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Accountant' if 'Accountant' in x or 'Accounting' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Marketing' if 'Marketing' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'HR' if 'HR' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Graphic designer' if 'Graphic' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Bussiness Developer' if 'Bussiness Developer' in x else x)
fig , ax = plt.subplots (figsize = (18 , 5))
df.titel_adj.value_counts().sort_values(ascending = False).reset_index().head(25).plot(kind = 'bar' ,
x = 'index' , ax = ax,
alpha = 0.7 , color = 'grey' ,
width=0.4);
ax.grid(axis = 'y' , alpha=0.6)
for patch in ax.patches:
bl = patch.get_xy()
x = 0.5 * patch.get_width() + bl[0]
y = 1.02 * patch.get_height() + bl[1]
ax.text(x,y,"%d" %(patch.get_height()),
ha='center' , color = '#4a4a4a',
fontsize = 12, fontfamily='serif')
for s in ['top', 'left', 'right']:
ax.spines[s].set_visible(False)
plt.ylabel('Number of posted jobs' , fontsize = 14, fontfamily='serif');
plt.xlabel('Job Title' , fontsize = 14, fontfamily='serif');
plt.xticks(fontsize = 12 , rotation = 90 , fontfamily='serif');
plt.title('Most Job Titles Posted on Website ' ,fontsize = 16, fontfamily='serif' );
sales_skills = Counter()
df_Sales = df[df['titel_adj'] == 'Sales'].reset_index()
for i in range(df_Sales.shape[0]):
for j in df_Sales.Skills[i].split(', '):
sales_skills [j] += 1
sales_skills = sorted(sales_skills.items(), key=lambda x: x[1] , reverse=True)[0:25]
x, y = zip(*sales_skills) # unpack a list of pairs into two tuples
fig , ax = plt.subplots (figsize = (18 , 5))
plt.bar(x, y , alpha = 0.7 , color = 'grey' , width=0.4);
ax.grid(axis = 'y' , alpha=0.6)
for patch in ax.patches:
bl = patch.get_xy()
x = 0.5 * patch.get_width() + bl[0]
y = 1.02 * patch.get_height() + bl[1]
ax.text(x,y,"%d" %(patch.get_height()),
ha='center' , color = '#4a4a4a',
fontsize = 12, fontfamily='serif')
for s in ['top', 'left', 'right']:
ax.spines[s].set_visible(False)
plt.ylabel('Number of skill occurrences' , fontsize = 14, fontfamily='serif');
plt.xlabel('Sales Skills' , fontsize = 14, fontfamily='serif');
plt.xticks(fontsize = 12 , rotation = 90 , fontfamily='serif');
plt.title('Most Important Skills for Sales ' ,fontsize = 16, fontfamily='serif' );
all_skills = Counter()
for i in range(df.shape[0]):
for j in df.Skills[i].split(', '):
all_skills[j] += 1
all_skills = sorted(all_skills.items(), key=lambda x: x[1], reverse=True)[0:25]
x, y = zip(*all_skills)
fig, ax = plt.subplots(figsize=(18, 5))
plt.bar(x, y, alpha=0.7, color='grey', width=0.4)
ax.grid(axis='y', alpha=0.6)
for patch in ax.patches:
bl = patch.get_xy()
x = 0.5 * patch.get_width() + bl[0]
y = 1.02 * patch.get_height() + bl[1]
ax.text(x, y, '%d' % patch.get_height(), ha='center', color='#4a4a4a', fontsize=12, fontfamily='serif')
for s in ['top', 'left', 'right']:
ax.spines[s].set_visible(False)
plt.ylabel("Number of skill's Occurance", fontsize=14, fontfamily='serif')
plt.xlabel('Skills', fontsize=14, fontfamily='serif')
plt.xticks(fontsize=12, rotation=90, fontfamily='serif')
plt.title('Most Important Skills for All Jobs ', fontsize=16, fontfamily='serif') | code |
72113568/cell_1 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
pd.set_option('display.max_columns', 500)
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72113568/cell_7 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
pd.set_option('display.max_columns', 500)
import os
df = pd.read_csv('../input/wuzzuf-jobs/Wuzzuf_Jobs.csv')
fig , ax = plt.subplots (figsize = (18 , 6))
df.Title.value_counts().sort_values(ascending = False).reset_index().head(25).plot(kind = 'bar' ,
x = 'index' , ax = ax ,
alpha = 0.7 , color = 'grey' ,
width=0.4);
ax.grid(axis = 'y' , alpha=0.6)
for patch in ax.patches:
bl = patch.get_xy()
x = 0.5 * patch.get_width() + bl[0]
y = 1.02 * patch.get_height() + bl[1]
ax.text(x,y,"%d" %(patch.get_height()),
ha='center' , color = '#4a4a4a',
fontsize = 12, fontfamily='serif')
for s in ['top', 'left', 'right']:
ax.spines[s].set_visible(False)
plt.ylabel('Number of posted jobs' , fontsize = 14, fontfamily='serif');
plt.xlabel('Job Titel' , fontsize = 14, fontfamily='serif');
plt.xticks(fontsize = 12 , rotation = 90 , fontfamily='serif');
plt.title('Most Job Titles Posted on Website ' ,fontsize = 16, fontfamily='serif' );
df['titel_adj'] = df.Title.apply(lambda x: 'Sales' if 'Sales' in x or 'Telesales' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Accountant' if 'Accountant' in x or 'Accounting' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Marketing' if 'Marketing' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'HR' if 'HR' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Graphic designer' if 'Graphic' in x else x)
df['titel_adj'] = df.titel_adj.apply(lambda x: 'Bussiness Developer' if 'Bussiness Developer' in x else x)
fig, ax = plt.subplots(figsize=(18, 5))
df.titel_adj.value_counts().sort_values(ascending=False).reset_index().head(25).plot(kind='bar', x='index', ax=ax, alpha=0.7, color='grey', width=0.4)
ax.grid(axis='y', alpha=0.6)
for patch in ax.patches:
bl = patch.get_xy()
x = 0.5 * patch.get_width() + bl[0]
y = 1.02 * patch.get_height() + bl[1]
ax.text(x, y, '%d' % patch.get_height(), ha='center', color='#4a4a4a', fontsize=12, fontfamily='serif')
for s in ['top', 'left', 'right']:
ax.spines[s].set_visible(False)
plt.ylabel('Number of posted jobs', fontsize=14, fontfamily='serif')
plt.xlabel('Job Title', fontsize=14, fontfamily='serif')
plt.xticks(fontsize=12, rotation=90, fontfamily='serif')
plt.title('Most Job Titles Posted on Website ', fontsize=16, fontfamily='serif') | code |
104115135/cell_42 | [
"image_output_1.png"
] | from collections import Counter
import feature_engine.transformation as vt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.shape
df.quality.unique()
df.quality.value_counts(ascending=False)
def diagnostic_plots(df, variable, target):
pass
corr = df.corr()
plt.figure(figsize=(20, 9))
k = 12 #number of variables for heatmap
cols = corr.nlargest(k, 'quality')['quality'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values,cmap="Blues")
plt.show()
df.isnull().sum()
def detect_outliers(df, features):
outlier_indices = []
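    # Tukey's IQR rule: flag values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR];
    # only rows that are outliers in more than two features are returned.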
for c in features:
Q1 = np.percentile(df[c], 25)
Q3 = np.percentile(df[c], 75)
IQR = Q3 - Q1
outlier_step = IQR * 1.5
outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
outlier_indices.extend(outlier_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
return multiple_outliers
df.loc[detect_outliers(df, df.columns[:-1])]
cols = ['fixed acidity', 'volatile acidity', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'sulphates', 'alcohol']
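# Log-transform the right-skewed features; LogTransformer requires strictly positive values.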
lt = vt.LogTransformer(variables=cols)
lt.fit(df)
df = lt.transform(df)
df['quality'].value_counts() | code |