path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value)
---|---|---|---|
1008693/cell_34 | [
"text_plain_output_1.png"
] | from numpy import interp  # scipy.interp was an alias of numpy.interp and has been removed from SciPy
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
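# Note: `employees` is not defined in this extract; cell_3 of the same notebook
# loads it (employees = pd.read_csv('../input/HR_comma_sep.csv')).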
employees.shape
employees.mean()
import seaborn as sns
correlation_matrix = employees.corr()
employees['salary'] = pd.factorize(employees['salary'])[0]
employees['sales'] = pd.factorize(employees['sales'])[0]
leave_result = employees['left']
y = np.where(leave_result == 1, 1, 0)
y
X = employees.drop('left', axis=1).to_numpy().astype(np.float64)  # .to_numpy() replaces the removed DataFrame.as_matrix(); np.float64 replaces the removed np.float alias
X
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(X)
from sklearn.model_selection import KFold
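# run_cv produces out-of-fold predictions: each sample is predicted by a model
# trained on the other folds, so y_pred can be scored against y fairly.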
def run_cv(X, y, clf_class, **kwargs):
kf = KFold(n_splits=3, shuffle=True)
y_pred = y.copy()
for train_index, test_index in kf.split(X):
X_train, X_test = (X[train_index], X[test_index])
y_train = y[train_index]
clf = clf_class(**kwargs)
clf.fit(X_train, y_train)
y_pred[test_index] = clf.predict(X_test)
return y_pred
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.linear_model import LogisticRegression as LR
from sklearn.ensemble import GradientBoostingClassifier as GBC
from sklearn.metrics import average_precision_score
def accuracy(y_true, y_pred):
return np.mean(y_true == y_pred)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
def draw_confusion_matrices(confusion_matrices, class_names):
fig = plt.figure()
class_names = class_names.tolist()
for cm in confusion_matrices:
classifier, cm, pos = cm[0], cm[1], cm[2]
print("\n%s" % classifier)
print(cm)
ax = fig.add_subplot(pos, title='Confusion matrix for %s' % classifier, xlabel='Predicted', ylabel='True')
cax = ax.matshow(cm)
plt.title('Confusion matrix for %s' % classifier)
fig.colorbar(cax)
ax.set_xticklabels([''] + class_names)
ax.set_yticklabels([''] + class_names)
plt.tight_layout()
plt.show()
y = np.array(y)
class_names = np.unique(y)
confusion_matrices = [
( "Support Vector Machines", confusion_matrix(y, run_cv(X, y, SVC)), 321 ),
( "Random Forest", confusion_matrix(y, run_cv(X, y, RF)), 322 ),
( "K-Nearest-Neighbors", confusion_matrix(y, run_cv(X, y, KNN)), 323 ),
( "Gradient Boosting Classifier", confusion_matrix(y, run_cv(X, y, GBC)), 324 ),
( "Logisitic Regression", confusion_matrix(y, run_cv(X, y, LR)), 325 )
]
draw_confusion_matrices(confusion_matrices, class_names)
from sklearn.metrics import roc_curve, auc
from numpy import interp
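# plot_roc averages the per-fold ROC curves by interpolating each fold's TPR
# onto a shared 100-point FPR grid, then plots the mean curve and its AUC.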
def plot_roc(X, y, clf_class, clf_name, **kwargs):
kf = KFold(n_splits=3, shuffle=True)
y_prob = np.zeros((len(y), 2))
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train_index, test_index) in enumerate(kf.split(X)):
X_train, X_test = (X[train_index], X[test_index])
y_train = y[train_index]
clf = clf_class(**kwargs)
clf.fit(X_train, y_train)
y_prob[test_index] = clf.predict_proba(X_test)
fpr, tpr, thresholds = roc_curve(y[test_index], y_prob[test_index, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
mean_tpr /= kf.get_n_splits(X)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, lw=2, label='%s (area = %0.3f)' % (clf_name, mean_auc))
def plot_all(title, max_x, min_y):
plot_roc(X, y, SVC, 'Support vector machines', probability=True)
plot_roc(X, y, RF, 'Random forests', n_estimators=18)
plot_roc(X, y, KNN, 'K-nearest-neighbors')
plot_roc(X, y, GBC, 'Gradient Boosting Classifier')
plt.plot([-0.05, max_x], [min_y, 1.05], '--', color=(0.6, 0.6, 0.6), label='Random')
plt.xlim(-0.05, max_x)
plt.ylim(min_y, 1.05)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(title)
plt.legend(loc='lower right')
plt.show()
plt.figure(1)
plot_all('Receiver operating characteristic', 1.05, -0.05)
plt.figure(2)
plot_all('ROC (zoomed in at top left corner)', 0.6, 0.7) | code |
1008693/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
employees.shape
employees.mean()
import seaborn as sns
correlation_matrix = employees.corr()
employees['salary'] = pd.factorize(employees['salary'])[0]
employees['sales'] = pd.factorize(employees['sales'])[0]
leave_result = employees['left']
y = np.where(leave_result == 1, 1, 0)
y
X = employees.drop('left', axis=1).to_numpy().astype(np.float64)
X
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(X)
from sklearn.model_selection import KFold
def run_cv(X, y, clf_class, **kwargs):
kf = KFold(n_splits=3, shuffle=True)
y_pred = y.copy()
for train_index, test_index in kf.split(X):
X_train, X_test = (X[train_index], X[test_index])
y_train = y[train_index]
clf = clf_class(**kwargs)
clf.fit(X_train, y_train)
y_pred[test_index] = clf.predict(X_test)
return y_pred
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.linear_model import LogisticRegression as LR
from sklearn.ensemble import GradientBoostingClassifier as GBC
from sklearn.metrics import average_precision_score
def accuracy(y_true, y_pred):
return np.mean(y_true == y_pred)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
def draw_confusion_matrices(confusion_matrices, class_names):
fig = plt.figure()
class_names = class_names.tolist()
for cm in confusion_matrices:
classifier, cm, pos = (cm[0], cm[1], cm[2])
print('\n%s' % classifier)
print(cm)
ax = fig.add_subplot(pos, title='Confusion matrix for %s' % classifier, xlabel='Predicted', ylabel='True')
cax = ax.matshow(cm)
plt.title('Confusion matrix for %s' % classifier)
fig.colorbar(cax)
ax.set_xticklabels([''] + class_names)
ax.set_yticklabels([''] + class_names)
plt.tight_layout()
plt.show()
y = np.array(y)
class_names = np.unique(y)
confusion_matrices = [('Support Vector Machines', confusion_matrix(y, run_cv(X, y, SVC)), 321), ('Random Forest', confusion_matrix(y, run_cv(X, y, RF)), 322), ('K-Nearest-Neighbors', confusion_matrix(y, run_cv(X, y, KNN)), 323), ('Gradient Boosting Classifier', confusion_matrix(y, run_cv(X, y, GBC)), 324), ('Logistic Regression', confusion_matrix(y, run_cv(X, y, LR)), 325)]
draw_confusion_matrices(confusion_matrices, class_names) | code |
1008693/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
employees.shape
employees.mean()
import seaborn as sns
correlation_matrix = employees.corr()
employees['salary'] = pd.factorize(employees['salary'])[0]
employees['sales'] = pd.factorize(employees['sales'])[0]
leave_result = employees['left']
y = np.where(leave_result == 1, 1, 0)
y
X = employees.drop('left', axis=1).to_numpy().astype(np.float64)
X
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(X)
print('Feature space holds %d observations and %d features' % X.shape)
print('Unique target labels: ', np.unique(y)) | code |
1008693/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
employees.shape
employees.mean()
import seaborn as sns
correlation_matrix = employees.corr()
employees['salary'] = pd.factorize(employees['salary'])[0]
employees['sales'] = pd.factorize(employees['sales'])[0]
leave_result = employees['left']
y = np.where(leave_result == 1, 1, 0)
y
X = employees.drop('left', axis=1).to_numpy().astype(np.float64)
X | code |
1008693/cell_8 | [
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
employees.shape
employees.mean()
import seaborn as sns
correlation_matrix = employees.corr()
plt.subplots(figsize=(8, 8))
sns.heatmap(correlation_matrix, vmax=0.8, square=True)
plt.show() | code |
1008693/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
employees.shape
employees.mean()
import seaborn as sns
correlation_matrix = employees.corr()
employees['salary'] = pd.factorize(employees['salary'])[0]
employees['sales'] = pd.factorize(employees['sales'])[0]
employees.head() | code |
1008693/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
employees = pd.read_csv('../input/HR_comma_sep.csv')
employees.head() | code |
1008693/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
employees.shape
employees.mean()
import seaborn as sns
correlation_matrix = employees.corr()
employees['salary'] = pd.factorize(employees['salary'])[0]
employees['sales'] = pd.factorize(employees['sales'])[0]
leave_result = employees['left']
y = np.where(leave_result == 1, 1, 0)
y | code |
1008693/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
employees.shape
employees.mean()
import seaborn as sns
correlation_matrix = employees.corr()
employees['sales'].unique() | code |
1008693/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
employees.shape
employees.mean()
import seaborn as sns
correlation_matrix = employees.corr()
employees['salary'].unique() | code |
1008693/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | employees.shape
employees.mean() | code |
128027378/cell_13 | [
"text_html_output_1.png"
] | from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
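# The mapping above encodes smoking history as a rough ordinal score:
# never = 0, current = 1, and everything else (including 'No Info') = 0.5.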
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
df.diabetes.value_counts()
def plot_histogram(dataset: pd.DataFrame):
unique_labels, counts = np.unique(dataset.diabetes, return_counts=True)
import pandas as pd
from sklearn import preprocessing
x_unscaled = df.drop('diabetes', axis=1).values
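# Min-max scale every feature into [0, 1] so distance-sensitive models
# treat the columns on a comparable footing.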
min_max_scaler = preprocessing.MinMaxScaler()
X = min_max_scaler.fit_transform(x_unscaled)
example = pd.DataFrame(X)
example.head(10) | code |
128027378/cell_9 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
df.diabetes.value_counts() | code |
128027378/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df.head() | code |
128027378/cell_6 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
df.head(20) | code |
128027378/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
df.diabetes.value_counts()
def plot_histogram(dataset: pd.DataFrame):
unique_labels, counts = np.unique(dataset.diabetes, return_counts=True)
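    # Assumption: the original cell rendered a chart here (its output is an
    # image); a minimal reconstruction of the presumably truncated plot body:
    plt.bar(unique_labels.astype(str), counts)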
plot_histogram(df) | code |
128027378/cell_1 | [
"text_plain_output_1.png"
] | import os
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128027378/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm') | code |
128027378/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df = df[df.gender != 'Other']
df['gender'].replace(['Female', 'Male'], [0, 1], inplace=True)
df.smoking_history.replace(['No Info', 'never', 'former', 'current', 'not current', 'ever'], [0.5, 0, 0.5, 1, 0.5, 0.5], inplace=True)
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
df.head(10) | code |
128027378/cell_16 | [
"text_html_output_1.png"
] | from sklearn.metrics import accuracy_score, recall_score
from sklearn.svm import SVC
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, recall_score
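# Note: X_train_res / y_train_res / X_test_res / y_test_res are not defined in
# this extract; the '_res' suffix suggests a resampling (class-balancing) step
# in a cell that was not captured here.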
svm_clf = SVC()
svm_clf.fit(X_train_res, y_train_res)
svm_clf_preds = svm_clf.predict(X_test_res)
print('SVM Classifier recall on validation data : ', recall_score(y_test_res, svm_clf_preds))
print('SVM Classifier accuracy on validation data : ', accuracy_score(y_test_res, svm_clf_preds)) | code |
128027378/cell_17 | [
"text_html_output_1.png"
] | from sklearn.metrics import accuracy_score, recall_score
from xgboost import XGBClassifier
from xgboost import XGBClassifier
xgb_clf = XGBClassifier(early_stopping_rounds=3)
xgb_clf.fit(X_train_res, y_train_res, eval_set=[(X_test_res, y_test_res)])
xgb_clf_preds = xgb_clf.predict(X_test_res)
print('Recall of XGBoost on validation data : ', recall_score(y_test_res, xgb_clf_preds))
print('Accuracy of XGBoost on validation data : ', accuracy_score(y_test_res, xgb_clf_preds)) | code |
34124545/cell_25 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.sort_values(by='num_subscribers', ascending=False)
data_paid[data_paid['price'] == '200']['subject'].value_counts() | code |
34124545/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.describe() | code |
34124545/cell_34 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.sort_values(by='num_subscribers', ascending=False)
data_paid[data_paid['engagement'] == 1.0] | code |
34124545/cell_23 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.sort_values(by='num_subscribers', ascending=False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_subscribers', hue='num_subscribers', ax=ax, data=data_paid).set(title='price vs subscribers(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_lectures', hue='num_lectures', ax=ax, data=data_paid).set(title='price vs number of lectures(paid)', xlabel='price')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False) | code |
34124545/cell_30 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_free = data[data['is_paid'] == False]
data_free.shape
data_free.sort_values(by='num_subscribers', ascending=False)
data_paid.sort_values(by='num_subscribers', ascending=False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_subscribers', hue='num_subscribers', ax=ax, data=data_paid).set(title='price vs subscribers(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_lectures', hue='num_lectures', ax=ax, data=data_paid).set(title='price vs number of lectures(paid)', xlabel='price')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.countplot(x='subject', data=data_free) | code |
34124545/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.sort_values(by='num_subscribers', ascending=False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_subscribers', hue='num_subscribers', ax=ax, data=data_paid).set(title='price vs subscribers(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False) | code |
34124545/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
len(data['course_title'].value_counts()) | code |
34124545/cell_39 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.sort_values(by='num_subscribers', ascending=False)
data_paid_10 = data_paid.sort_values(by='num_subscribers', ascending=False).head(10).reset_index(drop=True)[['course_id', 'course_title', 'num_subscribers', 'num_reviews', 'price']]
data_paid_10 | code |
34124545/cell_26 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_free = data[data['is_paid'] == False]
data_free.shape
data_free.sort_values(by='num_subscribers', ascending=False)
data_free['subject'].value_counts() | code |
34124545/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.head() | code |
34124545/cell_7 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape | code |
34124545/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.sort_values(by='num_subscribers', ascending=False) | code |
34124545/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_free = data[data['is_paid'] == False]
data_free.shape
data_free.sort_values(by='num_subscribers', ascending=False)
data_paid.sort_values(by='num_subscribers', ascending=False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_subscribers', hue='num_subscribers', ax=ax, data=data_paid).set(title='price vs subscribers(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_lectures', hue='num_lectures', ax=ax, data=data_paid).set(title='price vs number of lectures(paid)', xlabel='price')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='engagement', hue='num_lectures', ax=ax, data=data_paid).set(title='price vs engagement(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False) | code |
34124545/cell_28 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.sort_values(by='num_subscribers', ascending=False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_subscribers', hue='num_subscribers', ax=ax, data=data_paid).set(title='price vs subscribers(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_lectures', hue='num_lectures', ax=ax, data=data_paid).set(title='price vs number of lectures(paid)', xlabel='price')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.countplot(x='subject', data=data_paid) | code |
34124545/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_free = data[data['is_paid'] == False]
data_free.shape
data_free.head() | code |
34124545/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.head() | code |
34124545/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_free = data[data['is_paid'] == False]
data_free.shape
data_free.sort_values(by='num_subscribers', ascending=False) | code |
34124545/cell_35 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_free = data[data['is_paid'] == False]
data_free.shape
data_free.sort_values(by='num_subscribers', ascending=False)
data_paid.sort_values(by='num_subscribers', ascending=False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_subscribers', hue='num_subscribers', ax=ax, data=data_paid).set(title='price vs subscribers(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_lectures', hue='num_lectures', ax=ax, data=data_paid).set(title='price vs number of lectures(paid)', xlabel='price')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='engagement', hue='num_lectures', ax=ax, data=data_paid).set(title='price vs engagement(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.set_palette('Blues_d')
sns.scatterplot(x='num_lectures', y='engagement', hue='num_lectures', ax=ax, data=data_paid).set(title='engagement vs number of lectures(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False) | code |
34124545/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data[data['course_title'].str.contains('Data', na=False)] | code |
34124545/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.sort_values(by='num_subscribers', ascending=False)
data_paid['subject'].value_counts() | code |
34124545/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_free = data[data['is_paid'] == False]
data_free.shape | code |
34124545/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.sort_values(by='num_subscribers', ascending=False)
data_paid[data_paid['num_subscribers'] == max(data_paid['num_subscribers'])] | code |
34124545/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape | code |
34124545/cell_37 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_free = data[data['is_paid'] == False]
data_free.shape
data_free.sort_values(by='num_subscribers', ascending=False)
data_paid.sort_values(by='num_subscribers', ascending=False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_subscribers', hue='num_subscribers', ax=ax, data=data_paid).set(title='price vs subscribers(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='num_lectures', hue='num_lectures', ax=ax, data=data_paid).set(title='price vs number of lectures(paid)', xlabel='price')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='price', y='engagement', hue='num_lectures', ax=ax, data=data_paid).set(title='price vs engagement(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.set_palette("Blues_d")
sns.scatterplot(x="num_lectures", y="engagement",hue="num_lectures",ax=ax ,data=data_paid).set(title = 'engagement vs number of lectures(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.scatterplot(x='num_subscribers', y='num_reviews', hue='num_reviews', ax=ax, data=data_paid).set(title='num_subscribers vs num_reviews(paid)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False) | code |
34124545/cell_36 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd
data = pd.read_csv('../input/udemy-courses/clean_dataset.csv')
data.shape
data_paid = data[data['is_paid'] == True]
data_paid.shape
data_paid.sort_values(by='num_subscribers', ascending=False)
data_paid[data_paid['num_lectures'] == max(data_paid['num_lectures'])] | code |
32069437/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu = pd.read_csv('/kaggle/input/nutrition-facts/menu.csv')
menu.shape | code |
32069437/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu = pd.read_csv('/kaggle/input/nutrition-facts/menu.csv')
menu.shape
menu.sort_values('Serving Size').tail(10) | code |
32069437/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32069437/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu = pd.read_csv('/kaggle/input/nutrition-facts/menu.csv')
menu.shape
menu.sort_values('Serving Size').tail(10)
menu.loc[menu.Sugars.idxmax()].Item
menu.set_index('Item').loc['Egg McMuffin', 'Calories']
menu.Category.value_counts()
menu.groupby('Category').Calories.mean().round(2)
menu.Category.value_counts().plot.pie() | code |
32069437/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu = pd.read_csv('/kaggle/input/nutrition-facts/menu.csv')
menu.shape
menu.sort_values('Serving Size').tail(10)
menu.loc[menu.Sugars.idxmax()].Item | code |
32069437/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu = pd.read_csv('/kaggle/input/nutrition-facts/menu.csv')
menu.shape
menu.sort_values('Serving Size').tail(10)
menu.loc[menu.Sugars.idxmax()].Item
menu.set_index('Item').loc['Egg McMuffin', 'Calories']
menu.Category.value_counts()
menu.groupby('Category').Calories.mean().round(2)
menu.plot.scatter(x='Carbohydrates', y='Total Fat') | code |
32069437/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu = pd.read_csv('/kaggle/input/nutrition-facts/menu.csv')
menu.shape
menu.sort_values('Serving Size').tail(10)
menu.loc[menu.Sugars.idxmax()].Item
menu.set_index('Item').loc['Egg McMuffin', 'Calories']
menu.Category.value_counts()
menu.groupby('Category').Calories.mean().round(2) | code |
32069437/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu = pd.read_csv('/kaggle/input/nutrition-facts/menu.csv')
menu.shape
menu.sort_values('Serving Size').tail(10)
menu.loc[menu.Sugars.idxmax()].Item
menu.set_index('Item').loc['Egg McMuffin', 'Calories'] | code |
32069437/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu = pd.read_csv('/kaggle/input/nutrition-facts/menu.csv')
menu.shape
menu.sort_values('Serving Size').tail(10)
menu.loc[menu.Sugars.idxmax()].Item
menu.set_index('Item').loc['Egg McMuffin', 'Calories']
menu.Category.value_counts() | code |
72066220/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/iris/Iris.csv'
data = pd.read_csv(path)
y = data.Species
data.drop(['Id', 'Species'], axis=1, inplace=True)
data.shape | code |
72066220/cell_25 | [
"image_output_1.png"
] | from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
path = '../input/iris/Iris.csv'
data = pd.read_csv(path)
y = data.Species
data.drop(['Id', 'Species'], axis=1, inplace=True)
data.shape
x = data.iloc[:].values
from sklearn.cluster import KMeans
wcss = []
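# Elbow method: fit k-means for k = 1..10 and record the inertia
# (within-cluster sum of squares, WCSS) for each k.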
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300, random_state=0)
kmeans.fit(x)
wcss.append(kmeans.inertia_)
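# Refit with k = 3, the elbow suggested by the WCSS curve (and the number of
# iris species); fit_predict returns a cluster label for every sample.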
kmeans = KMeans(n_clusters=3, init='k-means++', n_init=10, max_iter=300, random_state=0)
y = kmeans.fit_predict(x)
plt.scatter(x[y == 0, 0], x[y == 0, 1], s=60, c='b', label='Iris-setosa')
plt.scatter(x[y == 1, 0], x[y == 1, 1], s=60, c='r', label='Iris-versicolour')
plt.scatter(x[y == 2, 0], x[y == 2, 1], s=60, c='g', label='Iris-virginica')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=100, c='yellow', label='Centroid')
plt.title('Cluster with centroids')
plt.legend()
plt.show() | code |
72066220/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
import pandas as pd
path = '../input/iris/Iris.csv'
data = pd.read_csv(path)
y = data.Species
data.drop(['Id', 'Species'], axis=1, inplace=True)
data.shape
x = data.iloc[:].values
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300, random_state=0)
kmeans.fit(x)
wcss.append(kmeans.inertia_)
for i in range(0, 10):
print('{} : {}'.format(i + 1, wcss[i])) | code |
72066220/cell_6 | [
"image_output_1.png"
] | import pandas as pd
path = '../input/iris/Iris.csv'
data = pd.read_csv(path)
print('Total Species: ', data.Species.nunique())
print(data.Species.unique()) | code |
72066220/cell_19 | [
"text_html_output_1.png"
] | from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
path = '../input/iris/Iris.csv'
data = pd.read_csv(path)
y = data.Species
data.drop(['Id', 'Species'], axis=1, inplace=True)
data.shape
x = data.iloc[:].values
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300, random_state=0)
kmeans.fit(x)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss, marker='o')
plt.title('Elbow method')
plt.xlabel('No. of clusters')
plt.ylabel('WCSS')
plt.grid(True)
plt.show() | code |
72066220/cell_10 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd
path = '../input/iris/Iris.csv'
data = pd.read_csv(path)
y = data.Species
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
y_data = encoder.fit_transform(y)
y_data | code |
72066220/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/iris/Iris.csv'
data = pd.read_csv(path)
y = data.Species
data.drop(['Id', 'Species'], axis=1, inplace=True)
data.head() | code |
72066220/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/iris/Iris.csv'
data = pd.read_csv(path)
data.head() | code |
130026088/cell_9 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import seaborn as sns
import seaborn as sns
sns.set()
(sns.get_dataset_names(), len(sns.get_dataset_names()))
healthexp = sns.load_dataset('healthexp')
healthexp
top_spending_countrys = healthexp[['Country', 'Life_Expectancy']]
top_spending_countrys
Spending_USD_by_year = healthexp.groupby('Year')['Spending_USD'].sum()
Spending_USD_by_year
plt.figure(figsize=(12, 6))
sns.lineplot(x=Spending_USD_by_year.index, y=Spending_USD_by_year.values, color='blue')
plt.title('Spending_USD_by_year')
plt.xlabel('Year')
plt.ylabel('Spending_USD')
plt.show() | code |
130026088/cell_4 | [
"text_html_output_1.png"
] | import seaborn as sns
import seaborn as sns
sns.set()
(sns.get_dataset_names(), len(sns.get_dataset_names())) | code |
130026088/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
130026088/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import seaborn as sns
import seaborn as sns
sns.set()
(sns.get_dataset_names(), len(sns.get_dataset_names()))
healthexp = sns.load_dataset('healthexp')
healthexp
top_spending_countrys = healthexp[['Country', 'Life_Expectancy']]
top_spending_countrys
plt.figure(figsize=(10, 6))
sns.barplot(x='Life_Expectancy', y='Country', data=top_spending_countrys, palette='viridis')
plt.title('Life_Expectancy')
plt.xlabel('Life_Expectancy')
plt.ylabel('Country')
plt.show() | code |
130026088/cell_3 | [
"text_plain_output_1.png"
] | import seaborn as sns
import seaborn as sns
sns.set() | code |
130026088/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import seaborn as sns
import seaborn as sns
sns.set()
(sns.get_dataset_names(), len(sns.get_dataset_names()))
healthexp = sns.load_dataset('healthexp')
healthexp
top_spending_countrys = healthexp[['Country', 'Life_Expectancy']]
top_spending_countrys
Spending_USD_by_year = healthexp.groupby('Year')['Spending_USD'].sum()
Spending_USD_by_year
regional_Spending_and_Life_Expectancy = healthexp.groupby('Country')[['Year', 'Spending_USD', 'Life_Expectancy']].sum()
plt.figure(figsize=(10, 6))
sns.heatmap(data=regional_Spending_and_Life_Expectancy, cmap='YlGnBu', annot=True, fmt='.1f')
plt.title('regional_Spending_and_Life_Expectancy')
plt.xlabel('Features')
plt.ylabel('Country')
plt.show() | code |
130026088/cell_5 | [
"image_output_1.png"
] | import seaborn as sns
import seaborn as sns
sns.set()
(sns.get_dataset_names(), len(sns.get_dataset_names()))
healthexp = sns.load_dataset('healthexp')
healthexp | code |
72070182/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
train_data_file_path = '../input/30-days-of-ml/train.csv'
test_data_file_path = '../input/30-days-of-ml/test.csv'
df_train = pd.read_csv(train_data_file_path, index_col=0)
df_test = pd.read_csv(test_data_file_path, index_col=0)
df_train.head() | code |
72070182/cell_6 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OrdinalEncoder
import pandas as pd
train_data_file_path = '../input/30-days-of-ml/train.csv'
test_data_file_path = '../input/30-days-of-ml/test.csv'
df_train = pd.read_csv(train_data_file_path, index_col=0)
df_test = pd.read_csv(test_data_file_path, index_col=0)
y = df_train['target']
features = df_train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
X = features.copy()
X_test = df_test.copy()
ordinal_encoder = OrdinalEncoder()
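# Fit the ordinal encoder on the training categories and reuse the exact same
# integer mapping on the test set; categories unseen at fit time would error.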
X[object_cols] = ordinal_encoder.fit_transform(features[object_cols])
X_test[object_cols] = ordinal_encoder.transform(df_test[object_cols])
X.head() | code |
72070182/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
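# Note: X_train, X_valid, y_train, y_valid are not defined in this extract;
# they presumably come from a train_test_split cell that was not captured here.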
model = RandomForestRegressor(random_state=1)
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
print(mean_squared_error(y_valid, preds_valid, squared=False)) | code |
72070182/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train_data_file_path = '../input/30-days-of-ml/train.csv'
test_data_file_path = '../input/30-days-of-ml/test.csv'
df_train = pd.read_csv(train_data_file_path, index_col=0)
df_test = pd.read_csv(test_data_file_path, index_col=0)
y = df_train['target']
features = df_train.drop(['target'], axis=1)
features.head() | code |
105189181/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 10), facecolor='w')
sns.boxplot(data=data)
plt.show() | code |
105189181/cell_9 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum() | code |
105189181/cell_4 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape | code |
105189181/cell_34 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
dfvalued = data.groupby('Country', as_index=False).Valuation.count()
dfvalued.sort_values(by='Valuation', ascending=False).head(10)
dfcom = data.groupby('Company', as_index=False).Valuation.max()
dfcom.sort_values(by='Valuation', ascending=False).head(1)
dfcity = data.groupby('City', as_index=False).Valuation.count()
print('Which city has the most unicorn startups in the world (by count)')
dfcity.sort_values(by='Valuation', ascending=False).head(1) | code |
105189181/cell_23 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
USData = data[data['Country'] == 'United States']
print('Number of US unicorn startups:')
USData['Company'].count() | code |
105189181/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
dfvalued = data.groupby('Country', as_index=False).Valuation.count()
print('Top 10 countries by number of unicorn startups')
dfvalued.sort_values(by='Valuation', ascending=False).head(10) | code |
105189181/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
data.year.value_counts().plot(kind='bar', figsize=(20, 3))
plt.title('Companies joining the unicorn club per year', fontdict={'fontsize': 20})
105189181/cell_6 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns | code |
105189181/cell_40 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
dfvalued = data.groupby('Country', as_index=False).Valuation.count()
dfvalued.sort_values(by='Valuation', ascending=False).head(10)
dfcom = data.groupby('Company', as_index=False).Valuation.max()
dfcom.sort_values(by='Valuation', ascending=False).head(1)
dfcity = data.groupby('City', as_index=False).Valuation.count()
dfcity.sort_values(by='Valuation', ascending=False).head(1)
dfcity = data.groupby('City', as_index=False).Valuation.count()
dfcity.sort_values(by='Valuation', ascending=True)
print('Total number of companies in all cities')
data.groupby('City', as_index=False).Valuation.count() | code |
105189181/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum() | code |
105189181/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
data.Industry.value_counts().plot(kind='bar', figsize=(20, 3))
plt.title('Unicorn counts by industry', fontdict={'fontsize': 20})
105189181/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105189181/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.info() | code |
105189181/cell_32 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
dfvalued = data.groupby('Country', as_index=False).Valuation.count()
dfvalued.sort_values(by='Valuation', ascending=False).head(10)
dfcom = data.groupby('Company', as_index=False).Valuation.max()
print('Company with the highest valuation')
dfcom.sort_values(by='Valuation', ascending=False).head(1) | code |
105189181/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
data.describe() | code |
105189181/cell_38 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
dfvalued = data.groupby('Country', as_index=False).Valuation.count()
dfvalued.sort_values(by='Valuation', ascending=False).head(10)
dfcom = data.groupby('Company', as_index=False).Valuation.max()
dfcom.sort_values(by='Valuation', ascending=False).head(1)
dfcity = data.groupby('City', as_index=False).Valuation.count()
dfcity.sort_values(by='Valuation', ascending=False).head(1)
dfcity = data.groupby('City', as_index=False).Valuation.count()
dfcity.sort_values(by='Valuation', ascending=True)
SFData = data[data['City'] == 'San Francisco']
SFData.head(10) | code |
105189181/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
data.Industry.unique() | code |
105189181/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
USData = data[data['Country'] == 'United States']
USData.plot(kind='line') | code |
105189181/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
USData = data[data['Country'] == 'United States']
USData.head(10) | code |
105189181/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum() | code |
105189181/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
USData = data[data['Country'] == 'United States']
print('Number of fintech companies in the US:', USData['Industry'].value_counts().Fintech) | code |
105189181/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape | code |
105189181/cell_5 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.head(5) | code |
105189181/cell_36 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/world-wide-unicorn-startups/World_Wide_Unicorn_Startups.csv')
data.shape
data.columns
data.isnull().sum()
data.duplicated().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.shape
import seaborn as sns
import matplotlib.pyplot as plt
data.Industry.unique()
dfvalued = data.groupby('Country', as_index=False).Valuation.count()
dfvalued.sort_values(by='Valuation', ascending=False).head(10)
dfcom = data.groupby('Company', as_index=False).Valuation.max()
dfcom.sort_values(by='Valuation', ascending=False).head(1)
dfcity = data.groupby('City', as_index=False).Valuation.count()
dfcity.sort_values(by='Valuation', ascending=False).head(1)
dfcity = data.groupby('City', as_index=False).Valuation.count()
print('Unicorn startup counts for all cities')
dfcity.sort_values(by='Valuation', ascending=True) | code |
89143018/cell_21 | [
"text_plain_output_1.png"
] | from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.ml.regression import LinearRegression
from pyspark.ml.regression import LinearRegression
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
from pyspark.sql import SparkSession
import string
import matplotlib.pyplot as plt
from pyspark.sql.functions import split
from pyspark.sql.functions import col
from pyspark.sql.types import IntegerType
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
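# Convert the 'ft-in' height string to total inches: map the feet digit to
# inches (4 ft = 48, ..., 7 ft = 84), fill missing inch parts with 0, then sum.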
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from pyspark.ml.feature import VectorAssembler
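# VectorAssembler packs the listed input columns into a single 'features'
# vector column, the input format Spark ML estimators expect.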
vectorAssembler = VectorAssembler(inputCols=['weight', 'totalHeight'], outputCol='features')
regression_df = vectorAssembler.transform(fixedData)
regression_df = regression_df.select(['features', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df)
trainingSummary = lr_model.summary
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight'], outputCol='features2')
regression_df2 = vectorAssembler.transform(fixedData)
regression_df2 = regression_df2.select(['features2', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features2', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df2)
trainingSummary = lr_model.summary
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['totalHeight'], outputCol='features3')
regression_df3 = vectorAssembler.transform(fixedData)
regression_df3 = regression_df3.select(['features3', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features3', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df3)
print('Coefficients: ' + str(lr_model.coefficients))
print('Intercept: ' + str(lr_model.intercept)) | code |
89143018/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
from pyspark.sql import SparkSession
import string
import matplotlib.pyplot as plt
from pyspark.sql.functions import split
from pyspark.sql.functions import col
from pyspark.sql.types import IntegerType
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight', 'totalHeight'], outputCol='features')
regression_df = vectorAssembler.transform(fixedData)
regression_df = regression_df.select(['features', 'kickReturnYardage'])
regression_df.show(3) | code |