path: stringlengths (13 to 17)
screenshot_names: sequencelengths (1 to 873)
code: stringlengths (0 to 40.4k)
cell_type: stringclasses (1 value)
90123428/cell_29
[ "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV from sklearn.tree import DecisionTreeClassifier parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2 * n for n in range(1, 10)], 'max_features': ['auto', 'sqrt'], 'min_samples_leaf': [1, 2, 4], 'min_samples_split': [2, 5, 10]} tree = DecisionTreeClassifier() tree_cv = GridSearchCV(tree, parameters, cv=10) tree_cv.fit(x_train, y_train) print('tuned hyperparameters (best parameters):', tree_cv.best_params_) print('accuracy:', tree_cv.best_score_)
code
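A quick note on reusing the search above: GridSearchCV refits the best configuration on the training data by default, so the tuned tree can be evaluated directly on the held-out split. A minimal sketch, assuming tree_cv has been fitted and x_test / y_test come from the notebook's earlier train/test split:

```python
from sklearn import metrics

# The estimator refit with tree_cv.best_params_ on the training data.
best_tree = tree_cv.best_estimator_
y_hat = best_tree.predict(x_test)
print('test accuracy:', metrics.accuracy_score(y_test, y_hat))
```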
90123428/cell_41
[ "text_plain_output_1.png" ]
print('Your submission was successfully saved!')
code
90123428/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.neighbors import KNeighborsClassifier from sklearn import metrics from sklearn import preprocessing from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
90123428/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('/kaggle/input/titanic/train.csv') train_data.dtypes
code
90123428/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train_data = pd.read_csv('/kaggle/input/titanic/train.csv') test_data = pd.read_csv('/kaggle/input/titanic/test.csv') train_data.dtypes train_data.isnull().sum() test_data.isnull().sum() train_data['Age'].fillna(train_data['Age'].mean(), inplace=True) train_data['Cabin'].fillna('Unknown', inplace=True) train_data['Embarked'].fillna('Unknown', inplace=True) test_data['Age'].fillna(test_data['Age'].mean(), inplace=True) test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True) test_data['Cabin'].fillna('Unknown', inplace=True) train_data.groupby(['Sex'])['Survived'].value_counts(normalize=True) train_data.groupby(['Pclass'])['Survived'].value_counts(normalize=True) sns.barplot(x='Pclass', y='Survived', data=train_data) plt.xlabel('Passenger Class') plt.ylabel('Survival Rate') plt.title('Survival Rates by Passenger Class on the Titanic') plt.show()
code
90123428/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('/kaggle/input/titanic/train.csv') train_data.dtypes train_data.isnull().sum()
code
90123428/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train_data = pd.read_csv('/kaggle/input/titanic/train.csv') test_data = pd.read_csv('/kaggle/input/titanic/test.csv') train_data.dtypes train_data.isnull().sum() test_data.isnull().sum() train_data['Age'].fillna(train_data['Age'].mean(), inplace=True) train_data['Cabin'].fillna('Unknown', inplace=True) train_data['Embarked'].fillna('Unknown', inplace=True) test_data['Age'].fillna(test_data['Age'].mean(), inplace=True) test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True) test_data['Cabin'].fillna('Unknown', inplace=True) train_data.groupby(['Sex'])['Survived'].value_counts(normalize=True) sns.barplot(x='Sex', y='Survived', data=train_data) plt.xlabel('Gender') plt.ylabel('Survival Rate') plt.title('Survival Rates by Gender on the Titanic') plt.show()
code
90123428/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('/kaggle/input/titanic/train.csv') test_data = pd.read_csv('/kaggle/input/titanic/test.csv') train_data.dtypes train_data.isnull().sum() test_data.isnull().sum() train_data['Age'].fillna(train_data['Age'].mean(), inplace=True) train_data['Cabin'].fillna('Unknown', inplace=True) train_data['Embarked'].fillna('Unknown', inplace=True) test_data['Age'].fillna(test_data['Age'].mean(), inplace=True) test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True) test_data['Cabin'].fillna('Unknown', inplace=True) train_data.groupby(['Sex'])['Survived'].value_counts(normalize=True) train_data.groupby(['Pclass'])['Survived'].value_counts(normalize=True)
code
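The two normalized value_counts calls above can be condensed into a single table per grouping; a small sketch on the same train_data frame (nothing is assumed beyond the columns already used):

```python
import pandas as pd

# Survival rate per passenger class; rows sum to 1 because of normalize='index'.
print(pd.crosstab(train_data['Pclass'], train_data['Survived'], normalize='index'))

# Fraction of survivors per sex.
print(train_data.groupby('Sex')['Survived'].mean())
```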
90123428/cell_3
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('/kaggle/input/titanic/train.csv') train_data.head()
code
90123428/cell_37
[ "image_output_1.png" ]
from sklearn import metrics from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier import numpy as np # linear algebra Ks = 100 mean_acc = np.zeros(Ks - 1) std_acc = np.zeros(Ks - 1) for n in range(1, Ks): neigh = KNeighborsClassifier(n_neighbors=n).fit(x_train, y_train) yhat = neigh.predict(x_test) mean_acc[n - 1] = metrics.accuracy_score(y_test, yhat) std_acc[n - 1] = np.std(yhat == y_test) / np.sqrt(yhat.shape[0]) from sklearn.ensemble import RandomForestClassifier Ns = 200 mean_acc = np.zeros(Ns - 1) for n in range(1, Ns): forest = RandomForestClassifier(n_estimators=n).fit(x_train, y_train) yhat = forest.predict(x_test) mean_acc[n - 1] = metrics.accuracy_score(y_test, yhat) print('The best accuracy was ', mean_acc.max(), 'with n = ', mean_acc.argmax() + 1)
code
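The loops above record one accuracy per parameter value but never visualize the sweep; a minimal plotting sketch, assuming mean_acc and std_acc as computed in the KNN loop (before mean_acc is reused for the forest loop):

```python
import numpy as np
import matplotlib.pyplot as plt

# Accuracy versus k with a +/- one-standard-error band around the curve.
k_values = np.arange(1, len(mean_acc) + 1)
plt.plot(k_values, mean_acc, color='green')
plt.fill_between(k_values, mean_acc - std_acc, mean_acc + std_acc, alpha=0.2)
plt.xlabel('Number of neighbors (k)')
plt.ylabel('Accuracy on the test split')
plt.show()
```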
90123428/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_data = pd.read_csv('/kaggle/input/titanic/train.csv') test_data = pd.read_csv('/kaggle/input/titanic/test.csv') train_data.dtypes train_data.isnull().sum() test_data.isnull().sum() train_data['Age'].fillna(train_data['Age'].mean(), inplace=True) train_data['Cabin'].fillna('Unknown', inplace=True) train_data['Embarked'].fillna('Unknown', inplace=True) test_data['Age'].fillna(test_data['Age'].mean(), inplace=True) test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True) test_data['Cabin'].fillna('Unknown', inplace=True) train_data.groupby(['Sex'])['Survived'].value_counts(normalize=True)
code
105211208/cell_13
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.isnull().sum() df.duplicated().sum() visualize(df.drop(['car_ID', 'CarName'], axis=1)) df.columns
code
105211208/cell_9
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.info()
code
105211208/cell_25
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') model = LinearRegression() model.fit(x_train, y_train) y_predict = model.predict(x_test) evaluate(model, x_train, y_train, x_test, y_test, y_predict)
code
105211208/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import linear_model from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.preprocessing import RobustScaler import numpy as np import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.isnull().sum() df.duplicated().sum() visualize(df.drop(['car_ID', 'CarName'], axis=1)) df.columns limits = {} def IQRoutliers(data): global limits for c in data.columns: if data[c].dtype == 'int64' or data[c].dtype == 'float64': Q1, Q3 = np.percentile(data[c], [25, 75]) IQR = Q3 - Q1 limits[c] = [Q1 - 1.5 * IQR, Q3 + 1.5 * IQR] df[c] = np.where(df[c] > limits[c][1], limits[c][1], np.where(df[c] < limits[c][0], limits[c][0], df[c])) IQRoutliers(df.drop(['car_ID'], axis=1)) visualize(df.drop(['car_ID', 'CarName'], axis=1)) df = pd.get_dummies(df) model = LinearRegression() model.fit(x_train, y_train) y_predict = model.predict(x_test) evaluate(model, x_train, y_train, x_test, y_test, y_predict) model = linear_model.Lasso() model.fit(x_train, y_train) y_predict = model.predict(x_test) evaluate(model, x_train, y_train, x_test, y_test, y_predict) scaler = RobustScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.fit_transform(x_test) model = linear_model.BayesianRidge() model.fit(x_train, y_train) y_predict = model.predict(x_test) result = pd.DataFrame({'Y_test': y_test, 'Y_predicted': y_predict}) evaluate(model, x_train, y_train, x_test, y_test, y_predict)
code
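One caveat about the scaling step in the cell above: fit_transform is called on both splits, so the test data is scaled with its own median and IQR rather than the training statistics. A common alternative, sketched here with the notebook's x_train / x_test names assumed, fits the scaler once on the training split only:

```python
from sklearn.preprocessing import RobustScaler

# Fit on the training data only, then reuse those statistics for the test data.
scaler = RobustScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_test_scaled = scaler.transform(x_test)
```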
105211208/cell_30
[ "text_plain_output_1.png" ]
from sklearn import linear_model from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') model = LinearRegression() model.fit(x_train, y_train) y_predict = model.predict(x_test) evaluate(model, x_train, y_train, x_test, y_test, y_predict) model = linear_model.Lasso() model.fit(x_train, y_train) y_predict = model.predict(x_test) evaluate(model, x_train, y_train, x_test, y_test, y_predict) sns.regplot(x=y_test, y=y_predict, color='green')
code
105211208/cell_29
[ "text_html_output_1.png" ]
from sklearn import linear_model from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') model = LinearRegression() model.fit(x_train, y_train) y_predict = model.predict(x_test) evaluate(model, x_train, y_train, x_test, y_test, y_predict) model = linear_model.Lasso() model.fit(x_train, y_train) y_predict = model.predict(x_test) evaluate(model, x_train, y_train, x_test, y_test, y_predict)
code
105211208/cell_26
[ "image_output_11.png", "image_output_24.png", "text_plain_output_5.png", "text_plain_output_15.png", "image_output_17.png", "text_plain_output_9.png", "image_output_14.png", "image_output_23.png", "text_plain_output_4.png", "text_plain_output_13.png", "image_output_13.png", "image_output_5.png", "text_plain_output_14.png", "image_output_18.png", "image_output_21.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "image_output_20.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "image_output_16.png", "text_plain_output_8.png", "image_output_6.png", "image_output_12.png", "image_output_22.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_11.png", "text_plain_output_12.png", "image_output_15.png", "image_output_9.png", "image_output_19.png" ]
from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') model = LinearRegression() model.fit(x_train, y_train) y_predict = model.predict(x_test) evaluate(model, x_train, y_train, x_test, y_test, y_predict) sns.regplot(x=y_test, y=y_predict, color='green')
code
105211208/cell_11
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.isnull().sum() df.duplicated().sum()
code
105211208/cell_19
[ "image_output_11.png", "image_output_24.png", "text_plain_output_5.png", "text_plain_output_15.png", "image_output_17.png", "text_plain_output_9.png", "image_output_14.png", "image_output_23.png", "text_plain_output_4.png", "text_plain_output_13.png", "image_output_13.png", "image_output_5.png", "text_plain_output_14.png", "image_output_18.png", "image_output_21.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "image_output_20.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "image_output_16.png", "text_plain_output_8.png", "image_output_6.png", "image_output_12.png", "image_output_22.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "text_plain_output_11.png", "text_plain_output_12.png", "image_output_15.png", "image_output_9.png", "image_output_19.png" ]
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import numpy as np import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.isnull().sum() df.duplicated().sum() visualize(df.drop(['car_ID', 'CarName'], axis=1)) df.columns limits = {} def IQRoutliers(data): global limits for c in data.columns: if data[c].dtype == 'int64' or data[c].dtype == 'float64': Q1, Q3 = np.percentile(data[c], [25, 75]) IQR = Q3 - Q1 limits[c] = [Q1 - 1.5 * IQR, Q3 + 1.5 * IQR] df[c] = np.where(df[c] > limits[c][1], limits[c][1], np.where(df[c] < limits[c][0], limits[c][0], df[c])) IQRoutliers(df.drop(['car_ID'], axis=1)) visualize(df.drop(['car_ID', 'CarName'], axis=1))
code
105211208/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns
code
105211208/cell_18
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import numpy as np import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.isnull().sum() df.duplicated().sum() visualize(df.drop(['car_ID', 'CarName'], axis=1)) df.columns limits = {} def IQRoutliers(data): global limits for c in data.columns: if data[c].dtype == 'int64' or data[c].dtype == 'float64': Q1, Q3 = np.percentile(data[c], [25, 75]) IQR = Q3 - Q1 limits[c] = [Q1 - 1.5 * IQR, Q3 + 1.5 * IQR] df[c] = np.where(df[c] > limits[c][1], limits[c][1], np.where(df[c] < limits[c][0], limits[c][0], df[c])) print(limits)
code
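The IQRoutliers helper above caps every numeric column at Q1 - 1.5*IQR and Q3 + 1.5*IQR using nested np.where calls; pandas' clip expresses the same capping directly. A self-contained sketch of the idea on toy data:

```python
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3, 4, 100])          # one obvious outlier
q1, q3 = np.percentile(s, [25, 75])
iqr = q3 - q1
capped = s.clip(lower=q1 - 1.5 * iqr, upper=q3 + 1.5 * iqr)
print(capped.tolist())                    # the 100 is capped at q3 + 1.5*iqr
```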
105211208/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.head()
code
105211208/cell_35
[ "text_plain_output_1.png" ]
from sklearn import linear_model from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from sklearn.preprocessing import RobustScaler import numpy as np import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.isnull().sum() df.duplicated().sum() visualize(df.drop(['car_ID', 'CarName'], axis=1)) df.columns limits = {} def IQRoutliers(data): global limits for c in data.columns: if data[c].dtype == 'int64' or data[c].dtype == 'float64': Q1, Q3 = np.percentile(data[c], [25, 75]) IQR = Q3 - Q1 limits[c] = [Q1 - 1.5 * IQR, Q3 + 1.5 * IQR] df[c] = np.where(df[c] > limits[c][1], limits[c][1], np.where(df[c] < limits[c][0], limits[c][0], df[c])) IQRoutliers(df.drop(['car_ID'], axis=1)) visualize(df.drop(['car_ID', 'CarName'], axis=1)) df = pd.get_dummies(df) model = LinearRegression() model.fit(x_train, y_train) y_predict = model.predict(x_test) evaluate(model, x_train, y_train, x_test, y_test, y_predict) model = linear_model.Lasso() model.fit(x_train, y_train) y_predict = model.predict(x_test) evaluate(model, x_train, y_train, x_test, y_test, y_predict) scaler = RobustScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.fit_transform(x_test) model = linear_model.BayesianRidge() model.fit(x_train, y_train) y_predict = model.predict(x_test) result = pd.DataFrame({'Y_test': y_test, 'Y_predicted': y_predict}) evaluate(model, x_train, y_train, x_test, y_test, y_predict) sns.regplot(x=y_test, y=y_predict, color='green')
code
105211208/cell_22
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import numpy as np import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.isnull().sum() df.duplicated().sum() visualize(df.drop(['car_ID', 'CarName'], axis=1)) df.columns limits = {} def IQRoutliers(data): global limits for c in data.columns: if data[c].dtype == 'int64' or data[c].dtype == 'float64': Q1, Q3 = np.percentile(data[c], [25, 75]) IQR = Q3 - Q1 limits[c] = [Q1 - 1.5 * IQR, Q3 + 1.5 * IQR] df[c] = np.where(df[c] > limits[c][1], limits[c][1], np.where(df[c] < limits[c][0], limits[c][0], df[c])) IQRoutliers(df.drop(['car_ID'], axis=1)) visualize(df.drop(['car_ID', 'CarName'], axis=1)) df = pd.get_dummies(df) df.head()
code
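The modelling cells that follow use x_train, x_test, y_train and y_test, but the split itself is not among the cells in this dump. A plausible sketch of that missing step, assuming 'price' is the target column of the encoded frame and car_ID is dropped as an identifier:

```python
from sklearn.model_selection import train_test_split

# Hypothetical split matching the variable names used in the later cells.
X = df.drop(['car_ID', 'price'], axis=1)
y = df['price']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```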
105211208/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.isnull().sum()
code
105211208/cell_12
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np from matplotlib import pyplot as plt plt.rcParams.update({'figure.max_open_warning': 0}) import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import linear_model from statsmodels.stats.outliers_influence import variance_inflation_factor from sklearn.preprocessing import RobustScaler from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score import warnings warnings.filterwarnings('ignore') def visualize(data): for c in data.columns: plt.figure() plt.tight_layout() sns.set(rc={"figure.figsize":(8, 5)}) if data[c].dtype=='int64' or data[c].dtype=='float64': f, (ax_box, ax_hist) = plt.subplots(2, sharex=True) plt.gca().set(xlabel= c,ylabel='Frequency') sns.boxplot(data[c], ax=ax_box , linewidth= 1.0) sns.histplot(data[c], ax=ax_hist , bins = 10,kde=True) else: plt.gca().set(xlabel= c,ylabel='Frequency') sns.histplot(data[c], bins = 10) def evaluate(model,x_train , y_train, x_test , y_test, y_predict): print(f'score x_train , y_train : {model.score(x_train , y_train)}') print(f'score x_test , y_test : {model.score(x_test , y_test)}') print(f'r2_score : {r2_score(y_test, y_predict)}') print(f'mean absolute err : {mean_absolute_error(y_predict, y_test)}') print(f'mean squared err : {mean_squared_error(y_test, y_predict)}') df = pd.read_csv('../input/car-price-prediction/CarPrice_Assignment.csv') df.columns pd.set_option('display.max_columns', None) df.isnull().sum() df.duplicated().sum() visualize(df.drop(['car_ID', 'CarName'], axis=1))
code
16118948/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/Mall_Customers.csv') data.shape data.isnull().any() sns.set(style='white', palette='PuBuGn_d', color_codes=True) size = data['Gender'].value_counts() plt.figure(1, figsize=(15, 6)) n = 0 color = ['red', 'green', 'blue'] count = 0 for x in ['Age', 'Annual Income (k$)', 'Spending Score (1-100)']: n += 1 plt.subplot(1, 3, n) plt.subplots_adjust(hspace=0.5, wspace=0.5) sns.distplot(data[x], color=color[count]) plt.title('Distplot of {}'.format(x)) count += 1 plt.show()
code
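sns.distplot, used in the loop above, is deprecated in current seaborn releases in favour of histplot / displot; an equivalent three-panel version of the same figure might look like this (the data frame and column names are the ones already loaded above):

```python
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(15, 6))
for i, col in enumerate(['Age', 'Annual Income (k$)', 'Spending Score (1-100)'], start=1):
    plt.subplot(1, 3, i)
    sns.histplot(data[col], kde=True)   # histogram with a KDE overlay, replacing distplot
    plt.title('Distribution of {}'.format(col))
plt.tight_layout()
plt.show()
```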
16118948/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/Mall_Customers.csv') data.shape data.isnull().any()
code
16118948/cell_6
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/Mall_Customers.csv') data.head()
code
16118948/cell_2
[ "text_html_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input'))
code
16118948/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/Mall_Customers.csv') data.shape data.describe()
code
16118948/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/Mall_Customers.csv') data.shape data.isnull().any() sns.set(style='white', palette='PuBuGn_d', color_codes=True) sns.countplot(x='Gender', data=data, palette='winter') size = data['Gender'].value_counts() print('Female :', size[0] / (size[0] + size[1]) * 100) print('Male :', size[1] / (size[0] + size[1]) * 100) plt.title('Gender distribution')
code
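The two percentages printed above can also be read straight from a normalized value_counts; a one-line sketch on the same data frame:

```python
# Share of customers per gender, in percent; same numbers as the manual size[0]/size[1] arithmetic.
print(data['Gender'].value_counts(normalize=True) * 100)
```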
16118948/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/Mall_Customers.csv') data.shape
code
16118948/cell_24
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/Mall_Customers.csv') data.shape data.isnull().any() sns.set(style='white', palette='PuBuGn_d', color_codes=True) size = data['Gender'].value_counts() n = 0 color = ['red', 'green', 'blue'] count = 0 for x in ['Age', 'Annual Income (k$)', 'Spending Score (1-100)']: n += 1 count += 1 sns.pairplot(data) plt.plot()
code
16118948/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/Mall_Customers.csv') data.shape data.info()
code
74056805/cell_21
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from nltk.classify.scikitlearn import SklearnClassifier from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer from nltk.tag import pos_tag from nltk.tokenize import TweetTokenizer from sklearn.naive_bayes import MultinomialNB,BernoulliNB from time import time import nltk import pandas as pd import random dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) new_df = tweet_df[['sentiment', 'text']] df_pos = tweet_df[tweet_df['sentiment'] == 4] df_neg = tweet_df[tweet_df['sentiment'] == 0] new_df = pd.concat([df_pos, df_neg]) len(new_df) start_time = time() token = TweetTokenizer(reduce_len=True) data = [] X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() for x, y in zip(X, Y): if y == 4: data.append((token.tokenize(x), 1)) else: data.append((token.tokenize(x), 0)) data[:5] import nltk nltk.download('all') def lemmatize_sentence(tokens): lemmatizer = WordNetLemmatizer() lemmatized_sentence = [] for word, tag in pos_tag(tokens): if tag.startswith('NN'): pos = 'n' elif tag.startswith('VB'): pos = 'v' else: pos = 'a' lemmatized_sentence.append(lemmatizer.lemmatize(word, pos)) return lemmatized_sentence import re, string from nltk.corpus import stopwords STOP_WORDS = stopwords.words('english') def cleaned(token): if token == 'u': return 'you' if token == 'r': return 'are' if token == 'some1': return 'someone' if token == 'yrs': return 'years' if token == 'hrs': return 'hours' if token == 'mins': return 'minutes' if token == 'secs': return 'seconds' if token == 'pls' or token == 'plz': return 'please' if token == '2morow': return 'tomorrow' if token == '2day': return 'today' if token == '4got' or token == '4gotten': return 'forget' if token == 'amp' or token == 'quot' or token == 'lt' or (token == 'gt') or (token == '½25'): return '' return token def remove_noise(tweet_tokens): cleaned_tokens = [] for token, tag in pos_tag(tweet_tokens): token = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', token) token = re.sub('(@[A-Za-z0-9_]+)', '', token) if tag.startswith('NN'): pos = 'n' elif tag.startswith('VB'): pos = 'v' else: pos = 'a' lemmatizer = WordNetLemmatizer() token = lemmatizer.lemmatize(token, pos) cleaned_token = cleaned(token.lower()) if cleaned_token not in string.punctuation and len(cleaned_token) > 2 and (cleaned_token not in STOP_WORDS): cleaned_tokens.append(cleaned_token) return cleaned_tokens start_time = time() def list_to_dict(cleaned_tokens): return dict(([token, True] for token in cleaned_tokens)) cleaned_tokens_list = [] for tokens, label in data: cleaned_tokens_list.append((remove_noise(tokens), label)) start_time = time() final_data = [] for tokens, label in cleaned_tokens_list: final_data.append((list_to_dict(tokens), label)) final_data[:5] import random random.Random(140).shuffle(final_data) trim_index = int(len(final_data) * 0.9) train_data = final_data[:trim_index] test_data = final_data[trim_index:] start_time = time() from nltk import classify from nltk import NaiveBayesClassifier from nltk.classify.scikitlearn import SklearnClassifier from sklearn.naive_bayes import MultinomialNB, BernoulliNB classify_BNB = SklearnClassifier(BernoulliNB()) classifier = classify_BNB.train(train_data) print('Accuracy on train data:', 
nltk.classify.accuracy(classifier, train_data) * 100) print('Accuracy on test data:', nltk.classify.accuracy(classifier, test_data) * 100)
code
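The cell above wraps scikit-learn's BernoulliNB in NLTK's SklearnClassifier so it can consume the token -> True feature dictionaries. The same bag-of-words model can also be trained natively in scikit-learn; a minimal sketch, assuming texts is a list of cleaned tweet strings and labels the matching 0/1 sentiments (neither variable exists in the notebook as shown):

```python
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import BernoulliNB
from sklearn.pipeline import make_pipeline

# Binary bag-of-words features feeding a Bernoulli naive Bayes classifier.
model = make_pipeline(CountVectorizer(binary=True), BernoulliNB())
model.fit(texts, labels)
print(model.predict(['had a great day, loved it']))
```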
74056805/cell_13
[ "text_html_output_1.png" ]
from nltk.stem.wordnet import WordNetLemmatizer from nltk.tag import pos_tag from nltk.tokenize import TweetTokenizer from time import time import pandas as pd dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) new_df = tweet_df[['sentiment', 'text']] df_pos = tweet_df[tweet_df['sentiment'] == 4] df_neg = tweet_df[tweet_df['sentiment'] == 0] new_df = pd.concat([df_pos, df_neg]) len(new_df) start_time = time() token = TweetTokenizer(reduce_len=True) data = [] X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() for x, y in zip(X, Y): if y == 4: data.append((token.tokenize(x), 1)) else: data.append((token.tokenize(x), 0)) data[:5] def lemmatize_sentence(tokens): lemmatizer = WordNetLemmatizer() lemmatized_sentence = [] for word, tag in pos_tag(tokens): if tag.startswith('NN'): pos = 'n' elif tag.startswith('VB'): pos = 'v' else: pos = 'a' lemmatized_sentence.append(lemmatizer.lemmatize(word, pos)) return lemmatized_sentence print(lemmatize_sentence(data[0][0]))
code
74056805/cell_9
[ "text_plain_output_1.png" ]
from nltk.tokenize import TweetTokenizer from time import time import pandas as pd dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) new_df = tweet_df[['sentiment', 'text']] df_pos = tweet_df[tweet_df['sentiment'] == 4] df_neg = tweet_df[tweet_df['sentiment'] == 0] new_df = pd.concat([df_pos, df_neg]) len(new_df) start_time = time() token = TweetTokenizer(reduce_len=True) data = [] X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() for x, y in zip(X, Y): if y == 4: data.append((token.tokenize(x), 1)) else: data.append((token.tokenize(x), 0)) print('CPU Time:', time() - start_time) data[:5]
code
74056805/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) new_df = tweet_df[['sentiment', 'text']] new_df.head()
code
74056805/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) new_df = tweet_df[['sentiment', 'text']] df_pos = tweet_df[tweet_df['sentiment'] == 4] df_neg = tweet_df[tweet_df['sentiment'] == 0] new_df = pd.concat([df_pos, df_neg]) len(new_df)
code
74056805/cell_2
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) tweet_df.head()
code
74056805/cell_18
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer from nltk.tag import pos_tag from nltk.tokenize import TweetTokenizer from time import time import pandas as pd dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) new_df = tweet_df[['sentiment', 'text']] df_pos = tweet_df[tweet_df['sentiment'] == 4] df_neg = tweet_df[tweet_df['sentiment'] == 0] new_df = pd.concat([df_pos, df_neg]) len(new_df) start_time = time() token = TweetTokenizer(reduce_len=True) data = [] X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() for x, y in zip(X, Y): if y == 4: data.append((token.tokenize(x), 1)) else: data.append((token.tokenize(x), 0)) data[:5] def lemmatize_sentence(tokens): lemmatizer = WordNetLemmatizer() lemmatized_sentence = [] for word, tag in pos_tag(tokens): if tag.startswith('NN'): pos = 'n' elif tag.startswith('VB'): pos = 'v' else: pos = 'a' lemmatized_sentence.append(lemmatizer.lemmatize(word, pos)) return lemmatized_sentence import re, string from nltk.corpus import stopwords STOP_WORDS = stopwords.words('english') def cleaned(token): if token == 'u': return 'you' if token == 'r': return 'are' if token == 'some1': return 'someone' if token == 'yrs': return 'years' if token == 'hrs': return 'hours' if token == 'mins': return 'minutes' if token == 'secs': return 'seconds' if token == 'pls' or token == 'plz': return 'please' if token == '2morow': return 'tomorrow' if token == '2day': return 'today' if token == '4got' or token == '4gotten': return 'forget' if token == 'amp' or token == 'quot' or token == 'lt' or (token == 'gt') or (token == '½25'): return '' return token def remove_noise(tweet_tokens): cleaned_tokens = [] for token, tag in pos_tag(tweet_tokens): token = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', token) token = re.sub('(@[A-Za-z0-9_]+)', '', token) if tag.startswith('NN'): pos = 'n' elif tag.startswith('VB'): pos = 'v' else: pos = 'a' lemmatizer = WordNetLemmatizer() token = lemmatizer.lemmatize(token, pos) cleaned_token = cleaned(token.lower()) if cleaned_token not in string.punctuation and len(cleaned_token) > 2 and (cleaned_token not in STOP_WORDS): cleaned_tokens.append(cleaned_token) return cleaned_tokens start_time = time() def list_to_dict(cleaned_tokens): return dict(([token, True] for token in cleaned_tokens)) cleaned_tokens_list = [] for tokens, label in data: cleaned_tokens_list.append((remove_noise(tokens), label)) print('Removed Noise, CPU Time:', time() - start_time) start_time = time() final_data = [] for tokens, label in cleaned_tokens_list: final_data.append((list_to_dict(tokens), label)) print('Data Prepared for model, CPU Time:', time() - start_time) final_data[:5]
code
74056805/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) tweet_df['sentiment'].value_counts()
code
74056805/cell_17
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer from nltk.tag import pos_tag from nltk.tokenize import TweetTokenizer from time import time import pandas as pd dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) new_df = tweet_df[['sentiment', 'text']] df_pos = tweet_df[tweet_df['sentiment'] == 4] df_neg = tweet_df[tweet_df['sentiment'] == 0] new_df = pd.concat([df_pos, df_neg]) len(new_df) start_time = time() token = TweetTokenizer(reduce_len=True) data = [] X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() for x, y in zip(X, Y): if y == 4: data.append((token.tokenize(x), 1)) else: data.append((token.tokenize(x), 0)) data[:5] def lemmatize_sentence(tokens): lemmatizer = WordNetLemmatizer() lemmatized_sentence = [] for word, tag in pos_tag(tokens): if tag.startswith('NN'): pos = 'n' elif tag.startswith('VB'): pos = 'v' else: pos = 'a' lemmatized_sentence.append(lemmatizer.lemmatize(word, pos)) return lemmatized_sentence import re, string from nltk.corpus import stopwords STOP_WORDS = stopwords.words('english') def cleaned(token): if token == 'u': return 'you' if token == 'r': return 'are' if token == 'some1': return 'someone' if token == 'yrs': return 'years' if token == 'hrs': return 'hours' if token == 'mins': return 'minutes' if token == 'secs': return 'seconds' if token == 'pls' or token == 'plz': return 'please' if token == '2morow': return 'tomorrow' if token == '2day': return 'today' if token == '4got' or token == '4gotten': return 'forget' if token == 'amp' or token == 'quot' or token == 'lt' or (token == 'gt') or (token == '½25'): return '' return token def remove_noise(tweet_tokens): cleaned_tokens = [] for token, tag in pos_tag(tweet_tokens): token = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', token) token = re.sub('(@[A-Za-z0-9_]+)', '', token) if tag.startswith('NN'): pos = 'n' elif tag.startswith('VB'): pos = 'v' else: pos = 'a' lemmatizer = WordNetLemmatizer() token = lemmatizer.lemmatize(token, pos) cleaned_token = cleaned(token.lower()) if cleaned_token not in string.punctuation and len(cleaned_token) > 2 and (cleaned_token not in STOP_WORDS): cleaned_tokens.append(cleaned_token) return cleaned_tokens print(remove_noise(data[0][0]))
code
74056805/cell_24
[ "text_plain_output_1.png" ]
from nltk.classify.scikitlearn import SklearnClassifier from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer from nltk.tag import pos_tag from nltk.tokenize import TweetTokenizer from sklearn.naive_bayes import MultinomialNB,BernoulliNB from time import time import nltk import pandas as pd import random dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) new_df = tweet_df[['sentiment', 'text']] df_pos = tweet_df[tweet_df['sentiment'] == 4] df_neg = tweet_df[tweet_df['sentiment'] == 0] new_df = pd.concat([df_pos, df_neg]) len(new_df) start_time = time() token = TweetTokenizer(reduce_len=True) data = [] X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() for x, y in zip(X, Y): if y == 4: data.append((token.tokenize(x), 1)) else: data.append((token.tokenize(x), 0)) data[:5] import nltk nltk.download('all') def lemmatize_sentence(tokens): lemmatizer = WordNetLemmatizer() lemmatized_sentence = [] for word, tag in pos_tag(tokens): if tag.startswith('NN'): pos = 'n' elif tag.startswith('VB'): pos = 'v' else: pos = 'a' lemmatized_sentence.append(lemmatizer.lemmatize(word, pos)) return lemmatized_sentence import re, string from nltk.corpus import stopwords STOP_WORDS = stopwords.words('english') def cleaned(token): if token == 'u': return 'you' if token == 'r': return 'are' if token == 'some1': return 'someone' if token == 'yrs': return 'years' if token == 'hrs': return 'hours' if token == 'mins': return 'minutes' if token == 'secs': return 'seconds' if token == 'pls' or token == 'plz': return 'please' if token == '2morow': return 'tomorrow' if token == '2day': return 'today' if token == '4got' or token == '4gotten': return 'forget' if token == 'amp' or token == 'quot' or token == 'lt' or (token == 'gt') or (token == '½25'): return '' return token def remove_noise(tweet_tokens): cleaned_tokens = [] for token, tag in pos_tag(tweet_tokens): token = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', token) token = re.sub('(@[A-Za-z0-9_]+)', '', token) if tag.startswith('NN'): pos = 'n' elif tag.startswith('VB'): pos = 'v' else: pos = 'a' lemmatizer = WordNetLemmatizer() token = lemmatizer.lemmatize(token, pos) cleaned_token = cleaned(token.lower()) if cleaned_token not in string.punctuation and len(cleaned_token) > 2 and (cleaned_token not in STOP_WORDS): cleaned_tokens.append(cleaned_token) return cleaned_tokens start_time = time() def list_to_dict(cleaned_tokens): return dict(([token, True] for token in cleaned_tokens)) cleaned_tokens_list = [] for tokens, label in data: cleaned_tokens_list.append((remove_noise(tokens), label)) start_time = time() final_data = [] for tokens, label in cleaned_tokens_list: final_data.append((list_to_dict(tokens), label)) final_data[:5] import random random.Random(140).shuffle(final_data) trim_index = int(len(final_data) * 0.9) train_data = final_data[:trim_index] test_data = final_data[trim_index:] start_time = time() from nltk import classify from nltk import NaiveBayesClassifier from nltk.classify.scikitlearn import SklearnClassifier from sklearn.naive_bayes import MultinomialNB, BernoulliNB classify_BNB = SklearnClassifier(BernoulliNB()) classifier = classify_BNB.train(train_data) custom_tweet = 'I ordered just once from Veg Treat, they screwed up, never used the app again.' custom_tokens = remove_noise(token.tokenize(custom_tweet)) print(classifier.classify(dict(([token, True] for token in custom_tokens))))
code
74056805/cell_10
[ "text_html_output_1.png" ]
import nltk import nltk nltk.download('all')
code
74056805/cell_12
[ "text_plain_output_1.png" ]
from nltk.tag import pos_tag from nltk.tokenize import TweetTokenizer from time import time import pandas as pd dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) new_df = tweet_df[['sentiment', 'text']] df_pos = tweet_df[tweet_df['sentiment'] == 4] df_neg = tweet_df[tweet_df['sentiment'] == 0] new_df = pd.concat([df_pos, df_neg]) len(new_df) start_time = time() token = TweetTokenizer(reduce_len=True) data = [] X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() X = new_df['text'].tolist() Y = new_df['sentiment'].tolist() for x, y in zip(X, Y): if y == 4: data.append((token.tokenize(x), 1)) else: data.append((token.tokenize(x), 0)) data[:5] from nltk.tag import pos_tag from nltk.stem.wordnet import WordNetLemmatizer print(pos_tag(data[0][0]))
code
74056805/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd dataset_columns = ['sentiment', 'ids', 'date', 'flag', 'user', 'text'] dataset_encode = 'ISO-8859-1' tweet_df = pd.read_csv('../input/twitter-sentiment/training.1600000.processed.noemoticon.csv', encoding=dataset_encode, names=dataset_columns) df_pos = tweet_df[tweet_df['sentiment'] == 4] df_neg = tweet_df[tweet_df['sentiment'] == 0] print(len(df_pos), len(df_neg))
code
73077112/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns rows, columns = df.shape rows df.isnull().sum()
code
73077112/cell_9
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns df.tail()
code
73077112/cell_20
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns rows, columns = df.shape rows df.isnull().sum() df.drop('company', inplace=True, axis=1) df df.sort_values(by=['adr'], ascending=False)[['name']][:1]
code
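Sorting the whole frame just to read off one name works, but idxmax gives the same answer without a full sort; a small sketch on the same df:

```python
# Name on the booking with the highest average daily rate (adr).
print(df.loc[df['adr'].idxmax(), 'name'])
```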
73077112/cell_6
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape
code
73077112/cell_26
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns rows, columns = df.shape rows df.isnull().sum() df.drop('company', inplace=True, axis=1) df df.sort_values(by=['adr'], ascending=False)[['name']][:1] df.sort_values(by=['total_of_special_requests'], ascending=False)[['name', 'email']]
code
73077112/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns rows, columns = df.shape rows
code
73077112/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns
code
73077112/cell_18
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns rows, columns = df.shape rows df.isnull().sum() df.drop('company', inplace=True, axis=1) df df['country'].value_counts(sort=True)[:5]
code
73077112/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns df.head()
code
73077112/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv') df.shape df.columns rows, columns = df.shape rows df.isnull().sum() df.drop('company', inplace=True, axis=1) df
code
73077112/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv')
df.shape
df.columns
rows, columns = df.shape
rows
df.isnull().sum()
df.drop('company', inplace=True, axis=1)
df
df.sort_values(by=['adr'], ascending=False)[['name']][:1]
Mean_Stay = df['adr'].mean()
round(Mean_Stay, 2)
code
73077112/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv')
df.shape
df.columns
rows, columns = df.shape
rows
df.isnull().sum()
df.drop('company', inplace=True, axis=1)
df
df.sort_values(by=['adr'], ascending=False)[['name']][:1]
Mean_ADR = df['adr'].mean()
round(Mean_ADR, 2)
code
73077112/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/hotel-booking/hotel_booking.csv')
df.info()
code
128048982/cell_13
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from transformers import AutoTokenizer
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('gpt2')
example = 'def gcd(a, b):\n """Computes gcd(a,b) via the euclidean algorithm."""\n while b > 0:\n a,b = b, a%b\n return a'
tokens = tokenizer.tokenize(example)
print(tokens)
print(len(tokens))
code
128048982/cell_6
[ "text_plain_output_1.png" ]
from datasets import load_dataset
from datasets import load_dataset
dataset = load_dataset('espejelomar/code_search_net_python_10000_examples')
dataset['train']
code
128048982/cell_2
[ "text_plain_output_1.png" ]
def gcd(a, b):
    """Computes gcd(a,b) via the euclidean algorithm."""
    while b > 0:
        a, b = (b, a % b)
    return a
print(f'gcd(1337,143) = {gcd(1337, 143)}')
print(f'gcd(1337,7) = {gcd(1337, 7)}')
code
128048982/cell_1
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from transformers import AutoTokenizer
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('gpt2')
code
128048982/cell_7
[ "text_plain_output_1.png" ]
from datasets import load_dataset
from datasets import load_dataset
dataset = load_dataset('espejelomar/code_search_net_python_10000_examples')
dataset['train'][10]['whole_func_string']
code
128048982/cell_3
[ "text_plain_output_1.png" ]
from transformers import AutoTokenizer
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('gpt2')
example = 'def gcd(a, b):\n """Computes gcd(a,b) via the euclidean algorithm."""\n while b > 0:\n a,b = b, a%b\n return a'
tokens = tokenizer.tokenize(example)
print(tokens)
print(len(tokens))
code
128048982/cell_10
[ "text_plain_output_1.png" ]
from datasets import load_dataset
from transformers import AutoTokenizer
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('gpt2')
example = 'def gcd(a, b):\n """Computes gcd(a,b) via the euclidean algorithm."""\n while b > 0:\n a,b = b, a%b\n return a'
tokens = tokenizer.tokenize(example)
from datasets import load_dataset
dataset = load_dataset('espejelomar/code_search_net_python_10000_examples')

def get_training_corpus(size=10):
    return (dataset['train'][i:i + size]['whole_func_string'] for i in range(0, len(dataset['train']), size))
training_corpus = get_training_corpus()
updated_tokenizer = tokenizer.train_new_from_iterator(training_corpus, 52000)
code
128048982/cell_12
[ "text_plain_output_1.png" ]
from datasets import load_dataset
from transformers import AutoTokenizer
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('gpt2')
example = 'def gcd(a, b):\n """Computes gcd(a,b) via the euclidean algorithm."""\n while b > 0:\n a,b = b, a%b\n return a'
tokens = tokenizer.tokenize(example)
from datasets import load_dataset
dataset = load_dataset('espejelomar/code_search_net_python_10000_examples')

def get_training_corpus(size=10):
    return (dataset['train'][i:i + size]['whole_func_string'] for i in range(0, len(dataset['train']), size))
training_corpus = get_training_corpus()
updated_tokenizer = tokenizer.train_new_from_iterator(training_corpus, 52000)
updated_tokens = updated_tokenizer.tokenize(example)
print(updated_tokens)
print(len(updated_tokens))
code
128048982/cell_5
[ "text_plain_output_1.png" ]
from datasets import load_dataset
from datasets import load_dataset
dataset = load_dataset('espejelomar/code_search_net_python_10000_examples')
code
128005120/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
df.isnull().sum()
df.drop(['Unnamed: 32', 'id'], axis=1, inplace=True)
df.diagnosis.value_counts()
df.diagnosis.replace({'M': 1, 'B': 2}, inplace=True)
df.describe()
code
128005120/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
df.isnull().sum()
df.drop(['Unnamed: 32', 'id'], axis=1, inplace=True)
df
code
128005120/cell_25
[ "text_html_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=5)
model.fit(x_train, y_train)
code
128005120/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df
code
128005120/cell_34
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=5)
model.fit(x_train, y_train)
model.score(x_train, y_train) * 100
model.score(x_test, y_test) * 100
y_pred = model.predict(x_test)
acc = accuracy_score(y_test, y_pred) * 100
acc
code
128005120/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
df.info()
code
128005120/cell_40
[ "text_plain_output_1.png" ]
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=5)
model.fit(x_train, y_train)
model.score(x_train, y_train) * 100
model.score(x_test, y_test) * 100
y_pred = model.predict(x_test)
report = classification_report(y_test, y_pred)
print(report)
code
128005120/cell_29
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=5)
model.fit(x_train, y_train)
model.score(x_train, y_train) * 100
model.score(x_test, y_test) * 100
y_pred = model.predict(x_test)
y_pred
code
128005120/cell_26
[ "text_html_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=5)
model.fit(x_train, y_train)
model.score(x_train, y_train) * 100
code
128005120/cell_2
[ "text_html_output_1.png" ]
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
code
128005120/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
df.isnull().sum()
df.drop(['Unnamed: 32', 'id'], axis=1, inplace=True)
df.diagnosis.value_counts()
df.diagnosis.replace({'M': 1, 'B': 2}, inplace=True)
x = df.values[:, 1:]
y = df.values[:, 0]
y
code
128005120/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128005120/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
df.isnull().sum()
code
128005120/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
df.isnull().sum()
df.drop(['Unnamed: 32', 'id'], axis=1, inplace=True)
df.diagnosis.value_counts()
df.diagnosis.replace({'M': 1, 'B': 2}, inplace=True)
df.tail()
code
128005120/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
df.isnull().sum()
df.drop(['Unnamed: 32', 'id'], axis=1, inplace=True)
df.diagnosis.value_counts()
df.diagnosis.replace({'M': 1, 'B': 2}, inplace=True)
x = df.values[:, 1:]
x
code
128005120/cell_31
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
model = KNeighborsClassifier(n_neighbors=5)
model.fit(x_train, y_train)
model.score(x_train, y_train) * 100
model.score(x_test, y_test) * 100
y_pred = model.predict(x_test)
df1 = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df1
code
128005120/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
df.isnull().sum()
df.drop(['Unnamed: 32', 'id'], axis=1, inplace=True)
df.diagnosis.value_counts()
df.diagnosis.replace({'M': 1, 'B': 2}, inplace=True)
df.head()
code
128005120/cell_22
[ "text_html_output_1.png" ]
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
code
128005120/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
df.isnull().sum()
df.drop(['Unnamed: 32', 'id'], axis=1, inplace=True)
df.diagnosis.value_counts()
code
128005120/cell_27
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=5)
model.fit(x_train, y_train)
model.score(x_train, y_train) * 100
model.score(x_test, y_test) * 100
code
128005120/cell_37
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=5)
model.fit(x_train, y_train)
model.score(x_train, y_train) * 100
model.score(x_test, y_test) * 100
y_pred = model.predict(x_test)
performance = confusion_matrix(y_test, y_pred)
performance
code
128005120/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
df.isnull().sum()
df.drop(['Unnamed: 32', 'id'], axis=1, inplace=True)
df.diagnosis.value_counts()
df.diagnosis.replace({'M': 1, 'B': 2}, inplace=True)
df
code
128005120/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-wisconsin-data/data.csv')
df.shape
code
90150781/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.columns
df.airline.value_counts()
plt.figure(figsize=(10, 8))
plt1 = df.airline.value_counts().plot(kind='bar')
plt.title('Airline histogram', fontsize=20)
plt1.set(xlabel = 'airline', ylabel='Frequency of airline')
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
plt.title('Source histogram')
plt1 = df['source_city'].value_counts().plot(kind='bar')
plt1.set(xlabel = 'Source city', ylabel='Frequency of source city')
plt.subplot(1,2,2)
plt.title('Destination histogram')
plt1 = df['destination_city'].value_counts().plot(kind='bar')
plt1.set(xlabel = 'Destination city', ylabel='Frequency of destination city')
plt.show()
df.departure_time.value_counts()
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
plt.title('Departure time histogram')
plt1 = df.departure_time.value_counts().plot(kind='bar')
plt1.set(xlabel = 'Departure time', ylabel='Frequency of Departure time')
plt.subplot(1,2,2)
plt.title('Arrival time histogram')
plt1 = df.arrival_time.value_counts().plot(kind='bar')
plt1.set(xlabel = 'Arrival time', ylabel='Frequency of Arrival time')
plt.show()
df.stops.value_counts()
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.title('Class Histogram')
sns.countplot(df['class'], palette='cubehelix')
plt.subplot(1, 2, 2)
plt.title('Class vs Price')
sns.boxplot(x=df['class'], y=df.price, palette='cubehelix')
plt.show()
code
90150781/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.columns
df.airline.value_counts()
code
90150781/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
code
90150781/cell_4
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
code
90150781/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.drop('Unnamed: 0', axis=1, inplace=True)
df.isnull().sum()
df.columns
df.airline.value_counts()
plt.figure(figsize=(10, 8))
plt1 = df.airline.value_counts().plot(kind='bar')
plt.title('Airline histogram', fontsize=20)
plt1.set(xlabel = 'airline', ylabel='Frequency of airline')
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
plt.title('Source histogram')
plt1 = df['source_city'].value_counts().plot(kind='bar')
plt1.set(xlabel = 'Source city', ylabel='Frequency of source city')
plt.subplot(1,2,2)
plt.title('Destination histogram')
plt1 = df['destination_city'].value_counts().plot(kind='bar')
plt1.set(xlabel = 'Destination city', ylabel='Frequency of destination city')
plt.show()
df.departure_time.value_counts()
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
plt.title('Departure time histogram')
plt1 = df.departure_time.value_counts().plot(kind='bar')
plt1.set(xlabel = 'Departure time', ylabel='Frequency of Departure time')
plt.subplot(1,2,2)
plt.title('Arrival time histogram')
plt1 = df.arrival_time.value_counts().plot(kind='bar')
plt1.set(xlabel = 'Arrival time', ylabel='Frequency of Arrival time')
plt.show()
df.stops.value_counts()
code
90150781/cell_6
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/flight-price-prediction/Clean_Dataset.csv')
df.shape
df.head()
code