path
stringlengths 13
17
| screenshot_names
listlengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
104130018/cell_40
|
[
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
data.groupby('Gender')['Purchase'].mean()
data.groupby('Marital_Status')['Purchase'].mean()
jobs = data.groupby('Occupation')['Purchase'].mean()
plt.figure(figsize=(12, 8))
sns.countplot(data['City_Category'], hue=data['Gender'])
plt.show()
|
code
|
104130018/cell_29
|
[
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
data.groupby('Gender')['Purchase'].mean()
sns.countplot(data['Marital_Status'])
plt.show()
|
code
|
104130018/cell_2
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
|
code
|
104130018/cell_1
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
104130018/cell_7
|
[
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
|
code
|
104130018/cell_18
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
plt.figure(figsize=(10, 6))
sns.boxplot(x=data['Purchase'], palette='Set3')
plt.show()
|
code
|
104130018/cell_28
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
data.groupby('Gender')['Purchase'].mean()
data['Marital_Status'].value_counts()
|
code
|
104130018/cell_15
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
plt.figure(figsize=(12, 8))
plt.title('Purchase Distribution')
sns.distplot(data['Purchase'], color='r')
|
code
|
104130018/cell_38
|
[
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
data.groupby('Gender')['Purchase'].mean()
data.groupby('Marital_Status')['Purchase'].mean()
jobs = data.groupby('Occupation')['Purchase'].mean()
sns.countplot(data['City_Category'])
|
code
|
104130018/cell_35
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
data.groupby('Gender')['Purchase'].mean()
data.groupby('Marital_Status')['Purchase'].mean()
jobs = data.groupby('Occupation')['Purchase'].mean()
plt.figure(figsize=(12, 9))
jobs.plot(kind='bar')
|
code
|
104130018/cell_31
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
data.groupby('Gender')['Purchase'].mean()
data.groupby('Marital_Status')['Purchase'].mean()
|
code
|
104130018/cell_24
|
[
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
data.Gender.value_counts()
data.groupby('Gender')['Purchase'].mean()
|
code
|
104130018/cell_12
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.describe().T
data.isnull().sum() / data.shape[0] * 100
data.nunique()
|
code
|
104130018/cell_5
|
[
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/black-friday-sales-prediction/train_oSwQCTC (1)/train.csv')
data
data.shape
data.info()
|
code
|
2040633/cell_13
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="season", data=daily_Data, color="c");
plt.xlabel("season")
daily_Data.season.value_counts()
f, ax = plt.subplots(figsize=(5, 5))
plt.hist(x='holiday', data=daily_Data, color='c')
plt.xlabel('holiday')
|
code
|
2040633/cell_4
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
|
code
|
2040633/cell_23
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
daily_Data.season.value_counts()
daily_Data.holiday.value_counts()
daily_Data.workingday.value_counts()
daily_Data.weather.value_counts()
season = pd.get_dummies(daily_Data['season'])
daily_Data = pd.concat([daily_Data, season], axis=1)
weather = pd.get_dummies(daily_Data['weather'])
daily_Data = pd.concat([daily_Data, weather], axis=1)
daily_Data.head()
|
code
|
2040633/cell_20
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="season", data=daily_Data, color="c");
plt.xlabel("season")
daily_Data.season.value_counts()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="holiday", data=daily_Data,color='c');
plt.xlabel("holiday")
daily_Data.holiday.value_counts()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="workingday",data=daily_Data,color='c');
plt.xlabel("workingday")
daily_Data.workingday.value_counts()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="weather",data=daily_Data,color='c');
plt.xlabel("weather")
daily_Data.weather.value_counts()
corrMatt = daily_Data[['temp', 'atemp', 'casual', 'registered', 'humidity', 'windspeed', 'count']].corr()
mask = np.array(corrMatt)
mask[np.tril_indices_from(mask)] = False
fig, ax = plt.subplots()
fig.set_size_inches(20, 10)
sn.heatmap(corrMatt, mask=mask, vmax=0.8, square=True, annot=True)
|
code
|
2040633/cell_6
|
[
"text_plain_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
|
code
|
2040633/cell_26
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
daily_Data.season.value_counts()
daily_Data.holiday.value_counts()
daily_Data.workingday.value_counts()
daily_Data.weather.value_counts()
season = pd.get_dummies(daily_Data['season'])
daily_Data = pd.concat([daily_Data, season], axis=1)
weather = pd.get_dummies(daily_Data['weather'])
daily_Data = pd.concat([daily_Data, weather], axis=1)
daily_Data.shape
daily_Data = daily_Data.drop('season', axis=1)
daily_Data = daily_Data.drop('weather', axis=1)
daily_Data = daily_Data.drop('casual', axis=1)
daily_Data = daily_Data.drop('registered', axis=1)
labels = daily_Data.pop('count')
daily_Data.head()
|
code
|
2040633/cell_2
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.head()
|
code
|
2040633/cell_11
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
f, ax = plt.subplots(figsize=(5, 5))
plt.hist(x='season', data=daily_Data, color='c')
plt.xlabel('season')
|
code
|
2040633/cell_19
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="season", data=daily_Data, color="c");
plt.xlabel("season")
daily_Data.season.value_counts()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="holiday", data=daily_Data,color='c');
plt.xlabel("holiday")
daily_Data.holiday.value_counts()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="workingday",data=daily_Data,color='c');
plt.xlabel("workingday")
daily_Data.workingday.value_counts()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="weather",data=daily_Data,color='c');
plt.xlabel("weather")
daily_Data.weather.value_counts()
plt.hist(x='temp', data=daily_Data, edgecolor='black', linewidth=2)
|
code
|
2040633/cell_7
|
[
"text_html_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
print('season:', daily_Data.season.unique())
print('holiday', daily_Data.holiday.unique())
print('workingday:', daily_Data.workingday.unique())
print('weather:', daily_Data.weather.unique())
print('temp:', daily_Data.temp.unique())
print('atemp:', daily_Data.atemp.unique())
print('humidity:', daily_Data.humidity.unique())
|
code
|
2040633/cell_18
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
daily_Data.season.value_counts()
daily_Data.holiday.value_counts()
daily_Data.workingday.value_counts()
daily_Data.weather.value_counts()
|
code
|
2040633/cell_8
|
[
"text_plain_output_1.png"
] |
from collections import Counter
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
from collections import Counter
Counter(daily_Data['holiday'])
|
code
|
2040633/cell_15
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="season", data=daily_Data, color="c");
plt.xlabel("season")
daily_Data.season.value_counts()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="holiday", data=daily_Data,color='c');
plt.xlabel("holiday")
daily_Data.holiday.value_counts()
f, ax = plt.subplots(figsize=(5, 5))
plt.hist(x='workingday', data=daily_Data, color='c')
plt.xlabel('workingday')
|
code
|
2040633/cell_16
|
[
"text_plain_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
daily_Data.season.value_counts()
daily_Data.holiday.value_counts()
daily_Data.workingday.value_counts()
|
code
|
2040633/cell_3
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
|
code
|
2040633/cell_17
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="season", data=daily_Data, color="c");
plt.xlabel("season")
daily_Data.season.value_counts()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="holiday", data=daily_Data,color='c');
plt.xlabel("holiday")
daily_Data.holiday.value_counts()
f, ax = plt.subplots(figsize=(5,5))
plt.hist(x="workingday",data=daily_Data,color='c');
plt.xlabel("workingday")
daily_Data.workingday.value_counts()
f, ax = plt.subplots(figsize=(5, 5))
plt.hist(x='weather', data=daily_Data, color='c')
plt.xlabel('weather')
|
code
|
2040633/cell_24
|
[
"text_plain_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
daily_Data.season.value_counts()
daily_Data.holiday.value_counts()
daily_Data.workingday.value_counts()
daily_Data.weather.value_counts()
season = pd.get_dummies(daily_Data['season'])
daily_Data = pd.concat([daily_Data, season], axis=1)
weather = pd.get_dummies(daily_Data['weather'])
daily_Data = pd.concat([daily_Data, weather], axis=1)
daily_Data.shape
|
code
|
2040633/cell_14
|
[
"text_plain_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
daily_Data.season.value_counts()
daily_Data.holiday.value_counts()
|
code
|
2040633/cell_10
|
[
"text_html_output_1.png"
] |
import pandas as pd
import seaborn as sn
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
sn.barplot(x='season', y='count', data=daily_Data)
|
code
|
2040633/cell_12
|
[
"text_plain_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
daily_Data.isnull().sum()
daily_Data.season.value_counts()
|
code
|
2040633/cell_5
|
[
"text_html_output_1.png"
] |
import pandas as pd
daily_Data = pd.read_csv('../input/train.csv')
daily_Data.shape
daily_Data.dtypes
daily_Data.columns
|
code
|
325705/cell_21
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from sklearn.cross_validation import KFold
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
letarget = LabelEncoder().fit(gatrain.group.values)
y = letarget.transform(gatrain.group.values)
n_classes = len(letarget.classes_)
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
phone = phone.drop_duplicates('device_id', keep='first')
lebrand = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = lebrand.transform(phone.phone_brand)
m = phone.phone_brand.str.cat(phone.device_model)
lemodel = LabelEncoder().fit(m)
phone['model'] = lemodel.transform(m)
train = gatrain.merge(phone[['device_id', 'brand', 'model']], how='left', on='device_id')
ptrain = gatrain.merge(phone[['device_id', 'brand', 'model']], how='left', on='device_id')
class GenderAgeGroupProb(object):
def __init__(self, prior_weight=10.0):
self.prior_weight = prior_weight
def fit(self, df, by):
self.by = by
self.prior = df['group'].value_counts().sort_index() / df.shape[0]
c = df.groupby([by, 'group']).size().unstack().fillna(0)
total = c.sum(axis=1)
self.prob = c.add(self.prior_weight * self.prior).div(c.sum(axis=1) + self.prior_weight, axis=0)
return self
def predict_proba(self, df):
pred = df[[self.by]].merge(self.prob, how='left', left_on=self.by, right_index=True).fillna(self.prior)[self.prob.columns]
pred.loc[pred.iloc[:, 0].isnull(), :] = self.prior
return pred.values
def score(ptrain, by, prior_weight=10.0):
kf = KFold(ptrain.shape[0], n_folds=10, shuffle=True, random_state=0)
pred = np.zeros((ptrain.shape[0], n_classes))
for itrain, itest in kf:
train = ptrain.iloc[itrain, :]
test = ptrain.iloc[itest, :]
ytrain, ytest = (y[itrain], y[itest])
clf = GenderAgeGroupProb(prior_weight=prior_weight).fit(train, by)
pred[itest, :] = clf.predict_proba(test)
return log_loss(y, pred)
weights = [0.5, 1.0, 5.0, 10.0, 20.0, 40.0, 100.0]
res = [score(ptrain, 'brand', prior_weight=w) for w in weights]
weights = [0.5, 1.0, 5.0, 10.0, 20.0, 40.0, 100.0]
res = [score(ptrain, 'model', prior_weight=w) for w in weights]
kf = KFold(ptrain.shape[0], n_folds=10, shuffle=True, random_state=0)
predb = np.zeros((ptrain.shape[0], n_classes))
predm = np.zeros((ptrain.shape[0], n_classes))
for itrain, itest in kf:
train = ptrain.iloc[itrain, :]
test = ptrain.iloc[itest, :]
ytrain, ytest = (y[itrain], y[itest])
clf = GenderAgeGroupProb(prior_weight=40.0).fit(train, 'brand')
predb[itest, :] = clf.predict_proba(test)
clf = GenderAgeGroupProb(prior_weight=40.0).fit(train, 'model')
predm[itest, :] = clf.predict_proba(test)
log_loss(y, 0.5 * (predb + predm))
|
code
|
325705/cell_23
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import pandas as pd
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
phone = phone.drop_duplicates('device_id', keep='first')
lebrand = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = lebrand.transform(phone.phone_brand)
m = phone.phone_brand.str.cat(phone.device_model)
lemodel = LabelEncoder().fit(m)
phone['model'] = lemodel.transform(m)
ptest = gatest.merge(phone[['device_id', 'brand', 'model']], how='left', on='device_id')
ptest.head(3)
|
code
|
325705/cell_6
|
[
"text_html_output_1.png"
] |
import pandas as pd
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
phone.head(3)
|
code
|
325705/cell_19
|
[
"text_html_output_1.png"
] |
from sklearn.cross_validation import KFold
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
letarget = LabelEncoder().fit(gatrain.group.values)
y = letarget.transform(gatrain.group.values)
n_classes = len(letarget.classes_)
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
phone = phone.drop_duplicates('device_id', keep='first')
lebrand = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = lebrand.transform(phone.phone_brand)
m = phone.phone_brand.str.cat(phone.device_model)
lemodel = LabelEncoder().fit(m)
phone['model'] = lemodel.transform(m)
train = gatrain.merge(phone[['device_id', 'brand', 'model']], how='left', on='device_id')
ptrain = gatrain.merge(phone[['device_id', 'brand', 'model']], how='left', on='device_id')
class GenderAgeGroupProb(object):
def __init__(self, prior_weight=10.0):
self.prior_weight = prior_weight
def fit(self, df, by):
self.by = by
self.prior = df['group'].value_counts().sort_index() / df.shape[0]
c = df.groupby([by, 'group']).size().unstack().fillna(0)
total = c.sum(axis=1)
self.prob = c.add(self.prior_weight * self.prior).div(c.sum(axis=1) + self.prior_weight, axis=0)
return self
def predict_proba(self, df):
pred = df[[self.by]].merge(self.prob, how='left', left_on=self.by, right_index=True).fillna(self.prior)[self.prob.columns]
pred.loc[pred.iloc[:, 0].isnull(), :] = self.prior
return pred.values
def score(ptrain, by, prior_weight=10.0):
kf = KFold(ptrain.shape[0], n_folds=10, shuffle=True, random_state=0)
pred = np.zeros((ptrain.shape[0], n_classes))
for itrain, itest in kf:
train = ptrain.iloc[itrain, :]
test = ptrain.iloc[itest, :]
ytrain, ytest = (y[itrain], y[itest])
clf = GenderAgeGroupProb(prior_weight=prior_weight).fit(train, by)
pred[itest, :] = clf.predict_proba(test)
return log_loss(y, pred)
weights = [0.5, 1.0, 5.0, 10.0, 20.0, 40.0, 100.0]
res = [score(ptrain, 'brand', prior_weight=w) for w in weights]
weights = [0.5, 1.0, 5.0, 10.0, 20.0, 40.0, 100.0]
res = [score(ptrain, 'model', prior_weight=w) for w in weights]
plt.plot(weights, res)
plt.title('Best score {:.5f} at prior_weight = {}'.format(np.min(res), weights[np.argmin(res)]))
plt.xlabel('prior_weight')
|
code
|
325705/cell_1
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import os
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import KFold
from sklearn.metrics import log_loss
|
code
|
325705/cell_3
|
[
"text_plain_output_1.png"
] |
import pandas as pd
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
gatrain.head(3)
|
code
|
325705/cell_17
|
[
"text_html_output_1.png"
] |
from sklearn.cross_validation import KFold
from sklearn.metrics import log_loss
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
letarget = LabelEncoder().fit(gatrain.group.values)
y = letarget.transform(gatrain.group.values)
n_classes = len(letarget.classes_)
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
phone = phone.drop_duplicates('device_id', keep='first')
lebrand = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = lebrand.transform(phone.phone_brand)
m = phone.phone_brand.str.cat(phone.device_model)
lemodel = LabelEncoder().fit(m)
phone['model'] = lemodel.transform(m)
train = gatrain.merge(phone[['device_id', 'brand', 'model']], how='left', on='device_id')
ptrain = gatrain.merge(phone[['device_id', 'brand', 'model']], how='left', on='device_id')
class GenderAgeGroupProb(object):
def __init__(self, prior_weight=10.0):
self.prior_weight = prior_weight
def fit(self, df, by):
self.by = by
self.prior = df['group'].value_counts().sort_index() / df.shape[0]
c = df.groupby([by, 'group']).size().unstack().fillna(0)
total = c.sum(axis=1)
self.prob = c.add(self.prior_weight * self.prior).div(c.sum(axis=1) + self.prior_weight, axis=0)
return self
def predict_proba(self, df):
pred = df[[self.by]].merge(self.prob, how='left', left_on=self.by, right_index=True).fillna(self.prior)[self.prob.columns]
pred.loc[pred.iloc[:, 0].isnull(), :] = self.prior
return pred.values
def score(ptrain, by, prior_weight=10.0):
kf = KFold(ptrain.shape[0], n_folds=10, shuffle=True, random_state=0)
pred = np.zeros((ptrain.shape[0], n_classes))
for itrain, itest in kf:
train = ptrain.iloc[itrain, :]
test = ptrain.iloc[itest, :]
ytrain, ytest = (y[itrain], y[itest])
clf = GenderAgeGroupProb(prior_weight=prior_weight).fit(train, by)
pred[itest, :] = clf.predict_proba(test)
return log_loss(y, pred)
weights = [0.5, 1.0, 5.0, 10.0, 20.0, 40.0, 100.0]
res = [score(ptrain, 'brand', prior_weight=w) for w in weights]
plt.plot(weights, res)
plt.title('Best score {:.5f} at prior_weight = {}'.format(np.min(res), weights[np.argmin(res)]))
plt.xlabel('prior_weight')
|
code
|
325705/cell_14
|
[
"text_html_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import pandas as pd
gatrain = pd.read_csv('../input/gender_age_train.csv')
gatest = pd.read_csv('../input/gender_age_test.csv')
letarget = LabelEncoder().fit(gatrain.group.values)
y = letarget.transform(gatrain.group.values)
n_classes = len(letarget.classes_)
phone = pd.read_csv('../input/phone_brand_device_model.csv', encoding='utf-8')
phone = phone.drop_duplicates('device_id', keep='first')
lebrand = LabelEncoder().fit(phone.phone_brand)
phone['brand'] = lebrand.transform(phone.phone_brand)
m = phone.phone_brand.str.cat(phone.device_model)
lemodel = LabelEncoder().fit(m)
phone['model'] = lemodel.transform(m)
train = gatrain.merge(phone[['device_id', 'brand', 'model']], how='left', on='device_id')
ptrain = gatrain.merge(phone[['device_id', 'brand', 'model']], how='left', on='device_id')
ptrain.head(3)
|
code
|
129019808/cell_21
|
[
"text_plain_output_1.png"
] |
from scipy import stats
from scipy import stats
from scipy.stats import t
from scipy.stats import ttest_1samp
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
mean = np.mean(df['Result'])
std = np.std(df['Result'])
import numpy as np
from scipy.stats import t
conf_level = 0.95
n = len(df['Result'])
df = n - 1
t_stat = t.ppf((1 + conf_level) / 2, df)
margin_of_error = t_stat * std / np.sqrt(n)
lower = mean - margin_of_error
upper = mean + margin_of_error
import pandas as pd
import numpy as np
from scipy.stats import ttest_1samp
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
null_mean = 0
alpha = 0.05
t_stat, p_val = ttest_1samp(df['Result'], null_mean)
import pandas as pd
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df['Gender'] = df['Gender'].replace({'F': 1, 'M': 0})
df['Gender'] = df['Gender'].astype('float64')
import pandas as pd
from scipy import stats
df.dropna(inplace=True)
bigResult = df[df['Result'] >= 4.0].iloc[:, 1:]
smallResult = df[df['Result'] < 4.0].iloc[:, 1:]
f_val, p_val = stats.f_oneway(bigResult, smallResult)
import pandas as pd
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df['Gender'] = df['Gender'].replace({'F': 1, 'M': 0})
df['Gender'] = df['Gender'].astype('float64')
import pandas as pd
from scipy import stats
df.dropna(inplace=True)
maleData = df[df['Gender'] == 0.0].iloc[:, 1:]
femaleData = df[df['Gender'] == 1.0].iloc[:, 1:]
f_val, p_val = stats.f_oneway(maleData, femaleData)
data = df['Result'].values
plt.hist(data, bins=9)
plt.title('Histogram of Reesults')
plt.xlabel('Data Values')
plt.ylabel('Frequency')
plt.show()
|
code
|
129019808/cell_9
|
[
"image_output_1.png"
] |
from scipy.stats import t
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
mean = np.mean(df['Result'])
std = np.std(df['Result'])
import numpy as np
from scipy.stats import t
conf_level = 0.95
n = len(df['Result'])
df = n - 1
t_stat = t.ppf((1 + conf_level) / 2, df)
margin_of_error = t_stat * std / np.sqrt(n)
lower = mean - margin_of_error
upper = mean + margin_of_error
print('The 95% confidence interval for the mean is: ({:.2f}, {:.2f})'.format(lower, upper))
|
code
|
129019808/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df.info()
|
code
|
129019808/cell_2
|
[
"text_html_output_1.png"
] |
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
|
code
|
129019808/cell_11
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
from scipy.stats import t
from scipy.stats import ttest_1samp
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
mean = np.mean(df['Result'])
std = np.std(df['Result'])
import numpy as np
from scipy.stats import t
conf_level = 0.95
n = len(df['Result'])
df = n - 1
t_stat = t.ppf((1 + conf_level) / 2, df)
margin_of_error = t_stat * std / np.sqrt(n)
lower = mean - margin_of_error
upper = mean + margin_of_error
import pandas as pd
import numpy as np
from scipy.stats import ttest_1samp
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
null_mean = 0
alpha = 0.05
t_stat, p_val = ttest_1samp(df['Result'], null_mean)
print('t-statistic: {:.2f}'.format(t_stat))
print('p-value: {:.2f}'.format(p_val))
if p_val < alpha:
print('The result is statistically significant')
else:
print('The result is not statistically significant')
|
code
|
129019808/cell_19
|
[
"text_plain_output_1.png"
] |
# Notebook cell replaying earlier analysis steps, then running a one-way ANOVA
# on the survey data split by gender. Duplicate imports come from concatenated
# cells and are harmless (imports are idempotent).
from scipy import stats
from scipy import stats
from scipy.stats import t
from scipy.stats import ttest_1samp
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
mean = np.mean(df['Result'])
std = np.std(df['Result'])  # population standard deviation (np.std default ddof=0)
import numpy as np
from scipy.stats import t
# 95% t-based confidence interval for the mean of Result.
conf_level = 0.95
n = len(df['Result'])
df = n - 1  # NOTE: rebinds df from the DataFrame to the degrees of freedom (int)
t_stat = t.ppf((1 + conf_level) / 2, df)
margin_of_error = t_stat * std / np.sqrt(n)
lower = mean - margin_of_error
upper = mean + margin_of_error
import pandas as pd
import numpy as np
from scipy.stats import ttest_1samp
# Re-read the data: df was clobbered by the degrees-of-freedom integer above.
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
null_mean = 0
alpha = 0.05
t_stat, p_val = ttest_1samp(df['Result'], null_mean)
import pandas as pd
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df['Gender'] = df['Gender'].replace({'F': 1, 'M': 0})  # encode gender: F -> 1, M -> 0
df['Gender'] = df['Gender'].astype('float64')
import pandas as pd
from scipy import stats
df.dropna(inplace=True)
# ANOVA between high (Result >= 4.0) and low (< 4.0) groups over all columns but the first.
bigResult = df[df['Result'] >= 4.0].iloc[:, 1:]
smallResult = df[df['Result'] < 4.0].iloc[:, 1:]
f_val, p_val = stats.f_oneway(bigResult, smallResult)
import pandas as pd
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df['Gender'] = df['Gender'].replace({'F': 1, 'M': 0})
df['Gender'] = df['Gender'].astype('float64')
import pandas as pd
from scipy import stats
df.dropna(inplace=True)
# ANOVA between male (0.0) and female (1.0) rows; guard against an empty group,
# which would make f_oneway fail.
maleData = df[df['Gender'] == 0.0].iloc[:, 1:]
femaleData = df[df['Gender'] == 1.0].iloc[:, 1:]
if len(maleData) == 0 or len(femaleData) == 0:
    print('Error: one or more groups has no data.')
else:
    f_val, p_val = stats.f_oneway(maleData, femaleData)
    print('F-value:', f_val)
    print('p-value:', p_val)
|
code
|
129019808/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os

# List every file shipped with the Kaggle dataset so we know what is available.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        full_path = os.path.join(dirname, filename)
        print(full_path)
|
code
|
129019808/cell_7
|
[
"image_output_1.png"
] |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Load the survey responses and report centre/spread of the Result column.
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
mean = df['Result'].mean()
# ddof=0 gives the population standard deviation, matching np.std's default.
std = df['Result'].std(ddof=0)
print('Mean: ', mean)
print('Standard Deviation: ', std)
|
code
|
129019808/cell_18
|
[
"text_plain_output_1.png"
] |
# Notebook cell replaying earlier steps (CI, t-test) and then re-encoding the
# Gender column for later group comparisons. Duplicate imports are harmless.
from scipy import stats
from scipy.stats import t
from scipy.stats import ttest_1samp
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
mean = np.mean(df['Result'])
std = np.std(df['Result'])  # population standard deviation (ddof=0)
import numpy as np
from scipy.stats import t
# 95% t-based confidence interval for the mean of Result.
conf_level = 0.95
n = len(df['Result'])
df = n - 1  # NOTE: rebinds df from the DataFrame to the degrees of freedom (int)
t_stat = t.ppf((1 + conf_level) / 2, df)
margin_of_error = t_stat * std / np.sqrt(n)
lower = mean - margin_of_error
upper = mean + margin_of_error
import pandas as pd
import numpy as np
from scipy.stats import ttest_1samp
# Re-read the data: df was clobbered by the degrees-of-freedom integer above.
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
null_mean = 0
alpha = 0.05
t_stat, p_val = ttest_1samp(df['Result'], null_mean)
import pandas as pd
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df['Gender'] = df['Gender'].replace({'F': 1, 'M': 0})  # encode gender: F -> 1, M -> 0
df['Gender'] = df['Gender'].astype('float64')
import pandas as pd
from scipy import stats
df.dropna(inplace=True)
# ANOVA between high (Result >= 4.0) and low (< 4.0) groups over all columns but the first.
bigResult = df[df['Result'] >= 4.0].iloc[:, 1:]
smallResult = df[df['Result'] < 4.0].iloc[:, 1:]
f_val, p_val = stats.f_oneway(bigResult, smallResult)
import pandas as pd
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df['Gender'] = df['Gender'].replace({'F': 1, 'M': 0})
df['Gender'] = df['Gender'].astype('float64')
df.head()
|
code
|
129019808/cell_16
|
[
"text_plain_output_1.png"
] |
# Notebook cell: CI and t-test replays, then a one-way ANOVA comparing high vs
# low Result groups, with an empty-group guard. Duplicate imports are harmless.
from scipy import stats
from scipy.stats import t
from scipy.stats import ttest_1samp
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
mean = np.mean(df['Result'])
std = np.std(df['Result'])  # population standard deviation (ddof=0)
import numpy as np
from scipy.stats import t
# 95% t-based confidence interval for the mean of Result.
conf_level = 0.95
n = len(df['Result'])
df = n - 1  # NOTE: rebinds df from the DataFrame to the degrees of freedom (int)
t_stat = t.ppf((1 + conf_level) / 2, df)
margin_of_error = t_stat * std / np.sqrt(n)
lower = mean - margin_of_error
upper = mean + margin_of_error
import pandas as pd
import numpy as np
from scipy.stats import ttest_1samp
# Re-read the data: df was clobbered by the degrees-of-freedom integer above.
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
null_mean = 0
alpha = 0.05
t_stat, p_val = ttest_1samp(df['Result'], null_mean)
import pandas as pd
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df['Gender'] = df['Gender'].replace({'F': 1, 'M': 0})  # encode gender: F -> 1, M -> 0
df['Gender'] = df['Gender'].astype('float64')
import pandas as pd
from scipy import stats
df.dropna(inplace=True)
# ANOVA between high (Result >= 4.0) and low (< 4.0) groups; guard empty groups.
bigResult = df[df['Result'] >= 4.0].iloc[:, 1:]
smallResult = df[df['Result'] < 4.0].iloc[:, 1:]
if len(bigResult) == 0 or len(smallResult) == 0:
    print('Error: one or more groups has no data.')
else:
    f_val, p_val = stats.f_oneway(bigResult, smallResult)
    print('F-value:', f_val)
    print('p-value:', p_val)
|
code
|
129019808/cell_14
|
[
"text_html_output_1.png"
] |
# Notebook cell: CI and t-test replays, then an OLS regression of the encoded
# Gender column on Result via statsmodels. Duplicate imports are harmless.
from scipy.stats import t
from scipy.stats import ttest_1samp
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import statsmodels.api as sm
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
mean = np.mean(df['Result'])
std = np.std(df['Result'])  # population standard deviation (ddof=0)
import numpy as np
from scipy.stats import t
# 95% t-based confidence interval for the mean of Result.
conf_level = 0.95
n = len(df['Result'])
df = n - 1  # NOTE: rebinds df from the DataFrame to the degrees of freedom (int)
t_stat = t.ppf((1 + conf_level) / 2, df)
margin_of_error = t_stat * std / np.sqrt(n)
lower = mean - margin_of_error
upper = mean + margin_of_error
import pandas as pd
import numpy as np
from scipy.stats import ttest_1samp
# Re-read the data: df was clobbered by the degrees-of-freedom integer above.
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
null_mean = 0
alpha = 0.05
t_stat, p_val = ttest_1samp(df['Result'], null_mean)
import pandas as pd
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df['Gender'] = df['Gender'].replace({'F': 1, 'M': 0})  # encode gender: F -> 1, M -> 0
df['Gender'] = df['Gender'].astype('float64')
import pandas as pd
import statsmodels.api as sm
# OLS of Gender on Result with an intercept (add_constant appends the constant column).
X = df['Result']
y = df['Gender']
X = sm.add_constant(X)
model = sm.OLS(y, X).fit()
print(model.summary())
|
code
|
129019808/cell_12
|
[
"text_plain_output_1.png"
] |
# Notebook cell: CI and t-test replays, then Gender encoding and a preview.
# Duplicate imports are harmless.
from scipy.stats import t
from scipy.stats import ttest_1samp
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
mean = np.mean(df['Result'])
std = np.std(df['Result'])  # population standard deviation (ddof=0)
import numpy as np
from scipy.stats import t
# 95% t-based confidence interval for the mean of Result.
conf_level = 0.95
n = len(df['Result'])
df = n - 1  # NOTE: rebinds df from the DataFrame to the degrees of freedom (int)
t_stat = t.ppf((1 + conf_level) / 2, df)
margin_of_error = t_stat * std / np.sqrt(n)
lower = mean - margin_of_error
upper = mean + margin_of_error
import pandas as pd
import numpy as np
from scipy.stats import ttest_1samp
# Re-read the data: df was clobbered by the degrees-of-freedom integer above.
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
null_mean = 0
alpha = 0.05
t_stat, p_val = ttest_1samp(df['Result'], null_mean)
import pandas as pd
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df['Gender'] = df['Gender'].replace({'F': 1, 'M': 0})  # encode gender: F -> 1, M -> 0
df['Gender'] = df['Gender'].astype('float64')
df.head()
|
code
|
129019808/cell_5
|
[
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the survey responses sheet ("Form Yanıtları 1" = "Form Responses 1") and preview it.
df = pd.read_excel('/kaggle/input/iedata/iedatason.xlsx', sheet_name='Form Yanıtları 1')
df.head()
|
code
|
105186835/cell_2
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
# Prompt the operator for department info. input() always returns str, so
# DEPT_revenue stays a string until explicitly converted to a number.
DEPT_name = input('please enter your department name')
DEPT_revenue = input('please enter your department revenue')
|
code
|
129039496/cell_42
|
[
"text_plain_output_1.png"
] |
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# TF-IDF + classifier pipelines for four model families.
maxent_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', LogisticRegression())])
nb_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', MultinomialNB())])
svm_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', SVC())])
nn_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', MLPClassifier())])
# Grid-search each pipeline over the n-gram range and its main hyperparameter
# with 10-fold CV on accuracy. X_train/y_train come from an earlier cell
# (train/test split of the sentiment dataset).
maxent_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__C': [0.1, 1, 10]}
maxent_grid = GridSearchCV(maxent_pipeline, maxent_params, cv=10, scoring='accuracy')
maxent_grid.fit(X_train, y_train)
best_maxent_clf = maxent_grid.best_estimator_
nb_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__alpha': [0.1, 1, 10]}
nb_grid = GridSearchCV(nb_pipeline, nb_params, cv=10, scoring='accuracy')
nb_grid.fit(X_train, y_train)
best_nb_clf = nb_grid.best_estimator_
svm_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__C': [0.1, 1, 10]}
svm_grid = GridSearchCV(svm_pipeline, svm_params, cv=10, scoring='accuracy')
svm_grid.fit(X_train, y_train)
best_svm_clf = svm_grid.best_estimator_
nn_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__hidden_layer_sizes': [(100,), (200,), (300,)]}
nn_grid = GridSearchCV(nn_pipeline, nn_params, cv=10, scoring='accuracy')
nn_grid.fit(X_train, y_train)
best_nn_clf = nn_grid.best_estimator_
# Report the winning hyperparameters and CV accuracy for each family.
print('Best-fitting model in each category:')
print('MaxEnt (Logistic Regression):', maxent_grid.best_params_)
print('Naïve Bayes:', nb_grid.best_params_)
print('SVM:', svm_grid.best_params_)
print('Neural Network:', nn_grid.best_params_)
print('\nScores:')
print('MaxEnt (Logistic Regression):', maxent_grid.best_score_)
print('Naïve Bayes:', nb_grid.best_score_)
print('SVM:', svm_grid.best_score_)
print('Neural Network:', nn_grid.best_score_)
|
code
|
129039496/cell_25
|
[
"text_plain_output_1.png"
] |
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd

# Build word-document matrices from the UCI bag-of-words corpora, project them
# with truncated SVD (LSA), and report the mean pairwise cosine similarity of
# the KOS corpus word vectors.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    # NOTE(review): UCI wordIDs are 1-based while reset_index() is 0-based — confirm the join is not off by one.
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(wdm)
# BUG FIX: transform() requires the matrix to project; the original called
# svd.transform() with no argument, which raises TypeError.
wdm_transformed = pd.DataFrame(svd.transform(wdm))
wdm_enron = pd.pivot_table(merged_docwords[0], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_enron_transformed = svd.fit_transform(wdm_enron)
wdm_kos = pd.pivot_table(merged_docwords[1], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_kos_transformed = svd.fit_transform(wdm_kos)
# Mean of the full pairwise cosine-similarity matrix (includes the diagonal of 1s).
kos_similarity = cosine_similarity(wdm_kos_transformed)
kos_similarity.mean()
|
code
|
129039496/cell_23
|
[
"text_plain_output_1.png"
] |
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd

# LSA over the UCI bag-of-words corpora; reports the mean pairwise cosine
# similarity of the Enron corpus word vectors.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(wdm)
# BUG FIX: the original called svd.transform() with no argument (TypeError);
# pass the fitted word-document matrix.
wdm_transformed = pd.DataFrame(svd.transform(wdm))
wdm_enron = pd.pivot_table(merged_docwords[0], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_enron_transformed = svd.fit_transform(wdm_enron)
# Mean of the full pairwise cosine-similarity matrix (includes the diagonal of 1s).
enron_similarity = cosine_similarity(wdm_enron_transformed)
enron_similarity.mean()
|
code
|
129039496/cell_30
|
[
"text_plain_output_1.png"
] |
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd

# LSA over the combined corpus and each individual corpus; reports the mean
# pairwise cosine similarity of the combined-corpus word vectors.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(wdm)
# BUG FIX: the original called svd.transform() with no argument (TypeError);
# pass the fitted word-document matrix.
wdm_transformed = pd.DataFrame(svd.transform(wdm))
wdm_enron = pd.pivot_table(merged_docwords[0], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_enron_transformed = svd.fit_transform(wdm_enron)
wdm_kos = pd.pivot_table(merged_docwords[1], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_kos_transformed = svd.fit_transform(wdm_kos)
wdm_nips = pd.pivot_table(merged_docwords[2], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_nips_transformed = svd.fit_transform(wdm_nips)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_corpus_transformed = svd.fit_transform(wdm)
# Mean of the full pairwise cosine-similarity matrix (includes the diagonal of 1s).
corpus_similarity = cosine_similarity(wdm_corpus_transformed)
corpus_similarity.mean()
|
code
|
129039496/cell_44
|
[
"text_plain_output_1.png"
] |
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_curve, auc, confusion_matrix
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
import numpy as np
import numpy as np
import pandas as pd

# LSA over the UCI corpora, then sentiment classification on the Amazon cells
# dataset with four grid-searched model families; prints per-model test metrics.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(wdm)
# BUG FIX: the original called svd.transform() with no argument (TypeError);
# pass the fitted word-document matrix.
wdm_transformed = pd.DataFrame(svd.transform(wdm))
wdm_enron = pd.pivot_table(merged_docwords[0], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_enron_transformed = svd.fit_transform(wdm_enron)
wdm_kos = pd.pivot_table(merged_docwords[1], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_kos_transformed = svd.fit_transform(wdm_kos)
wdm_nips = pd.pivot_table(merged_docwords[2], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_nips_transformed = svd.fit_transform(wdm_nips)
# Sentiment-labelled sentences: tab-separated text/label pairs, 80/20 split.
dataset = pd.read_csv('/kaggle/input/sentiment-labelled-sentences-data-set/sentiment labelled sentences/amazon_cells_labelled.txt', delimiter='\t', header=None, names=['text', 'sentiment'])
X_train, X_test, y_train, y_test = train_test_split(dataset.text, dataset.sentiment, test_size=0.2, random_state=42)
maxent_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', LogisticRegression())])
nb_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', MultinomialNB())])
svm_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', SVC())])
nn_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', MLPClassifier())])
# Grid-search each family over n-gram range plus its main hyperparameter (10-fold CV).
maxent_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__C': [0.1, 1, 10]}
maxent_grid = GridSearchCV(maxent_pipeline, maxent_params, cv=10, scoring='accuracy')
maxent_grid.fit(X_train, y_train)
best_maxent_clf = maxent_grid.best_estimator_
nb_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__alpha': [0.1, 1, 10]}
nb_grid = GridSearchCV(nb_pipeline, nb_params, cv=10, scoring='accuracy')
nb_grid.fit(X_train, y_train)
best_nb_clf = nb_grid.best_estimator_
svm_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__C': [0.1, 1, 10]}
svm_grid = GridSearchCV(svm_pipeline, svm_params, cv=10, scoring='accuracy')
svm_grid.fit(X_train, y_train)
best_svm_clf = svm_grid.best_estimator_
nn_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__hidden_layer_sizes': [(100,), (200,), (300,)]}
nn_grid = GridSearchCV(nn_pipeline, nn_params, cv=10, scoring='accuracy')
nn_grid.fit(X_train, y_train)
best_nn_clf = nn_grid.best_estimator_
classifiers = [best_maxent_clf, best_nb_clf, best_svm_clf, best_nn_clf]
classifier_names = ['MaxEnt', 'Naïve Bayes', 'SVM', 'Neural Network']
# Cross-validate each tuned model on the whole dataset (scores unused here).
for clf, name in zip(classifiers, classifier_names):
    scores = cross_val_score(clf, dataset.text, dataset.sentiment, cv=10)
def generate_contingency_table(clf, name):
    # Row-normalized confusion matrix on the held-out test split (computed, not printed).
    y_pred = clf.predict(X_test)
    cm = confusion_matrix(y_test, y_pred)
    cm_avg = cm / cm.sum(axis=1)[:, np.newaxis]
for clf, name in zip(classifiers, classifier_names):
    generate_contingency_table(clf, name)
def calculate_metrics(clf, name):
    # Print the four standard binary-classification metrics on the test split.
    y_pred = clf.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred)
    recall = recall_score(y_test, y_pred)
    f1 = f1_score(y_test, y_pred)
    print(f'\n{name} - Performance Metrics:')
    print(f'Accuracy: {accuracy:.4f}')
    print(f'Precision: {precision:.4f}')
    print(f'Recall: {recall:.4f}')
    print(f'F1 Score: {f1:.4f}')
for clf, name in zip(classifiers, classifier_names):
    calculate_metrics(clf, name)
|
code
|
129039496/cell_7
|
[
"text_plain_output_1.png"
] |
import pandas as pd
# (docword, vocab) path pairs for the Enron, KOS and NIPS bag-of-words corpora.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    # NOTE(review): UCI wordIDs are 1-based while reset_index() is 0-based — confirm the join is not off by one.
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
print(corpus[:30])
print(corpus[-30:])
print(corpus.info())
|
code
|
129039496/cell_18
|
[
"text_html_output_1.png"
] |
from sklearn.decomposition import TruncatedSVD
import pandas as pd

# Build one word-document matrix from the three UCI bag-of-words corpora and
# project it onto 100 latent-semantic components (LSA via truncated SVD).
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(wdm)
# BUG FIX: transform() requires the matrix to project; the original called
# svd.transform() with no argument, which raises TypeError.
wdm_transformed = pd.DataFrame(svd.transform(wdm))
wdm_transformed
|
code
|
129039496/cell_8
|
[
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] |
import pandas as pd
# (docword, vocab) path pairs for the Enron, KOS and NIPS bag-of-words corpora.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
# Size of the combined vocabulary across all three corpora.
print(len(corpus.word.unique()))
|
code
|
129039496/cell_15
|
[
"text_plain_output_1.png"
] |
from sklearn.decomposition import TruncatedSVD
import pandas as pd
# (docword, vocab) path pairs for the Enron, KOS and NIPS bag-of-words corpora.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(wdm)
# Variance explained by each of the 100 SVD components.
print(svd.explained_variance_ratio_)
|
code
|
129039496/cell_16
|
[
"text_html_output_1.png"
] |
from sklearn.decomposition import TruncatedSVD
import pandas as pd
# (docword, vocab) path pairs for the Enron, KOS and NIPS bag-of-words corpora.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(wdm)
# Singular values of the fitted decomposition, largest first.
print(svd.singular_values_)
|
code
|
129039496/cell_43
|
[
"text_plain_output_1.png"
] |
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_curve, auc, confusion_matrix
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
import numpy as np
import numpy as np
import pandas as pd

# LSA over the UCI corpora, then sentiment classification on the Amazon cells
# dataset with four grid-searched model families; prints CV accuracy and a
# row-normalized contingency table per model.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(wdm)
# BUG FIX: the original called svd.transform() with no argument (TypeError);
# pass the fitted word-document matrix.
wdm_transformed = pd.DataFrame(svd.transform(wdm))
wdm_enron = pd.pivot_table(merged_docwords[0], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_enron_transformed = svd.fit_transform(wdm_enron)
wdm_kos = pd.pivot_table(merged_docwords[1], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_kos_transformed = svd.fit_transform(wdm_kos)
wdm_nips = pd.pivot_table(merged_docwords[2], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_nips_transformed = svd.fit_transform(wdm_nips)
# Sentiment-labelled sentences: tab-separated text/label pairs, 80/20 split.
dataset = pd.read_csv('/kaggle/input/sentiment-labelled-sentences-data-set/sentiment labelled sentences/amazon_cells_labelled.txt', delimiter='\t', header=None, names=['text', 'sentiment'])
X_train, X_test, y_train, y_test = train_test_split(dataset.text, dataset.sentiment, test_size=0.2, random_state=42)
maxent_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', LogisticRegression())])
nb_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', MultinomialNB())])
svm_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', SVC())])
nn_pipeline = Pipeline([('tfidf', TfidfVectorizer()), ('classifier', MLPClassifier())])
# Grid-search each family over n-gram range plus its main hyperparameter (10-fold CV).
maxent_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__C': [0.1, 1, 10]}
maxent_grid = GridSearchCV(maxent_pipeline, maxent_params, cv=10, scoring='accuracy')
maxent_grid.fit(X_train, y_train)
best_maxent_clf = maxent_grid.best_estimator_
nb_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__alpha': [0.1, 1, 10]}
nb_grid = GridSearchCV(nb_pipeline, nb_params, cv=10, scoring='accuracy')
nb_grid.fit(X_train, y_train)
best_nb_clf = nb_grid.best_estimator_
svm_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__C': [0.1, 1, 10]}
svm_grid = GridSearchCV(svm_pipeline, svm_params, cv=10, scoring='accuracy')
svm_grid.fit(X_train, y_train)
best_svm_clf = svm_grid.best_estimator_
nn_params = {'tfidf__ngram_range': [(1, 1), (1, 2)], 'classifier__hidden_layer_sizes': [(100,), (200,), (300,)]}
nn_grid = GridSearchCV(nn_pipeline, nn_params, cv=10, scoring='accuracy')
nn_grid.fit(X_train, y_train)
best_nn_clf = nn_grid.best_estimator_
classifiers = [best_maxent_clf, best_nb_clf, best_svm_clf, best_nn_clf]
classifier_names = ['MaxEnt', 'Naïve Bayes', 'SVM', 'Neural Network']
# 10-fold CV accuracy of each tuned model on the whole dataset.
for clf, name in zip(classifiers, classifier_names):
    scores = cross_val_score(clf, dataset.text, dataset.sentiment, cv=10)
    print(f'{name}: Accuracy = {np.mean(scores):.4f}')
def generate_contingency_table(clf, name):
    # Print the row-normalized confusion matrix on the held-out test split.
    y_pred = clf.predict(X_test)
    cm = confusion_matrix(y_test, y_pred)
    cm_avg = cm / cm.sum(axis=1)[:, np.newaxis]
    print(f'\n{name} - Contingency Table (Averaged):')
    print(cm_avg)
for clf, name in zip(classifiers, classifier_names):
    generate_contingency_table(clf, name)
|
code
|
129039496/cell_14
|
[
"text_plain_output_1.png"
] |
from sklearn.decomposition import TruncatedSVD
import pandas as pd
# (docword, vocab) path pairs for the Enron, KOS and NIPS bag-of-words corpora.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
# Last expression: fit() returns the estimator, which the notebook displays.
svd.fit(wdm)
|
code
|
129039496/cell_27
|
[
"text_html_output_1.png"
] |
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd

# LSA over the UCI bag-of-words corpora; reports the mean pairwise cosine
# similarity of the NIPS corpus word vectors.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # docword files carry 3 header lines (D, W, NNZ); cap each corpus at 100k triples.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(wdm)
# BUG FIX: the original called svd.transform() with no argument (TypeError);
# pass the fitted word-document matrix.
wdm_transformed = pd.DataFrame(svd.transform(wdm))
wdm_enron = pd.pivot_table(merged_docwords[0], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_enron_transformed = svd.fit_transform(wdm_enron)
wdm_kos = pd.pivot_table(merged_docwords[1], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_kos_transformed = svd.fit_transform(wdm_kos)
wdm_nips = pd.pivot_table(merged_docwords[2], values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
wdm_nips_transformed = svd.fit_transform(wdm_nips)
# Mean of the full pairwise cosine-similarity matrix (includes the diagonal of 1s).
nips_similarity = cosine_similarity(wdm_nips_transformed)
nips_similarity.mean()
|
code
|
129039496/cell_12
|
[
"text_plain_output_1.png"
] |
import pandas as pd
# (docword-path, vocab-path) pairs for the three UCI bag-of-words corpora.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # 3-line file header skipped; 100k triplets per corpus to bound memory.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
# Word-document count matrix; preview the first rows.
wdm = pd.pivot_table(corpus, values='count', index='word', columns='documentID', fill_value=0)
wdm.head()
|
code
|
129039496/cell_5
|
[
"text_plain_output_1.png"
] |
import pandas as pd
# (docword-path, vocab-path) pairs for the three UCI bag-of-words corpora.
datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    # 3-line file header skipped; 100k triplets per corpus to bound memory.
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names, nrows=100000)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
# Show the merged (documentID, wordID, count, word) frames.
print(merged_docwords)
|
code
|
34134596/cell_30
|
[
"text_html_output_1.png"
] |
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import re
import string
# Load review text/metadata for train and test splits.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
df_train.isna().sum()
def clean_text_round_1(text):
    """Lowercase, strip punctuation, and drop tokens containing digits."""
    text = text.lower()
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\\w*\\d\\w*', '', text)
    return text
round_1 = lambda x: clean_text_round_1(x)
data_review_cleaned = pd.DataFrame(df_train.review.apply(round_1))
def clean_text_round_2(text):
    """Flatten newlines and strip leftover quote/dot/underscore characters."""
    text = re.sub('\n', ' ', text)
    # NOTE(review): the class below appears to target smart quotes, ellipsis
    # dots and underscores; some characters may have been mangled in transit.
    text = re.sub('[""..._]', '', text)
    return text
round2 = lambda x: clean_text_round_2(x)
data_review_cleaned = pd.DataFrame(data_review_cleaned.review.apply(round2))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(stop_words='english')
data_cv = cv.fit_transform(data_review_cleaned.review)
# Document-term matrix: one row per review, one column per vocabulary term.
data_dtm = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_dtm.index = data_review_cleaned.index
data_dtm
|
code
|
34134596/cell_44
|
[
"text_html_output_1.png"
] |
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import re
import string
# Load review text/metadata for train and test splits.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
df_train.isna().sum()
df_test = review_text_test.copy()
# Attach the vote metadata columns to the test reviews.
df_test['vote_funny'] = review_meta_test.vote_funny
df_test['vote_cool'] = review_meta_test.vote_cool
df_test['vote_useful'] = review_meta_test.vote_useful
df_test
df_test.isna().sum()
def clean_text_round_1(text):
    """Lowercase, strip punctuation, and drop tokens containing digits."""
    text = text.lower()
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\\w*\\d\\w*', '', text)
    return text
round_1 = lambda x: clean_text_round_1(x)
data_review_cleaned = pd.DataFrame(df_train.review.apply(round_1))
def clean_text_round_2(text):
    """Flatten newlines and strip leftover quote/dot/underscore characters."""
    text = re.sub('\n', ' ', text)
    text = re.sub('[""..._]', '', text)
    return text
round2 = lambda x: clean_text_round_2(x)
data_review_cleaned = pd.DataFrame(data_review_cleaned.review.apply(round2))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(stop_words='english')
data_cv = cv.fit_transform(data_review_cleaned.review)
# Document-term matrix: one row per review, one column per vocabulary term.
data_dtm = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_dtm.index = data_review_cleaned.index
data_dtm
data_dtm_transposed = data_dtm.T
data_review_cleaned_test = pd.DataFrame(df_test.review.apply(round_1))
data_review_cleaned_test = pd.DataFrame(data_review_cleaned_test.review.apply(round2))
# For each review (a column of the transposed DTM), keep its 50 most
# frequent terms as (word, count) pairs.
top_words = {}
for o in data_dtm_transposed.columns:
    top = data_dtm_transposed[o].sort_values(ascending=False).head(50)
    top_words[o] = list(zip(top.index, top.values))
data = data_dtm_transposed
from collections import Counter
# Count how often each word appears across the per-review top-50 lists.
words = []
for user_id in data.columns:
    top = [word for word, count in top_words[user_id]]
    for t in top:
        words.append(t)
common_words_count = Counter(words).most_common()
df_common_words = pd.DataFrame(common_words_count, columns=['word', 'count'])
df_common_words
|
code
|
34134596/cell_55
|
[
"text_html_output_1.png"
] |
from collections import Counter
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import re
import seaborn as sns
import string
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import preprocessing, feature_extraction, model_selection, linear_model
import seaborn as sns
sns.set()
# Load review text/metadata for train and test splits.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
df_train.isna().sum()
df_test = review_text_test.copy()
# Attach the vote metadata columns to the test reviews.
df_test['vote_funny'] = review_meta_test.vote_funny
df_test['vote_cool'] = review_meta_test.vote_cool
df_test['vote_useful'] = review_meta_test.vote_useful
df_test
df_test.isna().sum()
def clean_text_round_1(text):
    """Lowercase, strip punctuation, and drop tokens containing digits."""
    text = text.lower()
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\\w*\\d\\w*', '', text)
    return text
round_1 = lambda x: clean_text_round_1(x)
data_review_cleaned = pd.DataFrame(df_train.review.apply(round_1))
def clean_text_round_2(text):
    """Flatten newlines and strip leftover quote/dot/underscore characters."""
    text = re.sub('\n', ' ', text)
    text = re.sub('[""..._]', '', text)
    return text
round2 = lambda x: clean_text_round_2(x)
data_review_cleaned = pd.DataFrame(data_review_cleaned.review.apply(round2))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(stop_words='english')
data_cv = cv.fit_transform(data_review_cleaned.review)
# Document-term matrix: one row per review, one column per vocabulary term.
data_dtm = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_dtm.index = data_review_cleaned.index
data_dtm
data_dtm_transposed = data_dtm.T
data_review_cleaned_test = pd.DataFrame(df_test.review.apply(round_1))
data_review_cleaned_test = pd.DataFrame(data_review_cleaned_test.review.apply(round2))
# Top-50 most frequent terms per review, as (word, count) pairs.
top_words = {}
for o in data_dtm_transposed.columns:
    top = data_dtm_transposed[o].sort_values(ascending=False).head(50)
    top_words[o] = list(zip(top.index, top.values))
data = data_dtm_transposed
from collections import Counter
# Count how often each word appears across the per-review top-50 lists.
words = []
for user_id in data.columns:
    top = [word for word, count in top_words[user_id]]
    for t in top:
        words.append(t)
common_words_count = Counter(words).most_common()
df_common_words = pd.DataFrame(common_words_count, columns=['word', 'count'])
df_common_words
# Bar chart of the 30 most common top-words (A4-ish figure size).
fig, ax = plt.subplots()
fig.set_size_inches(15, 12)
ax = sns.barplot(x='count', y='word', data=df_common_words[:30])
ax.set_title('Top 30 Words in the Corpus', size = 24)
ax.set_xlabel('Count', size = 20)
ax.set_ylabel("Words", size = 20)
fig.savefig('top_30_words.png')
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
stop_words = text.ENGLISH_STOP_WORDS
wc = WordCloud(width=800, height=400, stopwords=stop_words, background_color='white', colormap='Dark2', max_font_size=170, random_state=45)
data_for_wc = pd.DataFrame()
data_for_wc['review'] = data_review_cleaned['review']
data_for_wc = data_for_wc.reset_index(drop=True)
# PERF FIX: the original built text_wc with += in a loop (quadratic in the
# corpus size); ''.join produces the identical string in one linear pass.
text_wc = ' ' + ''.join(data_for_wc['review'])
cloud = wc.generate(text_wc)
plt.axis('off')
cloud.to_file('word_cloud.png')
|
code
|
34134596/cell_6
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
# Load the four review CSVs and report each frame's dimensions.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
frames = [('review_text_train', review_text_train), ('review_text_test', review_text_test), ('review_meta_train', review_meta_train), ('review_meta_test', review_meta_test)]
for frame_label, frame in frames:
    print(frame_label + ' :\t', str(frame.shape))
|
code
|
34134596/cell_54
|
[
"text_html_output_1.png"
] |
from collections import Counter
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import re
import seaborn as sns
import string
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import preprocessing, feature_extraction, model_selection, linear_model
import seaborn as sns
sns.set()
# Load review text/metadata for train and test splits.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
df_train.isna().sum()
df_test = review_text_test.copy()
# Attach the vote metadata columns to the test reviews.
df_test['vote_funny'] = review_meta_test.vote_funny
df_test['vote_cool'] = review_meta_test.vote_cool
df_test['vote_useful'] = review_meta_test.vote_useful
df_test
df_test.isna().sum()
def clean_text_round_1(text):
    """Lowercase, strip punctuation, and drop tokens containing digits."""
    text = text.lower()
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\\w*\\d\\w*', '', text)
    return text
round_1 = lambda x: clean_text_round_1(x)
data_review_cleaned = pd.DataFrame(df_train.review.apply(round_1))
def clean_text_round_2(text):
    """Flatten newlines and strip leftover quote/dot/underscore characters."""
    text = re.sub('\n', ' ', text)
    text = re.sub('[""..._]', '', text)
    return text
round2 = lambda x: clean_text_round_2(x)
data_review_cleaned = pd.DataFrame(data_review_cleaned.review.apply(round2))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(stop_words='english')
data_cv = cv.fit_transform(data_review_cleaned.review)
# Document-term matrix: one row per review, one column per vocabulary term.
data_dtm = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_dtm.index = data_review_cleaned.index
data_dtm
data_dtm_transposed = data_dtm.T
data_review_cleaned_test = pd.DataFrame(df_test.review.apply(round_1))
data_review_cleaned_test = pd.DataFrame(data_review_cleaned_test.review.apply(round2))
# Top-50 most frequent terms per review, as (word, count) pairs.
top_words = {}
for o in data_dtm_transposed.columns:
    top = data_dtm_transposed[o].sort_values(ascending=False).head(50)
    top_words[o] = list(zip(top.index, top.values))
data = data_dtm_transposed
from collections import Counter
# Count how often each word appears across the per-review top-50 lists.
words = []
for user_id in data.columns:
    top = [word for word, count in top_words[user_id]]
    for t in top:
        words.append(t)
common_words_count = Counter(words).most_common()
df_common_words = pd.DataFrame(common_words_count, columns=['word', 'count'])
df_common_words
# Bar chart of the 30 most common top-words (A4-ish figure size).
fig, ax = plt.subplots()
fig.set_size_inches(15, 12)
ax = sns.barplot(x='count', y='word', data=df_common_words[:30])
ax.set_title('Top 30 Words in the Corpus', size = 24)
ax.set_xlabel('Count', size = 20)
ax.set_ylabel("Words", size = 20)
fig.savefig('top_30_words.png')
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
stop_words = text.ENGLISH_STOP_WORDS
wc = WordCloud(width=800, height=400, stopwords=stop_words, background_color='white', colormap='Dark2', max_font_size=170, random_state=45)
data_for_wc = pd.DataFrame()
data_for_wc['review'] = data_review_cleaned['review']
data_for_wc = data_for_wc.reset_index(drop=True)
# Concatenate every cleaned review into one corpus string for the cloud.
# NOTE(review): += in a loop is quadratic; ''.join would be linear.
text_wc = ' '
for i in range(len(data_for_wc)):
    text_wc += data_for_wc['review'][i]
cloud = wc.generate(text_wc)
# Render the word cloud inline.
plt.figure(figsize=(30, 15))
plt.title('Tweets Text WordCloud', fontsize=30)
plt.imshow(wc, interpolation='bilinear')
plt.axis('off')
|
code
|
34134596/cell_7
|
[
"text_html_output_1.png"
] |
import pandas as pd
# Load the four review CSVs; display the train metadata frame.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train
|
code
|
34134596/cell_18
|
[
"text_html_output_1.png"
] |
import pandas as pd
# Load the four review CSVs.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_test = review_text_test.copy()
# Attach the vote metadata columns, then check for missing values.
df_test['vote_funny'] = review_meta_test.vote_funny
df_test['vote_cool'] = review_meta_test.vote_cool
df_test['vote_useful'] = review_meta_test.vote_useful
df_test
df_test.isna().sum()
|
code
|
34134596/cell_32
|
[
"text_plain_output_1.png"
] |
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import re
import string
# Load review text/metadata for train and test splits.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
df_train.isna().sum()
def clean_text_round_1(text):
    """Lowercase, strip punctuation, and drop tokens containing digits."""
    text = text.lower()
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\\w*\\d\\w*', '', text)
    return text
round_1 = lambda x: clean_text_round_1(x)
data_review_cleaned = pd.DataFrame(df_train.review.apply(round_1))
def clean_text_round_2(text):
    """Flatten newlines and strip leftover quote/dot/underscore characters."""
    text = re.sub('\n', ' ', text)
    text = re.sub('[""..._]', '', text)
    return text
round2 = lambda x: clean_text_round_2(x)
data_review_cleaned = pd.DataFrame(data_review_cleaned.review.apply(round2))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(stop_words='english')
data_cv = cv.fit_transform(data_review_cleaned.review)
# Document-term matrix; transpose so rows = terms, columns = reviews.
data_dtm = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_dtm.index = data_review_cleaned.index
data_dtm
data_dtm_transposed = data_dtm.T
data_dtm_transposed
|
code
|
34134596/cell_58
|
[
"image_output_1.png"
] |
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import re
import string
# Load review text/metadata for train and test splits.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
df_train.isna().sum()
def clean_text_round_1(text):
    """Lowercase, strip punctuation, and drop tokens containing digits."""
    text = text.lower()
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\\w*\\d\\w*', '', text)
    return text
round_1 = lambda x: clean_text_round_1(x)
data_review_cleaned = pd.DataFrame(df_train.review.apply(round_1))
def clean_text_round_2(text):
    """Flatten newlines and strip leftover quote/dot/underscore characters."""
    text = re.sub('\n', ' ', text)
    text = re.sub('[""..._]', '', text)
    return text
round2 = lambda x: clean_text_round_2(x)
data_review_cleaned = pd.DataFrame(data_review_cleaned.review.apply(round2))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(stop_words='english')
data_cv = cv.fit_transform(data_review_cleaned.review)
# Document-term matrix: one row per review, one column per vocabulary term.
data_dtm = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_dtm.index = data_review_cleaned.index
data_dtm
# Replace the raw review text with its cleaned version and display.
df_train['review'] = data_review_cleaned['review']
df_train
|
code
|
34134596/cell_8
|
[
"image_output_1.png"
] |
import pandas as pd
# Load the four review CSVs; display the train review text frame.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
review_text_train
|
code
|
34134596/cell_15
|
[
"text_html_output_1.png"
] |
import pandas as pd
# Load the four review CSVs; check the train text for missing values.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
df_train.isna().sum()
|
code
|
34134596/cell_38
|
[
"text_html_output_1.png"
] |
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import re
import string
# Load review text/metadata for train and test splits.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
df_train.isna().sum()
df_test = review_text_test.copy()
# Attach the vote metadata columns to the test reviews.
df_test['vote_funny'] = review_meta_test.vote_funny
df_test['vote_cool'] = review_meta_test.vote_cool
df_test['vote_useful'] = review_meta_test.vote_useful
df_test
df_test.isna().sum()
def clean_text_round_1(text):
    """Lowercase, strip punctuation, and drop tokens containing digits."""
    text = text.lower()
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\\w*\\d\\w*', '', text)
    return text
round_1 = lambda x: clean_text_round_1(x)
data_review_cleaned = pd.DataFrame(df_train.review.apply(round_1))
def clean_text_round_2(text):
    """Flatten newlines and strip leftover quote/dot/underscore characters."""
    text = re.sub('\n', ' ', text)
    text = re.sub('[""..._]', '', text)
    return text
round2 = lambda x: clean_text_round_2(x)
data_review_cleaned = pd.DataFrame(data_review_cleaned.review.apply(round2))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(stop_words='english')
data_cv = cv.fit_transform(data_review_cleaned.review)
# Document-term matrix: one row per review, one column per vocabulary term.
data_dtm = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_dtm.index = data_review_cleaned.index
data_dtm
# Apply the same two-round cleaning to the test reviews, then display.
data_review_cleaned_test = pd.DataFrame(df_test.review.apply(round_1))
data_review_cleaned_test = pd.DataFrame(data_review_cleaned_test.review.apply(round2))
df_test
|
code
|
34134596/cell_17
|
[
"text_plain_output_1.png"
] |
import pandas as pd
# Load the four review CSVs.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_test = review_text_test.copy()
# Attach the vote metadata columns to the test reviews and display.
df_test['vote_funny'] = review_meta_test.vote_funny
df_test['vote_cool'] = review_meta_test.vote_cool
df_test['vote_useful'] = review_meta_test.vote_useful
df_test
|
code
|
34134596/cell_46
|
[
"text_html_output_1.png"
] |
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import pandas as pd
import re
import seaborn as sns
import string
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import preprocessing, feature_extraction, model_selection, linear_model
import seaborn as sns
sns.set()
# Load review text/metadata for train and test splits.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
df_train.isna().sum()
df_test = review_text_test.copy()
# Attach the vote metadata columns to the test reviews.
df_test['vote_funny'] = review_meta_test.vote_funny
df_test['vote_cool'] = review_meta_test.vote_cool
df_test['vote_useful'] = review_meta_test.vote_useful
df_test
df_test.isna().sum()
def clean_text_round_1(text):
    """Lowercase, strip punctuation, and drop tokens containing digits."""
    text = text.lower()
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\\w*\\d\\w*', '', text)
    return text
round_1 = lambda x: clean_text_round_1(x)
data_review_cleaned = pd.DataFrame(df_train.review.apply(round_1))
def clean_text_round_2(text):
    """Flatten newlines and strip leftover quote/dot/underscore characters."""
    text = re.sub('\n', ' ', text)
    text = re.sub('[""..._]', '', text)
    return text
round2 = lambda x: clean_text_round_2(x)
data_review_cleaned = pd.DataFrame(data_review_cleaned.review.apply(round2))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(stop_words='english')
data_cv = cv.fit_transform(data_review_cleaned.review)
# Document-term matrix: one row per review, one column per vocabulary term.
data_dtm = pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_dtm.index = data_review_cleaned.index
data_dtm
data_dtm_transposed = data_dtm.T
data_review_cleaned_test = pd.DataFrame(df_test.review.apply(round_1))
data_review_cleaned_test = pd.DataFrame(data_review_cleaned_test.review.apply(round2))
# Top-50 most frequent terms per review, as (word, count) pairs.
top_words = {}
for o in data_dtm_transposed.columns:
    top = data_dtm_transposed[o].sort_values(ascending=False).head(50)
    top_words[o] = list(zip(top.index, top.values))
data = data_dtm_transposed
from collections import Counter
# Count how often each word appears across the per-review top-50 lists.
words = []
for user_id in data.columns:
    top = [word for word, count in top_words[user_id]]
    for t in top:
        words.append(t)
common_words_count = Counter(words).most_common()
df_common_words = pd.DataFrame(common_words_count, columns=['word', 'count'])
df_common_words
# Bar chart of the 30 most common top-words; saved to disk.
fig, ax = plt.subplots()
fig.set_size_inches(15, 12)
ax = sns.barplot(x='count', y='word', data=df_common_words[:30])
ax.set_title('Top 30 Words in the Corpus', size=24)
ax.set_xlabel('Count', size=20)
ax.set_ylabel('Words', size=20)
fig.savefig('top_30_words.png')
|
code
|
34134596/cell_24
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import re
import string
# Load review text/metadata for train and test splits.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
df_train.isna().sum()
def clean_text_round_1(text):
    """Lowercase, strip punctuation, and drop tokens containing digits."""
    text = text.lower()
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\\w*\\d\\w*', '', text)
    return text
round_1 = lambda x: clean_text_round_1(x)
# First-pass cleaned reviews; last expression is the displayed value.
data_review_cleaned = pd.DataFrame(df_train.review.apply(round_1))
data_review_cleaned
|
code
|
34134596/cell_14
|
[
"text_html_output_1.png"
] |
import pandas as pd
# Load the four review CSVs.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
df_train = review_text_train.copy()
# Attach vote metadata and the rating label to the train reviews; display.
df_train['vote_funny'] = review_meta_train.vote_funny
df_train['vote_cool'] = review_meta_train.vote_cool
df_train['vote_useful'] = review_meta_train.vote_useful
df_train['rating'] = review_meta_train.rating
df_train
|
code
|
34134596/cell_10
|
[
"text_plain_output_1.png"
] |
import pandas as pd
# Load the four review CSVs and print per-column NaN counts for each.
review_text_train = pd.read_csv('../input/review_text_train.csv', index_col=False, delimiter=',', header=0)
review_text_test = pd.read_csv('../input/review_text_test.csv', index_col=False, delimiter=',', header=0)
review_meta_train = pd.read_csv('../input/review_meta_train.csv', index_col=False, delimiter=',', header=0)
review_meta_test = pd.read_csv('../input/review_meta_test.csv', index_col=False, delimiter=',', header=0)
frames = [('review_text_train', review_text_train), ('review_text_test', review_text_test), ('review_meta_train', review_meta_train), ('review_meta_test', review_meta_test)]
for frame_label, frame in frames:
    print(frame_label + ' :\n', str(frame.isna().sum()), '\n**********\n')
|
code
|
73099078/cell_13
|
[
"text_plain_output_1.png"
] |
from transformers import DistilBertTokenizerFast
from transformers import TFDistilBertForSequenceClassification, TFTrainer, TFTrainingArguments
import tensorflow as tf
# Tokenize the splits produced in an earlier cell
# (X_train/X_test, y_train/y_test are defined elsewhere in the notebook).
from transformers import DistilBertTokenizerFast
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
train_encodings = tokenizer(X_train, truncation=True, padding=True)
test_encodings = tokenizer(X_test, truncation=True, padding=True)
import tensorflow as tf
# Wrap encodings + labels as tf.data datasets for TFTrainer.
train_dataset = tf.data.Dataset.from_tensor_slices((dict(train_encodings), y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((dict(test_encodings), y_test))
from transformers import TFDistilBertForSequenceClassification, TFTrainer, TFTrainingArguments
training_args = TFTrainingArguments(output_dir='./results', num_train_epochs=5, per_device_train_batch_size=8, per_device_eval_batch_size=16, warmup_steps=500, weight_decay=0.01, logging_dir='./logs', logging_steps=10)
# Build the model inside the distribution-strategy scope, then fine-tune.
with training_args.strategy.scope():
    model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
trainer = TFTrainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=test_dataset)
trainer.train()
|
code
|
73099078/cell_4
|
[
"text_plain_output_1.png"
] |
import pandas as pd
# Load the SMS spam dataset (latin-1 encoded) and rename its two columns.
messages = pd.read_csv('../input/spam-or-ham/spam.csv', usecols=['v1', 'v2'], encoding='ISO-8859-1')
messages = messages.rename(columns={'v1': 'label', 'v2': 'message'})
# Target labels ('ham'/'spam'); preview the first five.
y = messages['label'].tolist()
y[:5]
|
code
|
73099078/cell_6
|
[
"text_plain_output_1.png"
] |
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd
# Load the SMS spam dataset (latin-1 encoded) and rename its two columns.
messages = pd.read_csv('../input/spam-or-ham/spam.csv', usecols=['v1', 'v2'], encoding='ISO-8859-1')
messages = messages.rename(columns={'v1': 'label', 'v2': 'message'})
X = list(messages['message'])
X[:5]
y = list(messages['label'])
y[:5]
# get_dummies with drop_first leaves a single 'spam' indicator (1 = spam).
y = list(pd.get_dummies(y, drop_first=True)['spam'])
y[:5]
from sklearn.model_selection import train_test_split
# Stratified 80/20 split keeps the ham/spam ratio in both partitions.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
X_train[:5]
|
code
|
73099078/cell_2
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd
# Load the SMS spam dataset and rename columns; preview the first rows.
messages = pd.read_csv('../input/spam-or-ham/spam.csv', usecols=['v1', 'v2'], encoding='ISO-8859-1')
messages = messages.rename(columns={'v1': 'label', 'v2': 'message'})
messages.head()
|
code
|
73099078/cell_1
|
[
"text_html_output_1.png"
] |
import pandas as pd
import pandas as pd
# Load the SMS spam dataset (latin-1 encoded); preview the raw columns.
messages = pd.read_csv('../input/spam-or-ham/spam.csv', usecols=['v1', 'v2'], encoding='ISO-8859-1')
messages.head()
|
code
|
73099078/cell_7
|
[
"text_plain_output_1.png"
] |
!pip install transformers
|
code
|
73099078/cell_8
|
[
"text_plain_output_1.png"
] |
from transformers import DistilBertTokenizerFast
from transformers import DistilBertTokenizerFast
# Download the pretrained DistilBERT fast tokenizer from the HF hub.
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
|
code
|
73099078/cell_15
|
[
"text_plain_output_1.png"
] |
from transformers import DistilBertTokenizerFast
from transformers import TFDistilBertForSequenceClassification, TFTrainer, TFTrainingArguments
import tensorflow as tf
from transformers import DistilBertTokenizerFast
# Tokenizer matching the pretrained checkpoint loaded for the model below.
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
# X_train/X_test are lists of message strings produced in an earlier cell
# (TODO confirm); pad and truncate so all encoded examples share one length.
train_encodings = tokenizer(X_train, truncation=True, padding=True)
test_encodings = tokenizer(X_test, truncation=True, padding=True)
import tensorflow as tf
# Package (features, label) pairs as tf.data Datasets, the input format
# expected by TFTrainer.
train_dataset = tf.data.Dataset.from_tensor_slices((dict(train_encodings), y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((dict(test_encodings), y_test))
from transformers import TFDistilBertForSequenceClassification, TFTrainer, TFTrainingArguments
# Training hyperparameters: 5 epochs, LR warmup over 500 steps, weight decay,
# metrics logged every 10 steps to ./logs, checkpoints under ./results.
training_args = TFTrainingArguments(output_dir='./results', num_train_epochs=5, per_device_train_batch_size=8, per_device_eval_batch_size=16, warmup_steps=500, weight_decay=0.01, logging_dir='./logs', logging_steps=10)
with training_args.strategy.scope():
    # Build the model inside the distribution-strategy scope so its variables
    # are placed correctly for the available devices.
    model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
trainer = TFTrainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=test_dataset)
trainer.train()
# Evaluation metrics (e.g. eval loss) on the held-out split.
trainer.evaluate(test_dataset)
# Raw prediction output on the test split (displayed by the notebook).
trainer.predict(test_dataset)
|
code
|
73099078/cell_16
|
[
"text_plain_output_1.png"
] |
from sklearn.metrics import classification_report
from transformers import DistilBertTokenizerFast
from transformers import TFDistilBertForSequenceClassification, TFTrainer, TFTrainingArguments
import numpy as np
import tensorflow as tf
from transformers import DistilBertTokenizerFast
# Tokenizer matching the pretrained checkpoint loaded for the model below.
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
# X_train/X_test are lists of message strings produced in an earlier cell
# (TODO confirm); pad and truncate so all encoded examples share one length.
train_encodings = tokenizer(X_train, truncation=True, padding=True)
test_encodings = tokenizer(X_test, truncation=True, padding=True)
import tensorflow as tf
# Package (features, label) pairs as tf.data Datasets for TFTrainer.
train_dataset = tf.data.Dataset.from_tensor_slices((dict(train_encodings), y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((dict(test_encodings), y_test))
from transformers import TFDistilBertForSequenceClassification, TFTrainer, TFTrainingArguments
# Training hyperparameters: 5 epochs, LR warmup over 500 steps, weight decay,
# metrics logged every 10 steps.
training_args = TFTrainingArguments(output_dir='./results', num_train_epochs=5, per_device_train_batch_size=8, per_device_eval_batch_size=16, warmup_steps=500, weight_decay=0.01, logging_dir='./logs', logging_steps=10)
with training_args.strategy.scope():
    # Build the model inside the distribution-strategy scope so its variables
    # are placed correctly for the available devices.
    model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
trainer = TFTrainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=test_dataset)
trainer.train()
trainer.evaluate(test_dataset)
# BUG FIX: trainer.predict(...) returns a (predictions, label_ids, metrics)
# tuple, so indexing [1] yielded the *true* labels and the report compared
# y_test against itself, always looking perfect. Use the argmax over the
# per-class logits as the predicted labels instead.
pred_output = trainer.predict(test_dataset)
y_pred = np.argmax(pred_output.predictions, axis=1)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
|
code
|
73099078/cell_3
|
[
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd

# Load and rename in one chained call, then pull the texts out as a list.
messages = pd.read_csv('../input/spam-or-ham/spam.csv', usecols=['v1', 'v2'], encoding='ISO-8859-1').rename(columns={'v1': 'label', 'v2': 'message'})
X = messages['message'].tolist()
X[:5]
|
code
|
73099078/cell_14
|
[
"text_plain_output_1.png"
] |
from transformers import DistilBertTokenizerFast
from transformers import TFDistilBertForSequenceClassification, TFTrainer, TFTrainingArguments
import tensorflow as tf
from transformers import DistilBertTokenizerFast
# Tokenizer matching the pretrained checkpoint loaded for the model below.
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
# X_train/X_test are lists of message strings produced in an earlier cell
# (TODO confirm); pad and truncate so all encoded examples share one length.
train_encodings = tokenizer(X_train, truncation=True, padding=True)
test_encodings = tokenizer(X_test, truncation=True, padding=True)
import tensorflow as tf
# Package (features, label) pairs as tf.data Datasets, the input format
# expected by TFTrainer.
train_dataset = tf.data.Dataset.from_tensor_slices((dict(train_encodings), y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((dict(test_encodings), y_test))
from transformers import TFDistilBertForSequenceClassification, TFTrainer, TFTrainingArguments
# Training hyperparameters: 5 epochs, LR warmup over 500 steps, weight decay,
# metrics logged every 10 steps to ./logs, checkpoints under ./results.
training_args = TFTrainingArguments(output_dir='./results', num_train_epochs=5, per_device_train_batch_size=8, per_device_eval_batch_size=16, warmup_steps=500, weight_decay=0.01, logging_dir='./logs', logging_steps=10)
with training_args.strategy.scope():
    # Build the model inside the distribution-strategy scope so its variables
    # are placed correctly for the available devices.
    model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
trainer = TFTrainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=test_dataset)
trainer.train()
# Evaluation metrics (e.g. eval loss) on the held-out split.
trainer.evaluate(test_dataset)
|
code
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.