path (stringlengths 13–17) | screenshot_names (sequencelengths 1–873) | code (stringlengths 0–40.4k) | cell_type (stringclasses, 1 value)
---|---|---|---|
72101516/cell_3 | [
"image_output_1.png"
] | import pandas as pd
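# load the five Tokyo 2021 Olympics tables, using each file's first column as the index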
medal = pd.read_excel('../input/2021-olympics-in-tokyo/Medals.xlsx', index_col=0)
athlete = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx', index_col=0)
gender = pd.read_excel('../input/2021-olympics-in-tokyo/EntriesGender.xlsx', index_col=0)
team = pd.read_excel('../input/2021-olympics-in-tokyo/Teams.xlsx', index_col=0)
coach = pd.read_excel('../input/2021-olympics-in-tokyo/Coaches.xlsx', index_col=0) | code |
72101516/cell_24 | [
"text_html_output_1.png"
] | from plotly.offline import init_notebook_mode, iplot, plot
import pandas as pd
import plotly.graph_objs as go
medal = pd.read_excel('../input/2021-olympics-in-tokyo/Medals.xlsx', index_col=0)
athlete = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx', index_col=0)
gender = pd.read_excel('../input/2021-olympics-in-tokyo/EntriesGender.xlsx', index_col=0)
team = pd.read_excel('../input/2021-olympics-in-tokyo/Teams.xlsx', index_col=0)
coach = pd.read_excel('../input/2021-olympics-in-tokyo/Coaches.xlsx', index_col=0)
gender['male_ratio'] = gender['Male'] / gender['Total']
gender['female_ratio'] = gender['Female'] / gender['Total']
gender['Discipline'] = gender.index
init_notebook_mode(connected=True)
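# stacked horizontal bars: per-discipline share of female vs. male entries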
fig = go.Figure()
fig.add_trace(go.Bar(y=gender.Discipline, x=gender.female_ratio, orientation='h', name='Females'))
fig.add_trace(go.Bar(y=gender.Discipline, x=gender.male_ratio, orientation='h', name='Males'))
template = dict(layout=go.Layout(title_font=dict(family='Rockwell', size=30)))
fig.update_layout(title='Distribution of disciplines based on gender', template=template, barmode='stack', autosize=False, width=680, height=900, margin=dict(l=150, r=100, b=30, t=100, pad=4))
fig.layout.xaxis.tickformat = ',.0%'
medal.rename(columns={'Team/NOC': 'NOC'}, inplace=True)
medalf = medal.sort_values(by='Rank by Total', ascending=True).head(10)
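# top 10 countries by total-medal rank; bars stack gold, silver, and bronze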
fig = go.Figure()
fig.add_trace(go.Bar(y=medalf.Gold, x=medalf.NOC, name='Gold'))
fig.add_trace(go.Bar(y=medalf.Silver, x=medalf.NOC, name='Silver'))
fig.add_trace(go.Bar(y=medalf.Bronze, x=medalf.NOC, name='Bronze'))
template = dict(layout=go.Layout(title_font=dict(family='Rockwell', size=30)))
fig.update_layout(title='Medal Distribution', template=template, barmode='stack', autosize=False, width=680, height=650, margin=dict(l=30, r=30, b=180, t=100, pad=4))
# medal counts are absolute numbers, so no percentage tick format is applied here
fig.show() | code |
72101516/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import nltk
import os
import pandas as pd
import seaborn as sns
import numpy as np
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
sns.set()
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D
import tensorflow as tf
import tensorflow_datasets as tfds
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from collections import Counter
nltk.download('stopwords')
import re
from collections import defaultdict
from sklearn.preprocessing import LabelEncoder, StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from xgboost import XGBRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from scipy import stats
from scipy.stats import norm, skew
from scipy.special import boxcox1p
from sklearn.preprocessing import RobustScaler
medal = pd.read_excel('../input/2021-olympics-in-tokyo/Medals.xlsx', index_col=0)
athlete = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx', index_col=0)
gender = pd.read_excel('../input/2021-olympics-in-tokyo/EntriesGender.xlsx', index_col=0)
team = pd.read_excel('../input/2021-olympics-in-tokyo/Teams.xlsx', index_col=0)
coach = pd.read_excel('../input/2021-olympics-in-tokyo/Coaches.xlsx', index_col=0)
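# count athletes per delegation (NOC) and plot the ten largest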
count1 = athlete['NOC'].value_counts().head(10)
sns.barplot(x=count1.index, y=count1.values)
plt.xticks(rotation=90)
plt.show() | code |
72101516/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
medal = pd.read_excel('../input/2021-olympics-in-tokyo/Medals.xlsx', index_col=0)
athlete = pd.read_excel('../input/2021-olympics-in-tokyo/Athletes.xlsx', index_col=0)
gender = pd.read_excel('../input/2021-olympics-in-tokyo/EntriesGender.xlsx', index_col=0)
team = pd.read_excel('../input/2021-olympics-in-tokyo/Teams.xlsx', index_col=0)
coach = pd.read_excel('../input/2021-olympics-in-tokyo/Coaches.xlsx', index_col=0)
def miss(data):
    missing_value = data.isnull().sum().sort_values(ascending=False)
    missing_perc = (data.isnull().sum() * 100 / data.shape[0]).sort_values(ascending=False)
    value = pd.concat([missing_value, missing_perc], axis=1, keys=['Count', '%'])
    return value  # without this return, miss() would display nothing
miss(team) | code |
2021927/cell_13 | [
"text_plain_output_1.png"
] | from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/multipleChoiceResponses.csv', encoding='ISO-8859-1')
df = df[['CompensationAmount', 'Age']]
df['CompensationAmount'] = df['CompensationAmount'].str.replace('[^\\w\\s]', '', regex=True)
df['CompensationAmount'].fillna(0, inplace=True)
df['Age'].fillna(0, inplace=True)
df['CompensationAmount'] = pd.to_numeric(df['CompensationAmount'])
df = df[df['CompensationAmount'] > 0]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid_deviance)
fig = QQ.qqplot(alpha=0.5, markersize=5)
model_g = sm.OLS(y, x).fit()
model_leverage = model_g.get_influence().hat_matrix_diag
model_cooks = model_g.get_influence().cooks_distance[0]
plt.xlim(xmin=-0.005, xmax=0.02)
df = df[df['CompensationAmount'] <= 150000]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
model_g = sm.OLS(y, x).fit()
QQ = ProbPlot(model.resid_deviance)
fig = QQ.qqplot(alpha=0.5, markersize=5) | code |
2021927/cell_9 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/multipleChoiceResponses.csv', encoding='ISO-8859-1')
df = df[['CompensationAmount', 'Age']]
df['CompensationAmount'] = df['CompensationAmount'].str.replace('[^\\w\\s]', '', regex=True)
df['CompensationAmount'].fillna(0, inplace=True)
df['Age'].fillna(0, inplace=True)
df['CompensationAmount'] = pd.to_numeric(df['CompensationAmount'])
df = df[df['CompensationAmount'] > 0]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
model_g = sm.OLS(y, x).fit()
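# influence diagnostics from an auxiliary OLS fit: hat values give leverage, plus Cook's distance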
model_leverage = model_g.get_influence().hat_matrix_diag
model_cooks = model_g.get_influence().cooks_distance[0]
sns.regplot(x=model_leverage, y=model_cooks, fit_reg=False)
plt.xlim(xmin=-0.005, xmax=0.02)
plt.xlabel('Leverage')
plt.ylabel("Cook's distance")
plt.title("Cook's vs Leverage") | code |
2021927/cell_2 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/multipleChoiceResponses.csv', encoding='ISO-8859-1')
df.head() | code |
2021927/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.graphics.gofplots import ProbPlot | code |
2021927/cell_7 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/multipleChoiceResponses.csv', encoding='ISO-8859-1')
df = df[['CompensationAmount', 'Age']]
df['CompensationAmount'] = df['CompensationAmount'].str.replace('[^\\w\\s]', '', regex=True)
df['CompensationAmount'].fillna(0, inplace=True)
df['Age'].fillna(0, inplace=True)
df['CompensationAmount'] = pd.to_numeric(df['CompensationAmount'])
df = df[df['CompensationAmount'] > 0]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
sns.regplot(x=df['Age'], y=model.resid_deviance, fit_reg=False)
plt.title('Residual plot')
plt.xlabel('Age')
plt.ylabel('Residuals') | code |
2021927/cell_8 | [
"image_output_1.png"
] | from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/multipleChoiceResponses.csv', encoding='ISO-8859-1')
df = df[['CompensationAmount', 'Age']]
df['CompensationAmount'] = df['CompensationAmount'].str.replace('[^\\w\\s]', '', regex=True)
df['CompensationAmount'].fillna(0, inplace=True)
df['Age'].fillna(0, inplace=True)
df['CompensationAmount'] = pd.to_numeric(df['CompensationAmount'])
df = df[df['CompensationAmount'] > 0]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
QQ = ProbPlot(model.resid_deviance)
fig = QQ.qqplot(alpha=0.5, markersize=5) | code |
2021927/cell_16 | [
"image_output_1.png"
] | from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/multipleChoiceResponses.csv', encoding='ISO-8859-1')
df = df[['CompensationAmount', 'Age']]
df['CompensationAmount'] = df['CompensationAmount'].str.replace('[^\\w\\s]', '', regex=True)
df['CompensationAmount'].fillna(0, inplace=True)
df['Age'].fillna(0, inplace=True)
df['CompensationAmount'] = pd.to_numeric(df['CompensationAmount'])
df = df[df['CompensationAmount'] > 0]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid_deviance)
fig = QQ.qqplot(alpha=0.5, markersize=5)
model_g = sm.OLS(y, x).fit()
model_leverage = model_g.get_influence().hat_matrix_diag
model_cooks = model_g.get_influence().cooks_distance[0]
plt.xlim(xmin=-0.005, xmax=0.02)
df = df[df['CompensationAmount'] <= 150000]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
model_g = sm.OLS(y, x).fit()
# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid_deviance)
fig = QQ.qqplot(alpha=0.5, markersize=5)
model_leverage = model_g.get_influence().hat_matrix_diag
model_cooks = model_g.get_influence().cooks_distance[0]
plt.xlim(xmin=-0.005, xmax=0.02)
df['reg_fit'] = model.fittedvalues
df.sort_values('Age', inplace=True)
sns.regplot(x=df['Age'], y=df['CompensationAmount'], fit_reg=False)
plt.plot(df['Age'], df['reg_fit']) | code |
2021927/cell_3 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/multipleChoiceResponses.csv', encoding='ISO-8859-1')
df[['CompensationAmount', 'Age']].info() | code |
2021927/cell_14 | [
"image_output_1.png"
] | from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/multipleChoiceResponses.csv', encoding='ISO-8859-1')
df = df[['CompensationAmount', 'Age']]
df['CompensationAmount'] = df['CompensationAmount'].str.replace('[^\\w\\s]', '', regex=True)
df['CompensationAmount'].fillna(0, inplace=True)
df['Age'].fillna(0, inplace=True)
df['CompensationAmount'] = pd.to_numeric(df['CompensationAmount'])
df = df[df['CompensationAmount'] > 0]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid_deviance)
fig = QQ.qqplot(alpha=0.5, markersize=5)
model_g = sm.OLS(y, x).fit()
model_leverage = model_g.get_influence().hat_matrix_diag
model_cooks = model_g.get_influence().cooks_distance[0]
plt.xlim(xmin=-0.005, xmax=0.02)
df = df[df['CompensationAmount'] <= 150000]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
model_g = sm.OLS(y, x).fit()
model_leverage = model_g.get_influence().hat_matrix_diag
model_cooks = model_g.get_influence().cooks_distance[0]
sns.regplot(x=model_leverage, y=model_cooks, fit_reg=False)
plt.xlim(xmin=-0.005, xmax=0.02)
plt.xlabel('Leverage')
plt.ylabel("Cook's distance")
plt.title("Cook's vs Leverage") | code |
2021927/cell_12 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from statsmodels.graphics.gofplots import ProbPlot
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
df = pd.read_csv('../input/multipleChoiceResponses.csv', encoding='ISO-8859-1')
df = df[['CompensationAmount', 'Age']]
df['CompensationAmount'] = df['CompensationAmount'].str.replace('[^\\w\\s]', '', regex=True)
df['CompensationAmount'].fillna(0, inplace=True)
df['Age'].fillna(0, inplace=True)
df['CompensationAmount'] = pd.to_numeric(df['CompensationAmount'])
df = df[df['CompensationAmount'] > 0]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
# statsmodels Q-Q plot on model residuals
QQ = ProbPlot(model.resid_deviance)
fig = QQ.qqplot(alpha=0.5, markersize=5)
model_g = sm.OLS(y, x).fit()
model_leverage = model_g.get_influence().hat_matrix_diag
model_cooks = model_g.get_influence().cooks_distance[0]
plt.xlim(xmin=-0.005, xmax=0.02)
df = df[df['CompensationAmount'] <= 150000]
y = df['CompensationAmount']
x = df['Age']
x = sm.add_constant(x)
model = sm.GLM(y, x, family=sm.families.Poisson()).fit()
model_g = sm.OLS(y, x).fit()
sns.regplot(x=df['Age'], y=model.resid_deviance, fit_reg=False)
plt.title('Residual plot')
plt.xlabel('Age')
plt.ylabel('Residuals') | code |
129018141/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
submission.head() | code |
129018141/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.describe() | code |
129018141/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.head() | code |
129018141/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
print('\x1b[94m')
print(test.isna().sum().sort_values(ascending=False)) | code |
129018141/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129018141/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
print(f'\x1b[94mNumber of rows in train data: {train.shape[0]}')
print(f'\x1b[94mNumber of columns in train data: {train.shape[1]}')
print(f'\x1b[94mNumber of values in train data: {train.count().sum()}')
print(f'\x1b[94mNumber of missing values in train data: {sum(train.isna().sum())}') | code |
129018141/cell_8 | [
"text_html_output_2.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
print('\x1b[94m')
print(train.isna().sum().sort_values(ascending=False)) | code |
129018141/cell_15 | [
"text_html_output_1.png"
] | from plotly.subplots import make_subplots
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.drop(['PassengerId'], axis=1, inplace=True)
test.drop(['PassengerId'], axis=1, inplace=True)
TARGET = 'Transported'
FEATURES = [col for col in train.columns if col != TARGET]
RANDOM_STATE = 12
test_null = pd.DataFrame(test.isna().sum())
test_null = test_null.sort_values(by=0, ascending=False)
train_null = pd.DataFrame(train.isna().sum())
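# after sorting, the last entry is the target column (no missing values), so [:-1] drops it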
train_null = train_null.sort_values(by=0, ascending=False)[:-1]
fig = make_subplots(rows=1, cols=2, column_titles=['Train Data', 'Test Data'], x_title='Missing Values')
fig.add_trace(go.Bar(x=train_null[0], y=train_null.index, orientation='h', marker=dict(color=[n for n in range(12)], line_color='rgb(0,0,0)', line_width=2, coloraxis='coloraxis')), 1, 1)
fig.add_trace(go.Bar(x=test_null[0], y=test_null.index, orientation='h', marker=dict(color=[n for n in range(12)], line_color='rgb(0,0,0)', line_width=2, coloraxis='coloraxis')), 1, 2)
fig.update_layout(showlegend=False, title_text='Column wise Null Value Distribution', title_x=0.5) | code |
129018141/cell_16 | [
"text_plain_output_1.png"
] | from plotly.subplots import make_subplots
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.drop(['PassengerId'], axis=1, inplace=True)
test.drop(['PassengerId'], axis=1, inplace=True)
TARGET = 'Transported'
FEATURES = [col for col in train.columns if col != TARGET]
RANDOM_STATE = 12
test_null = pd.DataFrame(test.isna().sum())
test_null = test_null.sort_values(by = 0 ,ascending = False)
# extract and sort the null-value counts of the test data
train_null = pd.DataFrame(train.isna().sum())
train_null = train_null.sort_values(by = 0 ,ascending = False)[:-1]
# extract and sort the null-value counts of the train data
# use subplots to place the two charts side by side
fig = make_subplots(rows=1,
cols=2,
column_titles = ["Train Data", "Test Data"] ,
x_title="Missing Values")
# add the train-data bar trace
fig.add_trace(go.Bar(x=train_null[0],
y=train_null.index,
orientation="h",
marker=dict(color=[n for n in range(12)],
line_color='rgb(0,0,0)' ,
line_width = 2,
coloraxis="coloraxis")),
1, 1)
# add the test-data bar trace
fig.add_trace(go.Bar(x=test_null[0],
y=test_null.index,
orientation="h",
marker=dict(color=[n for n in range(12)],
line_color='rgb(0,0,0)',
line_width = 2,
coloraxis="coloraxis")),
1, 2)
fig.update_layout(showlegend=False, title_text="Column wise Null Value Distribution", title_x=0.5)
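# row-wise view: percentage of rows with 0, 1, 2, ... missing values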
missing_train_row = train.isna().sum(axis=1)
missing_train_row = pd.DataFrame(missing_train_row.value_counts() / train.shape[0]).reset_index()
missing_test_row = test.isna().sum(axis=1)
missing_test_row = pd.DataFrame(missing_test_row.value_counts() / test.shape[0]).reset_index()
missing_train_row.columns = ['no', 'count']
missing_test_row.columns = ['no', 'count']
missing_train_row['count'] = missing_train_row['count'] * 100
missing_test_row['count'] = missing_test_row['count'] * 100
fig = make_subplots(rows=1, cols=2, column_titles=['Train Data', 'Test Data'], x_title='Missing Values')
fig.add_trace(go.Bar(x=missing_train_row['no'], y=missing_train_row['count'], marker=dict(color=[n for n in range(4)], line_color='rgb(0,0,0)', line_width=3, coloraxis='coloraxis')), 1, 1)
fig.add_trace(go.Bar(x=missing_test_row['no'], y=missing_test_row['count'], marker=dict(color=[n for n in range(4)], line_color='rgb(0,0,0)', line_width=3, coloraxis='coloraxis')), 1, 2)
fig.update_layout(showlegend=False, title_text='Row wise Null Value Distribution', title_x=0.5) | code |
129018141/cell_3 | [
"text_plain_output_1.png"
] | import pandas
pandas.__version__ | code |
129018141/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
print(f'\x1b[94mNumber of rows in test data: {test.shape[0]}')
print(f'\x1b[94mNumber of columns in test data: {test.shape[1]}')
print(f'\x1b[94mNumber of values in test data: {test.count().sum()}')
print(f'\x1b[94mNumber of missing values in test data: {sum(test.isna().sum())}') | code |
129018141/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
test.describe() | code |
73078708/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import chi2
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
classes = df.intent.unique()
classes
df['intent_id'] = df['intent'].factorize()[0]
intent_id_df = df[['intent', 'intent_id']].drop_duplicates()
intent_to_id = dict(intent_id_df.values)
id_to_intent = dict(intent_id_df[['intent_id', 'intent']].values)
tfidf = TfidfVectorizer(sublinear_tf=True, ngram_range=(1, 2))
features = tfidf.fit_transform(df['lem']).toarray()
labels = df.intent_id
N = 3
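# rank n-gram features by chi-squared association with each intent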
for intent, intent_id in sorted(intent_to_id.items()):
features_chi2 = chi2(features, labels == intent_id)
indices = np.argsort(features_chi2[0])
feature_names = np.array(tfidf.get_feature_names_out())[indices]
unigrams = [v for v in feature_names if len(v.split(' ')) == 1]
bigrams = [v for v in feature_names if len(v.split(' ')) == 2]
print('\n==> %s:' % intent)
print(' * Most Correlated Unigrams are: %s' % ', '.join(unigrams[-N:]))
print(' * Most Correlated Bigrams are: %s' % ', '.join(bigrams[-N:])) | code |
73078708/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt # ploting library
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
pd.set_option('display.max_rows', 10000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.precision', 150)
pd.options.display.float_format = '{:,.3f}'.format
classes = df.intent.unique()
classes
df['intent_id'] = df['intent'].factorize()[0]
intent_id_df = df[['intent', 'intent_id']].drop_duplicates()
intent_to_id = dict(intent_id_df.values)
id_to_intent = dict(intent_id_df[['intent_id', 'intent']].values)
counts = df['intent'].value_counts()
base_color = sns.color_palette()[0]
plt.xticks(size=12)
totale = len(df)
locs, labels = plt.yticks(size=14)
for loc, label in zip(locs, labels):
count = counts[label.get_text()]
pct_string = '{:0.1f}%'.format(100 * count / totale)
plt.text(count + 8, loc + 0.3, pct_string, ha='center', color='black', fontsize=12)
tfidf = TfidfVectorizer(sublinear_tf=True, ngram_range=(1, 2))
features = tfidf.fit_transform(df['lem']).toarray()
labels = df.intent_id
models = [RandomForestClassifier(n_estimators=100, max_depth=5, random_state=0), LinearSVC(), MultinomialNB(), LogisticRegression(random_state=0)]
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
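# benchmark each classifier with 5-fold cross-validation on the TF-IDF features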
for model in models:
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
mean_accuracy = cv_df.groupby('model_name').accuracy.mean()
std_accuracy = cv_df.groupby('model_name').accuracy.std()
acc = pd.concat([mean_accuracy, std_accuracy], axis=1, ignore_index=True)
acc.columns = ['Mean Accuracy', 'Standard deviation']
acc
plt.figure(figsize=(8, 5))
sns.boxplot(x='model_name', y='accuracy', data=cv_df, color='lightblue', showmeans=True)
plt.title('MEAN ACCURACY (cv = 5)\n', size=14) | code |
73078708/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
df.info() | code |
73078708/cell_23 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
pd.set_option('display.max_rows', 10000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.precision', 150)
pd.options.display.float_format = '{:,.3f}'.format
classes = df.intent.unique()
classes
df['intent_id'] = df['intent'].factorize()[0]
intent_id_df = df[['intent', 'intent_id']].drop_duplicates()
intent_to_id = dict(intent_id_df.values)
id_to_intent = dict(intent_id_df[['intent_id', 'intent']].values)
tfidf = TfidfVectorizer(sublinear_tf=True, ngram_range=(1, 2))
features = tfidf.fit_transform(df['lem']).toarray()
labels = df.intent_id
models = [RandomForestClassifier(n_estimators=100, max_depth=5, random_state=0), LinearSVC(), MultinomialNB(), LogisticRegression(random_state=0)]
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in models:
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy']) | code |
73078708/cell_30 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
pd.set_option('display.max_rows', 10000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.precision', 150)
pd.options.display.float_format = '{:,.3f}'.format
classes = df.intent.unique()
classes
df['intent_id'] = df['intent'].factorize()[0]
intent_id_df = df[['intent', 'intent_id']].drop_duplicates()
intent_to_id = dict(intent_id_df.values)
id_to_intent = dict(intent_id_df[['intent_id', 'intent']].values)
tfidf = TfidfVectorizer(sublinear_tf=True, ngram_range=(1, 2))
features = tfidf.fit_transform(df['lem']).toarray()
labels = df.intent_id
models = [RandomForestClassifier(n_estimators=100, max_depth=5, random_state=0), LinearSVC(), MultinomialNB(), LogisticRegression(random_state=0)]
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in models:
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
mean_accuracy = cv_df.groupby('model_name').accuracy.mean()
std_accuracy = cv_df.groupby('model_name').accuracy.std()
acc = pd.concat([mean_accuracy, std_accuracy], axis=1, ignore_index=True)
acc.columns = ['Mean Accuracy', 'Standard deviation']
acc
df_test = pd.read_csv('../input/machathon-20-final-round/test.csv')
df_test.head() | code |
73078708/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
classes = df.intent.unique()
classes
df['intent_id'] = df['intent'].factorize()[0]
intent_id_df = df[['intent', 'intent_id']].drop_duplicates()
intent_to_id = dict(intent_id_df.values)
id_to_intent = dict(intent_id_df[['intent_id', 'intent']].values)
tfidf = TfidfVectorizer(sublinear_tf=True, ngram_range=(1, 2))
features = tfidf.fit_transform(df['lem']).toarray()
labels = df.intent_id
print('Each of the %d samples is represented by %d features (TF-IDF scores of unigrams and bigrams)' % features.shape) | code |
73078708/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
pd.set_option('display.max_rows', 10000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.precision', 150)
pd.options.display.float_format = '{:,.3f}'.format
classes = df.intent.unique()
classes
df['intent_id'] = df['intent'].factorize()[0]
intent_id_df = df[['intent', 'intent_id']].drop_duplicates()
intent_to_id = dict(intent_id_df.values)
id_to_intent = dict(intent_id_df[['intent_id', 'intent']].values)
tfidf = TfidfVectorizer(sublinear_tf=True, ngram_range=(1, 2))
features = tfidf.fit_transform(df['lem']).toarray()
labels = df.intent_id
models = [RandomForestClassifier(n_estimators=100, max_depth=5, random_state=0), LinearSVC(), MultinomialNB(), LogisticRegression(random_state=0)]
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in models:
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
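# train the chosen model, LinearSVC, on the full feature matrix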
model = LinearSVC()
model.fit(features, labels) | code |
73078708/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
for i in df['intent'].value_counts().index:
print(i)
print(df[df['intent'] == i]['clean_text']) | code |
73078708/cell_19 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt # ploting library
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
classes = df.intent.unique()
classes
df['intent_id'] = df['intent'].factorize()[0]
intent_id_df = df[['intent', 'intent_id']].drop_duplicates()
intent_to_id = dict(intent_id_df.values)
id_to_intent = dict(intent_id_df[['intent_id', 'intent']].values)
plt.figure(figsize=(15, 10))
counts = df['intent'].value_counts()
base_color = sns.color_palette()[0]
sns.countplot(data=df, y='intent', order=counts.index, color=base_color, linewidth=100)
plt.ylabel('Intent', fontsize=14)
plt.xlabel('Count', fontsize=14)
plt.title('Most Frequent intents', fontsize=20)
plt.xticks(size=12)
totale = len(df)
locs, labels = plt.yticks(size=14)
for loc, label in zip(locs, labels):
count = counts[label.get_text()]
pct_string = '{:0.1f}%'.format(100 * count / totale)
plt.text(count + 8, loc + 0.3, pct_string, ha='center', color='black', fontsize=12) | code |
73078708/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
for i, row in df.iterrows():
print(row['text'], ' -> ', row['intent']) | code |
73078708/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
classes = df.intent.unique()
classes
df['intent_id'] = df['intent'].factorize()[0]
intent_id_df = df[['intent', 'intent_id']].drop_duplicates()
intent_to_id = dict(intent_id_df.values)
id_to_intent = dict(intent_id_df[['intent_id', 'intent']].values)
print(intent_to_id)
print(id_to_intent)
df.head() | code |
73078708/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
df[df['intent'] == 'warm weather'] | code |
73078708/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
classes = df.intent.unique()
classes | code |
73078708/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
df.head() | code |
73078708/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
classes = df.intent.unique()
classes
len(classes) | code |
73078708/cell_24 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
pd.set_option('display.max_rows', 10000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.precision', 150)
pd.options.display.float_format = '{:,.3f}'.format
classes = df.intent.unique()
classes
df['intent_id'] = df['intent'].factorize()[0]
intent_id_df = df[['intent', 'intent_id']].drop_duplicates()
intent_to_id = dict(intent_id_df.values)
id_to_intent = dict(intent_id_df[['intent_id', 'intent']].values)
tfidf = TfidfVectorizer(sublinear_tf=True, ngram_range=(1, 2))
features = tfidf.fit_transform(df['lem']).toarray()
labels = df.intent_id
models = [RandomForestClassifier(n_estimators=100, max_depth=5, random_state=0), LinearSVC(), MultinomialNB(), LogisticRegression(random_state=0)]
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in models:
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
mean_accuracy = cv_df.groupby('model_name').accuracy.mean()
std_accuracy = cv_df.groupby('model_name').accuracy.std()
acc = pd.concat([mean_accuracy, std_accuracy], axis=1, ignore_index=True)
acc.columns = ['Mean Accuracy', 'Standard deviation']
acc | code |
73078708/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
df | code |
73078708/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
for i, row in df.iterrows():
print(row['text'], ' -> ', row['clean_text'], ' -> ', row['intent']) | code |
73078708/cell_27 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
pd.set_option('display.max_rows', 10000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.precision', 150)
pd.options.display.float_format = '{:,.3f}'.format
classes = df.intent.unique()
classes
df['intent_id'] = df['intent'].factorize()[0]
intent_id_df = df[['intent', 'intent_id']].drop_duplicates()
intent_to_id = dict(intent_id_df.values)
id_to_intent = dict(intent_id_df[['intent_id', 'intent']].values)
tfidf = TfidfVectorizer(sublinear_tf=True, ngram_range=(1, 2))
features = tfidf.fit_transform(df['lem']).toarray()
labels = df.intent_id
models = [RandomForestClassifier(n_estimators=100, max_depth=5, random_state=0), LinearSVC(), MultinomialNB(), LogisticRegression(random_state=0)]
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in models:
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
mean_accuracy = cv_df.groupby('model_name').accuracy.mean()
std_accuracy = cv_df.groupby('model_name').accuracy.std()
acc = pd.concat([mean_accuracy, std_accuracy], axis=1, ignore_index=True)
acc.columns = ['Mean Accuracy', 'Standard deviation']
acc
df_test = pd.read_csv('../input/machathon-20-final-round/test.csv')
df_test.head() | code |
73078708/cell_12 | [
"text_plain_output_1.png"
] | from nltk.stem.isri import ISRIStemmer
import re
import string
def remove_punc(s):
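    # replace Arabic and Latin punctuation with spaces; the tatweel 'ـ' is excluded here and stripped later in clean_text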
punctuations = '`÷×؛ʿˇ<>(‚)*&^%][،/:ღ"┈؟.,\'{}~¦+ ، 》《|﴾»«﴿!”…“–❒ـ۞✦✩☜ ̷ ﮼☻\U000fe334❥*،“¸.•°``°•.`•.¸.•♫♡—' + string.punctuation
punctuations = ''.join(set(punctuations) - {'ـ'})
for c in punctuations:
s = s.replace(c, ' ')
return s
def clean_text(text):
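    # normalize Arabic text: drop emojis, diacritics, and digits, unify letter variants, and collapse repeats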
arabic_diacritics = re.compile(' ّ| َ| ً| ُ| ٌ| ِ| ٍ| ْ| ۖ| ۠| ۘ| ۙ| ۚ| ۛ| ۜ| ۗ| ۡ| ۟| ۤ|ۥ| ۧ', re.VERBOSE)
weridPatterns = re.compile('[😀-🙏🌀-🗿🚀-\U0001f6ff\U0001f1e0-🇿✂-➰Ⓜ-🉑🤦-🤷𐀀-\U0010ffff\u200d♀-♂☀-⭕⏏⏩⌚〰️\u2069\u2066\u200c\u2068\u2067]+', flags=re.UNICODE)
text = weridPatterns.sub(' ', text)
text = re.sub(arabic_diacritics, '', text)
text = re.sub('\\d+', ' ', text)
text = re.sub('ﷺ', 'صلى الله عليه وسلم', text)
text = re.sub('[كق][رو]+ن[اهة]*', 'كورونا', text)
text = re.sub('_', ' ', text)
text = re.sub('\n', ' ', text)
text = re.sub('ـ', '', text)
text = re.sub('،', ' ', text)
text = re.sub('وو', 'و', text)
text = re.sub('يي', 'ي', text)
text = re.sub('اا', 'ا', text)
text = re.sub('أأ', 'أ', text)
text = re.sub('URL', '', text)
text = re.sub('USER', '', text)
text = re.sub('[ٱٲٳٵ]', 'ا', text)
text = re.sub('[پ]', 'ب', text)
text = re.sub('[ٺټ]', 'ت', text)
text = re.sub('[چ]', 'ج', text)
text = re.sub('[ډڊ]', 'د', text)
text = re.sub('[ڏ]', 'ذ', text)
text = re.sub('[ڒړڕ]', 'ر', text)
text = re.sub('[ژ]', 'ز', text)
text = re.sub('[کڪګگڰڱڳڴؼػ]', 'ك', text)
text = re.sub('[؏]', 'ع', text)
text = re.sub('[ڛ]', 'س', text)
text = re.sub('[ێېیێېےۓؽؾؿ]', 'ي', text)
text = re.sub('[ڣڤڨᓅ]', 'ف', text)
text = re.sub('[ۆۈۉۊۋ]', 'و', text)
text = re.sub('[ھہۂۿ]', 'ه', text)
text = re.sub('[ںڼݩ]', 'ن', text)
text = re.sub('[۾ᓄ]', 'م', text)
text = re.sub('[ڵ]', 'ل', text)
text = re.sub('[ۃ]', 'ة', text)
text = remove_punc(text)
text = re.compile('(.)\\1{2,}', re.IGNORECASE).sub('\\1', text)
text = re.sub('\\s+', ' ', text)
return text
from nltk.stem.isri import ISRIStemmer
s = u'كم عدد مستشفيات العزل فى مصر وما هى اماكنها'
def stem(sentence):
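    # light stemming: strip the definite article 'ال' and common suffixes (ISRI suf32)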
stemmed_sentence = []
for i in sentence.split(' '):
i = re.sub('^ال', '', i)
stemmed_sentence.append(ISRIStemmer().suf32(i))
return ' '.join(stemmed_sentence)
stem(s) | code |
73078708/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/machathon-20-final-round/train_ara.csv')
df['intent'].value_counts() | code |
33110896/cell_13 | [
"text_plain_output_1.png"
] | from datetime import date
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # needed for the plt calls below
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.isocalendar().week
df['Day_of_year'] = df.Dates.dt.dayofyear
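# hard-coded first and last report dates, used to compute the span the data covers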
d0 = date(2014, 8, 29)
d1 = date(2016, 3, 23)
delta = d1 - d0
df.shape
df.groupby('Country')[['No. of confirmed cases', 'No. of confirmed deaths']].sum()
df.groupby('Country')['No. of confirmed cases'].sum().nlargest(3)
df.groupby('Country')['No. of confirmed deaths'].sum().nlargest(3)
plt.tight_layout()
df.groupby('Country')['No. of suspected cases'].sum().nlargest(3) | code |
33110896/cell_9 | [
"text_plain_output_1.png"
] | from datetime import date
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.isocalendar().week
df['Day_of_year'] = df.Dates.dt.dayofyear
d0 = date(2014, 8, 29)
d1 = date(2016, 3, 23)
delta = d1 - d0
df.shape
df.groupby('Country')[['No. of confirmed cases', 'No. of confirmed deaths']].sum()
print('The data runs from', df.Dates.min(), 'to', df.Dates.max(), ', a span of', delta)
print('The total number of confirmed cases is', df['No. of confirmed cases'].sum())
print('The total number of confirmed deaths is', df['No. of confirmed deaths'].sum())
print('The total number of suspected cases is', df['No. of suspected cases'].sum())
print('The total number of suspected deaths is', df['No. of suspected deaths'].sum())
print('The total number of probable cases is', df['No. of probable cases'].sum())
print('The total number of probable deaths is', df['No. of probable deaths'].sum()) | code |
33110896/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.isocalendar().week
df['Day_of_year'] = df.Dates.dt.dayofyear
df.head() | code |
33110896/cell_11 | [
"text_plain_output_1.png"
] | from datetime import date
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.isocalendar().week
df['Day_of_year'] = df.Dates.dt.dayofyear
d0 = date(2014, 8, 29)
d1 = date(2016, 3, 23)
delta = d1 - d0
df.shape
df.groupby('Country')[['No. of confirmed cases', 'No. of confirmed deaths']].sum()
df.groupby('Country')['No. of confirmed cases'].sum().nlargest(3)
df.groupby('Country')['No. of confirmed deaths'].sum().nlargest(3) | code |
33110896/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
33110896/cell_7 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.isocalendar().week
df['Day_of_year'] = df.Dates.dt.dayofyear
df.shape | code |
33110896/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.isocalendar().week
df['Day_of_year'] = df.Dates.dt.dayofyear
df.shape
df.groupby('Country')[['No. of confirmed cases', 'No. of confirmed deaths']].sum() | code |
33110896/cell_15 | [
"text_plain_output_1.png"
] | from datetime import date
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # needed for the plt calls below
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.isocalendar().week
df['Day_of_year'] = df.Dates.dt.dayofyear
d0 = date(2014, 8, 29)
d1 = date(2016, 3, 23)
delta = d1 - d0
df.shape
df.groupby('Country')[['No. of confirmed cases', 'No. of confirmed deaths']].sum()
df.groupby('Country')['No. of confirmed cases'].sum().nlargest(3)
df.groupby('Country')['No. of confirmed deaths'].sum().nlargest(3)
plt.tight_layout()
df.groupby('Country')['No. of suspected cases'].sum().nlargest(3)
df.groupby('Country')['No. of suspected deaths'].sum().nlargest(3)
plt.subplot(1, 2, 1)
df.groupby('Country')['No. of suspected cases'].sum().nlargest(3).plot(kind='bar', grid=True)
plt.title('Suspected cases (3)')
plt.xlabel('Countries')
plt.ylabel('No of suspected cases')
plt.subplot(1, 2, 2)
df.groupby('Country')['No. of suspected deaths'].sum().nlargest(3).plot(kind='bar', grid=True, color='red')
plt.title('Suspected deaths (3)')
plt.xlabel('Countries')
plt.ylabel('No of suspected deaths')
plt.tight_layout()
plt.show() | code |
33110896/cell_16 | [
"text_plain_output_1.png"
] | from datetime import date
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # needed for the plt calls below
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.week
df['Day_of_year'] = df.Dates.dt.dayofyear
d0 = date(2014, 8, 29)
d1 = date(2016, 3, 23)
delta = d1 - d0
df.shape
df.groupby('Country')[['No. of confirmed cases', 'No. of confirmed deaths']].sum()
df.groupby('Country')['No. of confirmed cases'].sum().nlargest(3)
df.groupby('Country')['No. of confirmed deaths'].sum().nlargest(3)
plt.tight_layout()
df.groupby('Country')['No. of suspected cases'].sum().nlargest(3)
df.groupby('Country')['No. of suspected deaths'].sum().nlargest(3)
plt.tight_layout()
df.groupby('Country')['No. of probable cases'].sum().nlargest(3) | code |
33110896/cell_3 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
df.head() | code |
33110896/cell_14 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from datetime import date
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # needed for the plt calls below
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.isocalendar().week
df['Day_of_year'] = df.Dates.dt.dayofyear
d0 = date(2014, 8, 29)
d1 = date(2016, 3, 23)
delta = d1 - d0
df.shape
df.groupby('Country')[['No. of confirmed cases', 'No. of confirmed deaths']].sum()
df.groupby('Country')['No. of confirmed cases'].sum().nlargest(3)
df.groupby('Country')['No. of confirmed deaths'].sum().nlargest(3)
plt.tight_layout()
df.groupby('Country')['No. of suspected cases'].sum().nlargest(3)
df.groupby('Country')['No. of suspected deaths'].sum().nlargest(3) | code |
33110896/cell_10 | [
"text_html_output_1.png"
] | from datetime import date
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.isocalendar().week
df['Day_of_year'] = df.Dates.dt.dayofyear
d0 = date(2014, 8, 29)
d1 = date(2016, 3, 23)
delta = d1 - d0
df.shape
df.groupby('Country')[['No. of confirmed cases', 'No. of confirmed deaths']].sum()
df.groupby('Country')['No. of confirmed cases'].sum().nlargest(3) | code |
33110896/cell_12 | [
"text_html_output_1.png"
] | from datetime import date
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # needed for the plt calls below
df = pd.read_csv('../input/ebola-outbreak-20142016-complete-dataset/ebola_2014_2016_clean.csv')
import datetime as dt
from datetime import date
df['Dates'] = pd.to_datetime(df['Date'])
df['Year'] = df.Dates.dt.year
df['Month_name'] = df.Dates.dt.month_name()
df['Day_name'] = df.Dates.dt.day_name()
df['Month'] = df.Dates.dt.month
df['Week'] = df.Dates.dt.isocalendar().week
df['Day_of_year'] = df.Dates.dt.dayofyear
d0 = date(2014, 8, 29)
d1 = date(2016, 3, 23)
delta = d1 - d0
df.shape
df.groupby('Country')[['No. of confirmed cases', 'No. of confirmed deaths']].sum()
df.groupby('Country')['No. of confirmed cases'].sum().nlargest(3)
df.groupby('Country')['No. of confirmed deaths'].sum().nlargest(3)
plt.subplot(1, 2, 1)
df.groupby('Country')['No. of confirmed cases'].sum().nlargest(3).plot(kind='bar', grid=True)
plt.title('Confirmed cases (3)')
plt.xlabel('Countries')
plt.ylabel('No of confirmed cases')
plt.subplot(1, 2, 2)
df.groupby('Country')['No. of confirmed deaths'].sum().nlargest(3).plot(kind='bar', grid=True, color='red')
plt.title('Confirmed deaths (3)')
plt.xlabel('Countries')
plt.ylabel('No of confirmed deaths')
plt.tight_layout()
plt.show() | code |
33110896/cell_5 | [
"text_plain_output_1.png"
] | from datetime import date
d0 = date(2014, 8, 29)
d1 = date(2016, 3, 23)
delta = d1 - d0
print(delta) | code |
89138170/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts()
df.quality.value_counts(normalize=True)
sns.countplot(x='quality', data=df) | code |
89138170/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.describe() | code |
89138170/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts()
df.quality.value_counts(normalize=True) | code |
89138170/cell_29 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts()
df.quality.value_counts(normalize=True)
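# scale every feature to [0, 1]; 'quality' is the prediction target, so it is left out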
scaler = MinMaxScaler()
norm_df = scaler.fit_transform(df.drop('quality', axis=1))
norm_df = pd.DataFrame(norm_df, columns=df.columns[:-1])
X = norm_df
y = df.quality
X.shape
y.shape | code |
89138170/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.info() | code |
89138170/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts() | code |
89138170/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89138170/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.head(7) | code |
89138170/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique() | code |
89138170/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique() | code |
89138170/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique()
df.Id.unique()
df = df.drop('Id', axis=1)
df.quality.unique()
df.quality.value_counts()
df.quality.value_counts(normalize=True)
scaler = MinMaxScaler()
norm_df = scaler.fit_transform(df.drop('quality', axis=1))
norm_df = pd.DataFrame(norm_df, columns=df.columns[:-1])
norm_df.head() | code |
89138170/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum()
df.Id.nunique() | code |
89138170/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape | code |
89138170/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv')
df.shape
df.isnull().sum() | code |
50211041/cell_13 | [
"image_output_1.png"
] | import lightgbm
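# LightGBM gradient-boosted trees; X_train/Y_train are assumed to be defined in an earlier notebook cell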
lgbreg = lightgbm.LGBMRegressor(boosting_type='gbdt', num_leaves=31, learning_rate=0.1, n_estimators=100)
lgbreg.fit(X_train, Y_train)
print('train score', lgbreg.score(X_train, Y_train))
print('test score', lgbreg.score(X_test, Y_test)) | code |
50211041/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df.info() | code |
50211041/cell_11 | [
"image_output_1.png"
] | import catboost
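# CatBoost regressor minimizing RMSE; verbose=False silences per-iteration output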
cboost = catboost.CatBoostRegressor(loss_function='RMSE', verbose=False)
cboost.fit(X_train, Y_train)
print('train score', cboost.score(X_train, Y_train))
print('test score', cboost.score(X_test, Y_test)) | code |
50211041/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
50211041/cell_7 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
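# Helper: percentage of missing values per column, keeping only columns with at least one NaN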
def NanColumns(df):
    percent_nan = 100 * df.isnull().sum() / len(df)
    percent_nan = percent_nan[percent_nan > 0].sort_values()
    return percent_nan
nanColumns = NanColumns(df)
plt.xticks(rotation=90)
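# Drop the sparsest columns, then fill remaining missing values with sentinels ('0' for categoricals, 0.0 for numerics)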
del df['Alley']
del df['PoolQC']
del df['MiscFeature']
df['MasVnrType'].fillna(value='0', inplace=True)
df['MasVnrArea'].fillna(value=0.0, inplace=True)
df['BsmtQual'].fillna(value='0', inplace=True)
df['BsmtCond'].fillna(value='0', inplace=True)
df['BsmtExposure'].fillna(value='0', inplace=True)
df['BsmtFinType1'].fillna(value='0', inplace=True)
df['BsmtFinType2'].fillna(value='0', inplace=True)
df['FireplaceQu'].fillna(value='0', inplace=True)
df['Electrical'].fillna(value='0', inplace=True)
df['GarageType'].fillna(value='0', inplace=True)
df['GarageYrBlt'].fillna(value=0.0, inplace=True)
df['GarageFinish'].fillna(value='0', inplace=True)
df['GarageQual'].fillna(value='0', inplace=True)
df['GarageCond'].fillna(value='0', inplace=True)
df['Fence'].fillna(value='0', inplace=True)
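# Impute LotFrontage with the mean frontage of the house's neighborhood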
df['LotFrontage'] = df.groupby('Neighborhood')['LotFrontage'].transform(lambda val: val.fillna(val.mean()))
df = pd.get_dummies(df)
plt.figure(figsize=(10, 8))
sns.distplot(df['SalePrice'])
plt.show() | code |
50211041/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
def NanColumns(df):
    percent_nan = 100 * df.isnull().sum() / len(df)
    percent_nan = percent_nan[percent_nan > 0].sort_values()
    return percent_nan
nanColumns = NanColumns(df)
plt.xticks(rotation=90)
del df['Alley']
del df['PoolQC']
del df['MiscFeature']
df['MasVnrType'].fillna(value='0', inplace=True)
df['MasVnrArea'].fillna(value=0.0, inplace=True)
df['BsmtQual'].fillna(value='0', inplace=True)
df['BsmtCond'].fillna(value='0', inplace=True)
df['BsmtExposure'].fillna(value='0', inplace=True)
df['BsmtFinType1'].fillna(value='0', inplace=True)
df['BsmtFinType2'].fillna(value='0', inplace=True)
df['FireplaceQu'].fillna(value='0', inplace=True)
df['Electrical'].fillna(value='0', inplace=True)
df['GarageType'].fillna(value='0', inplace=True)
df['GarageYrBlt'].fillna(value=0.0, inplace=True)
df['GarageFinish'].fillna(value='0', inplace=True)
df['GarageQual'].fillna(value='0', inplace=True)
df['GarageCond'].fillna(value='0', inplace=True)
df['Fence'].fillna(value='0', inplace=True)
df['LotFrontage'] = df.groupby('Neighborhood')['LotFrontage'].transform(lambda val: val.fillna(val.mean()))
df = pd.get_dummies(df)
sns.heatmap(df.corr(), xticklabels=True, yticklabels=True)
plt.show() | code |
50211041/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
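# A random forest is a bagging-style ensemble of decision trees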
Bagging = RandomForestRegressor(max_depth=30, n_estimators=300)
Bagging.fit(X_train, Y_train)
print('train score', Bagging.score(X_train, Y_train))
print('test score', Bagging.score(X_test, Y_test)) | code |
50211041/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import StackingRegressor
from sklearn.linear_model import RidgeCV
from sklearn.svm import LinearSVR
import warnings
warnings.filterwarnings('ignore')
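# Stack a ridge regression and a linear SVR, with a small random forest as the final meta-estimator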
estimators = [('lr', RidgeCV()), ('svr', LinearSVR(random_state=42, max_iter=1000))]
regStack = StackingRegressor(estimators=estimators, final_estimator=RandomForestRegressor(n_estimators=10, random_state=42))
regStack.fit(X_train, Y_train)
print('train score', regStack.score(X_train, Y_train))
print('test score', regStack.score(X_test, Y_test)) | code |
50211041/cell_10 | [
"text_plain_output_1.png"
] | from sklearn import ensemble
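# scikit-learn gradient boosting; loss='ls' is least-squares (renamed 'squared_error' in newer versions)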
sklearn_boost = ensemble.GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=100)
sklearn_boost.fit(X_train, Y_train)
print('train score', sklearn_boost.score(X_train, Y_train))
print('test score', sklearn_boost.score(X_test, Y_test)) | code |
50211041/cell_12 | [
"image_output_1.png"
] | import xgboost
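# XGBoost with shallow trees (max_depth=3) and the default 'gbtree' booster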
xgBoost = xgboost.XGBRegressor(max_depth=3, learning_rate=0.1, n_estimators=100, booster='gbtree')
xgBoost.fit(X_train, Y_train)
print('train score', xgBoost.score(X_train, Y_train))
print('test score', xgBoost.score(X_test, Y_test)) | code |
50211041/cell_5 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
def NanColumns(df):
    percent_nan = 100 * df.isnull().sum() / len(df)
    percent_nan = percent_nan[percent_nan > 0].sort_values()
    return percent_nan
nanColumns = NanColumns(df)
sns.barplot(x=nanColumns.index, y=nanColumns)
plt.xticks(rotation=90) | code |
122255715/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/temp2021/Temp2021.csv', skiprows=[i for i in range(1, 98)], parse_dates=['Var1'], index_col=['Var1'])
df.index.name = 'Date'
df.index = pd.to_datetime(df.index)
df = df.loc['2021-06-01':'2021-09-30']
df = df.replace(0, np.nan)
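# Map the raw logger column names (datax_*) to button IDs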
dict = {'datax_ 1': '1', 'datax_ 2': '2', 'datax_ 3': '3', 'datax_ 4': '4', 'datax_ 5': '5', 'datax_ 6': '6', 'datax_ 7': '7', 'datax_ 8': '8', 'datax_ 9': '9', 'datax_10': '10', 'datax_11': '11', 'datax_12': '12', 'datax_13': '13', 'datax_14': '14', 'datax_15': '15', 'datax_16': '16', 'datax_17': '17', 'datax_18': '18', 'datax_19': '20', 'datax_20': '21', 'datax_21': '23', 'datax_22': '24', 'datax_23': '25', 'datax_24': '26', 'datax_25': '27', 'datax_26': '28', 'datax_27': '29', 'datax_28': '30', 'datax_29': '31', 'datax_30': '32', 'datax_31': '33', 'datax_32': '34', 'datax_33': '35', 'datax_34': '36', 'datax_35': '37', 'datax_36': '38', 'datax_37': '40', 'datax_38': '41', 'datax_39': '42', 'datax_40': '43', 'datax_41': '45', 'datax_42': '46', 'datax_43': '47', 'datax_44': '48', 'datax_45': '49', 'datax_46': '50', 'datax_47': '51', 'datax_48': '52', 'datax_49': '53', 'datax_50': '54', 'datax_51': '55', 'datax_52': '56', 'datax_53': '57', 'datax_54': '58', 'datax_55': '59', 'datax_56': '60'}
df.rename(columns=dict, inplace=True)
rowmean = df.mean(axis=1).copy()
rowmean.groupby(rowmean.index.hour).mean().plot()
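# Overlay one button's mean hourly temperature on the average across all buttons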
def diurnal_cycle(df, station):
    df[station].groupby(df[station].index.hour).mean().plot(label=station, c='r', marker='o', markersize=4)
    rowmean.groupby(rowmean.index.hour).mean().plot(label='All buttons', c='k')
diurnal_cycle(df, '51')
diurnal_cycle(df, '33')
diurnal_cycle(df, '45')
diurnal_cycle(df, '30')
diurnal_cycle(df, '42')
diurnal_cycle(df, '60')
diurnal_cycle(df, '29')
diurnal_cycle(df, '49')
diurnal_cycle(df, '12')
diurnal_cycle(df, '4')
diurnal_cycle(df, '2')
diurnal_cycle(df, '46')
diurnal_cycle(df, '56')
diurnal_cycle(df, '3')
plt.show()
diurnal_cycle(df, '11')
plt.show()
diurnal_cycle(df, '32')
plt.show()
diurnal_cycle(df, '54')
plt.show()
diurnal_cycle(df, '28')
plt.show()
diurnal_cycle(df, '17')
plt.show() | code |
122255715/cell_9 | [
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/temp2021/Temp2021.csv', skiprows=[i for i in range(1, 98)], parse_dates=['Var1'], index_col=['Var1'])
df.index.name = 'Date'
df.index = pd.to_datetime(df.index)
df = df.loc['2021-06-01':'2021-09-30']
df = df.replace(0, np.nan)
dict = {'datax_ 1': '1', 'datax_ 2': '2', 'datax_ 3': '3', 'datax_ 4': '4', 'datax_ 5': '5', 'datax_ 6': '6', 'datax_ 7': '7', 'datax_ 8': '8', 'datax_ 9': '9', 'datax_10': '10', 'datax_11': '11', 'datax_12': '12', 'datax_13': '13', 'datax_14': '14', 'datax_15': '15', 'datax_16': '16', 'datax_17': '17', 'datax_18': '18', 'datax_19': '20', 'datax_20': '21', 'datax_21': '23', 'datax_22': '24', 'datax_23': '25', 'datax_24': '26', 'datax_25': '27', 'datax_26': '28', 'datax_27': '29', 'datax_28': '30', 'datax_29': '31', 'datax_30': '32', 'datax_31': '33', 'datax_32': '34', 'datax_33': '35', 'datax_34': '36', 'datax_35': '37', 'datax_36': '38', 'datax_37': '40', 'datax_38': '41', 'datax_39': '42', 'datax_40': '43', 'datax_41': '45', 'datax_42': '46', 'datax_43': '47', 'datax_44': '48', 'datax_45': '49', 'datax_46': '50', 'datax_47': '51', 'datax_48': '52', 'datax_49': '53', 'datax_50': '54', 'datax_51': '55', 'datax_52': '56', 'datax_53': '57', 'datax_54': '58', 'datax_55': '59', 'datax_56': '60'}
df.rename(columns=dict, inplace=True)
rowmean = df.mean(axis=1).copy()
rowmean.groupby(rowmean.index.hour).mean().plot()
def diurnal_cycle(df, station):
    df[station].groupby(df[station].index.hour).mean().plot(label=station, c='r', marker='o', markersize=4)
    rowmean.groupby(rowmean.index.hour).mean().plot(label='All buttons', c='k')
diurnal_cycle(df, '51')
plt.show()
diurnal_cycle(df, '33')
plt.show()
diurnal_cycle(df, '45')
plt.show()
diurnal_cycle(df, '30')
plt.show()
diurnal_cycle(df, '42')
plt.show()
diurnal_cycle(df, '60')
plt.show()
diurnal_cycle(df, '29')
plt.show() | code |
122255715/cell_4 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/temp2021/Temp2021.csv', skiprows=[i for i in range(1, 98)], parse_dates=['Var1'], index_col=['Var1'])
df.index.name = 'Date'
df.index = pd.to_datetime(df.index)
df = df.loc['2021-06-01':'2021-09-30']
df = df.replace(0, np.nan)
dict = {'datax_ 1': '1', 'datax_ 2': '2', 'datax_ 3': '3', 'datax_ 4': '4', 'datax_ 5': '5', 'datax_ 6': '6', 'datax_ 7': '7', 'datax_ 8': '8', 'datax_ 9': '9', 'datax_10': '10', 'datax_11': '11', 'datax_12': '12', 'datax_13': '13', 'datax_14': '14', 'datax_15': '15', 'datax_16': '16', 'datax_17': '17', 'datax_18': '18', 'datax_19': '20', 'datax_20': '21', 'datax_21': '23', 'datax_22': '24', 'datax_23': '25', 'datax_24': '26', 'datax_25': '27', 'datax_26': '28', 'datax_27': '29', 'datax_28': '30', 'datax_29': '31', 'datax_30': '32', 'datax_31': '33', 'datax_32': '34', 'datax_33': '35', 'datax_34': '36', 'datax_35': '37', 'datax_36': '38', 'datax_37': '40', 'datax_38': '41', 'datax_39': '42', 'datax_40': '43', 'datax_41': '45', 'datax_42': '46', 'datax_43': '47', 'datax_44': '48', 'datax_45': '49', 'datax_46': '50', 'datax_47': '51', 'datax_48': '52', 'datax_49': '53', 'datax_50': '54', 'datax_51': '55', 'datax_52': '56', 'datax_53': '57', 'datax_54': '58', 'datax_55': '59', 'datax_56': '60'}
df.rename(columns=dict, inplace=True)
df.head() | code |
122255715/cell_6 | [
"image_output_5.png",
"image_output_4.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/temp2021/Temp2021.csv', skiprows=[i for i in range(1, 98)], parse_dates=['Var1'], index_col=['Var1'])
df.index.name = 'Date'
df.index = pd.to_datetime(df.index)
df = df.loc['2021-06-01':'2021-09-30']
df = df.replace(0, np.nan)
dict = {'datax_ 1': '1', 'datax_ 2': '2', 'datax_ 3': '3', 'datax_ 4': '4', 'datax_ 5': '5', 'datax_ 6': '6', 'datax_ 7': '7', 'datax_ 8': '8', 'datax_ 9': '9', 'datax_10': '10', 'datax_11': '11', 'datax_12': '12', 'datax_13': '13', 'datax_14': '14', 'datax_15': '15', 'datax_16': '16', 'datax_17': '17', 'datax_18': '18', 'datax_19': '20', 'datax_20': '21', 'datax_21': '23', 'datax_22': '24', 'datax_23': '25', 'datax_24': '26', 'datax_25': '27', 'datax_26': '28', 'datax_27': '29', 'datax_28': '30', 'datax_29': '31', 'datax_30': '32', 'datax_31': '33', 'datax_32': '34', 'datax_33': '35', 'datax_34': '36', 'datax_35': '37', 'datax_36': '38', 'datax_37': '40', 'datax_38': '41', 'datax_39': '42', 'datax_40': '43', 'datax_41': '45', 'datax_42': '46', 'datax_43': '47', 'datax_44': '48', 'datax_45': '49', 'datax_46': '50', 'datax_47': '51', 'datax_48': '52', 'datax_49': '53', 'datax_50': '54', 'datax_51': '55', 'datax_52': '56', 'datax_53': '57', 'datax_54': '58', 'datax_55': '59', 'datax_56': '60'}
df.rename(columns=dict, inplace=True)
rowmean = df.mean(axis=1).copy()
rowmean.groupby(rowmean.index.hour).mean().plot()
plt.title('Summer 2021 Diurnal Cycle')
plt.ylabel('Temperature ℃')
plt.xlabel('Hour of the day') | code |
122255715/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/temp2021/Temp2021.csv', skiprows=[i for i in range(1, 98)], parse_dates=['Var1'], index_col=['Var1'])
df.index.name = 'Date'
df.index = pd.to_datetime(df.index)
df = df.loc['2021-06-01':'2021-09-30']
df = df.replace(0, np.nan)
dict = {'datax_ 1': '1', 'datax_ 2': '2', 'datax_ 3': '3', 'datax_ 4': '4', 'datax_ 5': '5', 'datax_ 6': '6', 'datax_ 7': '7', 'datax_ 8': '8', 'datax_ 9': '9', 'datax_10': '10', 'datax_11': '11', 'datax_12': '12', 'datax_13': '13', 'datax_14': '14', 'datax_15': '15', 'datax_16': '16', 'datax_17': '17', 'datax_18': '18', 'datax_19': '20', 'datax_20': '21', 'datax_21': '23', 'datax_22': '24', 'datax_23': '25', 'datax_24': '26', 'datax_25': '27', 'datax_26': '28', 'datax_27': '29', 'datax_28': '30', 'datax_29': '31', 'datax_30': '32', 'datax_31': '33', 'datax_32': '34', 'datax_33': '35', 'datax_34': '36', 'datax_35': '37', 'datax_36': '38', 'datax_37': '40', 'datax_38': '41', 'datax_39': '42', 'datax_40': '43', 'datax_41': '45', 'datax_42': '46', 'datax_43': '47', 'datax_44': '48', 'datax_45': '49', 'datax_46': '50', 'datax_47': '51', 'datax_48': '52', 'datax_49': '53', 'datax_50': '54', 'datax_51': '55', 'datax_52': '56', 'datax_53': '57', 'datax_54': '58', 'datax_55': '59', 'datax_56': '60'}
df.rename(columns=dict, inplace=True)
rowmean = df.mean(axis=1).copy()
rowmean.groupby(rowmean.index.hour).mean().plot()
def diurnal_cycle(df, station):
    df[station].groupby(df[station].index.hour).mean().plot(label=station, c='r', marker='o', markersize=4)
    rowmean.groupby(rowmean.index.hour).mean().plot(label='All buttons', c='k')
diurnal_cycle(df, '51')
diurnal_cycle(df, '33')
diurnal_cycle(df, '45')
diurnal_cycle(df, '30')
diurnal_cycle(df, '42')
diurnal_cycle(df, '60')
diurnal_cycle(df, '29')
diurnal_cycle(df, '49')
plt.show()
diurnal_cycle(df, '12')
plt.show()
diurnal_cycle(df, '4')
plt.show()
diurnal_cycle(df, '2')
plt.show()
diurnal_cycle(df, '46')
plt.show()
diurnal_cycle(df, '56')
plt.show() | code |
122255715/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
122255715/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/temp2021/Temp2021.csv', skiprows=[i for i in range(1, 98)], parse_dates=['Var1'], index_col=['Var1'])
df.index.name = 'Date'
df.index = pd.to_datetime(df.index)
df = df.loc['2021-06-01':'2021-09-30']
df = df.replace(0, np.nan)
df.head() | code |
122255715/cell_5 | [
"image_output_5.png",
"image_output_4.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/temp2021/Temp2021.csv', skiprows=[i for i in range(1, 98)], parse_dates=['Var1'], index_col=['Var1'])
df.index.name = 'Date'
df.index = pd.to_datetime(df.index)
df = df.loc['2021-06-01':'2021-09-30']
df = df.replace(0, np.nan)
dict = {'datax_ 1': '1', 'datax_ 2': '2', 'datax_ 3': '3', 'datax_ 4': '4', 'datax_ 5': '5', 'datax_ 6': '6', 'datax_ 7': '7', 'datax_ 8': '8', 'datax_ 9': '9', 'datax_10': '10', 'datax_11': '11', 'datax_12': '12', 'datax_13': '13', 'datax_14': '14', 'datax_15': '15', 'datax_16': '16', 'datax_17': '17', 'datax_18': '18', 'datax_19': '20', 'datax_20': '21', 'datax_21': '23', 'datax_22': '24', 'datax_23': '25', 'datax_24': '26', 'datax_25': '27', 'datax_26': '28', 'datax_27': '29', 'datax_28': '30', 'datax_29': '31', 'datax_30': '32', 'datax_31': '33', 'datax_32': '34', 'datax_33': '35', 'datax_34': '36', 'datax_35': '37', 'datax_36': '38', 'datax_37': '40', 'datax_38': '41', 'datax_39': '42', 'datax_40': '43', 'datax_41': '45', 'datax_42': '46', 'datax_43': '47', 'datax_44': '48', 'datax_45': '49', 'datax_46': '50', 'datax_47': '51', 'datax_48': '52', 'datax_49': '53', 'datax_50': '54', 'datax_51': '55', 'datax_52': '56', 'datax_53': '57', 'datax_54': '58', 'datax_55': '59', 'datax_56': '60'}
df.rename(columns=dict, inplace=True)
rowmean = df.mean(axis=1).copy()
print(rowmean) | code |
90119657/cell_4 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sub1 = pd.read_csv('../input/ncaam-2022/stage2_seeds_sample_submission.csv')
sub1.sort_values(by=['ID'], inplace=True)
sub2 = pd.read_csv('../input/mens-march-mania-2022/MDataFiles_Stage2/MSampleSubmissionStage2.csv')
sub2.sort_values(by=['ID'], inplace=True)
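# Naive seed-gap baseline: win probability shifts 3 percentage points per seed of difference, clipped to [0.05, 0.95]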
prob = 0.5 + 0.03 * (sub1['T2_seed'] - sub1['T1_seed'])
prob = np.round(prob, decimals=2)
sub2['Pred'] = np.clip(prob, 0.05, 0.95)
sub2.to_csv('submission.csv', index=False)
sub2.head() | code |
90119657/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
1007485/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
data.shape
Color_Count = data.color.value_counts()
idx = range(2)
labels = ['Color', 'Black & White']
plt.xticks(idx, labels)
Director = data.director_name.value_counts()
D_Name = Director.head(n=10).index
New_D = data[data['director_name'].isin(D_Name)]
New_D.pivot_table(index=['director_name', 'imdb_score'], aggfunc='mean')
plt.figure(1, figsize=(12, 6))
plt.subplot(1, 2, 1)
Director.head(n=10).sort_index().plot(kind='bar')
plt.title('Top 10 directors by number of movies')
plt.subplot(1, 2, 2)
New_D.groupby(['director_name'])['imdb_score'].mean().plot(kind='bar')
plt.xlabel('')
plt.title("Top 10 directors' average IMDB scores")
plt.show()
Language = data.language.value_counts()
Country = data.country.value_counts()
score_by_content = data.pivot_table(index=['content_rating'], values='imdb_score', aggfunc='mean')
Contents = data.content_rating.value_counts().sort_index()
plt.figure(1, figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.ylabel('Score')
plt.title('Average IMDB Score by Movie Content')
score_by_content.plot(kind='bar')
plt.xlabel('')
plt.subplot(1, 2, 2)
Contents.plot(kind='bar')
plt.xlabel('Contents')
plt.ylabel('Volume')
plt.title('Number of movies by content rating')
plt.show() | code |
1007485/cell_4 | [
"image_output_1.png"
] | data.shape
for i in data.columns:
    print(i, end='; ') | code |
1007485/cell_6 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
data.shape
Color_Count = data.color.value_counts()
idx = range(2)
labels = ['Color', 'Black & White']
plt.xticks(idx, labels)
Director = data.director_name.value_counts()
D_Name = Director.head(n=10).index
New_D = data[data['director_name'].isin(D_Name)]
New_D.pivot_table(index=['director_name', 'imdb_score'], aggfunc='mean')
plt.figure(1, figsize=(12, 6))
plt.subplot(1, 2, 1)
Director.head(n=10).sort_index().plot(kind='bar')
plt.title('Top 10 directors by number of movies')
plt.subplot(1, 2, 2)
New_D.groupby(['director_name'])['imdb_score'].mean().plot(kind='bar')
plt.xlabel('')
plt.title("Top 10 direcotors' average IMDB scores")
plt.show() | code |
1007485/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
data.shape
Color_Count = data.color.value_counts()
idx = range(2)
labels = ['Color', 'Black & White']
plt.xticks(idx, labels)
Director = data.director_name.value_counts()
D_Name = Director.head(n=10).index
New_D = data[data['director_name'].isin(D_Name)]
New_D.pivot_table(index=['director_name', 'imdb_score'], aggfunc='mean')
plt.figure(1, figsize=(12, 6))
plt.subplot(1, 2, 1)
Director.head(n=10).sort_index().plot(kind='bar')
plt.title('Top 10 directors by number of movies')
plt.subplot(1, 2, 2)
New_D.groupby(['director_name'])['imdb_score'].mean().plot(kind='bar')
plt.xlabel('')
plt.title("Top 10 directors' average IMDB scores")
plt.show()
Language = data.language.value_counts()
Country = data.country.value_counts()
score_by_content = data.pivot_table(index=['content_rating'], values='imdb_score', aggfunc='mean')
Contents = data.content_rating.value_counts().sort_index()
Year = data.title_year.value_counts().sort_index().tail(50)
year = range(50)
loc = range(3, 49, 5)
ticks = range(1970, 2017, 5)
plt.xticks(loc, ticks)
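# Split the pipe-separated genre strings and tally how often each genre appears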
Gen = data['genres'].str.split('|')
New_Gen = []
Gen_Dict = {}
for item in Gen:
    for i in item:
        New_Gen.append(i)
        if i not in Gen_Dict:
            Gen_Dict[i] = 1
        else:
            Gen_Dict[i] += 1
Gen = pd.DataFrame.from_dict(Gen_Dict, orient='index')
Gen.columns = ['Counts']
Gen = Gen.sort_values('Counts', ascending=True)
Gen.plot(kind='barh', legend=False, figsize=(12, 6))
plt.title('Number of movies by genre')
plt.show() | code |