path (string, 13-17 chars) | screenshot_names (list, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 value: "code") |
---|---|---|---|
73067465/cell_28
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import KNNImputer, IterativeImputer
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
sns.set_style('whitegrid')
from sklearn.metrics import accuracy_score
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submit = pd.DataFrame(test['PassengerId'])
train['title'] = 0
# Extract the honorific from each Name ('Surname, Title. Given names' -> 'Title')...
for i in range(0, len(train)):
    train.loc[i, 'title'] = train['Name'].iloc[i].split(',')[1].split('.')[0][1:]
# ...then encode it as a small integer; rarer titles are grouped together as 6.
train['title'].replace({'Mr': 1, 'Miss': 2, 'Mrs': 2, 'Master': 3, 'Dr': 4, 'Rev': 5}, inplace=True)
train['title'].replace(['Major', 'Mlle', 'Col', 'Don', 'the Countess', 'Sir', 'Capt', 'Mme', 'Lady', 'Jonkheer', 'Ms'], 6, inplace=True)
# Reduce Cabin to its deck letter (first character) where it is not missing.
for i in range(len(train)):
    if not pd.isnull(train['Cabin'].iloc[i]):
        train.loc[i, 'Cabin'] = train['Cabin'].loc[i][0]
train['Cabin'].replace({'C': 1, 'B': 2, 'D': 3, 'E': 4, 'A': 5, 'F': 6, 'G': 7, 'T': 8}, inplace=True)
train['Fare'] = np.sqrt(train['Fare'])
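# Square-root transform of Fare, presumably to compress its long right tail.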
train.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'PassengerId', 'Cabin'], axis=1, inplace=True)
fig,ax=plt.subplots(3,1,figsize=(15,13))
sns.heatmap(train.corr('spearman'),annot=True,ax=ax[0],label='spearman') #spearman
sns.heatmap(train.corr('kendall'),annot=True,ax=ax[1],label='kendall') #Kendall
sns.heatmap(train.corr('pearson'),annot=True,ax=ax[2],label='pearson') #pearson
sns.ecdfplot(x='Age', data=train, hue='Survived')
plt.annotate('The small rise at low ages shows that young children were more likely to survive', xy=(13, 0.17), xytext=(60, 0.3), arrowprops={'color': 'gray'})
plt.show()
|
code
|
73067465/cell_38
|
[
"text_plain_output_1.png"
] |
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,VotingClassifier
from sklearn.impute import KNNImputer,IterativeImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split,cross_val_score,StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import KNNImputer, IterativeImputer
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
sns.set_style('whitegrid')
from sklearn.metrics import accuracy_score
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submit = pd.DataFrame(test['PassengerId'])
train['title'] = 0
for i in range(0, len(train)):
    train.loc[i, 'title'] = train['Name'].iloc[i].split(',')[1].split('.')[0][1:]
train['title'].replace({'Mr': 1, 'Miss': 2, 'Mrs': 2, 'Master': 3, 'Dr': 4, 'Rev': 5}, inplace=True)
train['title'].replace(['Major', 'Mlle', 'Col', 'Don', 'the Countess', 'Sir', 'Capt', 'Mme', 'Lady', 'Jonkheer', 'Ms'], 6, inplace=True)
for i in range(len(train)):
    if not pd.isnull(train['Cabin'].iloc[i]):
        train.loc[i, 'Cabin'] = train['Cabin'].loc[i][0]
train['Cabin'].replace({'C': 1, 'B': 2, 'D': 3, 'E': 4, 'A': 5, 'F': 6, 'G': 7, 'T': 8}, inplace=True)
train['Fare'] = np.sqrt(train['Fare'])
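# Assumed reconstruction: the 'family' feature used by get_dummies below is not defined in the captured cells, so it is rebuilt here from SibSp and Parch before they are dropped.
train['family'] = train['SibSp'] + train['Parch']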
train.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'PassengerId', 'Cabin'], axis=1, inplace=True)
fig,ax=plt.subplots(3,1,figsize=(15,13))
sns.heatmap(train.corr('spearman'),annot=True,ax=ax[0],label='spearman') #spearman
sns.heatmap(train.corr('kendall'),annot=True,ax=ax[1],label='kendall') #Kendall
sns.heatmap(train.corr('pearson'),annot=True,ax=ax[2],label='pearson') #pearson
train = pd.get_dummies(train, columns=['Pclass', 'Embarked', 'title', 'family'], drop_first=True)
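# KNN imputation fills the remaining missing values (mainly Age); fit_transform returns a NumPy array, so the DataFrame is rebuilt with the original column names.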
impute = KNNImputer(n_neighbors=13)
train = pd.DataFrame(impute.fit_transform(train), columns=train.columns)
model = []
model.append(('Logistic Regression', LogisticRegression(max_iter=1000)))
model.append(('LDA', LinearDiscriminantAnalysis()))
model.append(('SVC', SVC(kernel='rbf')))
model.append(('DTC', DecisionTreeClassifier()))
model.append(('GBC', GradientBoostingClassifier()))
model.append(('RFC', RandomForestClassifier()))
model.append(('Kneig', KNeighborsClassifier()))
x = train.drop('Survived', axis=1)
y = train['Survived']
xtrain, xvalid, ytrain, yvalid = train_test_split(x, y, test_size=0.3)
from sklearn.metrics import classification_report
model = LogisticRegression(max_iter=3000)
model.fit(xtrain, ytrain)
ypred = model.predict(xvalid)
model = RandomForestClassifier()
model.fit(xtrain, ytrain)
ypred = model.predict(xvalid)
estimator = []
estimator.append(('GBC', GradientBoostingClassifier()))
estimator.append(('RFC', RandomForestClassifier()))
estimator.append(('LR', LogisticRegression(max_iter=3000)))
# Hard voting: the ensemble predicts the majority class among the three fitted estimators.
vot_hard = VotingClassifier(estimators=estimator, voting='hard')
vot_hard.fit(xtrain, ytrain)
ypred = vot_hard.predict(xvalid)
print(classification_report(yvalid, ypred))
|
code
|
73067465/cell_35
|
[
"text_plain_output_1.png"
] |
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,VotingClassifier
from sklearn.impute import KNNImputer,IterativeImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split,cross_val_score,StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import KNNImputer, IterativeImputer
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
sns.set_style('whitegrid')
from sklearn.metrics import accuracy_score
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submit = pd.DataFrame(test['PassengerId'])
train['title'] = 0
for i in range(0, len(train)):
    train.loc[i, 'title'] = train['Name'].iloc[i].split(',')[1].split('.')[0][1:]
train['title'].replace({'Mr': 1, 'Miss': 2, 'Mrs': 2, 'Master': 3, 'Dr': 4, 'Rev': 5}, inplace=True)
train['title'].replace(['Major', 'Mlle', 'Col', 'Don', 'the Countess', 'Sir', 'Capt', 'Mme', 'Lady', 'Jonkheer', 'Ms'], 6, inplace=True)
for i in range(len(train)):
    if not pd.isnull(train['Cabin'].iloc[i]):
        train.loc[i, 'Cabin'] = train['Cabin'].loc[i][0]
train['Cabin'].replace({'C': 1, 'B': 2, 'D': 3, 'E': 4, 'A': 5, 'F': 6, 'G': 7, 'T': 8}, inplace=True)
train['Fare'] = np.sqrt(train['Fare'])
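# Assumed reconstruction: the 'family' feature used by get_dummies below is not defined in the captured cells, so it is rebuilt here from SibSp and Parch before they are dropped.
train['family'] = train['SibSp'] + train['Parch']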
train.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'PassengerId', 'Cabin'], axis=1, inplace=True)
fig,ax=plt.subplots(3,1,figsize=(15,13))
sns.heatmap(train.corr('spearman'),annot=True,ax=ax[0],label='spearman') #spearman
sns.heatmap(train.corr('kendall'),annot=True,ax=ax[1],label='kendall') #Kendall
sns.heatmap(train.corr('pearson'),annot=True,ax=ax[2],label='pearson') #pearson
train = pd.get_dummies(train, columns=['Pclass', 'Embarked', 'title', 'family'], drop_first=True)
impute = KNNImputer(n_neighbors=13)
train = pd.DataFrame(impute.fit_transform(train), columns=train.columns)
model = []
model.append(('Logistic Regression', LogisticRegression(max_iter=1000)))
model.append(('LDA', LinearDiscriminantAnalysis()))
model.append(('SVC', SVC(kernel='rbf')))
model.append(('DTC', DecisionTreeClassifier()))
model.append(('GBC', GradientBoostingClassifier()))
model.append(('RFC', RandomForestClassifier()))
model.append(('Kneig', KNeighborsClassifier()))
x = train.drop('Survived', axis=1)
y = train['Survived']
xtrain, xvalid, ytrain, yvalid = train_test_split(x, y, test_size=0.3)
from sklearn.metrics import classification_report
model = LogisticRegression(max_iter=3000)
model.fit(xtrain, ytrain)
ypred = model.predict(xvalid)
print(classification_report(yvalid, ypred))
|
code
|
73067465/cell_22
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import KNNImputer, IterativeImputer
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
sns.set_style('whitegrid')
from sklearn.metrics import accuracy_score
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submit = pd.DataFrame(test['PassengerId'])
train['title'] = 0
for i in range(0, len(train)):
    train.loc[i, 'title'] = train['Name'].iloc[i].split(',')[1].split('.')[0][1:]
train['title'].replace({'Mr': 1, 'Miss': 2, 'Mrs': 2, 'Master': 3, 'Dr': 4, 'Rev': 5}, inplace=True)
train['title'].replace(['Major', 'Mlle', 'Col', 'Don', 'the Countess', 'Sir', 'Capt', 'Mme', 'Lady', 'Jonkheer', 'Ms'], 6, inplace=True)
for i in range(len(train)):
    if not pd.isnull(train['Cabin'].iloc[i]):
        train.loc[i, 'Cabin'] = train['Cabin'].loc[i][0]
train['Cabin'].replace({'C': 1, 'B': 2, 'D': 3, 'E': 4, 'A': 5, 'F': 6, 'G': 7, 'T': 8}, inplace=True)
train['Fare'] = np.sqrt(train['Fare'])
train.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'PassengerId', 'Cabin'], axis=1, inplace=True)
fig, ax = plt.subplots(3, 1, figsize=(15, 13))
sns.heatmap(train.corr('spearman'), annot=True, ax=ax[0], label='spearman')
sns.heatmap(train.corr('kendall'), annot=True, ax=ax[1], label='kendall')
sns.heatmap(train.corr('pearson'), annot=True, ax=ax[2], label='pearson')
|
code
|
73067465/cell_27
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import KNNImputer, IterativeImputer
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
sns.set_style('whitegrid')
from sklearn.metrics import accuracy_score
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submit = pd.DataFrame(test['PassengerId'])
train['title'] = 0
for i in range(0, len(train)):
    train.loc[i, 'title'] = train['Name'].iloc[i].split(',')[1].split('.')[0][1:]
train['title'].replace({'Mr': 1, 'Miss': 2, 'Mrs': 2, 'Master': 3, 'Dr': 4, 'Rev': 5}, inplace=True)
train['title'].replace(['Major', 'Mlle', 'Col', 'Don', 'the Countess', 'Sir', 'Capt', 'Mme', 'Lady', 'Jonkheer', 'Ms'], 6, inplace=True)
for i in range(len(train)):
    if not pd.isnull(train['Cabin'].iloc[i]):
        train.loc[i, 'Cabin'] = train['Cabin'].loc[i][0]
train['Cabin'].replace({'C': 1, 'B': 2, 'D': 3, 'E': 4, 'A': 5, 'F': 6, 'G': 7, 'T': 8}, inplace=True)
train['Fare'] = np.sqrt(train['Fare'])
train.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'PassengerId', 'Cabin'], axis=1, inplace=True)
fig,ax=plt.subplots(3,1,figsize=(15,13))
sns.heatmap(train.corr('spearman'),annot=True,ax=ax[0],label='spearman') #spearman
sns.heatmap(train.corr('kendall'),annot=True,ax=ax[1],label='kendall') #Kendall
sns.heatmap(train.corr('pearson'),annot=True,ax=ax[2],label='pearson') #pearson
sns.countplot(x='title', data=train, hue='Survived')
|
code
|
73067465/cell_36
|
[
"text_plain_output_1.png"
] |
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,VotingClassifier
from sklearn.impute import KNNImputer,IterativeImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split,cross_val_score,StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import KNNImputer, IterativeImputer
from sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
sns.set_style('whitegrid')
from sklearn.metrics import accuracy_score
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submit = pd.DataFrame(test['PassengerId'])
train['title'] = 0
for i in range(0, len(train)):
    train.loc[i, 'title'] = train['Name'].iloc[i].split(',')[1].split('.')[0][1:]
train['title'].replace({'Mr': 1, 'Miss': 2, 'Mrs': 2, 'Master': 3, 'Dr': 4, 'Rev': 5}, inplace=True)
train['title'].replace(['Major', 'Mlle', 'Col', 'Don', 'the Countess', 'Sir', 'Capt', 'Mme', 'Lady', 'Jonkheer', 'Ms'], 6, inplace=True)
for i in range(len(train)):
    if not pd.isnull(train['Cabin'].iloc[i]):
        train.loc[i, 'Cabin'] = train['Cabin'].loc[i][0]
train['Cabin'].replace({'C': 1, 'B': 2, 'D': 3, 'E': 4, 'A': 5, 'F': 6, 'G': 7, 'T': 8}, inplace=True)
train['Fare'] = np.sqrt(train['Fare'])
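# Assumed reconstruction: the 'family' feature used by get_dummies below is not defined in the captured cells, so it is rebuilt here from SibSp and Parch before they are dropped.
train['family'] = train['SibSp'] + train['Parch']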
train.drop(['Name', 'SibSp', 'Parch', 'Ticket', 'PassengerId', 'Cabin'], axis=1, inplace=True)
fig,ax=plt.subplots(3,1,figsize=(15,13))
sns.heatmap(train.corr('spearman'),annot=True,ax=ax[0],label='spearman') #spearman
sns.heatmap(train.corr('kendall'),annot=True,ax=ax[1],label='kendall') #Kendall
sns.heatmap(train.corr('pearson'),annot=True,ax=ax[2],label='pearson') #pearson
train = pd.get_dummies(train, columns=['Pclass', 'Embarked', 'title', 'family'], drop_first=True)
impute = KNNImputer(n_neighbors=13)
train = pd.DataFrame(impute.fit_transform(train), columns=train.columns)
model = []
model.append(('Logistic Regression', LogisticRegression(max_iter=1000)))
model.append(('LDA', LinearDiscriminantAnalysis()))
model.append(('SVC', SVC(kernel='rbf')))
model.append(('DTC', DecisionTreeClassifier()))
model.append(('GBC', GradientBoostingClassifier()))
model.append(('RFC', RandomForestClassifier()))
model.append(('Kneig', KNeighborsClassifier()))
x = train.drop('Survived', axis=1)
y = train['Survived']
xtrain, xvalid, ytrain, yvalid = train_test_split(x, y, test_size=0.3)
from sklearn.metrics import classification_report
model = LogisticRegression(max_iter=3000)
model.fit(xtrain, ytrain)
ypred = model.predict(xvalid)
model = RandomForestClassifier()
model.fit(xtrain, ytrain)
ypred = model.predict(xvalid)
print(classification_report(yvalid, ypred))
|
code
|
122251830/cell_9
|
[
"text_plain_output_1.png"
] |
import pandas as pd
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
test_pass_id = test.PassengerId.copy()
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train_pass_id = train.PassengerId.copy()
X = train.drop(columns='Transported')
y = train[['Transported']]
df = pd.concat([X, test], ignore_index=True)
df.info()
|
code
|
122251830/cell_20
|
[
"text_plain_output_1.png"
] |
import pandas as pd
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
test_pass_id = test.PassengerId.copy()
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train_pass_id = train.PassengerId.copy()
X = train.drop(columns='Transported')
y = train[['Transported']]
df = pd.concat([X, test], ignore_index=True)
nan_inicial = df.isna().sum()
nan_inicial
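# Split the composite fields: Cabin -> deck/num/side, PassengerId -> group/member number, Name -> first/last name.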
df[['deck', 'num', 'side']] = df['Cabin'].str.split(pat='/', expand=True)
df[['Passenger', '_Id']] = df['PassengerId'].str.split(pat='_', expand=True)
df[['Nome', 'Sobrenome']] = df['Name'].str.split(pat=' ', expand=True)
df.drop(columns=['Cabin', 'Name'], inplace=True)
df.replace({False: 0, True: 1}, inplace=True)
df.groupby(['HomePlanet']).deck.value_counts()
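# Deck determines HomePlanet here: deck G passengers are assigned Earth, decks T/A/B/C Europa (motivated by the counts above).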
df.loc[df.deck == 'G', 'HomePlanet'] = 'Earth'
df.loc[(df.deck == 'T') | (df.deck == 'A') | (df.deck == 'B') | (df.deck == 'C'), 'HomePlanet'] = 'Europa'
df.groupby(['VIP']).deck.value_counts()
|
code
|
122251830/cell_11
|
[
"text_plain_output_1.png"
] |
import pandas as pd
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
test_pass_id = test.PassengerId.copy()
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train_pass_id = train.PassengerId.copy()
X = train.drop(columns='Transported')
y = train[['Transported']]
df = pd.concat([X, test], ignore_index=True)
nan_inicial = df.isna().sum()
nan_inicial
df.head()
|
code
|
122251830/cell_28
|
[
"text_plain_output_1.png"
] |
import pandas as pd
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
test_pass_id = test.PassengerId.copy()
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train_pass_id = train.PassengerId.copy()
X = train.drop(columns='Transported')
y = train[['Transported']]
df = pd.concat([X, test], ignore_index=True)
nan_inicial = df.isna().sum()
nan_inicial
df[['deck', 'num', 'side']] = df['Cabin'].str.split(pat='/', expand=True)
df[['Passenger', '_Id']] = df['PassengerId'].str.split(pat='_', expand=True)
df[['Nome', 'Sobrenome']] = df['Name'].str.split(pat=' ', expand=True)
df.drop(columns=['Cabin', 'Name'], inplace=True)
df.replace({False: 0, True: 1}, inplace=True)
df.groupby(['HomePlanet']).deck.value_counts()
df.loc[df.deck == 'G', 'HomePlanet'] = 'Earth'
df.loc[(df.deck == 'T') | (df.deck == 'A') | (df.deck == 'B') | (df.deck == 'C'), 'HomePlanet'] = 'Europa'
df.groupby(['VIP']).deck.value_counts()
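# Based on the counts above, passengers on decks G and T are treated as non-VIP, so VIP is set to 0 for those decks.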
df.loc[(df.deck == 'G') | (df.deck == 'T'), 'VIP'] = 0
df.groupby(['CryoSleep']).deck.value_counts()
df.loc[df.deck == 'T', 'CryoSleep'] = 0
df.groupby(['CryoSleep', 'VIP']).agg({'RoomService': ['mean', 'min', 'max'], 'FoodCourt': ['mean', 'min', 'max'], 'ShoppingMall': ['mean', 'min', 'max'], 'Spa': ['mean', 'min', 'max'], 'VRDeck': ['mean', 'min', 'max']})
|
code
|
122251830/cell_16
|
[
"text_html_output_1.png"
] |
import pandas as pd
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
test_pass_id = test.PassengerId.copy()
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train_pass_id = train.PassengerId.copy()
X = train.drop(columns='Transported')
y = train[['Transported']]
df = pd.concat([X, test], ignore_index=True)
nan_inicial = df.isna().sum()
nan_inicial
df[['deck', 'num', 'side']] = df['Cabin'].str.split(pat='/', expand=True)
df[['Passenger', '_Id']] = df['PassengerId'].str.split(pat='_', expand=True)
df[['Nome', 'Sobrenome']] = df['Name'].str.split(pat=' ', expand=True)
df.drop(columns=['Cabin', 'Name'], inplace=True)
df.replace({False: 0, True: 1}, inplace=True)
df.groupby(['HomePlanet']).deck.value_counts()
|
code
|
122251830/cell_24
|
[
"text_plain_output_1.png"
] |
import pandas as pd
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
test_pass_id = test.PassengerId.copy()
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train_pass_id = train.PassengerId.copy()
X = train.drop(columns='Transported')
y = train[['Transported']]
df = pd.concat([X, test], ignore_index=True)
nan_inicial = df.isna().sum()
nan_inicial
df[['deck', 'num', 'side']] = df['Cabin'].str.split(pat='/', expand=True)
df[['Passenger', '_Id']] = df['PassengerId'].str.split(pat='_', expand=True)
df[['Nome', 'Sobrenome']] = df['Name'].str.split(pat=' ', expand=True)
df.drop(columns=['Cabin', 'Name'], inplace=True)
df.replace({False: 0, True: 1}, inplace=True)
df.groupby(['HomePlanet']).deck.value_counts()
df.loc[df.deck == 'G', 'HomePlanet'] = 'Earth'
df.loc[(df.deck == 'T') | (df.deck == 'A') | (df.deck == 'B') | (df.deck == 'C'), 'HomePlanet'] = 'Europa'
df.groupby(['VIP']).deck.value_counts()
df.loc[(df.deck == 'G') | (df.deck == 'T'), 'VIP'] = 0
df.groupby(['CryoSleep']).deck.value_counts()
|
code
|
122251830/cell_10
|
[
"text_plain_output_1.png"
] |
import pandas as pd
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
test_pass_id = test.PassengerId.copy()
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train_pass_id = train.PassengerId.copy()
X = train.drop(columns='Transported')
y = train[['Transported']]
df = pd.concat([X, test], ignore_index=True)
nan_inicial = df.isna().sum()
nan_inicial
|
code
|
122251830/cell_12
|
[
"text_html_output_1.png"
] |
import pandas as pd
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
test_pass_id = test.PassengerId.copy()
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train_pass_id = train.PassengerId.copy()
X = train.drop(columns='Transported')
y = train[['Transported']]
df = pd.concat([X, test], ignore_index=True)
nan_inicial = df.isna().sum()
nan_inicial
df[['deck', 'num', 'side']] = df['Cabin'].str.split(pat='/', expand=True)
df[['Passenger', '_Id']] = df['PassengerId'].str.split(pat='_', expand=True)
df[['Nome', 'Sobrenome']] = df['Name'].str.split(pat=' ', expand=True)
df.drop(columns=['Cabin', 'Name'], inplace=True)
df.head()
|
code
|
130026158/cell_42
|
[
"text_plain_output_1.png"
] |
number = input('Enter an integer: ')
number = int(input('Enter an integer:'))
print('The number is', number)
print(type(number))
|
code
|
130026158/cell_9
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
|
code
|
130026158/cell_25
|
[
"text_plain_output_1.png"
] |
message = 'Python is a programming language.'
message.split()
|
code
|
130026158/cell_4
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
|
code
|
130026158/cell_57
|
[
"text_plain_output_1.png"
] |
x = 3.14
x += 5
x = 3.14
x -= 5
x = 3.14
x *= 5
x = 3.14
x /= 5
print(x)
|
code
|
130026158/cell_56
|
[
"text_plain_output_1.png"
] |
x = 3.14
x += 5
x = 3.14
x -= 5
x = 3.14
x *= 5
print(x)
|
code
|
130026158/cell_34
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.extend(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.append(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis[0] = 'hello python!'
nlis[1] = 1.618
nlis[2] = [3.14, 2022]
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
del nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
copy_list = nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
copy_list = nlis
nlis[0] = 'hello python!'
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
clone_lis = nlis[:]
clone_lis
|
code
|
130026158/cell_23
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.extend(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.append(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis[0] = 'hello python!'
nlis[1] = 1.618
nlis[2] = [3.14, 2022]
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
print('Before deleting:', nlis)
del nlis
print('After deleting:', nlis)
|
code
|
130026158/cell_20
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.extend(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.append(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
print('Before changing:', nlis)
nlis[0] = 'hello python!'
print('After changing:', nlis)
nlis[1] = 1.618
print('After changing:', nlis)
nlis[2] = [3.14, 2022]
print('After changing:', nlis)
|
code
|
130026158/cell_55
|
[
"text_plain_output_1.png"
] |
x = 3.14
x += 5
x = 3.14
x -= 5
print(x)
|
code
|
130026158/cell_40
|
[
"text_plain_output_1.png"
] |
text = 'p,y,t,h,o,n'
text.split(',')
text = input('Enter a string:')
print('The text is', text)
print(type(text))
|
code
|
130026158/cell_29
|
[
"text_plain_output_1.png"
] |
nlis_1 = ['a', 'b', 'hello', 'Python']
nlis_2 = [1, 2, 3, 4, 5, 6]
print(len(nlis_1))
print(len(nlis_2))
print(nlis_1 + nlis_2)
print(nlis_1 * 3)
print(nlis_2 * 3)
for i in nlis_1:
    print(i)
for i in nlis_2:
    print(i)
print(4 in nlis_1)
print(4 in nlis_2)
|
code
|
130026158/cell_48
|
[
"text_plain_output_1.png"
] |
expression = '8+7'
total = eval(expression)
a = float(input('Enter the pi number:'))
b = float(input('Enter the golden ratio:'))
total = a + b
a = input('Enter your favorite fruit:')
b = input('Enter your favorite food:')
print('I like {} and {}.'.format(a, b))
print('I like {0} and {1}.'.format(a, b))
print('I like {1} and {0}.'.format(a, b))
|
code
|
130026158/cell_41
|
[
"text_plain_output_1.png"
] |
number = input('Enter an integer: ')
print('The number is', number)
print(type(number))
|
code
|
130026158/cell_54
|
[
"text_plain_output_1.png"
] |
x = 3.14
x += 5
print(x)
|
code
|
130026158/cell_60
|
[
"text_plain_output_1.png"
] |
x = 3.14
x += 5
x = 3.14
x -= 5
x = 3.14
x *= 5
x = 3.14
x /= 5
x = 3.14
x %= 5
x = 3.14
x //= 5
x = 3.14
x **= 5
print(x)
|
code
|
130026158/cell_50
|
[
"text_plain_output_1.png"
] |
expression = '8+7'
total = eval(expression)
a = float(input('Enter the pi number:'))
b = float(input('Enter the golden ratio:'))
total = a + b
a = input('Enter your favorite fruit:')
b = input('Enter your favorite food:')
a = 3.14
b = 1.618
print('a>b is:', a > b)
print('a<b is:', a < b)
print('a<=b is:', a <= b)
print('a>=b is:', a >= b)
print('a==b is:', a == b)
print('a!=b is:', a != b)
|
code
|
130026158/cell_52
|
[
"text_plain_output_1.png"
] |
expression = '8+7'
total = eval(expression)
a = float(input('Enter the pi number:'))
b = float(input('Enter the golden ratio:'))
total = a + b
a = input('Enter your favorite fruit:')
b = input('Enter your favorite food:')
a = 3.14
b = 1.618
a = 3.14
b = 1.618
c = 12
d = 3.14
print(a > b and c > a)
print(b > c and d > a)
print(b < c or d > a)
print(not a == b)
print(not a == d)
|
code
|
130026158/cell_7
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
|
code
|
130026158/cell_45
|
[
"text_plain_output_1.png"
] |
expression = '8+7'
total = eval(expression)
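# eval() evaluates the string '8+7' as a Python expression, so total is the integer 15.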
print('Sum of the expression is', total)
print(type(expression))
print(type(total))
|
code
|
130026158/cell_18
|
[
"text_plain_output_1.png"
] |
lis = [1, 2, 3, 4, 5, 6, 7]
print(len(lis))
lis.append(4)
print(lis)
print(lis.count(4))
print(lis.index(2))
lis.insert(8, 9)
print(lis)
print(max(lis))
print(min(lis))
print(sum(lis))
|
code
|
130026158/cell_32
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.extend(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.append(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis[0] = 'hello python!'
nlis[1] = 1.618
nlis[2] = [3.14, 2022]
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
del nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
copy_list = nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
print(nlis)
copy_list = nlis
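# copy_list is only another name for the same list object, so changes made through nlis are visible through copy_list.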
print(copy_list)
print('copy_list[0]:', copy_list[0])
nlis[0] = 'hello python!'
print('copy_list[0]:', copy_list[0])
|
code
|
130026158/cell_62
|
[
"text_plain_output_1.png"
] |
expression = '8+7'
total = eval(expression)
a = float(input('Enter the pi number:'))
b = float(input('Enter the golden ratio:'))
total = a + b
a = input('Enter your favorite fruit:')
b = input('Enter your favorite food:')
a = 3.14
b = 1.618
a = 3.14
b = 1.618
c = 12
d = 3.14
a = 3.14
b = 1.618
print(a is b)
print(a is not b)
msg1 = 'Hello, Python!'
msg2 = 'Hello, World!'
print(msg1 is msg2)
print(msg1 is not msg2)
lis1 = [3.14, 1.618]
lis2 = [3.14, 1.618]
print(lis1 is lis2)
print(lis1 is not lis2)
|
code
|
130026158/cell_59
|
[
"text_plain_output_1.png"
] |
x = 3.14
x += 5
x = 3.14
x -= 5
x = 3.14
x *= 5
x = 3.14
x /= 5
x = 3.14
x %= 5
x = 3.14
x //= 5
print(x)
|
code
|
130026158/cell_58
|
[
"text_plain_output_1.png"
] |
x = 3.14
x += 5
x = 3.14
x -= 5
x = 3.14
x *= 5
x = 3.14
x /= 5
x = 3.14
x %= 5
print(x)
|
code
|
130026158/cell_16
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.extend(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.append(['hello world!', 1.618])
nlis
|
code
|
130026158/cell_47
|
[
"text_plain_output_1.png"
] |
expression = '8+7'
total = eval(expression)
a = float(input('Enter the pi number:'))
b = float(input('Enter the golden ratio:'))
total = a + b
print('Sum of {} and {} is {}.'.format(a, b, total))
|
code
|
130026158/cell_35
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.extend(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.append(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis[0] = 'hello python!'
nlis[1] = 1.618
nlis[2] = [3.14, 2022]
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
del nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
copy_list = nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
copy_list = nlis
nlis[0] = 'hello python!'
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
clone_lis = nlis[:]
clone_lis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
print(nlis)
clone_list = nlis[:]
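# nlis[:] builds a new (shallow) copy, so reassigning nlis[0] below does not change clone_list.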
print(clone_list)
print('clone_list[0]:', clone_list[0])
nlis[0] = 'hello, python!'
print('nlis[0]:', nlis[0])
|
code
|
130026158/cell_43
|
[
"text_plain_output_1.png"
] |
number = input('Enter an integer: ')
number = int(input('Enter an integer:'))
number = float(input('Enter an integer:'))
print('The number is', number)
print(type(number))
|
code
|
130026158/cell_31
|
[
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.extend(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.append(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis[0] = 'hello python!'
nlis[1] = 1.618
nlis[2] = [3.14, 2022]
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
del nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
copy_list = nlis
print('nlis:', nlis)
print('copy_list:', copy_list)
|
code
|
130026158/cell_14
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.extend(['hello world!', 1.618])
nlis
|
code
|
130026158/cell_22
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.extend(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis.append(['hello world!', 1.618])
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis[0] = 'hello python!'
nlis[1] = 1.618
nlis[2] = [3.14, 2022]
print('Before changing:', nlis)
del nlis[0]
print('After changing:', nlis)
del nlis[-1]
print('After changing:', nlis)
|
code
|
130026158/cell_10
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
len(nlis)
|
code
|
130026158/cell_27
|
[
"text_plain_output_1.png"
] |
text = 'p,y,t,h,o,n'
text.split(',')
|
code
|
130026158/cell_37
|
[
"text_plain_output_1.png"
] |
a_list = ['a', 'b', ['c', 'd'], 'e']
b_list = [1, 2, 3, 4, 5, (6, 7), True, False]
new_list = a_list + b_list
print(new_list)
|
code
|
130026158/cell_12
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
nlis = ['python', 3.14, 2022, [1, 1, 2, 3, 5, 8, 13, 21, 34], ('hello', 'python', 3, 14, 2022)]
nlis
print(nlis[0:2])
print(nlis[2:4])
print(nlis[4:6])
|
code
|
130026158/cell_5
|
[
"text_plain_output_1.png"
] |
nlis = ['python', 25, 2022]
nlis
print('Positive and negative indexing of the first element: \n - Positive index:', nlis[0], '\n - Negative index:', nlis[-3])
print('Positive and negative indexing of the second element: \n - Positive index:', nlis[1], '\n - Negative index:', nlis[-2])
print('Positive and negative indexing of the third element: \n - Positive index:', nlis[2], '\n - Negative index:', nlis[-1])
|
code
|
34129362/cell_9
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
train_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/train_set.csv')
validation_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/validation_set.csv')
test_set = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
train_set.head().T
x_train = train_set.drop(['item_cnt_month', 'date_block_num'], axis=1)
y_train = train_set['item_cnt_month'].astype(int)
x_val = validation_set.drop(['item_cnt_month', 'date_block_num'], axis=1)
y_val = validation_set['item_cnt_month'].astype(int)
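# Build test-set features by taking each (shop_id, item_id) pair's latest available row from train + validation and merging it onto the test IDs.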
latest_records = pd.concat([train_set, validation_set]).drop_duplicates(subset=['shop_id', 'item_id'], keep='last')
x_test = pd.merge(test_set, latest_records, on=['shop_id', 'item_id'], how='left', suffixes=['', '_'])
x_test['year'] = 2015
x_test['month'] = 9
x_test.drop('item_cnt_month', axis=1, inplace=True)
x_test = x_test[x_train.columns]
ts = time.time()
sets = [x_train, x_val, x_test]
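# Fill missing values per shop with that shop's column median; anything still missing in x_test afterwards falls back to the column mean.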
for dataset in sets:
    for shop_id in dataset['shop_id'].unique():
        for column in dataset.columns:
            shop_median = dataset[dataset['shop_id'] == shop_id][column].median()
            dataset.loc[dataset[column].isnull() & (dataset['shop_id'] == shop_id), column] = shop_median
x_test.fillna(x_test.mean(), inplace=True)
print('Time taken : ', time.time() - ts)
|
code
|
34129362/cell_25
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/train_set.csv')
validation_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/validation_set.csv')
test_set = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
train_set.head().T
x_train = train_set.drop(['item_cnt_month', 'date_block_num'], axis=1)
y_train = train_set['item_cnt_month'].astype(int)
x_val = validation_set.drop(['item_cnt_month', 'date_block_num'], axis=1)
y_val = validation_set['item_cnt_month'].astype(int)
latest_records = pd.concat([train_set, validation_set]).drop_duplicates(subset=['shop_id', 'item_id'], keep='last')
x_test = pd.merge(test_set, latest_records, on=['shop_id', 'item_id'], how='left', suffixes=['', '_'])
x_test['year'] = 2015
x_test['month'] = 9
x_test.drop('item_cnt_month', axis=1, inplace=True)
x_test = x_test[x_train.columns]
train_predictions = {'LR': M1_train, 'KN': M2_train, 'RF': M3_train}
train_predictions = pd.DataFrame(train_predictions)
train_predictions.head(10).T
|
code
|
34129362/cell_23
|
[
"text_html_output_1.png"
] |
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
train_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/train_set.csv')
validation_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/validation_set.csv')
test_set = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
train_set.head().T
x_train = train_set.drop(['item_cnt_month', 'date_block_num'], axis=1)
y_train = train_set['item_cnt_month'].astype(int)
x_val = validation_set.drop(['item_cnt_month', 'date_block_num'], axis=1)
y_val = validation_set['item_cnt_month'].astype(int)
latest_records = pd.concat([train_set, validation_set]).drop_duplicates(subset=['shop_id', 'item_id'], keep='last')
x_test = pd.merge(test_set, latest_records, on=['shop_id', 'item_id'], how='left', suffixes=['', '_'])
x_test['year'] = 2015
x_test['month'] = 9
x_test.drop('item_cnt_month', axis=1, inplace=True)
x_test = x_test[x_train.columns]
ts = time.time()
sets = [x_train, x_val, x_test]
for dataset in sets:
    for shop_id in dataset['shop_id'].unique():
        for column in dataset.columns:
            shop_median = dataset[dataset['shop_id'] == shop_id][column].median()
            dataset.loc[dataset[column].isnull() & (dataset['shop_id'] == shop_id), column] = shop_median
x_test.fillna(x_test.mean(), inplace=True)
x_test.head().T
all_f = ['shop_id', 'item_id', 'item_cnt', 'mean_item_cnt', 'transactions', 'year', 'month', 'item_cnt_mean', 'item_cnt_std', 'item_cnt_shifted1', 'item_cnt_shifted2', 'item_cnt_shifted3', 'item_trend', 'shop_mean', 'item_mean', 'shop_item_mean', 'year_mean', 'month_mean']
x_tr = x_train[all_f]
x_va = x_val[all_f]
x_te = x_test[all_f]
def models(model, x_tr, y_train, x_va, x_te):
    model.fit(x_tr, y_train)
    train_pred = model.predict(x_tr)
    val_pred = model.predict(x_va)
    test_pred = model.predict(x_te)
    return (train_pred, val_pred, test_pred)
x_t_knn = x_tr[:100000]
y_t_knn = y_train[:100000]
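# KNN is distance-based, so features are scaled to [0, 1] with MinMaxScaler before fitting; only the first 100,000 training rows are used, presumably to keep the neighbour search tractable.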
scaler = MinMaxScaler()
scaler.fit(x_t_knn)
scaled_x_t_knn = scaler.transform(x_t_knn)
scaled_x_va = scaler.transform(x_va)
scaled_x_te = scaler.transform(x_te)
KN = KNeighborsRegressor(n_neighbors=20, leaf_size=15, n_jobs=-1)
# models() returns train, validation and test predictions; the KNN model only sees the scaled 100k-row training subset.
M2_train, M2_val, M2_test = models(KN, scaled_x_t_knn, y_t_knn, scaled_x_va, scaled_x_te)
print('Train rmse:', np.sqrt(mean_squared_error(y_t_knn, M2_train)))
print('Validation rmse:', np.sqrt(mean_squared_error(y_val, M2_val)))
|
code
|
34129362/cell_2
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
|
code
|
34129362/cell_17
|
[
"text_html_output_1.png"
] |
from sklearn.metrics import mean_squared_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/train_set.csv')
validation_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/validation_set.csv')
test_set = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
train_set.head().T
x_train = train_set.drop(['item_cnt_month', 'date_block_num'], axis=1)
y_train = train_set['item_cnt_month'].astype(int)
x_val = validation_set.drop(['item_cnt_month', 'date_block_num'], axis=1)
y_val = validation_set['item_cnt_month'].astype(int)
print('Train rmse:', np.sqrt(mean_squared_error(y_train, M1_train)))
print('Validation rmse:', np.sqrt(mean_squared_error(y_val, M1_val)))
|
code
|
34129362/cell_10
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
train_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/train_set.csv')
validation_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/validation_set.csv')
test_set = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
train_set.head().T
x_train = train_set.drop(['item_cnt_month', 'date_block_num'], axis=1)
y_train = train_set['item_cnt_month'].astype(int)
x_val = validation_set.drop(['item_cnt_month', 'date_block_num'], axis=1)
y_val = validation_set['item_cnt_month'].astype(int)
latest_records = pd.concat([train_set, validation_set]).drop_duplicates(subset=['shop_id', 'item_id'], keep='last')
x_test = pd.merge(test_set, latest_records, on=['shop_id', 'item_id'], how='left', suffixes=['', '_'])
x_test['year'] = 2015
x_test['month'] = 9
x_test.drop('item_cnt_month', axis=1, inplace=True)
x_test = x_test[x_train.columns]
ts = time.time()
sets = [x_train, x_val, x_test]
for dataset in sets:
for shop_id in dataset['shop_id'].unique():
for column in dataset.columns:
shop_median = dataset[dataset['shop_id'] == shop_id][column].median()
dataset.loc[dataset[column].isnull() & (dataset['shop_id'] == shop_id), column] = shop_median
x_test.fillna(x_test.mean(), inplace=True)
x_test.head().T
|
code
|
34129362/cell_5
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/train_set.csv')
validation_set = pd.read_csv('/kaggle/input/preprocessed-sales-data/validation_set.csv')
test_set = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
train_set.head().T
|
code
|
17101817/cell_13
|
[
"text_plain_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
c3 = df['Genres'].value_counts(dropna=False, sort=False)
c3
|
code
|
17101817/cell_4
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/googleplaystore.csv')
df.head()
|
code
|
17101817/cell_23
|
[
"text_plain_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
c5 = df['Content Rating'].value_counts(dropna=False, sort=True, normalize=True)
c5
|
code
|
17101817/cell_30
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='ticks')
sns.set_context('paper')
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import os
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
c2 = df['Category'].value_counts(dropna=True, sort=True, normalize=True).plot.bar()
plt.title('Frequency of App Categories')
ax = sns.boxplot(x="Rating", y="Content Rating", data=df)
plt.xlabel('Rating counts from 0 to 5')
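# df.columns[7] is the Price column; values such as '$4.99' have '$' and ',' stripped and are cast to float.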
df['Price'] = df[df.columns[7]].replace('[\\$,]', '', regex=True).astype(float)
df['Price_Bin'].value_counts(sort=True, dropna=False, normalize=True).plot.bar()
plt.title('Distribution of apps in price ranges')
plt.xlabel('Price Ranges')
plt.ylabel('Amount of apps in the price range in %')
|
code
|
17101817/cell_20
|
[
"text_plain_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
c5 = df['Content Rating'].value_counts(dropna=False, sort=True, normalize=True)
c5.plot.bar()
|
code
|
17101817/cell_6
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/googleplaystore.csv')
c1 = df['Category'].value_counts(dropna=False, sort=False)
c1
|
code
|
17101817/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='ticks')
sns.set_context('paper')
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import os
print(os.listdir('../input'))
|
code
|
17101817/cell_7
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/googleplaystore.csv')
c1 = df['Category'].value_counts(dropna=False, sort=False)
c1
len(c1)
|
code
|
17101817/cell_18
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
c2 = df['Category'].value_counts(dropna=True, sort=True, normalize=True).plot.bar()
plt.title('Frequency of App Categories')
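# Genres_Sele (the primary genre) is created in a later cell of this notebook (see cell_43); repeated here so the plot below runs on its own.
df['Genres_List'] = list(map(lambda x: x.split(';'), df['Genres']))
df['Genres_Sele'] = list(map(lambda x: x[0], df['Genres_List']))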
df['Genres_Sele'].value_counts(normalize=True).plot.bar()
plt.title('Frequency of App Genres')
|
code
|
17101817/cell_32
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='ticks')
sns.set_context('paper')
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import os
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
c2 = df['Category'].value_counts(dropna=True, sort=True, normalize=True).plot.bar()
plt.title('Frequency of App Categories')
ax = sns.boxplot(x="Rating", y="Content Rating", data=df)
plt.xlabel('Rating counts from 0 to 5')
df['Price'] = df[df.columns[7]].replace('[\\$,]', '', regex=True).astype(float)
sns.regplot(x='Price', y='Rating', fit_reg=True, data=df)
plt.xlabel('Price')
plt.ylabel('Rating')
plt.title('The relationship between app price and rating')
|
code
|
17101817/cell_28
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
df['Price'] = df[df.columns[7]].replace('[\\$,]', '', regex=True).astype(float)
print('Max:{} Min:{}'.format(df['Price'].max(), df['Price'].min()))
|
code
|
17101817/cell_35
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='ticks')
sns.set_context('paper')
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import os
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
c2 = df['Category'].value_counts(dropna=True, sort=True, normalize=True).plot.bar()
plt.title('Frequency of App Categories')
ax = sns.boxplot(x="Rating", y="Content Rating", data=df)
plt.xlabel('Rating counts from 0 to 5')
df['Price'] = df[df.columns[7]].replace('[\\$,]', '', regex=True).astype(float)
df_price = df[df['Price'] <= 50]
sns.regplot(x='Price', y='Rating', fit_reg=True, data=df_price)
plt.xlabel('Price')
plt.ylabel('Rating')
plt.title('The relationship between app price and rating from $0 to $50')
|
code
|
17101817/cell_43
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='ticks')
sns.set_context('paper')
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import os
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
c2 = df['Category'].value_counts(dropna=True, sort=True, normalize=True).plot.bar()
plt.title('Frequency of App Categories')
df['Genres_List'] = list(map(lambda x: x.split(';'), df['Genres']))
df['Genres_Sele'] = list(map(lambda x: x[0], df['Genres_List']))
ax = sns.boxplot(x="Rating", y="Content Rating", data=df)
plt.xlabel('Rating counts from 0 to 5')
df['Price'] = df[df.columns[7]].replace('[\\$,]', '', regex=True).astype(float)
df_price = df[df['Price'] <= 50]
def KB_to_MB(kb):
    """Converts all sizes in the DF to float MB size."""
    if 'Varies with device' in kb:
        return np.nan
    else:
        num = float(kb[:-1])
        mes = kb[-1]
        if mes == 'k':  # size given in kB: convert to MB
            mb = 0.000976562  # 1 kB in MB (1/1024)
            c_mb = mb * num
            return c_mb
        else:
            return num
df['Size_MB'] = list(map(lambda x: KB_to_MB(x), df['Size']))
df = df.dropna()
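# Installs is stored as strings like '10,000+'; strip '+' and ',' and cast to float before plotting.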
df['Installs'] = df[df.columns[5]].replace('[\\+,]', '', regex=True).astype(float)
sns.regplot(x='Installs', y='Size_MB', fit_reg=True, data=df)
plt.xlabel('Amount of installs in K')
plt.ylabel('Size of App in MB')
plt.title('The relationship between App Size and Download Frequency')
|
code
|
17101817/cell_24
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='ticks')
sns.set_context('paper')
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import os
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
c2 = df['Category'].value_counts(dropna=True, sort=True, normalize=True).plot.bar()
plt.title('Frequency of App Categories')
ax = sns.boxplot(x='Rating', y='Content Rating', data=df)
plt.xlabel('Rating counts from 0 to 5')
|
code
|
17101817/cell_14
|
[
"text_plain_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
df['Genres'].describe()
|
code
|
17101817/cell_10
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/googleplaystore.csv')
df = df[df['Category'] != '1.9']
c2 = df['Category'].value_counts(dropna=True, sort=True, normalize=True).plot.bar()
plt.title('Frequency of App Categories')
|
code
|
88082030/cell_42
|
[
"text_plain_output_1.png"
] |
import pandas as pd
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
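# fromkeys maps every key in the set to the same value, 'Asia' here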
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries.setdefault('India')
countries.update({'Nigeria': 'Africa', 'Egypt': 'Africa', 'Ethiopia': 'Africa', 'Kenya': 'Africa'})
list(countries.keys())
list(set(countries.values()))
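# build a country/continent DataFrame from the dict; the from_dict construction below replaces the first one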
countries_df = pd.DataFrame(countries.items(), columns=['country', 'continent'])
countries_df = pd.DataFrame.from_dict(countries, orient='index').reset_index()
countries_df.columns = ['country', 'continent']
countries_df
|
code
|
88082030/cell_21
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict
|
code
|
88082030/cell_13
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
|
code
|
88082030/cell_9
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict
|
code
|
88082030/cell_25
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
type(countries)
|
code
|
88082030/cell_34
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries.setdefault('India')
countries.update({'Nigeria': 'Africa', 'Egypt': 'Africa', 'Ethiopia': 'Africa', 'Kenya': 'Africa'})
list(countries.keys())
list(set(countries.values()))
|
code
|
88082030/cell_30
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries.setdefault('India')
|
code
|
88082030/cell_33
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries.setdefault('India')
countries.update({'Nigeria': 'Africa', 'Egypt': 'Africa', 'Ethiopia': 'Africa', 'Kenya': 'Africa'})
list(countries.keys())
|
code
|
88082030/cell_20
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
|
code
|
88082030/cell_40
|
[
"text_plain_output_1.png"
] |
import pandas as pd
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries.setdefault('India')
countries.update({'Nigeria': 'Africa', 'Egypt': 'Africa', 'Ethiopia': 'Africa', 'Kenya': 'Africa'})
list(countries.keys())
list(set(countries.values()))
countries_df = pd.DataFrame(countries.items(), columns=['country', 'continent'])
countries_df
|
code
|
88082030/cell_29
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries
|
code
|
88082030/cell_26
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries
|
code
|
88082030/cell_11
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
|
code
|
88082030/cell_19
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
|
code
|
88082030/cell_7
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict
|
code
|
88082030/cell_18
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
|
code
|
88082030/cell_32
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries.setdefault('India')
countries.update({'Nigeria': 'Africa', 'Egypt': 'Africa', 'Ethiopia': 'Africa', 'Kenya': 'Africa'})
countries
|
code
|
88082030/cell_15
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
|
code
|
88082030/cell_16
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
|
code
|
88082030/cell_35
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries.setdefault('India')
countries.update({'Nigeria': 'Africa', 'Egypt': 'Africa', 'Ethiopia': 'Africa', 'Kenya': 'Africa'})
list(countries.keys())
list(set(countries.values()))
'Canada' in countries
|
code
|
88082030/cell_43
|
[
"text_plain_output_1.png"
] |
import pandas as pd
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries.setdefault('India')
countries.update({'Nigeria': 'Africa', 'Egypt': 'Africa', 'Ethiopia': 'Africa', 'Kenya': 'Africa'})
list(countries.keys())
list(set(countries.values()))
countries_df = pd.DataFrame(countries.items(), columns=['country', 'continent'])
for key, value in countries.items():
print('Key:', key, '|', 'Value:', value)
|
code
|
88082030/cell_14
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
|
code
|
88082030/cell_10
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
|
code
|
88082030/cell_37
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries.setdefault('India')
countries.update({'Nigeria': 'Africa', 'Egypt': 'Africa', 'Ethiopia': 'Africa', 'Kenya': 'Africa'})
list(countries.keys())
list(set(countries.values()))
list(countries)
|
code
|
88082030/cell_5
|
[
"text_html_output_1.png"
] |
mydict = {}
type(mydict)
|
code
|
88082030/cell_36
|
[
"text_plain_output_1.png"
] |
mydict = {}
mydict.keys()
mydict.values()
mydict.update({'c': 'cat', 'd': 'dog', 'e': 'elephant'})
mydict.keys()
mydict.values()
mydict.items()
mydict.get('a')
mydict.get('g')
mydict.pop('e')
mydict.keys()
mydict.popitem()
mydict.clear()
keys = {'India', 'Srilanka', 'Bangladesh', 'Singapore', 'Japan'}
val = 'Asia'
countries = mydict.fromkeys(keys, val)
countries.setdefault('USA', None)
countries.setdefault('India')
countries.update({'Nigeria': 'Africa', 'Egypt': 'Africa', 'Ethiopia': 'Africa', 'Kenya': 'Africa'})
list(countries.keys())
list(set(countries.values()))
'Nigeria' in countries
|
code
|