Dataset schema (one record per notebook cell):

  Column            Type                 Range
  path              string               length 13-17
  screenshot_names  sequence of strings  1-873 items
  code              string               length 0-40.4k
  cell_type         string class         1 value ("code")
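For reference, a minimal sketch of reading records with this schema, assuming they are exported as JSON Lines with one {path, screenshot_names, code, cell_type} object per line; the file name cells.jsonl is hypothetical:

import json

with open('cells.jsonl') as f:
    for line in f:
        record = json.loads(line)  # one cell: path, screenshots, code, cell_type
        print(record['path'], len(record['screenshot_names']), len(record['code']))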
18112986/cell_10
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']

def detect_outlier(df, n, features):
    # Tukey's rule: flag rows lying outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] per
    # feature, then keep only rows flagged in more than n features.
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col].dropna(), 25)
        Q3 = np.percentile(df[col].dropna(), 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outliers_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [k for k, v in outlier_indices.items() if v > n]
    return multiple_outliers

outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train.info()
train.isnull().sum()
code
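For reference, the 1.5 * IQR fence that detect_outlier applies per feature, checked on a tiny hand-computable series (toy data, not from the Titanic set):

import numpy as np

s = np.array([1, 2, 3, 4, 100])
q1, q3 = np.percentile(s, 25), np.percentile(s, 75)  # 2.0 and 4.0
step = 1.5 * (q3 - q1)                               # 3.0
print(s[(s < q1 - step) | (s > q3 + step)])          # [100]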
18112986/cell_12
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']

def detect_outlier(df, n, features):
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col].dropna(), 25)
        Q3 = np.percentile(df[col].dropna(), 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outliers_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [k for k, v in outlier_indices.items() if v > n]
    return multiple_outliers

outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
train.isnull().sum()
train.dtypes
code
18112986/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
IDtest = test['PassengerId']

def detect_outlier(df, n, features):
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col].dropna(), 25)
        Q3 = np.percentile(df[col].dropna(), 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outliers_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outliers_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [k for k, v in outlier_indices.items() if v > n]
    return multiple_outliers

outliers_to_drop = detect_outlier(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
train.loc[outliers_to_drop]
code
122258235/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
from pathlib import Path
from sklearn.linear_model import LinearRegression

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
# X_train, y_train and X_test are assumed to be defined in an earlier cell
# that is not part of this dump.
model = LinearRegression()  # a distinct name; reusing `df` here would shadow the DataFrame
model.fit(X_train, y_train)
predictions = model.predict(X_test)
model.predict([[5.1, 2.5, 1.1]])
model.predict([[7.5, 3.0, 1.8]])
code
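The regression cells in this notebook call fit and predict on X_train, y_train and X_test that never appear in the dumped code. A minimal sketch of the presumably omitted split; the choice of three feature columns and the PetalWidthCm target is an assumption (the predictions pass three numbers per row, but the dump does not say which columns they correspond to):

import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
X = df[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm']]  # hypothetical feature choice
y = df['PetalWidthCm']                                      # hypothetical target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)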
122258235/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from pathlib import Path

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
print('Distinct values for species', df['Species'].unique())
code
122258235/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from pathlib import Path

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
df.describe()
code
122258235/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
from pathlib import Path

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
df.head()
code
122258235/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
from pathlib import Path
from sklearn.linear_model import LinearRegression

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
# X_train, y_train and X_test are assumed to be defined in an earlier cell.
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print('coefficient of determination:', model.score(X_train, y_train))
print('intercept:', model.intercept_)
print('slope:', model.coef_)
code
122258235/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
from pathlib import Path

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
print('Maximum value of SepalLengthCm:', df['SepalLengthCm'].max())
code
122258235/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
from pathlib import Path
from sklearn.linear_model import LinearRegression

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
# X_train, y_train and X_test are assumed to be defined in an earlier cell.
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
model.predict([[5.1, 2.5, 1.1]])
model.predict([[7.5, 3.0, 1.8]])
model.predict([[4.6, 3.5, 0.2]])
code
122258235/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
from pathlib import Path
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
# X_train, y_train, X_test and y_test are assumed to be defined in an earlier cell.
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print('mean_squared_error : ', mean_squared_error(y_test, predictions))
print('mean_absolute_error : ', mean_absolute_error(y_test, predictions))
code
122258235/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
from pathlib import Path
from sklearn.linear_model import LinearRegression

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
# X_train, y_train and X_test are assumed to be defined in an earlier cell.
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
model.predict([[5.1, 2.5, 1.1]])
code
122258235/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from pathlib import Path

df = pd.read_csv('/kaggle/input/iris/Iris.csv')
print(df['Species'].value_counts())
code
2012154/cell_42
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle
from nltk.sentiment.vader import SentimentIntensityAnalyzer

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)

def sentiment_value(paragraph):
    # VADER compound polarity in [-1, 1], rounded to one decimal place.
    analyser = SentimentIntensityAnalyzer()
    result = analyser.polarity_scores(paragraph)
    score = result['compound']
    return round(score, 1)

sample2 = data_df['Reviews'][9001]
print(sample2)
print('Sentiment: ')
print(sentiment_value(sample2))
code
2012154/cell_21
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
# Sum and mean of Rating and Review Votes per brand, highest summed rating first.
info = pd.pivot_table(data_df, index=['Brand Name'], values=['Rating', 'Review Votes'], columns=[], aggfunc=[np.sum, np.mean], fill_value=0)
info = info.sort_values(by=('sum', 'Rating'), ascending=False)
info.head(10)
code
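Because aggfunc=[np.sum, np.mean] yields a two-level column index, the sort key above is the tuple ('sum', 'Rating'). A toy illustration of that structure, on made-up data:

import numpy as np
import pandas as pd

toy = pd.DataFrame({'Brand Name': ['A', 'A', 'B'], 'Rating': [5, 3, 4], 'Review Votes': [1, 0, 2]})
p = pd.pivot_table(toy, index=['Brand Name'], values=['Rating', 'Review Votes'], aggfunc=[np.sum, np.mean], fill_value=0)
print(p.sort_values(by=('sum', 'Rating'), ascending=False))  # brand A first (summed rating 8)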
2012154/cell_25
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
ylabel = data_df['Price']
xlabel = data_df['Rating']
ylabel2 = data_df['Price']
plt.ylabel('Price')
xlabel2 = data_df['Review Votes']
plt.xlabel('Review Votes')
plt.scatter(xlabel2, ylabel2, alpha=0.1)
plt.show()
code
2012154/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
ylabel = data_df['Price']
plt.ylabel('Price')
plt.xlabel('Rating')
xlabel = data_df['Rating']
plt.scatter(xlabel, ylabel, alpha=0.1)
plt.show()
code
2012154/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
all_reviews = data_df['Reviews']
all_reviews.head()
code
2012154/cell_55
[ "text_html_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
temp_data = data_df[0:20000]
temp_data.shape
# Count reviews whose star rating and sentiment-derived rating differ by more
# than 1 (SENTIMENT_VALUE is added to temp_data in an earlier cell).
counter = 0
for i in range(0, 20000):
    if abs(temp_data['Rating'][i] - temp_data['SENTIMENT_VALUE'][i]) > 1:
        counter += 1
counter
code
2012154/cell_74
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
info = pd.pivot_table(data_df, index=['Brand Name'], values=['Rating', 'Review Votes'], columns=[], aggfunc=[np.sum, np.mean], fill_value=0)
info = info.sort_values(by=('sum', 'Rating'), ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
temp_data = data_df[0:20000]
temp_data.shape
counter = 0
for i in range(0, 20000):
    if abs(temp_data['Rating'][i] - temp_data['SENTIMENT_VALUE'][i]) > 1:
        counter += 1
accuracy = (temp_data.shape[0] - counter) / temp_data.shape[0]
testing2 = pd.pivot_table(temp_data, index=['Brand Name'], values=['Rating', 'Review Votes', 'SENTIMENT_VALUE'], columns=[], aggfunc=[np.sum, np.mean], fill_value=0)
testing2 = testing2.sort_values(by=('sum', 'Rating'), ascending=False)
testing3 = pd.pivot_table(temp_data, index=['Product Name'], values=['Rating', 'Review Votes', 'SENTIMENT_VALUE'], columns=[], aggfunc=[np.sum, np.mean], fill_value=0)
testing3 = testing3.sort_values(by=('sum', 'Rating'), ascending=False)
testing3.head(10)
code
2012154/cell_76
[ "image_output_2.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import pylab
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
info = pd.pivot_table(data_df, index=['Brand Name'], values=['Rating', 'Review Votes'], columns=[], aggfunc=[np.sum, np.mean], fill_value=0)
info = info.sort_values(by=('sum', 'Rating'), ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
temp_data = data_df[0:20000]
temp_data.shape
counter = 0
for i in range(0, 20000):
    if abs(temp_data['Rating'][i] - temp_data['SENTIMENT_VALUE'][i]) > 1:
        counter += 1
accuracy = (temp_data.shape[0] - counter) / temp_data.shape[0]
testing2 = pd.pivot_table(temp_data, index=['Brand Name'], values=['Rating', 'Review Votes', 'SENTIMENT_VALUE'], columns=[], aggfunc=[np.sum, np.mean], fill_value=0)
testing2 = testing2.sort_values(by=('sum', 'Rating'), ascending=False)
names = testing2.index[:10]
y = testing2['sum', 'SENTIMENT_VALUE'][:10]
y2 = testing2['sum', 'Rating'][:10]
pylab.figure(figsize=(15, 7))
x = range(10)
pylab.subplot(2, 1, 1)
pylab.xticks(x, names)
pylab.ylabel('Summed Values')
pylab.title('Total Sum Values')
pylab.plot(x, y, 'r-', x, y2, 'b-')
pylab.legend(['SentimentValue', 'Rating'])
y_new = testing2['mean', 'SENTIMENT_VALUE'][:10]
y2_new = testing2['mean', 'Rating'][:10]
pylab.figure(figsize=(15, 7))
pylab.subplot(2, 1, 2)
pylab.xticks(x, names)
pylab.ylabel('Mean Values')
pylab.title('Mean Values')
pylab.plot(x, y_new, 'r-', x, y2_new, 'b-')
pylab.legend(['SentimentValue', 'Rating'])
pylab.show()
code
2012154/cell_40
[ "text_html_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle
from nltk.sentiment.vader import SentimentIntensityAnalyzer

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)

def sentiment_value(paragraph):
    analyser = SentimentIntensityAnalyzer()
    result = analyser.polarity_scores(paragraph)
    score = result['compound']
    return round(score, 1)

sample = data_df['Reviews'][1231]
print(sample)
print('Sentiment: ')
print(sentiment_value(sample))
code
2012154/cell_29
[ "image_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
code
2012154/cell_39
[ "text_plain_output_1.png" ]
from nltk.sentiment.vader import SentimentIntensityAnalyzer

def sentiment_value(paragraph):
    # VADER compound polarity in [-1, 1], rounded to one decimal place.
    analyser = SentimentIntensityAnalyzer()
    result = analyser.polarity_scores(paragraph)
    score = result['compound']
    return round(score, 1)
code
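A quick smoke test of the helper above; it needs the NLTK vader_lexicon resource, and the exact scores depend on the lexicon version, so only rough magnitudes are indicated:

import nltk
nltk.download('vader_lexicon')  # one-time lexicon download
print(sentiment_value('This phone is amazing, great battery life!'))  # strongly positive, near 1
print(sentiment_value('Terrible screen, arrived broken.'))  # strongly negative, near -1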
2012154/cell_65
[ "image_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
temp_data = data_df[0:20000]
temp_data.shape
counter = 0
for i in range(0, 20000):
    if abs(temp_data['Rating'][i] - temp_data['SENTIMENT_VALUE'][i]) > 1:
        counter += 1
accuracy = (temp_data.shape[0] - counter) / temp_data.shape[0]
product_name_20k = []
for item in temp_data['Product Name']:
    if item in product_name_20k:
        continue
    else:
        product_name_20k.append(item)
len(product_name_20k)
code
2012154/cell_41
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle
from nltk.sentiment.vader import SentimentIntensityAnalyzer

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)

def sentiment_value(paragraph):
    analyser = SentimentIntensityAnalyzer()
    result = analyser.polarity_scores(paragraph)
    score = result['compound']
    return round(score, 1)

sample1 = data_df['Reviews'][99314]
print(sample1)
print('Sentiment: ')
print(sentiment_value(sample1))
code
2012154/cell_61
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
temp_data = data_df[0:20000]
temp_data.shape
counter = 0
for i in range(0, 20000):
    if abs(temp_data['Rating'][i] - temp_data['SENTIMENT_VALUE'][i]) > 1:
        counter += 1
accuracy = (temp_data.shape[0] - counter) / temp_data.shape[0]
temp_data.head()
code
2012154/cell_19
[ "text_html_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
data_df.describe()
code
2012154/cell_69
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
temp_data = data_df[0:20000]
temp_data.shape
counter = 0
for i in range(0, 20000):
    if abs(temp_data['Rating'][i] - temp_data['SENTIMENT_VALUE'][i]) > 1:
        counter += 1
accuracy = (temp_data.shape[0] - counter) / temp_data.shape[0]
brands_temp = []
for item in temp_data['Brand Name']:
    if item in brands_temp:
        continue
    else:
        brands_temp.append(item)
len(brands_temp)
code
2012154/cell_52
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
temp_data = data_df[0:20000]
temp_data.shape
temp_data.head()
code
2012154/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
# Collect the distinct product names (order-preserving) and count them.
product_name = []
for item in data['Product Name']:
    if item in product_name:
        continue
    else:
        product_name.append(item)
len(product_name)
code
2012154/cell_45
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle
from nltk.sentiment.vader import SentimentIntensityAnalyzer

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
all_reviews = data_df['Reviews']
data_df = data_df.reset_index(drop=True)
all_reviews = data_df['Reviews']
all_sent_values = []
all_sentiments = []

def sentiment_value(paragraph):
    analyser = SentimentIntensityAnalyzer()
    result = analyser.polarity_scores(paragraph)
    score = result['compound']
    return round(score, 1)

for i in range(0, 20000):
    all_sent_values.append(sentiment_value(all_reviews[i]))
len(all_sent_values)
code
2012154/cell_51
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle
from nltk.sentiment.vader import SentimentIntensityAnalyzer

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
all_reviews = data_df['Reviews']
data_df = data_df.reset_index(drop=True)
all_reviews = data_df['Reviews']
all_sent_values = []
all_sentiments = []

def sentiment_value(paragraph):
    analyser = SentimentIntensityAnalyzer()
    result = analyser.polarity_scores(paragraph)
    score = result['compound']
    return round(score, 1)

for i in range(0, 20000):
    all_sent_values.append(sentiment_value(all_reviews[i]))
temp_data = data_df[0:20000]
temp_data.shape
# Map each rounded compound score to a 1-5 star-like scale plus a label.
SENTIMENT_VALUE = []
SENTIMENT = []
for i in range(0, 20000):
    sent = all_sent_values[i]
    if sent <= 1 and sent >= 0.5:
        SENTIMENT.append('V.Positive')
        SENTIMENT_VALUE.append(5)
    elif sent < 0.5 and sent > 0:
        SENTIMENT.append('Positive')
        SENTIMENT_VALUE.append(4)
    elif sent == 0:
        SENTIMENT.append('Neutral')
        SENTIMENT_VALUE.append(3)
    elif sent < 0 and sent >= -0.5:
        SENTIMENT.append('Negative')
        SENTIMENT_VALUE.append(2)
    else:
        SENTIMENT.append('V.Negative')
        SENTIMENT_VALUE.append(1)
temp_data['SENTIMENT_VALUE'] = SENTIMENT_VALUE
temp_data['SENTIMENT'] = SENTIMENT
code
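The same score-to-stars bucketing as above, factored into a small helper for clarity (identical thresholds and labels; the function name bucket is ours):

def bucket(sent):
    # Map a rounded VADER compound score to a (label, 1-5 stars) pair.
    if 0.5 <= sent <= 1:
        return ('V.Positive', 5)
    elif 0 < sent < 0.5:
        return ('Positive', 4)
    elif sent == 0:
        return ('Neutral', 3)
    elif -0.5 <= sent < 0:
        return ('Negative', 2)
    else:
        return ('V.Negative', 1)

print(bucket(0.6))   # ('V.Positive', 5)
print(bucket(-0.2))  # ('Negative', 2)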
2012154/cell_62
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
from nltk.sentiment.vader import SentimentIntensityAnalyzer

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
ylabel = data_df['Price']
xlabel = data_df['Rating']
ylabel2 = data_df['Price']
xlabel2 = data_df['Review Votes']
ylabel3 = data_df['Rating']
xlabel3 = data_df['Review Votes']
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
all_reviews = data_df['Reviews']
data_df = data_df.reset_index(drop=True)
all_reviews = data_df['Reviews']
all_sent_values = []
all_sentiments = []

def sentiment_value(paragraph):
    analyser = SentimentIntensityAnalyzer()
    result = analyser.polarity_scores(paragraph)
    score = result['compound']
    return round(score, 1)

for i in range(0, 20000):
    all_sent_values.append(sentiment_value(all_reviews[i]))
xaxis = []
for i in range(0, 20000):
    xaxis.append(i)
ylabel_new_1 = all_sent_values[:20000]
xlabel = xaxis
plt.figure(figsize=(9, 9))
plt.xlabel('ReviewIndex')
plt.ylabel('SentimentValue(-1 to 1)')
plt.plot(xlabel, ylabel_new_1, 'ro', alpha=0.04)
plt.title('Scatter Intensity Plot of Sentiments')
plt.show()
code
2012154/cell_59
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
temp_data = data_df[0:20000]
temp_data.shape
counter = 0
for i in range(0, 20000):
    if abs(temp_data['Rating'][i] - temp_data['SENTIMENT_VALUE'][i]) > 1:
        counter += 1
accuracy = (temp_data.shape[0] - counter) / temp_data.shape[0]
percent_accuracy = accuracy * 100
percent_accuracy
code
2012154/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
ylabel = data_df['Price']
xlabel = data_df['Rating']
ylabel2 = data_df['Price']
xlabel2 = data_df['Review Votes']
ylabel3 = data_df['Rating']
plt.ylabel('Rating')
xlabel3 = data_df['Review Votes']
plt.xlabel('Review Votes')
plt.scatter(xlabel3, ylabel3, alpha=0.1)
plt.show()
code
2012154/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df[:10]
code
2012154/cell_47
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
temp_data = data_df[0:20000]
temp_data.shape
code
2012154/cell_31
[ "image_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
code
2012154/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df.head()
code
2012154/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data['Brand Name']
brands = []
for item in data['Brand Name']:
    if item in brands:
        continue
    else:
        brands.append(item)
len(brands)
code
2012154/cell_71
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
info = pd.pivot_table(data_df, index=['Brand Name'], values=['Rating', 'Review Votes'], columns=[], aggfunc=[np.sum, np.mean], fill_value=0)
info = info.sort_values(by=('sum', 'Rating'), ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
temp_data = data_df[0:20000]
temp_data.shape
counter = 0
for i in range(0, 20000):
    if abs(temp_data['Rating'][i] - temp_data['SENTIMENT_VALUE'][i]) > 1:
        counter += 1
accuracy = (temp_data.shape[0] - counter) / temp_data.shape[0]
testing2 = pd.pivot_table(temp_data, index=['Brand Name'], values=['Rating', 'Review Votes', 'SENTIMENT_VALUE'], columns=[], aggfunc=[np.sum, np.mean], fill_value=0)
testing2 = testing2.sort_values(by=('sum', 'Rating'), ascending=False)
testing2.head(10)
code
2012154/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data.head()
code
2012154/cell_36
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.utils import shuffle

data_file = '../input/Amazon_Unlocked_Mobile.csv'
data = pd.read_csv(data_file)
data_df = pd.DataFrame(data)
data_df = shuffle(data_df)
data_df = data_df.dropna()
corr_matrix = data_df.corr()
corr_matrix['Rating'].sort_values(ascending=False)
corr_matrix = data_df.corr()
corr_matrix['Price'].sort_values(ascending=False)
data_df = data_df.reset_index(drop=True)
data_df.head()
code
2019757/cell_4
[ "text_plain_output_1.png" ]
# df (university rankings data) is assumed to be loaded in an earlier cell
# that is not part of this dump.
df.groupby(['country'])['quality_of_education'].mean()
a = df.groupby(['country'])['quality_of_education'].mean()
a.sort_values(ascending=False)
code
2019757/cell_6
[ "text_plain_output_1.png" ]
df.groupby(['country'])['quality_of_education'].mean()
a = df.groupby(['country'])['quality_of_education'].mean()
a.sort_values(ascending=False)
# Number of ranked universities per country ('koolid' is Estonian for 'schools').
koolid = df.groupby(['country'])['year'].count()
koolid.sort_values(ascending=False)
a2015 = df[df['year'] == 2015]
koolid2015 = a2015.groupby(['country'])['year'].count()
koolid2015.sort_values(ascending=False)
code
2019757/cell_2
[ "text_html_output_1.png" ]
df[df['country'] == 'Estonia']
code
2019757/cell_3
[ "text_plain_output_1.png" ]
df.groupby(['country'])['quality_of_education'].mean()
code
2019757/cell_5
[ "text_plain_output_1.png" ]
df.groupby(['country'])['quality_of_education'].mean()
a = df.groupby(['country'])['quality_of_education'].mean()
a.sort_values(ascending=False)
koolid = df.groupby(['country'])['year'].count()
koolid.sort_values(ascending=False)
code
17122451/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/matchesheader.csv')
list(train_df.columns.values)
train_df.isna().sum()
code
17122451/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/matchesheader.csv')
train_df.head()
code
17122451/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
17122451/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/matchesheader.csv')
list(train_df.columns.values)
code
17122451/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import LabelEncoder

train_df = pd.read_csv('../input/matchesheader.csv')
list(train_df.columns.values)
train_df.isna().sum()
# Integer-encode the team-name columns; note the encoder is refit for each
# column, so the codes are only consistent within a column.
labelEncoder = LabelEncoder()
train_df.team_1_name = labelEncoder.fit_transform(train_df.team_1_name)
train_df.team_2_name = labelEncoder.fit_transform(train_df.team_2_name)
train_df.head()
code
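If the two columns should instead share one encoding (so the same team maps to the same integer in both), a sketch fitting a single encoder on their union, continuing from train_df as loaded in the cell above:

import pandas as pd
from sklearn.preprocessing import LabelEncoder

# One shared vocabulary built from both team-name columns.
enc = LabelEncoder().fit(pd.concat([train_df.team_1_name, train_df.team_2_name]))
train_df.team_1_name = enc.transform(train_df.team_1_name)
train_df.team_2_name = enc.transform(train_df.team_2_name)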
89133672/cell_9
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/top-100-cryptocurrency-2022/Top 100 Cryptocurrency 2022.csv')
df.columns = ['ranking', 'name', 'price', 'changes_24h', 'changes_7d', 'changes_30d', 'changes_1y', 'market_cap', 'volume', 'supply']
df.shape
code
89133672/cell_29
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/top-100-cryptocurrency-2022/Top 100 Cryptocurrency 2022.csv')
df.columns = ['ranking', 'name', 'price', 'changes_24h', 'changes_7d', 'changes_30d', 'changes_1y', 'market_cap', 'volume', 'supply']
df.shape
df.isnull().sum()
# Keep only market-cap strings with a recognised suffix before converting.
blacklist_index = df[~df.market_cap.str.contains('B|M|C')].index
market_cap = df.market_cap.drop(blacklist_index)

def market_cap_conv(price: str):
    # Convert strings like '$1.2 B' to a float number of dollars.
    sym_dict = {'B': 1000000000, 'M': 1000000, 'K': 1000}
    price, symbol = price.replace('$', '').split(' ')
    price = float(price)
    price *= sym_dict[symbol]
    return price

market_cap = market_cap.apply(market_cap_conv)
top_market_cap = pd.DataFrame(data={'name': df.name, 'market_cap': market_cap}).sort_values('market_cap', ascending=False)[1:21]
# Volumes without a B/M/K suffix are treated as millions.
volume_cln_index = df[~df.volume.str.contains('B|M|K', regex=True)].index
df.loc[volume_cln_index, 'volume'] = df.volume[volume_cln_index].str.replace('$', '', regex=False) + 'M'

def volume_cap_conv(price: str):
    sym_dict = {'B': 1000000000, 'M': 1000000, 'K': 1000}
    price, symbol = price.replace('$', '').split(' ')[:2]
    price = float(price)
    price *= sym_dict[symbol]
    return price

df.volume = df.volume.apply(volume_cap_conv)
top_volume_coins = df.sort_values('volume', ascending=False)[:20]
# Count entries per percentage-change column that lack a '%' sign.
df[['changes_24h', 'changes_7d', 'changes_30d', 'changes_1y']].apply(lambda col: ~col.str.contains('%')).sum()
code
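A quick check of the conversion helper above, with no assumptions beyond the '$<number> <suffix>' format it already expects:

print(market_cap_conv('$1.2 B'))    # 1200000000.0
print(market_cap_conv('$350.5 M'))  # 350500000.0
print(market_cap_conv('$42 K'))     # 42000.0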
89133672/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/top-100-cryptocurrency-2022/Top 100 Cryptocurrency 2022.csv')
df.head()
code
89133672/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

TITLE_SIZE = 20
TITLE_PAD = 15
LABELE_SIZE = 15
LABELE_PAD = 10
df = pd.read_csv('../input/top-100-cryptocurrency-2022/Top 100 Cryptocurrency 2022.csv')
df.columns = ['ranking', 'name', 'price', 'changes_24h', 'changes_7d', 'changes_30d', 'changes_1y', 'market_cap', 'volume', 'supply']
df.shape
df.isnull().sum()
top_price_alts = df[['name', 'price']][3:23]
sns.set_theme()
fig, ax = plt.subplots(figsize=(11, 9))
sns.barplot(ax=ax, x='price', y='name', data=top_price_alts)
ax.set_title('Top 20 Altcoins by Price', fontsize=TITLE_SIZE, pad=TITLE_PAD)
ax.set_ylabel('Crypto Name', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
ax.set_xlabel('Price', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
plt.show()
blacklist_index = df[~df.market_cap.str.contains('B|M|C')].index
market_cap = df.market_cap.drop(blacklist_index)

def market_cap_conv(price: str):
    sym_dict = {'B': 1000000000, 'M': 1000000, 'K': 1000}
    price, symbol = price.replace('$', '').split(' ')
    price = float(price)
    price *= sym_dict[symbol]
    return price

market_cap = market_cap.apply(market_cap_conv)
top_market_cap = pd.DataFrame(data={'name': df.name, 'market_cap': market_cap}).sort_values('market_cap', ascending=False)[1:21]
fig, ax = plt.subplots(figsize=(11, 8))
sns.barplot(ax=ax, x='market_cap', y='name', data=top_market_cap, palette='pastel')
ax.set_title('Top 20 Altcoins by Market Cap', fontsize=TITLE_SIZE, pad=TITLE_PAD)
ax.set_xlabel('Market Cap in bn', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
ax.set_ylabel('Crypto Name', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
ax.xaxis.set_major_formatter(lambda x, pos: f'{int(x)/1_000_000_000}B')
plt.show()
volume_cln_index = df[~df.volume.str.contains('B|M|K', regex=True)].index
df.loc[volume_cln_index, 'volume'] = df.volume[volume_cln_index].str.replace('$', '', regex=False) + 'M'

def volume_cap_conv(price: str):
    sym_dict = {'B': 1000000000, 'M': 1000000, 'K': 1000}
    price, symbol = price.replace('$', '').split(' ')[:2]
    price = float(price)
    price *= sym_dict[symbol]
    return price

df.volume = df.volume.apply(volume_cap_conv)
top_volume_coins = df.sort_values('volume', ascending=False)[:20]
fig, ax = plt.subplots(figsize=(11, 8))
palette = sns.color_palette('husl', 6)
sns.barplot(ax=ax, x='volume', y='name', data=top_volume_coins, palette=palette)
ax.set_title('Top 20 Coins by Volume 24H', fontsize=TITLE_SIZE, pad=TITLE_PAD)
ax.set_xlabel('Volume', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
ax.set_ylabel('Crypto Name', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
ax.xaxis.set_major_formatter(lambda x, pos: f'{float(x) / 1000000000}B')
plt.show()
code
89133672/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

TITLE_SIZE = 20
TITLE_PAD = 15
LABELE_SIZE = 15
LABELE_PAD = 10
df = pd.read_csv('../input/top-100-cryptocurrency-2022/Top 100 Cryptocurrency 2022.csv')
df.columns = ['ranking', 'name', 'price', 'changes_24h', 'changes_7d', 'changes_30d', 'changes_1y', 'market_cap', 'volume', 'supply']
df.shape
df.isnull().sum()
top_price_alts = df[['name', 'price']][3:23]
sns.set_theme()
fig, ax = plt.subplots(figsize=(11, 9))
sns.barplot(ax=ax, x='price', y='name', data=top_price_alts)
ax.set_title('Top 20 Altcoins by Price', fontsize=TITLE_SIZE, pad=TITLE_PAD)
ax.set_ylabel('Crypto Name', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
ax.set_xlabel('Price', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
plt.show()
code
89133672/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

TITLE_SIZE = 20
TITLE_PAD = 15
LABELE_SIZE = 15
LABELE_PAD = 10
df = pd.read_csv('../input/top-100-cryptocurrency-2022/Top 100 Cryptocurrency 2022.csv')
df.columns = ['ranking', 'name', 'price', 'changes_24h', 'changes_7d', 'changes_30d', 'changes_1y', 'market_cap', 'volume', 'supply']
df.shape
df.isnull().sum()
top_price_alts = df[['name', 'price']][3:23]
sns.set_theme()
fig, ax = plt.subplots(figsize=(11, 9))
sns.barplot(ax=ax, x='price', y='name', data=top_price_alts)
ax.set_title('Top 20 Altcoins by Price', fontsize=TITLE_SIZE, pad=TITLE_PAD)
ax.set_ylabel('Crypto Name', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
ax.set_xlabel('Price', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
plt.show()
blacklist_index = df[~df.market_cap.str.contains('B|M|C')].index
market_cap = df.market_cap.drop(blacklist_index)

def market_cap_conv(price: str):
    sym_dict = {'B': 1000000000, 'M': 1000000, 'K': 1000}
    price, symbol = price.replace('$', '').split(' ')
    price = float(price)
    price *= sym_dict[symbol]
    return price

market_cap = market_cap.apply(market_cap_conv)
top_market_cap = pd.DataFrame(data={'name': df.name, 'market_cap': market_cap}).sort_values('market_cap', ascending=False)[1:21]
fig, ax = plt.subplots(figsize=(11, 8))
sns.barplot(ax=ax, x='market_cap', y='name', data=top_market_cap, palette='pastel')
ax.set_title('Top 20 Altcoins by Market Cap', fontsize=TITLE_SIZE, pad=TITLE_PAD)
ax.set_xlabel('Market Cap in bn', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
ax.set_ylabel('Crypto Name', fontdict={'fontsize': LABELE_SIZE}, labelpad=LABELE_PAD)
ax.xaxis.set_major_formatter(lambda x, pos: f'{int(x) / 1000000000}B')
plt.show()
code
89133672/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/top-100-cryptocurrency-2022/Top 100 Cryptocurrency 2022.csv')
df.columns = ['ranking', 'name', 'price', 'changes_24h', 'changes_7d', 'changes_30d', 'changes_1y', 'market_cap', 'volume', 'supply']
df.shape
df.isnull().sum()
code
34121718/cell_63
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd

orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
# Percentage of missing values per column, table by table.
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
round(payment_data.isnull().sum() / len(payment_data) * 100, 2)
round(review_data.isnull().sum() / len(review_data) * 100, 2)
round(items_data.isnull().sum() / len(items_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(customers_data.isnull().sum() / len(customers_data) * 100, 2)
round(sellers_data.isnull().sum() / len(sellers_data) * 100, 2)
round(product_trans_data.isnull().sum() / len(product_trans_data) * 100, 2)
orders_data['order_purchase_timestamp'] = pd.to_datetime(orders_data['order_purchase_timestamp'])
orders_data['order_approved_at'] = pd.to_datetime(orders_data['order_approved_at'])
orders_data['order_delivered_carrier_date'] = pd.to_datetime(orders_data['order_delivered_carrier_date'])
orders_data['order_delivered_customer_date'] = pd.to_datetime(orders_data['order_delivered_customer_date'])
orders_data['order_estimated_delivery_date'] = pd.to_datetime(orders_data['order_estimated_delivery_date'])
# Impute missing carrier dates by shifting the purchase date by the mean
# purchase-to-carrier lag observed on complete rows.
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.dropna(inplace=True)
orders_data.isnull().sum()
review_data.isnull().sum()
review_data.drop(columns=['review_comment_title', 'review_comment_message'], inplace=True)
review_data.isnull().sum()
product_data.dropna(inplace=True)
product_data.isnull().sum()
orders_data.duplicated(['order_id']).sum()
items_data.duplicated(['order_id']).sum()
payment_data.duplicated(['order_id']).sum()
review_data.duplicated(['order_id']).sum()
customers_data.duplicated(['customer_id']).sum()
# Join everything into a single order-level frame, then dedupe on order_id.
data_merge = pd.merge(orders_data, items_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, payment_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, review_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, product_data, on='product_id', how='inner')
data_merge = pd.merge(data_merge, customers_data, on='customer_id', how='inner')
data_merge = pd.merge(data_merge, sellers_data, on='seller_id', how='inner')
data_merge = pd.merge(data_merge, product_trans_data, on='product_category_name', how='inner')
Df_ecommerce = data_merge.drop_duplicates(['order_id'])
Df_ecommerce.isnull().sum()
Df_ecommerce
code
34121718/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(product_data.isnull().sum() / len(product_data) * 100, 2)
code
34121718/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
code
34121718/cell_25
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
# Impute missing carrier dates by shifting the purchase date by the mean
# purchase-to-carrier lag observed on complete rows.
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
code
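The replace(np.nan, added_date) call above relies on pandas accepting a Series as the replacement value. The more conventional, index-aligned way to express the same imputation is fillna; a one-line sketch using the same names as the cell above:

orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].fillna(added_date)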
34121718/cell_56
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # command-style plotting functions
import numpy as np
import pandas as pd
import seaborn as sns  # statistical data visualization

orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
round(payment_data.isnull().sum() / len(payment_data) * 100, 2)
round(review_data.isnull().sum() / len(review_data) * 100, 2)
round(items_data.isnull().sum() / len(items_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(customers_data.isnull().sum() / len(customers_data) * 100, 2)
round(sellers_data.isnull().sum() / len(sellers_data) * 100, 2)
round(product_trans_data.isnull().sum() / len(product_trans_data) * 100, 2)
orders_data['order_purchase_timestamp'] = pd.to_datetime(orders_data['order_purchase_timestamp'])
orders_data['order_approved_at'] = pd.to_datetime(orders_data['order_approved_at'])
orders_data['order_delivered_carrier_date'] = pd.to_datetime(orders_data['order_delivered_carrier_date'])
orders_data['order_delivered_customer_date'] = pd.to_datetime(orders_data['order_delivered_customer_date'])
orders_data['order_estimated_delivery_date'] = pd.to_datetime(orders_data['order_estimated_delivery_date'])
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.dropna(inplace=True)
orders_data.isnull().sum()
review_data.isnull().sum()
review_data.drop(columns=['review_comment_title', 'review_comment_message'], inplace=True)
review_data.isnull().sum()
product_data.dropna(inplace=True)
product_data.isnull().sum()
orders_data.duplicated(['order_id']).sum()
items_data.duplicated(['order_id']).sum()
payment_data.duplicated(['order_id']).sum()
review_data.duplicated(['order_id']).sum()
customers_data.duplicated(['customer_id']).sum()
data_merge = pd.merge(orders_data, items_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, payment_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, review_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, product_data, on='product_id', how='inner')
data_merge = pd.merge(data_merge, customers_data, on='customer_id', how='inner')
data_merge = pd.merge(data_merge, sellers_data, on='seller_id', how='inner')
data_merge = pd.merge(data_merge, product_trans_data, on='product_category_name', how='inner')
Df_ecommerce = data_merge.drop_duplicates(['order_id'])
Df_ecommerce.isnull().sum()
# Total revenue by (translated) product category, top 20.
Df_top20prod_rev = Df_ecommerce['price'].groupby(Df_ecommerce['product_category_name_english']).sum().sort_values(ascending=False)[:20]
Df_top20prod_rev
fig = plt.figure(figsize=(16, 10))
sns.barplot(y=Df_top20prod_rev.index, x=Df_top20prod_rev.values)
plt.title('Top 20 product categories with the largest revenue', fontsize=20)
plt.xlabel('Total revenue', fontsize=17)
plt.ylabel('Product category', fontsize=17)
code
34121718/cell_34
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.dropna(inplace=True)
orders_data.isnull().sum()
code
34121718/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
code
34121718/cell_44
[ "text_plain_output_1.png" ]
import pandas as pd

orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(items_data.isnull().sum() / len(items_data) * 100, 2)
items_data.duplicated(['order_id']).sum()
code
34121718/cell_55
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
round(payment_data.isnull().sum() / len(payment_data) * 100, 2)
round(review_data.isnull().sum() / len(review_data) * 100, 2)
round(items_data.isnull().sum() / len(items_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(customers_data.isnull().sum() / len(customers_data) * 100, 2)
round(sellers_data.isnull().sum() / len(sellers_data) * 100, 2)
round(product_trans_data.isnull().sum() / len(product_trans_data) * 100, 2)
orders_data['order_purchase_timestamp'] = pd.to_datetime(orders_data['order_purchase_timestamp'])
orders_data['order_approved_at'] = pd.to_datetime(orders_data['order_approved_at'])
orders_data['order_delivered_carrier_date'] = pd.to_datetime(orders_data['order_delivered_carrier_date'])
orders_data['order_delivered_customer_date'] = pd.to_datetime(orders_data['order_delivered_customer_date'])
orders_data['order_estimated_delivery_date'] = pd.to_datetime(orders_data['order_estimated_delivery_date'])
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.dropna(inplace=True)
orders_data.isnull().sum()
review_data.isnull().sum()
review_data.drop(columns=['review_comment_title', 'review_comment_message'], inplace=True)
review_data.isnull().sum()
product_data.dropna(inplace=True)
product_data.isnull().sum()
orders_data.duplicated(['order_id']).sum()
items_data.duplicated(['order_id']).sum()
payment_data.duplicated(['order_id']).sum()
review_data.duplicated(['order_id']).sum()
customers_data.duplicated(['customer_id']).sum()
data_merge = pd.merge(orders_data, items_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, payment_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, review_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, product_data, on='product_id', how='inner')
data_merge = pd.merge(data_merge, customers_data, on='customer_id', how='inner')
data_merge = pd.merge(data_merge, sellers_data, on='seller_id', how='inner')
data_merge = pd.merge(data_merge, product_trans_data, on='product_category_name', how='inner')
Df_ecommerce = data_merge.drop_duplicates(['order_id'])
Df_ecommerce.isnull().sum()
Df_top20prod_rev = Df_ecommerce['price'].groupby(Df_ecommerce['product_category_name_english']).sum().sort_values(ascending=False)[:20]
Df_top20prod_rev
code
34121718/cell_29
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
# Rows with a known customer delivery date; the original filtered on the
# carrier date and then never used orders_data_2 (apparently a copy-paste slip).
orders_data_2 = orders_data[orders_data['order_delivered_customer_date'].notnull()]
mean_deliver = (orders_data_2['order_purchase_timestamp'] - orders_data_2['order_delivered_customer_date']).mean()
mean_deliver
added_date = orders_data[orders_data['order_delivered_customer_date'].isnull()]['order_purchase_timestamp'] - mean_deliver
added_date
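# --- illustrative aside, not part of the original notebook ---
# added_date is indexed by the rows whose delivery date is missing. Passing a
# Series as the replacement value to replace() is fragile; fillna(), which
# aligns on the index, is the usual idiom for this kind of imputation
# (a suggested alternative, not the author's code):
# orders_data['order_delivered_customer_date'].fillna(added_date)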
code
34121718/cell_26
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
code
34121718/cell_65
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt  # collection of command style functions that make matplotlib work
import numpy as np
import pandas as pd
import seaborn as sns  # statistical data visualization.
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
round(payment_data.isnull().sum() / len(payment_data) * 100, 2)
round(review_data.isnull().sum() / len(review_data) * 100, 2)
round(items_data.isnull().sum() / len(items_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(customers_data.isnull().sum() / len(customers_data) * 100, 2)
round(sellers_data.isnull().sum() / len(sellers_data) * 100, 2)
round(product_trans_data.isnull().sum() / len(product_trans_data) * 100, 2)
orders_data['order_purchase_timestamp'] = pd.to_datetime(orders_data['order_purchase_timestamp'])
orders_data['order_approved_at'] = pd.to_datetime(orders_data['order_approved_at'])
orders_data['order_delivered_carrier_date'] = pd.to_datetime(orders_data['order_delivered_carrier_date'])
orders_data['order_delivered_customer_date'] = pd.to_datetime(orders_data['order_delivered_customer_date'])
orders_data['order_estimated_delivery_date'] = pd.to_datetime(orders_data['order_estimated_delivery_date'])
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.dropna(inplace=True)
orders_data.isnull().sum()
review_data.isnull().sum()
review_data.drop(columns=['review_comment_title', 'review_comment_message'], inplace=True)
review_data.isnull().sum()
product_data.dropna(inplace=True)
product_data.isnull().sum()
orders_data.duplicated(['order_id']).sum()
items_data.duplicated(['order_id']).sum()
payment_data.duplicated(['order_id']).sum()
review_data.duplicated(['order_id']).sum()
customers_data.duplicated(['customer_id']).sum()
data_merge = pd.merge(orders_data, items_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, payment_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, review_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, product_data, on='product_id', how='inner')
data_merge = pd.merge(data_merge, customers_data, on='customer_id', how='inner')
data_merge = pd.merge(data_merge, sellers_data, on='seller_id', how='inner')
data_merge = pd.merge(data_merge, product_trans_data, on='product_category_name', how='inner')
Df_ecommerce = data_merge.drop_duplicates(['order_id'])
Df_ecommerce.isnull().sum()
Df_top20prod_rev = Df_ecommerce['price'].groupby(Df_ecommerce['product_category_name_english']).sum().sort_values(ascending=False)[:20]
Df_top20prod_rev
fig = plt.figure(figsize=(16, 10))
sns.barplot(y=Df_top20prod_rev.index, x=Df_top20prod_rev.values)
plt.title('Top 20 product categories with the largest revenue', fontsize=20)
plt.xlabel('Total revenue', fontsize=17)
plt.ylabel('Product category', fontsize=17)
Df_top20prod_rev
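# --- illustrative aside, not part of the original notebook ---
# The grouped sum above can be written more compactly; this is equivalent and
# shown only for comparison:
# Df_ecommerce.groupby('product_category_name_english')['price'].sum().nlargest(20)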
code
34121718/cell_41
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
product_data.dropna(inplace=True)
product_data.isnull().sum()
code
34121718/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(review_data.isnull().sum() / len(review_data) * 100, 2)
code
34121718/cell_45
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(payment_data.isnull().sum() / len(payment_data) * 100, 2)
payment_data.duplicated(['order_id']).sum()
code
34121718/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(product_trans_data.isnull().sum() / len(product_trans_data) * 100, 2)
code
34121718/cell_51
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
round(payment_data.isnull().sum() / len(payment_data) * 100, 2)
round(review_data.isnull().sum() / len(review_data) * 100, 2)
round(items_data.isnull().sum() / len(items_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(customers_data.isnull().sum() / len(customers_data) * 100, 2)
round(sellers_data.isnull().sum() / len(sellers_data) * 100, 2)
round(product_trans_data.isnull().sum() / len(product_trans_data) * 100, 2)
orders_data['order_purchase_timestamp'] = pd.to_datetime(orders_data['order_purchase_timestamp'])
orders_data['order_approved_at'] = pd.to_datetime(orders_data['order_approved_at'])
orders_data['order_delivered_carrier_date'] = pd.to_datetime(orders_data['order_delivered_carrier_date'])
orders_data['order_delivered_customer_date'] = pd.to_datetime(orders_data['order_delivered_customer_date'])
orders_data['order_estimated_delivery_date'] = pd.to_datetime(orders_data['order_estimated_delivery_date'])
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.dropna(inplace=True)
orders_data.isnull().sum()
review_data.isnull().sum()
review_data.drop(columns=['review_comment_title', 'review_comment_message'], inplace=True)
review_data.isnull().sum()
product_data.dropna(inplace=True)
product_data.isnull().sum()
orders_data.duplicated(['order_id']).sum()
items_data.duplicated(['order_id']).sum()
payment_data.duplicated(['order_id']).sum()
review_data.duplicated(['order_id']).sum()
customers_data.duplicated(['customer_id']).sum()
data_merge = pd.merge(orders_data, items_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, payment_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, review_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, product_data, on='product_id', how='inner')
data_merge = pd.merge(data_merge, customers_data, on='customer_id', how='inner')
data_merge = pd.merge(data_merge, sellers_data, on='seller_id', how='inner')
data_merge = pd.merge(data_merge, product_trans_data, on='product_category_name', how='inner')
Df_ecommerce = data_merge.drop_duplicates(['order_id'])
Df_ecommerce.isnull().sum()
code
34121718/cell_62
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
round(payment_data.isnull().sum() / len(payment_data) * 100, 2)
round(review_data.isnull().sum() / len(review_data) * 100, 2)
round(items_data.isnull().sum() / len(items_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(customers_data.isnull().sum() / len(customers_data) * 100, 2)
round(sellers_data.isnull().sum() / len(sellers_data) * 100, 2)
round(product_trans_data.isnull().sum() / len(product_trans_data) * 100, 2)
orders_data['order_purchase_timestamp'] = pd.to_datetime(orders_data['order_purchase_timestamp'])
orders_data['order_approved_at'] = pd.to_datetime(orders_data['order_approved_at'])
orders_data['order_delivered_carrier_date'] = pd.to_datetime(orders_data['order_delivered_carrier_date'])
orders_data['order_delivered_customer_date'] = pd.to_datetime(orders_data['order_delivered_customer_date'])
orders_data['order_estimated_delivery_date'] = pd.to_datetime(orders_data['order_estimated_delivery_date'])
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
# Rows with a known customer delivery date; the original filtered on the
# carrier date and then never used orders_data_2 (apparently a copy-paste slip).
orders_data_2 = orders_data[orders_data['order_delivered_customer_date'].notnull()]
mean_deliver = (orders_data_2['order_purchase_timestamp'] - orders_data_2['order_delivered_customer_date']).mean()
mean_deliver
added_date = orders_data[orders_data['order_delivered_customer_date'].isnull()]['order_purchase_timestamp'] - mean_deliver
added_date
orders_data['order_delivered_customer_date'] = orders_data['order_delivered_customer_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.dropna(inplace=True)
orders_data.isnull().sum()
review_data.isnull().sum()
review_data.drop(columns=['review_comment_title', 'review_comment_message'], inplace=True)
review_data.isnull().sum()
product_data.dropna(inplace=True)
product_data.isnull().sum()
orders_data.duplicated(['order_id']).sum()
items_data.duplicated(['order_id']).sum()
payment_data.duplicated(['order_id']).sum()
review_data.duplicated(['order_id']).sum()
customers_data.duplicated(['customer_id']).sum()
data_merge = pd.merge(orders_data, items_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, payment_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, review_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, product_data, on='product_id', how='inner')
data_merge = pd.merge(data_merge, customers_data, on='customer_id', how='inner')
data_merge = pd.merge(data_merge, sellers_data, on='seller_id', how='inner')
data_merge = pd.merge(data_merge, product_trans_data, on='product_category_name', how='inner')
# .copy() avoids SettingWithCopyWarning on the column assignments below
Df_ecommerce = data_merge.drop_duplicates(['order_id']).copy()
Df_ecommerce.isnull().sum()
Df_ecommerce['Expecting_Delivery'] = Df_ecommerce['order_estimated_delivery_date'] - Df_ecommerce['order_purchase_timestamp']
Df_ecommerce['Real_Delivery'] = Df_ecommerce['order_delivered_customer_date'] - Df_ecommerce['order_purchase_timestamp']
Df_ecommerce['Real_Delivery_hour'] = (Df_ecommerce['Real_Delivery'] / np.timedelta64(1, 'h')).round(2)
Df_ecommerce['Expecting_delivery_hour'] = (Df_ecommerce['Expecting_Delivery'] / np.timedelta64(1, 'h')).round(2)
Df_ecommerce['delivery_evaluate'] = round((2 * Df_ecommerce['Expecting_Delivery'] - Df_ecommerce['Real_Delivery']) / Df_ecommerce['Expecting_Delivery'] * 100, 2)
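# --- illustrative aside, not part of the original notebook ---
# delivery_evaluate rearranges to 100 + 100 * (Expecting - Real) / Expecting:
# exactly 100 means delivery on the estimated date, above 100 means early,
# below 100 means late. E.g. Real_Delivery at half the estimate gives
# (2 * E - 0.5 * E) / E * 100 = 150.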
code
34121718/cell_59
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt  # collection of command style functions that make matplotlib work
import numpy as np
import pandas as pd
import seaborn as sns  # statistical data visualization.
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
round(payment_data.isnull().sum() / len(payment_data) * 100, 2)
round(review_data.isnull().sum() / len(review_data) * 100, 2)
round(items_data.isnull().sum() / len(items_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(customers_data.isnull().sum() / len(customers_data) * 100, 2)
round(sellers_data.isnull().sum() / len(sellers_data) * 100, 2)
round(product_trans_data.isnull().sum() / len(product_trans_data) * 100, 2)
orders_data['order_purchase_timestamp'] = pd.to_datetime(orders_data['order_purchase_timestamp'])
orders_data['order_approved_at'] = pd.to_datetime(orders_data['order_approved_at'])
orders_data['order_delivered_carrier_date'] = pd.to_datetime(orders_data['order_delivered_carrier_date'])
orders_data['order_delivered_customer_date'] = pd.to_datetime(orders_data['order_delivered_customer_date'])
orders_data['order_estimated_delivery_date'] = pd.to_datetime(orders_data['order_estimated_delivery_date'])
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.dropna(inplace=True)
orders_data.isnull().sum()
review_data.isnull().sum()
review_data.drop(columns=['review_comment_title', 'review_comment_message'], inplace=True)
review_data.isnull().sum()
product_data.dropna(inplace=True)
product_data.isnull().sum()
orders_data.duplicated(['order_id']).sum()
items_data.duplicated(['order_id']).sum()
payment_data.duplicated(['order_id']).sum()
review_data.duplicated(['order_id']).sum()
customers_data.duplicated(['customer_id']).sum()
data_merge = pd.merge(orders_data, items_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, payment_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, review_data, on='order_id', how='inner')
data_merge = pd.merge(data_merge, product_data, on='product_id', how='inner')
data_merge = pd.merge(data_merge, customers_data, on='customer_id', how='inner')
data_merge = pd.merge(data_merge, sellers_data, on='seller_id', how='inner')
data_merge = pd.merge(data_merge, product_trans_data, on='product_category_name', how='inner')
Df_ecommerce = data_merge.drop_duplicates(['order_id'])
Df_ecommerce.isnull().sum()
Df_top20prod_rev = Df_ecommerce['price'].groupby(Df_ecommerce['product_category_name_english']).sum().sort_values(ascending=False)[:20]
Df_top20prod_rev
fig = plt.figure(figsize=(16, 10))
sns.barplot(y=Df_top20prod_rev.index, x=Df_top20prod_rev.values)
plt.title('Top 20 product categories with the largest revenue', fontsize=20)
plt.xlabel('Total revenue', fontsize=17)
plt.ylabel('Product category', fontsize=17)
Df_top20prod_numsell = Df_ecommerce['order_id'].groupby(Df_ecommerce['product_category_name_english']).count().sort_values(ascending=False)[:20]
fig = plt.figure(figsize=(16, 10))
sns.barplot(y=Df_top20prod_numsell.index, x=Df_top20prod_numsell.values)
plt.title('Top 20 product categories with the largest number of sales', fontsize=20)
plt.xlabel('Number of sales', fontsize=17)
plt.ylabel('Product category', fontsize=17)
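# --- illustrative aside, not part of the original notebook ---
# Because Df_ecommerce is deduplicated on order_id, count() here is the number
# of distinct orders per category; value_counts() would give the same result:
# Df_ecommerce['product_category_name_english'].value_counts()[:20]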
code
34121718/cell_28
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
# Rows with a known customer delivery date; the original filtered on the
# carrier date and then never used orders_data_2 (apparently a copy-paste slip).
orders_data_2 = orders_data[orders_data['order_delivered_customer_date'].notnull()]
mean_deliver = (orders_data_2['order_purchase_timestamp'] - orders_data_2['order_delivered_customer_date']).mean()
mean_deliver
code
34121718/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
code
34121718/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(customers_data.isnull().sum() / len(customers_data) * 100, 2)
code
34121718/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(sellers_data.isnull().sum() / len(sellers_data) * 100, 2)
code
34121718/cell_38
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(review_data.isnull().sum() / len(review_data) * 100, 2)
review_data.isnull().sum()
review_data.drop(columns=['review_comment_title', 'review_comment_message'], inplace=True)
review_data.isnull().sum()
code
34121718/cell_47
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(customers_data.isnull().sum() / len(customers_data) * 100, 2)
customers_data.duplicated(['customer_id']).sum()
code
34121718/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(geoloca_data.isnull().sum() / len(geoloca_data) * 100, 2)
code
34121718/cell_43
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.dropna(inplace=True)
orders_data.isnull().sum()
orders_data.duplicated(['order_id']).sum()
code
34121718/cell_31
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
orders_data['order_delivered_carrier_date'] = orders_data['order_delivered_carrier_date'].replace(np.nan, added_date)
orders_data.isnull().sum()
orders_data.isnull().sum()
orders_data.isnull().sum()
code
34121718/cell_46
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(review_data.isnull().sum() / len(review_data) * 100, 2)
review_data.isnull().sum()
review_data.drop(columns=['review_comment_title', 'review_comment_message'], inplace=True)
review_data.isnull().sum()
review_data.duplicated(['order_id']).sum()
code
34121718/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(orders_data.isnull().sum() / len(orders_data) * 100, 2)
orders_data.order_purchase_timestamp
orders_data_1 = orders_data[orders_data['order_delivered_carrier_date'].notnull()]
miss_carrier = (orders_data_1['order_purchase_timestamp'] - orders_data_1['order_delivered_carrier_date']).mean()
miss_carrier
added_date = orders_data[orders_data['order_delivered_carrier_date'].isnull()]['order_purchase_timestamp'] - miss_carrier
added_date
code
34121718/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(product_data.isnull().sum() / len(product_data) * 100, 2)
round(product_data.isnull().sum() / len(product_data) * 100, 2)
code
34121718/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(payment_data.isnull().sum() / len(payment_data) * 100, 2)
code
34121718/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(items_data.isnull().sum() / len(items_data) * 100, 2)
code
34121718/cell_36
[ "text_plain_output_1.png" ]
import pandas as pd
orders_data = pd.read_csv('../input/brazilian-ecommerce/olist_orders_dataset.csv')
payment_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_payments_dataset.csv')
review_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_reviews_dataset.csv')
items_data = pd.read_csv('../input/brazilian-ecommerce/olist_order_items_dataset.csv')
product_data = pd.read_csv('../input/brazilian-ecommerce/olist_products_dataset.csv')
customers_data = pd.read_csv('../input/brazilian-ecommerce/olist_customers_dataset.csv')
sellers_data = pd.read_csv('../input/brazilian-ecommerce/olist_sellers_dataset.csv')
product_trans_data = pd.read_csv('../input/brazilian-ecommerce/product_category_name_translation.csv')
geoloca_data = pd.read_csv('../input/brazilian-ecommerce/olist_geolocation_dataset.csv')
round(review_data.isnull().sum() / len(review_data) * 100, 2)
review_data.isnull().sum()
code
2033003/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
haberman = pd.read_csv('../input/haberman.csv')
print(haberman.columns)
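# --- illustrative aside, not part of the original notebook ---
# haberman.csv is often distributed without a header row, so the first data
# row may appear as the column names above. If so, explicit names can be
# supplied (an assumption about this file, not verified here):
# haberman = pd.read_csv('../input/haberman.csv', header=None,
#                        names=['age', 'operation_year', 'axillary_nodes', 'survival_status'])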
code
2033003/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
haberman = pd.read_csv('../input/haberman.csv')
haberman.plot()
plt.show()
code
2033003/cell_2
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
haberman = pd.read_csv('../input/haberman.csv')
haberman.head()
code
2033003/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2033003/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
haberman = pd.read_csv('../input/haberman.csv')
haberman.hist()
plt.show()
code