Dataset columns:
path — string (length 13 to 17)
screenshot_names — sequence of strings (length 1 to 873)
code — string (length 0 to 40.4k)
cell_type — string (1 distinct value: "code")
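A minimal sketch of how records with this schema could be inspected. The JSON Lines export format and the file name "cells.jsonl" are assumptions, not part of the dataset description above; the "<notebook_id>/<cell_name>" shape of the path field is taken from the records that follow.
import json

# Assumed export format (JSON Lines) and file name ("cells.jsonl") -- both hypothetical.
with open("cells.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        # "path" looks like "<notebook_id>/<cell_name>", e.g. "88087414/cell_29"
        notebook_id, cell_name = rec["path"].split("/")
        print(notebook_id, cell_name, rec["cell_type"], len(rec["code"]), len(rec["screenshot_names"]))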
88087414/cell_29
[ "text_html_output_1.png" ]
import pandas as pd import re import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() def clean_text(data): data['clean_text'] = data['text'].str.lower() data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('http\\S+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('[^\\w\\s]', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('/n', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\d+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+', ' ', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+[a-zA-Z]\\s+', ' ', elem)) return data data_train = clean_text(data_train) data_test = clean_text(data_test) print(data_train['text'][2]) print(data_train['clean_text'][2]) print(' ') print(data_test['text'][2]) print(data_test['clean_text'][2])
code
88087414/cell_39
[ "text_plain_output_1.png" ]
from keras.layers import LSTM,Embedding, Conv1D, Dense, Flatten, MaxPooling1D, Dropout , Bidirectional , Dropout , Flatten , GlobalMaxPooling1D from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from keras.preprocessing.text import Tokenizer from sklearn.model_selection import train_test_split from tensorflow.keras.utils import plot_model import keras import pandas as pd import re import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() def clean_text(data): data['clean_text'] = data['text'].str.lower() data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('http\\S+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('[^\\w\\s]', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('/n', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\d+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+', ' ', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+[a-zA-Z]\\s+', ' ', elem)) return data data_train = clean_text(data_train) data_test = clean_text(data_test) max_fatures = 5000 tokenizer = Tokenizer(num_words=max_fatures, split=' ') tokenizer.fit_on_texts(data_train['clean_text'].values) X = tokenizer.texts_to_sequences(data_train['clean_text'].values) X = pad_sequences(X, maxlen=31, padding='post') Y = data_train['target'].values X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0) len(tokenizer.index_word) embed_dim = 50 vocab_size = len(tokenizer.index_word) + 1 model1 = Sequential() model1.add(Embedding(input_dim=vocab_size, input_length=31, output_dim=embed_dim)) model1.add(LSTM(30)) model1.add(Dropout(0.2)) model1.add(Flatten()) model1.add(Dense(1, activation='sigmoid')) model1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) from tensorflow.keras.utils import plot_model keras.backend.clear_session() batch_size = 32 history1 = model1.fit(X_train, Y_train, epochs=10, batch_size=batch_size, validation_data=(X_test, Y_test), verbose=1) scores = model1.evaluate(X_test, Y_test, verbose=0) print('Test loss:', round(scores[0], 2)) print('Test accuracy:', round(scores[1], 2))
code
88087414/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() print(s) print('0 :', round(s[0] / len(data_train) * 100, 2), '%') print('1 :', round(s[1] / len(data_train) * 100, 2), '%') sns.countplot(data_train['target'])
code
88087414/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud,STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() text = list(data_train[data_train['target'] == 0].text.values) wordcloud = WordCloud(stopwords=STOPWORDS).generate(str(text)) plt.axis('off') text = list(data_train[data_train['target'] == 1].text.values) wordcloud = WordCloud(stopwords=STOPWORDS).generate(str(text)) plt.figure(figsize=(15, 7)) plt.imshow(wordcloud) plt.axis('off') plt.title('Wordcloud for Disaster tweets') plt.show()
code
88087414/cell_45
[ "text_plain_output_1.png" ]
from keras.layers import LSTM,Embedding, Conv1D, Dense, Flatten, MaxPooling1D, Dropout , Bidirectional , Dropout , Flatten , GlobalMaxPooling1D from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from keras.preprocessing.text import Tokenizer from sklearn.model_selection import train_test_split from tensorflow.keras.utils import plot_model from wordcloud import WordCloud,STOPWORDS import keras import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() text = list(data_train[data_train['target'] == 0].text.values) wordcloud = WordCloud(stopwords=STOPWORDS).generate(str(text)) plt.axis('off') text = list(data_train[data_train['target'] == 1].text.values) wordcloud = WordCloud(stopwords=STOPWORDS).generate(str(text)) plt.axis('off') def clean_text(data): data['clean_text'] = data['text'].str.lower() data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('http\\S+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('[^\\w\\s]', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('/n', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\d+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+', ' ', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+[a-zA-Z]\\s+', ' ', elem)) return data data_train = clean_text(data_train) data_test = clean_text(data_test) max_fatures = 5000 tokenizer = Tokenizer(num_words=max_fatures, split=' ') tokenizer.fit_on_texts(data_train['clean_text'].values) X = tokenizer.texts_to_sequences(data_train['clean_text'].values) X = pad_sequences(X, maxlen=31, padding='post') Y = data_train['target'].values X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0) len(tokenizer.index_word) embed_dim = 50 vocab_size = len(tokenizer.index_word) + 1 model1 = Sequential() model1.add(Embedding(input_dim=vocab_size, input_length=31, output_dim=embed_dim)) model1.add(LSTM(30)) model1.add(Dropout(0.2)) model1.add(Flatten()) model1.add(Dense(1, activation='sigmoid')) model1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) from tensorflow.keras.utils import plot_model keras.backend.clear_session() batch_size = 32 history1 = model1.fit(X_train, Y_train, epochs=10, batch_size=batch_size, validation_data=(X_test, Y_test), verbose=1) plt.style.use('seaborn') plt.plot(history1.history['accuracy']) plt.plot(history1.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show()
code
88087414/cell_18
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud,STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() text = list(data_train[data_train['target'] == 0].text.values) wordcloud = WordCloud(stopwords=STOPWORDS).generate(str(text)) plt.figure(figsize=(15, 7)) plt.imshow(wordcloud) plt.axis('off') plt.title('Wordcloud for normal tweets') plt.show()
code
88087414/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('../input/nlp-getting-started/train.csv')
data_test = pd.read_csv('../input/nlp-getting-started/test.csv')
data_train = data_train[['id', 'text', 'target']]
data_test = data_test[['id', 'text']]
s = data_train.target.value_counts()
# 'text_length' is not created anywhere in this cell; it is derived here (assumed to be
# the character count of each tweet) so the histogram below can run on its own.
data_train['text_length'] = data_train['text'].str.len()
data_train['text_length'].plot.hist()
code
88087414/cell_16
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() data_train.head()
code
88087414/cell_38
[ "text_plain_output_1.png" ]
from keras.layers import LSTM,Embedding, Conv1D, Dense, Flatten, MaxPooling1D, Dropout , Bidirectional , Dropout , Flatten , GlobalMaxPooling1D from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from keras.preprocessing.text import Tokenizer from sklearn.model_selection import train_test_split from tensorflow.keras.utils import plot_model import keras import pandas as pd import re import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() def clean_text(data): data['clean_text'] = data['text'].str.lower() data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('http\\S+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('[^\\w\\s]', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('/n', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\d+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+', ' ', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+[a-zA-Z]\\s+', ' ', elem)) return data data_train = clean_text(data_train) data_test = clean_text(data_test) max_fatures = 5000 tokenizer = Tokenizer(num_words=max_fatures, split=' ') tokenizer.fit_on_texts(data_train['clean_text'].values) X = tokenizer.texts_to_sequences(data_train['clean_text'].values) X = pad_sequences(X, maxlen=31, padding='post') Y = data_train['target'].values X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0) len(tokenizer.index_word) embed_dim = 50 vocab_size = len(tokenizer.index_word) + 1 model1 = Sequential() model1.add(Embedding(input_dim=vocab_size, input_length=31, output_dim=embed_dim)) model1.add(LSTM(30)) model1.add(Dropout(0.2)) model1.add(Flatten()) model1.add(Dense(1, activation='sigmoid')) model1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) from tensorflow.keras.utils import plot_model keras.backend.clear_session() batch_size = 32 history1 = model1.fit(X_train, Y_train, epochs=10, batch_size=batch_size, validation_data=(X_test, Y_test), verbose=1)
code
88087414/cell_35
[ "text_plain_output_1.png" ]
from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from keras.preprocessing.text import Tokenizer from sklearn.model_selection import train_test_split import pandas as pd import re import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() def clean_text(data): data['clean_text'] = data['text'].str.lower() data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('http\\S+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('[^\\w\\s]', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('/n', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\d+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+', ' ', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+[a-zA-Z]\\s+', ' ', elem)) return data data_train = clean_text(data_train) data_test = clean_text(data_test) max_fatures = 5000 tokenizer = Tokenizer(num_words=max_fatures, split=' ') tokenizer.fit_on_texts(data_train['clean_text'].values) X = tokenizer.texts_to_sequences(data_train['clean_text'].values) X = pad_sequences(X, maxlen=31, padding='post') Y = data_train['target'].values X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0) len(tokenizer.index_word)
code
88087414/cell_43
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from keras.layers import LSTM,Embedding, Conv1D, Dense, Flatten, MaxPooling1D, Dropout , Bidirectional , Dropout , Flatten , GlobalMaxPooling1D from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from keras.preprocessing.text import Tokenizer from sklearn.metrics import classification_report,confusion_matrix from sklearn.model_selection import train_test_split from tensorflow.keras.utils import plot_model import keras import pandas as pd import re import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() def clean_text(data): data['clean_text'] = data['text'].str.lower() data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('http\\S+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('[^\\w\\s]', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('/n', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\d+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+', ' ', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+[a-zA-Z]\\s+', ' ', elem)) return data data_train = clean_text(data_train) data_test = clean_text(data_test) max_fatures = 5000 tokenizer = Tokenizer(num_words=max_fatures, split=' ') tokenizer.fit_on_texts(data_train['clean_text'].values) X = tokenizer.texts_to_sequences(data_train['clean_text'].values) X = pad_sequences(X, maxlen=31, padding='post') Y = data_train['target'].values X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0) len(tokenizer.index_word) embed_dim = 50 vocab_size = len(tokenizer.index_word) + 1 model1 = Sequential() model1.add(Embedding(input_dim=vocab_size, input_length=31, output_dim=embed_dim)) model1.add(LSTM(30)) model1.add(Dropout(0.2)) model1.add(Flatten()) model1.add(Dense(1, activation='sigmoid')) model1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) from tensorflow.keras.utils import plot_model keras.backend.clear_session() batch_size = 32 history1 = model1.fit(X_train, Y_train, epochs=10, batch_size=batch_size, validation_data=(X_test, Y_test), verbose=1) scores = model1.evaluate(X_test, Y_test, verbose=0) predict = model1.predict(X_test) predict1 = [1 if i > 0.5 else 0 for i in predict] conf = confusion_matrix(Y_test, predict1) conf
code
88087414/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('../input/nlp-getting-started/train.csv')
data_test = pd.read_csv('../input/nlp-getting-started/test.csv')
data_train = data_train[['id', 'text', 'target']]
data_test = data_test[['id', 'text']]
s = data_train.target.value_counts()
# 'text_length' is not defined in this cell; character length is assumed here so the
# summary statistics below can run on their own.
data_train['text_length'] = data_train['text'].str.len()
print('the max length tweet is:', data_train['text_length'].max())
print('the min length tweet is:', data_train['text_length'].min())
print('the avg length tweet is:', data_train['text_length'].mean())
code
88087414/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

data_train = pd.read_csv('../input/nlp-getting-started/train.csv')
data_test = pd.read_csv('../input/nlp-getting-started/test.csv')
data_train = data_train[['id', 'text', 'target']]
data_test = data_test[['id', 'text']]
print('null values for train data : ')
print(data_train.isna().sum())
print('null values for test data : ')
print(data_test.isna().sum())
code
88087414/cell_37
[ "text_plain_output_1.png" ]
from keras.layers import LSTM,Embedding, Conv1D, Dense, Flatten, MaxPooling1D, Dropout , Bidirectional , Dropout , Flatten , GlobalMaxPooling1D from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from keras.preprocessing.text import Tokenizer from sklearn.model_selection import train_test_split from tensorflow.keras.utils import plot_model import pandas as pd import re import seaborn as sns data_train = pd.read_csv('../input/nlp-getting-started/train.csv') data_test = pd.read_csv('../input/nlp-getting-started/test.csv') data_train = data_train[['id', 'text', 'target']] data_test = data_test[['id', 'text']] s = data_train.target.value_counts() def clean_text(data): data['clean_text'] = data['text'].str.lower() data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('http\\S+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('[^\\w\\s]', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('/n', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\d+', '', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+', ' ', elem)) data['clean_text'] = data['clean_text'].apply(lambda elem: re.sub('\\s+[a-zA-Z]\\s+', ' ', elem)) return data data_train = clean_text(data_train) data_test = clean_text(data_test) max_fatures = 5000 tokenizer = Tokenizer(num_words=max_fatures, split=' ') tokenizer.fit_on_texts(data_train['clean_text'].values) X = tokenizer.texts_to_sequences(data_train['clean_text'].values) X = pad_sequences(X, maxlen=31, padding='post') Y = data_train['target'].values X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0) len(tokenizer.index_word) embed_dim = 50 vocab_size = len(tokenizer.index_word) + 1 model1 = Sequential() model1.add(Embedding(input_dim=vocab_size, input_length=31, output_dim=embed_dim)) model1.add(LSTM(30)) model1.add(Dropout(0.2)) model1.add(Flatten()) model1.add(Dense(1, activation='sigmoid')) model1.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) from tensorflow.keras.utils import plot_model plot_model(model1, show_shapes=True)
code
88087414/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

data_train = pd.read_csv('../input/nlp-getting-started/train.csv')
data_test = pd.read_csv('../input/nlp-getting-started/test.csv')
data_train.head()
code
34121960/cell_6
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.head(3)
code
34121960/cell_2
[ "text_plain_output_35.png", "text_plain_output_43.png", "text_plain_output_37.png", "text_plain_output_5.png", "text_plain_output_30.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_44.png", "text_plain_output_40.png", "text_plain_output_31.png", "text_plain_output_20.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_45.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_29.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_25.png", "text_plain_output_18.png", "text_plain_output_36.png", "text_plain_output_3.png", "text_plain_output_22.png", "text_plain_output_38.png", "text_plain_output_7.png", "text_plain_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_42.png", "text_plain_output_23.png", "text_plain_output_28.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_39.png", "text_plain_output_19.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "text_plain_output_46.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34121960/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
listOfLists1 = []
with open('../input/CORD-19-research-challenge/json_schema.txt') as f:
    for line in f:
        inner_list = [line.strip() for line in line.split(' split character')]
        listOfLists1.append(inner_list)
df2 = pd.DataFrame(listOfLists1)
df2
code
122253082/cell_9
[ "text_html_output_2.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('/kaggle/input/tomato-daily-prices/Tomato.csv')
backup = df.copy(deep=True)
df['Date'] = pd.to_datetime(df['Date'])
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(df['Date'], df['Average'])
ax.set_title('Tomato Weight Time Series')
ax.set_xlabel('Date')
ax.set_ylabel('Average Weight')
plt.show()
code
122253082/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/tomato-daily-prices/Tomato.csv')
backup = df.copy(deep=True)
df.head()
code
122253082/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import plotly.express as px

df = pd.read_csv('/kaggle/input/tomato-daily-prices/Tomato.csv')
backup = df.copy(deep=True)
# 'Date' is parsed to datetime in an earlier cell of the notebook but not in this one;
# it is converted again here so the .dt accessors below work on their own.
df['Date'] = pd.to_datetime(df['Date'])
df['Year'] = df['Date'].dt.year
df['Month'] = df['Date'].dt.month
grouped = df.groupby(['Year', 'Month'])['Average'].mean().reset_index()
fig = px.line(grouped, x='Month', y='Average', color='Year',
              title='Monthly Tomato Price over Years',
              labels={'Month': 'Month', 'Weight': 'Mean Weight', 'Year': 'Year'})
fig.show()
code
122253082/cell_7
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/tomato-daily-prices/Tomato.csv')
backup = df.copy(deep=True)
df.info()
code
89126605/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from moviepy.editor import *

clip = VideoFileClip('./Rick Astley - Never Gonna Give You Up (Official Music Video).mp4')
clip1 = clip.subclip(0, 5)
clip2 = clip.subclip(60, 65)
final = concatenate_videoclips([clip1, clip2])
final.write_videofile('merged.mp4')
code
89126605/cell_2
[ "text_plain_output_1.png" ]
!pip install moviepy
!pip install pytube
code
89126605/cell_3
[ "text_plain_output_1.png" ]
import pytube

url = 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'
youtube = pytube.YouTube(url)
video = youtube.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
video.download()
code
2015996/cell_4
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno

air_reserve = pd.read_csv('../input/air_reserve.csv')
hpg_reserve = pd.read_csv('../input/hpg_reserve.csv')
air_store_info = pd.read_csv('../input/air_store_info.csv')
hpg_store_info = pd.read_csv('../input/hpg_store_info.csv')
visits = pd.read_csv('../input/air_visit_data.csv')
dates = pd.read_csv('../input/date_info.csv')
relation = pd.read_csv('../input/store_id_relation.csv')
cols = ['store_id', 'visit_datetime', 'reserve_datetime', 'reserve_visitors']
air_reserve.columns = cols
hpg_reserve.columns = cols
reserves = pd.DataFrame(columns=cols)
reserves = pd.concat([air_reserve, hpg_reserve])
reserves['visit_datetime'] = pd.to_datetime(reserves['visit_datetime'])
reserves['reserve_datetime'] = pd.to_datetime(reserves['reserve_datetime'])
reserves.info()
reserves.describe()
code
2015996/cell_2
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import missingno as msno air_reserve = pd.read_csv('../input/air_reserve.csv') hpg_reserve = pd.read_csv('../input/hpg_reserve.csv') air_store_info = pd.read_csv('../input/air_store_info.csv') hpg_store_info = pd.read_csv('../input/hpg_store_info.csv') visits = pd.read_csv('../input/air_visit_data.csv') dates = pd.read_csv('../input/date_info.csv') relation = pd.read_csv('../input/store_id_relation.csv') air_reserve.head()
code
2015996/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import missingno as msno air_reserve = pd.read_csv('../input/air_reserve.csv') hpg_reserve = pd.read_csv('../input/hpg_reserve.csv') air_store_info = pd.read_csv('../input/air_store_info.csv') hpg_store_info = pd.read_csv('../input/hpg_store_info.csv') visits = pd.read_csv('../input/air_visit_data.csv') dates = pd.read_csv('../input/date_info.csv') relation = pd.read_csv('../input/store_id_relation.csv') hpg_reserve.head()
code
2015996/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import missingno as msno air_reserve = pd.read_csv('../input/air_reserve.csv') hpg_reserve = pd.read_csv('../input/hpg_reserve.csv') air_store_info = pd.read_csv('../input/air_store_info.csv') hpg_store_info = pd.read_csv('../input/hpg_store_info.csv') visits = pd.read_csv('../input/air_visit_data.csv') dates = pd.read_csv('../input/date_info.csv') relation = pd.read_csv('../input/store_id_relation.csv') cols = ['store_id', 'visit_datetime', 'reserve_datetime', 'reserve_visitors'] air_reserve.columns = cols hpg_reserve.columns = cols reserves = pd.DataFrame(columns=cols) reserves = pd.concat([air_reserve, hpg_reserve]) reserves['visit_datetime'] = pd.to_datetime(reserves['visit_datetime']) reserves['reserve_datetime'] = pd.to_datetime(reserves['reserve_datetime']) plt.plot_date(x='visit_datetime', y='reserve_visitors', data=reserves)
code
90120064/cell_21
[ "image_output_1.png" ]
from scipy.cluster.hierarchy import dendrogram, linkage from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_style('darkgrid') from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.decomposition import PCA, FactorAnalysis from scipy.cluster.hierarchy import dendrogram, linkage import os import warnings warnings.filterwarnings('ignore') PATH = '/kaggle/input/ccdata/' df = pd.read_csv(PATH + 'CC GENERAL.csv') data = df.copy() data.columns = data.columns.str.lower() data.shape data.isnull().sum().sort_values(ascending=False) plt.figure(figsize=(40, 20)) mask = np.triu(np.ones_like(data.corr(), dtype=np.bool)) heatmap = sns.heatmap(data.corr(), annot=True, mask=mask) heatmap.set_title('Triangle Correlation Heatmap', fontdict={'fontsize':18}, pad=16); plt.figure(figsize=(9,6)) ax = plt.axes() ax.set_facecolor('darkgrey') g = sns.scatterplot(x='balance_frequency', y='balance', data=data) g.set_title('Balance Frequency vs. Balance') plt.show() plt.figure(figsize=(9,6)) ax = plt.axes() ax.set_facecolor('darkgrey') g = sns.scatterplot(x='credit_limit', y='balance', data=data) g.set_title('Credit Limit vs. Balance') plt.show() o_cols = data.select_dtypes(include=['object']).columns.tolist() num_cols = data.select_dtypes(exclude=['object']).columns.tolist() ax = plt.axes() ax.set_facecolor('darkgrey') data.drop(columns='cust_id', inplace=True) data.dropna(axis='index', inplace=True) scaler = StandardScaler() data_scaled = scaler.fit_transform(data) hier_cluster = linkage(data_scaled, method='ward') plt.figure(figsize=(10, 9)) plt.title('Hierarchical Clustering Dendrogram') plt.xlabel('Observations') plt.ylabel('Distance') dendrogram(hier_cluster, truncate_mode='level', p=5, show_leaf_counts=False, no_labels=True) plt.show()
code
90120064/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_style('darkgrid') from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.decomposition import PCA, FactorAnalysis from scipy.cluster.hierarchy import dendrogram, linkage import os import warnings warnings.filterwarnings('ignore') PATH = '/kaggle/input/ccdata/' df = pd.read_csv(PATH + 'CC GENERAL.csv') data = df.copy() data.columns = data.columns.str.lower() data.shape data.isnull().sum().sort_values(ascending=False) plt.figure(figsize=(40, 20)) mask = np.triu(np.ones_like(data.corr(), dtype=np.bool)) heatmap = sns.heatmap(data.corr(), annot=True, mask=mask) heatmap.set_title('Triangle Correlation Heatmap', fontdict={'fontsize':18}, pad=16); o_cols = data.select_dtypes(include=['object']).columns.tolist() num_cols = data.select_dtypes(exclude=['object']).columns.tolist() data[num_cols].hist(bins=15, figsize=(20, 15), layout=(5, 4))
code
90120064/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_style('darkgrid') from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.decomposition import PCA, FactorAnalysis from scipy.cluster.hierarchy import dendrogram, linkage import os import warnings warnings.filterwarnings('ignore') PATH = '/kaggle/input/ccdata/' df = pd.read_csv(PATH + 'CC GENERAL.csv') data = df.copy() data.columns = data.columns.str.lower() data.shape data.isnull().sum().sort_values(ascending=False) plt.figure(figsize=(40, 20)) mask = np.triu(np.ones_like(data.corr(), dtype=np.bool)) heatmap = sns.heatmap(data.corr(), annot=True, mask=mask) heatmap.set_title('Triangle Correlation Heatmap', fontdict={'fontsize': 18}, pad=16)
code
90120064/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

PATH = '/kaggle/input/ccdata/'
df = pd.read_csv(PATH + 'CC GENERAL.csv')
data = df.copy()
data.columns = data.columns.str.lower()
data.shape
code
90120064/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

PATH = '/kaggle/input/ccdata/'
df = pd.read_csv(PATH + 'CC GENERAL.csv')
data = df.copy()
data.columns = data.columns.str.lower()
data.shape
data.describe()
code
90120064/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_style('darkgrid') from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.decomposition import PCA, FactorAnalysis from scipy.cluster.hierarchy import dendrogram, linkage import os import warnings warnings.filterwarnings('ignore') PATH = '/kaggle/input/ccdata/' df = pd.read_csv(PATH + 'CC GENERAL.csv') data = df.copy() data.columns = data.columns.str.lower() data.shape data.isnull().sum().sort_values(ascending=False) plt.figure(figsize=(40, 20)) mask = np.triu(np.ones_like(data.corr(), dtype=np.bool)) heatmap = sns.heatmap(data.corr(), annot=True, mask=mask) heatmap.set_title('Triangle Correlation Heatmap', fontdict={'fontsize':18}, pad=16); plt.figure(figsize=(9,6)) ax = plt.axes() ax.set_facecolor('darkgrey') g = sns.scatterplot(x='balance_frequency', y='balance', data=data) g.set_title('Balance Frequency vs. Balance') plt.show() plt.figure(figsize=(9, 6)) ax = plt.axes() ax.set_facecolor('darkgrey') g = sns.scatterplot(x='credit_limit', y='balance', data=data) g.set_title('Credit Limit vs. Balance') plt.show()
code
90120064/cell_1
[ "text_plain_output_1.png" ]
import os import seaborn as sns import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_style('darkgrid') from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.decomposition import PCA, FactorAnalysis from scipy.cluster.hierarchy import dendrogram, linkage import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) import warnings warnings.filterwarnings('ignore')
code
90120064/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

PATH = '/kaggle/input/ccdata/'
df = pd.read_csv(PATH + 'CC GENERAL.csv')
data = df.copy()
data.columns = data.columns.str.lower()
data.shape
data.isnull().sum().sort_values(ascending=False)
code
90120064/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

PATH = '/kaggle/input/ccdata/'
df = pd.read_csv(PATH + 'CC GENERAL.csv')
data = df.copy()
data.columns = data.columns.str.lower()
data.head()
code
90120064/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_style('darkgrid') from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.decomposition import PCA, FactorAnalysis from scipy.cluster.hierarchy import dendrogram, linkage import os import warnings warnings.filterwarnings('ignore') PATH = '/kaggle/input/ccdata/' df = pd.read_csv(PATH + 'CC GENERAL.csv') data = df.copy() data.columns = data.columns.str.lower() data.shape data.isnull().sum().sort_values(ascending=False) plt.figure(figsize=(40, 20)) mask = np.triu(np.ones_like(data.corr(), dtype=np.bool)) heatmap = sns.heatmap(data.corr(), annot=True, mask=mask) heatmap.set_title('Triangle Correlation Heatmap', fontdict={'fontsize':18}, pad=16); o_cols = data.select_dtypes(include=['object']).columns.tolist() num_cols = data.select_dtypes(exclude=['object']).columns.tolist() data.drop(columns='cust_id', inplace=True) data.head()
code
90120064/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_style('darkgrid') from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.decomposition import PCA, FactorAnalysis from scipy.cluster.hierarchy import dendrogram, linkage import os import warnings warnings.filterwarnings('ignore') PATH = '/kaggle/input/ccdata/' df = pd.read_csv(PATH + 'CC GENERAL.csv') data = df.copy() data.columns = data.columns.str.lower() data.shape data.isnull().sum().sort_values(ascending=False) plt.figure(figsize=(40, 20)) mask = np.triu(np.ones_like(data.corr(), dtype=np.bool)) heatmap = sns.heatmap(data.corr(), annot=True, mask=mask) heatmap.set_title('Triangle Correlation Heatmap', fontdict={'fontsize':18}, pad=16); plt.figure(figsize=(9,6)) ax = plt.axes() ax.set_facecolor('darkgrey') g = sns.scatterplot(x='balance_frequency', y='balance', data=data) g.set_title('Balance Frequency vs. Balance') plt.show() plt.figure(figsize=(9,6)) ax = plt.axes() ax.set_facecolor('darkgrey') g = sns.scatterplot(x='credit_limit', y='balance', data=data) g.set_title('Credit Limit vs. Balance') plt.show() o_cols = data.select_dtypes(include=['object']).columns.tolist() num_cols = data.select_dtypes(exclude=['object']).columns.tolist() plt.figure(figsize=(9, 7)) ax = plt.axes() ax.set_facecolor('darkgrey') sns.violinplot(x='tenure', y='balance', data=data, inner='quartile') plt.xlabel('Account Tenure') plt.ylabel('Account Balance') plt.title('Account Balance over Tenure') plt.show()
code
90120064/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_style('darkgrid') from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.decomposition import PCA, FactorAnalysis from scipy.cluster.hierarchy import dendrogram, linkage import os import warnings warnings.filterwarnings('ignore') PATH = '/kaggle/input/ccdata/' df = pd.read_csv(PATH + 'CC GENERAL.csv') data = df.copy() data.columns = data.columns.str.lower() data.shape data.isnull().sum().sort_values(ascending=False) plt.figure(figsize=(40, 20)) mask = np.triu(np.ones_like(data.corr(), dtype=np.bool)) heatmap = sns.heatmap(data.corr(), annot=True, mask=mask) heatmap.set_title('Triangle Correlation Heatmap', fontdict={'fontsize':18}, pad=16); plt.figure(figsize=(9, 6)) ax = plt.axes() ax.set_facecolor('darkgrey') g = sns.scatterplot(x='balance_frequency', y='balance', data=data) g.set_title('Balance Frequency vs. Balance') plt.show()
code
90120064/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

PATH = '/kaggle/input/ccdata/'
df = pd.read_csv(PATH + 'CC GENERAL.csv')
data = df.copy()
data.columns = data.columns.str.lower()
data.shape
data.info()
code
90102236/cell_13
[ "text_plain_output_1.png" ]
from tensorflow import keras from tensorflow.keras.preprocessing.image import ImageDataGenerator import os import pandas as pd test = pd.read_csv('../input/histopathologic-cancer-detection/sample_submission.csv') test['filename'] = test.id + '.tif' test_path = '../input/histopathologic-cancer-detection/test' BATCH_SIZE = 64 test_datagen = ImageDataGenerator(rescale=1 / 255) test_loader = test_datagen.flow_from_dataframe(dataframe=test, directory=test_path, x_col='filename', batch_size=BATCH_SIZE, shuffle=False, class_mode=None, target_size=(32, 32)) cnn = keras.models.load_model('../input/hcd601/HCDv01.h5') cnn.summary() test_probs = cnn.predict(test_loader) print(test_probs[:10,].round(2))
code
90102236/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import pandas as pd

test = pd.read_csv('../input/histopathologic-cancer-detection/sample_submission.csv')
test['filename'] = test.id + '.tif'
test_path = '../input/histopathologic-cancer-detection/test'
BATCH_SIZE = 64
test_datagen = ImageDataGenerator(rescale=1 / 255)
test_loader = test_datagen.flow_from_dataframe(dataframe=test, directory=test_path, x_col='filename', batch_size=BATCH_SIZE, shuffle=False, class_mode=None, target_size=(32, 32))
code
90102236/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

test = pd.read_csv('../input/histopathologic-cancer-detection/sample_submission.csv')
print('Test Set Size:', test.shape)
code
90102236/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

test = pd.read_csv('../input/histopathologic-cancer-detection/sample_submission.csv')
test['filename'] = test.id + '.tif'
test.head()
code
90102236/cell_11
[ "text_plain_output_1.png" ]
from tensorflow import keras

cnn = keras.models.load_model('../input/hcd601/HCDv01.h5')
cnn.summary()
code
90102236/cell_7
[ "text_html_output_1.png" ]
import os

test_path = '../input/histopathologic-cancer-detection/test'
print('Test Images:', len(os.listdir(test_path)))
code
90102236/cell_16
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

test = pd.read_csv('../input/histopathologic-cancer-detection/sample_submission.csv')
submission = pd.read_csv('../input/histopathologic-cancer-detection/sample_submission.csv')
submission.head()
code
90102236/cell_17
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow import keras from tensorflow.keras.preprocessing.image import ImageDataGenerator import numpy as np import os import pandas as pd test = pd.read_csv('../input/histopathologic-cancer-detection/sample_submission.csv') test['filename'] = test.id + '.tif' test_path = '../input/histopathologic-cancer-detection/test' BATCH_SIZE = 64 test_datagen = ImageDataGenerator(rescale=1 / 255) test_loader = test_datagen.flow_from_dataframe(dataframe=test, directory=test_path, x_col='filename', batch_size=BATCH_SIZE, shuffle=False, class_mode=None, target_size=(32, 32)) cnn = keras.models.load_model('../input/hcd601/HCDv01.h5') cnn.summary() test_probs = cnn.predict(test_loader) test_pred = np.argmax(test_probs, axis=1) submission = pd.read_csv('../input/histopathologic-cancer-detection/sample_submission.csv') submission.label = test_pred submission.head()
code
90102236/cell_14
[ "text_plain_output_1.png" ]
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import pandas as pd

test = pd.read_csv('../input/histopathologic-cancer-detection/sample_submission.csv')
test['filename'] = test.id + '.tif'
test_path = '../input/histopathologic-cancer-detection/test'
BATCH_SIZE = 64
test_datagen = ImageDataGenerator(rescale=1 / 255)
test_loader = test_datagen.flow_from_dataframe(dataframe=test, directory=test_path, x_col='filename', batch_size=BATCH_SIZE, shuffle=False, class_mode=None, target_size=(32, 32))
cnn = keras.models.load_model('../input/hcd601/HCDv01.h5')
cnn.summary()
test_probs = cnn.predict(test_loader)
# np.argmax along axis=1 gives a class label only if the model has one output unit per
# class; if the loaded model ends in a single sigmoid unit (probs of shape (n, 1)),
# this always returns 0 and thresholding at 0.5 would be needed instead.
test_pred = np.argmax(test_probs, axis=1)
print(test_pred[:10])
code
90102236/cell_12
[ "text_html_output_1.png" ]
from tensorflow import keras from tensorflow.keras.preprocessing.image import ImageDataGenerator import os import pandas as pd test = pd.read_csv('../input/histopathologic-cancer-detection/sample_submission.csv') test['filename'] = test.id + '.tif' test_path = '../input/histopathologic-cancer-detection/test' BATCH_SIZE = 64 test_datagen = ImageDataGenerator(rescale=1 / 255) test_loader = test_datagen.flow_from_dataframe(dataframe=test, directory=test_path, x_col='filename', batch_size=BATCH_SIZE, shuffle=False, class_mode=None, target_size=(32, 32)) cnn = keras.models.load_model('../input/hcd601/HCDv01.h5') cnn.summary() test_probs = cnn.predict(test_loader) print(test_probs.shape)
code
34144500/cell_21
[ "text_plain_output_1.png" ]
import json

id_to_cat = {}
with open('/kaggle/input/youtube-new/US_category_id.json', 'r') as f:
    data = json.load(f)
    for category in data['items']:
        id_to_cat[category['id']] = category['snippet']['title']
id_to_cat
code
34144500/cell_9
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import json import seaborn as sns sns.set_style('whitegrid') import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots import nltk from nltk.corpus import stopwords from wordcloud import WordCloud, STOPWORDS import warnings warnings.filterwarnings(action='ignore') pd.set_option('display.max_columns', 50) df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv') df.info()
code
34144500/cell_33
[ "text_html_output_1.png" ]
import json
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import json
import seaborn as sns
sns.set_style('whitegrid')
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import nltk
from nltk.corpus import stopwords
from wordcloud import WordCloud, STOPWORDS
import warnings
warnings.filterwarnings(action='ignore')
pd.set_option('display.max_columns', 50)

df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
# Note: the .dt accessors and the date arithmetic below rely on 'trending_date',
# 'publish_time' and 'publish_date' having been converted to datetime dtypes in
# earlier notebook cells that are not part of this record.
df.insert(5, 'publish_date', df['publish_time'].dt.date)
id_to_cat = {}
with open('/kaggle/input/youtube-new/US_category_id.json', 'r') as f:
    data = json.load(f)
    for category in data['items']:
        id_to_cat[category['id']] = category['snippet']['title']
df.insert(5, 'category', df['category_id'].map(id_to_cat))
df.insert(8, 'publish_to_trend_days', df['trending_date'] - df['publish_date'])
df.insert(7, 'publish_month', df['publish_date'].dt.strftime('%m'))
df.insert(8, 'publish_day', df['publish_date'].dt.strftime('%a'))
df.insert(10, 'publish_hour', df['publish_time'].apply(lambda x: x.hour))
df.head()
code
34144500/cell_40
[ "text_plain_output_1.png" ]
import json import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import json import seaborn as sns sns.set_style('whitegrid') import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots import nltk from nltk.corpus import stopwords from wordcloud import WordCloud, STOPWORDS import warnings warnings.filterwarnings(action='ignore') pd.set_option('display.max_columns', 50) df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv') df.insert(5, 'publish_date', df['publish_time'].dt.date) id_to_cat = {} with open('/kaggle/input/youtube-new/US_category_id.json', 'r') as f: data = json.load(f) for category in data['items']: id_to_cat[category['id']] = category['snippet']['title'] df.insert(5, 'category', df['category_id'].map(id_to_cat)) df.insert(8, 'publish_to_trend_days', df['trending_date'] - df['publish_date']) df.insert(7, 'publish_month', df['publish_date'].dt.strftime('%m')) df.insert(8, 'publish_day', df['publish_date'].dt.strftime('%a')) df.insert(10, 'publish_hour', df['publish_time'].apply(lambda x: x.hour)) df_last = df.drop_duplicates(subset=['video_id'], keep='last', inplace=False) df_first = df.drop_duplicates(subset=['video_id'], keep='first', inplace=False) print(df['video_id'].duplicated().any()) print(df_last['video_id'].duplicated().any()) print(df_first['video_id'].duplicated().any())
code
34144500/cell_39
[ "text_plain_output_1.png" ]
import json import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import json import seaborn as sns sns.set_style('whitegrid') import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots import nltk from nltk.corpus import stopwords from wordcloud import WordCloud, STOPWORDS import warnings warnings.filterwarnings(action='ignore') pd.set_option('display.max_columns', 50) df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv') df.insert(5, 'publish_date', df['publish_time'].dt.date) id_to_cat = {} with open('/kaggle/input/youtube-new/US_category_id.json', 'r') as f: data = json.load(f) for category in data['items']: id_to_cat[category['id']] = category['snippet']['title'] df.insert(5, 'category', df['category_id'].map(id_to_cat)) df.insert(8, 'publish_to_trend_days', df['trending_date'] - df['publish_date']) df.insert(7, 'publish_month', df['publish_date'].dt.strftime('%m')) df.insert(8, 'publish_day', df['publish_date'].dt.strftime('%a')) df.insert(10, 'publish_hour', df['publish_time'].apply(lambda x: x.hour)) print(df.shape) df_last = df.drop_duplicates(subset=['video_id'], keep='last', inplace=False) df_first = df.drop_duplicates(subset=['video_id'], keep='first', inplace=False) print(df_last.shape) print(df_first.shape)
code
34144500/cell_11
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import json import seaborn as sns sns.set_style('whitegrid') import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots import nltk from nltk.corpus import stopwords from wordcloud import WordCloud, STOPWORDS import warnings warnings.filterwarnings(action='ignore') pd.set_option('display.max_columns', 50) df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv') df.describe()
code
34144500/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import json import seaborn as sns sns.set_style('whitegrid') import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots import nltk from nltk.corpus import stopwords from wordcloud import WordCloud, STOPWORDS import warnings warnings.filterwarnings(action='ignore') pd.set_option('display.max_columns', 50) df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv') df.head()
code
34144500/cell_37
[ "text_html_output_1.png" ]
import json import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import json import seaborn as sns sns.set_style('whitegrid') import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots import nltk from nltk.corpus import stopwords from wordcloud import WordCloud, STOPWORDS import warnings warnings.filterwarnings(action='ignore') pd.set_option('display.max_columns', 50) df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv') df.insert(5, 'publish_date', df['publish_time'].dt.date) id_to_cat = {} with open('/kaggle/input/youtube-new/US_category_id.json', 'r') as f: data = json.load(f) for category in data['items']: id_to_cat[category['id']] = category['snippet']['title'] df.insert(5, 'category', df['category_id'].map(id_to_cat)) df.insert(8, 'publish_to_trend_days', df['trending_date'] - df['publish_date']) df.insert(7, 'publish_month', df['publish_date'].dt.strftime('%m')) df.insert(8, 'publish_day', df['publish_date'].dt.strftime('%a')) df.insert(10, 'publish_hour', df['publish_time'].apply(lambda x: x.hour)) len(df['video_id'])
code
34144500/cell_36
[ "text_plain_output_1.png" ]
import json import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import json import seaborn as sns sns.set_style('whitegrid') import matplotlib.pyplot as plt import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots import nltk from nltk.corpus import stopwords from wordcloud import WordCloud, STOPWORDS import warnings warnings.filterwarnings(action='ignore') pd.set_option('display.max_columns', 50) df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv') df.insert(5, 'publish_date', df['publish_time'].dt.date) id_to_cat = {} with open('/kaggle/input/youtube-new/US_category_id.json', 'r') as f: data = json.load(f) for category in data['items']: id_to_cat[category['id']] = category['snippet']['title'] df.insert(5, 'category', df['category_id'].map(id_to_cat)) df.insert(8, 'publish_to_trend_days', df['trending_date'] - df['publish_date']) df.insert(7, 'publish_month', df['publish_date'].dt.strftime('%m')) df.insert(8, 'publish_day', df['publish_date'].dt.strftime('%a')) df.insert(10, 'publish_hour', df['publish_time'].apply(lambda x: x.hour)) df['video_id'].nunique()
code
32062754/cell_4
[ "image_output_1.png" ]
import geopandas as gpd import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import geopandas as gpd import matplotlib.pyplot as plt df_countries = pd.read_csv('/kaggle/input/countries-iso-codes/wikipedia-iso-country-codes.csv') df_global = pd.read_csv('/kaggle/input/global-hospital-beds-capacity-for-covid19/hospital_beds_global_v1.csv') df_global.dataframeName = 'hospital_beds_global_v1.csv' df_global = df_global.merge(df_countries, how='left', left_on=['country'], right_on=['Alpha-2 code']) df_global.rename(columns={'Alpha-3 code': 'country code', 'English short name lower case': 'country name'}, inplace=True) df_global = df_global[['country name', 'country code', 'beds', 'type', 'year', 'population']] world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres')) df_global_acute = df_global[df_global['type'] == 'ACUTE'] mapped = world.merge(df_global_acute[['country code', 'beds']], how='left', left_on='iso_a3', right_on='country code') mapped = mapped.fillna(0) to_be_mapped = 'beds' vmin, vmax = 0,df_global_acute['beds'].max() fig, ax = plt.subplots(1, figsize=(25,25)) mapped.plot(column=to_be_mapped, cmap='Blues', linewidth=0.8, ax=ax, edgecolors='0.8') ax.set_title('Number of ACUTE beds per 1000 inhabitants in countries', fontdict={'fontsize':30}) ax.set_axis_off() sm = plt.cm.ScalarMappable(cmap='Blues', norm=plt.Normalize(vmin=vmin, vmax=vmax)) sm._A = [] cbar = fig.colorbar(sm, orientation='horizontal') df_global_icu = df_global[df_global['type'] == 'ICU'] mapped = world.merge(df_global_icu[['country code', 'beds']], how='left', left_on='iso_a3', right_on='country code') mapped = mapped.fillna(0) to_be_mapped = 'beds' vmin, vmax = (0, df_global_icu['beds'].max()) fig, ax = plt.subplots(1, figsize=(25, 25)) mapped.plot(column=to_be_mapped, cmap='Blues', linewidth=0.8, ax=ax, edgecolors='0.8') ax.set_title('Number of ICU beds per 1000 inhabitants in countries', fontdict={'fontsize': 30}) ax.set_axis_off() sm = plt.cm.ScalarMappable(cmap='Blues', norm=plt.Normalize(vmin=vmin, vmax=vmax)) sm._A = [] cbar = fig.colorbar(sm, orientation='horizontal')
code
32062754/cell_1
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import geopandas as gpd import matplotlib.pyplot as plt df_countries = pd.read_csv('/kaggle/input/countries-iso-codes/wikipedia-iso-country-codes.csv') df_global = pd.read_csv('/kaggle/input/global-hospital-beds-capacity-for-covid19/hospital_beds_global_v1.csv') df_global.dataframeName = 'hospital_beds_global_v1.csv' df_global = df_global.merge(df_countries, how='left', left_on=['country'], right_on=['Alpha-2 code']) df_global.rename(columns={'Alpha-3 code': 'country code', 'English short name lower case': 'country name'}, inplace=True) df_global = df_global[['country name', 'country code', 'beds', 'type', 'year', 'population']] df_global.head()
code
32062754/cell_3
[ "image_output_1.png" ]
import geopandas as gpd
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
df_countries = pd.read_csv('/kaggle/input/countries-iso-codes/wikipedia-iso-country-codes.csv')
df_global = pd.read_csv('/kaggle/input/global-hospital-beds-capacity-for-covid19/hospital_beds_global_v1.csv')
df_global.dataframeName = 'hospital_beds_global_v1.csv'
df_global = df_global.merge(df_countries, how='left', left_on=['country'], right_on=['Alpha-2 code'])
df_global.rename(columns={'Alpha-3 code': 'country code', 'English short name lower case': 'country name'}, inplace=True)
df_global = df_global[['country name', 'country code', 'beds', 'type', 'year', 'population']]
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
df_global_acute = df_global[df_global['type'] == 'ACUTE']
mapped = world.merge(df_global_acute[['country code', 'beds']], how='left', left_on='iso_a3', right_on='country code')
mapped = mapped.fillna(0)
to_be_mapped = 'beds'
vmin, vmax = (0, df_global_acute['beds'].max())
fig, ax = plt.subplots(1, figsize=(25, 25))
mapped.plot(column=to_be_mapped, cmap='Blues', linewidth=0.8, ax=ax, edgecolors='0.8')
ax.set_title('Number of ACUTE beds per 1000 inhabitants in countries', fontdict={'fontsize': 30})
ax.set_axis_off()
sm = plt.cm.ScalarMappable(cmap='Blues', norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
cbar = fig.colorbar(sm, orientation='horizontal')
code
32062754/cell_5
[ "image_output_1.png" ]
import geopandas as gpd
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
df_countries = pd.read_csv('/kaggle/input/countries-iso-codes/wikipedia-iso-country-codes.csv')
df_global = pd.read_csv('/kaggle/input/global-hospital-beds-capacity-for-covid19/hospital_beds_global_v1.csv')
df_global.dataframeName = 'hospital_beds_global_v1.csv'
df_global = df_global.merge(df_countries, how='left', left_on=['country'], right_on=['Alpha-2 code'])
df_global.rename(columns={'Alpha-3 code': 'country code', 'English short name lower case': 'country name'}, inplace=True)
df_global = df_global[['country name', 'country code', 'beds', 'type', 'year', 'population']]
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
df_global_acute = df_global[df_global['type'] == 'ACUTE']
mapped = world.merge(df_global_acute[['country code', 'beds']], how='left', left_on='iso_a3', right_on='country code')
mapped = mapped.fillna(0)
to_be_mapped = 'beds'
vmin, vmax = 0, df_global_acute['beds'].max()
fig, ax = plt.subplots(1, figsize=(25, 25))
mapped.plot(column=to_be_mapped, cmap='Blues', linewidth=0.8, ax=ax, edgecolors='0.8')
ax.set_title('Number of ACUTE beds per 1000 inhabitants in countries', fontdict={'fontsize': 30})
ax.set_axis_off()
sm = plt.cm.ScalarMappable(cmap='Blues', norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
cbar = fig.colorbar(sm, orientation='horizontal')
df_global_icu = df_global[df_global['type'] == 'ICU']
mapped = world.merge(df_global_icu[['country code', 'beds']], how='left', left_on='iso_a3', right_on='country code')
mapped = mapped.fillna(0)
to_be_mapped = 'beds'
vmin, vmax = 0, df_global_icu['beds'].max()
fig, ax = plt.subplots(1, figsize=(25, 25))
mapped.plot(column=to_be_mapped, cmap='Blues', linewidth=0.8, ax=ax, edgecolors='0.8')
ax.set_title('Number of ICU beds per 1000 inhabitants in countries', fontdict={'fontsize': 30})
ax.set_axis_off()
sm = plt.cm.ScalarMappable(cmap='Blues', norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
cbar = fig.colorbar(sm, orientation='horizontal')
df_global_total = df_global[df_global['type'] == 'TOTAL']
# merge the TOTAL subset so the map matches its title (the snippet merged the ICU frame here)
mapped = world.merge(df_global_total[['country code', 'beds']], how='left', left_on='iso_a3', right_on='country code')
mapped = mapped.fillna(0)
to_be_mapped = 'beds'
vmin, vmax = (0, df_global_total['beds'].max())
fig, ax = plt.subplots(1, figsize=(25, 25))
mapped.plot(column=to_be_mapped, cmap='Blues', linewidth=0.8, ax=ax, edgecolors='0.8')
ax.set_title('Number of TOTAL beds per 1000 inhabitants in countries', fontdict={'fontsize': 30})
ax.set_axis_off()
sm = plt.cm.ScalarMappable(cmap='Blues', norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
cbar = fig.colorbar(sm, orientation='horizontal')
code
18100887/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
label = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'ax': 10, 'by': 20, 'cz': 30}
pd.Series(data=my_data)
code
18100887/cell_6
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
label = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'ax': 10, 'by': 20, 'cz': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=label)
pd.Series(data=arr, index=label)
code
18100887/cell_7
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
label = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'ax': 10, 'by': 20, 'cz': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=label)
pd.Series(data=arr, index=label)
pd.Series(d)
code
18100887/cell_8
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
label = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'ax': 10, 'by': 20, 'cz': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=label)
pd.Series(data=arr, index=label)
pd.Series(d)
pd.Series(label)
code
18100887/cell_5
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
label = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'ax': 10, 'by': 20, 'cz': 30}
pd.Series(data=my_data)
pd.Series(data=my_data, index=label)
code
121151851/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360.isnull().sum()
df360.fillna(0, inplace=True)
df360.isnull().sum()
df360.duplicated().sum()
df360['Gender'].value_counts()
code
121151851/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
rf = pd.Series([108, 70, 17])
rf.sum()
rfp = pd.Series([108, 70, 17], index=['Male', 'Female', 'Firm'])
rfp / 195 * 100
overseas = pd.Series([72, 7, 4, 2, 2, 1, 1, 1], index=('abroad', 'Canada', 'russia', 'uk', 'belgium', 'denmark', 'germany', 'mexico'))
states = pd.Series([119, 86, 17, 11, 11, 11, 6, 4, 1, 1], index=('california', 'abroad', 'nevada', 'arizona', 'oregon', 'colorado', 'utah', 'virginia', 'kansas', 'wyoming'))
states.sum()
code
121151851/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360.head()
code
121151851/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
rf = pd.Series([108, 70, 17])
rf.sum()
rfp = pd.Series([108, 70, 17], index=['Male', 'Female', 'Firm'])
rfp / 195 * 100
overseas = pd.Series([72, 7, 4, 2, 2, 1, 1, 1], index=('abroad', 'Canada', 'russia', 'uk', 'belgium', 'denmark', 'germany', 'mexico'))
print(overseas)
code
121151851/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
genders = np.array([36, 55, 9])
mylabels = ['Female 36%', 'Male 55%', 'Firm 9%']
myexplode = [0.2, 0, 0]
plt.pie(genders, labels=mylabels, explode=myexplode, shadow=True)
plt.show()
code
121151851/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360['Country'].unique()
code
121151851/cell_29
[ "text_plain_output_1.png" ]
state_cf = state_rf.round()  # state_rf is the state-percentage Series computed elsewhere in this notebook; the snippet referenced an undefined 'state_percent'
code
121151851/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360.isnull().sum()
df360.fillna(0, inplace=True)
df360.isnull().sum()
df360.duplicated().sum()
code
121151851/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360['State'].unique()
code
121151851/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
rf = pd.Series([108, 70, 17])
rf.sum()
rfp = pd.Series([108, 70, 17], index=['Male', 'Female', 'Firm'])
rfp / 195 * 100
overseas = pd.Series([72, 7, 4, 2, 2, 1, 1, 1], index=('abroad', 'Canada', 'russia', 'uk', 'belgium', 'denmark', 'germany', 'mexico'))
states = pd.Series([119, 86, 17, 11, 11, 11, 6, 4, 1, 1], index=('california', 'abroad', 'nevada', 'arizona', 'oregon', 'colorado', 'utah', 'virginia', 'kansas', 'wyoming'))
states.sum()
states = pd.Series([119, 86, 17, 11, 11, 11, 6, 4, 1, 1], index=('california', 'abroad', 'nevada', 'arizona', 'oregon', 'colorado', 'utah', 'virginia', 'kansas', 'wyoming'))
state_rf = states / 267 * 100
state_rf.round()
code
121151851/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360.isnull().sum()
code
121151851/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
rf = pd.Series([108, 70, 17])
rf.sum()
code
121151851/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
rf = pd.Series([108, 70, 17])
rf.sum()
rfp = pd.Series([108, 70, 17], index=['Male', 'Female', 'Firm'])
rfp / 195 * 100
code
121151851/cell_17
[ "text_plain_output_1.png" ]
print(round(55.38))
print(round(35.89))
print(round(8.71))
code
121151851/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360.isnull().sum()
df360.fillna(0, inplace=True)
df360.isnull().sum()
df360.duplicated().sum()
df360['State'].value_counts()
code
121151851/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360.isnull().sum()
df360.fillna(0, inplace=True)
df360.isnull().sum()
df360.duplicated().sum()
df360['Entity'].value_counts()
code
121151851/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360.isnull().sum()
df360.fillna(0, inplace=True)
df360.isnull().sum()
df360.duplicated().sum()
df360['Country'].value_counts()
code
121151851/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360.isnull().sum()
df360.fillna(0, inplace=True)
df360.isnull().sum()
code
121151851/cell_27
[ "image_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
rf = pd.Series([108, 70, 17])
rf.sum()
rfp = pd.Series([108, 70, 17], index=['Male', 'Female', 'Firm'])
rfp / 195 * 100
overseas = pd.Series([72, 7, 4, 2, 2, 1, 1, 1], index=('abroad', 'Canada', 'russia', 'uk', 'belgium', 'denmark', 'germany', 'mexico'))
states = pd.Series([119, 86, 17, 11, 11, 11, 6, 4, 1, 1], index=('california', 'abroad', 'nevada', 'arizona', 'oregon', 'colorado', 'utah', 'virginia', 'kansas', 'wyoming'))
states.sum()
states = pd.Series([119, 86, 17, 11, 11, 11, 6, 4, 1, 1], index=('california', 'abroad', 'nevada', 'arizona', 'oregon', 'colorado', 'utah', 'virginia', 'kansas', 'wyoming'))
state_rf = states / 267 * 100
state_rf
code
121151851/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df360 = pd.read_csv('/kaggle/input/360-real-estate-company/360 real estate company.csv')
df360.info()
code
122251358/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from pprint import pprint
from tqdm import tqdm
import operator
import pandas as pd
train_df = pd.read_parquet(paths.DIFUSSION_DB_META)
train_df.rename(columns={'Prompt': 'prompt'}, inplace=True)
train_df['prompt'] = train_df['prompt'].astype(str)
train_df['prompt'] = train_df['prompt'].apply(lambda x: x.lower())

def build_vocab(sentences, verbose=True):
    """
    Builds a vocabulary dictionary where keys are the unique words in our sentences and the values are the word counts.
    :param sentences: list of list of words.
    :return: dictionary of words and their count.
    """
    vocab = {}
    for sentence in tqdm(sentences, disable=not verbose):
        for word in sentence:
            try:
                vocab[word] += 1
            except KeyError:
                vocab[word] = 1
    return vocab

def division(n, d):
    """Avoid zero division"""
    return n / d if d else -1

def check_coverage(vocab, embeddings_index):
    """
    :param vocab: a python dictionary with all the words in our dataframe as keys and their count as value.
    :param embeddings_index: a dict-like object where its keys are words and the values are index or the corresponding word's embedding.
    """
    a = {}
    oov = {}
    k = 0
    i = 0
    for word in tqdm(vocab):
        try:
            a[word] = embeddings_index[word]
            k += vocab[word]
        except:
            oov[word] = vocab[word]
            i += vocab[word]
            pass
    sorted_x = sorted(oov.items(), key=operator.itemgetter(1))[::-1]
    return sorted_x

def count_sentence_match(sentences, embeddings_index):
    """
    :param sentences: list of list of words
    :return: dictionary of words and their count
    """
    sentences_matches = []
    for sentence in tqdm(sentences):
        match = 0
        no_match = 0
        for word in sentence:
            try:
                embeddings_index[word]
                match += 1
            except KeyError:
                no_match += 1
        sentences_matches.append(division(match, match + no_match))
    return sentences_matches

def count_word_exists(sentences, vocab_dict):
    """
    :param sentences: list of list of words
    :return: dictionary of words and values
    """
    sentences_matches = []
    word_appeared = []
    for sentence in tqdm(sentences):
        exist = 0
        word_list = []
        for word in sentence:
            try:
                vocab_dict[word]
                exist += 1
                word_list.append(word)
            except KeyError:
                pass
        sentences_matches.append(exist)
        word_appeared.append(word_list)
    return (sentences_matches, word_appeared)

def check_intersection(vocab_input, vocab_tokenizer):
    vocab_input = list(vocab_input.keys())
    vocab_tokenizer = list(vocab_tokenizer.keys())
    intersection = list(set(vocab_input) & set(vocab_tokenizer))
    input_percentage = len(intersection) / len(vocab_input)
    tokenizer_percentage = len(intersection) / len(vocab_tokenizer)

tqdm.pandas()
sentences = train_df['prompt'].progress_apply(lambda x: x.split()).values
vocab_input = build_vocab(sentences)
print(f'There are {len(vocab_input)} unique words in our vocabulary')
pprint({k: vocab_input[k] for k in list(vocab_input)[:5]})
code
122251358/cell_8
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train_df = pd.read_parquet(paths.DIFUSSION_DB_META)
train_df.rename(columns={'Prompt': 'prompt'}, inplace=True)
train_df['prompt'] = train_df['prompt'].astype(str)
train_df['prompt'] = train_df['prompt'].apply(lambda x: x.lower())
print(f'Train shape: {train_df.shape}')
train_df.head()
code
122251358/cell_15
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from pprint import pprint
from tqdm import tqdm
from transformers import AutoTokenizer
import operator

def build_vocab(sentences, verbose=True):
    """
    Builds a vocabulary dictionary where keys are the unique words in our sentences and the values are the word counts.
    :param sentences: list of list of words.
    :return: dictionary of words and their count.
    """
    vocab = {}
    for sentence in tqdm(sentences, disable=not verbose):
        for word in sentence:
            try:
                vocab[word] += 1
            except KeyError:
                vocab[word] = 1
    return vocab

def division(n, d):
    """Avoid zero division"""
    return n / d if d else -1

def check_coverage(vocab, embeddings_index):
    """
    :param vocab: a python dictionary with all the words in our dataframe as keys and their count as value.
    :param embeddings_index: a dict-like object where its keys are words and the values are index or the corresponding word's embedding.
    """
    a = {}
    oov = {}
    k = 0
    i = 0
    for word in tqdm(vocab):
        try:
            a[word] = embeddings_index[word]
            k += vocab[word]
        except:
            oov[word] = vocab[word]
            i += vocab[word]
            pass
    sorted_x = sorted(oov.items(), key=operator.itemgetter(1))[::-1]
    return sorted_x

def count_sentence_match(sentences, embeddings_index):
    """
    :param sentences: list of list of words
    :return: dictionary of words and their count
    """
    sentences_matches = []
    for sentence in tqdm(sentences):
        match = 0
        no_match = 0
        for word in sentence:
            try:
                embeddings_index[word]
                match += 1
            except KeyError:
                no_match += 1
        sentences_matches.append(division(match, match + no_match))
    return sentences_matches

def count_word_exists(sentences, vocab_dict):
    """
    :param sentences: list of list of words
    :return: dictionary of words and values
    """
    sentences_matches = []
    word_appeared = []
    for sentence in tqdm(sentences):
        exist = 0
        word_list = []
        for word in sentence:
            try:
                vocab_dict[word]
                exist += 1
                word_list.append(word)
            except KeyError:
                pass
        sentences_matches.append(exist)
        word_appeared.append(word_list)
    return (sentences_matches, word_appeared)

def check_intersection(vocab_input, vocab_tokenizer):
    vocab_input = list(vocab_input.keys())
    vocab_tokenizer = list(vocab_tokenizer.keys())
    intersection = list(set(vocab_input) & set(vocab_tokenizer))
    input_percentage = len(intersection) / len(vocab_input)
    tokenizer_percentage = len(intersection) / len(vocab_tokenizer)

from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
vocab_tokenizer = tokenizer.get_vocab()
print(f'There are {len(vocab_tokenizer)} unique words in our vocabulary')
pprint({k: vocab_tokenizer[k] for k in list(vocab_tokenizer)[:5]})
code
122251358/cell_17
[ "text_plain_output_1.png" ]
from pprint import pprint
from tqdm import tqdm
from transformers import AutoTokenizer
import operator
import pandas as pd
train_df = pd.read_parquet(paths.DIFUSSION_DB_META)
train_df.rename(columns={'Prompt': 'prompt'}, inplace=True)
train_df['prompt'] = train_df['prompt'].astype(str)
train_df['prompt'] = train_df['prompt'].apply(lambda x: x.lower())

def build_vocab(sentences, verbose=True):
    """
    Builds a vocabulary dictionary where keys are the unique words in our sentences and the values are the word counts.
    :param sentences: list of list of words.
    :return: dictionary of words and their count.
    """
    vocab = {}
    for sentence in tqdm(sentences, disable=not verbose):
        for word in sentence:
            try:
                vocab[word] += 1
            except KeyError:
                vocab[word] = 1
    return vocab

def division(n, d):
    """Avoid zero division"""
    return n / d if d else -1

def check_coverage(vocab, embeddings_index):
    """
    :param vocab: a python dictionary with all the words in our dataframe as keys and their count as value.
    :param embeddings_index: a dict-like object where its keys are words and the values are index or the corresponding word's embedding.
    """
    a = {}
    oov = {}
    k = 0
    i = 0
    for word in tqdm(vocab):
        try:
            a[word] = embeddings_index[word]
            k += vocab[word]
        except:
            oov[word] = vocab[word]
            i += vocab[word]
            pass
    sorted_x = sorted(oov.items(), key=operator.itemgetter(1))[::-1]
    return sorted_x

def count_sentence_match(sentences, embeddings_index):
    """
    :param sentences: list of list of words
    :return: dictionary of words and their count
    """
    sentences_matches = []
    for sentence in tqdm(sentences):
        match = 0
        no_match = 0
        for word in sentence:
            try:
                embeddings_index[word]
                match += 1
            except KeyError:
                no_match += 1
        sentences_matches.append(division(match, match + no_match))
    return sentences_matches

def count_word_exists(sentences, vocab_dict):
    """
    :param sentences: list of list of words
    :return: dictionary of words and values
    """
    sentences_matches = []
    word_appeared = []
    for sentence in tqdm(sentences):
        exist = 0
        word_list = []
        for word in sentence:
            try:
                vocab_dict[word]
                exist += 1
                word_list.append(word)
            except KeyError:
                pass
        sentences_matches.append(exist)
        word_appeared.append(word_list)
    return (sentences_matches, word_appeared)

def check_intersection(vocab_input, vocab_tokenizer):
    vocab_input = list(vocab_input.keys())
    vocab_tokenizer = list(vocab_tokenizer.keys())
    intersection = list(set(vocab_input) & set(vocab_tokenizer))
    input_percentage = len(intersection) / len(vocab_input)
    tokenizer_percentage = len(intersection) / len(vocab_tokenizer)

tqdm.pandas()
sentences = train_df['prompt'].progress_apply(lambda x: x.split()).values
vocab_input = build_vocab(sentences)
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
vocab_tokenizer = tokenizer.get_vocab()
oov = check_coverage(vocab_input, vocab_tokenizer)
sentences_matches = count_sentence_match(sentences, vocab_tokenizer)  # 'sentences' is the tokenized prompt list built above; the snippet referenced an undefined 'sentences_df'
check_intersection(vocab_input, vocab_tokenizer)
train_df['match'] = sentences_matches
code
16136181/cell_13
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
data.month.unique()
data['visitor_type'].value_counts()
code
16136181/cell_25
[ "text_plain_output_1.png" ]
from scipy.stats import skew
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
data.month.unique()
cat = ['admin_pages', 'info_pages', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend']
cont = ['admin_duration', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value']
mask = np.array(data1[cont].corr())
mask[np.tril_indices_from(data1[cont].corr())] = False

def cat_data(i):
    pass
for i in cat:
    cat_data(i)
from scipy.stats import skew
sns.set()

def continous_data(i):
    sns.boxplot(data1[i])
    print('--' * 60)
    plt.title('Boxplot of ' + str(i))
    plt.show()
    plt.title('histogram of ' + str(i))
    sns.distplot(data1[i], bins=40, kde=True, color='blue')
    plt.show()
    print('skewness :', skew(data1[i]))
for i in cont:
    continous_data(i)
code
16136181/cell_4
[ "text_plain_output_5.png", "text_plain_output_9.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_10.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "text_plain_output_8.png", "image_output_6.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import pandas as pd
data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.head()
code
16136181/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
data.month.unique()
cat = ['admin_pages', 'info_pages', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend']
cont = ['admin_duration', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value']
mask = np.array(data1[cont].corr())
mask[np.tril_indices_from(data1[cont].corr())] = False

def cat_data(i):
    pass
for i in cat:
    cat_data(i)
sns.countplot(data.revenue)
code
16136181/cell_30
[ "image_output_11.png", "text_plain_output_5.png", "image_output_14.png", "text_plain_output_4.png", "image_output_13.png", "image_output_5.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "text_plain_output_8.png", "image_output_6.png", "image_output_12.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from scipy.stats import skew
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
data.month.unique()
cat = ['admin_pages', 'info_pages', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend']
cont = ['admin_duration', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value']
mask = np.array(data1[cont].corr())
mask[np.tril_indices_from(data1[cont].corr())] = False

def cat_data(i):
    pass
for i in cat:
    cat_data(i)
from scipy.stats import skew
sns.set()

def continous_data(i):
    pass
for i in cont:
    continous_data(i)

def cat_bivar(i):
    pass
for i in cat:
    cat_bivar(i)
sns.boxplot(x=data1.revenue, y=data1.avg_exit_rate)
code
16136181/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/online_shoppers_intention.csv')
data.shape
data.describe().T
data.columns = ['admin_pages', 'admin_duration', 'info_pages', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend', 'revenue']
data1 = data.copy()
data1.weekend = np.where(data.weekend == True, 1, 0)
data1.revenue = np.where(data.revenue == True, 1, 0)
cat = ['admin_pages', 'info_pages', 'spl_day', 'month', 'os', 'browser', 'region', 'traffic_type', 'visitor_type', 'weekend']
cont = ['admin_duration', 'info_duration', 'product_pages', 'prod_duration', 'avg_bounce_rate', 'avg_exit_rate', 'avg_page_value']
print('Correlation Heat map of the data')
plt.figure(figsize=(15, 10))
mask = np.array(data1[cont].corr())
mask[np.tril_indices_from(data1[cont].corr())] = False
sns.heatmap(data1[cont].corr(), annot=True, mask=mask, fmt='.2f', vmin=-1, vmax=1)
plt.show()
code