Dataset schema (cleaned from the dataset-viewer header):
path: string (13-17 chars) | screenshot_names: sequence (1-873 items) | code: string (0-40.4k chars) | cell_type: string (1 class: "code")
50218411/cell_4
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
df.head()
code
50218411/cell_23
[ "image_output_1.png" ]
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.preprocessing import LabelEncoder  # added: label_encoding() below was undefined in this cell
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
df = df.drop('enrollee_id', axis=1)

# Count plots for the categorical columns
fig = plt.figure(figsize=(25, 16))
i = 1
for val in df.columns:
    if val not in ['city', 'city_development_index', 'training_hours', 'target']:
        fig.add_subplot(4, 3, i, frameon=True)
        plt.xticks(rotation=45)
        ax = sns.countplot(x=val, data=df, fill=True, edgecolor=sns.color_palette('pastel'))
        i += 1
plt.subplots_adjust(top=2, bottom=0.2, hspace=0.3)

# label_encoding() was never defined in this cell; it presumably fit a LabelEncoder
# per column, as assumed here (unseen test categories would break transform() and
# would need extra handling in a real pipeline).
le = {}
df = df.fillna(0)
for val in df.columns:
    if val not in ['city_development_index', 'training_hours', 'target']:
        le[val] = LabelEncoder().fit(df[val].astype(str))
        df[val] = le[val].transform(df[val].astype(str))

fig = plt.figure(figsize=(10, 10))
fig.add_subplot()
ax = sns.heatmap(df.corr(), linewidths=0.5, square=True, annot=True)
plt.show()

# Apply the same imputation and encoding to the test set
test = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_test.csv')
enrollee_id = pd.DataFrame(data={'enrollee_id': test['enrollee_id']})
test = test.drop('enrollee_id', axis=1)
test['gender'] = test['gender'].fillna('Other')
test['enrolled_university'] = test['enrolled_university'].fillna('no_enrollment')
test['education_level'] = test['education_level'].fillna('Graduate')
test['major_discipline'] = test['major_discipline'].fillna('STEM')
test['experience'] = test['experience'].fillna('>20')
test['company_size'] = test['company_size'].fillna('50-99')
test['company_type'] = test['company_type'].fillna('Pvt Ltd')
test['last_new_job'] = test['last_new_job'].fillna('1')
test = test.fillna(0)
for val in test.columns:
    if val not in ['city_development_index', 'training_hours', 'target']:
        test[val] = le[val].transform(test[val].astype(str))

# Fit LDA on the full training set, predict on the test set, and plot the predicted classes
clf = LinearDiscriminantAnalysis()
clf.fit(df.iloc[:, :-1], df.iloc[:, -1])
y_pred = clf.predict(test)
submission = pd.DataFrame(data={'Enrollee_id': enrollee_id['enrollee_id'], 'target': y_pred})
sns.countplot(data=submission, x='target')
code
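The cell above imports confusion_matrix and accuracy_score but never evaluates the model before predicting on the unlabeled test set. A minimal holdout-evaluation sketch, assuming the encoded df produced by that cell; the split fraction and random_state are illustrative, not from the notebook:

from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import accuracy_score, confusion_matrix

# Hold out 20% of the encoded training frame to estimate generalization
X_train, X_val, y_train, y_val = train_test_split(
    df.iloc[:, :-1], df.iloc[:, -1], test_size=0.2, random_state=42)
clf = LinearDiscriminantAnalysis().fit(X_train, y_train)
y_val_pred = clf.predict(X_val)
print('accuracy:', accuracy_score(y_val, y_val_pred))
print(confusion_matrix(y_val, y_val_pred))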
50218411/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
df = df.drop('enrollee_id', axis=1)
df.describe()
code
50218411/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import LabelEncoder  # added: label_encoding() below was undefined in this cell

df = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
df = df.drop('enrollee_id', axis=1)

fig = plt.figure(figsize=(25, 16))
i = 1
for val in df.columns:
    if val not in ['city', 'city_development_index', 'training_hours', 'target']:
        fig.add_subplot(4, 3, i, frameon=True)
        plt.xticks(rotation=45)
        ax = sns.countplot(x=val, data=df, fill=True, edgecolor=sns.color_palette('pastel'))
        i += 1
plt.subplots_adjust(top=2, bottom=0.2, hspace=0.3)

# label_encoding() was never defined in this cell; assumed to fit a LabelEncoder per column.
le = {}
df = df.fillna(0)
for val in df.columns:
    if val not in ['city_development_index', 'training_hours', 'target']:
        le[val] = LabelEncoder().fit(df[val].astype(str))
        df[val] = le[val].transform(df[val].astype(str))

fig = plt.figure(figsize=(10, 10))
fig.add_subplot()
ax = sns.heatmap(df.corr(), linewidths=0.5, square=True, annot=True)
plt.show()
code
50218411/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
df = df.drop('enrollee_id', axis=1)
df.head()
code
50218411/cell_22
[ "text_plain_output_1.png" ]
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.preprocessing import LabelEncoder  # added: label_encoding() below was undefined in this cell
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
df = df.drop('enrollee_id', axis=1)

fig = plt.figure(figsize=(25, 16))
i = 1
for val in df.columns:
    if val not in ['city', 'city_development_index', 'training_hours', 'target']:
        fig.add_subplot(4, 3, i, frameon=True)
        plt.xticks(rotation=45)
        ax = sns.countplot(x=val, data=df, fill=True, edgecolor=sns.color_palette('pastel'))
        i += 1
plt.subplots_adjust(top=2, bottom=0.2, hspace=0.3)

# label_encoding() was never defined in this cell; assumed to fit a LabelEncoder per column.
le = {}
df = df.fillna(0)
for val in df.columns:
    if val not in ['city_development_index', 'training_hours', 'target']:
        le[val] = LabelEncoder().fit(df[val].astype(str))
        df[val] = le[val].transform(df[val].astype(str))

fig = plt.figure(figsize=(10, 10))
fig.add_subplot()
ax = sns.heatmap(df.corr(), linewidths=0.5, square=True, annot=True)
plt.show()

test = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_test.csv')
enrollee_id = pd.DataFrame(data={'enrollee_id': test['enrollee_id']})
test = test.drop('enrollee_id', axis=1)
test['gender'] = test['gender'].fillna('Other')
test['enrolled_university'] = test['enrolled_university'].fillna('no_enrollment')
test['education_level'] = test['education_level'].fillna('Graduate')
test['major_discipline'] = test['major_discipline'].fillna('STEM')
test['experience'] = test['experience'].fillna('>20')
test['company_size'] = test['company_size'].fillna('50-99')
test['company_type'] = test['company_type'].fillna('Pvt Ltd')
test['last_new_job'] = test['last_new_job'].fillna('1')
test = test.fillna(0)
for val in test.columns:
    if val not in ['city_development_index', 'training_hours', 'target']:
        test[val] = le[val].transform(test[val].astype(str))

clf = LinearDiscriminantAnalysis()
clf.fit(df.iloc[:, :-1], df.iloc[:, -1])
print('LinearDiscriminantAnalysis')
y_pred = clf.predict(test)
submission = pd.DataFrame(data={'Enrollee_id': enrollee_id['enrollee_id'], 'target': y_pred})
code
50218411/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
df = df.drop('enrollee_id', axis=1)
for i in df.columns:
    print(df[i].value_counts())
    print('----------------')
code
50218411/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv')
df = df.drop('enrollee_id', axis=1)
fig = plt.figure(figsize=(25, 16))
i = 1
for val in df.columns:
    if val not in ['city', 'city_development_index', 'training_hours', 'target']:
        fig.add_subplot(4, 3, i, frameon=True)
        plt.xticks(rotation=45)
        ax = sns.countplot(x=val, data=df, fill=True, edgecolor=sns.color_palette('pastel'))
        i += 1
plt.subplots_adjust(top=2, bottom=0.2, hspace=0.3)
code
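A side note on the grid-plotting pattern repeated in these cells: the manual subplot counter can be replaced with plt.subplots and a flattened axes array. A small sketch under the same column filter; purely illustrative, not taken from the notebook:

import matplotlib.pyplot as plt
import seaborn as sns

skip = ['city', 'city_development_index', 'training_hours', 'target']
cols = [c for c in df.columns if c not in skip]
fig, axes = plt.subplots(4, 3, figsize=(25, 16))
for ax, col in zip(axes.flat, cols):  # zip stops at the shorter sequence, so spare axes stay empty
    sns.countplot(x=col, data=df, ax=ax)
    ax.tick_params(axis='x', rotation=45)
plt.tight_layout()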
1005676/cell_9
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

df = pd.read_csv('../input/movie_metadata.csv')
df1 = df.dropna()
df2 = df1.select_dtypes(include=['int64', 'float64'])
print(df2.shape)
code
1005676/cell_23
[ "image_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

df = pd.read_csv('../input/movie_metadata.csv')
df1 = df.dropna()
df2 = df1.select_dtypes(include=['int64', 'float64'])
profit = (df2['gross'].values - df2['budget'].values) / df2['gross'].values * 100
df2.loc[:, 'profit'] = pd.Series(profit, index=df2.index)
corr = df2.corr()
print(corr)
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, cmap=cmap, vmax=1, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5}, ax=ax)
print(df2.describe())
code
1005676/cell_11
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

df = pd.read_csv('../input/movie_metadata.csv')
df1 = df.dropna()
df2 = df1.select_dtypes(include=['int64', 'float64'])
profit = (df2['gross'].values - df2['budget'].values) / df2['gross'].values * 100
df2.loc[:, 'profit'] = pd.Series(profit, index=df2.index)
print(df2.head(5))
code
1005676/cell_19
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

df = pd.read_csv('../input/movie_metadata.csv')
df1 = df.dropna()
df2 = df1.select_dtypes(include=['int64', 'float64'])
profit = (df2['gross'].values - df2['budget'].values) / df2['gross'].values * 100
df2.loc[:, 'profit'] = pd.Series(profit, index=df2.index)
corr = df2.corr()
print(corr)
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, cmap=cmap, vmax=1, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5}, ax=ax)

# Note: newer seaborn versions renamed jointplot's `size` argument to `height`.
g = sns.jointplot(x='title_year', y='profit', kind='scatter', size=10, ylim=[0, 110], xlim=[1980, 2020], data=df2)
h = sns.jointplot(x='imdb_score', y='profit', kind='reg', size=10, ylim=[0, 110], data=df2)
h = sns.jointplot(x='num_critic_for_reviews', y='profit', kind='reg', size=10, ylim=[0, 110], data=df2)
code
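cell_19's outputs include a stderr capture, consistent with pandas emitting SettingWithCopyWarning when 'profit' is written into df2, a frame derived from df1. A minimal sketch of the usual fix, assuming the same names as the cell above: take an explicit copy before assigning.

df2 = df1.select_dtypes(include=['int64', 'float64']).copy()  # explicit copy, so the assignment below is unambiguous
df2['profit'] = (df2['gross'] - df2['budget']) / df2['gross'] * 100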
1005676/cell_7
[ "image_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

df = pd.read_csv('../input/movie_metadata.csv')
df1 = df.dropna()
print(df1.shape)
print(df.dtypes)
code
1005676/cell_15
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

df = pd.read_csv('../input/movie_metadata.csv')
df1 = df.dropna()
df2 = df1.select_dtypes(include=['int64', 'float64'])
profit = (df2['gross'].values - df2['budget'].values) / df2['gross'].values * 100
df2.loc[:, 'profit'] = pd.Series(profit, index=df2.index)
corr = df2.corr()
print(corr)
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, cmap=cmap, vmax=1, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5}, ax=ax)
g = sns.jointplot(x='title_year', y='profit', kind='scatter', size=10, ylim=[0, 110], xlim=[1980, 2020], data=df2)
code
1005676/cell_3
[ "image_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

print(check_output(['ls', '../input']).decode('utf8'))
df = pd.read_csv('../input/movie_metadata.csv')
code
1005676/cell_17
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

df = pd.read_csv('../input/movie_metadata.csv')
df1 = df.dropna()
df2 = df1.select_dtypes(include=['int64', 'float64'])
profit = (df2['gross'].values - df2['budget'].values) / df2['gross'].values * 100
df2.loc[:, 'profit'] = pd.Series(profit, index=df2.index)
corr = df2.corr()
print(corr)
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, cmap=cmap, vmax=1, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5}, ax=ax)
g = sns.jointplot(x='title_year', y='profit', kind='scatter', size=10, ylim=[0, 110], xlim=[1980, 2020], data=df2)
h = sns.jointplot(x='imdb_score', y='profit', kind='reg', size=10, ylim=[0, 110], data=df2)
code
1005676/cell_22
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

df = pd.read_csv('../input/movie_metadata.csv')
df1 = df.dropna()
df2 = df1.select_dtypes(include=['int64', 'float64'])
profit = (df2['gross'].values - df2['budget'].values) / df2['gross'].values * 100
df2.loc[:, 'profit'] = pd.Series(profit, index=df2.index)
corr = df2.corr()
print(corr)
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, cmap=cmap, vmax=1, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5}, ax=ax)
g = sns.jointplot(x='title_year', y='profit', kind='scatter', size=10, ylim=[0, 110], xlim=[1980, 2020], data=df2)
h = sns.jointplot(x='imdb_score', y='profit', kind='reg', size=10, ylim=[0, 110], data=df2)
h = sns.jointplot(x='num_critic_for_reviews', y='profit', kind='reg', size=10, ylim=[0, 110], data=df2)
g = sns.pairplot(df1, hue='content_rating')
code
1005676/cell_12
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

df = pd.read_csv('../input/movie_metadata.csv')
df1 = df.dropna()
df2 = df1.select_dtypes(include=['int64', 'float64'])
profit = (df2['gross'].values - df2['budget'].values) / df2['gross'].values * 100
df2.loc[:, 'profit'] = pd.Series(profit, index=df2.index)
corr = df2.corr()
print(corr)
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, cmap=cmap, vmax=1, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5}, ax=ax)
code
1005676/cell_5
[ "image_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='white')

df = pd.read_csv('../input/movie_metadata.csv')
print(df.shape)
code
34119229/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/fifa19/data.csv')
data = pd.read_csv('/kaggle/input/fifa19/data.csv')
data.columns
code
34119229/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/fifa19/data.csv')
df.head()
code
34119229/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/fifa19/data.csv')
data = pd.read_csv('/kaggle/input/fifa19/data.csv')
data.columns
plt.rcParams['figure.figsize'] = (25, 16)
plt.rcParams['font.family'] = 'times new roman'
df = pd.read_csv('/kaggle/input/fifa19/data.csv')

# Class thresholds at fixed offsets from the mean Overall rating
threshold1 = sum(data.Overall) / len(data.Overall) + 25
threshold2 = sum(data.Overall) / len(data.Overall) + 15
threshold3 = sum(data.Overall) / len(data.Overall) + 5
threshold4 = sum(data.Overall) / len(data.Overall)
threshold5 = sum(data.Overall) / len(data.Overall) - 5
threshold6 = sum(data.Overall) / len(data.Overall) - 15
threshold7 = sum(data.Overall) / len(data.Overall) - 25  # computed but never used below

data['Oyuncu_Overall_Sınıfı'] = ['SuperStar' if i >= threshold1
                                 else 'Star' if threshold1 > i >= threshold2
                                 else 'Yetenekli' if threshold2 > i >= threshold3
                                 else 'Vasat' if threshold3 > i >= threshold4
                                 else 'Gelişmez' if threshold4 > i >= threshold5
                                 else 'Kötü' if threshold5 > i >= threshold6
                                 else 'Rezil'
                                 for i in data.Overall]
data.loc[:10000, ['Oyuncu_Overall_Sınıfı', 'Overall']]
data.boxplot(column='Overall', by='Preferred Foot')
data.dropna()  # note: the result is discarded; dropna() is not in-place here
plt.show()
code
34119229/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34119229/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/fifa19/data.csv')
data = pd.read_csv('/kaggle/input/fifa19/data.csv')
data.columns
df = pd.read_csv('/kaggle/input/fifa19/data.csv')
threshold1 = sum(data.Overall) / len(data.Overall) + 25
threshold2 = sum(data.Overall) / len(data.Overall) + 15
threshold3 = sum(data.Overall) / len(data.Overall) + 5
threshold4 = sum(data.Overall) / len(data.Overall)
threshold5 = sum(data.Overall) / len(data.Overall) - 5
threshold6 = sum(data.Overall) / len(data.Overall) - 15
threshold7 = sum(data.Overall) / len(data.Overall) - 25
data['Oyuncu_Overall_Sınıfı'] = ['SuperStar' if i >= threshold1
                                 else 'Star' if threshold1 > i >= threshold2
                                 else 'Yetenekli' if threshold2 > i >= threshold3
                                 else 'Vasat' if threshold3 > i >= threshold4
                                 else 'Gelişmez' if threshold4 > i >= threshold5
                                 else 'Kötü' if threshold5 > i >= threshold6
                                 else 'Rezil'
                                 for i in data.Overall]
data.loc[:10000, ['Oyuncu_Overall_Sınıfı', 'Overall']]
data.describe()
code
34119229/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/fifa19/data.csv')
data = pd.read_csv('/kaggle/input/fifa19/data.csv')
data.columns
df = pd.read_csv('/kaggle/input/fifa19/data.csv')
threshold1 = sum(data.Overall) / len(data.Overall) + 25
threshold2 = sum(data.Overall) / len(data.Overall) + 15
threshold3 = sum(data.Overall) / len(data.Overall) + 5
threshold4 = sum(data.Overall) / len(data.Overall)
threshold5 = sum(data.Overall) / len(data.Overall) - 5
threshold6 = sum(data.Overall) / len(data.Overall) - 15
threshold7 = sum(data.Overall) / len(data.Overall) - 25
print('average1', threshold1)
print('average2', threshold2)
print('average3', threshold3)
print('average4', threshold4)
print('average5', threshold5)
print('average6', threshold6)
print('average7', threshold7)
data['Oyuncu_Overall_Sınıfı'] = ['SuperStar' if i >= threshold1
                                 else 'Star' if threshold1 > i >= threshold2
                                 else 'Yetenekli' if threshold2 > i >= threshold3
                                 else 'Vasat' if threshold3 > i >= threshold4
                                 else 'Gelişmez' if threshold4 > i >= threshold5
                                 else 'Kötü' if threshold5 > i >= threshold6
                                 else 'Rezil'
                                 for i in data.Overall]
data.loc[:10000, ['Oyuncu_Overall_Sınıfı', 'Overall']]
code
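The chained conditional expression in cell_17/cell_18 can be written more compactly with pd.cut. A sketch assuming the same seven bands; the edges mirror threshold1 through threshold6 (threshold7 is computed in the cell but never used), and right=False gives left-closed bins to match the original `>=` lower bounds:

import numpy as np
import pandas as pd

mean = data['Overall'].mean()
edges = [-np.inf, mean - 15, mean - 5, mean, mean + 5, mean + 15, mean + 25, np.inf]
labels = ['Rezil', 'Kötü', 'Gelişmez', 'Vasat', 'Yetenekli', 'Star', 'SuperStar']
data['Oyuncu_Overall_Sınıfı'] = pd.cut(data['Overall'], bins=edges, labels=labels, right=False)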
34119229/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/fifa19/data.csv')
data = pd.read_csv('/kaggle/input/fifa19/data.csv')
data.columns
data.info()
code
72087083/cell_13
[ "text_html_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/source-based-news-classification/news_articles.csv')
df.isnull().sum()
df = df.dropna()
text = ' '.join(df['text_without_stopwords'])
wordcloud = WordCloud().generate(text)
plt.axis('off')
wordcloud = WordCloud(max_font_size=50, max_words=10, background_color='white').generate(text)
plt.imshow(wordcloud, interpolation='bilinear')
plt.figure()
plt.axis('off')
plt.show()
code
72087083/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/source-based-news-classification/news_articles.csv')
df.isnull().sum()
code
72087083/cell_4
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72087083/cell_8
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/source-based-news-classification/news_articles.csv')
df.head()
code
72087083/cell_15
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/source-based-news-classification/news_articles.csv')
df.isnull().sum()
df = df.dropna()
text = ' '.join(df['text_without_stopwords'])
wordcloud = WordCloud().generate(text)
plt.axis('off')
wordcloud = WordCloud(max_font_size=50, max_words=10, background_color='white').generate(text)
plt.axis('off')
stopwords = set(STOPWORDS)
wordcloud = WordCloud(width=3000, height=2000, random_state=1, background_color='black', colormap='Set2', collocations=False, stopwords=STOPWORDS).generate(text)
plt.axis('off')
stopwords = set(STOPWORDS)
mask = np.array(Image.open('../input/input-img/download1.png'))
wordcloud = WordCloud(stopwords=stopwords, background_color='white', max_words=1000, mask=mask, contour_color='#023075', contour_width=3, colormap='rainbow').generate(' '.join(df['text_without_stopwords']))
plt.figure()
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.savefig('cloud.png', format='png')
plt.show()
code
72087083/cell_16
[ "image_output_2.png", "image_output_1.png" ]
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/source-based-news-classification/news_articles.csv')
df.isnull().sum()
df = df.dropna()
text = ' '.join(df['text_without_stopwords'])
wordcloud = WordCloud().generate(text)
plt.axis('off')
wordcloud = WordCloud(max_font_size=50, max_words=10, background_color='white').generate(text)
plt.axis('off')
stopwords = set(STOPWORDS)
wordcloud = WordCloud(width=3000, height=2000, random_state=1, background_color='black', colormap='Set2', collocations=False, stopwords=STOPWORDS).generate(text)
plt.axis('off')
stopwords = set(STOPWORDS)
mask = np.array(Image.open('../input/input-img/download1.png'))
wordcloud = WordCloud(stopwords=stopwords, background_color='white', max_words=1000, mask=mask, contour_color='#023075', contour_width=3, colormap='rainbow').generate(' '.join(df['text_without_stopwords']))
plt.axis('off')
stopwords = set(STOPWORDS)
mask = np.array(Image.open('../input/input-img/Twitter.png'))
wordcloud = WordCloud(stopwords=stopwords, background_color='white', max_words=1000, mask=mask).generate(' '.join(df['text_without_stopwords']))
plt.figure()
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.savefig('twitter.png', format='png')
plt.show()
code
72087083/cell_17
[ "image_output_1.png" ]
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/source-based-news-classification/news_articles.csv')
df.isnull().sum()
df = df.dropna()
text = ' '.join(df['text_without_stopwords'])
wordcloud = WordCloud().generate(text)
plt.axis('off')
wordcloud = WordCloud(max_font_size=50, max_words=10, background_color='white').generate(text)
plt.axis('off')
stopwords = set(STOPWORDS)
wordcloud = WordCloud(width=3000, height=2000, random_state=1, background_color='black', colormap='Set2', collocations=False, stopwords=STOPWORDS).generate(text)
plt.axis('off')
stopwords = set(STOPWORDS)
mask = np.array(Image.open('../input/input-img/download1.png'))
wordcloud = WordCloud(stopwords=stopwords, background_color='white', max_words=1000, mask=mask, contour_color='#023075', contour_width=3, colormap='rainbow').generate(' '.join(df['text_without_stopwords']))
plt.axis('off')
stopwords = set(STOPWORDS)
mask = np.array(Image.open('../input/input-img/Twitter.png'))
wordcloud = WordCloud(stopwords=stopwords, background_color='white', max_words=1000, mask=mask).generate(' '.join(df['text_without_stopwords']))
plt.axis('off')
stopwords = set(STOPWORDS)
mask = np.array(Image.open('../input/input-img/News_mask.PNG'))
wordcloud = WordCloud(width=3000, height=2000, random_state=1, background_color='white', colormap='Set2', collocations=False, stopwords=STOPWORDS, mask=mask).generate(' '.join(df['text_without_stopwords']))
image_colors = ImageColorGenerator(mask)  # note: created but never applied (e.g. via wordcloud.recolor(color_func=image_colors))
plt.figure(figsize=[20, 20])
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.savefig('news.png', format='png')
plt.show()
code
72087083/cell_14
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/source-based-news-classification/news_articles.csv')
df.isnull().sum()
df = df.dropna()
text = ' '.join(df['text_without_stopwords'])
wordcloud = WordCloud().generate(text)
plt.axis('off')
wordcloud = WordCloud(max_font_size=50, max_words=10, background_color='white').generate(text)
plt.axis('off')
stopwords = set(STOPWORDS)
wordcloud = WordCloud(width=3000, height=2000, random_state=1, background_color='black', colormap='Set2', collocations=False, stopwords=STOPWORDS).generate(text)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
72087083/cell_12
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/source-based-news-classification/news_articles.csv')
df.isnull().sum()
df = df.dropna()
text = ' '.join(df['text_without_stopwords'])
wordcloud = WordCloud().generate(text)
plt.imshow(wordcloud, interpolation='bilinear')
plt.figure()
plt.axis('off')
code
105188272/cell_13
[ "text_plain_output_1.png" ]
from transformers import pipeline

sentiment_analyzer = pipeline('sentiment-analysis')
code
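For reference, the sentiment pipeline returns a list with one dict per input, each holding a label and a confidence score. A minimal usage sketch; the example sentence is illustrative:

result = sentiment_analyzer('I really enjoyed this notebook.')
print(result)  # e.g. [{'label': 'POSITIVE', 'score': 0.999...}]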
105188272/cell_9
[ "text_plain_output_1.png" ]
from happytransformer import HappyTextToText

# Note: 'sshleifer/distilbart-cnn-12-6' is a DistilBART summarization checkpoint;
# the 'BERT' model-type tag looks mislabeled but is kept as in the original.
happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6')

text = '\nIn light of recent news from Apple, facial recognition technology is now the talk of the town. Ranging from privacy concerns to a curious boyfriend unlocking his girlfriend’s phone while she sleeps. The first commercial application of 3D facial recognition means that biometric technology is advancing at a staggering rate but so is the ability to hack or fool facial recognition tech.\n\nToday, 99% of the infrastructure scattered around the world consists of 2D cameras capable of running advanced facial recognition software and it will likely be years before a physical overhaul to 3D cameras takes place. Hackers, hobbyists, educators and the like have been developing creative ways to either fool or hide from 2D face recognition in a perpetually innovative game of cat and mouse.\n\nThe introduction of 3D recognition will give way to an entirely new thought process of how to beat an incredibly advanced technology, and also raise some questions as to how and by whom it will be used. The fight is futile as the effort exerted to trick this technology is better spent working with the facial recognition companies to enforce acceptable regulations.\n\nFirst, I question the intention of these types of face recognition-fooling solutions. Are they for bad actors hoping to sneak away undetected? Are they for people who are concerned about their picture being stored in a massive database (I’d worry more about posting a picture to Facebook…)? Are they to demonstrate the inherent risk of biometric information?\n\nThe variations that humans have designed to deceive the machine are more likely to make you stand out than blend in. Such as custom makeup, hairstyles, masks, a pair of glasses designed at Carnegie Melon consisting of camouflage patterns or simply wearing a box over your head. Some of them are realistic, but most are just for fun\n\nAs these techniques improve, so does the technology capable of identifying and training against them resulting in more robust face recognition software. These solutions seem like a great way to protect individuals who are looking to keep their identity hidden for nefarious reasons.\nA further concern would be detecting an attempt to impersonate an individual by using a picture of their face. With 3D cameras, this type of hack is very difficult to achieve due to the complexity of depth measurements. Facial recognition technology can run liveness detection or use additional hardware for heat sensing, both of which are capable of being tricked to a degree as seen with Samsung and Google.\n\nThe availability of public facial images is far more concerning due to the ease of which you would be able to render a 3D model from a small set of images and use virtual reality tools to project a face.\n\nOn the surface, these types of hacks would seem alarming but if gaining access to your phone now is as easy as guessing a 4-digit passcode, society should deem these tolerable risks.\n\nUltimately it is on facial recognition companies and Trueface to mitigate this risk by being more innovative, detecting consistencies in attacks and training the technology to identify when hacks occur. This game will continue to be played and become significantly more sophisticated with the adoption of 3D facial recognition.\n\n'
result1 = happy_tt1.generate_text(text)
string = str(result1)
string
code
105188272/cell_25
[ "text_plain_output_1.png" ]
from happytransformer import HappyTextToText
from heapq import nlargest
from spacy.lang.en.stop_words import STOP_WORDS
from string import punctuation
from transformers import pipeline
import spacy

happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6')
text = '\nIn light of recent news from Apple, facial recognition technology is now the talk of the town. Ranging from privacy concerns to a curious boyfriend unlocking his girlfriend’s phone while she sleeps. The first commercial application of 3D facial recognition means that biometric technology is advancing at a staggering rate but so is the ability to hack or fool facial recognition tech.\n\nToday, 99% of the infrastructure scattered around the world consists of 2D cameras capable of running advanced facial recognition software and it will likely be years before a physical overhaul to 3D cameras takes place. Hackers, hobbyists, educators and the like have been developing creative ways to either fool or hide from 2D face recognition in a perpetually innovative game of cat and mouse.\n\nThe introduction of 3D recognition will give way to an entirely new thought process of how to beat an incredibly advanced technology, and also raise some questions as to how and by whom it will be used. The fight is futile as the effort exerted to trick this technology is better spent working with the facial recognition companies to enforce acceptable regulations.\n\nFirst, I question the intention of these types of face recognition-fooling solutions. Are they for bad actors hoping to sneak away undetected? Are they for people who are concerned about their picture being stored in a massive database (I’d worry more about posting a picture to Facebook…)? Are they to demonstrate the inherent risk of biometric information?\n\nThe variations that humans have designed to deceive the machine are more likely to make you stand out than blend in. Such as custom makeup, hairstyles, masks, a pair of glasses designed at Carnegie Melon consisting of camouflage patterns or simply wearing a box over your head. Some of them are realistic, but most are just for fun\n\nAs these techniques improve, so does the technology capable of identifying and training against them resulting in more robust face recognition software. These solutions seem like a great way to protect individuals who are looking to keep their identity hidden for nefarious reasons.\nA further concern would be detecting an attempt to impersonate an individual by using a picture of their face. With 3D cameras, this type of hack is very difficult to achieve due to the complexity of depth measurements. Facial recognition technology can run liveness detection or use additional hardware for heat sensing, both of which are capable of being tricked to a degree as seen with Samsung and Google.\n\nThe availability of public facial images is far more concerning due to the ease of which you would be able to render a 3D model from a small set of images and use virtual reality tools to project a face.\n\nOn the surface, these types of hacks would seem alarming but if gaining access to your phone now is as easy as guessing a 4-digit passcode, society should deem these tolerable risks.\n\nUltimately it is on facial recognition companies and Trueface to mitigate this risk by being more innovative, detecting consistencies in attacks and training the technology to identify when hacks occur. This game will continue to be played and become significantly more sophisticated with the adoption of 3D facial recognition.\n\n'
result1 = happy_tt1.generate_text(text)
string = str(result1)
sentiment_analyzer = pipeline('sentiment-analysis')
result = sentiment_analyzer(string)

def sum_sent(text):
    result1 = happy_tt1.generate_text(text)
    string = str(result1)
    result = sentiment_analyzer(string)  # note: computed but never returned or displayed in this cell

text = 'Smoking is a practice in which a substance is burned and the resulting smoke is typically breathed in to be tasted and absorbed into the bloodstream. \nMost commonly, the substance used is the dried leaves of the tobacco plant, which have been rolled into a small rectangle of rolling paper to create a small, round cylinder called a cigarette. \nSmoking is primarily practised as a route of administration for recreational drug use because the combustion of the dried plant leaves vaporizes and delivers active substances into the lungs where they are rapidly absorbed into the bloodstream and reach bodily tissue. \nIn the case of cigarette smoking these substances are contained in a mixture of aerosol particles and gases and include the pharmacologically active alkaloid nicotine; the vaporization creates heated aerosol and gas into a form that allows inhalation and deep penetration into the lungs where absorption into the bloodstream of the active substances occurs. In some cultures, smoking is also carried out as a part of various rituals, where participants use it to help induce trance-like states that, they believe, can lead them to spiritual enlightenment.\n\n'

def summarize_sentiment(text):
    # Extractive summary by normalized word frequency, then sentiment on the summary
    stopwords = list(STOP_WORDS)
    nlp = spacy.load('en_core_web_sm')
    doc = nlp(text)
    tokens = [token.text for token in doc]
    word_freq = {}
    for word in doc:
        if word.text.lower() not in stopwords:
            if word.text.lower() not in punctuation:
                if word.text not in word_freq.keys():
                    word_freq[word.text] = 1
                else:
                    word_freq[word.text] += 1
    max_freq = max(word_freq.values())
    for word in word_freq.keys():
        word_freq[word] = word_freq[word] / max_freq
    sentence_tok = [sent for sent in doc.sents]
    sent_scores = {}
    for sent in sentence_tok:
        for word in sent:
            if word.text.lower() in word_freq.keys():
                if sent not in sent_scores.keys():
                    sent_scores[sent] = word_freq[word.text.lower()]
                else:
                    sent_scores[sent] += word_freq[word.text.lower()]
    select_len = int(len(sentence_tok) * 0.3)
    summary = nlargest(select_len, sent_scores, key=sent_scores.get)
    final_sum = [word.text for word in summary]
    summary = ' '.join(final_sum)
    result = sentiment_analyzer(summary)  # note: computed but never returned or displayed in this cell

text = 'Have you ever found yourself dwelling on an insult or fixating on your mistakes? Criticisms often have a greater impact than compliments, and bad news frequently draws more attention than good.\n\nThe reason for this is that negative events have a greater impact on our brains than positive ones. \nPsychologists refer to this as the negative bias (also called the negativity bias), and it can have a powerful effect on your behavior, your decisions, and even your relationships.\nThe negative bias is our tendency not only to register negative stimuli more readily but also to dwell on these events. Also known as positive-negative asymmetry, this negativity bias means that we feel the sting of a rebuke more powerfully than we feel the joy of praise.\n\nThis psychological phenomenon explains why bad first impressions can be so difficult to overcome and why past traumas can have such long lingering effects. In almost any interaction, we are more likely to notice negative things and later remember them more vividly.1\ufeff\n\nAs humans, we tend to:\n\nRemember traumatic experiences better than positive ones.\nRecall insults better than praise.\nReact more strongly to negative stimuli.\nThink about negative things more frequently than positive ones.\nRespond more strongly to negative events than to equally positive ones.\nFor example, you might be having a great day at work when a coworker makes an offhand\ncomment that you find irritating. \nYou then find yourself stewing over his words for the rest of the workday.'
summarize_sentiment(text)
text = 'Friendship is a bond between two or more people. These people are called friends. Friends need each other for fun and help. Without friendship, life would be lonely and sad. Friendship is like happiness gifted to us by God. We can share so many things with friends. We can share joy, sorrow, and your feelings. Just being with friends feels so good.\nFriends support you and give you advice. Friends will always be there for you in need. You can trust them and have faith in them. They never judge you or put you down. If a friendship is a garden, friends are flowers. Together they make your life smell like flowers. We must always hold onto friendship and friends.\n'
summarize_sentiment(text)
code
105188272/cell_4
[ "text_plain_output_1.png" ]
from happytransformer import HappyTextToText

# Note: 'sshleifer/distilbart-cnn-12-6' is a DistilBART summarization checkpoint;
# the 'BERT' model-type tag looks mislabeled but is kept as in the original.
happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6')
code
105188272/cell_23
[ "text_plain_output_1.png" ]
from happytransformer import HappyTextToText
from heapq import nlargest
from spacy.lang.en.stop_words import STOP_WORDS
from string import punctuation
from transformers import pipeline
import spacy

happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6')
text = '\nIn light of recent news from Apple, facial recognition technology is now the talk of the town. Ranging from privacy concerns to a curious boyfriend unlocking his girlfriend’s phone while she sleeps. The first commercial application of 3D facial recognition means that biometric technology is advancing at a staggering rate but so is the ability to hack or fool facial recognition tech.\n\nToday, 99% of the infrastructure scattered around the world consists of 2D cameras capable of running advanced facial recognition software and it will likely be years before a physical overhaul to 3D cameras takes place. Hackers, hobbyists, educators and the like have been developing creative ways to either fool or hide from 2D face recognition in a perpetually innovative game of cat and mouse.\n\nThe introduction of 3D recognition will give way to an entirely new thought process of how to beat an incredibly advanced technology, and also raise some questions as to how and by whom it will be used. The fight is futile as the effort exerted to trick this technology is better spent working with the facial recognition companies to enforce acceptable regulations.\n\nFirst, I question the intention of these types of face recognition-fooling solutions. Are they for bad actors hoping to sneak away undetected? Are they for people who are concerned about their picture being stored in a massive database (I’d worry more about posting a picture to Facebook…)? Are they to demonstrate the inherent risk of biometric information?\n\nThe variations that humans have designed to deceive the machine are more likely to make you stand out than blend in. Such as custom makeup, hairstyles, masks, a pair of glasses designed at Carnegie Melon consisting of camouflage patterns or simply wearing a box over your head. Some of them are realistic, but most are just for fun\n\nAs these techniques improve, so does the technology capable of identifying and training against them resulting in more robust face recognition software. These solutions seem like a great way to protect individuals who are looking to keep their identity hidden for nefarious reasons.\nA further concern would be detecting an attempt to impersonate an individual by using a picture of their face. With 3D cameras, this type of hack is very difficult to achieve due to the complexity of depth measurements. Facial recognition technology can run liveness detection or use additional hardware for heat sensing, both of which are capable of being tricked to a degree as seen with Samsung and Google.\n\nThe availability of public facial images is far more concerning due to the ease of which you would be able to render a 3D model from a small set of images and use virtual reality tools to project a face.\n\nOn the surface, these types of hacks would seem alarming but if gaining access to your phone now is as easy as guessing a 4-digit passcode, society should deem these tolerable risks.\n\nUltimately it is on facial recognition companies and Trueface to mitigate this risk by being more innovative, detecting consistencies in attacks and training the technology to identify when hacks occur. This game will continue to be played and become significantly more sophisticated with the adoption of 3D facial recognition.\n\n'
result1 = happy_tt1.generate_text(text)
string = str(result1)
sentiment_analyzer = pipeline('sentiment-analysis')
result = sentiment_analyzer(string)

def sum_sent(text):
    result1 = happy_tt1.generate_text(text)
    string = str(result1)
    result = sentiment_analyzer(string)  # note: computed but never returned or displayed in this cell

text = 'Smoking is a practice in which a substance is burned and the resulting smoke is typically breathed in to be tasted and absorbed into the bloodstream. \nMost commonly, the substance used is the dried leaves of the tobacco plant, which have been rolled into a small rectangle of rolling paper to create a small, round cylinder called a cigarette. \nSmoking is primarily practised as a route of administration for recreational drug use because the combustion of the dried plant leaves vaporizes and delivers active substances into the lungs where they are rapidly absorbed into the bloodstream and reach bodily tissue. \nIn the case of cigarette smoking these substances are contained in a mixture of aerosol particles and gases and include the pharmacologically active alkaloid nicotine; the vaporization creates heated aerosol and gas into a form that allows inhalation and deep penetration into the lungs where absorption into the bloodstream of the active substances occurs. In some cultures, smoking is also carried out as a part of various rituals, where participants use it to help induce trance-like states that, they believe, can lead them to spiritual enlightenment.\n\n'

def summarize_sentiment(text):
    # Extractive summary by normalized word frequency, then sentiment on the summary
    stopwords = list(STOP_WORDS)
    nlp = spacy.load('en_core_web_sm')
    doc = nlp(text)
    tokens = [token.text for token in doc]
    word_freq = {}
    for word in doc:
        if word.text.lower() not in stopwords:
            if word.text.lower() not in punctuation:
                if word.text not in word_freq.keys():
                    word_freq[word.text] = 1
                else:
                    word_freq[word.text] += 1
    max_freq = max(word_freq.values())
    for word in word_freq.keys():
        word_freq[word] = word_freq[word] / max_freq
    sentence_tok = [sent for sent in doc.sents]
    sent_scores = {}
    for sent in sentence_tok:
        for word in sent:
            if word.text.lower() in word_freq.keys():
                if sent not in sent_scores.keys():
                    sent_scores[sent] = word_freq[word.text.lower()]
                else:
                    sent_scores[sent] += word_freq[word.text.lower()]
    select_len = int(len(sentence_tok) * 0.3)
    summary = nlargest(select_len, sent_scores, key=sent_scores.get)
    final_sum = [word.text for word in summary]
    summary = ' '.join(final_sum)
    result = sentiment_analyzer(summary)  # note: computed but never returned or displayed in this cell

summarize_sentiment(text)
code
105188272/cell_20
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from happytransformer import HappyTextToText
from transformers import pipeline

happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6')
text = '\nIn light of recent news from Apple, facial recognition technology is now the talk of the town. Ranging from privacy concerns to a curious boyfriend unlocking his girlfriend’s phone while she sleeps. The first commercial application of 3D facial recognition means that biometric technology is advancing at a staggering rate but so is the ability to hack or fool facial recognition tech.\n\nToday, 99% of the infrastructure scattered around the world consists of 2D cameras capable of running advanced facial recognition software and it will likely be years before a physical overhaul to 3D cameras takes place. Hackers, hobbyists, educators and the like have been developing creative ways to either fool or hide from 2D face recognition in a perpetually innovative game of cat and mouse.\n\nThe introduction of 3D recognition will give way to an entirely new thought process of how to beat an incredibly advanced technology, and also raise some questions as to how and by whom it will be used. The fight is futile as the effort exerted to trick this technology is better spent working with the facial recognition companies to enforce acceptable regulations.\n\nFirst, I question the intention of these types of face recognition-fooling solutions. Are they for bad actors hoping to sneak away undetected? Are they for people who are concerned about their picture being stored in a massive database (I’d worry more about posting a picture to Facebook…)? Are they to demonstrate the inherent risk of biometric information?\n\nThe variations that humans have designed to deceive the machine are more likely to make you stand out than blend in. Such as custom makeup, hairstyles, masks, a pair of glasses designed at Carnegie Melon consisting of camouflage patterns or simply wearing a box over your head. Some of them are realistic, but most are just for fun\n\nAs these techniques improve, so does the technology capable of identifying and training against them resulting in more robust face recognition software. These solutions seem like a great way to protect individuals who are looking to keep their identity hidden for nefarious reasons.\nA further concern would be detecting an attempt to impersonate an individual by using a picture of their face. With 3D cameras, this type of hack is very difficult to achieve due to the complexity of depth measurements. Facial recognition technology can run liveness detection or use additional hardware for heat sensing, both of which are capable of being tricked to a degree as seen with Samsung and Google.\n\nThe availability of public facial images is far more concerning due to the ease of which you would be able to render a 3D model from a small set of images and use virtual reality tools to project a face.\n\nOn the surface, these types of hacks would seem alarming but if gaining access to your phone now is as easy as guessing a 4-digit passcode, society should deem these tolerable risks.\n\nUltimately it is on facial recognition companies and Trueface to mitigate this risk by being more innovative, detecting consistencies in attacks and training the technology to identify when hacks occur. This game will continue to be played and become significantly more sophisticated with the adoption of 3D facial recognition.\n\n'
result1 = happy_tt1.generate_text(text)
string = str(result1)
sentiment_analyzer = pipeline('sentiment-analysis')
result = sentiment_analyzer(string)

def sum_sent(text):
    result1 = happy_tt1.generate_text(text)
    string = str(result1)
    result = sentiment_analyzer(string)  # note: computed but never returned or displayed in this cell

text = 'Smoking is a practice in which a substance is burned and the resulting smoke is typically breathed in to be tasted and absorbed into the bloodstream. \nMost commonly, the substance used is the dried leaves of the tobacco plant, which have been rolled into a small rectangle of rolling paper to create a small, round cylinder called a cigarette. \nSmoking is primarily practised as a route of administration for recreational drug use because the combustion of the dried plant leaves vaporizes and delivers active substances into the lungs where they are rapidly absorbed into the bloodstream and reach bodily tissue. \nIn the case of cigarette smoking these substances are contained in a mixture of aerosol particles and gases and include the pharmacologically active alkaloid nicotine; the vaporization creates heated aerosol and gas into a form that allows inhalation and deep penetration into the lungs where absorption into the bloodstream of the active substances occurs. In some cultures, smoking is also carried out as a part of various rituals, where participants use it to help induce trance-like states that, they believe, can lead them to spiritual enlightenment.\n\n'
sum_sent(text)
code
105188272/cell_26
[ "text_plain_output_1.png" ]
from happytransformer import HappyTextToText
from heapq import nlargest
from spacy.lang.en.stop_words import STOP_WORDS
from string import punctuation
from transformers import pipeline
import spacy

happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6')
text = '\nIn light of recent news from Apple, facial recognition technology is now the talk of the town. Ranging from privacy concerns to a curious boyfriend unlocking his girlfriend’s phone while she sleeps. The first commercial application of 3D facial recognition means that biometric technology is advancing at a staggering rate but so is the ability to hack or fool facial recognition tech.\n\nToday, 99% of the infrastructure scattered around the world consists of 2D cameras capable of running advanced facial recognition software and it will likely be years before a physical overhaul to 3D cameras takes place. Hackers, hobbyists, educators and the like have been developing creative ways to either fool or hide from 2D face recognition in a perpetually innovative game of cat and mouse.\n\nThe introduction of 3D recognition will give way to an entirely new thought process of how to beat an incredibly advanced technology, and also raise some questions as to how and by whom it will be used. The fight is futile as the effort exerted to trick this technology is better spent working with the facial recognition companies to enforce acceptable regulations.\n\nFirst, I question the intention of these types of face recognition-fooling solutions. Are they for bad actors hoping to sneak away undetected? Are they for people who are concerned about their picture being stored in a massive database (I’d worry more about posting a picture to Facebook…)? Are they to demonstrate the inherent risk of biometric information?\n\nThe variations that humans have designed to deceive the machine are more likely to make you stand out than blend in. Such as custom makeup, hairstyles, masks, a pair of glasses designed at Carnegie Melon consisting of camouflage patterns or simply wearing a box over your head. Some of them are realistic, but most are just for fun\n\nAs these techniques improve, so does the technology capable of identifying and training against them resulting in more robust face recognition software. These solutions seem like a great way to protect individuals who are looking to keep their identity hidden for nefarious reasons.\nA further concern would be detecting an attempt to impersonate an individual by using a picture of their face. With 3D cameras, this type of hack is very difficult to achieve due to the complexity of depth measurements. Facial recognition technology can run liveness detection or use additional hardware for heat sensing, both of which are capable of being tricked to a degree as seen with Samsung and Google.\n\nThe availability of public facial images is far more concerning due to the ease of which you would be able to render a 3D model from a small set of images and use virtual reality tools to project a face.\n\nOn the surface, these types of hacks would seem alarming but if gaining access to your phone now is as easy as guessing a 4-digit passcode, society should deem these tolerable risks.\n\nUltimately it is on facial recognition companies and Trueface to mitigate this risk by being more innovative, detecting consistencies in attacks and training the technology to identify when hacks occur. This game will continue to be played and become significantly more sophisticated with the adoption of 3D facial recognition.\n\n'
result1 = happy_tt1.generate_text(text)
string = str(result1)
sentiment_analyzer = pipeline('sentiment-analysis')
result = sentiment_analyzer(string)

def sum_sent(text):
    result1 = happy_tt1.generate_text(text)
    string = str(result1)
    result = sentiment_analyzer(string)  # note: computed but never returned or displayed in this cell

text = 'Smoking is a practice in which a substance is burned and the resulting smoke is typically breathed in to be tasted and absorbed into the bloodstream. \nMost commonly, the substance used is the dried leaves of the tobacco plant, which have been rolled into a small rectangle of rolling paper to create a small, round cylinder called a cigarette. \nSmoking is primarily practised as a route of administration for recreational drug use because the combustion of the dried plant leaves vaporizes and delivers active substances into the lungs where they are rapidly absorbed into the bloodstream and reach bodily tissue. \nIn the case of cigarette smoking these substances are contained in a mixture of aerosol particles and gases and include the pharmacologically active alkaloid nicotine; the vaporization creates heated aerosol and gas into a form that allows inhalation and deep penetration into the lungs where absorption into the bloodstream of the active substances occurs. In some cultures, smoking is also carried out as a part of various rituals, where participants use it to help induce trance-like states that, they believe, can lead them to spiritual enlightenment.\n\n'

def summarize_sentiment(text):
    # Extractive summary by normalized word frequency, then sentiment on the summary
    stopwords = list(STOP_WORDS)
    nlp = spacy.load('en_core_web_sm')
    doc = nlp(text)
    tokens = [token.text for token in doc]
    word_freq = {}
    for word in doc:
        if word.text.lower() not in stopwords:
            if word.text.lower() not in punctuation:
                if word.text not in word_freq.keys():
                    word_freq[word.text] = 1
                else:
                    word_freq[word.text] += 1
    max_freq = max(word_freq.values())
    for word in word_freq.keys():
        word_freq[word] = word_freq[word] / max_freq
    sentence_tok = [sent for sent in doc.sents]
    sent_scores = {}
    for sent in sentence_tok:
        for word in sent:
            if word.text.lower() in word_freq.keys():
                if sent not in sent_scores.keys():
                    sent_scores[sent] = word_freq[word.text.lower()]
                else:
                    sent_scores[sent] += word_freq[word.text.lower()]
    select_len = int(len(sentence_tok) * 0.3)
    summary = nlargest(select_len, sent_scores, key=sent_scores.get)
    final_sum = [word.text for word in summary]
    summary = ' '.join(final_sum)
    result = sentiment_analyzer(summary)  # note: computed but never returned or displayed in this cell

text = 'Have you ever found yourself dwelling on an insult or fixating on your mistakes? Criticisms often have a greater impact than compliments, and bad news frequently draws more attention than good.\n\nThe reason for this is that negative events have a greater impact on our brains than positive ones. \nPsychologists refer to this as the negative bias (also called the negativity bias), and it can have a powerful effect on your behavior, your decisions, and even your relationships.\nThe negative bias is our tendency not only to register negative stimuli more readily but also to dwell on these events. Also known as positive-negative asymmetry, this negativity bias means that we feel the sting of a rebuke more powerfully than we feel the joy of praise.\n\nThis psychological phenomenon explains why bad first impressions can be so difficult to overcome and why past traumas can have such long lingering effects. In almost any interaction, we are more likely to notice negative things and later remember them more vividly.1\ufeff\n\nAs humans, we tend to:\n\nRemember traumatic experiences better than positive ones.\nRecall insults better than praise.\nReact more strongly to negative stimuli.\nThink about negative things more frequently than positive ones.\nRespond more strongly to negative events than to equally positive ones.\nFor example, you might be having a great day at work when a coworker makes an offhand\ncomment that you find irritating. \nYou then find yourself stewing over his words for the rest of the workday.'
summarize_sentiment(text)
text = 'Friendship is a bond between two or more people. These people are called friends. Friends need each other for fun and help. Without friendship, life would be lonely and sad. Friendship is like happiness gifted to us by God. We can share so many things with friends. We can share joy, sorrow, and your feelings. Just being with friends feels so good.\n\nFriends support you and give you advice. Friends will always be there for you in need. You can trust them and have faith in them. They never judge you or put you down. If a friendship is a garden, friends are flowers. Together they make your life smell like flowers. We must always hold onto friendship and friends.\n\n'
summarize_sentiment(text)
text = "Nearly all great ideas follow a similar creative process and this article explains how this process works. Understanding this is important because creative thinking is one of the most useful skills you can possess. Nearly every problem you face in work and in life can benefit from innovative solutions, lateral thinking, and creative ideas.\nAnyone can learn to be creative by using these five steps. That's not to say being creative is easy. Uncovering your creative genius requires courage and tons of practice. However, this five-step approach should help demystify the creative process and illuminate the path to more innovative thinking.\nTo explain how this process works, let me tell you a short story.\nA Problem in Need of a Creative Solution\nIn the 1870s, newspapers and printers faced a very specific and very costly problem. Photography was a new and exciting medium at the time. Readers wanted to see more pictures, but nobody could figure out how to print images quickly and cheaply.\nFor example, if a newspaper wanted to print an image in the 1870s, they had to commission an engraver to etch a copy of the photograph onto a steel plate by hand. These plates were used to press the image onto the page, but they often broke after just a few uses. This process of photoengraving, you can imagine, was remarkably time consuming and expensive.\nThe man who invented a solution to this problem was named Frederic Eugene Ives. He went on to become a trailblazer in the field of photography and held over 70 patents by the end of his career. His story of creativity and innovation, which I will share now, is a useful case study for understanding the 5 key steps of the creative process.\nA Flash of Insight\nIves got his start as a printer’s apprentice in Ithaca, New York. After two years of learning the ins and outs of the printing process, he began managing the photographic laboratory at nearby Cornell University. He spent the rest of the decade experimenting with new photography techniques and learning about cameras, printers, and optics.\nIn 1881, Ives had a flash of insight regarding a better printing technique.\n“While operating my photostereotype process in Ithaca, I studied the problem of halftone process,” Ives said. “I went to bed one night in a state of brain fog over the problem, and the instant I woke in the morning saw before me, apparently projected on the ceiling, the completely worked out process and equipment in operation.”\nIves quickly translated his vision into reality and patented his printing approach in 1881. He spent the remainder of the decade improving upon it. By 1885, he had developed a simplified process that delivered even better results. The Ives Process, as it came to be known, reduced the cost of printing images by 15x and remained the standard printing technique for the next 80 years.\nAlright, now let's discuss what lessons we can learn from Ives about the creative process.\nIn some circles, Ben Hogan is credited with “inventing practice.”\nHogan was one of the greatest golfers of the 20th century, an accomplishment he achieved through tireless repetition. He simply loved to practice. Hogan said, “I couldn't wait to get up in the morning so I could hit balls. I'd be at the practice tee at the crack of dawn, hit balls for a few hours, then take a break and get right back to it.”\nFor Hogan, every practice session had a purpose. He reportedly spent years breaking down each phase of the golf swing and testing new methods for each segment. The result was near perfection. He developed one of the most finely-tuned golf swings in the history of the game.\nHis precision made him more like a surgeon than a golfer. During the 1953 Masters, for example, Hogan hit the flagstick on back-to-back holes. A few days later, he broke the tournament scoring record.\nAudrey Hepburn was an icon.\nRising to fame in the 1950s, she was one of the greatest actresses of her era. In 1953, Hepburn became the first actress to win an Academy Award, a Golden Globe Award, and a BAFTA Award for a single performance: her leading role in the romantic comedy Roman Holiday.\nEven today, over half a century later, she remains one of just 15 people to earn an “EGOT” by winning all four major entertainment awards: Emmy, Grammy, Oscar, and Tony. By the 1960s, she was averaging more than one new film per year and, by everyone's estimation, she was on a trajectory to be a movie star for decades to come.\nBut then something funny happened: she stopped acting.\nDespite being in her 30s and at the height of her popularity, Hepburn basically stopped appearing in films after 1967. She would perform in television shows or movies just five times during the rest of her life.\nInstead, she switched careers. She spent the next 25 years working tirelessly for UNICEF, the arm of the United Nations that provides food and healthcare to children in war-torn countries. She performed volunteer work throughout Africa, South America, and Asia.\nHepburn's first act was on stage. Her next act was one of service. In December 1992, she was awarded the Presidential Medal of Freedom for her efforts, which is the highest civilian award of the United States.\nWe will return to her story in a moment.\nThe ultimate productivity hack is saying no.\nNot doing something will always be faster than doing it. 
This statement reminds me of the old computer programming saying, “Remember that there is no code faster than no code.”\nThe same philosophy applies in other areas of life. For example, there is no meeting that goes faster than not having a meeting at all.\nThis is not to say you should never attend another meeting, but the truth is that we say yes to many things we don't actually want to do. There are many meetings held that don't need to be held. There is a lot of code written that could be deleted.\nHow often do people ask you to do something and you just reply, “Sure thing.” Three days later, you're overwhelmed by how much is on your to-do list. We become frustrated by our obligations even though we were the ones who said yes to them in the first place.\nIt's worth asking if things are necessary. Many of them are not, and a simple “no” will be more productive than whatever work the most efficient person can muster.\nBut if the benefits of saying no are so obvious, then why do we say yes so often?" summarize_sentiment(text)
code
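A note on the cell above: it feeds str(result1) into the sentiment pipeline, which stringifies the whole result object rather than the generated summary, and it labels the checkpoint 'sshleifer/distilbart-cnn-12-6', a distilled BART summarizer, as 'BERT'. A minimal sketch of the intended summarize-then-score chain, assuming happytransformer's generate_text returns a result object with a .text attribute and that 'BART' is the matching model-type label:

from happytransformer import HappyTextToText
from transformers import pipeline

# 'sshleifer/distilbart-cnn-12-6' is a distilled BART summarizer, so 'BART' is assumed as the type label
happy_tt = HappyTextToText('BART', 'sshleifer/distilbart-cnn-12-6')
sentiment_analyzer = pipeline('sentiment-analysis')

def sum_sent(text):
    result = happy_tt.generate_text(text)
    summary = result.text  # the generated text itself, not the repr of the result object
    return summary, sentiment_analyzer(summary)

summary, sentiment = sum_sent('Friends support you and give you advice. Friends will always be there for you in need.')
print(summary)
print(sentiment)  # e.g. [{'label': 'POSITIVE', 'score': ...}]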
105188272/cell_2
[ "text_plain_output_1.png" ]
!pip install happytransformer
code
105188272/cell_7
[ "text_plain_output_1.png" ]
from happytransformer import HappyTextToText happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6') text = '\nIn light of recent news from Apple, facial recognition technology is now the talk of the town. Ranging from privacy concerns to a curious boyfriend unlocking his girlfriend’s phone while she sleeps. The first commercial application of 3D facial recognition means that biometric technology is advancing at a staggering rate but so is the ability to hack or fool facial recognition tech.\n\nToday, 99% of the infrastructure scattered around the world consists of 2D cameras capable of running advanced facial recognition software and it will likely be years before a physical overhaul to 3D cameras takes place. Hackers, hobbyists, educators and the like have been developing creative ways to either fool or hide from 2D face recognition in a perpetually innovative game of cat and mouse.\n\nThe introduction of 3D recognition will give way to an entirely new thought process of how to beat an incredibly advanced technology, and also raise some questions as to how and by whom it will be used. The fight is futile as the effort exerted to trick this technology is better spent working with the facial recognition companies to enforce acceptable regulations.\n\nFirst, I question the intention of these types of face recognition-fooling solutions. Are they for bad actors hoping to sneak away undetected? Are they for people who are concerned about their picture being stored in a massive database (I’d worry more about posting a picture to Facebook…)? Are they to demonstrate the inherent risk of biometric information?\n\nThe variations that humans have designed to deceive the machine are more likely to make you stand out than blend in. Such as custom makeup, hairstyles, masks, a pair of glasses designed at Carnegie Melon consisting of camouflage patterns or simply wearing a box over your head. Some of them are realistic, but most are just for fun\n\nAs these techniques improve, so does the technology capable of identifying and training against them resulting in more robust face recognition software. These solutions seem like a great way to protect individuals who are looking to keep their identity hidden for nefarious reasons.\nA further concern would be detecting an attempt to impersonate an individual by using a picture of their face. With 3D cameras, this type of hack is very difficult to achieve due to the complexity of depth measurements. Facial recognition technology can run liveness detection or use additional hardware for heat sensing, both of which are capable of being tricked to a degree as seen with Samsung and Google.\n\nThe availability of public facial images is far more concerning due to the ease of which you would be able to render a 3D model from a small set of images and use virtual reality tools to project a face.\n\nOn the surface, these types of hacks would seem alarming but if gaining access to your phone now is as easy as guessing a 4-digit passcode, society should deem these tolerable risks.\n\nUltimately it is on facial recognition companies and Trueface to mitigate this risk by being more innovative, detecting consistencies in attacks and training the technology to identify when hacks occur. This game will continue to be played and become significantly more sophisticated with the adoption of 3D facial recognition.\n\n' result1 = happy_tt1.generate_text(text) print(result1)
code
105188272/cell_18
[ "text_plain_output_1.png" ]
from happytransformer import HappyTextToText from transformers import pipeline happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6') text = '\nIn light of recent news from Apple, facial recognition technology is now the talk of the town. Ranging from privacy concerns to a curious boyfriend unlocking his girlfriend’s phone while she sleeps. The first commercial application of 3D facial recognition means that biometric technology is advancing at a staggering rate but so is the ability to hack or fool facial recognition tech.\n\nToday, 99% of the infrastructure scattered around the world consists of 2D cameras capable of running advanced facial recognition software and it will likely be years before a physical overhaul to 3D cameras takes place. Hackers, hobbyists, educators and the like have been developing creative ways to either fool or hide from 2D face recognition in a perpetually innovative game of cat and mouse.\n\nThe introduction of 3D recognition will give way to an entirely new thought process of how to beat an incredibly advanced technology, and also raise some questions as to how and by whom it will be used. The fight is futile as the effort exerted to trick this technology is better spent working with the facial recognition companies to enforce acceptable regulations.\n\nFirst, I question the intention of these types of face recognition-fooling solutions. Are they for bad actors hoping to sneak away undetected? Are they for people who are concerned about their picture being stored in a massive database (I’d worry more about posting a picture to Facebook…)? Are they to demonstrate the inherent risk of biometric information?\n\nThe variations that humans have designed to deceive the machine are more likely to make you stand out than blend in. Such as custom makeup, hairstyles, masks, a pair of glasses designed at Carnegie Melon consisting of camouflage patterns or simply wearing a box over your head. Some of them are realistic, but most are just for fun\n\nAs these techniques improve, so does the technology capable of identifying and training against them resulting in more robust face recognition software. These solutions seem like a great way to protect individuals who are looking to keep their identity hidden for nefarious reasons.\nA further concern would be detecting an attempt to impersonate an individual by using a picture of their face. With 3D cameras, this type of hack is very difficult to achieve due to the complexity of depth measurements. Facial recognition technology can run liveness detection or use additional hardware for heat sensing, both of which are capable of being tricked to a degree as seen with Samsung and Google.\n\nThe availability of public facial images is far more concerning due to the ease of which you would be able to render a 3D model from a small set of images and use virtual reality tools to project a face.\n\nOn the surface, these types of hacks would seem alarming but if gaining access to your phone now is as easy as guessing a 4-digit passcode, society should deem these tolerable risks.\n\nUltimately it is on facial recognition companies and Trueface to mitigate this risk by being more innovative, detecting consistencies in attacks and training the technology to identify when hacks occur. 
This game will continue to be played and become significantly more sophisticated with the adoption of 3D facial recognition.\n\n' result1 = happy_tt1.generate_text(text) string = str(result1) sentiment_analyzer = pipeline('sentiment-analysis') result = sentiment_analyzer(string) def sum_sent(text): result1 = happy_tt1.generate_text(text) string = str(result1) result = sentiment_analyzer(string) text1 = 'A friend means so much to us. And we enjoy being with our friends. \nTrue friends are hard to come by. True friends love each other through thick and thin. \nThey share with and care for each other at all times. The bond between true friends is so strong \nthat it lasts through life.Having a true friend is a real gift.' sum_sent(text1)
code
105188272/cell_15
[ "text_plain_output_1.png" ]
from happytransformer import HappyTextToText from transformers import pipeline happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6') text = '\nIn light of recent news from Apple, facial recognition technology is now the talk of the town. Ranging from privacy concerns to a curious boyfriend unlocking his girlfriend’s phone while she sleeps. The first commercial application of 3D facial recognition means that biometric technology is advancing at a staggering rate but so is the ability to hack or fool facial recognition tech.\n\nToday, 99% of the infrastructure scattered around the world consists of 2D cameras capable of running advanced facial recognition software and it will likely be years before a physical overhaul to 3D cameras takes place. Hackers, hobbyists, educators and the like have been developing creative ways to either fool or hide from 2D face recognition in a perpetually innovative game of cat and mouse.\n\nThe introduction of 3D recognition will give way to an entirely new thought process of how to beat an incredibly advanced technology, and also raise some questions as to how and by whom it will be used. The fight is futile as the effort exerted to trick this technology is better spent working with the facial recognition companies to enforce acceptable regulations.\n\nFirst, I question the intention of these types of face recognition-fooling solutions. Are they for bad actors hoping to sneak away undetected? Are they for people who are concerned about their picture being stored in a massive database (I’d worry more about posting a picture to Facebook…)? Are they to demonstrate the inherent risk of biometric information?\n\nThe variations that humans have designed to deceive the machine are more likely to make you stand out than blend in. Such as custom makeup, hairstyles, masks, a pair of glasses designed at Carnegie Melon consisting of camouflage patterns or simply wearing a box over your head. Some of them are realistic, but most are just for fun\n\nAs these techniques improve, so does the technology capable of identifying and training against them resulting in more robust face recognition software. These solutions seem like a great way to protect individuals who are looking to keep their identity hidden for nefarious reasons.\nA further concern would be detecting an attempt to impersonate an individual by using a picture of their face. With 3D cameras, this type of hack is very difficult to achieve due to the complexity of depth measurements. Facial recognition technology can run liveness detection or use additional hardware for heat sensing, both of which are capable of being tricked to a degree as seen with Samsung and Google.\n\nThe availability of public facial images is far more concerning due to the ease of which you would be able to render a 3D model from a small set of images and use virtual reality tools to project a face.\n\nOn the surface, these types of hacks would seem alarming but if gaining access to your phone now is as easy as guessing a 4-digit passcode, society should deem these tolerable risks.\n\nUltimately it is on facial recognition companies and Trueface to mitigate this risk by being more innovative, detecting consistencies in attacks and training the technology to identify when hacks occur. 
This game will continue to be played and become significantly more sophisticated with the adoption of 3D facial recognition.\n\n' result1 = happy_tt1.generate_text(text) string = str(result1) sentiment_analyzer = pipeline('sentiment-analysis') result = sentiment_analyzer(string) print(result)
code
105188272/cell_24
[ "text_plain_output_1.png" ]
from happytransformer import HappyTextToText from heapq import nlargest from spacy.lang.en.stop_words import STOP_WORDS from string import punctuation from transformers import pipeline import spacy happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6') text = '\nIn light of recent news from Apple, facial recognition technology is now the talk of the town. Ranging from privacy concerns to a curious boyfriend unlocking his girlfriend’s phone while she sleeps. The first commercial application of 3D facial recognition means that biometric technology is advancing at a staggering rate but so is the ability to hack or fool facial recognition tech.\n\nToday, 99% of the infrastructure scattered around the world consists of 2D cameras capable of running advanced facial recognition software and it will likely be years before a physical overhaul to 3D cameras takes place. Hackers, hobbyists, educators and the like have been developing creative ways to either fool or hide from 2D face recognition in a perpetually innovative game of cat and mouse.\n\nThe introduction of 3D recognition will give way to an entirely new thought process of how to beat an incredibly advanced technology, and also raise some questions as to how and by whom it will be used. The fight is futile as the effort exerted to trick this technology is better spent working with the facial recognition companies to enforce acceptable regulations.\n\nFirst, I question the intention of these types of face recognition-fooling solutions. Are they for bad actors hoping to sneak away undetected? Are they for people who are concerned about their picture being stored in a massive database (I’d worry more about posting a picture to Facebook…)? Are they to demonstrate the inherent risk of biometric information?\n\nThe variations that humans have designed to deceive the machine are more likely to make you stand out than blend in. Such as custom makeup, hairstyles, masks, a pair of glasses designed at Carnegie Melon consisting of camouflage patterns or simply wearing a box over your head. Some of them are realistic, but most are just for fun\n\nAs these techniques improve, so does the technology capable of identifying and training against them resulting in more robust face recognition software. These solutions seem like a great way to protect individuals who are looking to keep their identity hidden for nefarious reasons.\nA further concern would be detecting an attempt to impersonate an individual by using a picture of their face. With 3D cameras, this type of hack is very difficult to achieve due to the complexity of depth measurements. Facial recognition technology can run liveness detection or use additional hardware for heat sensing, both of which are capable of being tricked to a degree as seen with Samsung and Google.\n\nThe availability of public facial images is far more concerning due to the ease of which you would be able to render a 3D model from a small set of images and use virtual reality tools to project a face.\n\nOn the surface, these types of hacks would seem alarming but if gaining access to your phone now is as easy as guessing a 4-digit passcode, society should deem these tolerable risks.\n\nUltimately it is on facial recognition companies and Trueface to mitigate this risk by being more innovative, detecting consistencies in attacks and training the technology to identify when hacks occur. 
This game will continue to be played and become significantly more sophisticated with the adoption of 3D facial recognition.\n\n' result1 = happy_tt1.generate_text(text) string = str(result1) sentiment_analyzer = pipeline('sentiment-analysis') result = sentiment_analyzer(string) def sum_sent(text): result1 = happy_tt1.generate_text(text) string = str(result1) result = sentiment_analyzer(string) text = 'Smoking is a practice in which a substance is burned and the resulting smoke is typically breathed in to be tasted and absorbed into the bloodstream. \nMost commonly, the substance used is the dried leaves of the tobacco plant, which have been rolled into a small rectangle of rolling paper to create a small, round cylinder called a cigarette. \nSmoking is primarily practised as a route of administration for recreational drug use because the combustion of the dried plant leaves vaporizes and delivers active substances into the lungs where they are rapidly absorbed into the bloodstream and reach bodily tissue. \nIn the case of cigarette smoking these substances are contained in a mixture of aerosol particles and gases and include the pharmacologically active alkaloid nicotine; the vaporization creates heated aerosol and gas into a form that allows inhalation and deep penetration into the lungs where absorption into the bloodstream of the active substances occurs. In some cultures, smoking is also carried out as a part of various rituals, where participants use it to help induce trance-like states that, they believe, can lead them to spiritual enlightenment.\n\n' def summarize_sentiment(text): stopwords = list(STOP_WORDS) nlp = spacy.load('en_core_web_sm') doc = nlp(text) tokens = [token.text for token in doc] word_freq = {} for word in doc: if word.text.lower() not in stopwords: if word.text.lower() not in punctuation: if word.text not in word_freq.keys(): word_freq[word.text] = 1 else: word_freq[word.text] += 1 max_freq = max(word_freq.values()) for word in word_freq.keys(): word_freq[word] = word_freq[word] / max_freq sentence_tok = [sent for sent in doc.sents] sent_scores = {} for sent in sentence_tok: for word in sent: if word.text.lower() in word_freq.keys(): if sent not in sent_scores.keys(): sent_scores[sent] = word_freq[word.text.lower()] else: sent_scores[sent] += word_freq[word.text.lower()] select_len = int(len(sentence_tok) * 0.3) summary = nlargest(select_len, sent_scores, key=sent_scores.get) final_sum = [word.text for word in summary] summary = ' '.join(final_sum) result = sentiment_analyzer(summary) text = 'Have you ever found yourself dwelling on an insult or fixating on your mistakes? Criticisms often have a greater impact than compliments, and bad news frequently draws more attention than good.\nThe reason for this is that negative events have a greater impact on our brains than positive ones. \nPsychologists refer to this as the negative bias (also called the negativity bias), and it can have a powerful effect on your behavior, your decisions, and even your relationships.\nThe negative bias is our tendency not only to register negative stimuli more readily but also to dwell on these events. Also known as positive-negative asymmetry, this negativity bias means that we feel the sting of a rebuke more powerfully than we feel the joy of praise.\nThis psychological phenomenon explains why bad first impressions can be so difficult to overcome and why past traumas can have such long lingering effects. 
In almost any interaction, we are more likely to notice negative things and later remember them more vividly.1\ufeff\nAs humans, we tend to:\nRemember traumatic experiences better than positive ones.\nRecall insults better than praise.\nReact more strongly to negative stimuli.\nThink about negative things more frequently than positive ones.\nRespond more strongly to negative events than to equally positive ones.\nFor example, you might be having a great day at work when a coworker makes an offhand\ncomment that you find irritating. \nYou then find yourself stewing over his words for the rest of the workday.' summarize_sentiment(text)
code
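The summarize_sentiment function above is frequency-based extractive summarization: every sentence is scored by the sum of its words' normalized frequencies and the top 30% of sentences are kept. A condensed sketch of that scoring step on a toy document, assuming the same 'en_core_web_sm' spaCy model is installed; the example text is illustrative only:

from heapq import nlargest
from string import punctuation

import spacy
from spacy.lang.en.stop_words import STOP_WORDS

nlp = spacy.load('en_core_web_sm')
doc = nlp('Dogs are loyal. Dogs love walks. Cats sleep a lot.')

# normalized word frequencies, skipping stop words and punctuation
freq = {}
for tok in doc:
    w = tok.text.lower()
    if w not in STOP_WORDS and w not in punctuation:
        freq[w] = freq.get(w, 0) + 1
top = max(freq.values())
freq = {w: f / top for w, f in freq.items()}

# each sentence scores the sum of its words' frequencies; keep the single best one
scores = {sent: sum(freq.get(t.text.lower(), 0) for t in sent) for sent in doc.sents}
print(nlargest(1, scores, key=scores.get))  # prints the highest-scoring sentence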
105188272/cell_10
[ "text_plain_output_1.png" ]
from happytransformer import HappyTextToText happy_tt1 = HappyTextToText('BERT', 'sshleifer/distilbart-cnn-12-6') text = '\nIn light of recent news from Apple, facial recognition technology is now the talk of the town. Ranging from privacy concerns to a curious boyfriend unlocking his girlfriend’s phone while she sleeps. The first commercial application of 3D facial recognition means that biometric technology is advancing at a staggering rate but so is the ability to hack or fool facial recognition tech.\n\nToday, 99% of the infrastructure scattered around the world consists of 2D cameras capable of running advanced facial recognition software and it will likely be years before a physical overhaul to 3D cameras takes place. Hackers, hobbyists, educators and the like have been developing creative ways to either fool or hide from 2D face recognition in a perpetually innovative game of cat and mouse.\n\nThe introduction of 3D recognition will give way to an entirely new thought process of how to beat an incredibly advanced technology, and also raise some questions as to how and by whom it will be used. The fight is futile as the effort exerted to trick this technology is better spent working with the facial recognition companies to enforce acceptable regulations.\n\nFirst, I question the intention of these types of face recognition-fooling solutions. Are they for bad actors hoping to sneak away undetected? Are they for people who are concerned about their picture being stored in a massive database (I’d worry more about posting a picture to Facebook…)? Are they to demonstrate the inherent risk of biometric information?\n\nThe variations that humans have designed to deceive the machine are more likely to make you stand out than blend in. Such as custom makeup, hairstyles, masks, a pair of glasses designed at Carnegie Melon consisting of camouflage patterns or simply wearing a box over your head. Some of them are realistic, but most are just for fun\n\nAs these techniques improve, so does the technology capable of identifying and training against them resulting in more robust face recognition software. These solutions seem like a great way to protect individuals who are looking to keep their identity hidden for nefarious reasons.\nA further concern would be detecting an attempt to impersonate an individual by using a picture of their face. With 3D cameras, this type of hack is very difficult to achieve due to the complexity of depth measurements. Facial recognition technology can run liveness detection or use additional hardware for heat sensing, both of which are capable of being tricked to a degree as seen with Samsung and Google.\n\nThe availability of public facial images is far more concerning due to the ease of which you would be able to render a 3D model from a small set of images and use virtual reality tools to project a face.\n\nOn the surface, these types of hacks would seem alarming but if gaining access to your phone now is as easy as guessing a 4-digit passcode, society should deem these tolerable risks.\n\nUltimately it is on facial recognition companies and Trueface to mitigate this risk by being more innovative, detecting consistencies in attacks and training the technology to identify when hacks occur. This game will continue to be played and become significantly more sophisticated with the adoption of 3D facial recognition.\n\n' result1 = happy_tt1.generate_text(text) string = str(result1) len(string)
code
34146352/cell_30
[ "text_html_output_1.png" ]
from pandasql import sqldf import pandas as pd import plotly.express as px df_villagers = pd.read_csv('../input/animal-crossing/villagers.csv', encoding='utf-8') df_villagers.drop(columns=['id', 'row_n', 'phrase', 'full_id', 'url']) species = sqldf("SELECT species, COUNT(species) AS size FROM df_villagers GROUP BY species ORDER BY size DESC") pie = px.pie(species, values='size', names='species', title='Villager Species', color_discrete_sequence=px.colors.qualitative.Dark24,) pie.show() barh = px.bar(species, x="size", y="species", orientation='h', color="size", title='Villager Species') barh.update_layout( autosize=False, height=800, ) barh.show() gender = sqldf("SELECT gender, COUNT(gender) AS count FROM df_villagers GROUP BY gender ORDER BY count DESC") species_gender = sqldf("SELECT species, gender, COUNT(gender) AS count FROM df_villagers GROUP BY species, gender ORDER BY count ASC") fig = px.pie(gender, values='count', names='gender', title='Gender Breakdown') fig.show() bar = px.bar(species_gender, x="species", y="count", color="gender", title="Gender and Species") bar.show() personality = sqldf("SELECT personality, COUNT(personality) AS count FROM df_villagers GROUP by personality ORDER BY count ASC") species_personality = sqldf("SELECT species, personality, COUNT(personality) AS count FROM df_villagers GROUP BY species, personality") fig = px.pie(personality, values='count', names='personality', title='Personality Types') fig.show() bar = px.bar(species_personality, x="count", y="species", color="personality", orientation='h', title='Personality and Species') bar.update_layout( autosize=False, width=1000, height=1000, ) bar.show() df_villagers.fillna({'song': 'Not available'}, inplace = True) df_villagers.isnull().sum() song = sqldf("SELECT species, personality, song, COUNT(song) AS count FROM df_villagers GROUP by species, personality, song") fig = px.scatter(song, x="species", y="personality", size="count", color="song") fig.show() sign = sqldf("SELECT sign, COUNT(sign) AS count FROM df_villagers GROUP BY sign") sign_personality = sqldf("SELECT sign, personality, COUNT(sign) AS count FROM df_villagers GROUP BY sign, personality") sign_personality_species = sqldf("SELECT species, personality, sign, COUNT(sign) AS count FROM df_villagers GROUP by species, personality, sign") bar = px.bar(sign_personality, x="count", y="sign", color="personality", orientation='h') bar.show() pie = px.pie(sign, values='count', names='sign', title='Horoscope Signs') pie.show() fig = px.scatter(sign_personality_species, x='species', y='sign', size='count', color='personality', title='Horoscope, Personality, Species') fig.update_layout(autosize=False, width=1000, height=1000) fig.show()
code
34146352/cell_33
[ "text_html_output_1.png" ]
import pandas as pd df_villagers = pd.read_csv('../input/animal-crossing/villagers.csv', encoding='utf-8') df_items = pd.read_csv('../input/animal-crossing/items.csv', encoding='utf-8') df_items.head() df_items.drop(columns=['num_id', 'id', 'orderable', 'sources', 'customizable', 'recipe', 'recipe_id', 'games_id', 'id_full', 'image_url'])
code
34146352/cell_6
[ "text_html_output_1.png" ]
import pandas as pd df_villagers = pd.read_csv('../input/animal-crossing/villagers.csv', encoding='utf-8') df_villagers.head() df_villagers.drop(columns=['id', 'row_n', 'phrase', 'full_id', 'url'])
code
34146352/cell_29
[ "text_html_output_2.png", "text_html_output_1.png" ]
from pandasql import sqldf import pandas as pd import plotly.express as px df_villagers = pd.read_csv('../input/animal-crossing/villagers.csv', encoding='utf-8') df_villagers.drop(columns=['id', 'row_n', 'phrase', 'full_id', 'url']) species = sqldf("SELECT species, COUNT(species) AS size FROM df_villagers GROUP BY species ORDER BY size DESC") pie = px.pie(species, values='size', names='species', title='Villager Species', color_discrete_sequence=px.colors.qualitative.Dark24,) pie.show() barh = px.bar(species, x="size", y="species", orientation='h', color="size", title='Villager Species') barh.update_layout( autosize=False, height=800, ) barh.show() gender = sqldf("SELECT gender, COUNT(gender) AS count FROM df_villagers GROUP BY gender ORDER BY count DESC") species_gender = sqldf("SELECT species, gender, COUNT(gender) AS count FROM df_villagers GROUP BY species, gender ORDER BY count ASC") fig = px.pie(gender, values='count', names='gender', title='Gender Breakdown') fig.show() bar = px.bar(species_gender, x="species", y="count", color="gender", title="Gender and Species") bar.show() personality = sqldf("SELECT personality, COUNT(personality) AS count FROM df_villagers GROUP by personality ORDER BY count ASC") species_personality = sqldf("SELECT species, personality, COUNT(personality) AS count FROM df_villagers GROUP BY species, personality") fig = px.pie(personality, values='count', names='personality', title='Personality Types') fig.show() bar = px.bar(species_personality, x="count", y="species", color="personality", orientation='h', title='Personality and Species') bar.update_layout( autosize=False, width=1000, height=1000, ) bar.show() df_villagers.fillna({'song': 'Not available'}, inplace = True) df_villagers.isnull().sum() song = sqldf("SELECT species, personality, song, COUNT(song) AS count FROM df_villagers GROUP by species, personality, song") fig = px.scatter(song, x="species", y="personality", size="count", color="song") fig.show() sign = sqldf('SELECT sign, COUNT(sign) AS count FROM df_villagers GROUP BY sign') sign_personality = sqldf('SELECT sign, personality, COUNT(sign) AS count FROM df_villagers GROUP BY sign, personality') sign_personality_species = sqldf('SELECT species, personality, sign, COUNT(sign) AS count FROM df_villagers GROUP by species, personality, sign') bar = px.bar(sign_personality, x='count', y='sign', color='personality', orientation='h') bar.show()
code
34146352/cell_8
[ "text_html_output_1.png" ]
from pandasql import sqldf
import pandas as pd
import plotly.express as px

# df_villagers comes from an earlier cell; load it here so the queries run standalone
df_villagers = pd.read_csv('../input/animal-crossing/villagers.csv', encoding='utf-8')
species = sqldf('SELECT species, COUNT(species) AS size FROM df_villagers GROUP BY species ORDER BY size DESC')
pie = px.pie(species, values='size', names='species', title='Villager Species', color_discrete_sequence=px.colors.qualitative.Dark24)
pie.show()
barh = px.bar(species, x='size', y='species', orientation='h', color='size', title='Villager Species')
barh.update_layout(autosize=False, height=800)
barh.show()
code
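sqldf is convenient, but it works by copying each referenced DataFrame into an in-memory SQLite database per call, which gets slow when repeated. The same species count can come straight from pandas; a sketch of the equivalent groupby, assuming df_villagers is loaded as in the cell above:

import pandas as pd

df_villagers = pd.read_csv('../input/animal-crossing/villagers.csv', encoding='utf-8')

# equivalent of: SELECT species, COUNT(species) AS size FROM df_villagers GROUP BY species ORDER BY size DESC
species = (
    df_villagers.groupby('species')
    .size()  # row count per group, matching the query's per-species count here
    .reset_index(name='size')
    .sort_values('size', ascending=False)
)
print(species.head())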
34146352/cell_38
[ "text_html_output_2.png", "text_html_output_1.png" ]
from pandasql import sqldf
import pandas as pd
import plotly.express as px

df_villagers = pd.read_csv('../input/animal-crossing/villagers.csv', encoding='utf-8')
df_villagers.drop(columns=['id', 'row_n', 'phrase', 'full_id', 'url'])
# df_items is queried further down; load it here so the cell runs standalone
df_items = pd.read_csv('../input/animal-crossing/items.csv', encoding='utf-8')
species = sqldf('SELECT species, COUNT(species) AS size FROM df_villagers GROUP BY species ORDER BY size DESC')
pie = px.pie(species, values='size', names='species', title='Villager Species', color_discrete_sequence=px.colors.qualitative.Dark24)
pie.show()
barh = px.bar(species, x='size', y='species', orientation='h', color='size', title='Villager Species')
barh.update_layout(autosize=False, height=800)
barh.show()
gender = sqldf('SELECT gender, COUNT(gender) AS count FROM df_villagers GROUP BY gender ORDER BY count DESC')
species_gender = sqldf('SELECT species, gender, COUNT(gender) AS count FROM df_villagers GROUP BY species, gender ORDER BY count ASC')
fig = px.pie(gender, values='count', names='gender', title='Gender Breakdown')
fig.show()
bar = px.bar(species_gender, x='species', y='count', color='gender', title='Gender and Species')
bar.show()
personality = sqldf('SELECT personality, COUNT(personality) AS count FROM df_villagers GROUP BY personality ORDER BY count ASC')
species_personality = sqldf('SELECT species, personality, COUNT(personality) AS count FROM df_villagers GROUP BY species, personality')
fig = px.pie(personality, values='count', names='personality', title='Personality Types')
fig.show()
bar = px.bar(species_personality, x='count', y='species', color='personality', orientation='h', title='Personality and Species')
bar.update_layout(autosize=False, width=1000, height=1000)
bar.show()
df_villagers.fillna({'song': 'Not available'}, inplace=True)
df_villagers.isnull().sum()
song = sqldf('SELECT species, personality, song, COUNT(song) AS count FROM df_villagers GROUP BY species, personality, song')
fig = px.scatter(song, x='species', y='personality', size='count', color='song')
fig.show()
sign = sqldf('SELECT sign, COUNT(sign) AS count FROM df_villagers GROUP BY sign')
sign_personality = sqldf('SELECT sign, personality, COUNT(sign) AS count FROM df_villagers GROUP BY sign, personality')
sign_personality_species = sqldf('SELECT species, personality, sign, COUNT(sign) AS count FROM df_villagers GROUP BY species, personality, sign')
bar = px.bar(sign_personality, x='count', y='sign', color='personality', orientation='h')
bar.show()
pie = px.pie(sign, values='count', names='sign', title='Horoscope Signs')
pie.show()
fig = px.scatter(sign_personality_species, x='species', y='sign', size='count', color='personality', title='Horoscope, Personality, Species')
fig.update_layout(autosize=False, width=1000, height=1000)
fig.show()
categories = sqldf('SELECT category, COUNT(category) AS count FROM df_items GROUP BY category ORDER BY count DESC')
fig = px.pie(categories, values='count', names='category', title='Item Categories')
fig.update_layout(autosize=False, width=800, height=500)
fig.show()
barh = px.bar(categories, x='count', y='category', barmode='relative', orientation='h', title='Item Categories')
barh.show()
resale = sqldf('SELECT category, sell_value, buy_value FROM df_items')
resale = resale.dropna()
resale['resale'] = resale['sell_value'] / resale['buy_value'] * 100
resale_categories = sqldf('SELECT category, AVG(resale) AS avg_resale FROM resale GROUP BY category ORDER BY avg_resale DESC')
resale_categories
bar = px.bar(resale_categories, x='category', y='avg_resale', color='avg_resale', title='Average Resale Value by Item Category')
bar.update_layout(autosize=False, width=1000, height=1000)
bar.show()
code
34146352/cell_35
[ "text_html_output_2.png", "text_html_output_1.png" ]
from pandasql import sqldf
import pandas as pd
import plotly.express as px

df_villagers = pd.read_csv('../input/animal-crossing/villagers.csv', encoding='utf-8')
df_villagers.drop(columns=['id', 'row_n', 'phrase', 'full_id', 'url'])
# df_items is queried further down; load it here so the cell runs standalone
df_items = pd.read_csv('../input/animal-crossing/items.csv', encoding='utf-8')
species = sqldf('SELECT species, COUNT(species) AS size FROM df_villagers GROUP BY species ORDER BY size DESC')
pie = px.pie(species, values='size', names='species', title='Villager Species', color_discrete_sequence=px.colors.qualitative.Dark24)
pie.show()
barh = px.bar(species, x='size', y='species', orientation='h', color='size', title='Villager Species')
barh.update_layout(autosize=False, height=800)
barh.show()
gender = sqldf('SELECT gender, COUNT(gender) AS count FROM df_villagers GROUP BY gender ORDER BY count DESC')
species_gender = sqldf('SELECT species, gender, COUNT(gender) AS count FROM df_villagers GROUP BY species, gender ORDER BY count ASC')
fig = px.pie(gender, values='count', names='gender', title='Gender Breakdown')
fig.show()
bar = px.bar(species_gender, x='species', y='count', color='gender', title='Gender and Species')
bar.show()
personality = sqldf('SELECT personality, COUNT(personality) AS count FROM df_villagers GROUP BY personality ORDER BY count ASC')
species_personality = sqldf('SELECT species, personality, COUNT(personality) AS count FROM df_villagers GROUP BY species, personality')
fig = px.pie(personality, values='count', names='personality', title='Personality Types')
fig.show()
bar = px.bar(species_personality, x='count', y='species', color='personality', orientation='h', title='Personality and Species')
bar.update_layout(autosize=False, width=1000, height=1000)
bar.show()
df_villagers.fillna({'song': 'Not available'}, inplace=True)
df_villagers.isnull().sum()
song = sqldf('SELECT species, personality, song, COUNT(song) AS count FROM df_villagers GROUP BY species, personality, song')
fig = px.scatter(song, x='species', y='personality', size='count', color='song')
fig.show()
sign = sqldf('SELECT sign, COUNT(sign) AS count FROM df_villagers GROUP BY sign')
sign_personality = sqldf('SELECT sign, personality, COUNT(sign) AS count FROM df_villagers GROUP BY sign, personality')
sign_personality_species = sqldf('SELECT species, personality, sign, COUNT(sign) AS count FROM df_villagers GROUP BY species, personality, sign')
bar = px.bar(sign_personality, x='count', y='sign', color='personality', orientation='h')
bar.show()
pie = px.pie(sign, values='count', names='sign', title='Horoscope Signs')
pie.show()
fig = px.scatter(sign_personality_species, x='species', y='sign', size='count', color='personality', title='Horoscope, Personality, Species')
fig.update_layout(autosize=False, width=1000, height=1000)
fig.show()
categories = sqldf('SELECT category, COUNT(category) AS count FROM df_items GROUP BY category ORDER BY count DESC')
fig = px.pie(categories, values='count', names='category', title='Item Categories')
fig.update_layout(autosize=False, width=800, height=500)
fig.show()
barh = px.bar(categories, x='count', y='category', barmode='relative', orientation='h', title='Item Categories')
barh.show()
code
34146352/cell_14
[ "text_html_output_2.png", "text_html_output_1.png" ]
from pandasql import sqldf import pandas as pd import plotly.express as px df_villagers = pd.read_csv('../input/animal-crossing/villagers.csv', encoding='utf-8') df_villagers.drop(columns=['id', 'row_n', 'phrase', 'full_id', 'url']) species = sqldf("SELECT species, COUNT(species) AS size FROM df_villagers GROUP BY species ORDER BY size DESC") pie = px.pie(species, values='size', names='species', title='Villager Species', color_discrete_sequence=px.colors.qualitative.Dark24,) pie.show() barh = px.bar(species, x="size", y="species", orientation='h', color="size", title='Villager Species') barh.update_layout( autosize=False, height=800, ) barh.show() gender = sqldf("SELECT gender, COUNT(gender) AS count FROM df_villagers GROUP BY gender ORDER BY count DESC") species_gender = sqldf("SELECT species, gender, COUNT(gender) AS count FROM df_villagers GROUP BY species, gender ORDER BY count ASC") fig = px.pie(gender, values='count', names='gender', title='Gender Breakdown') fig.show() bar = px.bar(species_gender, x="species", y="count", color="gender", title="Gender and Species") bar.show() personality = sqldf("SELECT personality, COUNT(personality) AS count FROM df_villagers GROUP by personality ORDER BY count ASC") species_personality = sqldf("SELECT species, personality, COUNT(personality) AS count FROM df_villagers GROUP BY species, personality") fig = px.pie(personality, values='count', names='personality', title='Personality Types') fig.show() bar = px.bar(species_personality, x="count", y="species", color="personality", orientation='h', title='Personality and Species') bar.update_layout( autosize=False, width=1000, height=1000, ) bar.show() df_villagers.fillna({'song': 'Not available'}, inplace=True) df_villagers.isnull().sum() song = sqldf('SELECT species, personality, song, COUNT(song) AS count FROM df_villagers GROUP by species, personality, song') fig = px.scatter(song, x='species', y='personality', size='count', color='song') fig.show()
code
34146352/cell_10
[ "text_html_output_1.png" ]
from pandasql import sqldf import plotly.express as px species = sqldf("SELECT species, COUNT(species) AS size FROM df_villagers GROUP BY species ORDER BY size DESC") pie = px.pie(species, values='size', names='species', title='Villager Species', color_discrete_sequence=px.colors.qualitative.Dark24,) pie.show() barh = px.bar(species, x="size", y="species", orientation='h', color="size", title='Villager Species') barh.update_layout( autosize=False, height=800, ) barh.show() gender = sqldf('SELECT gender, COUNT(gender) AS count FROM df_villagers GROUP BY gender ORDER BY count DESC') species_gender = sqldf('SELECT species, gender, COUNT(gender) AS count FROM df_villagers GROUP BY species, gender ORDER BY count ASC') fig = px.pie(gender, values='count', names='gender', title='Gender Breakdown') fig.show() bar = px.bar(species_gender, x='species', y='count', color='gender', title='Gender and Species') bar.show()
code
34146352/cell_37
[ "text_html_output_1.png" ]
from pandasql import sqldf resale = sqldf('SELECT category,sell_value, buy_value from df_items') resale = resale.dropna() resale['resale'] = resale['sell_value'] / resale['buy_value'] * 100 resale_categories = sqldf('SELECT category, AVG(resale) AS avg_resale from resale GROUP BY category ORDER BY avg_resale DESC') resale_categories
code
34146352/cell_12
[ "text_html_output_2.png", "text_html_output_1.png" ]
from pandasql import sqldf
import pandas as pd
import plotly.express as px

# df_villagers comes from an earlier cell; load it here so the queries run standalone
df_villagers = pd.read_csv('../input/animal-crossing/villagers.csv', encoding='utf-8')
species = sqldf("SELECT species, COUNT(species) AS size FROM df_villagers GROUP BY species ORDER BY size DESC")
pie = px.pie(species, values='size', names='species', title='Villager Species', color_discrete_sequence=px.colors.qualitative.Dark24)
pie.show()
barh = px.bar(species, x="size", y="species", orientation='h', color="size", title='Villager Species')
barh.update_layout(autosize=False, height=800)
barh.show()
gender = sqldf("SELECT gender, COUNT(gender) AS count FROM df_villagers GROUP BY gender ORDER BY count DESC")
species_gender = sqldf("SELECT species, gender, COUNT(gender) AS count FROM df_villagers GROUP BY species, gender ORDER BY count ASC")
fig = px.pie(gender, values='count', names='gender', title='Gender Breakdown')
fig.show()
bar = px.bar(species_gender, x="species", y="count", color="gender", title="Gender and Species")
bar.show()
personality = sqldf('SELECT personality, COUNT(personality) AS count FROM df_villagers GROUP BY personality ORDER BY count ASC')
species_personality = sqldf('SELECT species, personality, COUNT(personality) AS count FROM df_villagers GROUP BY species, personality')
fig = px.pie(personality, values='count', names='personality', title='Personality Types')
fig.show()
bar = px.bar(species_personality, x='count', y='species', color='personality', orientation='h', title='Personality and Species')
bar.update_layout(autosize=False, width=1000, height=1000)
bar.show()
code
18128635/cell_4
[ "text_html_output_1.png" ]
import numpy as np
import os
import pandas as pd
import pandas_profiling as pp

pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_colwidth', None)

# reduce-memory function -> https://www.kaggle.com/cttsai
def reduce_mem_usage(df, verbose=True):
    start_mem = df.memory_usage().sum() / 1024**2
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
        # RCO - added to convert yes/no-style flags into 1 and 0
        elif col_type == 'object':
            flag_map = {'Y': 1, 'N': 0, 'Yes': 1, 'No': 0, 'Sim': 1, 'Não': 0, 'Verdadeiro': 1, 'Falso': 0}
            if not df[col].isna().any() and set(df[col].unique()).issubset(flag_map):
                df[col] = df[col].map(flag_map).astype('int8')
    end_mem = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
        print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df

def import_merge(tipo, nr=None):
    trans = pd.read_csv(DATA_FOLDER + tipo + '_transaction.csv', nrows=nr)
    ident = pd.read_csv(DATA_FOLDER + tipo + '_identity.csv', nrows=nr)
    ident['id'] = 1  # flags which rows find a match in the merge
    df = trans.merge(ident, how='left')
    df = reduce_mem_usage(df)
    return df

def describe_object(df):
    df1 = pd.DataFrame()
    for col in df.select_dtypes(include='object').columns:  # text (categorical) columns only
        item = df[col].dropna().unique()  # list of unique values in the column
        nulo = df[col].isna().sum()  # number of null values
        pnulo = nulo / len(df[col].index)  # % of null values
        dic = {'ind': col, 'lista': [item], 'nulos': nulo, '%nulo': pnulo}  # build a dict
        df2 = pd.DataFrame(dic)  # turn it into a DataFrame
        df1 = pd.concat([df1, df2])  # append each column as a new row
    df1.set_index('ind', drop=True, inplace=True)  # index by variable name
    df3 = df.describe(include='object').T
    df3['%freq'] = df3['freq'].div(df3['count']).astype(np.float64).round(4)  # add the mode's relative frequency
    df4 = df3.merge(df1, left_index=True, right_index=True)  # merge with pandas' describe
    return df4

DATA_FOLDER = '../input/'
print(os.listdir(DATA_FOLDER))
df_train = import_merge('train', 10000)
describe_object(df_train)
code
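reduce_mem_usage works by comparing each numeric column's min and max against the ranges of progressively smaller dtypes and keeping the smallest one that fits. A toy sketch of the effect, reusing the reduce_mem_usage defined above; the DataFrame and column names are illustrative only:

import numpy as np
import pandas as pd

demo = pd.DataFrame({
    'small_int': np.random.randint(0, 100, 100000),   # values fit the int8 range
    'big_float': np.random.rand(100000) * 1e9,        # exceeds float16's range, lands in float32
    'flag': np.random.choice(['Y', 'N'], 100000),     # handled by the object branch, mapped to int8
})
print('{:.2f} MB before'.format(demo.memory_usage().sum() / 1024**2))
demo = reduce_mem_usage(demo)
print(demo.dtypes)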
18128635/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import os
import pandas as pd
import pandas_profiling as pp

pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_colwidth', None)

def reduce_mem_usage(df, verbose=True):
    start_mem = df.memory_usage().sum() / 1024 ** 2
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            elif c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                df[col] = df[col].astype(np.float16)
            elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                df[col] = df[col].astype(np.float32)
            else:
                df[col] = df[col].astype(np.float64)
        elif col_type == 'object':
            flag_map = {'Y': 1, 'N': 0, 'Yes': 1, 'No': 0, 'Sim': 1, 'Não': 0, 'Verdadeiro': 1, 'Falso': 0}
            if not df[col].isna().any() and set(df[col].unique()).issubset(flag_map):
                df[col] = df[col].map(flag_map).astype('int8')
    end_mem = df.memory_usage().sum() / 1024 ** 2
    if verbose:
        print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
        print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df

def import_merge(tipo, nr=None):
    trans = pd.read_csv(DATA_FOLDER + tipo + '_transaction.csv', nrows=nr)
    ident = pd.read_csv(DATA_FOLDER + tipo + '_identity.csv', nrows=nr)
    ident['id'] = 1
    df = trans.merge(ident, how='left')
    df = reduce_mem_usage(df)
    return df

def describe_object(df):
    df1 = pd.DataFrame()
    for col in df.select_dtypes(include='object').columns:
        item = df[col].dropna().unique()
        nulo = df[col].isna().sum()
        pnulo = nulo / len(df[col].index)
        dic = {'ind': col, 'lista': [item], 'nulos': nulo, '%nulo': pnulo}
        df2 = pd.DataFrame(dic)
        df1 = pd.concat([df1, df2])
    df1.set_index('ind', drop=True, inplace=True)
    df3 = df.describe(include='object').T
    df3['%freq'] = df3['freq'].div(df3['count']).astype(np.float64).round(4)
    df4 = df3.merge(df1, left_index=True, right_index=True)
    return df4

DATA_FOLDER = '../input/'
print(os.listdir(DATA_FOLDER))
code
18128635/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import os
import pandas as pd
import pandas_profiling as pp

pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_colwidth', None)

# reduce-memory function -> https://www.kaggle.com/cttsai
def reduce_mem_usage(df, verbose=True):
    start_mem = df.memory_usage().sum() / 1024**2
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
        # RCO - added to convert yes/no-style flags into 1 and 0
        elif col_type == 'object':
            flag_map = {'Y': 1, 'N': 0, 'Yes': 1, 'No': 0, 'Sim': 1, 'Não': 0, 'Verdadeiro': 1, 'Falso': 0}
            if not df[col].isna().any() and set(df[col].unique()).issubset(flag_map):
                df[col] = df[col].map(flag_map).astype('int8')
    end_mem = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
        print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df

def import_merge(tipo, nr=None):
    trans = pd.read_csv(DATA_FOLDER + tipo + '_transaction.csv', nrows=nr)
    ident = pd.read_csv(DATA_FOLDER + tipo + '_identity.csv', nrows=nr)
    ident['id'] = 1  # flags which rows find a match in the merge
    df = trans.merge(ident, how='left')
    df = reduce_mem_usage(df)
    return df

def describe_object(df):
    df1 = pd.DataFrame()
    for col in df.select_dtypes(include='object').columns:  # text (categorical) columns only
        item = df[col].dropna().unique()  # list of unique values in the column
        nulo = df[col].isna().sum()  # number of null values
        pnulo = nulo / len(df[col].index)  # % of null values
        dic = {'ind': col, 'lista': [item], 'nulos': nulo, '%nulo': pnulo}  # build a dict
        df2 = pd.DataFrame(dic)  # turn it into a DataFrame
        df1 = pd.concat([df1, df2])  # append each column as a new row
    df1.set_index('ind', drop=True, inplace=True)  # index by variable name
    df3 = df.describe(include='object').T
    df3['%freq'] = df3['freq'].div(df3['count']).astype(np.float64).round(4)  # add the mode's relative frequency
    df4 = df3.merge(df1, left_index=True, right_index=True)  # merge with pandas' describe
    return df4

DATA_FOLDER = '../input/'
print(os.listdir(DATA_FOLDER))
df_train = import_merge('train', 10000)
code
2010736/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
sale_price_column = data.SalePrice
two_columns = ['Alley', 'LotShape']
two_columns_data = data[two_columns]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
y = data.SalePrice
predictors = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
x = data[predictors]
model = DecisionTreeRegressor()
model.fit(x, y)
model.predict(x.head())
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_x = test[predictor_cols]
predicted_prices = my_model.predict(test_x)
print(predicted_prices)
code
2010736/cell_4
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
sale_price_column = data.SalePrice
two_columns = ['Alley', 'LotShape']
two_columns_data = data[two_columns]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
y = data.SalePrice
predictors = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
x = data[predictors]
model = DecisionTreeRegressor()
model.fit(x, y)
print(x.head())
model.predict(x.head())
code
2010736/cell_6
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
sale_price_column = data.SalePrice
two_columns = ['Alley', 'LotShape']
two_columns_data = data[two_columns]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
y = data.SalePrice
predictors = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
x = data[predictors]
model = DecisionTreeRegressor()
model.fit(x, y)
model.predict(x.head())
from sklearn.model_selection import train_test_split
train_x, val_x, train_y, val_y = train_test_split(x, y, random_state=0)
data_model = DecisionTreeRegressor()
data_model.fit(train_x, train_y)
val_predictions = data_model.predict(val_x)
print(mean_absolute_error(val_y, val_predictions))
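
# Note: this MAE is computed on held-out validation rows, so it is a more
# honest estimate of generalization error than an in-sample MAE scored on the
# same rows the tree was fit to.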
code
2010736/cell_7
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
sale_price_column = data.SalePrice
two_columns = ['Alley', 'LotShape']
two_columns_data = data[two_columns]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
y = data.SalePrice
predictors = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
x = data[predictors]
model = DecisionTreeRegressor()
model.fit(x, y)
model.predict(x.head())
from sklearn.metrics import mean_absolute_error
predicted_home_prices = model.predict(x)
mean_absolute_error(y, predicted_home_prices)
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(predictors_train, targ_train)
    preds_val = model.predict(predictors_val)
    mae = mean_absolute_error(targ_val, preds_val)
    return mae

for max_leaf_nodes in [5, 50, 75, 80, 85, 100, 500, 5000]:
    my_mae = get_mae(max_leaf_nodes, train_x, val_x, train_y, val_y)
    print('Max leaf nodes: %d \t\t Mean Absolute Error: %d' % (max_leaf_nodes, my_mae))
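
# A minimal added sketch of picking the best tree size from the same search
# (train_x, val_x, train_y and val_y are assumed to exist from the earlier split):
scores = {leaf_size: get_mae(leaf_size, train_x, val_x, train_y, val_y)
          for leaf_size in [5, 50, 75, 80, 85, 100, 500, 5000]}
best_tree_size = min(scores, key=scores.get)
print('Best max_leaf_nodes:', best_tree_size)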
code
2010736/cell_8
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor()
forest_model.fit(train_x, train_y)
preds = forest_model.predict(val_x)
print(mean_absolute_error(val_y, preds))
code
2010736/cell_3
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
print(data.describe())
sale_price_column = data.SalePrice
print(sale_price_column.head())
two_columns = ['Alley', 'LotShape']
two_columns_data = data[two_columns]
print(two_columns_data.describe())
code
2010736/cell_10
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
sale_price_column = data.SalePrice
two_columns = ['Alley', 'LotShape']
two_columns_data = data[two_columns]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
y = data.SalePrice
predictors = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
x = data[predictors]
model = DecisionTreeRegressor()
model.fit(x, y)
model.predict(x.head())
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
train = pd.read_csv('../input/train.csv')
train_y = train.SalePrice
predictor_cols = ['LotArea', 'OverallQual', 'YearBuilt', 'TotRmsAbvGrd']
train_X = train[predictor_cols]
my_model = RandomForestRegressor()
my_model.fit(train_X, train_y)
test = pd.read_csv('../input/test.csv')
test_x = test[predictor_cols]
predicted_prices = my_model.predict(test_x)
submission = pd.DataFrame({'Id': test.Id, 'SalePrice': predicted_prices})  # pd. prefix required; a bare DataFrame is a NameError
submission.to_csv('predictions_submission.csv', index=False)
code
2010736/cell_5
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
sale_price_column = data.SalePrice
two_columns = ['Alley', 'LotShape']
two_columns_data = data[two_columns]
from sklearn.tree import DecisionTreeRegressor
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
y = data.SalePrice
predictors = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
x = data[predictors]
model = DecisionTreeRegressor()
model.fit(x, y)
model.predict(x.head())
from sklearn.metrics import mean_absolute_error
predicted_home_prices = model.predict(x)
mean_absolute_error(y, predicted_home_prices)
code
2007899/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.columns
# Correlations with the target variable
a = train.corr()
b = print(a['SalePrice'].sort_values(ascending=False)[:10], '\n')
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)  # assigned to data, not plt, so the pyplot import is not shadowed
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y='SalePrice', data=data)
fig.axis(ymin=0, ymax=800000)
var = 'YearBuilt'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y='SalePrice', data=data)
fig.axis(ymin=0, ymax=800000)
missing = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([missing, percent], axis=1, keys=['missing', 'percent'])
sns.set()
cols = ['SalePrice', 'GrLivArea', 'GarageCars', 'GarageArea', 'TotalBsmtSF', '1stFlrSF', 'FullBath', 'YearBuilt']
sns.pairplot(train[cols], size=2.5)
plt.show()
code
2007899/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.columns
# Correlations with the target variable
a = train.corr()
b = print(a['SalePrice'].sort_values(ascending=False)[:10], '\n')
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)  # assigned to data, not plt, so the pyplot import is not shadowed
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000))
code
2007899/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.columns
train['SalePrice'].describe()
code
2007899/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.columns
# Correlations with the target variable
a = train.corr()
b = print(a['SalePrice'].sort_values(ascending=False)[:10], '\n')
sns.distplot(train['SalePrice'])
code
2007899/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.head()
code
2007899/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.columns
# Correlations with the target variable
a = train.corr()
b = print(a['SalePrice'].sort_values(ascending=False)[:10], '\n')
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)  # assigned to data, not plt, so the pyplot import is not shadowed
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y='SalePrice', data=data)
fig.axis(ymin=0, ymax=800000)
var = 'YearBuilt'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y='SalePrice', data=data)
fig.axis(ymin=0, ymax=800000)
code
2007899/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.columns
# Correlations with the target variable
a = train.corr()
b = print(a['SalePrice'].sort_values(ascending=False)[:10], '\n')
print('Skewness: %f' % train['SalePrice'].skew())
print('kurtosis: %f' % train['SalePrice'].kurt())
code
2007899/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.columns
# Correlations with the target variable
a = train.corr()
b = print(a['SalePrice'].sort_values(ascending=False)[:10], '\n')
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)  # assigned to data, not plt, so the pyplot import is not shadowed
data.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000))
code
2007899/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
print(train.dtypes)
train.columns
code
2007899/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.columns
# Correlations with the target variable
a = train.corr()
b = print(a['SalePrice'].sort_values(ascending=False)[:10], '\n')
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)  # assigned to data, not plt, so the pyplot import is not shadowed
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y='SalePrice', data=data)
fig.axis(ymin=0, ymax=800000)
code
2007899/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.columns
# Correlations with the target variable
a = train.corr()
b = print(a['SalePrice'].sort_values(ascending=False)[:10], '\n')
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)  # assigned to data, not plt, so the pyplot import is not shadowed
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y='SalePrice', data=data)
fig.axis(ymin=0, ymax=800000)
var = 'YearBuilt'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y='SalePrice', data=data)
fig.axis(ymin=0, ymax=800000)
missing = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([missing, percent], axis=1, keys=['missing', 'percent'])
missing_data.head(20)
code
2007899/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('C:\\Users\\sreeram\\Desktop\\pythonfiles\\House-Train.csv')
train.columns
a = train.corr()
b = print(a['SalePrice'].sort_values(ascending=False)[:10], '\n')
code
34124852/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/gun-violence-data/gun-violence-data_01-2013_03-2018.csv')
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['date', 'state', 'city_or_county', 'address', 'n_killed', 'n_injured']]
df.isnull().any()
state = df['state'].value_counts()
sns.despine(left=True, right=True)
state = df['city_or_county'].value_counts()[:20]
sns.despine(left=True, right=True)
year_wise = df[['n_killed', 'n_injured']].groupby(df['year']).sum()
density_plot = sns.kdeplot(year_wise['n_killed'], shade=True, color='red')
density_plot = sns.kdeplot(year_wise['n_injured'], shade=True, color='blue')
print(year_wise['n_killed'])
sns.distplot(year_wise['n_killed'], hist=False, rug=True)
sns.countplot(x='month', data=df)
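
# Note: the 'year' and 'month' columns used above are not in the raw CSV; they
# are assumed to have been derived from 'date' in an earlier cell, e.g.:
# df['year'] = pd.to_datetime(df['date']).dt.year
# df['month'] = pd.to_datetime(df['date']).dt.month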
code
34124852/cell_4
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/gun-violence-data/gun-violence-data_01-2013_03-2018.csv')
df.head()
code
34124852/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/gun-violence-data/gun-violence-data_01-2013_03-2018.csv')
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['date', 'state', 'city_or_county', 'address', 'n_killed', 'n_injured']]
df.isnull().any()
code
34124852/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/gun-violence-data/gun-violence-data_01-2013_03-2018.csv')
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['date', 'state', 'city_or_county', 'address', 'n_killed', 'n_injured']]
df.isnull().any()
year_wise_total = df[['incident_id']].groupby(df['year']).count()
top_year = year_wise_total.sort_values(by='incident_id', ascending=False)
print(top_year)
top_year.plot.barh()
del top_year
code
34124852/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34124852/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/gun-violence-data/gun-violence-data_01-2013_03-2018.csv')
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['date', 'state', 'city_or_county', 'address', 'n_killed', 'n_injured']]
df.isnull().any()
plt.figure(figsize=(18, 12))
state = df['state'].value_counts()
sns.barplot(state.values, state.index)
plt.xlabel('Number of incidences', fontsize=15)
plt.ylabel('States', fontsize=15)
plt.title('Gun violence data by state', fontsize=20)  # translated from Russian: 'Данные о насилии и оружии в Штатах'
sns.despine(left=True, right=True)
plt.show()
code
34124852/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/gun-violence-data/gun-violence-data_01-2013_03-2018.csv')
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['date', 'state', 'city_or_county', 'address', 'n_killed', 'n_injured']]
df.isnull().any()
state = df['state'].value_counts()
sns.despine(left=True, right=True)
plt.figure(figsize=(18, 12))
state = df['city_or_county'].value_counts()[:20]
sns.barplot(state.values, state.index)
plt.xlabel('Number of incidences', fontsize=15)
plt.ylabel('cities', fontsize=15)
plt.title('Gun violence data by city', fontsize=20)  # translated from Russian: 'Данные о насилии и оружии в городах'
sns.despine(left=True, right=True)
plt.show()
code
34124852/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/gun-violence-data/gun-violence-data_01-2013_03-2018.csv')
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
train = df[['date', 'state', 'city_or_county', 'address', 'n_killed', 'n_injured']]
df.isnull().any()
state = df['state'].value_counts()
sns.despine(left=True, right=True)
state = df['city_or_county'].value_counts()[:20]
sns.despine(left=True, right=True)
year_wise = df[['n_killed', 'n_injured']].groupby(df['year']).sum()
density_plot = sns.kdeplot(year_wise['n_killed'], shade=True, color='red')
density_plot = sns.kdeplot(year_wise['n_injured'], shade=True, color='blue')
print(year_wise['n_killed'])
sns.distplot(year_wise['n_killed'], hist=False, rug=True)
code
34124852/cell_5
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/gun-violence-data/gun-violence-data_01-2013_03-2018.csv')
p_null = (len(df) - df.count()) * 100.0 / len(df)
p_null
code
73064431/cell_4
[ "text_plain_output_1.png" ]
import json
output_file = open('data.txt', 'w')
output_file.write('Hello World!')
output_file.write('\n')
output_file.write('Goodbye cruel world...')
output_file.write('\n')
output_file.close()
line_list = [['ID', 'NAME', 'PRICE', 'DESCRIPTION', 'PHOTO_URL'],
             ['1003', 'Meat Lovers', '39.99', 'All the meats!!!', 'http://www.example.com/photos/img_1001.png'],
             ['1004', 'Veggie Delight', '39.99', 'All the veg!!!', 'http://www.example.com/photos/img_1002.png']]
file_name = 'products.csv'
output_file = open(file_name, 'w')
for line in line_list:
    text = ','.join(line)
    output_file.write(text + '\n')
output_file.close()
file_name = 'products.csv'
input_file = open(file_name, 'r')
line_list = input_file.readlines()
file_name = 'products.csv'
input_file = open(file_name, 'r')
line_list = input_file.readlines()
ID_FIELD = 0
NAME_FIELD = 1
menu = {}
index = 0
for line in line_list:
    if index > 0:  # skip the header row
        parts = line.split(',')
        menu[parts[ID_FIELD]] = parts
        print(parts[NAME_FIELD])
    index += 1
import json
print(json.dumps(menu, indent=4))
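
# A minimal added sketch of the same parse using the standard csv module
# instead of manual splitting (reads the products.csv written above):
import csv
with open('products.csv', 'r') as csv_file:
    reader = csv.DictReader(csv_file)  # the header row supplies the field names
    menu_by_id = {row['ID']: row for row in reader}
print(json.dumps(menu_by_id, indent=4))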
code
73064431/cell_3
[ "text_plain_output_1.png" ]
output_file = open('data.txt', 'w')
output_file.write('Hello World!')
output_file.write('\n')
output_file.write('Goodbye cruel world...')
output_file.write('\n')
output_file.close()
line_list = [['ID', 'NAME', 'PRICE', 'DESCRIPTION', 'PHOTO_URL'],
             ['1003', 'Meat Lovers', '39.99', 'All the meats!!!', 'http://www.example.com/photos/img_1001.png'],
             ['1004', 'Veggie Delight', '39.99', 'All the veg!!!', 'http://www.example.com/photos/img_1002.png']]
file_name = 'products.csv'
output_file = open(file_name, 'w')
for line in line_list:
    text = ','.join(line)
    output_file.write(text + '\n')
output_file.close()
file_name = 'products.csv'
input_file = open(file_name, 'r')
line_list = input_file.readlines()
for line in line_list:
    print(line)
code
17105701/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
plt.clf()
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy[np.logical_and(happy['Corruption'] > 140, happy['Freedom'] > 100)]
code
17105701/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
happy.Corruption.plot(kind='hist', bins=50)
plt.clf()
code
17105701/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
code
17105701/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
plt.clf()
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
x = happy['Corruption'] > 140
happy[x]
x = 2

def f():
    x = 3
    return x
print(x)
print(f())
x = 5

def f():
    y = 2 * x
    return y
print(f())
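
# Expected output of the scope demo above: 2 (the global x), 3 (f's local x),
# then 10 (the second f reads the enclosing global x, now 5, so 2 * 5).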
code
17105701/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
code
17105701/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
plt.clf()
dictionary = {'spain': 'madrid', 'usa': 'vegas'}
dictionary['spain'] = 'barcelona'
dictionary['france'] = 'paris'
del dictionary['spain']
dictionary.clear()
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
i = 0
while i != 5:
    i += 1
lis = [1, 2, 3, 4, 5]
for i in lis:
    print('i is: ', i)
print('')
for index, value in enumerate(lis):
    print(index, ' : ', value)
print('')
dictionary = {'spain': 'madrid', 'france': 'paris'}
for key, value in dictionary.items():
    print(key, ' : ', value)
print('')
for index, value in happy[['Corruption']][0:1].iterrows():
    print(index, ' : ', value)
code
17105701/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
plt.clf()
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
x = happy['Corruption'] > 140
happy[x]
code