path | screenshot_names | code | cell_type
---|---|---|---|
17120135/cell_9
|
[
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] |
import pandas as pd
df_main = pd.read_csv('../input/zomato.csv')
df_main.head(1)
|
code
|
17120135/cell_33
|
[
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df_main = pd.read_csv('../input/zomato.csv')
df_loc = df_main['location'].value_counts()[:20]
df_BTM = df_main.loc[df_main['location'] == 'BTM']
df_BTM_REST = df_BTM['rest_type'].value_counts()
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(121)
sns.barplot(x=df_BTM_REST, y=df_BTM_REST.index, ax=ax1)
plt.title('Count of restaurant types in BTM')
plt.xlabel('Count')
plt.ylabel('Restaurant Name')
df_BTM_REST1 = df_BTM_REST[:10]
labels = df_BTM_REST1.index
explode = (0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
df_RATE_BTM = df_BTM[['rate', 'rest_type', 'online_order', 'votes', 'book_table', 'approx_cost(for two people)', 'listed_in(type)', 'listed_in(city)']].dropna()
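# 'rate' is assumed to hold strings such as '3.8/5'; entries of length <= 3
# (placeholders like 'NEW' or '-') fall back to 0 in the parse below, and the
# cost column drops its thousands separator before the cast to int.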
df_RATE_BTM['rate'] = df_RATE_BTM['rate'].apply(lambda x: float(x.split('/')[0]) if len(x) > 3 else 0)
df_RATE_BTM['approx_cost(for two people)'] = df_RATE_BTM['approx_cost(for two people)'].apply(lambda x: int(x.replace(',', '')))
df_rating = df_BTM['rate'].dropna().apply(lambda x: float(x.split('/')[0]) if len(x) > 3 else np.nan).dropna()
f, axes = plt.subplots(1, 2, figsize=(20, 10), sharex=True)
sns.despine(left=True)
sns.distplot(df_rating, bins=20, ax=axes[0]).set_title('Rating distribution in BTM Region')
plt.xlabel('Rating')
df_grp = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='votes', ascending=False)
sns.distplot(df_grp['rate'], bins=20, ax=axes[1]).set_title('Average Rating distribution in BTM Region')
df_grp.reset_index(inplace=True)
df_grp1 = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='rate', ascending=False)
plt.xlim(2.5, 5)
df_grp2 = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='approx_cost(for two people)', ascending=False)
df_Count_CasualDinning = df_main.loc[df_main['rest_type'] == 'Casual Dining, Bar'].groupby(by='location').agg('count').sort_values(by='rest_type')
df_count_casual = df_main.loc[df_main['rest_type'] == 'Casual Dining, Microbrewery'].groupby(by='location').agg('count').sort_values(by='rest_type')
sns.barplot(x=df_count_casual['name'], y=df_count_casual.index).set_title('Number of Casual Dining, Microbrewery in Bengaluru ')
plt.xlabel('Count')
|
code
|
17120135/cell_26
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df_main = pd.read_csv('../input/zomato.csv')
df_loc = df_main['location'].value_counts()[:20]
df_BTM = df_main.loc[df_main['location'] == 'BTM']
df_BTM_REST = df_BTM['rest_type'].value_counts()
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(121)
sns.barplot(x=df_BTM_REST, y=df_BTM_REST.index, ax=ax1)
plt.title('Count of restaurant types in BTM')
plt.xlabel('Count')
plt.ylabel('Restaurant Name')
df_BTM_REST1 = df_BTM_REST[:10]
labels = df_BTM_REST1.index
explode = (0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
df_RATE_BTM = df_BTM[['rate', 'rest_type', 'online_order', 'votes', 'book_table', 'approx_cost(for two people)', 'listed_in(type)', 'listed_in(city)']].dropna()
df_RATE_BTM['rate'] = df_RATE_BTM['rate'].apply(lambda x: float(x.split('/')[0]) if len(x) > 3 else 0)
df_RATE_BTM['approx_cost(for two people)'] = df_RATE_BTM['approx_cost(for two people)'].apply(lambda x: int(x.replace(',', '')))
df_rating = df_BTM['rate'].dropna().apply(lambda x: float(x.split('/')[0]) if len(x) > 3 else np.nan).dropna()
f, axes = plt.subplots(1, 2, figsize=(20, 10), sharex=True)
sns.despine(left=True)
sns.distplot(df_rating, bins=20, ax=axes[0]).set_title('Rating distribution in BTM Region')
plt.xlabel('Rating')
df_grp = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='votes', ascending=False)
sns.distplot(df_grp['rate'], bins=20, ax=axes[1]).set_title('Average Rating distribution in BTM Region')
df_grp.reset_index(inplace=True)
df_grp1 = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='rate', ascending=False)
plt.xlim(2.5, 5)
df_grp2 = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='approx_cost(for two people)', ascending=False)
plt.figure(figsize=(20, 10))
sns.barplot(y=df_grp2.index, x=df_grp2['approx_cost(for two people)']).set_title('Average Cost for 2 distributed in BTM Region')
|
code
|
17120135/cell_7
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
df_main = pd.read_csv('../input/zomato.csv')
df_main.info()
|
code
|
17120135/cell_16
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_main = pd.read_csv('../input/zomato.csv')
df_loc = df_main['location'].value_counts()[:20]
df_BTM = df_main.loc[df_main['location'] == 'BTM']
df_BTM_REST = df_BTM['rest_type'].value_counts()
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(121)
sns.barplot(x=df_BTM_REST, y=df_BTM_REST.index, ax=ax1)
plt.title('Count of restaurant types in BTM')
plt.xlabel('Count')
plt.ylabel('Restaurant Name')
|
code
|
17120135/cell_17
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_main = pd.read_csv('../input/zomato.csv')
df_loc = df_main['location'].value_counts()[:20]
df_BTM = df_main.loc[df_main['location'] == 'BTM']
df_BTM_REST = df_BTM['rest_type'].value_counts()
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(121)
sns.barplot(x=df_BTM_REST, y=df_BTM_REST.index, ax=ax1)
plt.title('Count of restaurant types in BTM')
plt.xlabel('Count')
plt.ylabel('Restaurant Name')
plt.figure(figsize=(20, 15))
df_BTM_REST1 = df_BTM_REST[:10]
labels = df_BTM_REST1.index
explode = (0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
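# Note: 'explode' is prepared to pull out the largest slice, but the plt.pie
# call below does not pass it, so all ten wedges are drawn flush.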
plt.pie(df_BTM_REST1.values, labels=labels, autopct='%1.1f%%', shadow=True, startangle=140)
plt.title('top 10 restaurant types in BTM')
print('Quick bites are {} % of all the Restaurant types'.format(df_BTM_REST.values[0] / df_BTM_REST.sum() * 100))
|
code
|
17120135/cell_31
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df_main = pd.read_csv('../input/zomato.csv')
df_loc = df_main['location'].value_counts()[:20]
df_BTM = df_main.loc[df_main['location'] == 'BTM']
df_BTM_REST = df_BTM['rest_type'].value_counts()
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(121)
sns.barplot(x=df_BTM_REST, y=df_BTM_REST.index, ax=ax1)
plt.title('Count of restaurant types in BTM')
plt.xlabel('Count')
plt.ylabel('Restaurant Name')
df_BTM_REST1 = df_BTM_REST[:10]
labels = df_BTM_REST1.index
explode = (0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
df_RATE_BTM = df_BTM[['rate', 'rest_type', 'online_order', 'votes', 'book_table', 'approx_cost(for two people)', 'listed_in(type)', 'listed_in(city)']].dropna()
df_RATE_BTM['rate'] = df_RATE_BTM['rate'].apply(lambda x: float(x.split('/')[0]) if len(x) > 3 else 0)
df_RATE_BTM['approx_cost(for two people)'] = df_RATE_BTM['approx_cost(for two people)'].apply(lambda x: int(x.replace(',', '')))
df_rating = df_BTM['rate'].dropna().apply(lambda x: float(x.split('/')[0]) if len(x) > 3 else np.nan).dropna()
f, axes = plt.subplots(1, 2, figsize=(20, 10), sharex=True)
sns.despine(left=True)
sns.distplot(df_rating, bins=20, ax=axes[0]).set_title('Rating distribution in BTM Region')
plt.xlabel('Rating')
df_grp = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='votes', ascending=False)
sns.distplot(df_grp['rate'], bins=20, ax=axes[1]).set_title('Average Rating distribution in BTM Region')
df_grp.reset_index(inplace=True)
df_grp1 = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='rate', ascending=False)
plt.xlim(2.5, 5)
df_grp2 = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='approx_cost(for two people)', ascending=False)
df_Count_CasualDinning = df_main.loc[df_main['rest_type'] == 'Casual Dining, Bar'].groupby(by='location').agg('count').sort_values(by='rest_type')
plt.figure(figsize=(10, 10))
sns.barplot(x=df_Count_CasualDinning['rest_type'], y=df_Count_CasualDinning.index).set_title('Count of Casual Dining, Bar in Bengaluru')
print('There are about {} number of Casual Dining, Bar in Bengaluru.'.format(df_Count_CasualDinning['rest_type'].sum()))
|
code
|
17120135/cell_24
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df_main = pd.read_csv('../input/zomato.csv')
df_loc = df_main['location'].value_counts()[:20]
df_BTM = df_main.loc[df_main['location'] == 'BTM']
df_BTM_REST = df_BTM['rest_type'].value_counts()
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(121)
sns.barplot(x=df_BTM_REST, y=df_BTM_REST.index, ax=ax1)
plt.title('Count of restaurant types in BTM')
plt.xlabel('Count')
plt.ylabel('Restaurant Name')
df_BTM_REST1 = df_BTM_REST[:10]
labels = df_BTM_REST1.index
explode = (0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
df_RATE_BTM = df_BTM[['rate', 'rest_type', 'online_order', 'votes', 'book_table', 'approx_cost(for two people)', 'listed_in(type)', 'listed_in(city)']].dropna()
df_RATE_BTM['rate'] = df_RATE_BTM['rate'].apply(lambda x: float(x.split('/')[0]) if len(x) > 3 else 0)
df_RATE_BTM['approx_cost(for two people)'] = df_RATE_BTM['approx_cost(for two people)'].apply(lambda x: int(x.replace(',', '')))
df_rating = df_BTM['rate'].dropna().apply(lambda x: float(x.split('/')[0]) if len(x) > 3 else np.nan).dropna()
f, axes = plt.subplots(1, 2, figsize=(20, 10), sharex=True)
sns.despine(left=True)
sns.distplot(df_rating, bins=20, ax=axes[0]).set_title('Rating distribution in BTM Region')
plt.xlabel('Rating')
df_grp = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='votes', ascending=False)
sns.distplot(df_grp['rate'], bins=20, ax=axes[1]).set_title('Average Rating distribution in BTM Region')
df_grp.reset_index(inplace=True)
plt.figure(figsize=(20, 10))
sns.barplot(x=df_grp['votes'], y=df_grp['rest_type']).set_title('Average Votes distribution in BTM Region')
df_grp1 = df_RATE_BTM.groupby(by='rest_type').agg('mean').sort_values(by='rate', ascending=False)
plt.figure(figsize=(20, 10))
sns.barplot(y=df_grp1.index, x=df_grp1['rate']).set_title('Average Rating distributed in BTM Region')
plt.xlim(2.5, 5)
|
code
|
17120135/cell_5
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
df_main = pd.read_csv('../input/zomato.csv')
df_main.describe()
|
code
|
33107127/cell_9
|
[
"text_plain_output_1.png"
] |
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from sklearn.svm import SVC
model = SVC()
model.fit(X_train, y_train)
pred2 = model.predict(X_test)
print(classification_report(y_test, pred2))
|
code
|
33107127/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
|
code
|
33107127/cell_7
|
[
"text_plain_output_1.png"
] |
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, y_train)
pred1 = lr.predict(X_test)
print(classification_report(y_test, pred1))
|
code
|
33107127/cell_3
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/league-of-legends-diamond-ranked-games-10-min/high_diamond_ranked_10min.csv')
data.head()
|
code
|
33107127/cell_17
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
import numpy as np # linear algebra
from sklearn.neighbors import KNeighborsClassifier
knnscore = []
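# Sweep k = 1..39 on test-set accuracy; since range(1, 40) starts at 1, the
# best k is recovered below as 1 + the index of the highest score.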
for i, k in enumerate(range(1, 40)):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    knnscore.append(knn.score(X_test, y_test))
knn = KNeighborsClassifier(1 + knnscore.index(np.max(knnscore)))
knn.fit(X_train, y_train)
pred3 = knn.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rfcscore = []
for i, k in enumerate(range(100, 300, 20)):
    rfc = RandomForestClassifier(n_estimators=k)
    rfc.fit(X_train, y_train)
    rfcscore.append(rfc.score(X_test, y_test))
rfc = RandomForestClassifier(n_estimators=100 + 20 * rfcscore.index(np.max(rfcscore)))  # map the best score's index back onto the searched grid
rfc.fit(X_train, y_train)
pred5 = rfc.predict(X_test)
print(classification_report(y_test, pred5))
|
code
|
33107127/cell_14
|
[
"text_plain_output_1.png"
] |
from sklearn.metrics import classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
pred4 = dt.predict(X_test)
print(classification_report(y_test, pred4))
|
code
|
33107127/cell_12
|
[
"text_html_output_1.png"
] |
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
import numpy as np # linear algebra
from sklearn.neighbors import KNeighborsClassifier
knnscore = []
for i, k in enumerate(range(1, 40)):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    knnscore.append(knn.score(X_test, y_test))
knn = KNeighborsClassifier(1 + knnscore.index(np.max(knnscore)))
knn.fit(X_train, y_train)
pred3 = knn.predict(X_test)
print(classification_report(y_test, pred3))
|
code
|
2008917/cell_13
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.offline as py
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import squarify
import matplotlib.ticker as plticker
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
plt.style.use('fivethirtyeight')
import warnings
warnings.filterwarnings('ignore')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import base64
import io
import codecs
from IPython.display import HTML
import jupyter
data = pd.read_csv('../input/WorldPopulation.csv', encoding='ISO-8859-1')
index_min = np.argmin(data['2016'])
index_max = np.argmax(data['2016'])
unit_min = data['Country'].values[index_min]
unit_max = data['Country'].values[index_max]
plt.subplots(figsize=(13, 8))
chart = plt.subplot(2, 2, 1)
withWorld = np.array(data['2016'])
withoutWorld = np.delete(withWorld, 210, axis=0)
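# Index 210 is assumed to be the aggregate 'World' row; dropping it keeps the
# country-level distribution from being dwarfed by the global total.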
plt.plot(withoutWorld)
plt.ylabel('Population', fontsize=12)
plt.xlabel('Population Countries Distribution', fontsize=12)
plt.annotate("Circa 200 countries \n Distribution of countries \n smaller than 100 Million",
xy=(0.45,0.95), xycoords='axes fraction', fontsize=10)
#Between 10 Millions and 100 Millions
result2 = data[np.logical_and(data['2016']>10000000, data['2016']<100000000)]
result2 = (result2['2016'])
#Between 1 Millions and 10 Millions
result3 = data[np.logical_and(data['2016']>1000000, data['2016']<10000000)]
result3 = (result3['2016'])
#Less than 1 Millions
result4 = data['2016']<1000000
result4= (data[result4]['2016'])
chart2 = plt.subplot(2, 2, 2)
result2.hist()
plt.setp(chart2, xticks = range(10000000,100000000,10000000), yticks=range(0,35,5))
plt.axvline(result2.mean(),linestyle='dashed',color='blue')
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate("Countries between 10 and 100 Million \n Frequency distribution and median",
xy=(0.3,0.8), xycoords='axes fraction', fontsize=10)
chart3 = plt.subplot(2, 2, 3)
result3.hist()
plt.setp(chart3, xticks = range(1000000,10000000,1000000), yticks=range(0,35,5))
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.axvline(result3.mean(),linestyle='dashed',color='blue')
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate("Countries between 1 and 10 Million \n Frequency distribution and median",
xy=(0.45,0.8), xycoords='axes fraction', fontsize=10)
chart4 = plt.subplot(2, 2, 4)
result4.hist()
plt.setp(chart4, xticks = range(100000,1000000,100000), yticks=range(0,35,5))
plt.axvline(result4.mean(),linestyle='dashed',color='blue')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate("Countries smaller than 1 Million \n Frequency distribution and median",
xy=(0.3,0.8), xycoords='axes fraction', fontsize=10)
population = data['2016'].sort_values(ascending=False)[1:11].to_frame()
population = data['2015'].sort_values(ascending=False)[1:6].to_frame()
worldPopulation = data['2015'].max()
sizes = (population['2015'] / worldPopulation).iloc[::-1]
labels = data['Country'].values[population.index[0:6]][::-1]
explode = (0, 0, 0, 0, 0)
fig1, ax1 = plt.subplots(figsize=(13, 4))
ax1.pie(sizes, radius=1.1, explode=explode, labels=labels, labeldistance=1.1, autopct='%1.1f%%', shadow=False, startangle=-5)
ax1.axis('equal')
plt.show()
|
code
|
2008917/cell_4
|
[
"image_output_1.png"
] |
import pandas as pd
data = pd.read_csv('../input/WorldPopulation.csv', encoding='ISO-8859-1')
data.head(10)
|
code
|
2008917/cell_2
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import plotly.offline as py
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import squarify
import matplotlib.ticker as plticker
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
plt.style.use('fivethirtyeight')
import warnings
warnings.filterwarnings('ignore')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import base64
import io
import codecs
from IPython.display import HTML
import jupyter
|
code
|
2008917/cell_19
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.offline as py
import seaborn as sns
import squarify
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import squarify
import matplotlib.ticker as plticker
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
plt.style.use('fivethirtyeight')
import warnings
warnings.filterwarnings('ignore')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import base64
import io
import codecs
from IPython.display import HTML
import jupyter
data = pd.read_csv('../input/WorldPopulation.csv', encoding='ISO-8859-1')
index_min = np.argmin(data['2016'])
index_max = np.argmax(data['2016'])
unit_min = data['Country'].values[index_min]
unit_max = data['Country'].values[index_max]
plt.subplots(figsize=(13, 8))
chart = plt.subplot(2, 2, 1)
withWorld = np.array(data['2016'])
withoutWorld = np.delete(withWorld, 210, axis=0)
plt.plot(withoutWorld)
plt.ylabel('Population', fontsize=12)
plt.xlabel('Population Countries Distribution', fontsize=12)
plt.annotate('Circa 200 countries \n Distribution of countries \n smaller than 100 Million',
             xy=(0.45, 0.95), xycoords='axes fraction', fontsize=10)
# Between 10 million and 100 million
result2 = data[np.logical_and(data['2016'] > 10000000, data['2016'] < 100000000)]
result2 = result2['2016']
# Between 1 million and 10 million
result3 = data[np.logical_and(data['2016'] > 1000000, data['2016'] < 10000000)]
result3 = result3['2016']
# Less than 1 million
result4 = data['2016'] < 1000000
result4 = data[result4]['2016']
chart2 = plt.subplot(2, 2, 2)
result2.hist()
plt.setp(chart2, xticks=range(10000000, 100000000, 10000000), yticks=range(0, 35, 5))
plt.axvline(result2.mean(), linestyle='dashed', color='blue')
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries between 10 and 100 Million \n Frequency distribution and mean',
             xy=(0.3, 0.8), xycoords='axes fraction', fontsize=10)
chart3 = plt.subplot(2, 2, 3)
result3.hist()
plt.setp(chart3, xticks=range(1000000, 10000000, 1000000), yticks=range(0, 35, 5))
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.axvline(result3.mean(), linestyle='dashed', color='blue')
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries between 1 and 10 Million \n Frequency distribution and mean',
             xy=(0.45, 0.8), xycoords='axes fraction', fontsize=10)
chart4 = plt.subplot(2, 2, 4)
result4.hist()
plt.setp(chart4, xticks=range(100000, 1000000, 100000), yticks=range(0, 35, 5))
plt.axvline(result4.mean(), linestyle='dashed', color='blue')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries smaller than 1 Million \n Frequency distribution and mean',
             xy=(0.3, 0.8), xycoords='axes fraction', fontsize=10)
population = data['2016'].sort_values(ascending=False)[1:11].to_frame()
population = data['2015'].sort_values(ascending=False)[1:6].to_frame()
worldPopulation = data['2015'].max()
sizes = (population['2015'] / worldPopulation).iloc[::-1]
labels = data['Country'].values[population.index[0:6]][::-1]
explode = (0, 0, 0, 0, 0)
fig1, ax1 = plt.subplots(figsize=(13, 4))
ax1.pie(sizes, radius=1.1, explode=explode, labels=labels, labeldistance=1.1,
        autopct='%1.1f%%', shadow=False, startangle=-5)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
x = range(0, 216)
withoutWorld = withoutWorld.astype(float)
population = data['2016'].sort_values(ascending=False)[1:11]
country = data['Country'].values[population.index[0:11]]
df = pd.DataFrame({'nb_people': population, 'group': country})
plt.axis('off')
plt.subplots(figsize=(13, 8))
Mammals = ['Horse', 'Dogs', 'Cats', 'Goats', 'Pigs', 'Sheep', 'Cows', 'People']
MammalsPopulation = (60000000, 425000000, 625000000, 860000000, 1000000000, 1100000000, 1500000000, 7330000000)
Mammals = Mammals[::-1]
MammalsPopulation = MammalsPopulation[::-1]
squarify.plot(sizes=MammalsPopulation, label=Mammals, alpha=0.9)
plt.axis('off')
plt.title('Top Mammals by population')
plt.show()
|
code
|
2008917/cell_7
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.offline as py
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import squarify
import matplotlib.ticker as plticker
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
plt.style.use('fivethirtyeight')
import warnings
warnings.filterwarnings('ignore')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import base64
import io
import codecs
from IPython.display import HTML
import jupyter
data = pd.read_csv('../input/WorldPopulation.csv', encoding='ISO-8859-1')
index_min = np.argmin(data['2016'])
index_max = np.argmax(data['2016'])
unit_min = data['Country'].values[index_min]
unit_max = data['Country'].values[index_max]
plt.subplots(figsize=(13, 8))
chart = plt.subplot(2, 2, 1)
withWorld = np.array(data['2016'])
withoutWorld = np.delete(withWorld, 210, axis=0)
plt.plot(withoutWorld)
plt.ylabel('Population', fontsize=12)
plt.xlabel('Population Countries Distribution', fontsize=12)
plt.annotate('Circa 200 countries \n Distribution of countries \n smaller than 100 Million', xy=(0.45, 0.95), xycoords='axes fraction', fontsize=10)
result2 = data[np.logical_and(data['2016'] > 10000000, data['2016'] < 100000000)]
result2 = result2['2016']
result3 = data[np.logical_and(data['2016'] > 1000000, data['2016'] < 10000000)]
result3 = result3['2016']
result4 = data['2016'] < 1000000
result4 = data[result4]['2016']
chart2 = plt.subplot(2, 2, 2)
result2.hist()
plt.setp(chart2, xticks=range(10000000, 100000000, 10000000), yticks=range(0, 35, 5))
plt.axvline(result2.mean(), linestyle='dashed', color='blue')
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries between 10 and 100 Million \n Frequency distribution and mean', xy=(0.3, 0.8), xycoords='axes fraction', fontsize=10)
chart3 = plt.subplot(2, 2, 3)
result3.hist()
plt.setp(chart3, xticks=range(1000000, 10000000, 1000000), yticks=range(0, 35, 5))
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.axvline(result3.mean(), linestyle='dashed', color='blue')
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries between 1 and 10 Million \n Frequency distribution and mean', xy=(0.45, 0.8), xycoords='axes fraction', fontsize=10)
chart4 = plt.subplot(2, 2, 4)
result4.hist()
plt.setp(chart4, xticks=range(100000, 1000000, 100000), yticks=range(0, 35, 5))
plt.axvline(result4.mean(), linestyle='dashed', color='blue')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries smaller than 1 Million \n Frequency distribution and mean', xy=(0.3, 0.8), xycoords='axes fraction', fontsize=10)
|
code
|
2008917/cell_15
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.offline as py
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import squarify
import matplotlib.ticker as plticker
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
plt.style.use('fivethirtyeight')
import warnings
warnings.filterwarnings('ignore')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import base64
import io
import codecs
from IPython.display import HTML
import jupyter
data = pd.read_csv('../input/WorldPopulation.csv', encoding='ISO-8859-1')
index_min = np.argmin(data['2016'])
index_max = np.argmax(data['2016'])
unit_min = data['Country'].values[index_min]
unit_max = data['Country'].values[index_max]
plt.subplots(figsize=(13, 8))
chart = plt.subplot(2, 2, 1)
withWorld = np.array(data['2016'])
withoutWorld = np.delete(withWorld, 210, axis=0)
plt.plot(withoutWorld)
plt.ylabel('Population', fontsize=12)
plt.xlabel('Population Countries Distribution', fontsize=12)
plt.annotate('Circa 200 countries \n Distribution of countries \n smaller than 100 Million',
             xy=(0.45, 0.95), xycoords='axes fraction', fontsize=10)
# Between 10 million and 100 million
result2 = data[np.logical_and(data['2016'] > 10000000, data['2016'] < 100000000)]
result2 = result2['2016']
# Between 1 million and 10 million
result3 = data[np.logical_and(data['2016'] > 1000000, data['2016'] < 10000000)]
result3 = result3['2016']
# Less than 1 million
result4 = data['2016'] < 1000000
result4 = data[result4]['2016']
chart2 = plt.subplot(2, 2, 2)
result2.hist()
plt.setp(chart2, xticks=range(10000000, 100000000, 10000000), yticks=range(0, 35, 5))
plt.axvline(result2.mean(), linestyle='dashed', color='blue')
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries between 10 and 100 Million \n Frequency distribution and mean',
             xy=(0.3, 0.8), xycoords='axes fraction', fontsize=10)
chart3 = plt.subplot(2, 2, 3)
result3.hist()
plt.setp(chart3, xticks=range(1000000, 10000000, 1000000), yticks=range(0, 35, 5))
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.axvline(result3.mean(), linestyle='dashed', color='blue')
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries between 1 and 10 Million \n Frequency distribution and mean',
             xy=(0.45, 0.8), xycoords='axes fraction', fontsize=10)
chart4 = plt.subplot(2, 2, 4)
result4.hist()
plt.setp(chart4, xticks=range(100000, 1000000, 100000), yticks=range(0, 35, 5))
plt.axvline(result4.mean(), linestyle='dashed', color='blue')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries smaller than 1 Million \n Frequency distribution and mean',
             xy=(0.3, 0.8), xycoords='axes fraction', fontsize=10)
population = data['2016'].sort_values(ascending=False)[1:11].to_frame()
population = data['2015'].sort_values(ascending=False)[1:6].to_frame()
worldPopulation = data['2015'].max()
sizes = (population['2015'] / worldPopulation).iloc[::-1]
labels = data['Country'].values[population.index[0:6]][::-1]
explode = (0, 0, 0, 0, 0)
fig1, ax1 = plt.subplots(figsize=(13, 4))
ax1.pie(sizes, radius=1.1, explode=explode, labels=labels, labeldistance=1.1,
        autopct='%1.1f%%', shadow=False, startangle=-5)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
plt.subplots(figsize=(13, 8))
x = range(0, 216)
withoutWorld = withoutWorld.astype(float)
plt.scatter(x, withoutWorld, s=withoutWorld / 1000000, c=withoutWorld)
plt.xlabel('Countries')
plt.ylabel('Population')
|
code
|
2008917/cell_17
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.offline as py
import seaborn as sns
import squarify
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import squarify
import matplotlib.ticker as plticker
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
plt.style.use('fivethirtyeight')
import warnings
warnings.filterwarnings('ignore')
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import base64
import io
import codecs
from IPython.display import HTML
import jupyter
data = pd.read_csv('../input/WorldPopulation.csv', encoding='ISO-8859-1')
index_min = np.argmin(data['2016'])
index_max = np.argmax(data['2016'])
unit_min = data['Country'].values[index_min]
unit_max = data['Country'].values[index_max]
plt.subplots(figsize=(13, 8))
chart = plt.subplot(2, 2, 1)
withWorld = np.array(data['2016'])
withoutWorld = np.delete(withWorld, 210, axis=0)
plt.plot(withoutWorld)
plt.ylabel('Population', fontsize=12)
plt.xlabel('Population Countries Distribution', fontsize=12)
plt.annotate('Circa 200 countries \n Distribution of countries \n smaller than 100 Million',
             xy=(0.45, 0.95), xycoords='axes fraction', fontsize=10)
# Between 10 million and 100 million
result2 = data[np.logical_and(data['2016'] > 10000000, data['2016'] < 100000000)]
result2 = result2['2016']
# Between 1 million and 10 million
result3 = data[np.logical_and(data['2016'] > 1000000, data['2016'] < 10000000)]
result3 = result3['2016']
# Less than 1 million
result4 = data['2016'] < 1000000
result4 = data[result4]['2016']
chart2 = plt.subplot(2, 2, 2)
result2.hist()
plt.setp(chart2, xticks=range(10000000, 100000000, 10000000), yticks=range(0, 35, 5))
plt.axvline(result2.mean(), linestyle='dashed', color='blue')
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries between 10 and 100 Million \n Frequency distribution and mean',
             xy=(0.3, 0.8), xycoords='axes fraction', fontsize=10)
chart3 = plt.subplot(2, 2, 3)
result3.hist()
plt.setp(chart3, xticks=range(1000000, 10000000, 1000000), yticks=range(0, 35, 5))
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.axvline(result3.mean(), linestyle='dashed', color='blue')
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries between 1 and 10 Million \n Frequency distribution and mean',
             xy=(0.45, 0.8), xycoords='axes fraction', fontsize=10)
chart4 = plt.subplot(2, 2, 4)
result4.hist()
plt.setp(chart4, xticks=range(100000, 1000000, 100000), yticks=range(0, 35, 5))
plt.axvline(result4.mean(), linestyle='dashed', color='blue')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.ylabel('Number of Countries', fontsize=12)
plt.annotate('Countries smaller than 1 Million \n Frequency distribution and mean',
             xy=(0.3, 0.8), xycoords='axes fraction', fontsize=10)
population = data['2016'].sort_values(ascending=False)[1:11].to_frame()
population = data['2015'].sort_values(ascending=False)[1:6].to_frame()
worldPopulation = data['2015'].max()
sizes = (population['2015'] / worldPopulation).iloc[::-1]
labels = data['Country'].values[population.index[0:6]][::-1]
explode = (0, 0, 0, 0, 0)
fig1, ax1 = plt.subplots(figsize=(13, 4))
ax1.pie(sizes, radius=1.1, explode=explode, labels=labels, labeldistance=1.1,
        autopct='%1.1f%%', shadow=False, startangle=-5)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
x = range(0, 216)
withoutWorld = withoutWorld.astype(float)
plt.subplots(figsize=(13, 10))
population = data['2016'].sort_values(ascending=False)[1:11]
country = data['Country'].values[population.index[0:11]]
df = pd.DataFrame({'nb_people': population, 'group': country})
squarify.plot(sizes=df['nb_people'], label=df['group'], alpha=0.8)
plt.axis('off')
plt.show()
|
code
|
2008917/cell_5
|
[
"image_output_1.png"
] |
import numpy as np
import pandas as pd
data = pd.read_csv('../input/WorldPopulation.csv', encoding='ISO-8859-1')
index_min = np.argmin(data['2016'])
index_max = np.argmax(data['2016'])
unit_min = data['Country'].values[index_min]
unit_max = data['Country'].values[index_max]
print('The most populated political unit:', unit_max, '-', round(data['2016'].max()), '; The least populated:', unit_min, '-', data['2016'].min())
|
code
|
72084932/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd
df_train = pd.read_csv('../input/tabulardata-kfolds-created/train_folds.csv')
df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_submission = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')
useful_features = [c for c in df_train.columns if c not in ('id', 'loss', 'kfold')]
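# Keep every column except the row id, the target ('loss'), and the CV fold marker.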
df_train[useful_features]
|
code
|
72084932/cell_3
|
[
"text_html_output_1.png"
] |
import pandas as pd
df_train = pd.read_csv('../input/tabulardata-kfolds-created/train_folds.csv')
df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_submission = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')
sample_submission.head()
|
code
|
34134329/cell_13
|
[
"text_html_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
|
code
|
34134329/cell_44
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
cleaned_data.dtypes
appointment_df = cleaned_data[['AppointmentID', 'PatientId', 'ScheduledDay', 'AppointmentDay', 'SMS_received', 'No-show']]
patient_df = cleaned_data[['PatientId', 'Gender', 'Age', 'Neighbourhood', 'Scholarship', 'Hypertension', 'Diabetes', 'Alcoholism', 'Handcap']]
patient_df.duplicated().sum()
|
code
|
34134329/cell_6
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import os
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
|
code
|
34134329/cell_40
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
cleaned_data.dtypes
|
code
|
34134329/cell_48
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
cleaned_data.dtypes
appointment_df = cleaned_data[['AppointmentID', 'PatientId', 'ScheduledDay', 'AppointmentDay', 'SMS_received', 'No-show']]
patient_df = cleaned_data[['PatientId', 'Gender', 'Age', 'Neighbourhood', 'Scholarship', 'Hypertension', 'Diabetes', 'Alcoholism', 'Handcap']]
patient_df.duplicated().sum()
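# Note: patient_df is a column slice of cleaned_data, so the in-place
# drop_duplicates below may emit pandas' SettingWithCopyWarning; taking a
# .copy() when selecting the columns avoids it.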
patient_df.drop_duplicates(inplace=True)
patient_df.reset_index(drop=True, inplace=True)
patient_df.duplicated().sum()
|
code
|
34134329/cell_19
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
|
code
|
34134329/cell_7
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.head(2)
|
code
|
34134329/cell_32
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
cleaned_data.dtypes
|
code
|
34134329/cell_28
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
cleaned_data.head(2)
|
code
|
34134329/cell_17
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df['Neighbourhood'].unique()
|
code
|
34134329/cell_46
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
cleaned_data.dtypes
appointment_df = cleaned_data[['AppointmentID', 'PatientId', 'ScheduledDay', 'AppointmentDay', 'SMS_received', 'No-show']]
patient_df = cleaned_data[['PatientId', 'Gender', 'Age', 'Neighbourhood', 'Scholarship', 'Hypertension', 'Diabetes', 'Alcoholism', 'Handcap']]
patient_df.duplicated().sum()
patient_df.drop_duplicates(inplace=True)
patient_df.reset_index(drop=True, inplace=True)
|
code
|
34134329/cell_10
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
|
code
|
34134329/cell_36
|
[
"text_html_output_1.png"
] |
import pandas as pd
data_df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
data_df.isnull().sum()
data_df.dtypes
data_df.duplicated().sum()
cleaned_data = data_df.copy()
cleaned_data.rename(columns={'Hipertension': 'Hypertension'}, inplace=True)
cleaned_data.dtypes
cleaned_data.dtypes
|
code
|
1004118/cell_13
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, random_state=7)
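# Note: KFold(random_state=...) without shuffle=True is accepted only by older
# scikit-learn releases; current versions require shuffle=True whenever a
# random_state is given.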
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
for i in [2, 3, 4, 5, 6, 7]:
    mean_satisfaction_level = df['satisfaction_level'][df['number_project'] == i].mean()
    print('project_total', i, ':', mean_satisfaction_level)
|
code
|
1004118/cell_4
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
|
code
|
1004118/cell_6
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
sns.heatmap(df.corr(), vmax=0.8, square=True, annot=True, fmt='.2f')
|
code
|
1004118/cell_11
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
print(sorted(feature_importance_dict.items(), key=lambda x: x[1], reverse=True))
|
code
|
1004118/cell_18
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
for i in [2, 3, 4, 5, 6, 7]:
    mean_satisfaction_level = df['satisfaction_level'][df['number_project'] == i].mean()
for i in [2, 3, 4, 5, 6, 7]:
    mean_satisfaction_level = df['satisfaction_level'][df['number_project'] == i].mean()
    print(i, mean_satisfaction_level)
|
code
|
1004118/cell_8
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import ExtraTreesClassifier
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
print(sorted(feature_importance_dict.items(), key=lambda x: x[1], reverse=True))
|
code
|
1004118/cell_15
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
"""
for i in [2,3,4,5,6,7]:
mean_satisfaction_level=df['satisfaction_level'][df['number_project']==i].mean()
print('project_total',i,':',mean_satisfaction_level)
"""
# plt.hist treats its second positional argument as bin edges, which must be
# monotonically increasing, so passing the raw hours column raises; histogram
# satisfaction_level on its own instead.
plt.hist(df['satisfaction_level'], bins=20)
|
code
|
1004118/cell_16
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
"""
for i in [2,3,4,5,6,7]:
mean_satisfaction_level=df['satisfaction_level'][df['number_project']==i].mean()
print('project_total',i,':',mean_satisfaction_level)
"""
plt.scatter(df['satisfaction_level'], df['last_evaluation'])
plt.xlabel('satisfaction_level')
plt.ylabel('last_evaluation')
|
code
|
1004118/cell_3
|
[
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/HR_comma_sep.csv')
df.describe()
|
code
|
1004118/cell_17
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
sns.pointplot(x=df['number_project'], y=df['average_montly_hours'])
|
code
|
1004118/cell_14
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
plt.scatter(df['satisfaction_level'], df['average_montly_hours'])
plt.ylabel('average_montly_hours')
plt.xlabel('satisfaction_level')
|
code
|
1004118/cell_10
|
[
"text_html_output_1.png"
] |
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
    print(results.mean())
|
code
|
1004118/cell_12
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold,cross_val_score
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/HR_comma_sep.csv')
df.isnull().any()
df = df.rename(columns={'sales': 'job'})
X = np.array(df.drop('left', 1))
y = np.array(df['left'])
model = ExtraTreesClassifier()
model.fit(X, y)
feature_list = list(df.drop('left', 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import classification_report
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
kfold = KFold(n_splits=10, random_state=7)
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
scoring = 'neg_mean_squared_error'
for i in models:
    results = cross_val_score(i, X, y, cv=kfold, scoring=scoring)
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
X = df.drop(['left', 'satisfaction_level'], axis=1)
y = df['satisfaction_level']
model.fit(X, y)
feature_list = list(df.drop(['left', 'satisfaction_level'], 1).columns)
feature_importance_dict = dict(zip(feature_list, model.feature_importances_))
sns.pointplot(x=df['number_project'], y=df['satisfaction_level'])
|
code
|
130016562/cell_9
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import polars as pl

# iter_test / env and the trend tables, CatBoost model dict and feature lists
# used below are assumed to come from earlier cells of this notebook.
patient_check_dict = {}
use_model_ratio = 0
first_cb_huber_use_ratio = {'updrs_1': 0.8, 'updrs_2': 0.8, 'updrs_3': 0.3, 'updrs_4': 0}
first_cb_mae_use_ratio = {'updrs_1': 0.2, 'updrs_2': 0.8, 'updrs_3': 0.1, 'updrs_4': 0}
cb_huber_use_ratio = {'updrs_1': 0.4, 'updrs_2': 0.5, 'updrs_3': 0.6, 'updrs_4': 0.5}
cb_mae_use_ratio = {'updrs_1': 0.2, 'updrs_2': 0.2, 'updrs_3': 0.05, 'updrs_4': 0.5}
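# Assumed blending scheme, read off the ratios above: each UPDRS target starts
# from a month-indexed trend estimate and is folded together with CatBoost
# Huber/MAE trend models one ratio at a time, as
#   pred = pred * (1 - r) + model_pred * r
# where the first_* ratios apply only to patients first seen at visit_month == 0.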
for test, test_peptides, test_proteins, sample_submission in iter_test:
visit_month = test['visit_month'].iloc[0]
test_pl = pl.DataFrame(test[['patient_id', 'visit_month']]).unique()
test_proteins_pl = pl.DataFrame(test_proteins)
test_peptides_pl = pl.DataFrame(test_peptides)
protein_user_list = list(test_proteins_pl['patient_id'].unique())
print('protein prediction...')
test_proteins_pl_pivot = test_proteins_pl.pivot(values='NPX', index='patient_id', columns='UniProt')
test_peptides_pl_pivot = test_peptides_pl.pivot(values='PeptideAbundance', index='patient_id', columns='Peptide')
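    # Pivot the long protein/peptide tables into one wide feature row per patient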
test_pr_pe_base = test_proteins_pl_pivot.join(test_peptides_pl_pivot, on='patient_id', how='left')
test_pr_pe_base = test_pr_pe_base.to_pandas()
    oof_df = test_pr_pe_base[['patient_id']].copy()  # copy() so the prediction columns added below don't raise SettingWithCopy warnings
for t in [1, 2, 3]:
cb_use_features = cb_feature_dict[f'updrs_{t}']
cb_null_cols = [col for col in cb_use_features if col not in test_pr_pe_base.columns]
pred_model = np.zeros(len(oof_df))
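        # Average the fold models' predictions; feature columns missing from this batch are filled with NaN first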
if len(cb_null_cols) > 0:
for col in cb_null_cols:
test_pr_pe_base[col] = np.nan
for fold in range(folds):
model_cb = cb_model_dict[f'model_updrs_{t}_{fold}']
pred_model += model_cb.predict(test_pr_pe_base[cb_use_features]) / folds
oof_df[f'pred_updrs_{t}'] = pred_model
prediction_id_list = []
pred_list = []
for row in test_pl.to_numpy():
patient_id = row[0]
visit_month = row[1]
check_dict_value = patient_check_dict.get(patient_id, 'nothing')
if check_dict_value == 'nothing':
patient_check_dict[patient_id] = 0
if visit_month == 6 or visit_month == 18:
patient_check_dict[patient_id] += 1
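        # Patients never observed at month 6 or 18 are later treated as following the healthy-cohort trend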
for t in [1, 2, 3, 4]:
for p in [0, 6, 12, 24]:
pred_month = visit_month + p
prediction_id = f'{patient_id}_{visit_month}_updrs_{t}_plus_{p}_months'
pred = 0
pred_trend = 0
pred_huber_cb = 0
pred_mae_cb = 0
if visit_month == 0:
pred_trend = first_linear_trend_df.iloc[pred_month][f'updrs_{t}']
pred_huber_cb = first_cb_trend_huber_df.iloc[pred_month][f'updrs_{t}']
pred_mae_cb = first_cb_trend_mae_df.iloc[pred_month][f'updrs_{t}']
pred = pred_trend
pred = pred * (1 - first_cb_huber_use_ratio[f'updrs_{t}']) + pred_huber_cb * first_cb_huber_use_ratio[f'updrs_{t}']
pred = pred * (1 - first_cb_mae_use_ratio[f'updrs_{t}']) + pred_mae_cb * first_cb_mae_use_ratio[f'updrs_{t}']
if t != 4:
if patient_id in protein_user_list:
pred_model = oof_df[oof_df['patient_id'] == patient_id][f'pred_updrs_{t}'].item()
pred = pred * (1 - use_model_ratio) + pred_model * use_model_ratio
pred = np.round(pred)
else:
check_healthy = patient_check_dict[patient_id]
if check_healthy == 0:
pred = healthy_trend_df.iloc[pred_month][f'updrs_{t}']
else:
pred_trend = linear_trend_df.iloc[pred_month][f'updrs_{t}']
pred_huber_cb = cb_trend_huber_df.iloc[pred_month][f'updrs_{t}']
pred_mae_cb = cb_trend_mae_df.iloc[pred_month][f'updrs_{t}']
pred = pred_trend
pred = pred * (1 - cb_huber_use_ratio[f'updrs_{t}']) + pred_huber_cb * cb_huber_use_ratio[f'updrs_{t}']
pred = pred * (1 - cb_mae_use_ratio[f'updrs_{t}']) + pred_mae_cb * cb_mae_use_ratio[f'updrs_{t}']
if t != 4:
if patient_id in protein_user_list:
pred_model = oof_df[oof_df['patient_id'] == patient_id][f'pred_updrs_{t}'].item()
pred = pred * (1 - use_model_ratio) + pred_model * use_model_ratio
pred = np.round(pred)
prediction_id_list.append(prediction_id)
pred_list.append(pred)
result = pd.DataFrame(prediction_id_list, columns=['prediction_id'])
result['rating'] = pred_list
env.predict(result)
|
code
|
130016562/cell_4
|
[
"text_plain_output_5.png",
"text_html_output_4.png",
"text_html_output_6.png",
"text_plain_output_4.png",
"text_html_output_2.png",
"text_html_output_5.png",
"text_plain_output_6.png",
"text_plain_output_3.png",
"text_plain_output_7.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"text_html_output_3.png",
"text_html_output_7.png"
] |
import pandas as pd
first_linear_trend_df = pd.read_csv('/kaggle/input/amp-visitmonth-model-first-month/first_linear_trend_df.csv')
first_cb_trend_huber_df = pd.read_csv('/kaggle/input/amp-visitmonth-model-first-month/first_cb_trend_huber_df.csv')
first_cb_trend_mae_df = pd.read_csv('/kaggle/input/amp-visitmonth-model-first-month/first_cb_trend_mae_df.csv')
linear_trend_df = pd.read_csv('/kaggle/input/amp-visitmonth-model/linear_trend_df.csv')
cb_trend_huber_df = pd.read_csv('/kaggle/input/amp-visitmonth-model/cb_trend_huber_df.csv')
cb_trend_mae_df = pd.read_csv('/kaggle/input/amp-visitmonth-model/cb_trend_mae_df.csv')
healthy_trend_df = pd.read_csv('/kaggle/input/amp-visitmonth-model/healthy_trend_df.csv')
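# Each trend table maps a prediction month (row index) to an expected score per UPDRS target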
show_months = [0, 12, 24, 36, 48, 60, 72, 84, 96, 108]
display('first_linear_trend_df:', first_linear_trend_df.iloc[show_months])
display('first_cb_trend_huber_df:', first_cb_trend_huber_df.iloc[show_months])
display('first_cb_trend_mae_df:', first_cb_trend_mae_df.iloc[show_months])
display('linear_trend_df:', linear_trend_df.iloc[show_months])
display('cb_trend_huber_df:', cb_trend_huber_df.iloc[show_months])
display('cb_trend_mae_df:', cb_trend_mae_df.iloc[show_months])
display('healthy_trend:', healthy_trend_df.iloc[show_months])
|
code
|
90130234/cell_9
|
[
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
train = train.set_index('row_id')
test = test.set_index('row_id')
train.time = pd.to_datetime(train.time)
test.time = pd.to_datetime(test.time)
x_unique = train.x.unique()
y_unique = train.y.unique()
d_unique = train.direction.unique()
train_min_time = train.time.min()
train_max_time = train.time.max()
test_min_time = test.time.min()
test_max_time = test.time.max()
congestion_mean = train.congestion.mean()
print(f'congestion {congestion_mean}')
|
code
|
90130234/cell_4
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
train.head()
|
code
|
90130234/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
90130234/cell_7
|
[
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
train = train.set_index('row_id')
test = test.set_index('row_id')
train.time = pd.to_datetime(train.time)
test.time = pd.to_datetime(test.time)
x_unique = train.x.unique()
y_unique = train.y.unique()
d_unique = train.direction.unique()
print(f'x: {x_unique}\r\ny: {y_unique}\r\nd: {d_unique}')
|
code
|
90130234/cell_8
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
train = train.set_index('row_id')
test = test.set_index('row_id')
train.time = pd.to_datetime(train.time)
test.time = pd.to_datetime(test.time)
x_unique = train.x.unique()
y_unique = train.y.unique()
d_unique = train.direction.unique()
train_min_time = train.time.min()
train_max_time = train.time.max()
test_min_time = test.time.min()
test_max_time = test.time.max()
print(f'train min {train_min_time} max {train_max_time}\r\ntest min {test_min_time} max {test_max_time}')
|
code
|
90130234/cell_14
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
train = train.set_index('row_id')
test = test.set_index('row_id')
train.time = pd.to_datetime(train.time)
test.time = pd.to_datetime(test.time)
x_unique = train.x.unique()
y_unique = train.y.unique()
d_unique = train.direction.unique()
train_min_time = train.time.min()
train_max_time = train.time.max()
test_min_time = test.time.min()
test_max_time = test.time.max()
congestion_mean = train.congestion.mean()
x_group_mean = train.groupby('x').congestion.mean()
y_group_mean = train.groupby('y').congestion.mean()
d_group_mean = train.groupby('direction').congestion.mean()
# 'x_y' and 'x_y_d' combined location keys are used below but never built in this cell;
# the construction here is an assumption based on the column names
train['x_y'] = train.x.astype(str) + '_' + train.y.astype(str)
train['x_y_d'] = train['x_y'] + '_' + train.direction
x_y_group_mean = train.groupby('x_y').congestion.mean()
x_y_d_group_mean = train.groupby('x_y_d').congestion.mean()
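# Derive calendar features from the timestamp for both train and test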
train['weekday'] = train.time.dt.weekday
train['hour'] = train.time.dt.hour
train['timeofday'] = train.time.dt.time
train['weekend'] = (train['weekday'] == 5) | (train['weekday'] == 6)
train['minute'] = train.time.dt.minute
test['weekday'] = test.time.dt.weekday
test['hour'] = test.time.dt.hour
test['timeofday'] = test.time.dt.time
test['weekend'] = (test['weekday'] == 5) | (test['weekday'] == 6)
test['minute'] = test.time.dt.minute
train.head()
|
code
|
90130234/cell_10
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
train = train.set_index('row_id')
test = test.set_index('row_id')
train.time = pd.to_datetime(train.time)
test.time = pd.to_datetime(test.time)
x_unique = train.x.unique()
y_unique = train.y.unique()
d_unique = train.direction.unique()
train_min_time = train.time.min()
train_max_time = train.time.max()
test_min_time = test.time.min()
test_max_time = test.time.max()
congestion_mean = train.congestion.mean()
x_group_mean = train.groupby('x').congestion.mean()
y_group_mean = train.groupby('y').congestion.mean()
d_group_mean = train.groupby('direction').congestion.mean()
print(f'x_group_mean:\r\n {x_group_mean}\r\n')
print(f'y_group_mean:\r\n {y_group_mean}\r\n')
print(f'd_group_mean:\r\n {d_group_mean}\r\n')
plt.subplot(3, 1, 1)
plt.bar(x_group_mean.index, x_group_mean)
plt.xlabel('x')
plt.ylabel('mean')
plt.xticks(x_group_mean.index)
plt.subplot(3, 1, 2)
plt.bar(y_group_mean.index, y_group_mean)
plt.xlabel('y')
plt.ylabel('mean')
plt.xticks(y_group_mean.index)
plt.subplot(3, 1, 3)
plt.bar(d_group_mean.index, d_group_mean)
plt.xlabel('direction')
plt.ylabel('mean')
plt.xticks(d_group_mean.index)
plt.show()
|
code
|
90130234/cell_12
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
train = train.set_index('row_id')
test = test.set_index('row_id')
train.time = pd.to_datetime(train.time)
test.time = pd.to_datetime(test.time)
x_unique = train.x.unique()
y_unique = train.y.unique()
d_unique = train.direction.unique()
train_min_time = train.time.min()
train_max_time = train.time.max()
test_min_time = test.time.min()
test_max_time = test.time.max()
congestion_mean = train.congestion.mean()
x_group_mean = train.groupby('x').congestion.mean()
y_group_mean = train.groupby('y').congestion.mean()
d_group_mean = train.groupby('direction').congestion.mean()
# 'x_y' and 'x_y_d' combined location keys are used below but never built in this cell;
# the construction here is an assumption based on the column names
train['x_y'] = train.x.astype(str) + '_' + train.y.astype(str)
train['x_y_d'] = train['x_y'] + '_' + train.direction
x_y_group_mean = train.groupby('x_y').congestion.mean()
x_y_d_group_mean = train.groupby('x_y_d').congestion.mean()
print(f'x_y_group_mean:\r\n {x_y_group_mean}\r\n')
print(f'x_y_d_group_mean:\r\n {x_y_d_group_mean}\r\n')
plt.figure()
plt.bar(x_y_group_mean.index, x_y_group_mean)
plt.xlabel('x_y')
plt.ylabel('mean')
plt.xticks(x_y_group_mean.index)
plt.figure()
plt.bar(x_y_d_group_mean.index, x_y_d_group_mean)
plt.xlabel('x_y_direction')
plt.ylabel('mean')
plt.xticks(x_y_d_group_mean.index)
plt.show()
|
code
|
122249704/cell_2
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
import pandas as pd
|
code
|
88101809/cell_9
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import os
import csv
import re
import collections
arr = os.listdir('../input/spectra-files/')
arr = sorted(arr, key=lambda x: int(os.path.splitext(x)[0]))
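# Sort spectra files numerically by the time value encoded in each filename (e.g. 50.csv)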
def function_general(file_name):
wav = []
absr = []
d = {}
with open(file_name, 'r') as df:
reader = csv.reader(df)
header = next(reader)
for row in reader:
d[float(row[0])] = float(row[1])
wav.append(float(row[0]))
absr.append(float(row[1]))
function_general('../input/spectra-files/50.csv')
def function_peak_area(file_name, in_w, fin_w):
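    # Estimate the peak area between wavelengths in_w and fin_w as a triangle: (max - min absorbance) * width / 2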
dp = {}
absr = []
wav = []
product = {}
with open(file_name, 'r') as df:
reader = csv.reader(df)
header = next(reader)
for row in reader:
if float(row[0]) > in_w and float(row[0]) < fin_w:
dp[float(row[0])] = float(row[1])
absr.append(float(row[1]))
wav.append(float(row[0]))
max_w = list(dp.keys())[list(dp.values()).index(max(absr))]
min_w = list(dp.keys())[list(dp.values()).index(min(absr))]
peak = dp[max_w] - dp[min_w]
area = peak * (fin_w - in_w) / 2
name = file_name
    tiempo = re.findall(r'\d+', name)
product = {float(tiempo[0]): area}
return product
def function_macro(file_list):
product_dict_list = {}
react_dict_list = {}
for x in file_list:
file_name = '../input/spectra-files/' + x
product_dict_list.update(function_peak_area(file_name, 1070, 1330))
react_dict_list.update(function_peak_area(file_name, 2186, 2705))
product = collections.OrderedDict(sorted(product_dict_list.items()))
reactive = collections.OrderedDict(sorted(react_dict_list.items()))
return (product, reactive)
product, react = function_macro(arr)
|
code
|
88101809/cell_11
|
[
"text_plain_output_1.png"
] |
import os
import csv
import re
import collections
import matplotlib.pyplot as plt
arr = os.listdir('../input/spectra-files/')
arr = sorted(arr, key=lambda x: int(os.path.splitext(x)[0]))
def function_general(file_name):
wav = []
absr = []
d = {}
with open(file_name, 'r') as df:
reader = csv.reader(df)
header = next(reader)
for row in reader:
d[float(row[0])] = float(row[1])
wav.append(float(row[0]))
absr.append(float(row[1]))
function_general('../input/spectra-files/50.csv')
def function_peak_area(file_name, in_w, fin_w):
dp = {}
absr = []
wav = []
product = {}
with open(file_name, 'r') as df:
reader = csv.reader(df)
header = next(reader)
for row in reader:
if float(row[0]) > in_w and float(row[0]) < fin_w:
dp[float(row[0])] = float(row[1])
absr.append(float(row[1]))
wav.append(float(row[0]))
max_w = list(dp.keys())[list(dp.values()).index(max(absr))]
min_w = list(dp.keys())[list(dp.values()).index(min(absr))]
peak = dp[max_w] - dp[min_w]
area = peak * (fin_w - in_w) / 2
name = file_name
    tiempo = re.findall(r'\d+', name)
product = {float(tiempo[0]): area}
return product
def function_macro(file_list):
product_dict_list = {}
react_dict_list = {}
for x in file_list:
file_name = '../input/spectra-files/' + x
product_dict_list.update(function_peak_area(file_name, 1070, 1330))
react_dict_list.update(function_peak_area(file_name, 2186, 2705))
product = collections.OrderedDict(sorted(product_dict_list.items()))
reactive = collections.OrderedDict(sorted(react_dict_list.items()))
return (product, reactive)
product, react = function_macro(arr)
# wrap the dict views in list() so matplotlib receives plain sequences
time = list(product.keys())
area = list(product.values())
plt.plot(time, area)
time1 = list(react.keys())
area1 = list(react.values())
plt.plot(time1, area1)
plt.ylabel('Area')
plt.xlabel('Time')
|
code
|
88101809/cell_3
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import os
arr = os.listdir('../input/spectra-files/')
arr = sorted(arr, key=lambda x: int(os.path.splitext(x)[0]))
print(arr)
|
code
|
88101809/cell_5
|
[
"image_output_1.png"
] |
import csv
import matplotlib.pyplot as plt
def function_general(file_name):
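    # Plot a single spectrum (wavelength vs. absorbance) read from a two-column CSV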
wav = []
absr = []
d = {}
with open(file_name, 'r') as df:
reader = csv.reader(df)
header = next(reader)
print(header)
for row in reader:
d[float(row[0])] = float(row[1])
wav.append(float(row[0]))
absr.append(float(row[1]))
plt.plot(wav, absr)
    plt.ylabel('Absorption')
plt.xlabel('Wavelength')
function_general('../input/spectra-files/50.csv')
|
code
|
106192098/cell_4
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
ss = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')
cols = ['country', 'store', 'product']
cnt_val = pd.unique(train['country'])
str_val = pd.unique(train['store'])
prod_val = pd.unique(train['product'])
data = np.empty(shape=48, dtype='object')
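# One sub-DataFrame per (country, store, product) combination; 48 combinations in total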
i = 0
for x in cnt_val:
for y in str_val:
for z in prod_val:
data[i] = train[(train['country'] == x) & (train['store'] == y) & (train['product'] == z)]
i += 1
colors = ['r', 'g', 'b', 'c', 'm', 'y']
fig = plt.figure(figsize=(20, 500))
i = 0
j = -1
days = [i for i in range(data[0].shape[0])]
for x in cnt_val:
j += 1
for y in str_val:
for z in prod_val:
ax = fig.add_subplot(len(cnt_val) * len(str_val) * len(prod_val), 1, i + 1)
ax.set_title(x + ', ' + y + ', ' + z)
ax.set_ylabel('num_sold')
ax.plot(days, data[i]['num_sold'], color=colors[j])
i += 1
plt.show()
|
code
|
106192098/cell_2
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
ss = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')
print('Train values:')
cols = ['country', 'store', 'product']
for col in cols:
print(col, ':', pd.unique(train[col]))
print('Test values:')
for col in cols:
print(col, ':', pd.unique(test[col]))
|
code
|
106192098/cell_1
|
[
"text_html_output_2.png",
"text_html_output_1.png",
"text_html_output_3.png"
] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
display(train)
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
display(test)
ss = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')
display(ss)
|
code
|
106192098/cell_7
|
[
"text_plain_output_4.png",
"application_vnd.jupyter.stderr_output_3.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
ss = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')
cols = ['country', 'store', 'product']
cnt_val = pd.unique(train['country'])
str_val = pd.unique(train['store'])
prod_val = pd.unique(train['product'])
data = np.empty(shape=48, dtype='object')
i = 0
for x in cnt_val:
for y in str_val:
for z in prod_val:
data[i] = train[(train['country'] == x) & (train['store'] == y) & (train['product'] == z)]
i += 1
colors = ['r', 'g', 'b', 'c', 'm', 'y']
fig = plt.figure(figsize=(20, 500))
i = 0
j = -1
days = [i for i in range(data[0].shape[0])]
for x in cnt_val:
    j += 1
    for y in str_val:
        for z in prod_val:
            ax = fig.add_subplot(len(cnt_val) * len(str_val) * len(prod_val), 1, i + 1)
            ax.set_title(x + ', ' + y + ', ' + z)
            ax.set_ylabel('num_sold')
            ax.plot(days, data[i]['num_sold'], color=colors[j])
            i += 1
plt.show()
i = 0
j = -1
s = [i for i in range(24, 48)]
fig = plt.figure(figsize=(25, 90))
for x in cnt_val:
    j += 1
    for y in str_val:
        for z in prod_val:
            ax = fig.add_subplot(12, 4, i + 1)
            ax.set_title(x + ', ' + y + ', ' + z)
            ax.set_ylabel('number_of_samples')
            if i not in s:
                ax.hist(data[i]['num_sold'], bins=50, color=colors[j], ec='black')
            else:
                ax.hist(data[i]['num_sold'].iloc[1100:], bins=50, color=colors[j], ec='black')
            i += 1
plt.show()
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
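    # Build shuffled (window, next-value) pairs for one-step-ahead supervised training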
dataset = tf.data.Dataset.from_tensor_slices(series)
dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:-1], window[-1]))
dataset = dataset.batch(batch_size).prefetch(1)
return dataset
days1 = 182
ep1 = 60
days2 = 60
ep2 = 21
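# Series whose index is in s are trained only on rows 1100 onward, mirroring the truncation used for their histograms above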
for i in range(48):
if i not in s:
data_ = data[i]['num_sold'].copy()
dataset = windowed_dataset(data_.values, days1, 700, 100)
NN = tf.keras.Sequential([tf.keras.layers.Input(shape=(days1,)), tf.keras.layers.Dense(units=35, activation='relu'), tf.keras.layers.Dense(units=10, activation='relu'), tf.keras.layers.Dense(units=5, activation='relu'), tf.keras.layers.Dense(units=1, activation='relu')])
NN.summary()
NN.compile(optimizer=tf.keras.optimizers.Adagrad(), loss=tf.keras.losses.MeanSquaredError(), metrics=['MAPE'])
hist = NN.fit(dataset, epochs=ep1)
while hist.history['MAPE'][-1] >= 11:
NN = tf.keras.Sequential([tf.keras.layers.Input(shape=(days1,)), tf.keras.layers.Dense(units=35, activation='relu'), tf.keras.layers.Dense(units=10, activation='relu'), tf.keras.layers.Dense(units=5, activation='relu'), tf.keras.layers.Dense(units=1, activation='relu')])
NN.summary()
NN.compile(optimizer=tf.keras.optimizers.Adagrad(), loss=tf.keras.losses.MeanSquaredError(), metrics=['MAPE'])
hist = NN.fit(dataset, epochs=ep1)
for j in range(365):
            pred = float(NN.predict(np.array([data_.values[-days1:]]))[0, 0])
            # round to the nearest integer (ties round down, matching the original arithmetic)
            pred = int(pred) if pred - np.floor(pred) <= 0.5 else int(pred) + 1
            data_ = pd.concat([data_, pd.Series([pred])], ignore_index=True)  # Series.append was removed in pandas 2.0
            print(pred)
            ss.iloc[i + 48 * j, ss.columns.get_loc('num_sold')] = pred  # avoid chained assignment
else:
data_ = data[i]['num_sold'].iloc[1100:].copy()
dataset = windowed_dataset(data_.values, days2, 180, 50)
NN = tf.keras.Sequential([tf.keras.layers.Input(shape=(days2,)), tf.keras.layers.Dense(units=40, activation='relu'), tf.keras.layers.Dense(units=30, activation='relu'), tf.keras.layers.Dense(units=10, activation='relu'), tf.keras.layers.Dense(units=5, activation='relu'), tf.keras.layers.Dense(units=1, activation='relu')])
NN.summary()
NN.compile(optimizer=tf.keras.optimizers.Adagrad(), loss=tf.keras.losses.MeanSquaredError(), metrics=['MAPE'])
hist = NN.fit(dataset, epochs=ep2)
while hist.history['MAPE'][-1] >= 10:
NN = tf.keras.Sequential([tf.keras.layers.Input(shape=(days2,)), tf.keras.layers.Dense(units=40, activation='relu'), tf.keras.layers.Dense(units=30, activation='relu'), tf.keras.layers.Dense(units=10, activation='relu'), tf.keras.layers.Dense(units=5, activation='relu'), tf.keras.layers.Dense(units=1, activation='relu')])
NN.summary()
NN.compile(optimizer=tf.keras.optimizers.Adagrad(), loss=tf.keras.losses.MeanSquaredError(), metrics=['MAPE'])
hist = NN.fit(dataset, epochs=ep2)
for j in range(365):
            pred = float(NN.predict(np.array([data_.values[-days2:]]))[0, 0])
            # round to the nearest integer (ties round down, matching the original arithmetic)
            pred = int(pred) if pred - np.floor(pred) <= 0.5 else int(pred) + 1
            data_ = pd.concat([data_, pd.Series([pred])], ignore_index=True)  # Series.append was removed in pandas 2.0
            print(pred)
            ss.iloc[i + 48 * j, ss.columns.get_loc('num_sold')] = pred  # avoid chained assignment
display(ss)
|
code
|
106192098/cell_3
|
[
"text_html_output_2.png",
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
ss = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')
cols = ['country', 'store', 'product']
cnt_val = pd.unique(train['country'])
str_val = pd.unique(train['store'])
prod_val = pd.unique(train['product'])
data = np.empty(shape=48, dtype='object')
print(data.shape)
i = 0
for x in cnt_val:
for y in str_val:
for z in prod_val:
data[i] = train[(train['country'] == x) & (train['store'] == y) & (train['product'] == z)]
i += 1
display(data[0])
display(data[7])
|
code
|
106192098/cell_5
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv')
ss = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')
cols = ['country', 'store', 'product']
cnt_val = pd.unique(train['country'])
str_val = pd.unique(train['store'])
prod_val = pd.unique(train['product'])
data = np.empty(shape=48, dtype='object')
i = 0
for x in cnt_val:
for y in str_val:
for z in prod_val:
data[i] = train[(train['country'] == x) & (train['store'] == y) & (train['product'] == z)]
i += 1
colors = ['r', 'g', 'b', 'c', 'm', 'y']
fig = plt.figure(figsize=(20, 500))
i = 0
j = -1
days = [i for i in range(data[0].shape[0])]
for x in cnt_val:
    j += 1
    for y in str_val:
        for z in prod_val:
            ax = fig.add_subplot(len(cnt_val) * len(str_val) * len(prod_val), 1, i + 1)
            ax.set_title(x + ', ' + y + ', ' + z)
            ax.set_ylabel('num_sold')
            ax.plot(days, data[i]['num_sold'], color=colors[j])
            i += 1
plt.show()
i = 0
j = -1
s = [i for i in range(24, 48)]
fig = plt.figure(figsize=(25, 90))
for x in cnt_val:
j += 1
for y in str_val:
for z in prod_val:
ax = fig.add_subplot(12, 4, i + 1)
ax.set_title(x + ', ' + y + ', ' + z)
ax.set_ylabel('number_of_samples')
if i not in s:
ax.hist(data[i]['num_sold'], bins=50, color=colors[j], ec='black')
else:
ax.hist(data[i]['num_sold'].iloc[1100:], bins=50, color=colors[j], ec='black')
i += 1
plt.show()
|
code
|
32068555/cell_48
|
[
"text_plain_output_1.png"
] |
!pip install json2html
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd
from json2html import *
from IPython.display import display, HTML
|
code
|
32068555/cell_50
|
[
"text_html_output_10.png",
"text_html_output_16.png",
"text_html_output_4.png",
"text_html_output_6.png",
"text_html_output_2.png",
"text_html_output_15.png",
"text_html_output_5.png",
"image_output_5.png",
"text_html_output_14.png",
"text_html_output_19.png",
"image_output_7.png",
"text_html_output_9.png",
"text_html_output_13.png",
"text_html_output_20.png",
"image_output_4.png",
"text_html_output_1.png",
"text_html_output_17.png",
"text_html_output_18.png",
"image_output_6.png",
"text_html_output_12.png",
"text_html_output_11.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"text_html_output_8.png",
"text_html_output_3.png",
"text_html_output_7.png"
] |
from IPython.display import display, HTML
from json2html import json2html  # needed for json2html.convert below
from nltk.tokenize import sent_tokenize
from wordcloud import WordCloud, STOPWORDS
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import requests
import time
headers = {'accept': 'application/json', 'Content-Type': 'text/plain'}
params = (('annotationTypes', '*'), ('language', 'en'))
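# NOTE: this endpoint appears to sit on an internal network, so the call will only work from inside it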
def get_json_object(text):
return requests.post('http://deda1x3026.merckgroup.com:8080/information-discovery/rest/textanalysis/projects/AA-Internal/pipelines/ThemeAnnotator/analyseText', headers=headers, params=params, data=text).json()
def get_json_str(json_obj):
return json.dumps(json_obj)
def get_pretty_json(json_str):
return json.dumps(json_str, indent=4)
def get_themes(text):
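    # The last annotation DTO in the response is assumed to carry the document-level themes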
json_obj = get_json_object(text)
json_array = json_obj['annotationDtos']
return json_array[-1]['themes']
def lst_to_str(word_list):
return ' '.join(word_list).strip()
root = '/kaggle/input/dataset/CORD-19-research-challenge/'
folders = ['biorxiv_medrxiv/biorxiv_medrxiv/', 'comm_use_subset/comm_use_subset/', 'noncomm_use_subset/noncomm_use_subset/', 'custom_license/custom_license/']
def collect_sentences():
index_in_docs = 0
num_files_processed = 0
sentences_np_array = np.empty(100000000, dtype=object)
start = time.time()
for folder in folders:
for filename in os.listdir(root + folder):
if filename.endswith('.json'):
input_file_path = root + folder + filename
with open(input_file_path) as f:
data = json.load(f)
abstracts = data['abstract']
for content in abstracts:
abstract_para = content['text']
sentences = sent_tokenize(abstract_para)
for sentence in sentences:
sentences_np_array[index_in_docs] = sentence
index_in_docs += 1
body_texts = data['body_text']
for content in body_texts:
body_para = content['text']
sentences = sent_tokenize(body_para)
for sentence in sentences:
sentences_np_array[index_in_docs] = sentence
index_in_docs += 1
num_files_processed += 1
np.save('sentences.npy', sentences_np_array)
def collect_json_docs():
    docs = np.empty(100000000, dtype=object)  # np.object was removed in NumPy 1.24
index_in_docs = 0
num_files_processed = 0
num_docs_collected = 0
start = time.time()
for folder in folders:
for filename in os.listdir(root + folder):
if filename.endswith('.json'):
input_file_path = root + folder + filename
with open(input_file_path) as f:
data = json.load(f)
paper_title = data['metadata']['title']
authors = data['metadata']['authors']
authors_names = []
for author in authors:
first_name = author['first']
middle_name = author['middle']
last_name = author['last']
author_name = first_name + ' ' + lst_to_str(middle_name) + ' ' + last_name
authors_names.append(author_name)
abstracts = data['abstract']
for content in abstracts:
abstract_para = content['text']
section = content['section']
sentences = sent_tokenize(abstract_para)
for sentence in sentences:
new_doc = {'sentence': sentence, 'section': section, 'paper_title': paper_title, 'authors': authors_names, 'paragraph': abstract_para}
docs[index_in_docs] = new_doc
index_in_docs += 1
num_docs_collected += 1
body_texts = data['body_text']
for content in body_texts:
body_para = content['text']
section = content['section']
sentences = sent_tokenize(body_para)
for sentence in sentences:
new_doc = {'sentence': sentence, 'section': section, 'paper_title': paper_title, 'authors': authors_names, 'paragraph': body_para}
docs[index_in_docs] = new_doc
index_in_docs += 1
num_docs_collected += 1
num_files_processed += 1
np.save('docs', docs)
docs = np.load('/kaggle/input/jsondocs/docs.npy', allow_pickle=True)
stopwords = set(STOPWORDS)
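# 'I' is assumed to hold neighbour indices from a FAISS search run in an earlier cell (e.g. D, I = index.search(...))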
for id_index in I[0]:
doc = docs[id_index]
html = json2html.convert(doc)
html = html.replace('<td>', "<td style='text-align:left'>")
display(HTML(html))
themes_list = doc['themes']
final_theme_string = ''
for theme in themes_list:
words = theme.replace('-', ' ').split()
t = '_'.join(words)
final_theme_string = final_theme_string + ' ' + t
if doc['themes'] and doc['themes'][0]:
wordcloud = WordCloud(width=700, height=200, stopwords=stopwords, min_font_size=8, max_font_size=20, background_color='white', prefer_horizontal=1).generate(final_theme_string)
plt.figure(figsize=(10, 10), linewidth=10, edgecolor='#04253a')
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
display(HTML("<hr style='height:3px; color:black'>"))
|
code
|
32068555/cell_36
|
[
"text_plain_output_1.png"
] |
!python -m pip install --upgrade faiss faiss-gpu
|
code
|
122255862/cell_21
|
[
"text_html_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum(numeric_only=True)
titanic_class.mean(numeric_only=True)
titanic_class['Age'].agg(['min', 'max'])
|
code
|
122255862/cell_13
|
[
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
for cla, titanic_df in titanic_class:
print(cla)
print(titanic_df)
|
code
|
122255862/cell_9
|
[
"text_html_output_2.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic_gender = titanic['Sex'].value_counts(normalize=True)
print(f'People divided by gender (as a fraction of the total): \n{titanic_gender}')
|
code
|
122255862/cell_25
|
[
"text_html_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum(numeric_only=True)
titanic_class.mean(numeric_only=True)
titanic_class.max()
titanic_class.filter(lambda x: x['Age'].mean() < 38)
|
code
|
122255862/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
|
code
|
122255862/cell_34
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic_gender = titanic['Sex'].value_counts(normalize=True)
wp = {'linewidth': 1, 'edgecolor': 'black'}
plt.axis('equal')
plt.tight_layout()
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum(numeric_only=True)
titanic_class.mean(numeric_only=True)
titanic_class['Age'].agg(['min', 'max']).plot(kind='bar')
titanic['avg_fare_class'] = titanic.groupby('Pclass')['Fare'].transform(lambda x: x.mean())
tita_df = titanic.groupby(['Embarked', 'Sex']).mean(numeric_only=True)
tita_df
titanic['Embarked'].value_counts().plot(kind='bar')
plt.title('Who got in?')
plt.xlabel('Where embarked')
plt.ylabel('Number of people')
|
code
|
122255862/cell_23
|
[
"text_html_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum(numeric_only=True)
titanic_class.mean(numeric_only=True)
titanic_class.max()
|
code
|
122255862/cell_30
|
[
"text_html_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
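# transform broadcasts each class's mean fare back onto every row of that class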
titanic['avg_fare_class'] = titanic.groupby('Pclass')['Fare'].transform(lambda x: x.mean())
titanic['fare_above_avg'] = titanic['avg_fare_class'] < titanic['Fare']
titanic
|
code
|
122255862/cell_33
|
[
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic_gender = titanic['Sex'].value_counts(normalize=True)
wp = {'linewidth': 1, 'edgecolor': 'black'}
plt.axis('equal')
plt.tight_layout()
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum(numeric_only=True)
titanic_class.mean(numeric_only=True)
titanic_class['Age'].agg(['min', 'max']).plot(kind='bar')
titanic['avg_fare_class'] = titanic.groupby('Pclass')['Fare'].transform(lambda x: x.mean())
tita_df = titanic.groupby(['Embarked', 'Sex']).mean(numeric_only=True)
tita_df
titanic['Embarked'].value_counts()
|
code
|
122255862/cell_20
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum(numeric_only=True)
titanic_class.mean(numeric_only=True)
titanic_class['Age'].mean()
|
code
|
122255862/cell_6
|
[
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
print('Number of classes on board:')
titanic['Pclass'].nunique()
|
code
|
122255862/cell_29
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic['avg_fare_class'] = titanic.groupby('Pclass')['Fare'].transform(lambda x: x.mean())
titanic.head()
|
code
|
122255862/cell_26
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum(numeric_only=True)
titanic_class.mean(numeric_only=True)
titanic_class.max()
titanic_class.filter(lambda x: x['Age'].mean() < 38)
print('Mean fare for people in age under 38:')
titanic_class.filter(lambda x: x['Age'].mean() < 38)['Fare'].mean()
|
code
|
122255862/cell_11
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
|
code
|
122255862/cell_19
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic_gender = titanic['Sex'].value_counts(normalize=True)
wp = {'linewidth': 1, 'edgecolor': 'black'}
plt.axis('equal')
plt.tight_layout()
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
sns.barplot(x=titanic['Pclass'], y=titanic['Survived'])
plt.title('People who survived per class')
plt.xlabel('Pclass')
plt.ylabel('Survived')
|
code
|
122255862/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
122255862/cell_7
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
print('How many people survived?')
titanic['Survived'].sum()
|
code
|
122255862/cell_18
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum(numeric_only=True)
titanic_class.mean(numeric_only=True)
|
code
|
122255862/cell_32
|
[
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic_gender = titanic['Sex'].value_counts(normalize=True)
wp = {'linewidth': 1, 'edgecolor': 'black'}
plt.axis('equal')
plt.tight_layout()
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum(numeric_only=True)
titanic_class.mean(numeric_only=True)
titanic_class['Age'].agg(['min', 'max']).plot(kind='bar')
titanic['avg_fare_class'] = titanic.groupby('Pclass')['Fare'].transform(lambda x: x.mean())
tita_df = titanic.groupby(['Embarked', 'Sex']).mean(numeric_only=True)
tita_df
titanic.plot.scatter(x='Embarked', y='PassengerId', c='Pclass', colormap='viridis', figsize=(10, 9))
plt.title('Which class went where')
plt.xlabel('Embarked')
plt.ylabel('PassengerId')
|
code
|
122255862/cell_28
|
[
"text_html_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
titanic_class.sum(numeric_only=True)
titanic_class.mean(numeric_only=True)
titanic_class.max()
titanic_class.filter(lambda x: x['Age'].mean() < 38)
titanic_class.filter(lambda x: x['Age'].mean() < 38)['Fare'].mean()
titanic_class.filter(lambda x: x['Age'].mean() < 38)['Fare'].max()
titanic_class.filter(lambda x: x['Age'].mean() > 38)
|
code
|
122255862/cell_8
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
print('People divided by gender:')
titanic['Sex'].value_counts()
|
code
|
122255862/cell_15
|
[
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
|
code
|
122255862/cell_16
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.shape
titanic.groupby('Sex').Survived.sum()
titanic_class = titanic.groupby('Pclass')
titanic_class
titanic_class.get_group(1)
titanic_class.get_group(2)
titanic_class.get_group(3)
|
code
|
122255862/cell_3
|
[
"text_html_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
titanic = pd.read_csv('/kaggle/input/test-file/tested.csv')
titanic.head()
|
code
|