path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
33104556/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train = train.drop_duplicates().reset_index(drop=True)
train.target.value_counts()
train.isnull().sum()
test.isnull().sum()
print(train.keyword.nunique(), test.keyword.nunique()) | code |
33104556/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train = train.drop_duplicates().reset_index(drop=True)
train.target.value_counts()
train.isnull().sum()
test.isnull().sum()
dist = train[train.target == 1].keyword.value_counts().head()
plt.figure(figsize=(9, 6))
sns.barplot(x=dist.values, y=dist.index)
plt.show() | code |
16166679/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
"""
Removing the markets that will not be used in our analysis. These markets are: LEILÃO (auction, 017), FRACIONARIO (odd lot, 020)
and TERMO (forward, 030).
"""
mask = (df['TPMERC'] == 10) | (df['TPMERC'] == 12) | (df['TPMERC'] == 13) | (df['TPMERC'] == 70) | (df['TPMERC'] == 80)
new_df = df[mask]
new_df.head(2) | code |
16166679/cell_4 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
df.head(2) | code |
16166679/cell_20 | [
"text_plain_output_1.png"
] | """
Should we remove them?
Open questions about these fields: CODBDI - BDI CODE, ESPECI - SECURITY SPECIFICATION, CODISI and DISMES
"""
'\nWhat should we do with the maturity date for the spot market?\n' | code |
16166679/cell_6 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
codBDI = df[['CODBDI']]
codBDI = np.unique(codBDI)
codBDI | code |
16166679/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
"""
Removing the markets that will not be used in our analysis. These markets are: LEILÃO (auction, 017), FRACIONARIO (odd lot, 020)
and TERMO (forward, 030).
"""
mask = (df['TPMERC'] == 10) | (df['TPMERC'] == 12) | (df['TPMERC'] == 13) | (df['TPMERC'] == 70) | (df['TPMERC'] == 80)
new_df = df[mask]
"""
Removing the TIPREG column, since it holds a fixed value that will not be used in the analysis.
"""
new_df.drop(columns=['TIPREG'], axis=1, inplace=True)
"""
Removing the PRAZOT column, since this field refers to the hedge term of the forward market. This column holds null
values for the options and spot markets. Since it will not be part of the analysis, it can be removed.
"""
new_df.drop(columns=['PRAZOT'], axis=1, inplace=True)
new_df.head(10) | code |
16166679/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16166679/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
"""
Removing the markets that will not be used in our analysis. These markets are: LEILÃO (auction, 017), FRACIONARIO (odd lot, 020)
and TERMO (forward, 030).
"""
mask = (df['TPMERC'] == 10) | (df['TPMERC'] == 12) | (df['TPMERC'] == 13) | (df['TPMERC'] == 70) | (df['TPMERC'] == 80)
new_df = df[mask]
"""
Removing the TIPREG column, since it holds a fixed value that will not be used in the analysis.
"""
new_df.drop(columns=['TIPREG'], axis=1, inplace=True)
new_df.head(2) | code |
16166679/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
codBDI = df[['CODBDI']]
codBDI = np.unique(codBDI)
codBDI
codESPECI = df[['ESPECI']]
codESPECI = np.unique(codESPECI)
codESPECI | code |
16166679/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
codBDI = df[['CODBDI']]
codBDI = np.unique(codBDI)
codBDI
codESPECI = df[['ESPECI']]
codESPECI = np.unique(codESPECI)
codESPECI
tpMercado = df[['TPMERC']]
tpMercado = np.unique(tpMercado)
tpMercado
"""
Removing the markets that will not be used in our analysis. These markets are: LEILÃO (auction, 017), FRACIONARIO (odd lot, 020)
and TERMO (forward, 030).
"""
mask = (df['TPMERC'] == 10) | (df['TPMERC'] == 12) | (df['TPMERC'] == 13) | (df['TPMERC'] == 70) | (df['TPMERC'] == 80)
new_df = df[mask]
moedaMerc = new_df[['MODREF']]
moedaMerc = np.unique(moedaMerc)
moedaMerc
indCorr = new_df[['INDOPC']]
indCorr = np.unique(indCorr)
indCorr | code |
16166679/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.head(2) | code |
16166679/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
codBDI = df[['CODBDI']]
codBDI = np.unique(codBDI)
codBDI
codESPECI = df[['ESPECI']]
codESPECI = np.unique(codESPECI)
codESPECI
tpMercado = df[['TPMERC']]
tpMercado = np.unique(tpMercado)
tpMercado
"""
Removing the markets that will not be used in our analysis. These markets are: LEILÃO (auction, 017), FRACIONARIO (odd lot, 020)
and TERMO (forward, 030).
"""
mask = (df['TPMERC'] == 10) | (df['TPMERC'] == 12) | (df['TPMERC'] == 13) | (df['TPMERC'] == 70) | (df['TPMERC'] == 80)
new_df = df[mask]
moedaMerc = new_df[['MODREF']]
moedaMerc = np.unique(moedaMerc)
moedaMerc
indCorr = new_df[['INDOPC']]
indCorr = np.unique(indCorr)
indCorr
fatCot = new_df[['FATCOT']]
fatCot = np.unique(fatCot)
fatCot | code |
16166679/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
codBDI = df[['CODBDI']]
codBDI = np.unique(codBDI)
codBDI
codESPECI = df[['ESPECI']]
codESPECI = np.unique(codESPECI)
codESPECI
tpMercado = df[['TPMERC']]
tpMercado = np.unique(tpMercado)
tpMercado
"""
Removing the markets that will not be used in our analysis. These markets are: LEILÃO (auction, 017), FRACIONARIO (odd lot, 020)
and TERMO (forward, 030).
"""
mask = (df['TPMERC'] == 10) | (df['TPMERC'] == 12) | (df['TPMERC'] == 13) | (df['TPMERC'] == 70) | (df['TPMERC'] == 80)
new_df = df[mask]
moedaMerc = new_df[['MODREF']]
moedaMerc = np.unique(moedaMerc)
moedaMerc | code |
16166679/cell_10 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
file = '../input/COTAHIST_A2009_to_A2018P.csv'
df = pd.read_csv(file)
df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)
codBDI = df[['CODBDI']]
codBDI = np.unique(codBDI)
codBDI
codESPECI = df[['ESPECI']]
codESPECI = np.unique(codESPECI)
codESPECI
tpMercado = df[['TPMERC']]
tpMercado = np.unique(tpMercado)
tpMercado | code |
33111782/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/uncover/public_health_england/covid-19-daily-confirmed-cases.csv', encoding='ISO-8859-2')
df1 = pd.read_csv('../input/uncover/public_health_england/covid-19-cases-by-county-uas.csv', encoding='ISO-8859-2')
df2 = pd.read_csv('../input/uncover/regional_sources/uk_government/covid-19-uk-historical-data.csv', encoding='ISO-8859-2')
df2.dtypes | code |
33111782/cell_4 | [
"text_html_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
import plotly.offline as py
import plotly.express as px
import seaborn
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
33111782/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/uncover/public_health_england/covid-19-daily-confirmed-cases.csv', encoding='ISO-8859-2')
df1 = pd.read_csv('../input/uncover/public_health_england/covid-19-cases-by-county-uas.csv', encoding='ISO-8859-2')
df1.head() | code |
33111782/cell_2 | [
"text_plain_output_1.png"
] | from IPython.display import Image
from IPython.display import Image
Image(url='data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMSEhUTEhIWFhUVFRgYFxUXGB0aFxgYFRYXFxYWFx4bHTQgGBomHhkVITEkMSkrLi4uFyAzODMsNygtLisBCgoKDg0OFxAPGy0dHR0tKy0rKystLS0tLS4tLS0rKy0tLSstLSstLS0rLy0rLS0tKzcrLS0rLSsrLS0rNy0tLf/AABEIAOEA4QMBIgACEQEDEQH/xAAbAAEAAwEBAQEAAAAAAAAAAAAABAUGAwIBB//EADwQAAIBAwIEBQIEAwcEAwEAAAECAwAREgQhBRMiMQYyQVFhFHEjQoGRFlKCJDNicnOhsxVDksFTY9EH/8QAGAEBAQEBAQAAAAAAAAAAAAAAAAECAwT/xAAgEQEBAAIBBQEBAQAAAAAAAAAAAQIREgMTMUFRIRQE/9oADAMBAAIRAxEAPwD9vrxLKFtkbXIAv7sbAfqSBXuq7jvkT/Xg/wCZKrNuosC1eWlAIBO57D3tuax3/wDU+JcrQzi5QcsNzA1rsHUrDt1dditx2vWO4LxvXGGBnUj6dAswkP4jlCQ+OUZJYJYEki5U/erpX7IGB7GvtfmPAfEEkvFZHBmSPkp/Z2KkSq5VUnhFwCqnO5824G+yj9OqUKUpQKUpQKUpQKUpQKUpQKUpQKUpQKUpQKUpQKUpQKUpQKUpQKruO+RP9eD/AJkqxqu475E/14P+ZKTyzl4U/jXhBn5TGD6hEyyhVgsjMwAjZWJt0nLYkDqB/LY4XWNy+dHqXMM24MK3czKIUKCBxEAhb+7JC9wxABNfo/F+MSx6mLTwxxM0kUspaWQxqBE8K2GKNcnmj9q88J8TxSwySSWjMKyNMoOQVY5JYy6sB1oTDIQbXsO1XbSi4BwJ2nSabS8mdV/EmDK0ckZRAsaWsylTHH0lbCzbtkTW7qh0HiqB5GjZsWE7QjpYqWBsgL44qzAghSb7+tcuG+LI3iSSXFC6xYoubuWljMmAVUuTYE7X2BJtUGjpVPxLjyJpG1cVpEABG5UHrCkG4upG9wRcEWNctZ4mij1EcWcZRoZ5XlDi0YgMI3A9DzT6jy+tBe0qn/ifTY5Zt5ggTlyc0sVLgCPDmHpBby9gT6VW6rxtEkgUKzxsumKyIrEk6maeIgqFuuPJ9dyTbuKDVUqi4J4nh1BCXxkLzKFxbEmGR0IDlcS1lyKg3APxV7QKUpQKUpQKUpQKUpQKUpQKUpQKUpQKUpQKUpQKreO+RP8AXg/5kqyqu475E/14P+ZKTyzl4ROJ+HY9TqYppkjkjjhmj5ciBgWleFg4vsLCNh/VUHjnhiR+eumeKJNRpBpmBQ2jCc7FowpA/wC8wt8D7VH8VQu+v0yLEkw+m1LGOSVokusulAe6q12GRHb8xrxLxDVQrNgIIo9LDFIYgGkuXLtIgkJXpsNmxvc9tqNRJHhqc3iaWLkNq11BsjCQYSJKIxvibug6vQEgD1rnw/wpNBynjljLxCMAMpxYLAYXBsbrfZgR7W9ah8Q4/qCgfmQiOafUwcrE8xREmosQ+e73iBIxFg3xu0fiCdTpwpVos4IG/DPd4UJykaQEvkwPSrC3c3vYNFxHhEk2jbTySKZHHU4WyXzDkBf5fT396g+IPCC6h1aPCICCaM4p3aV4HRjja4Bh3H+LYiqaHxXq0g00sjQsdXpVlWyMqws0uljBbru6AajI9vIffa64A0v1+rWWVZCsGksVUqu7ar8uRs3vvuMaCL/CkwOYaJWLrmqtMA6IjhQ0mfMNmdmtsPT5rlovBs0a3E0RdRpgvQ2H9m1c+o3GVxkJgvc2K33rbUoM9pfDzJ9L1r/Z9TqJjseoTrqAFHsRzhv/AITWhpSgUpSgUpSgUpSgUpSgUpSgUpSgUpSgUpSgUpSgV5kjDdwDYg7+4Nwf3tXqlBB4jwbT6gqZ4IpSoIXmIr45WvbIbXsP2FfYuEwIhjWGNUZQpQIApUXspFrEC52+am0oM6PCcZ1P1DkMc2e3KjDEsrIFdwuTqAxAB+Lk2qe/h7SFszpoS3T1GNb/AIeJTe3pitv8oqzqBxjiiaZM3uQTYBRdjsSbD4AJ/SiWyTddP+mQ4qnKTFUMariLBGsGQC1gpxXb4FfOHcKg04IghjiBtfBQt7Xtew3tc/vXYahcQ1xYi9/SxqPpOKxyM6g2KPhvYXOIbp33FiKaTnPqdSok/EUUqu7FnCdNjiSL9W+wrv8AULfHIX9r7/tTS8o6Urms6k2DAkdxfcfeuGo4jGkiRswDvfEfb39v/dDcS6VzE63tkL+19/2rhruIxxFA7WLtiv3/APQ+aHKJdKg6PiiSFwDYpIYzewuygE477jepRnW9shf2vv8AtTSTKX26UqBxbii6dQzBmyYIAouSW7bXrloeORyFlIaNkALLIuJAPY97Wq6qdzGXW1pSomv4ikSO7G+C5EDdrD2FH4igjaS9wqFyBbKwGXa/e1TTXKfUulQNJxRZGUKG6oxICQLWbsDv3qXHMreVgfsb00TKV0pUQa9c3Q3GAUljYKcu1jepEcyt2INu9jQ5R7pSlFKUpQKUpQKUpQKUpQKy3F4pptUBGi4wof7zIIzygg2sNyFuP6q1NLVZdMdTDnNMIYZOTFHNE7rppysihSwdAp5bKLdai4/amn0TcueSOJ0MWoWWJCpBKqq3VR8rkLel62c+qjQ2d1U99yBt2vvX1dSmWIYZd8bi/wC3etcnn7GPusmmgcJpnKnOTVc2Tbdc1bv7WGIrxwvShXVX0znUCWQtNYgBSWs5fsy2KjH/AG2rR6Li0UkZkJwUOydZA3Vip9bV61HFoUeNC4vKTjvtt639B6fc1d07ePnbK+HtAyzR5JIsiF825Vgb3vnIT1g9xYHe3tVvxzTD6vTSGMsOtSwTKzHHllrDYA3N/TerbTcRRlLnoCsynIr+U2vcG2/3vXVtZHYEutm7G4sft71N3bU6ePHW2M0mie8SCFxqVnykmKmxXJixz/MCtgF+aufFmnDHTuYi4SUFwEzOOJvtbcXx/wBqvYZlcXRgw9wbj/ao+t4nHE8aO1mkNl/QE3PsPT703+nakxst8snqeHXTUvyiZPqwUOJytePddr2829edVw9zqJM1fNplaORYsiEGNrSXAQCxBH/7W0+pTLDJcu+Nxl+3euxpySdCeqofFkDPHCFyv9RFuouVF7ZdvS/rtUTi/AGEE7BnmldALta9lYNioUAe/wB6mv4ljBJEcpjDYmYLeMG9vfIi/qBarZ9WgIDMoLdgSAT9vepuxq4YZW3bG6/KU6l1ikAbR4rkhBJDE2A7+vavkekuWMUDxgaWRJboVzcr0j/Ge+/zWz+qj6upenzbjp9er2rhDxBGd1tYIFOV1xIYXuLG/wC9q1yYvRnusm2ima4RWDnhyoDYizZeW/ofirbw2keZMeleK0ahmZSgLA+XH8xH81WMfGYjI6A7JGrl7jDFiR3v8Vxi8RQtE8wDYq+A23drgDEX3uSLVLbVxxwxu9qLxBpnLa60b
HJNPjZScsTvb3tVnwnRCLWSBI8EMCbgWUtk1/gm1qtOFcUWfIYsjobOjizLcXHY2sR81YWqW+m8enjbylKUpWXcpSlApSlApSlApSlApSlBneJ8OEmtiLx5RiF9yLqGyFgfmqfQcMcTgOkuazs/MEa4kXJuZD1EEG1v9q3JFLVqZacMuhLdsMmmkEcatASOfOSeWHdcmOBVW2Aa/evmh0LKNIzQOeXJMGGFyoZjgSP5fW/b1rckVBg4rG3MJsqROULsQFJABax9gTb7g1eTF6GMv7WWTQyBUZ4WdE1M7PHjckOzYOFPmAvf9aDhjsqXhIjbWhxGR5IypBLAbKCd7fNa4a+IpzOYmH8+Qx/e9q+HiUOAfmJgxAVshiSewBvY1eVOzj9VvhvSGOTUgJipmBUWsLYLfH0tcGvHiPTEzaaQRF1R2zxXIgMpC/plY/FWb8UhAVjLGA/lOQs32N969ajiMSGzyIu1+pgNibA7+l6z+723xx48dsg3Dn3j5Dc86jmDUWGOPMyDZ9/L041t2S6ke9R5+IwoAzyIobyksAD9rneui62MkgOpKgMRcXCnsT8fNLbTDHHHf75Zrhc8+mhXTjTO8iMVB7RspcnMv6bHta9R+KaFs9SH07StMo5LgAhemwW5/u7Nvf8AWtEvGYy5GS4csSczNccSbA972+e3zXeLiMTEBZEJJIADAklbZAfa4v8AerusdvGzW2X4lwqbJEAJGojjSdh2BiIyY/5lyWvnE+GyM2rCRsQfprC1s1jAzRT67XFad+KQqubSxhblciwAuDYi9+4NeZOKIHiUbibLFwQVJUXxv7kXI+xpunax+qTg+lDamY/TmOJ4kADIACQTfbtXL6Fvp9SDE5P1LugXZ9mUq6XFj2uPe1a+1LVOTc6E0zfhbSOHmlcPaTAKZAA5wBuzAbL3sB8VpaWpUt26dPDhNFKUqNlKUoFKUoFKUoFKUoFKUoFKUoPLViotK7adQELmHWO0ke12Adj67E2ZW+a29q5R6dVLMFALkFiB3IFgT7mwA/SrLpzz6fJk9RpHdVkXSYquo5jQ7ZSLjjmV8uV97X9BUeXhcjgnkFUl1cTcqwuqCwkZgDYX3Jrc0tV5sdifWR4jw8xzSH6YypJCqRhQtkIyupBICg3vevOh4O4kjEqZhNHgWIuM8vKL9zbathSnMv8AnjBJoZhFChgaw05W6qhcMSehi/lS1u3vXXUcMm5UBjQhpIBp5fdVNus/5er963FqU5p/PPrGcZ4Y+U4jjOP0SxpYdyHNlHza1SdToWhfSyRQFljVw6xgXu6qAbEi/Y3NaqlOS9iMNHoZBDGTFMrrLMQY8CyCRj3VtmB2qSUlKaNJFCyfU5YgAWRA5JIGwONr/JrYWrk2nUsHKjIAgH1ANiQPvYftTknY14rrSlKy9BSlKBSlKBSlKBSlKBSlKDO+INZqhMsemBJMeVrIVyyxHMLsCqf5bnvtVfr/ABpiyBOVsRzA8oUC6as8sm3S4OnB3G+Vtu9XXF+Ox6eSNHViXsMlx6QzBQWBbK1z3AIquh8RoxIeBkyZeXcKS2YhIJxYi/4wb/KrH0oOEPi1g1mCbliM5BGAqyzLj5TeSyLt62PaxqdxnjM0UjoiXVfpuvpsvNmKPcE5G47WFcB4nj5auYJWzxClVTre2TBRmSMbsd/Y2JqXreOBXiAgd1mizHlVsuZCiIVcixvKDv2tQU7ePwIeaYQN7hTLuUChj2U4uAQMTYXPc19PimdCzFY3WMSMwyxYpGpYnsd9tvQ77i1T+JcbCxxPHp8xLckHEFSHRCCCbFiWIve1x3tvXTRcfiklWIQShmuGOAKoQZAUcqSPyNvuNxvvVHnifFZF1iwq+KcuJ+8QuXklVr8wgkWRfLc7/aoh8Zssau8MalkEgBnsCjIjqFJj3lOVsLflO9TxxyJ5QjQuDnIiOwQhuSzq5FmJADIe4B6h81U6zjEc6jOBsc1MZysjL+DcMqPdiBKNiMTaoOg8ZsrcsojuEPZypLgBsSMLAWK7gnv2rvD4qaSeGHGNCZmjkHMuxKfUq3LGILLeHzbe1u9SYuNK8UsqwMeVG7BiECyGLJXVeosLOpG9vi9cYPEq9Ctp3MjEquIjAZ1dUmxvJ0hXO9zv3F6CPxLi+rMuGnBJP1OKhUO+nkhiUuZHHQWZySN7EWG2/LW+ItQsZNgtpNUvM6cW5MkiqoBa4ICi5IANj71YcQ48mnlcLCxAciVwF3fkK6ogyuWI5XpbfvcVG13i2MIbQPkY5XAkVQA0aTPZxlkAeU/VaxuCCb0FlwniDkxBmzEnNBN4yQUxK7xEraxPzuKpp/GjwQB5Y0LctJPORmjI7EjoIV/w26e3bqqWviCFHuYHupeMOoQLlu/LAzvchBva2w3HpJ1XHokg50kDgB5I8CqFw8Qk6RixBLFCq2O5Ye9Byj8UZxaxgoVtKrmwbIkLzMWJC4i/LO12t62O1Rz40sxBiUWm5bfi3spJAfZDkxsekXt6kUm8UDfkw5owJuQovZ8AfN1hvTttb3268X4/y9Os0enuWecMrYgo8EczMTY9RyiK7Hsb3oOGp8cqoXGNXLBSRzVGJIcshJFg4x7G3c3tau8HiR5DqsBH+DAzqueRyV5l/FAF4/7sdO/3rsniGIyrDyJQ5azDBWEZ5jIpcqSN2Vtxf3Nq6anxAiSOnIkJzCAry/xGLxIQLvcWMqXJttfvagqz4wkiS8kaNkXwZZLL0SxxkMSvSo5q2be9uwvUziXiCRRppVVBHJGXcM4Fv7tVBYAqEBkBZvQLevMfiqErcaeXAuEDFFVMWy68i2IQ4+/ttuK9N4rjK3XTzsASB0KAVCSOzKXYArjE23fddt6ohfxe+d8Y8CrADMYZiWCENzLf3d3ZsrdiNq9/xvcqgh6nAsQ4K9WJjINt1YDUkNa1tOfcCrFOMDlBzp2JeaSFI1wu4jMm92YKAUjY2JHa3tXGXxPGVjaKJn5jKFvitlMsEZY3O1hqFIH3qDkvip1C5RIA+yu0uIuGiVjJ0WjH4gsRe9rbXFX/AAfXc+CKYC3NjV7XvbIA2v69+9ZhvEUUsCiXTygmLmuIyFIXCN8lZJMrFnUAea/cW3rsPFcUUaRxwyIRG2CMFGHLWTFXGWQU8prNYg+/exGtpUPhHEBqIllUEK17X2OxK3t3HY7HceoB2qZRSlKUClKUFJxqXSCaPnxB5VxKNyi5TJ7R3a3Rd9luRvVZoZ4uXpjBoIxJKrahY9owgCrGXyw85WVV7dmO9q0Gs4TFK6yOt2S1jkwBxbJcgDZ7N1C4NjuK56ngcDpGhUgRLimLujBcQpXJWBKkAXF97Cgps+HFij6ZFLYo2UPSShRAmdsWwZkU2Jsbe23rUcT0kwRW04kuY41BjNkjnaIDK62jBIU4+pjHqNrJ/DWlIcGLZ0KFcmxCnG4Rb2S+KkkAE4iuj8CgMgkKHMMGuGYAlSCtwDZgCAQCNt7dzQUU/GoAIFfSIiiR0VXZFMfKnEA5QAIZ+zYgiwHe+1T5ptLHOWOnBkjbAOkReTJ1aVwAqlgA
rBif/sI7nexk4NCxUlT0uzgZMBk75sWANmGW9jcA151PBIpHZnucirWDMtmVcCbqQd1xUjsQooKBdRw0Bsog3NlZyzwsc8mmn5lyu8YxmIbsAvyL9YuIcO5gtp8XcqjE6dgRvEEEhw6ReSG1/ce21vF4d0y9o/U7FmIAKyIVAJsqWkk6Rt1HakHh3Tp2Q7kElndmJVkZblmJNjHHb4UDtQVul4noLyYRqrP0SfhFS9sFCNt1X5i2HyfY180HE9CxaZYVV8BKziI74RpIbMVBchZE/f3qw0fhfSxBQkVgjq63ZmxZFKIRkxsAptbtX1vDWmKKnLIVewDuNsFSxs3UuKILHbpFBDXU6PUPzOSkhZzp2kaMXvi3Tdh1Lsy/rVh/Duk6f7ND0qVX8NdlIYEDbYEMw+zH3rwPD0IYsuQLPm5zYszBWVeotdQMmIAIsatgKCBqeDwujIY1Aa9yFF7spUsLjvZmH2Jrxw7gcMMKwKgKI+YDAHrz5mdgLA59QsBY9qsqUEFeDacBQIIwEACgKLKFFlA9gBtXSbhsLrg0SMoZmAKgjJ8sm+5ye/vkfepVKCI3C4S6yGJC6klXKjIFtyQbXF65RcFgWR5OUhd2DFyoLXUgrv3sCoI+RVhSgrjwHSm/9mi3fM/hr59+rt33P7mvcfB9OuRWCMZElrKBkWBVidtyQzD+o+9TqUEKbhEDoY2hjZGbIoVBUt/NY7X+a8NwTTFi308WTY3bAXOBUrc29CiEf5F9hVhSggQcE0yAqkESqQwICKAQ4UMCLbghUH9I9q5/w9pdv7NF0ghehdgcrgbbeZ//ACPuas6UHODTqlwihQSWNha5Pcn5NdKUoFKUoFKUoIHEeLxQEK5a5BayI7kKtsmIRSQouN6kJq4zcB1JUAkAi4BFwSO4qv4zwczHMS4XjKOCoZWS4YGxIsym9jf8xuDtaJwPgkayGeObmLi6rsL/AIhRpM27ubotu1rnv6BcaPiMUoUxyo+SBxiwJKN2a3exrjo+NQSk4SobC/e1xa9xfuAO59Kp+CcHi08lxqFfk2QjFVIkaOOMBmG5uqpZfdvsBxPhaGWNV518xmjooBKcpogAR3FpVbvvb5oNHPxOJAjFxi98WG6nFGkNiNvKjH9Kj6bj8DgESWUxrJkwKgK4utyw2Px3G1+9QX8OxnTrpTJY5mS4G5Be8oAYk2YOyE3J/E73rlL4ajzAaQktM0wUgG4EsEmA/wAI5aj+s0FzqeKwxi7SJYhT5gdnYIrWH5bsN+1dBxCL/wCVPLn5h5P5+/l+e1Z/TeGIyAyShoyyNblqbmN12Dd1SyY4j5+1eD4JjyYiTZg3SVJClgwGIDAAANa1r/IoL9OLQEkCaPbG/UP+5cpv23sbV1k1sYR3zBEYYvib44i7Agb3t6d6zU3goMqq07HEAXKm52dWDWYEjFgBcny75XNW8XB8YpYAwCSLILheoNM0jOx9Ds6/+J96CanEYjjaVOq+PUOq3e2+9vX2r2dZHcLzFuTYDIXJtew9zYg/Y1n9T4QVpVkEpVVlWTlhem6GM22Nt8N7g99rdz4i8GIrRNzWPLa5vfqCspjBsw3VUjW5BuFHrvQaQalMima5gXKXGQHuR3t81x/6lF6SIQCQSGBClRc5G/Tt71Aj4CRrDqjMx2IEdthdVU73tbpv2vudyLWhyeD02tIQQiL5RYmN2dWYfmuWAI9QKDQR6uNjZZFJIyADAnE9mt7fNeG4hEO8sfmw8w8/8nfzfHeqfh3hdYtRz+ZkxG4xxGWCoSoBxVbKNrX+TUeTwaCCvO2xeMfhJflyWLAnu0mws/p7bmgu4uLwsyKHGUhYKv5ujPIkdwOhtzttXt+JRA25i7NibEHEhWbq/l2U96qtB4ZEUyyiUkK7PiUFyzRtH5u+NmG3uL+tQY/BmVzLILlpOlUXHGT6jZv5z+Odz7W9SSGk0/EYpHKJIrMFVyFIPSxYK23oSrD9K+ya+JcryxjDzXcDG/bLfaq/QcE5ErPGws56lKjYGSaSykdt5bf01E/hMHUnUPMzXbIRlekDNXA3NtsQBsPm53oLr/qMXMEQdS7KWCg3OIANzbsNxb3r02uiAuZEAG5JYWtYHvf2Kn9RVPwbwwNPIriTIKmIUooN+XFGSWG5FolNvS5+LQ08EKocJOylu3SLAB1IHTYm0aQxXuDjEN7k0Ggj4nEX5YdcrFgL91UISw9xZ03+a56LjenlQSJMhUrkCWA6L2D7/lPv2qv4B4YGlKlZS+KYdSi5HL08YNx2P4AP9RqNB4MRQF5pKgJsUW5ZEjjN27lSsY6fc39AKDUKwIuDcHsR2NfaAUoFKUoFKUoM94g4FJqJ4ZFkVViKmxW7XDhmse4uox7j5vXKTw0/06xZIxWXMq4JjcYMuDgG5AvkPlVrTUoMxF4Wx05iujMZ4Zi7L5miMRLN65EId9/NUH+CGEYjV1CiEIV3ALj6Ylz9zAx3BvnuDvfa0oaZfiXhl5RAS0bPFDy3ZksJBnC5WwOyNy2BX2f4sfmv8LvJDBHml4ldcirdJfGzxWa6slum59BWppQY4+EZciyyoG3KviSwbCyId94g+Mtu+Q/WpWm8MOmmkh5uRZlsWuRy0ZWET73ZT1g79nNaelXYyDeEJChTnAAoekKcQ5f038gi/CA9qseA8DOnN7qxIVS291RQxCKT6ZEewt+gq+pU2FKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoFKUoP/Z', width=400, height=400) | code |
33111782/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/uncover/public_health_england/covid-19-daily-confirmed-cases.csv', encoding='ISO-8859-2')
df1 = pd.read_csv('../input/uncover/public_health_england/covid-19-cases-by-county-uas.csv', encoding='ISO-8859-2')
df2 = pd.read_csv('../input/uncover/regional_sources/uk_government/covid-19-uk-historical-data.csv', encoding='ISO-8859-2')
df2.head() | code |
33111782/cell_10 | [
"text_html_output_1.png"
] | from IPython.display import Image
from IPython.display import Image
from IPython.display import Image
Image(url='', width=400, height=400) | code |
33111782/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/uncover/public_health_england/covid-19-daily-confirmed-cases.csv', encoding='ISO-8859-2')
df.head() | code |
74052611/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
numbers_lables_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
x_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = numbers_lables_data[['label']]
x_train = numbers_lables_data.iloc[:, 1:]
x_train = np.array(x_train).reshape((x_train.shape[0], 28, 28, 1))
x_test = np.array(x_test).reshape((x_test.shape[0], 28, 28, 1))
x_train = x_train / 255
x_test = x_test / 255
plt.imshow(x_train[9]) | code |
74052611/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
numbers_lables_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
x_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = numbers_lables_data[['label']]
x_train = numbers_lables_data.iloc[:, 1:]
y_train['label'].value_counts() | code |
74052611/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
74052611/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from keras.layers import Conv2D,MaxPool2D,AveragePooling2D,Flatten,Dense,Input
from keras.losses import categorical_crossentropy,sparse_categorical_crossentropy
from keras.models import Sequential
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
numbers_lables_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
x_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = numbers_lables_data[['label']]
x_train = numbers_lables_data.iloc[:, 1:]
x_train = np.array(x_train).reshape((x_train.shape[0], 28, 28, 1))
x_test = np.array(x_test).reshape((x_test.shape[0], 28, 28, 1))
x_train = x_train / 255
x_test = x_test / 255
model = Sequential()
model.add(Input((28, 28, 1)))
model.add(Conv2D(128, 3, activation='relu', padding='same'))
model.add(AveragePooling2D(2))
model.add(Conv2D(64, 3, activation='relu', padding='same'))
model.add(AveragePooling2D(2))
model.add(Conv2D(32, 3, activation='relu', padding='same'))
model.add(AveragePooling2D(2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss=sparse_categorical_crossentropy, metrics=['accuracy'])
model.summary()
model.fit(x_train, y_train, epochs=100, validation_split=0.1) | code |
74052611/cell_14 | [
"text_plain_output_1.png"
] | from keras.layers import Conv2D,MaxPool2D,AveragePooling2D,Flatten,Dense,Input
from keras.losses import categorical_crossentropy,sparse_categorical_crossentropy
from keras.models import Sequential
model = Sequential()
model.add(Input((28, 28, 1)))
model.add(Conv2D(128, 3, activation='relu', padding='same'))
model.add(AveragePooling2D(2))
model.add(Conv2D(64, 3, activation='relu', padding='same'))
model.add(AveragePooling2D(2))
model.add(Conv2D(32, 3, activation='relu', padding='same'))
model.add(AveragePooling2D(2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss=sparse_categorical_crossentropy, metrics=['accuracy'])
model.summary() | code |
49116933/cell_21 | [
"text_plain_output_1.png"
] | from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
factory = StemmerFactory()
stemmer = factory.create_stemmer()
Encoder = LabelEncoder()
Tfidf_vect = TfidfVectorizer()
data_train = pd.read_csv('../input/indonesiafalsenews/Data_latih.csv')
false_news = data_train[data_train['label'] == 1].sample(frac=1)
true_fact = data_train[data_train['label'] == 0]
df = pd.concat([true_fact, false_news[:len(true_fact) + 200]])
df
feature = df['narasi']
label = df['label']
lower = [stemmer.stem(row.lower()) for row in feature]
lower
Tfidf_vect.fit([' '.join(row) for row in X_train]) | code |
49116933/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train = pd.read_csv('../input/indonesiafalsenews/Data_latih.csv')
false_news = data_train[data_train['label'] == 1].sample(frac=1)
true_fact = data_train[data_train['label'] == 0]
df = pd.concat([true_fact, false_news[:len(true_fact) + 200]])
df | code |
49116933/cell_25 | [
"text_plain_output_1.png"
] | from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from sklearn import svm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
factory = StemmerFactory()
stemmer = factory.create_stemmer()
Encoder = LabelEncoder()
Tfidf_vect = TfidfVectorizer()
data_train = pd.read_csv('../input/indonesiafalsenews/Data_latih.csv')
false_news = data_train[data_train['label'] == 1].sample(frac=1)
true_fact = data_train[data_train['label'] == 0]
df = pd.concat([true_fact, false_news[:len(true_fact) + 200]])
df
feature = df['narasi']
label = df['label']
lower = [stemmer.stem(row.lower()) for row in feature]
lower
y_train = Encoder.fit_transform(y_train)
y_test = Encoder.fit_transform(y_test)
y_train
Tfidf_vect.fit([' '.join(row) for row in X_train])
X_train_Tfidf = Tfidf_vect.transform([' '.join(row) for row in X_train])
X_test_Tfidf = Tfidf_vect.transform([' '.join(row) for row in X_test])
SVM = svm.SVC(C=1.0, kernel='linear', degree=1, gamma='auto', verbose=True)
SVM.fit(X_train_Tfidf, y_train)
predictions_SVM = SVM.predict(X_test_Tfidf)
print('SVM Accuracy Score -> ', accuracy_score(predictions_SVM, y_test) * 100) | code |
49116933/cell_4 | [
"text_plain_output_1.png"
] | import nltk
import numpy as np
import numpy as np # linear algebra
np.random.seed(42)
nltk.download('punkt') | code |
49116933/cell_20 | [
"text_plain_output_1.png"
] | from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
factory = StemmerFactory()
stemmer = factory.create_stemmer()
Encoder = LabelEncoder()
Tfidf_vect = TfidfVectorizer()
y_train = Encoder.fit_transform(y_train)
y_test = Encoder.fit_transform(y_test)
y_train | code |
49116933/cell_2 | [
"text_plain_output_1.png"
] | !pip install Sastrawi | code |
49116933/cell_11 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train = pd.read_csv('../input/indonesiafalsenews/Data_latih.csv')
data_train['label'].value_counts() | code |
49116933/cell_19 | [
"text_plain_output_1.png"
] | print('X_train : ', len(X_train))
print('X_test : ', len(X_test)) | code |
49116933/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
49116933/cell_16 | [
"text_plain_output_1.png"
] | from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
factory = StemmerFactory()
stemmer = factory.create_stemmer()
Encoder = LabelEncoder()
Tfidf_vect = TfidfVectorizer()
data_train = pd.read_csv('../input/indonesiafalsenews/Data_latih.csv')
false_news = data_train[data_train['label'] == 1].sample(frac=1)
true_fact = data_train[data_train['label'] == 0]
df = pd.concat([true_fact, false_news[:len(true_fact) + 200]])
df
feature = df['narasi']
label = df['label']
lower = [stemmer.stem(row.lower()) for row in feature]
lower | code |
49116933/cell_17 | [
"text_html_output_1.png"
] | from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
factory = StemmerFactory()
stemmer = factory.create_stemmer()
Encoder = LabelEncoder()
Tfidf_vect = TfidfVectorizer()
data_train = pd.read_csv('../input/indonesiafalsenews/Data_latih.csv')
false_news = data_train[data_train['label'] == 1].sample(frac=1)
true_fact = data_train[data_train['label'] == 0]
df = pd.concat([true_fact, false_news[:len(true_fact) + 200]])
df
feature = df['narasi']
label = df['label']
lower = [stemmer.stem(row.lower()) for row in feature]
lower
tokens = [word_tokenize(element) for element in lower]
tokens | code |
49116933/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_train = pd.read_csv('../input/indonesiafalsenews/Data_latih.csv')
data_train | code |
49116933/cell_27 | [
"text_plain_output_1.png"
] | from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
factory = StemmerFactory()
stemmer = factory.create_stemmer()
Encoder = LabelEncoder()
Tfidf_vect = TfidfVectorizer()
data_train = pd.read_csv('../input/indonesiafalsenews/Data_latih.csv')
false_news = data_train[data_train['label'] == 1].sample(frac=1)
true_fact = data_train[data_train['label'] == 0]
df = pd.concat([true_fact, false_news[:len(true_fact) + 200]])
df
feature = df['narasi']
label = df['label']
lower = [stemmer.stem(row.lower()) for row in feature]
lower
y_train = Encoder.fit_transform(y_train)
y_test = Encoder.fit_transform(y_test)
y_train
Tfidf_vect.fit([' '.join(row) for row in X_train])
X_train_Tfidf = Tfidf_vect.transform([' '.join(row) for row in X_train])
X_test_Tfidf = Tfidf_vect.transform([' '.join(row) for row in X_test])
rf = RandomForestClassifier()
rf.fit(X_train_Tfidf, y_train)
prediction_rf = rf.predict(X_test_Tfidf)
print('RandomForest Accuracy Score -> ', accuracy_score(prediction_rf, y_test) * 100) | code |
89125359/cell_42 | [
"text_plain_output_1.png"
] | from pathlib import Path
import ipywidgets as widgets
path = Path().cwd() / 'dogs'
lst = get_image_files(path)
lst
failed = verify_images(lst)
failed
failed.map(Path.unlink)
dog = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128))
dls = dog.dataloaders(path)
dog = dog.new(item_tfms=RandomResizedCrop(128, min_scale=0.5), batch_tfms=aug_transforms())
dls = dog.dataloaders(path)
learn = cnn_learner(dls, resnet18, metrics=error_rate)
lr_min, lr_steep, lr_slide, lr_valley = learn.lr_find(suggest_funcs=(minimum, steep, slide, valley))
learn.fine_tune(5, 0.0006918309954926372)
learn.export()
path = Path()
path.ls(file_exts='.pkl')
btn_upload = widgets.FileUpload()
btn_upload
img = PILImage.create(btn_upload.data[-1])
img
out_pl = widgets.Output()
out_pl.clear_output()
out_pl
learn_inf = load_learner(path / 'export.pkl')
pred, pred_idx, probs = learn_inf.predict(img) | code |
89125359/cell_21 | [
"text_plain_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
dog = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128))
dls = dog.dataloaders(path)
dog = dog.new(item_tfms=RandomResizedCrop(128, min_scale=0.5), batch_tfms=aug_transforms())
dls = dog.dataloaders(path)
learn = cnn_learner(dls, resnet18, metrics=error_rate) | code |
89125359/cell_9 | [
"text_plain_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
lst = get_image_files(path)
lst | code |
89125359/cell_4 | [
"text_plain_output_1.png"
] | !pip install -Uqq fastbook
import fastbook #import the fast.ai library
from fastbook import * #don't worry, it's designed to work with import *
fastbook.setup_book()
from fastai.vision.widgets import *
#import the image scraper by @JoeDockrill, website: https://joedockrill.github.io/blog/2020/09/18/jmd-imagescraper-library/
from jmd_imagescraper.core import *
from pathlib import Path
from jmd_imagescraper.imagecleaner import *
import ipywidgets as widgets | code |
89125359/cell_23 | [
"text_plain_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
dog = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128))
dls = dog.dataloaders(path)
dog = dog.new(item_tfms=RandomResizedCrop(128, min_scale=0.5), batch_tfms=aug_transforms())
dls = dog.dataloaders(path)
learn = cnn_learner(dls, resnet18, metrics=error_rate)
lr_min, lr_steep, lr_slide, lr_valley = learn.lr_find(suggest_funcs=(minimum, steep, slide, valley)) | code |
89125359/cell_33 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
lst = get_image_files(path)
lst
failed = verify_images(lst)
failed
failed.map(Path.unlink)
dog = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128))
dls = dog.dataloaders(path)
dog = dog.new(item_tfms=RandomResizedCrop(128, min_scale=0.5), batch_tfms=aug_transforms())
dls = dog.dataloaders(path)
learn = cnn_learner(dls, resnet18, metrics=error_rate)
lr_min, lr_steep, lr_slide, lr_valley = learn.lr_find(suggest_funcs=(minimum, steep, slide, valley))
learn.fine_tune(5, 0.0006918309954926372)
learn.export()
path = Path()
path.ls(file_exts='.pkl') | code |
89125359/cell_44 | [
"image_output_1.png"
] | import ipywidgets as widgets
btn_upload = widgets.FileUpload()
btn_upload
img = PILImage.create(btn_upload.data[-1])
img
out_pl = widgets.Output()
out_pl.clear_output()
out_pl
lbl_pred = widgets.Label()
lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
lbl_pred | code |
89125359/cell_40 | [
"text_plain_output_1.png"
] | import ipywidgets as widgets
btn_upload = widgets.FileUpload()
btn_upload
img = PILImage.create(btn_upload.data[-1])
img
out_pl = widgets.Output()
out_pl.clear_output()
with out_pl:
display(img.to_thumb(128, 128))
out_pl | code |
89125359/cell_29 | [
"image_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
dog = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128))
dls = dog.dataloaders(path)
dog = dog.new(item_tfms=RandomResizedCrop(128, min_scale=0.5), batch_tfms=aug_transforms())
dls = dog.dataloaders(path)
learn = cnn_learner(dls, resnet18, metrics=error_rate)
lr_min, lr_steep, lr_slide, lr_valley = learn.lr_find(suggest_funcs=(minimum, steep, slide, valley))
learn.fine_tune(5, 0.0006918309954926372)
cleaner = ImageClassifierCleaner(learn)
cleaner | code |
89125359/cell_26 | [
"text_plain_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
dog = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128))
dls = dog.dataloaders(path)
dog = dog.new(item_tfms=RandomResizedCrop(128, min_scale=0.5), batch_tfms=aug_transforms())
dls = dog.dataloaders(path)
learn = cnn_learner(dls, resnet18, metrics=error_rate)
lr_min, lr_steep, lr_slide, lr_valley = learn.lr_find(suggest_funcs=(minimum, steep, slide, valley))
learn.fine_tune(5, 0.0006918309954926372) | code |
89125359/cell_11 | [
"text_plain_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
lst = get_image_files(path)
lst
len(lst) | code |
89125359/cell_19 | [
"text_plain_output_5.png",
"text_html_output_4.png",
"text_plain_output_4.png",
"text_html_output_2.png",
"text_html_output_5.png",
"text_plain_output_6.png",
"text_plain_output_3.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"text_html_output_3.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
dog = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128))
dls = dog.dataloaders(path)
dog = dog.new(item_tfms=RandomResizedCrop(128, min_scale=0.5), batch_tfms=aug_transforms())
dls = dog.dataloaders(path)
dls.train.show_batch(max_n=8, nrows=2) | code |
89125359/cell_7 | [
"text_plain_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
duckduckgo_search(path, 'bulldog', 'bulldog', max_results=200)
duckduckgo_search(path, 'shih tzu', 'shih tzu', max_results=200)
duckduckgo_search(path, 'dalmatian', 'dalmatian', max_results=200)
duckduckgo_search(path, 'golden retriever', 'golden retriever', max_results=200)
duckduckgo_search(path, 'german shepherd', 'german shepherd', max_results=200) | code |
89125359/cell_28 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
dog = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128))
dls = dog.dataloaders(path)
dog = dog.new(item_tfms=RandomResizedCrop(128, min_scale=0.5), batch_tfms=aug_transforms())
dls = dog.dataloaders(path)
learn = cnn_learner(dls, resnet18, metrics=error_rate)
lr_min, lr_steep, lr_slide, lr_valley = learn.lr_find(suggest_funcs=(minimum, steep, slide, valley))
learn.fine_tune(5, 0.0006918309954926372)
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_top_losses(5, nrows=1) | code |
89125359/cell_8 | [
"text_plain_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
path | code |
89125359/cell_38 | [
"image_output_1.png"
] | import ipywidgets as widgets
btn_upload = widgets.FileUpload()
btn_upload
img = PILImage.create(btn_upload.data[-1])
img | code |
89125359/cell_3 | [
"text_plain_output_1.png"
!pip install jmd_imagescraper | code |
89125359/cell_46 | [
"text_plain_output_1.png"
] | import ipywidgets as widgets
btn_upload = widgets.FileUpload()
btn_upload
img = PILImage.create(btn_upload.data[-1])
img
out_pl = widgets.Output()
out_pl.clear_output()
out_pl
lbl_pred = widgets.Label()
lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
lbl_pred
btn_run = widgets.Button(description='Classify')
btn_run | code |
89125359/cell_24 | [
"text_plain_output_1.png"
] | print(f' minimum:{lr_min}\n steep:{lr_steep}\n slide:{lr_slide}\n valley:{lr_valley}') | code |
89125359/cell_27 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
dog = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(valid_pct=0.2, seed=42), get_y=parent_label, item_tfms=Resize(128))
dls = dog.dataloaders(path)
dog = dog.new(item_tfms=RandomResizedCrop(128, min_scale=0.5), batch_tfms=aug_transforms())
dls = dog.dataloaders(path)
learn = cnn_learner(dls, resnet18, metrics=error_rate)
lr_min, lr_steep, lr_slide, lr_valley = learn.lr_find(suggest_funcs=(minimum, steep, slide, valley))
learn.fine_tune(5, 0.0006918309954926372)
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix() | code |
89125359/cell_12 | [
"text_plain_output_1.png"
] | from pathlib import Path
path = Path().cwd() / 'dogs'
lst = get_image_files(path)
lst
failed = verify_images(lst)
failed | code |
89125359/cell_36 | [
"image_output_1.png"
] | import ipywidgets as widgets
btn_upload = widgets.FileUpload()
btn_upload | code |
105214315/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
def get_longest_text(texts):
longest_input = 0
for text in texts:
text_len = len(text.split())
longest_input = max(longest_input, text_len)
return longest_input
longest_input = get_longest_text(dataset['Text'])
longest_input
def get_total_words(texts):
    total_words = set()  # a set keeps membership checks O(1) over the whole corpus
    for text in texts:
        for word in text.split():
            total_words.add(word)
    return len(total_words)
word_count = get_total_words(dataset['Text'])
word_count | code |
105214315/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
def get_longest_text(texts):
longest_input = 0
for text in texts:
text_len = len(text.split())
longest_input = max(longest_input, text_len)
return longest_input
longest_input = get_longest_text(dataset['Text'])
longest_input | code |
105214315/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
encoder = LabelEncoder()
y_encoder = encoder.fit_transform(dataset['Emotion'])
y = to_categorical(y_encoder)
y[0] | code |
105214315/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
dataset.head(5) | code |
105214315/cell_23 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Dropout
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import one_hot
from keras.preprocessing.text import one_hot
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
def get_longest_text(texts):
longest_input = 0
for text in texts:
text_len = len(text.split())
longest_input = max(longest_input, text_len)
return longest_input
longest_input = get_longest_text(dataset['Text'])
longest_input
vocab_size = 21000
encoded_docs = [one_hot(d, vocab_size) for d in dataset['Text']]
max_length = longest_input
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
model = Sequential()
model.add(Embedding(vocab_size, 132, input_length=max_length))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(6, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
print(model.summary()) | code |
105214315/cell_30 | [
"text_plain_output_1.png"
] | (X_train.shape, y_train.shape) | code |
105214315/cell_33 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Dropout
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import one_hot
from keras.preprocessing.text import one_hot
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
def get_longest_text(texts):
longest_input = 0
for text in texts:
text_len = len(text.split())
longest_input = max(longest_input, text_len)
return longest_input
longest_input = get_longest_text(dataset['Text'])
longest_input
vocab_size = 21000
encoded_docs = [one_hot(d, vocab_size) for d in dataset['Text']]
max_length = longest_input
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
model = Sequential()
model.add(Embedding(vocab_size, 132, input_length=max_length))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(6, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
(X_train.shape, y_train.shape)
history = model.fit(X_train, y_train, epochs=50, validation_data=(X_test, y_test)) | code |
105214315/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
dataset['Text'][0] | code |
105214315/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
encoder = LabelEncoder()
y_encoder = encoder.fit_transform(dataset['Emotion'])
y = to_categorical(y_encoder)
y[0]
y | code |
105214315/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
len(dataset['Text'][1].split()) | code |
105214315/cell_19 | [
"text_plain_output_1.png"
] | from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import one_hot
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
def get_longest_text(texts):
longest_input = 0
for text in texts:
text_len = len(text.split())
longest_input = max(longest_input, text_len)
return longest_input
longest_input = get_longest_text(dataset['Text'])
longest_input
vocab_size = 21000
encoded_docs = [one_hot(d, vocab_size) for d in dataset['Text']]
max_length = longest_input
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
print(padded_docs[0]) | code |
105214315/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105214315/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
dataset.info() | code |
105214315/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5) | code |
105214315/cell_17 | [
"text_plain_output_1.png"
] | from keras.preprocessing.text import one_hot
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
vocab_size = 21000
encoded_docs = [one_hot(d, vocab_size) for d in dataset['Text']]
print(encoded_docs[0]) | code |
105214315/cell_31 | [
"text_plain_output_1.png"
] | (X_train.shape, y_train.shape)
y_train | code |
105214315/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
dataset['Emotion'][0] | code |
105214315/cell_36 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, Dropout
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import one_hot
from keras.preprocessing.text import one_hot
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/emotions-in-text/Emotion_final.csv')
dataset.sample(5)
def get_longest_text(texts):
longest_input = 0
for text in texts:
text_len = len(text.split())
longest_input = max(longest_input, text_len)
return longest_input
longest_input = get_longest_text(dataset['Text'])
longest_input
vocab_size = 21000
encoded_docs = [one_hot(d, vocab_size) for d in dataset['Text']]
max_length = longest_input
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
model = Sequential()
model.add(Embedding(vocab_size, 132, input_length=max_length))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(6, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
(X_train.shape, y_train.shape)
history = model.fit(X_train, y_train, epochs=50, validation_data=(X_test, y_test))
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.0, 1])
plt.legend(loc='lower right')
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2) | code |
129040406/cell_13 | [
"text_plain_output_1.png"
] | X_test.head() | code |
129040406/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
CSV_PATH = '/kaggle/input/iris/Iris.csv'
ID = 'Id'
TARGET = 'Species'
TEST_SIZE = 0.3
VAL_SIZE = 0.3
SEED = 2023
total = pd.read_csv(CSV_PATH)
TOTAL_LEN = len(total)
TOTAL_LEN | code |
129040406/cell_20 | [
"text_plain_output_1.png"
] | y_val.value_counts() | code |
129040406/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
CSV_PATH = '/kaggle/input/iris/Iris.csv'
ID = 'Id'
TARGET = 'Species'
TEST_SIZE = 0.3
VAL_SIZE = 0.3
SEED = 2023
total = pd.read_csv(CSV_PATH)
total[TARGET].value_counts() | code |
129040406/cell_11 | [
"text_plain_output_1.png"
] | X_trainval.head() | code |
129040406/cell_19 | [
"text_plain_output_1.png"
] | X_val.head() | code |
129040406/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
CSV_PATH = '/kaggle/input/iris/Iris.csv'
ID = 'Id'
TARGET = 'Species'
TEST_SIZE = 0.3
VAL_SIZE = 0.3
SEED = 2023 | code |
129040406/cell_18 | [
"text_html_output_1.png"
] | y_train.value_counts() | code |
129040406/cell_16 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
CSV_PATH = '/kaggle/input/iris/Iris.csv'
ID = 'Id'
TARGET = 'Species'
TEST_SIZE = 0.3
VAL_SIZE = 0.3
SEED = 2023
y_trainval.value_counts()
X_train, X_val, y_train, y_val = train_test_split(X_trainval, y_trainval, test_size=VAL_SIZE, random_state=SEED, stratify=y_trainval)
print('train set = ', len(X_train), ' val set', len(X_val)) | code |
129040406/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
CSV_PATH = '/kaggle/input/iris/Iris.csv'
ID = 'Id'
TARGET = 'Species'
TEST_SIZE = 0.3
VAL_SIZE = 0.3
SEED = 2023
total = pd.read_csv(CSV_PATH)
total.head() | code |
129040406/cell_17 | [
"text_plain_output_1.png"
] | X_train.head() | code |
129040406/cell_14 | [
"text_plain_output_1.png"
] | y_test.value_counts() | code |
129040406/cell_10 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
CSV_PATH = '/kaggle/input/iris/Iris.csv'
ID = 'Id'
TARGET = 'Species'
TEST_SIZE = 0.3
VAL_SIZE = 0.3
SEED = 2023
total = pd.read_csv(CSV_PATH)
y = total[TARGET]
X = total.drop([TARGET], axis=1)
X_trainval, X_test, y_trainval, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=SEED, stratify=y)
print('train+val set = ', len(X_trainval), ' test set', len(X_test)) | code |
129040406/cell_12 | [
"text_plain_output_1.png"
] | y_trainval.value_counts() | code |
129040406/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
CSV_PATH = '/kaggle/input/iris/Iris.csv'
ID = 'Id'
TARGET = 'Species'
TEST_SIZE = 0.3
VAL_SIZE = 0.3
SEED = 2023
total = pd.read_csv(CSV_PATH)
total[TARGET].unique() | code |
2025748/cell_13 | [
"text_html_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
housing = strat_train_set.copy()
corr_matrix = housing.corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
housing.plot(kind='scatter', x='median_income', y='median_house_value', alpha=0.1) | code |
2025748/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
housing['ocean_proximity'].value_counts() | code |
2025748/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20, 15))
plt.show() | code |
2025748/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
housing.head() | code |
2025748/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
housing = pd.read_csv('../input/housing.csv')
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
housing['income_cat'].where(housing['income_cat'] < 5, 5.0, inplace=True)
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
housing = strat_train_set.copy()
corr_matrix = housing.corr()
corr_matrix['median_house_value'].sort_values(ascending=False) | code |
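Note: each row above is one Kaggle notebook cell, keyed by path (kernel id / cell name), together with the list of output screenshots it produced, its raw source in code, and a constant cell_type of "code". The sketch below shows one plausible way to load and query such a table with pandas; the file name cells.parquet, the Parquet storage format, and the derived kernel_id / cell_id / code_len names are assumptions for illustration, and only the four column names come from the header above.

# Minimal sketch (assumptions: the table is exported to "cells.parquet";
# only the column names path, screenshot_names, code, cell_type are given above).
import pandas as pd

df = pd.read_parquet("cells.parquet")

# Split the path column ("<kernel_id>/<cell_name>") into its two parts.
df[["kernel_id", "cell_id"]] = df["path"].str.split("/", n=1, expand=True)

# Keep only cells whose screenshot list contains at least one image output.
has_image = df["screenshot_names"].apply(
    lambda names: any(str(n).startswith("image_output") for n in names)
)
image_cells = df[has_image]

# As a quick sanity check, show the longest image-producing cell per kernel.
longest = (
    image_cells.assign(code_len=image_cells["code"].str.len())
    .sort_values("code_len", ascending=False)
    .groupby("kernel_id", as_index=False)
    .first()
)
print(longest[["path", "code_len"]].head())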