path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class: "code") |
---|---|---|---|
17123947/cell_3 | [
"text_plain_output_1.png"
] | !pip install pyspark | code |
17123947/cell_10 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
from pyspark.sql import SparkSession
my_spark = SparkSession.builder.getOrCreate()
file_path = '../input/flights.csv'
flights = my_spark.read.csv(file_path, header=True)
flights.createOrReplaceTempView('flights')
flights = flights.withColumn('duration_hrs', flights.air_time / 60)
flights.toPandas().shape[0]
flights.limit(flights.toPandas().shape[0]).toPandas()['duration_hrs'].hist() | code |
17123947/cell_12 | [
"text_plain_output_1.png"
] | !pip install pyspark_dist_explore
# https://github.com/Bergvca/pyspark_dist_explore/ | code |
17123947/cell_5 | [
"text_html_output_1.png"
] | from pyspark.sql import SparkSession
from pyspark.sql import SparkSession
my_spark = SparkSession.builder.getOrCreate()
print(my_spark.catalog.listTables()) | code |
106210513/cell_42 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df.sample(10)
df.columns
df_new = df.copy()
df_new.drop(['ID'], axis=1, inplace=True)
df_new.Manufacturer.unique() | code |
106210513/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df['Doors'].value_counts() | code |
106210513/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum() | code |
106210513/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df['Mileage'] = df['Mileage'].apply(lambda x: str(x).replace('km', ' '))
df['Mileage'] = df['Mileage'].astype(str).astype(int)
df['Mileage'] | code |
106210513/cell_34 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df.sample(10)
df.columns
df.info() | code |
106210513/cell_23 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df['Doors'] = df['Doors'].str.replace('04-May', '4-5')
df['Doors'] = df['Doors'].str.replace('02-Mar', '2-3')
df['Doors'].value_counts() | code |
106210513/cell_30 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df['Levy'] = df['Levy'].apply(lambda x: str(x).replace('-', '0'))
df['Levy'] = df['Levy'].astype(str).astype(int)
df['Levy'] | code |
106210513/cell_33 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df.sample(10)
df.columns | code |
106210513/cell_20 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df['Doors'] | code |
106210513/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.head(10) | code |
106210513/cell_39 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df.sample(10)
df.columns
df_new = df.copy()
df_new.drop(['ID'], axis=1, inplace=True)
df_new['Price'].max() | code |
106210513/cell_26 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df['Engine volume'] | code |
106210513/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
print(f'Data contains {df.shape[0]} rows, {df.shape[1]} columns') | code |
106210513/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
106210513/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.info() | code |
106210513/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
print(f'Data contains {df.shape[0]} rows, {df.shape[1]} columns') | code |
106210513/cell_32 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df.sample(10) | code |
106210513/cell_28 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df['Levy'] | code |
106210513/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
plt.figure(figsize=(7, 8))
sns.countplot(df.dtypes)
plt.title('Count of DTypes of Data')
plt.show()
print('Count of DTypes of Columns')
print(df.dtypes.value_counts()) | code |
106210513/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()] | code |
106210513/cell_38 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df.sample(10)
df.columns
df_new = df.copy()
df_new.drop(['ID'], axis=1, inplace=True)
df_new['Price'].mean() | code |
106210513/cell_35 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df.sample(10)
df.columns
df_new = df.copy()
df_new.drop(['ID'], axis=1, inplace=True)
df_new.info() | code |
106210513/cell_43 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df.sample(10)
df.columns
df_new = df.copy()
df_new.drop(['ID'], axis=1, inplace=True)
df_new.Manufacturer.unique()
print('Number of cars produced by each manufacturer: \n', df_new.Manufacturer.value_counts()) | code |
106210513/cell_31 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
sns.kdeplot(df['Levy']) | code |
106210513/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df['Mileage'] | code |
106210513/cell_14 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
print(f'duplicated rows = {df.duplicated().sum()} ') | code |
106210513/cell_27 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df['Engine volume'] = df['Engine volume'].str.replace('Turbo', ' ')
df['Engine volume'] = df['Engine volume'].astype(str).astype(float)
df['Engine volume'] | code |
106210513/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T | code |
106210513/cell_36 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/car-price-prediction-challenge/car_price_prediction.csv')
df.isnull().sum()
df.describe().T
df.loc[df.duplicated()]
df.drop_duplicates(inplace=True)
df.sample(10)
df.columns
df_new = df.copy()
df_new.drop(['ID'], axis=1, inplace=True)
df_new.describe() | code |
106195366/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
106195366/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
age_10 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2010 - regional sex and age pop.csv', encoding='euc_kr')
age_11 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2011 - regional sex and age pop.csv', encoding='euc_kr')
age_12 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2012 - regional sex and age pop.csv', encoding='euc_kr')
age_13 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2013 - regional sex and age pop.csv', encoding='euc_kr')
age_14 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2014 - regional sex and age pop.csv', encoding='euc_kr')
age_15 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2015 - regional sex and age pop.csv', encoding='euc_kr')
age_16 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2016 - regional sex and age pop.csv', encoding='euc_kr')
age_17 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2017 - regional sex and age pop.csv', encoding='euc_kr')
age_18 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2018 - regional sex and age pop.csv', encoding='euc_kr')
age_19 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2019 - regional sex and age pop.csv', encoding='euc_kr')
age_20 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2020 - regional sex and age pop.csv', encoding='euc_kr')
age_21 = pd.read_csv('/kaggle/input/korea-age-repartition-20102021/2021 - regional sex and age pop.csv', encoding='euc_kr') | code |
129010475/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5)
df2 = df.drop(['New_Price'], axis=1, inplace=False)
df.drop(['New_Price'], axis=1, inplace=True)
df.describe().T
df.info() | code |
129010475/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5)
df['New_Price'].isnull().sum() | code |
129010475/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.head() | code |
129010475/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5)
df2 = df.drop(['New_Price'], axis=1, inplace=False)
df.drop(['New_Price'], axis=1, inplace=True)
df.describe().T
unique_fuel = df['Fuel_Type'].unique()
unique_fuel_list = unique_fuel.tolist()
unique_fuel_list | code |
129010475/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5)
df2 = df.drop(['New_Price'], axis=1, inplace=False)
df.drop(['New_Price'], axis=1, inplace=True)
df.describe().T | code |
129010475/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.head(2) | code |
129010475/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5)
len(df['Location'].unique()) | code |
129010475/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129010475/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5) | code |
129010475/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5)
df2 = df.drop(['New_Price'], axis=1, inplace=False)
df.drop(['New_Price'], axis=1, inplace=True)
df.head(1) | code |
129010475/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5)
df2 = df.drop(['New_Price'], axis=1, inplace=False)
df2.head() | code |
129010475/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5)
df2 = df.drop(['New_Price'], axis=1, inplace=False)
df.head(1) | code |
129010475/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5)
df2 = df.drop(['New_Price'], axis=1, inplace=False)
df.drop(['New_Price'], axis=1, inplace=True)
df.describe().T
df['Seats'].isnull().sum() | code |
129010475/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.sample(5)
unique_location = df['Location'].unique()
unique_location_list = unique_location.tolist()
len(unique_location_list) | code |
129010475/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/used-cars-price-prediction/train-data.csv')
df.head(-1) | code |
128039607/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
airlines = list(training_set.airline) + list(test_set.airline)
flights = list(training_set.flight) + list(test_set.flight)
source_cities = list(training_set.source_city) + list(test_set.source_city)
departure_times = list(training_set.departure_time) + list(test_set.departure_time)
stops = list(training_set.stops) + list(test_set.stops)
arrival_times = list(training_set.arrival_time) + list(test_set.arrival_time)
destination_cities = list(training_set.destination_city) + list(test_set.destination_city)
classes = list(training_set['class']) + list(test_set['class'])
training_set.nunique() | code |
128039607/cell_23 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
airlines = list(training_set.airline) + list(test_set.airline)
flights = list(training_set.flight) + list(test_set.flight)
source_cities = list(training_set.source_city) + list(test_set.source_city)
departure_times = list(training_set.departure_time) + list(test_set.departure_time)
stops = list(training_set.stops) + list(test_set.stops)
arrival_times = list(training_set.arrival_time) + list(test_set.arrival_time)
destination_cities = list(training_set.destination_city) + list(test_set.destination_city)
classes = list(training_set['class']) + list(test_set['class'])
training_set.nunique()
training_set.drop(['id'], axis=1, inplace=True)
training_set.drop(['flight'], axis=1, inplace=True)
plt.figure(figsize=(15, 5))
NF = sns.countplot(x='airline', data=training_set)
NF.set(xlabel='Indian airlines', ylabel='Number of flights', title='Flights operated by each airline')
plt.show()
plt.figure(figsize=(15, 5))
CE = sns.stripplot(x='price', y='class', hue='class', data=training_set)
CE.set(xlabel='Ticket prices', ylabel='Travel classes', title='Price range by travel class')
plt.show() | code |
128039607/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
airlines = list(training_set.airline) + list(test_set.airline)
flights = list(training_set.flight) + list(test_set.flight)
source_cities = list(training_set.source_city) + list(test_set.source_city)
departure_times = list(training_set.departure_time) + list(test_set.departure_time)
stops = list(training_set.stops) + list(test_set.stops)
arrival_times = list(training_set.arrival_time) + list(test_set.arrival_time)
destination_cities = list(training_set.destination_city) + list(test_set.destination_city)
classes = list(training_set['class']) + list(test_set['class'])
training_set.nunique()
training_set.drop(['id'], axis=1, inplace=True)
training_set.drop(['flight'], axis=1, inplace=True)
plt.figure(figsize=(15, 5))
NF = sns.countplot(x='airline', data=training_set)
NF.set(xlabel='Indian airlines', ylabel='Number of flights', title='Flights operated by each airline')
plt.show() | code |
128039607/cell_6 | [
"image_output_1.png"
] | import pandas as pd
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
training_set.head(5) | code |
128039607/cell_29 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
airlines = list(training_set.airline) + list(test_set.airline)
flights = list(training_set.flight) + list(test_set.flight)
source_cities = list(training_set.source_city) + list(test_set.source_city)
departure_times = list(training_set.departure_time) + list(test_set.departure_time)
stops = list(training_set.stops) + list(test_set.stops)
arrival_times = list(training_set.arrival_time) + list(test_set.arrival_time)
destination_cities = list(training_set.destination_city) + list(test_set.destination_city)
classes = list(training_set['class']) + list(test_set['class'])
training_set.nunique()
training_set.drop(['id'], axis=1, inplace=True)
training_set.drop(['flight'], axis=1, inplace=True)
plt.figure(figsize=(15, 5))
NF = sns.countplot(x='airline', data=training_set)
NF.set(xlabel='Indian airlines', ylabel='Number of flights', title='Flights operated by each airline')
plt.show()
plt.figure(figsize=(15, 5))
CE = sns.stripplot(x='price', y='class', hue='class', data=training_set)
CE.set(xlabel='Ticket prices', ylabel='Travel classes', title='Price range by travel class')
plt.show()
plt.figure(figsize=(15, 5))
TA = sns.countplot(x='class', data=training_set)
TA.set(xlabel='Travel class', title='Ticket availability by travel class')
plt.show()
plt.figure(figsize=(15, 5))
PD = sns.scatterplot(x=training_set['duration'], y=training_set['price'], hue=training_set['airline'])
PD.set(xlabel='Flight duration', ylabel='Ticket price', title='Price vs. flight duration for different airlines')
plt.show() | code |
128039607/cell_26 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
airlines = list(training_set.airline) + list(test_set.airline)
flights = list(training_set.flight) + list(test_set.flight)
source_cities = list(training_set.source_city) + list(test_set.source_city)
departure_times = list(training_set.departure_time) + list(test_set.departure_time)
stops = list(training_set.stops) + list(test_set.stops)
arrival_times = list(training_set.arrival_time) + list(test_set.arrival_time)
destination_cities = list(training_set.destination_city) + list(test_set.destination_city)
classes = list(training_set['class']) + list(test_set['class'])
training_set.nunique()
training_set.drop(['id'], axis=1, inplace=True)
training_set.drop(['flight'], axis=1, inplace=True)
plt.figure(figsize=(15, 5))
NF = sns.countplot(x='airline', data=training_set)
NF.set(xlabel='Indian airlines', ylabel='Number of flights', title='Flights operated by each airline')
plt.show()
plt.figure(figsize=(15, 5))
CE = sns.stripplot(x='price', y='class', hue='class', data=training_set)
CE.set(xlabel='Ticket prices', ylabel='Travel classes', title='Price range by travel class')
plt.show()
plt.figure(figsize=(15, 5))
TA = sns.countplot(x='class', data=training_set)
TA.set(xlabel='Travel class', title='Ticket availability by travel class')
plt.show() | code |
128039607/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
airlines = list(training_set.airline) + list(test_set.airline)
flights = list(training_set.flight) + list(test_set.flight)
source_cities = list(training_set.source_city) + list(test_set.source_city)
departure_times = list(training_set.departure_time) + list(test_set.departure_time)
stops = list(training_set.stops) + list(test_set.stops)
arrival_times = list(training_set.arrival_time) + list(test_set.arrival_time)
destination_cities = list(training_set.destination_city) + list(test_set.destination_city)
classes = list(training_set['class']) + list(test_set['class'])
training_set.nunique()
training_set.drop(['id'], axis=1, inplace=True)
training_set.drop(['flight'], axis=1, inplace=True)
training_set | code |
128039607/cell_8 | [
"image_output_1.png"
] | import pandas as pd
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
print('\n\nNumber of rows in the datasets:\n', '#' * 40)
print('\nTraining Set : ', len(training_set))
print('Test Set : ', len(test_set))
print('\n\nNumber of columns in the datasets:\n', '#' * 40)
print('\nTraining Set : ', len(training_set.columns))
print('Test Set : ', len(test_set.columns))
print('\n\nColumn names of the datasets:\n', '#' * 40)
print('\nTraining Set : ', list(training_set.columns))
print('Test Set : ', list(test_set.columns))
print('\n\nColumn dtypes of the datasets:\n', '#' * 40)
print('\nTraining Set : ', training_set.dtypes)
print('\nTest Set : ', test_set.dtypes)
print('\n\nAny NaN values or empty cells:\n', '#' * 40)
print('\nTraining Set : ', training_set.isnull().values.any())
print('\nTest Set : ', test_set.isnull().values.any())
print('\n\nInfo:\n', '#' * 40)
training_set.info() | code |
128039607/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
airlines = list(training_set.airline) + list(test_set.airline)
flights = list(training_set.flight) + list(test_set.flight)
source_cities = list(training_set.source_city) + list(test_set.source_city)
departure_times = list(training_set.departure_time) + list(test_set.departure_time)
stops = list(training_set.stops) + list(test_set.stops)
arrival_times = list(training_set.arrival_time) + list(test_set.arrival_time)
destination_cities = list(training_set.destination_city) + list(test_set.destination_city)
classes = list(training_set['class']) + list(test_set['class'])
training_set.nunique()
for col in training_set:
if training_set[col].dtype == 'object':
print(training_set[col].unique()) | code |
128039607/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
print(f"Raqamli ustunlar: \n {training_set.select_dtypes(['int', 'float']).columns} \n")
print(f"Harfli ustunlar: \n {training_set.select_dtypes('object').columns}") | code |
128039607/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
training_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/train_data.csv')
test_set = pd.read_csv('/kaggle/input/aviachipta-narxini-bashorat-qilish/test_data.csv')
airlines = list(training_set.airline) + list(test_set.airline)
flights = list(training_set.flight) + list(test_set.flight)
source_cities = list(training_set.source_city) + list(test_set.source_city)
departure_times = list(training_set.departure_time) + list(test_set.departure_time)
stops = list(training_set.stops) + list(test_set.stops)
arrival_times = list(training_set.arrival_time) + list(test_set.arrival_time)
destination_cities = list(training_set.destination_city) + list(test_set.destination_city)
classes = list(training_set['class']) + list(test_set['class'])
print('\nTotal number of unique values in the airline column: \n ', len(set(airlines)))
print('\nUnique values in the airline column: \n ', set(airlines))
print('\nTotal number of unique values in the flight column: \n ', len(set(flights)))
print('\nTotal number of unique values in the source_city column: \n ', len(set(source_cities)))
print('\nUnique values in the source_city column: \n ', set(source_cities))
print('\nTotal number of unique values in the departure_time column: \n ', len(set(departure_times)))
print('\nUnique values in the departure_time column: \n ', set(departure_times))
print('\nTotal number of unique values in the stops column: \n ', len(set(stops)))
print('\nUnique values in the stops column: \n ', set(stops))
print('\nTotal number of unique values in the arrival_time column: \n ', len(set(arrival_times)))
print('\nUnique values in the arrival_time column: \n ', set(arrival_times))
print('\nTotal number of unique values in the destination_city column: \n ', len(set(destination_cities)))
print('\nUnique values in the destination_city column: \n ', set(destination_cities))
print('\nTotal number of unique values in the class column: \n ', len(set(classes)))
print('\nUnique values in the class column: \n ', set(classes)) | code |
32068026/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
meta = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
print('Cols names: {}'.format(meta.columns))
meta.head(7) | code |
32068026/cell_30 | [
"text_plain_output_1.png"
] | from gensim.parsing.preprocessing import remove_stopwords
from gensim.similarities import Similarity
from gensim.test.utils import datapath, get_tmpfile
from nltk.stem import LancasterStemmer
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
import gensim
import pandas as pd
import string
meta = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
meta_dropped = meta.drop(['Microsoft Academic Paper ID', 'WHO #Covidence'], axis=1)
miss = meta['abstract'].isna().sum()
abstracts_papers = meta[meta['abstract'].notna()]
missing_doi = abstracts_papers['doi'].isna().sum()
missing_url = abstracts_papers['url'].isna().sum()
abstracts_papers = abstracts_papers[abstracts_papers['publish_time'].notna()]
abstracts_papers['year'] = pd.DatetimeIndex(abstracts_papers['publish_time']).year
abstracts_papers = abstracts_papers[abstracts_papers['url'].notna() | abstracts_papers['doi'].notna()]
porter = PorterStemmer()
lancaster = LancasterStemmer()
abstracts_only = abstracts_papers['abstract']
tokenized_abs = []
for abst in abstracts_only:
tokens_without_stop_words = remove_stopwords(abst)
tokens_cleaned = sent_tokenize(tokens_without_stop_words)
words = [porter.stem(w.lower()) for text in tokens_cleaned for w in word_tokenize(text) if w.translate(str.maketrans('', '', string.punctuation)).isalnum()]
tokenized_abs.append(words)
dictionary = []
dictionary = gensim.corpora.Dictionary(tokenized_abs)
corpus = [dictionary.doc2bow(abstract) for abstract in tokenized_abs]
tf_idf = gensim.models.TfidfModel(corpus)
query = 'COVID-19 (corona) non-pharmaceutical interventions, Methods to control the spread in communities, barriers to compliance and how these vary among different populations'
query_without_stop_words = remove_stopwords(query)
tokens = sent_tokenize(query_without_stop_words)
query_doc = [porter.stem(w.lower()) for text in tokens for w in word_tokenize(text) if w.translate(str.maketrans('', '', string.punctuation)).isalnum()]
query_doc_bow = dictionary.doc2bow(query_doc)
query_doc_tf_idf = tf_idf[query_doc_bow]
index_temp = get_tmpfile('index')
index = Similarity(index_temp, tf_idf[corpus], num_features=len(dictionary))
similarities = index[query_doc_tf_idf]
abstracts_papers['similarity'] = similarities
abstracts_papers = abstracts_papers.sort_values(by='similarity', ascending=False)
abstracts_papers.reset_index(inplace=True)
top20 = abstracts_papers.head(20)
norm_range = top20['year'].max() - top20['year'].min()
top20["similarity"] -= (abs(top20['year'] - top20['year'].max()) / norm_range)*0.1
top20 = top20.sort_values(by ='similarity' , ascending=False)
top20.reset_index(inplace = True)
for abstract in range(10):
print(top20.abstract[abstract])
print('\n>>>>>>>>>>>>>>>>>>>>>>\n') | code |
32068026/cell_28 | [
"text_plain_output_1.png"
] | from gensim.parsing.preprocessing import remove_stopwords
from gensim.similarities import Similarity
from gensim.test.utils import datapath, get_tmpfile
from nltk.stem import LancasterStemmer
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
import gensim
import pandas as pd
import string
meta = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
meta_dropped = meta.drop(['Microsoft Academic Paper ID', 'WHO #Covidence'], axis=1)
miss = meta['abstract'].isna().sum()
abstracts_papers = meta[meta['abstract'].notna()]
missing_doi = abstracts_papers['doi'].isna().sum()
missing_url = abstracts_papers['url'].isna().sum()
abstracts_papers = abstracts_papers[abstracts_papers['publish_time'].notna()]
abstracts_papers['year'] = pd.DatetimeIndex(abstracts_papers['publish_time']).year
abstracts_papers = abstracts_papers[abstracts_papers['url'].notna() | abstracts_papers['doi'].notna()]
porter = PorterStemmer()
lancaster = LancasterStemmer()
abstracts_only = abstracts_papers['abstract']
tokenized_abs = []
for abst in abstracts_only:
tokens_without_stop_words = remove_stopwords(abst)
tokens_cleaned = sent_tokenize(tokens_without_stop_words)
words = [porter.stem(w.lower()) for text in tokens_cleaned for w in word_tokenize(text) if w.translate(str.maketrans('', '', string.punctuation)).isalnum()]
tokenized_abs.append(words)
dictionary = []
dictionary = gensim.corpora.Dictionary(tokenized_abs)
corpus = [dictionary.doc2bow(abstract) for abstract in tokenized_abs]
tf_idf = gensim.models.TfidfModel(corpus)
query = 'COVID-19 (corona) non-pharmaceutical interventions, Methods to control the spread in communities, barriers to compliance and how these vary among different populations'
query_without_stop_words = remove_stopwords(query)
tokens = sent_tokenize(query_without_stop_words)
query_doc = [porter.stem(w.lower()) for text in tokens for w in word_tokenize(text) if w.translate(str.maketrans('', '', string.punctuation)).isalnum()]
query_doc_bow = dictionary.doc2bow(query_doc)
query_doc_tf_idf = tf_idf[query_doc_bow]
index_temp = get_tmpfile('index')
index = Similarity(index_temp, tf_idf[corpus], num_features=len(dictionary))
similarities = index[query_doc_tf_idf]
abstracts_papers['similarity'] = similarities
abstracts_papers = abstracts_papers.sort_values(by='similarity', ascending=False)
abstracts_papers.reset_index(inplace=True)
top20 = abstracts_papers.head(20)
norm_range = top20['year'].max() - top20['year'].min()
top20['similarity'] -= abs(top20['year'] - top20['year'].max()) / norm_range * 0.1
top20 = top20.sort_values(by='similarity', ascending=False)
top20.reset_index(inplace=True) | code |
32068026/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
meta = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
meta_dropped = meta.drop(['Microsoft Academic Paper ID', 'WHO #Covidence'], axis=1)
plt.figure(figsize=(20, 10))
meta_dropped.isna().sum().plot(kind='bar', stacked=True) | code |
32068026/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
meta = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
meta_dropped = meta.drop(['Microsoft Academic Paper ID', 'WHO #Covidence'], axis=1)
miss = meta['abstract'].isna().sum()
abstracts_papers = meta[meta['abstract'].notna()]
missing_doi = abstracts_papers['doi'].isna().sum()
missing_url = abstracts_papers['url'].isna().sum()
abstracts_papers = abstracts_papers[abstracts_papers['publish_time'].notna()]
abstracts_papers['year'] = pd.DatetimeIndex(abstracts_papers['publish_time']).year
missing_url_data = abstracts_papers[abstracts_papers['url'].notna()]
print('The total number of papers with abstracts but missing url and missing doi = {:.0f}'.format(missing_url_data.doi.isna().sum())) | code |
32068026/cell_31 | [
"text_plain_output_1.png"
] | from gensim.parsing.preprocessing import remove_stopwords
from gensim.similarities import Similarity
from gensim.test.utils import datapath, get_tmpfile
from nltk.stem import LancasterStemmer
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
import gensim
import pandas as pd
import string
meta = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
meta_dropped = meta.drop(['Microsoft Academic Paper ID', 'WHO #Covidence'], axis=1)
miss = meta['abstract'].isna().sum()
abstracts_papers = meta[meta['abstract'].notna()]
missing_doi = abstracts_papers['doi'].isna().sum()
missing_url = abstracts_papers['url'].isna().sum()
abstracts_papers = abstracts_papers[abstracts_papers['publish_time'].notna()]
abstracts_papers['year'] = pd.DatetimeIndex(abstracts_papers['publish_time']).year
abstracts_papers = abstracts_papers[abstracts_papers['url'].notna() | abstracts_papers['doi'].notna()]
porter = PorterStemmer()
lancaster = LancasterStemmer()
abstracts_only = abstracts_papers['abstract']
tokenized_abs = []
for abst in abstracts_only:
tokens_without_stop_words = remove_stopwords(abst)
tokens_cleaned = sent_tokenize(tokens_without_stop_words)
words = [porter.stem(w.lower()) for text in tokens_cleaned for w in word_tokenize(text) if w.translate(str.maketrans('', '', string.punctuation)).isalnum()]
tokenized_abs.append(words)
dictionary = []
dictionary = gensim.corpora.Dictionary(tokenized_abs)
corpus = [dictionary.doc2bow(abstract) for abstract in tokenized_abs]
tf_idf = gensim.models.TfidfModel(corpus)
query = 'COVID-19 (corona) non-pharmaceutical interventions, Methods to control the spread in communities, barriers to compliance and how these vary among different populations'
query_without_stop_words = remove_stopwords(query)
tokens = sent_tokenize(query_without_stop_words)
query_doc = [porter.stem(w.lower()) for text in tokens for w in word_tokenize(text) if w.translate(str.maketrans('', '', string.punctuation)).isalnum()]
query_doc_bow = dictionary.doc2bow(query_doc)
query_doc_tf_idf = tf_idf[query_doc_bow]
index_temp = get_tmpfile('index')
index = Similarity(index_temp, tf_idf[corpus], num_features=len(dictionary))
similarities = index[query_doc_tf_idf]
abstracts_papers['similarity'] = similarities
abstracts_papers = abstracts_papers.sort_values(by='similarity', ascending=False)
abstracts_papers.reset_index(inplace=True)
top20 = abstracts_papers.head(20)
norm_range = top20['year'].max() - top20['year'].min()
top20["similarity"] -= (abs(top20['year'] - top20['year'].max()) / norm_range)*0.1
top20 = top20.sort_values(by ='similarity' , ascending=False)
top20.reset_index(inplace = True)
for paper in range(10):
print(top20.url[paper]) | code |
32068026/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
meta = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
meta_dropped = meta.drop(['Microsoft Academic Paper ID', 'WHO #Covidence'], axis=1)
miss = meta['abstract'].isna().sum()
print('The number of papers without abstracts is {:0.0f} which represents {:.2f}% of the total number of papers'.format(miss, 100 * (miss / meta.shape[0]))) | code |
32068026/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
meta = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
meta_dropped = meta.drop(['Microsoft Academic Paper ID', 'WHO #Covidence'], axis=1)
miss = meta['abstract'].isna().sum()
abstracts_papers = meta[meta['abstract'].notna()]
print('The total number of papers is {:0.0f}'.format(abstracts_papers.shape[0]))
missing_doi = abstracts_papers['doi'].isna().sum()
print('The number of papers without doi is {:0.0f}'.format(missing_doi))
missing_url = abstracts_papers['url'].isna().sum()
print('The number of papers without url is {:0.0f}'.format(missing_url)) | code |
32068026/cell_5 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
meta = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
plt.figure(figsize=(20, 10))
meta.isna().sum().plot(kind='bar', stacked=True) | code |
106212426/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
temp = pd.DataFrame(index=df.columns)
temp['data_type'] = df.dtypes
temp['null_count'] = df.isnull().sum()
temp['unique_count'] = df.nunique()
df['bedrooms'].value_counts().plot(kind='bar')
plt.title('Number of Bedrooms')
plt.xlabel('Bedrooms')
plt.ylabel('Count') | code |
106212426/cell_25 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train)
print(lr.intercept_) | code |
106212426/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum() | code |
106212426/cell_26 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
temp = pd.DataFrame(index=df.columns)
temp['data_type'] = df.dtypes
temp['null_count'] = df.isnull().sum()
temp['unique_count'] = df.nunique()
X = df[['Total_Area', 'price', 'bedrooms', 'baths']]
y = df['price']
lr = LinearRegression()
lr.fit(X_train, y_train)
coeff_df = pd.DataFrame(lr.coef_, X.columns, columns=['Coefficient'])
coeff_df | code |
106212426/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape | code |
106212426/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes | code |
106212426/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
temp = pd.DataFrame(index=df.columns)
temp['data_type'] = df.dtypes
temp['null_count'] = df.isnull().sum()
temp['unique_count'] = df.nunique()
ax = plt.figure(figsize=(6, 6)).add_subplot(111)
ax.set_title('Price for Houses')
bp = ax.boxplot([df['bedrooms'], df['baths']]) | code |
106212426/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
temp = pd.DataFrame(index=df.columns)
temp['data_type'] = df.dtypes
temp['null_count'] = df.isnull().sum()
temp['unique_count'] = df.nunique()
ax = plt.figure(figsize=(6,6)).add_subplot(111)
ax.set_title('Price for Houses')
bp = ax.boxplot([df['bedrooms'], df['baths']])
X = df[['Total_Area', 'price', 'bedrooms', 'baths']]
y = df['price']
lr = LinearRegression()
lr.fit(X_train, y_train)
coeff_df = pd.DataFrame(lr.coef_, X.columns, columns=['Coefficient'])
coeff_df
predictions = lr.predict(X_test)
plt.scatter(y_test, predictions) | code |
106212426/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
df['location'].nunique() | code |
106212426/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
temp = pd.DataFrame(index=df.columns)
temp['data_type'] = df.dtypes
temp['null_count'] = df.isnull().sum()
temp['unique_count'] = df.nunique()
plt.scatter(df.bedrooms, df.price)
plt.title('Bedroom and Price ')
plt.xlabel('Bedrooms')
plt.ylabel('Price')
plt.show() | code |
106212426/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
temp = pd.DataFrame(index=df.columns)
temp['data_type'] = df.dtypes
temp['null_count'] = df.isnull().sum()
temp['unique_count'] = df.nunique()
plt.scatter(df.baths, df.price)
plt.title('Bathrooms and Price ')
plt.xlabel('Bathrooms')
plt.ylabel('Price')
plt.show() | code |
106212426/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.head() | code |
106212426/cell_17 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
temp = pd.DataFrame(index=df.columns)
temp['data_type'] = df.dtypes
temp['null_count'] = df.isnull().sum()
temp['unique_count'] = df.nunique()
df.plot.scatter('price', 'Total_Area') | code |
106212426/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train) | code |
106212426/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
temp = pd.DataFrame(index=df.columns)
temp['data_type'] = df.dtypes
temp['null_count'] = df.isnull().sum()
temp['unique_count'] = df.nunique()
df['baths'].value_counts().plot(kind='bar')
plt.title('number of Bathrooms')
plt.xlabel('baths')
plt.ylabel('Count') | code |
106212426/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
temp = pd.DataFrame(index=df.columns)
temp['data_type'] = df.dtypes
temp['null_count'] = df.isnull().sum()
temp['unique_count'] = df.nunique()
temp | code |
106212426/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.isnull().sum()
df.dtypes
temp = pd.DataFrame(index=df.columns)
temp['data_type'] = df.dtypes
temp['null_count'] = df.isnull().sum()
temp['unique_count'] = df.nunique()
plt.figure(figsize=(20, 6))
df['location'].value_counts().plot(kind='bar')
plt.title('Location and House frequency')
plt.xlabel('Location')
plt.ylabel('Value Count') | code |
106212426/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/islamabad-house-prices/isb_data.csv')
df.shape
df.describe(include='all') | code |
74046533/cell_9 | [
"text_html_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.value_counts(ascending=True)
df.CRISPR_Cas.value_counts(ascending=True) | code |
74046533/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes | code |
74046533/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.plot() | code |
74046533/cell_2 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.head() | code |
74046533/cell_11 | [
"text_html_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.value_counts(ascending=True)
df.CRISPR_Cas.value_counts(ascending=True)
df.corr() | code |
74046533/cell_1 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv') | code |
74046533/cell_7 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.value_counts(ascending=True) | code |
74046533/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.value_counts(ascending=True)
df.CRISPR_Cas.plot() | code |
74046533/cell_3 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.describe() | code |
74046533/cell_10 | [
"text_html_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.value_counts(ascending=True)
df.CRISPR_Cas.value_counts(ascending=True)
df.boxplot() | code |
74046533/cell_5 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape | code |
2008232/cell_13 | [
"image_output_1.png"
] | from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import sqlite3
conn = sqlite3.connect('../input/FPA_FOD_20170508.sqlite')
df = pd.read_sql_query("SELECT * FROM 'Fires'", conn)
epoch = pd.to_datetime(0, unit='s').to_julian_date()
df.DISCOVERY_DATE = pd.to_datetime(df.DISCOVERY_DATE - epoch, unit='D')
df.CONT_DATE = pd.to_datetime(df.CONT_DATE - epoch, unit='D')
df.index = pd.to_datetime(df.DISCOVERY_DATE)
df_wa = df[df.STATE == 'WA']
# analysis for yearly burn area
y=df_wa.FIRE_SIZE.resample('AS').sum().fillna(0)
ax = y.plot(kind='bar',figsize=(10,6))
# set xaxis major labels
# Make most of the ticklabels empty so the labels don't get too crowded
ticklabels = ['']*len(y.index)
# Every 4th ticklable shows the month and day
#ticklabels[::5] = [item.strftime('%b %d') for item in y.index[::4]]
# Every 12th ticklabel includes the year
#ticklabels[::5] = [item.strftime('%b %d\n%Y') for item in y.index[::5]]
ticklabels[::1] = [item.strftime('%Y') for item in y.index[::1]]
ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))
plt.gcf().autofmt_xdate()
plt.xlabel('Year')
plt.ylabel('Acres Burned');
plt.title('Acres Burned by Year');
# Extract the data we're interested in
lat = df_wa['LATITUDE'].values
lon = df_wa['LONGITUDE'].values
fsize = df_wa['FIRE_SIZE'].values
# Draw the map background
fig = plt.figure(figsize=(17, 10))
m = Basemap(projection='mill',llcrnrlon=-124. ,llcrnrlat=45.3,urcrnrlon=-117 ,urcrnrlat=49.1, resolution = 'h', epsg = 4269)
# do not know how to download the following background image with kaggel kernel, so I had to
# comment out the command
#m.arcgisimage(service='World_Physical_Map', xpixels = 5000, verbose= False)
m.drawcoastlines(color='blue')
m.drawcountries(color='blue')
m.drawstates(color='blue')
# scatter plot
m.scatter(lon, lat, latlon=True,
c=np.log10(fsize), s=fsize*.01,
cmap='Set1', alpha=0.5)
# create colorbar and legend
plt.colorbar(label=r'$\log_{10}({\rm Size Acres})$',fraction=0.02, pad=0.04)
plt.clim(3, 7)
cause = df_wa.STAT_CAUSE_DESCR.value_counts()
# plot pie chart for cause distribution
fig,ax = plt.subplots(figsize=(10,10))
ax.pie(x=cause,labels=cause.index,rotatelabels=False, autopct='%.2f%%');
plt.title('Fire Cause Distribution');
df_wa_cause = df_wa.groupby(pd.Grouper(key='DISCOVERY_DATE', freq='2AS'))['STAT_CAUSE_DESCR'].value_counts()
ticklabels = ['1992 - 1993', '1994 - 1995', '1996 - 1997', '1998 - 1999', '2000 - 2001', '2002 - 2003', '2004 - 2005', '2006 - 2007', '2008 - 2009', '2010 - 2011', '2012 - 2013', '2014 - 2015']
df_wa_cause
df_wa_cause_us = df_wa_cause.unstack()
ax = df_wa_cause_us.plot(kind='bar', x=df_wa_cause_us.index, stacked=True, figsize=(10, 6))
plt.title('Fire Cause Distribution 2 Year Window')
plt.xlabel('2 Year Window')
plt.ylabel('Number Fires')
ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))
ax.yaxis.grid(False, 'minor')
ax.yaxis.grid(True, 'major')
plt.gcf().autofmt_xdate() | code |
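
All rows above share the four-column schema from the header: `path` encodes `<kernel_id>/cell_<n>`, `screenshot_names` lists the rendered outputs of that cell, `code` holds the cell's accumulated source, and `cell_type` is always `code`. As a minimal sketch of working with this layout, the snippet below assumes the table has been exported to a CSV named `notebook_cells.csv` (the filename and export step are assumptions, not given on this page) and regroups each notebook's cells into a single script with pandas:

```python
import pandas as pd

# Assumption: the table above was exported to a CSV with the columns
# path, screenshot_names, code, cell_type.
df = pd.read_csv('notebook_cells.csv')

# 'path' encodes "<kernel_id>/cell_<n>"; split it into its two parts
# and pull out the numeric cell index for ordering.
df[['kernel_id', 'cell']] = df['path'].str.split('/', n=1, expand=True)
df['cell_num'] = df['cell'].str.extract(r'cell_(\d+)', expand=False).astype(int)

# Concatenate each notebook's cell code in cell order.
scripts = (
    df.sort_values(['kernel_id', 'cell_num'])
      .groupby('kernel_id')['code']
      .apply(lambda cells: '\n\n'.join(cells.fillna('')))
)
print(scripts.head())
```

Note that each `code` value carries its full accumulated context (imports and earlier statements are repeated from cell to cell), so deduplicating repeated lines may be worthwhile before concatenating.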