path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses: 1 value)
---|---|---|---|
89143018/cell_9 | [
"image_output_1.png"
] | from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
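# plays.csv is read with every column as a string, so missing values are the literal 'NA';
# keep only rows with a real returner and return yardage, and drop columns not needed here.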
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
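# height is stored as a 'feet-inches' string (e.g. '6-2'); split it into two integer columns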
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height.show(5) | code |
89143018/cell_25 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import matplotlib.pyplot as plt
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
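# Map the feet digit to inches (4 ft = 48 in, ..., 7 ft = 84 in) so that
# totalHeight = height_feet + height_inch below yields total height in inches.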
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from scipy.stats import gaussian_kde
x = np.array(fixedData.select('totalHeight').collect())
x = x[np.logical_not(np.isnan(x))]
plt.hist(x, bins=30, color='blue')
plt.xlabel('Height (inches)', fontsize=16)
plt.ylabel('counts', fontsize=16)
plt.title('Distribution of Heights of Kick Returners', fontsize=16)
plt.show() | code |
89143018/cell_4 | [
"text_plain_output_1.png"
] | !pip install pyspark
!pip install -U -q PyDrive
!apt install openjdk-8-jdk-headless -qq --yes
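# PySpark runs on the JVM, so point JAVA_HOME at the Java 8 installed above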
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" | code |
89143018/cell_20 | [
"text_plain_output_1.png"
] | from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
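# Assemble three feature vectors for separate regressions: weight + height, weight only, and height only.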
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight', 'totalHeight'], outputCol='features')
regression_df = vectorAssembler.transform(fixedData)
regression_df = regression_df.select(['features', 'kickReturnYardage'])
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight'], outputCol='features2')
regression_df2 = vectorAssembler.transform(fixedData)
regression_df2 = regression_df2.select(['features2', 'kickReturnYardage'])
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['totalHeight'], outputCol='features3')
regression_df3 = vectorAssembler.transform(fixedData)
regression_df3 = regression_df3.select(['features3', 'kickReturnYardage'])
regression_df3.show(3) | code |
89143018/cell_6 | [
"image_output_1.png"
] | from pyspark.sql import SparkSession
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
from pyspark.sql.functions import split
from pyspark.sql.functions import col
from pyspark.sql.types import IntegerType
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True) | code |
89143018/cell_26 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import matplotlib.pyplot as plt
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from scipy.stats import gaussian_kde
y = np.array(fixedData.select('kickReturnYardage').collect())
y = y[np.logical_not(np.isnan(y))]
plt.hist(y, bins=30, color='blue')
plt.xlabel('Kick Return Yardage', fontsize=16)
plt.ylabel('counts', fontsize=16)
plt.title('Distribution of Kick Return Yardages', fontsize=16)
plt.show() | code |
89143018/cell_19 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight', 'totalHeight'], outputCol='features')
regression_df = vectorAssembler.transform(fixedData)
regression_df = regression_df.select(['features', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df)
trainingSummary = lr_model.summary
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight'], outputCol='features2')
regression_df2 = vectorAssembler.transform(fixedData)
regression_df2 = regression_df2.select(['features2', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features2', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df2)
trainingSummary = lr_model.summary
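# Training-set fit quality for the weight-only model.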
print('RMSE: %f' % trainingSummary.rootMeanSquaredError)
print('r2: %f' % trainingSummary.r2) | code |
89143018/cell_18 | [
"text_plain_output_1.png"
] | from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight', 'totalHeight'], outputCol='features')
regression_df = vectorAssembler.transform(fixedData)
regression_df = regression_df.select(['features', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df)
trainingSummary = lr_model.summary
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight'], outputCol='features2')
regression_df2 = vectorAssembler.transform(fixedData)
regression_df2 = regression_df2.select(['features2', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features2', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df2)
print('Coefficients: ' + str(lr_model.coefficients))
print('Intercept: ' + str(lr_model.intercept)) | code |
89143018/cell_8 | [
"image_output_1.png"
] | from pyspark.sql import SparkSession
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
from pyspark.sql.functions import split
from pyspark.sql.functions import col
from pyspark.sql.types import IntegerType
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
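# The CSV reader kept every column as a string, so missing height/weight values are the literal 'NA'.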
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
merged3.show(5) | code |
89143018/cell_15 | [
"text_plain_output_1.png"
] | from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight', 'totalHeight'], outputCol='features')
regression_df = vectorAssembler.transform(fixedData)
regression_df = regression_df.select(['features', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df)
trainingSummary = lr_model.summary
print('RMSE: %f' % trainingSummary.rootMeanSquaredError)
print('r2: %f' % trainingSummary.r2) | code |
89143018/cell_16 | [
"text_plain_output_1.png"
] | from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight', 'totalHeight'], outputCol='features')
regression_df = vectorAssembler.transform(fixedData)
regression_df = regression_df.select(['features', 'kickReturnYardage'])
regression_df.describe().show() | code |
89143018/cell_17 | [
"text_plain_output_1.png"
] | from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight', 'totalHeight'], outputCol='features')
regression_df = vectorAssembler.transform(fixedData)
regression_df = regression_df.select(['features', 'kickReturnYardage'])
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight'], outputCol='features2')
regression_df2 = vectorAssembler.transform(fixedData)
regression_df2 = regression_df2.select(['features2', 'kickReturnYardage'])
regression_df2.show(3) | code |
89143018/cell_24 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import matplotlib.pyplot as plt
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from scipy.stats import gaussian_kde
x = np.array(fixedData.select('weight').collect())
x = x[np.logical_not(np.isnan(x))]
y = np.array(fixedData.select('kickReturnYardage').collect())
y = y[np.logical_not(np.isnan(y))]
plt.hist(x, bins=30, color='blue')
plt.xlabel('Weight (lbs)', fontsize=16)
plt.ylabel('counts', fontsize=16)
plt.title('Distribution of Weights of Kick Returners', fontsize=16)
plt.show() | code |
89143018/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight', 'totalHeight'], outputCol='features')
regression_df = vectorAssembler.transform(fixedData)
regression_df = regression_df.select(['features', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df)
print('Coefficients: ' + str(lr_model.coefficients))
print('Intercept: ' + str(lr_model.intercept)) | code |
89143018/cell_22 | [
"text_plain_output_1.png"
] | from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight', 'totalHeight'], outputCol='features')
regression_df = vectorAssembler.transform(fixedData)
regression_df = regression_df.select(['features', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df)
trainingSummary = lr_model.summary
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['weight'], outputCol='features2')
regression_df2 = vectorAssembler.transform(fixedData)
regression_df2 = regression_df2.select(['features2', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features2', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df2)
trainingSummary = lr_model.summary
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=['totalHeight'], outputCol='features3')
regression_df3 = vectorAssembler.transform(fixedData)
regression_df3 = regression_df3.select(['features3', 'kickReturnYardage'])
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(featuresCol='features3', labelCol='kickReturnYardage')
lr_model = lr.fit(regression_df3)
trainingSummary = lr_model.summary
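# Training-set fit quality for the height-only model.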
print('RMSE: %f' % trainingSummary.rootMeanSquaredError)
print('r2: %f' % trainingSummary.r2) | code |
89143018/cell_10 | [
"text_plain_output_1.png"
] | from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import split
from pyspark.sql.types import IntegerType
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate()
playerData = spark.read.csv('../input/nfl-big-data-bowl-2022/players.csv', header=True)
playData = spark.read.csv('../input/nfl-big-data-bowl-2022/plays.csv', header=True)
returnData = playData.filter(playData.kickReturnYardage != 'NA').filter(playData.returnerId != 'NA').drop('gameId', 'playId', 'quarter', 'possessionTeam', 'yardlineSide', 'yardlineNumber', 'gameClock', 'penaltyJerseyNumbers', 'preSnapHomeScore', 'preSnapVisitorScore', 'passResult', 'absoluteYardlineNumber')
reducedPlayerData = playerData.drop('birthDate', 'collegeName', 'Position', 'displayName')
returnData = returnData.withColumnRenamed('returnerId', 'nflId')
merged3 = returnData.join(reducedPlayerData, returnData.nflId == reducedPlayerData.nflId)
merged3 = merged3.filter(merged3.weight != 'NA').filter(merged3.height != 'NA')
new_height = merged3.withColumn('height_feet', split(col('height'), '-').getItem(0)).withColumn('height_inch', split(col('height'), '-').getItem(1))
new_height = new_height.withColumn('height_feet', new_height['height_feet'].cast(IntegerType()))
new_height = new_height.withColumn('height_inch', new_height['height_inch'].cast(IntegerType()))
new_height = new_height.withColumn('weight', new_height['weight'].cast(IntegerType()))
new_height = new_height.withColumn('kickReturnYardage', new_height['kickReturnYardage'].cast(IntegerType()))
new_height = new_height.replace(4, 48, 'height_feet')
new_height = new_height.replace(5, 60, 'height_feet')
new_height = new_height.replace(6, 72, 'height_feet')
new_height = new_height.replace(7, 84, 'height_feet')
new_height = new_height.na.fill(value=0, subset=['height_inch'])
fixedData = new_height.withColumn('totalHeight', col('height_feet') + col('height_inch'))
fixedData.show(10) | code |
89143018/cell_5 | [
"image_output_1.png"
] | from pyspark.sql import SparkSession
import numpy as np
from pyspark import SparkContext, SparkFiles
import string
import matplotlib.pyplot as plt
from pyspark.sql.functions import split
from pyspark.sql.functions import col
from pyspark.sql.types import IntegerType
spark = SparkSession.builder.master('local').appName('NFL').getOrCreate() | code |
128047653/cell_4 | [
"text_plain_output_1.png"
] | import os
import shutil
import numpy as np
import pandas as pd
basedir = '/kaggle/input/5-flower-types-classification-dataset/flower_images'
source_path_orchid = os.path.join(basedir, 'Orchid')
source_path_sunflower = os.path.join(basedir, 'Sunflower')
source_path_tulip = os.path.join(basedir, 'Tulip')
source_path_lotus = os.path.join(basedir, 'Lotus')
source_path_lilly = os.path.join(basedir, 'Lilly')
import shutil
root_dir = '/kaggle/working/fiveflowers'
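# Start from a clean output tree so reruns do not trip over directories created earlier.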
if os.path.exists(root_dir):
shutil.rmtree(root_dir)
def create_train_val_dirs(root_path):
os.makedirs(os.path.join(root_path, 'training'))
os.makedirs(os.path.join(f'{root_path}/training', 'Lilly'))
os.makedirs(os.path.join(f'{root_path}/training', 'Lotus'))
os.makedirs(os.path.join(f'{root_path}/training', 'Orchid'))
os.makedirs(os.path.join(f'{root_path}/training', 'Sunflower'))
os.makedirs(os.path.join(f'{root_path}/training', 'Tulip'))
os.makedirs(os.path.join(root_path, 'validation'))
os.makedirs(os.path.join(f'{root_path}/validation', 'Lilly'))
os.makedirs(os.path.join(f'{root_path}/validation', 'Lotus'))
os.makedirs(os.path.join(f'{root_path}/validation', 'Orchid'))
os.makedirs(os.path.join(f'{root_path}/validation', 'Sunflower'))
os.makedirs(os.path.join(f'{root_path}/validation', 'Tulip'))
try:
create_train_val_dirs(root_path=root_dir)
except FileExistsError:
print('You should not be seeing this since the upper directory is removed beforehand')
for rootdir, dirs, files in os.walk(root_dir):
for subdir in dirs:
print(os.path.join(rootdir, subdir)) | code |
128047653/cell_6 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import os
import random
import shutil
import numpy as np
import pandas as pd
basedir = '/kaggle/input/5-flower-types-classification-dataset/flower_images'
source_path_orchid = os.path.join(basedir, 'Orchid')
source_path_sunflower = os.path.join(basedir, 'Sunflower')
source_path_tulip = os.path.join(basedir, 'Tulip')
source_path_lotus = os.path.join(basedir, 'Lotus')
source_path_lilly = os.path.join(basedir, 'Lilly')
import shutil
root_dir = '/kaggle/working/fiveflowers'
if os.path.exists(root_dir):
shutil.rmtree(root_dir)
def create_train_val_dirs(root_path):
os.makedirs(os.path.join(root_path, 'training'))
os.makedirs(os.path.join(f'{root_path}/training', 'Lilly'))
os.makedirs(os.path.join(f'{root_path}/training', 'Lotus'))
os.makedirs(os.path.join(f'{root_path}/training', 'Orchid'))
os.makedirs(os.path.join(f'{root_path}/training', 'Sunflower'))
os.makedirs(os.path.join(f'{root_path}/training', 'Tulip'))
os.makedirs(os.path.join(root_path, 'validation'))
os.makedirs(os.path.join(f'{root_path}/validation', 'Lilly'))
os.makedirs(os.path.join(f'{root_path}/validation', 'Lotus'))
os.makedirs(os.path.join(f'{root_path}/validation', 'Orchid'))
os.makedirs(os.path.join(f'{root_path}/validation', 'Sunflower'))
os.makedirs(os.path.join(f'{root_path}/validation', 'Tulip'))
try:
create_train_val_dirs(root_path=root_dir)
except FileExistsError:
    pass  # the output tree was removed above, so this branch should not trigger
import random
from shutil import copyfile
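# Copy a shuffled SPLIT_SIZE fraction of SOURCE_DIR into TRAINING_DIR and the remainder into VALIDATION_DIR.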
def split_data(SOURCE_DIR, TRAINING_DIR, VALIDATION_DIR, SPLIT_SIZE):
shuffled_source = random.sample(os.listdir(SOURCE_DIR), len(os.listdir(SOURCE_DIR)))
    training_number = int(len(shuffled_source) * SPLIT_SIZE)
i = 0
Target = TRAINING_DIR
for item in shuffled_source:
item_path = os.path.join(SOURCE_DIR, item)
shutil.copy(item_path, os.path.join(Target, item))
i += 1
if i == training_number:
Target = VALIDATION_DIR
Lilly_SOURCE_DIR = '/kaggle/input/5-flower-types-classification-dataset/flower_images/Lilly'
Lotus_SOURCE_DIR = '/kaggle/input/5-flower-types-classification-dataset/flower_images/Lotus'
Orchid_SOURCE_DIR = '/kaggle/input/5-flower-types-classification-dataset/flower_images/Orchid'
Sunflower_SOURCE_DIR = '/kaggle/input/5-flower-types-classification-dataset/flower_images/Sunflower'
Tulip_SOURCE_DIR = '/kaggle/input/5-flower-types-classification-dataset/flower_images/Tulip'
TRAINING_DIR = '/kaggle/working/fiveflowers/training/'
VALIDATION_DIR = '/kaggle/working/fiveflowers/validation/'
TRAINING_Lilly_DIR = os.path.join(TRAINING_DIR, 'Lilly')
VALIDATION_Lilly_DIR = os.path.join(VALIDATION_DIR, 'Lilly')
TRAINING_Lotus_DIR = os.path.join(TRAINING_DIR, 'Lotus')
VALIDATION_Lotus_DIR = os.path.join(VALIDATION_DIR, 'Lotus')
TRAINING_Orchid_DIR = os.path.join(TRAINING_DIR, 'Orchid')
VALIDATION_Orchid_DIR = os.path.join(VALIDATION_DIR, 'Orchid')
TRAINING_Sunflower_DIR = os.path.join(TRAINING_DIR, 'Sunflower')
VALIDATION_Sunflower_DIR = os.path.join(VALIDATION_DIR, 'Sunflower')
TRAINING_Tulip_DIR = os.path.join(TRAINING_DIR, 'Tulip')
VALIDATION_Tulip_DIR = os.path.join(VALIDATION_DIR, 'Tulip')
print(TRAINING_Lilly_DIR)
split_size = 0.9
split_data(Lilly_SOURCE_DIR, TRAINING_Lilly_DIR, VALIDATION_Lilly_DIR, split_size)
split_data(Lotus_SOURCE_DIR, TRAINING_Lotus_DIR, VALIDATION_Lotus_DIR, split_size)
split_data(Orchid_SOURCE_DIR, TRAINING_Orchid_DIR, VALIDATION_Orchid_DIR, split_size)
split_data(Sunflower_SOURCE_DIR, TRAINING_Sunflower_DIR, VALIDATION_Sunflower_DIR, split_size)
split_data(Tulip_SOURCE_DIR, TRAINING_Tulip_DIR, VALIDATION_Tulip_DIR, split_size)
print(f"\n\nOriginal lilly's directory has {len(os.listdir(Lilly_SOURCE_DIR))} images")
print(f"Original lotus's directory has {len(os.listdir(Lotus_SOURCE_DIR))} images")
print(f"Original orchid's directory has {len(os.listdir(Orchid_SOURCE_DIR))} images")
print(f"Original sunflower's directory has {len(os.listdir(Sunflower_SOURCE_DIR))} images")
print(f"Original tulip's directory has {len(os.listdir(Tulip_SOURCE_DIR))} images")
print(f'There are {len(os.listdir(TRAINING_Lilly_DIR))} images of lilly for training')
print(f'There are {len(os.listdir(TRAINING_Lotus_DIR))} images of lotus for training')
print(f'There are {len(os.listdir(VALIDATION_Lilly_DIR))} images of lilly for validation')
print(f'There are {len(os.listdir(VALIDATION_Lotus_DIR))} images of lotus for validation') | code |
128047653/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
basedir = '/kaggle/input/5-flower-types-classification-dataset/flower_images'
print('contents of base directory:')
print(os.listdir(basedir)) | code |
128047653/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128047653/cell_3 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
basedir = '/kaggle/input/5-flower-types-classification-dataset/flower_images'
print(os.listdir(basedir))
source_path_orchid = os.path.join(basedir, 'Orchid')
source_path_sunflower = os.path.join(basedir, 'Sunflower')
source_path_tulip = os.path.join(basedir, 'Tulip')
source_path_lotus = os.path.join(basedir, 'Lotus')
source_path_lilly = os.path.join(basedir, 'Lilly')
print(f'there are {len(os.listdir(source_path_orchid))} images of Orchid')
print(f'there are {len(os.listdir(source_path_sunflower))} images of Sunflower')
print(f'there are {len(os.listdir(source_path_tulip))} images of Tulip')
print(f'there are {len(os.listdir(source_path_lotus))} images of Lotus')
print(f'there are {len(os.listdir(source_path_lilly))} images of Lilly') | code |
130008924/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
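# NOTE: X_train/Y_train/X_test/Y_test are not defined in this cell; the original notebook
# presumably built them in an earlier cell not captured here, e.g. with sklearn's
# train_test_split over the numeric training columns (an assumption, not shown in the source).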
X_test.shape
lr = LinearRegression()
lr.fit(X_train, Y_train)
lr.score(X_test, Y_test)
test = test.fillna(0)
numerical_test_cols = test.select_dtypes(include=['int64', 'float64'])
numerical_test_cols.columns
x_val = test[['Id', 'MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces', 'GarageYrBlt', 'GarageCars', 'GarageArea', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea', 'MiscVal', 'MoSold', 'YrSold']]
x_val = x_val.fillna(0)
SalePrice_pred = lr.predict(x_val)
ids = test['Id']
fnl_df = pd.DataFrame({'ID': ids, 'SalePrice': SalePrice_pred})
fnl_df.head() | code |
130008924/cell_13 | [
"text_html_output_1.png"
] | X_test.shape | code |
130008924/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.drop(['Alley'], axis=1)
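# note: drop() returns a new DataFrame; without assignment 'Alley' is not actually removed from train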
train = train.fillna(0)
numerical_cols = train.select_dtypes(include=['int64', 'float64'])
train.columns | code |
130008924/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.head() | code |
130008924/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.info() | code |
130008924/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.drop(['Alley'], axis=1)
train = train.fillna(0)
numerical_cols = train.select_dtypes(include=['int64', 'float64'])
train.columns
train.shape | code |
130008924/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
130008924/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns | code |
130008924/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
test = test.fillna(0)
numerical_test_cols = test.select_dtypes(include=['int64', 'float64'])
numerical_test_cols.columns
x_val = test[['Id', 'MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces', 'GarageYrBlt', 'GarageCars', 'GarageArea', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea', 'MiscVal', 'MoSold', 'YrSold']]
x_val.fillna(0) | code |
130008924/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.columns
train.drop(['Alley'], axis=1) | code |
130008924/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
X_test.shape
lr = LinearRegression()
lr.fit(X_train, Y_train)
lr.score(X_test, Y_test) | code |
130008924/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
test.info() | code |
130008924/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
test = test.fillna(0)
numerical_test_cols = test.select_dtypes(include=['int64', 'float64'])
numerical_test_cols.columns | code |
130008924/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train, Y_train) | code |
130008924/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
test.head() | code |
105182734/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
warnings.filterwarnings('ignore')
import scipy as sp
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from tqdm.auto import tqdm
import itertools
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, MinMaxScaler
from sklearn.metrics import roc_auc_score, roc_curve
import matplotlib.pyplot as plt
df_train = pd.read_csv('../input/amazon-train/delivery time.csv')
df_test = pd.read_csv('../input/amazon-train/delivery time Test.csv')
test_id = df_test['ID']
df_train = df_train.set_index('Source.Name.1')
df_test = df_test.set_index('Source.Name.1')
target = df_train['Time_taken (min)']
df_test.isna().sum()
cdata = pd.concat([df_train, df_test], axis=0)
cdata = cdata.sort_index()
cdata.replace('nan', np.nan, inplace=True)
cdata2 = cdata.copy()
cdata2.drop(['ID', 'Time_taken (min)', 'Order_Date'], axis=1, inplace=True)
cat_features = cdata2.select_dtypes('O').columns
numeric_features = cdata2.select_dtypes(np.number)
na_numeric_features = [feat for feat in numeric_features if feat in cdata2.loc[:, cdata2.isna().sum() > 0].columns]
cdata2.isna().sum()
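# Convert 'HH:MM' time strings to fractional hours (e.g. '13:30' -> 13.5).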
for i in range(cdata.shape[0]):
tmp = cdata2.loc[i, 'Time_Orderd']
[hr, mint] = tmp.split(':', 2)
hr = int(hr)
mint = int(mint)
mint = mint / 60
cdata2.loc[i, 'Time_Orderd'] = hr + mint
for i in range(cdata.shape[0]):
tmp = cdata2.loc[i, 'Time_Order_picked']
[hr, mint] = tmp.split(':', 2)
hr = int(hr)
mint = int(mint)
mint = mint / 60
cdata2.loc[i, 'Time_Order_picked'] = hr + mint
import geopy.distance
for i in range(0, cdata2.shape[0]):
cdata2.loc[i, 'distance'] = geopy.distance.geodesic((cdata2.loc[i, 'Restaurant_latitude'], cdata2.loc[i, 'Restaurant_longitude']), (cdata2.loc[i, 'Delivery_location_latitude'], cdata2.loc[i, 'Delivery_location_longitude'])).km
cdata2.drop(['Restaurant_latitude', 'Restaurant_longitude', 'Delivery_location_longitude', 'Delivery_location_latitude', 'Delivery_person_ID'], axis=1, inplace=True)
cdata2 | code |
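The row-by-row distance loop above relies on geopy's geodesic helper; a minimal sketch on a single coordinate pair (the coordinates are illustrative, not taken from the dataset):

import geopy.distance

restaurant = (12.9716, 77.5946)  # hypothetical restaurant location (lat, lon)
delivery = (13.0359, 77.5970)    # hypothetical delivery location (lat, lon)
km = geopy.distance.geodesic(restaurant, delivery).km
print(round(km, 2))  # geodesic distance in kilometres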
105182734/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
import warnings
warnings.filterwarnings('ignore')
import scipy as sp
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from tqdm.auto import tqdm
import itertools
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, MinMaxScaler
from sklearn.metrics import roc_auc_score, roc_curve
import matplotlib.pyplot as plt
df_train = pd.read_csv('../input/amazon-train/delivery time.csv')
df_test = pd.read_csv('../input/amazon-train/delivery time Test.csv')
test_id = df_test['ID']
df_train = df_train.set_index('Source.Name.1')
df_test = df_test.set_index('Source.Name.1')
target = df_train['Time_taken (min)']
df_test.isna().sum()
cdata = pd.concat([df_train, df_test], axis=0)
cdata = cdata.sort_index()
cdata.replace('nan', np.nan, inplace=True)
cdata2 = cdata.copy()
cdata2.drop(['ID', 'Time_taken (min)', 'Order_Date'], axis=1, inplace=True)
cat_features = cdata2.select_dtypes('O').columns
numeric_features = cdata2.select_dtypes(np.number)
na_numeric_features = [feat for feat in numeric_features if feat in cdata2.loc[:, cdata2.isna().sum() > 0].columns]
cdata2.isna().sum() | code |
105182734/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import warnings
import gc
import warnings
warnings.filterwarnings('ignore')
import scipy as sp
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from tqdm.auto import tqdm
import itertools
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, MinMaxScaler
from sklearn.metrics import roc_auc_score, roc_curve
import matplotlib.pyplot as plt
df_train = pd.read_csv('../input/amazon-train/delivery time.csv')
df_test = pd.read_csv('../input/amazon-train/delivery time Test.csv')
test_id = df_test['ID']
df_train = df_train.set_index('Source.Name.1')
df_test = df_test.set_index('Source.Name.1')
target = df_train['Time_taken (min)']
# `cdata3` is the preprocessed feature frame built in earlier cells of the source notebook
x_train = cdata3.iloc[:df_train.shape[0], :]
x_test = cdata3.iloc[df_train.shape[0]:, :] | code |
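A toy sketch of the concat-then-slice pattern this cell depends on: train and test are stacked for shared preprocessing, then split back apart by the original row count:

import pandas as pd

train = pd.DataFrame({'f': [1, 2, 3]})
test = pd.DataFrame({'f': [4, 5]})
combined = pd.concat([train, test], axis=0, ignore_index=True)
# ... shared preprocessing would happen here ...
x_train = combined.iloc[:train.shape[0], :]
x_test = combined.iloc[train.shape[0]:, :]
assert len(x_train) == 3 and len(x_test) == 2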
105182734/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
import warnings
import gc
import warnings
warnings.filterwarnings('ignore')
import scipy as sp
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from tqdm.auto import tqdm
import itertools
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, MinMaxScaler
from sklearn.metrics import roc_auc_score, roc_curve
import matplotlib.pyplot as plt
df_train = pd.read_csv('../input/amazon-train/delivery time.csv')
df_test = pd.read_csv('../input/amazon-train/delivery time Test.csv')
test_id = df_test['ID']
df_train = df_train.set_index('Source.Name.1')
df_test = df_test.set_index('Source.Name.1')
target = df_train['Time_taken (min)']
df_test.isna().sum() | code |
16112556/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
model.coef_
train_predict = model.predict(train_X)
test_predict = model.predict(test_X)
print('MAE for test', mean_absolute_error(test_Y, test_predict)) | code
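A self-contained sketch of the train/test error comparison these cells print, run on synthetic data so it executes on its own:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
model = LinearRegression().fit(X_tr, y_tr)
for name, Xs, ys in [('train', X_tr, y_tr), ('test', X_te, y_te)]:
    pred = model.predict(Xs)
    print(name, 'MAE', mean_absolute_error(ys, pred), 'MSE', mean_squared_error(ys, pred))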
16112556/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_ | code |
16112556/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike_df = pd.read_csv('../input/bike_share.csv')
bike_df.shape | code |
16112556/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
model.coef_
train_predict = model.predict(train_X)
print('MAE for train', mean_absolute_error(train_Y, train_predict)) | code |
16112556/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike_df = pd.read_csv('../input/bike_share.csv')
bike_df.shape
bike_df.isna().sum() | code |
16112556/cell_2 | [
"text_plain_output_1.png"
] | import os
import os
import numpy as np
import pandas as pd
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16112556/cell_19 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
model.coef_
train_predict = model.predict(train_X)
test_predict = model.predict(test_X)
print('MSE for test', mean_squared_error(test_Y, test_predict)) | code |
16112556/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16112556/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike_df = pd.read_csv('../input/bike_share.csv')
bike_df.shape
bike_df.isna().sum()
bike_df.corr() | code |
16112556/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
model.coef_
train_predict = model.predict(train_X)
print('MSE for train', mean_squared_error(train_Y, train_predict)) | code
16112556/cell_14 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
model.coef_ | code |
16112556/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_X, train_Y) | code |
16112556/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
bike_df = pd.read_csv('../input/bike_share.csv')
bike_df.shape
bike_df.head() | code |
50212949/cell_13 | [
"text_plain_output_1.png"
] | from collections import defaultdict
from nltk.corpus import stopwords
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import pandas as pd
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
import re
import string
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from nltk.corpus import stopwords
import seaborn as sns
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from nltk.stem import WordNetLemmatizer
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from tensorflow.keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from gensim.models import Word2Vec
from numpy import asarray
from numpy import zeros
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from nltk.tokenize import RegexpTokenizer
import plotly
import plotly.graph_objs as go
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import math
from bs4 import BeautifulSoup
import tensorflow as tf
import numpy as np
import skimage
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score
import missingno as msno
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
from collections import Counter
plt.style.use('ggplot')
stop = set(stopwords.words('english'))
import re
from nltk.tokenize import word_tokenize
import gensim
import string
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, SpatialDropout1D
from keras.initializers import Constant
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
import warnings
warnings.filterwarnings('ignore')
tweet = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/train.csv')
test = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/test.csv')
x = tweet.label.value_counts()
#Number of characters in tweets
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5))
tweet_len=tweet[tweet['label']==1]['tweet'].str.len()
ax1.hist(tweet_len,color='red')
ax1.set_title('Negative tweets')
tweet_len=tweet[tweet['label']==0]['tweet'].str.len()
ax2.hist(tweet_len,color='green')
ax2.set_title('Positive tweets')
fig.suptitle('Characters in tweets')
plt.show()
#Number of words in a tweet
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5))
tweet_len=tweet[tweet['label']==1]['tweet'].str.split().map(lambda x: len(x))
ax1.hist(tweet_len,color='red')
ax1.set_title('Negative tweets')
tweet_len=tweet[tweet['label']==0]['tweet'].str.split().map(lambda x: len(x))
ax2.hist(tweet_len,color='green')
ax2.set_title('Positive tweets')
fig.suptitle('Words in a tweet')
plt.show()
#Average word length in a tweet
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5))
word=tweet[tweet['label']==1]['tweet'].str.split().apply(lambda x : [len(i) for i in x])
sns.distplot(word.map(lambda x: np.mean(x)),ax=ax1,color='red')
ax1.set_title('Negative')
word=tweet[tweet['label']==0]['tweet'].str.split().apply(lambda x : [len(i) for i in x])
sns.distplot(word.map(lambda x: np.mean(x)),ax=ax2,color='green')
ax2.set_title('Positive')
fig.suptitle('Average word length in each tweet')
def create_corpus(target):
corpus = []
for x in tweet[tweet['label'] == target]['tweet'].str.split():
for i in x:
corpus.append(i)
return corpus
corpus = create_corpus(0)
dic = defaultdict(int)
for word in corpus:
if word in stop:
dic[word] += 1
top = sorted(dic.items(), key=lambda x: x[1], reverse=True)[:10]
x, y = zip(*top)
corpus = create_corpus(1)
dic = defaultdict(int)
for word in corpus:
if word in stop:
dic[word] += 1
top = sorted(dic.items(), key=lambda x: x[1], reverse=True)[:10]
x, y = zip(*top)
plt.bar(x, y) | code |
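collections.Counter (already imported in this cell) can replace the defaultdict bookkeeping above; a sketch over a toy corpus with made-up words:

from collections import Counter

stop = {'the', 'to', 'a'}
corpus = ['the', 'phone', 'is', 'the', 'best', 'to', 'a', 'the']
counts = Counter(w for w in corpus if w in stop)
print(counts.most_common(10))  # [('the', 3), ('to', 1), ('a', 1)]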
50212949/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from nltk.corpus import stopwords
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import pandas as pd
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
import re
import string
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from nltk.corpus import stopwords
import seaborn as sns
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from nltk.stem import WordNetLemmatizer
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from tensorflow.keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from gensim.models import Word2Vec
from numpy import asarray
from numpy import zeros
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from nltk.tokenize import RegexpTokenizer
import plotly
import plotly.graph_objs as go
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import math
from bs4 import BeautifulSoup
import tensorflow as tf
import numpy as np
import skimage
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score
import missingno as msno
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
from collections import Counter
plt.style.use('ggplot')
stop = set(stopwords.words('english'))
import re
from nltk.tokenize import word_tokenize
import gensim
import string
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, SpatialDropout1D
from keras.initializers import Constant
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
import warnings
warnings.filterwarnings('ignore')
tweet = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/train.csv')
test = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/test.csv')
x = tweet.label.value_counts()
#Number of characters in tweets
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5))
tweet_len=tweet[tweet['label']==1]['tweet'].str.len()
ax1.hist(tweet_len,color='red')
ax1.set_title('Negative tweets')
tweet_len=tweet[tweet['label']==0]['tweet'].str.len()
ax2.hist(tweet_len,color='green')
ax2.set_title('Positive tweets')
fig.suptitle('Characters in tweets')
plt.show()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
tweet_len = tweet[tweet['label'] == 1]['tweet'].str.split().map(lambda x: len(x))
ax1.hist(tweet_len, color='red')
ax1.set_title('Negative tweets')
tweet_len = tweet[tweet['label'] == 0]['tweet'].str.split().map(lambda x: len(x))
ax2.hist(tweet_len, color='green')
ax2.set_title('Positive tweets')
fig.suptitle('Words in a tweet')
plt.show() | code |
50212949/cell_4 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd
tweet = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/train.csv')
tweet.head(5) | code |
50212949/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd
tweet = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/train.csv')
test = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/test.csv')
print('There are {} rows and {} columns in train'.format(tweet.shape[0], tweet.shape[1]))
print('There are {} rows and {} columns in test'.format(test.shape[0], test.shape[1])) | code |
50212949/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from nltk.corpus import stopwords
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import pandas as pd
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
import re
import string
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from nltk.corpus import stopwords
import seaborn as sns
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from nltk.stem import WordNetLemmatizer
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from tensorflow.keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from gensim.models import Word2Vec
from numpy import asarray
from numpy import zeros
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from nltk.tokenize import RegexpTokenizer
import plotly
import plotly.graph_objs as go
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import math
from bs4 import BeautifulSoup
import tensorflow as tf
import numpy as np
import skimage
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score
import missingno as msno
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
from collections import Counter
plt.style.use('ggplot')
stop = set(stopwords.words('english'))
import re
from nltk.tokenize import word_tokenize
import gensim
import string
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, SpatialDropout1D
from keras.initializers import Constant
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
import warnings
warnings.filterwarnings('ignore')
tweet = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/train.csv')
test = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/test.csv')
x = tweet.label.value_counts()
sns.barplot(x=x.index, y=x.values)
plt.gca().set_ylabel('samples') | code |
50212949/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from nltk.corpus import stopwords
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import pandas as pd
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
import re
import string
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from nltk.corpus import stopwords
import seaborn as sns
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from nltk.stem import WordNetLemmatizer
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from tensorflow.keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from gensim.models import Word2Vec
from numpy import asarray
from numpy import zeros
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from nltk.tokenize import RegexpTokenizer
import plotly
import plotly.graph_objs as go
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import math
from bs4 import BeautifulSoup
import tensorflow as tf
import numpy as np
import skimage
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score
import missingno as msno
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
from collections import Counter
plt.style.use('ggplot')
stop = set(stopwords.words('english'))
import re
from nltk.tokenize import word_tokenize
import gensim
import string
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, SpatialDropout1D
from keras.initializers import Constant
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
import warnings
warnings.filterwarnings('ignore')
tweet = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/train.csv')
test = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/test.csv')
x = tweet.label.value_counts()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
tweet_len = tweet[tweet['label'] == 1]['tweet'].str.len()
ax1.hist(tweet_len, color='red')
ax1.set_title('Negative tweets')
tweet_len = tweet[tweet['label'] == 0]['tweet'].str.len()
ax2.hist(tweet_len, color='green')
ax2.set_title('Positive tweets')
fig.suptitle('Characters in tweets')
plt.show() | code |
50212949/cell_10 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import pandas as pd
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
import re
import string
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from nltk.corpus import stopwords
import seaborn as sns
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from nltk.stem import WordNetLemmatizer
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from tensorflow.keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from gensim.models import Word2Vec
from numpy import asarray
from numpy import zeros
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from nltk.tokenize import RegexpTokenizer
import plotly
import plotly.graph_objs as go
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import math
from bs4 import BeautifulSoup
import tensorflow as tf
import numpy as np
import skimage
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score
import missingno as msno
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
from collections import Counter
plt.style.use('ggplot')
stop = set(stopwords.words('english'))
import re
from nltk.tokenize import word_tokenize
import gensim
import string
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, SpatialDropout1D
from keras.initializers import Constant
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
import warnings
warnings.filterwarnings('ignore')
tweet = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/train.csv')
test = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/test.csv')
x = tweet.label.value_counts()
#Number of characters in tweets
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5))
tweet_len=tweet[tweet['label']==1]['tweet'].str.len()
ax1.hist(tweet_len,color='red')
ax1.set_title('Negative tweets')
tweet_len=tweet[tweet['label']==0]['tweet'].str.len()
ax2.hist(tweet_len,color='green')
ax2.set_title('Positive tweets')
fig.suptitle('Characters in tweets')
plt.show()
#Number of words in a tweet
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5))
tweet_len=tweet[tweet['label']==1]['tweet'].str.split().map(lambda x: len(x))
ax1.hist(tweet_len,color='red')
ax1.set_title('Negative tweets')
tweet_len=tweet[tweet['label']==0]['tweet'].str.split().map(lambda x: len(x))
ax2.hist(tweet_len,color='green')
ax2.set_title('Positive tweets')
fig.suptitle('Words in a tweet')
plt.show()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
word = tweet[tweet['label'] == 1]['tweet'].str.split().apply(lambda x: [len(i) for i in x])
sns.distplot(word.map(lambda x: np.mean(x)), ax=ax1, color='red')
ax1.set_title('Negative')
word = tweet[tweet['label'] == 0]['tweet'].str.split().apply(lambda x: [len(i) for i in x])
sns.distplot(word.map(lambda x: np.mean(x)), ax=ax2, color='green')
ax2.set_title('Positive')
fig.suptitle('Average word length in each tweet') | code |
50212949/cell_12 | [
"text_html_output_1.png"
] | from collections import defaultdict
from nltk.corpus import stopwords
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import seaborn as sns
import seaborn as sns
import warnings
import pandas as pd
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
import re
import string
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from nltk.corpus import stopwords
import seaborn as sns
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from nltk.stem import WordNetLemmatizer
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from tensorflow.keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from gensim.models import Word2Vec
from numpy import asarray
from numpy import zeros
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from nltk.tokenize import RegexpTokenizer
import plotly
import plotly.graph_objs as go
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import math
from bs4 import BeautifulSoup
import tensorflow as tf
import numpy as np
import skimage
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score
import missingno as msno
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
from collections import Counter
plt.style.use('ggplot')
stop = set(stopwords.words('english'))
import re
from nltk.tokenize import word_tokenize
import gensim
import string
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, SpatialDropout1D
from keras.initializers import Constant
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
import warnings
warnings.filterwarnings('ignore')
tweet = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/train.csv')
test = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/test.csv')
x = tweet.label.value_counts()
#Number of characters in tweets
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5))
tweet_len=tweet[tweet['label']==1]['tweet'].str.len()
ax1.hist(tweet_len,color='red')
ax1.set_title('Negative tweets')
tweet_len=tweet[tweet['label']==0]['tweet'].str.len()
ax2.hist(tweet_len,color='green')
ax2.set_title('Positive tweets')
fig.suptitle('Characters in tweets')
plt.show()
#Number of words in a tweet
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5))
tweet_len=tweet[tweet['label']==1]['tweet'].str.split().map(lambda x: len(x))
ax1.hist(tweet_len,color='red')
ax1.set_title('Negative tweets')
tweet_len=tweet[tweet['label']==0]['tweet'].str.split().map(lambda x: len(x))
ax2.hist(tweet_len,color='green')
ax2.set_title('Positive tweets')
fig.suptitle('Words in a tweet')
plt.show()
#Average word length in a tweet
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(10,5))
word=tweet[tweet['label']==1]['tweet'].str.split().apply(lambda x : [len(i) for i in x])
sns.distplot(word.map(lambda x: np.mean(x)),ax=ax1,color='red')
ax1.set_title('Negative')
word=tweet[tweet['label']==0]['tweet'].str.split().apply(lambda x : [len(i) for i in x])
sns.distplot(word.map(lambda x: np.mean(x)),ax=ax2,color='green')
ax2.set_title('Positive')
fig.suptitle('Average word length in each tweet')
def create_corpus(target):
corpus = []
for x in tweet[tweet['label'] == target]['tweet'].str.split():
for i in x:
corpus.append(i)
return corpus
corpus = create_corpus(0)
dic = defaultdict(int)
for word in corpus:
if word in stop:
dic[word] += 1
top = sorted(dic.items(), key=lambda x: x[1], reverse=True)[:10]
x, y = zip(*top)
plt.bar(x, y) | code |
50212949/cell_5 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd
tweet = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/train.csv')
test = pd.read_csv('../input/analytics-vidhya-identify-the-sentiments/test.csv')
test.head(5) | code |
104123576/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import sklearn.datasets  # needed for load_boston (available in scikit-learn < 1.2)
data = sklearn.datasets.load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['price'] = data.target
df.info() | code |
104123576/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import sklearn.datasets  # needed for load_boston (available in scikit-learn < 1.2)
data = sklearn.datasets.load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['price'] = data.target
df.head() | code |
104123576/cell_11 | [
"text_html_output_1.png"
] | from sklearn import metrics
from xgboost import XGBRegressor
model = XGBRegressor(objective='reg:squarederror')
model.fit(x_train, y_train)
pred = model.predict(x_test)
metrics.r2_score(y_test, pred)
metrics.mean_absolute_error(y_test, pred) | code |
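A self-contained sketch of the same fit/predict/score loop on synthetic data (the hyperparameters are illustrative, not tuned):

import numpy as np
from sklearn import metrics
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

rng = np.random.default_rng(42)
X = rng.normal(size=(300, 5))
y = 3 * X[:, 0] + X[:, 1] ** 2 + rng.normal(scale=0.1, size=300)
x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=42)
model = XGBRegressor(objective='reg:squarederror', n_estimators=200)
model.fit(x_train, y_train)
pred = model.predict(x_test)
print('R2 :', metrics.r2_score(y_test, pred))
print('MAE:', metrics.mean_absolute_error(y_test, pred))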
104123576/cell_8 | [
"text_plain_output_1.png"
] | from xgboost import XGBRegressor
model = XGBRegressor(objective='reg:squarederror')
model.fit(x_train, y_train) | code |
104123576/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import sklearn.datasets  # needed for load_boston (available in scikit-learn < 1.2)
data = sklearn.datasets.load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['price'] = data.target
df.describe() | code |
104123576/cell_10 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn import metrics
from xgboost import XGBRegressor
model = XGBRegressor(objective='reg:squarederror')
model.fit(x_train, y_train)
pred = model.predict(x_test)
metrics.r2_score(y_test, pred) | code |
104123576/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import sklearn.datasets  # needed for load_boston (available in scikit-learn < 1.2)
data = sklearn.datasets.load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['price'] = data.target
df['price'].value_counts | code |
90124333/cell_3 | [
"text_plain_output_1.png"
] | import tensorflow as tf
def auto_select_accelerator():
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy()
return strategy
strategy = auto_select_accelerator() | code |
90124333/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from tensorflow import keras
import tensorflow as tf
from transformers import TFDistilBertModel  # model class used below
def auto_select_accelerator():
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy()
return strategy
strategy = auto_select_accelerator()
# `config`, `create_model`, and `lr` are defined in earlier cells of the source notebook
with strategy.scope():
DistilBERTmodel = TFDistilBertModel.from_pretrained('distilbert-base-uncased', config=config)
model = create_model(DistilBERTmodel)
model.compile(keras.optimizers.Adam(lr), loss='binary_crossentropy', metrics=['accuracy']) | code |
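The cell above leaves tokenization implicit; assuming the standard Hugging Face tokenizer API, a sketch of preparing fixed-length inputs for a TFDistilBertModel (the texts and max_length are illustrative):

from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
texts = ['a short example sentence', 'another one']
enc = tokenizer(texts, padding='max_length', truncation=True, max_length=128, return_tensors='np')
print(enc['input_ids'].shape)  # (2, 128)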
2005556/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ufo = pd.read_csv('../input/scrubbed.csv')
countryFreq = ufo['country'].value_counts()
labels = list(countryFreq.index)
positionsForBars = list(range(len(labels)))
plt.xticks(positionsForBars, labels)
stateFreq = ufo['state'][ufo.country == 'us'].value_counts()
labels = list(stateFreq.index)
positionsForBars = list(range(len(labels)))
plt.figure(figsize=(18, 8))
plt.bar(positionsForBars, stateFreq.values)
plt.xticks(positionsForBars, labels)
plt.title('state found ufo') | code |
2005556/cell_6 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
ufo = pd.read_csv('../input/scrubbed.csv')
countryFreq = ufo['country'].value_counts()
labels = list(countryFreq.index)
positionsForBars = list(range(len(labels)))
plt.xticks(positionsForBars, labels)
stateFreq = ufo['state'][ufo.country == 'us'].value_counts()
labels = list(stateFreq.index)
positionsForBars = list(range(len(labels)))
plt.xticks(positionsForBars, labels)
plt.figure(figsize=(18, 8))
ax = sns.countplot(x=ufo['state'][ufo.country == 'us']).set_title('State found ufo') | code
2005556/cell_2 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ufo = pd.read_csv('../input/scrubbed.csv')
ufo.head() | code |
2005556/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2005556/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
ufo = pd.read_csv('../input/scrubbed.csv')
countryFreq = ufo['country'].value_counts()
labels = list(countryFreq.index)
positionsForBars = list(range(len(labels)))
plt.bar(positionsForBars, countryFreq.values)
plt.xticks(positionsForBars, labels)
plt.title('countries found ufo') | code |
2005556/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
ufo = pd.read_csv('../input/scrubbed.csv')
countryFreq = ufo['country'].value_counts()
labels = list(countryFreq.index)
positionsForBars = list(range(len(labels)))
plt.xticks(positionsForBars, labels)
stateFreq = ufo['state'][ufo.country == 'us'].value_counts()
labels = list(stateFreq.index)
positionsForBars = list(range(len(labels)))
plt.xticks(positionsForBars, labels)
sns.countplot(x=ufo['country']).set_title('Country found ufo') | code
1007671/cell_6 | [
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
df = df[df.gender == 'mens']
pre = df[df.playin_flag == 1]
data = []
for region in pre.team_region.unique():
for seed in range(2, 17):
res = pre[(pre.team_region == region) & pre.team_seed.isin([str(seed) + 'a', str(seed) + 'b'])]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == 0]
for region in post.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob'])
match['delta'] = match.Team1_Rating - match.Team2_Rating
match['win_extra'] = match.Team1_Prob - 0.5
sns.regplot(x='delta', y='win_extra', data=match, order=2) | code
1007671/cell_19 | [
"text_plain_output_1.png"
] | from itertools import combinations
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
df = df[df.gender == 'mens']
pre = df[df.playin_flag == 1]
data = []
for region in pre.team_region.unique():
for seed in range(2, 17):
res = pre[(pre.team_region == region) & pre.team_seed.isin([str(seed) + 'a', str(seed) + 'b'])]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == 0]
for region in post.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob'])
match['delta'] = match.Team1_Rating - match.Team2_Rating
match['win_extra'] = match.Team1_Prob - 0.5
poly = np.polyfit(match.delta, match.win_extra, 2)
poly
data = []
for region in df.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_name, row.team_rating, int(row.team_seed)])
else:
seeds = matchup + [x + 'a' for x in matchup] + [x + 'b' for x in matchup]
res = df[(df.team_region == region) & df.team_seed.isin(seeds)]
for t1, t2 in combinations(res.team_name.tolist(), 2):
res2 = res[res.team_name.isin([t1, t2])]
data.append([])
for _, row in res2.iterrows():
seed = row.team_seed if len(row.team_seed) < 3 else row.team_seed[:-1]
data[-1].extend([row.team_name, row.team_rating, int(seed)])
rd2 = pd.DataFrame(data, columns=['Team1', 'Rank1', 'Seed1', 'Team2', 'Rank2', 'Seed2'])
def upset(row):
top_rank = max(row.Rank1, row.Rank2)
top_num = '1' if top_rank == row.Rank1 else '2'
low_num = '1' if top_num == '2' else '2'
seed_delta = row['Seed' + top_num] - row['Seed' + low_num]
rank_delta = row['Rank' + top_num] - row['Rank' + low_num]
prob = np.polyval([-0.00116991, 0.0461334, 0.01831479], np.abs(rank_delta))
return (prob * np.sign(seed_delta), top_num)
def matchup_str(x, direc='l'):
if direc == 'l':
top_num = '2' if x.Seed1 > x.Seed2 else '1'
else:
top_num = '1' if x.Seed1 > x.Seed2 else '2'
low_num = '1' if top_num == '2' else '2'
return '{} {}'.format(x['Seed' + top_num], x['Team' + top_num])
rd2.shape
rd2['upset_data'] = rd2.apply(upset, axis=1)
rd2['upset'] = rd2.upset_data.apply(lambda x: x[0])
rd2['matchup_left'] = rd2.apply(matchup_str, axis=1, args=['l'])
rd2['matchup_right'] = rd2.apply(matchup_str, axis=1, args=['r'])
rd2['matchup'] = rd2.apply(lambda x: x.matchup_left + ' v ' + x.matchup_right, axis=1)
rd2 = rd2[(np.abs(rd2.Seed1 - rd2.Seed2) >= 2) & (rd2.upset > -0.2)]
rd2.sort_values('upset', inplace=True, ascending=False)
sns.set(style="white", context="talk")
f, ax = plt.subplots(figsize=(6, 10))
sns.barplot(x="upset", y="matchup", data=rd2, label="Win Probability", palette="RdBu_r")
ax.set_xlabel("Win Probability Above 50/50 (Postive = upset)")
ax.plot([0, 0], [-1, rd2.shape[0]], '-k'); ax.set_ylabel("");
new_matchups = [[str(a) for a in x] for x in combinations(range(1, 17), 2)]
data = []
for region in df.team_region.unique():
for matchup in new_matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_name, row.team_rating, int(row.team_seed)])
else:
seeds = matchup + [x + 'a' for x in matchup] + [x + 'b' for x in matchup]
res = df[(df.team_region == region) & df.team_seed.isin(seeds)]
for t1, t2 in combinations(res.team_name.tolist(), 2):
res2 = res[res.team_name.isin([t1, t2])]
data.append([])
for _, row in res2.iterrows():
seed = row.team_seed if len(row.team_seed) < 3 else row.team_seed[:-1]
data[-1].extend([row.team_name, row.team_rating, int(seed)])
rdall = pd.DataFrame(data, columns=['Team1', 'Rank1', 'Seed1', 'Team2', 'Rank2', 'Seed2'])
rdall['upset_data'] = rdall.apply(upset, axis=1)
rdall['upset'] = rdall.upset_data.apply(lambda x: x[0])
rdall['matchup_left'] = rdall.apply(matchup_str, axis=1, args=['l'])
rdall['matchup_right'] = rdall.apply(matchup_str, axis=1, args=['r'])
rdall['matchup'] = rdall.apply(lambda x: x.matchup_left + ' v ' + x.matchup_right, axis=1)
rdall = rdall[(np.abs(rdall.Seed1 - rdall.Seed2) >= 2) & (rdall.upset >= -0.05)]
rdall.sort_values('upset', inplace=True, ascending=False)
f, ax = plt.subplots(figsize=(6, 15))
sns.barplot(x='upset', y='matchup', data=rdall, label='Win Probability', palette='RdBu_r')
ax.set_xlabel('Win Probability Above 50/50 (Positive = upset)')
ax.plot([0, 0], [-1, rdall.shape[0]], '-k')
ax.set_ylabel('') | code |
1007671/cell_7 | [
"image_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
df = df[df.gender == 'mens']
pre = df[df.playin_flag == 1]
data = []
for region in pre.team_region.unique():
for seed in range(2, 17):
res = pre[(pre.team_region == region) & pre.team_seed.isin([str(seed) + 'a', str(seed) + 'b'])]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == 0]
for region in post.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob'])
match['delta'] = match.Team1_Rating - match.Team2_Rating
match['win_extra'] = match.Team1_Prob - 0.5
poly = np.polyfit(match.delta, match.win_extra, 2)
poly | code |
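Once the quadratic is fitted, np.polyval turns any rating gap into an implied win probability; a sketch using coefficients of the same form as `poly` (the values below are the ones hard-coded in the upset() helper later in this file):

import numpy as np

coeffs = [-0.00116991, 0.0461334, 0.01831479]  # quadratic in the rating delta
for delta in [0, 5, 10, 15]:
    extra = np.polyval(coeffs, delta)
    print(delta, round(0.5 + extra, 3))  # implied win probability for the stronger team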
1007671/cell_15 | [
"image_output_1.png"
] | from itertools import combinations
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
df = df[df.gender == 'mens']
pre = df[df.playin_flag == 1]
data = []
for region in pre.team_region.unique():
for seed in range(2, 17):
res = pre[(pre.team_region == region) & pre.team_seed.isin([str(seed) + 'a', str(seed) + 'b'])]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == 0]
for region in post.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob'])
match['delta'] = match.Team1_Rating - match.Team2_Rating
match['win_extra'] = match.Team1_Prob - 0.5
poly = np.polyfit(match.delta, match.win_extra, 2)
poly
data = []
for region in df.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_name, row.team_rating, int(row.team_seed)])
else:
seeds = matchup + [x + 'a' for x in matchup] + [x + 'b' for x in matchup]
res = df[(df.team_region == region) & df.team_seed.isin(seeds)]
for t1, t2 in combinations(res.team_name.tolist(), 2):
res2 = res[res.team_name.isin([t1, t2])]
data.append([])
for _, row in res2.iterrows():
seed = row.team_seed if len(row.team_seed) < 3 else row.team_seed[:-1]
data[-1].extend([row.team_name, row.team_rating, int(seed)])
rd2 = pd.DataFrame(data, columns=['Team1', 'Rank1', 'Seed1', 'Team2', 'Rank2', 'Seed2'])
def upset(row):
top_rank = max(row.Rank1, row.Rank2)
top_num = '1' if top_rank == row.Rank1 else '2'
low_num = '1' if top_num == '2' else '2'
seed_delta = row['Seed' + top_num] - row['Seed' + low_num]
rank_delta = row['Rank' + top_num] - row['Rank' + low_num]
prob = np.polyval([-0.00116991, 0.0461334, 0.01831479], np.abs(rank_delta))
return (prob * np.sign(seed_delta), top_num)
def matchup_str(x, direc='l'):
if direc == 'l':
top_num = '2' if x.Seed1 > x.Seed2 else '1'
else:
top_num = '1' if x.Seed1 > x.Seed2 else '2'
low_num = '1' if top_num == '2' else '2'
return '{} {}'.format(x['Seed' + top_num], x['Team' + top_num])
rd2.shape
rd2['upset_data'] = rd2.apply(upset, axis=1)
rd2['upset'] = rd2.upset_data.apply(lambda x: x[0])
rd2['matchup_left'] = rd2.apply(matchup_str, axis=1, args=['l'])
rd2['matchup_right'] = rd2.apply(matchup_str, axis=1, args=['r'])
rd2['matchup'] = rd2.apply(lambda x: x.matchup_left + ' v ' + x.matchup_right, axis=1)
rd2 = rd2[(np.abs(rd2.Seed1 - rd2.Seed2) >= 2) & (rd2.upset > -0.2)]
rd2.sort_values('upset', inplace=True, ascending=False)
sns.set(style='white', context='talk')
f, ax = plt.subplots(figsize=(6, 10))
sns.barplot(x='upset', y='matchup', data=rd2, label='Win Probability', palette='RdBu_r')
ax.set_xlabel('Win Probability Above 50/50 (Positive = upset)')
ax.plot([0, 0], [-1, rd2.shape[0]], '-k')
ax.set_ylabel('') | code |
1007671/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
df.head() | code |
1007671/cell_12 | [
"text_html_output_1.png"
] | from itertools import combinations
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
df = df[df.gender == 'mens']
pre = df[df.playin_flag == 1]
data = []
for region in pre.team_region.unique():
for seed in range(2, 17):
res = pre[(pre.team_region == region) & pre.team_seed.isin([str(seed) + 'a', str(seed) + 'b'])]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == 0]
for region in post.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob'])
match['delta'] = match.Team1_Rating - match.Team2_Rating
match['win_extra'] = match.Team1_Prob - 0.5
data = []
for region in df.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_name, row.team_rating, int(row.team_seed)])
else:
seeds = matchup + [x + 'a' for x in matchup] + [x + 'b' for x in matchup]
res = df[(df.team_region == region) & df.team_seed.isin(seeds)]
for t1, t2 in combinations(res.team_name.tolist(), 2):
res2 = res[res.team_name.isin([t1, t2])]
data.append([])
for _, row in res2.iterrows():
seed = row.team_seed if len(row.team_seed) < 3 else row.team_seed[:-1]
data[-1].extend([row.team_name, row.team_rating, int(seed)])
rd2 = pd.DataFrame(data, columns=['Team1', 'Rank1', 'Seed1', 'Team2', 'Rank2', 'Seed2'])
rd2.shape | code |
18142557/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
path = '../input/data.csv'
df = pd.read_csv(path)
df.columns
df = df.drop(['Unnamed: 0', 'ID', 'Position', 'Name', 'Photo', 'Nationality', 'Potential', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Release Clause', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Height', 'Weight'], axis=1)
df.columns
df = df.dropna()
df.isnull().sum() | code |
18142557/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
path = '../input/data.csv'
df = pd.read_csv(path)
df.head()
df.columns | code |
18142557/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
path = '../input/data.csv'
df = pd.read_csv(path)
df.columns
df = df.drop(['Unnamed: 0', 'ID', 'Position', 'Name', 'Photo', 'Nationality', 'Potential', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Release Clause', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Height', 'Weight'], axis=1)
df.columns
df = df.dropna()
df.isnull().sum()
train, validate, test = np.split(df.sample(frac=1), [int(0.6 * len(df)), int(0.8 * len(df))])
x_train = np.array(train.drop(['Overall'], axis=1))
y_train = np.array(train['Overall'])
x_validate = np.array(validate.drop(['Overall'], axis=1))
y_validate = np.array(validate['Overall'])
x_test = np.array(test.drop(['Overall'], axis=1))
y_test = np.array(test['Overall'])
(len(x_train), len(x_validate), len(x_test)) | code |
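A toy sketch of the 60/20/20 shuffle-and-split idiom used above:

import numpy as np
import pandas as pd

df = pd.DataFrame({'x': range(10)})
train, validate, test = np.split(df.sample(frac=1, random_state=0), [int(0.6 * len(df)), int(0.8 * len(df))])
print(len(train), len(validate), len(test))  # 6 2 2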
18142557/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
path = '../input/data.csv'
df = pd.read_csv(path)
df.columns
df = df.drop(['Unnamed: 0', 'ID', 'Position', 'Name', 'Photo', 'Nationality', 'Potential', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Release Clause', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Height', 'Weight'], axis=1)
df.columns | code |
18142557/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import linear_model
import os
print(os.listdir('../input')) | code |
18142557/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/data.csv'
df = pd.read_csv(path)
df.columns
df = df.drop(['Unnamed: 0', 'ID', 'Position', 'Name', 'Photo', 'Nationality', 'Potential', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Release Clause', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Height', 'Weight'], axis=1)
df.columns
df = df.dropna()
df.isnull().sum()
f, axes = plt.subplots(figsize=(10, 5))
ax = sns.countplot(x='Age', data=df)
plt.ylabel('Number of players') | code |
18142557/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
path = '../input/data.csv'
df = pd.read_csv(path)
df.columns
df = df.drop(['Unnamed: 0', 'ID', 'Position', 'Name', 'Photo', 'Nationality', 'Potential', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Release Clause', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Height', 'Weight'], axis=1)
df.columns
df.describe() | code |
18142557/cell_18 | [
"text_plain_output_1.png"
] | train.describe() | code |
18142557/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/data.csv'
df = pd.read_csv(path)
df.columns
df = df.drop(['Unnamed: 0', 'ID', 'Position', 'Name', 'Photo', 'Nationality', 'Potential', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Release Clause', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Height', 'Weight'], axis=1)
df.columns
len(df) | code |
18142557/cell_15 | [
"text_html_output_1.png"
] | train.head() | code |
18142557/cell_16 | [
"text_plain_output_1.png"
] | validate.head() | code |
18142557/cell_17 | [
"text_plain_output_1.png"
] | test.head() | code |
18142557/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/data.csv'
df = pd.read_csv(path)
df.columns
df = df.drop(['Unnamed: 0', 'ID', 'Position', 'Name', 'Photo', 'Nationality', 'Potential', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Release Clause', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Height', 'Weight'], axis=1)
df.columns
df = df.dropna()
df.isnull().sum()
len(df) | code |
18142557/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/data.csv'
df = pd.read_csv(path)
df.columns
df = df.drop(['Unnamed: 0', 'ID', 'Position', 'Name', 'Photo', 'Nationality', 'Potential', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Release Clause', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Height', 'Weight'], axis=1)
df.columns
df = df.dropna()
df.isnull().sum()
f, axes = plt.subplots(figsize=(10, 5))
ax = sns.countplot(x='Age', data=df)
plt.ylabel('Number of players')
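# Distribution of overall ratings across players.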
f, axes = plt.subplots(figsize=(20, 5))
ax = sns.countplot(x='Overall', data=df)
plt.ylabel('Number of players')
plt.xlabel('Overall Score') | code |
34120249/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
udemy_courses = pd.read_csv('/kaggle/input/udemy-courses/udemy_courses.csv', parse_dates=['published_timestamp'])
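# 'price' is stored as strings; map the non-numeric placeholders ('Free', 'TRUE') to 0 before casting.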
udemy_courses['price'] = udemy_courses['price'].str.replace('Free', '0').str.replace('TRUE', '0')
udemy_courses['price'] = udemy_courses['price'].astype('float')
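# Split 'content_duration' (a number followed by a unit word) into its numeric value and its unit.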
udemy_courses['number_of_contents'] = udemy_courses['content_duration'].str.extract('([\\d\\.]+)\\s[\\w]+').astype('float')
udemy_courses['content_duration_type'] = udemy_courses['content_duration'].str.extract('[\\d\\.]+\\s([\\w]+)')
udemy_courses.drop('content_duration', axis=1, inplace=True)
udemy_courses.dropna(inplace=True)
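# Count free vs. paid courses within each subject.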
g = sns.catplot(x='subject', data=udemy_courses, kind='count', hue='is_paid')
g.fig.suptitle('free/paid categories comparison', y=1.03)
plt.xticks(rotation=90)
plt.show()
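# Same free/paid comparison, broken down by course level.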
g = sns.catplot(x='level', data=udemy_courses, kind='count', hue='is_paid')
g.fig.suptitle('free/paid level comparison', y=1.03)
plt.xticks(rotation=90)
plt.show()
def cdf(lst):
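    """Return the sorted values x and their cumulative fractions y (empirical CDF)."""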
    x = np.sort(lst)
    y = np.arange(1, len(x) + 1) / len(x)
    return (x, y)
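# Visualize the price distribution three ways: empirical CDF, histogram, and boxplot.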
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
x_price, y_price = cdf(udemy_courses['price'])
ax[0].plot(x_price, y_price)
ax[0].set_title('CDF of prices')
ax[1].hist(udemy_courses['price'])
ax[1].set_title('histogram distribution of prices')
ax[2].boxplot(udemy_courses['price'])
ax[2].set_title('boxplot of prices')
plt.show()
print('median: ', udemy_courses['price'].median())
print('mean: ', udemy_courses['price'].mean()) | code |
34120249/cell_25 | [
"image_output_1.png"
] | import pandas as pd
udemy_courses = pd.read_csv('/kaggle/input/udemy-courses/udemy_courses.csv', parse_dates=['published_timestamp'])
udemy_courses['price'] = udemy_courses['price'].str.replace('Free', '0').str.replace('TRUE', '0')
udemy_courses['price'] = udemy_courses['price'].astype('float')
udemy_courses['number_of_contents'] = udemy_courses['content_duration'].str.extract('([\\d\\.]+)\\s[\\w]+').astype('float')
udemy_courses['content_duration_type'] = udemy_courses['content_duration'].str.extract('[\\d\\.]+\\s([\\w]+)')
udemy_courses.drop('content_duration', axis=1, inplace=True)
udemy_courses.dropna(inplace=True)
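# NOTE (assumption): 'price_category' is created in a cell not captured in this record;
# the pd.cut binning below is a hypothetical reconstruction so this cell runs standalone,
# not the original author's actual bin edges or labels.
udemy_courses['price_category'] = pd.cut(udemy_courses['price'], bins=[-1, 0, 50, 100, float('inf')], labels=['free', 'low', 'medium', 'high'])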
udemy_courses['price_category'].value_counts() | code |