path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class) |
---|---|---|---|
129005548/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
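# summary statistics (count, mean, std, quartiles) for each numeric column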
df.describe() | code |
16109895/cell_63 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
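# np.mat returns an np.matrix object, which NumPy now discourages in favor of plain 2-D arrays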
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
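# .T is a no-op on a 1-D array; a (3, 1) column needs an explicit second axis, e.g. vector_row[:, np.newaxis]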
vector_row.T | code |
16109895/cell_21 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
vector_row[:] | code |
16109895/cell_13 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
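# CSR storage keeps only the nonzero values together with their (row, column) coordinates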
print(matrix_sparse) | code |
16109895/cell_25 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[:1, :] | code |
16109895/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np
vector_row = np.array([1, 2, 3])
vector_row | code |
16109895/cell_57 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix.min()
matrix.max()
matrix.size
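# reshape is valid only when the element count is unchanged: 3 x 4 = 6 x 2 = 12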
matrix.reshape(6, 2) | code |
16109895/cell_56 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix.min()
matrix.max()
matrix.size | code |
16109895/cell_34 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape | code |
16109895/cell_23 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
vector_row[-1] | code |
16109895/cell_30 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[:, 2] | code |
16109895/cell_33 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix | code |
16109895/cell_44 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix | code |
16109895/cell_20 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
vector_row[0] | code |
16109895/cell_55 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix3d = np.array([[[2, 3, 4, 5], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 3, 2]]])
add_10 = lambda i: i + 10
vectorized = np.vectorize(add_10)
matrix.min()
matrix.max()
np.mean(matrix)
np.var(matrix)
np.std(matrix)
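# axis=1 averages across each row (one value per row); axis=0 averages down each column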
np.mean(matrix, axis=1)
np.mean(matrix, axis=0) | code |
16109895/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
vector_col | code |
16109895/cell_40 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix | code |
16109895/cell_29 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[:, :1] | code |
16109895/cell_39 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix3d = np.array([[[2, 3, 4, 5], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 3, 2]]])
matrix3d.ndim
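# chained indexing: outer block 0, row 1, column 2 -> 3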
matrix3d[0][1][2] | code |
16109895/cell_26 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[:, :] | code |
16109895/cell_48 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix3d = np.array([[[2, 3, 4, 5], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 3, 2]]])
add_10 = lambda i: i + 10
vectorized = np.vectorize(add_10)
matrix.min()
matrix.max()
print(np.max(matrix, axis=0))
print(np.min(matrix, axis=0)) | code |
16109895/cell_61 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix.min()
matrix.max()
matrix.size
matrix.reshape(6, 2)
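# reshape(1, -1) flattens to a single row, reshape(-1, 1) to a single column; .T swaps the (3, 4) axes to (4, 3)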
matrix.reshape(1, -1)
matrix.reshape(-1, 1)
matrix.T | code |
16109895/cell_54 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix3d = np.array([[[2, 3, 4, 5], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 3, 2]]])
add_10 = lambda i: i + 10
vectorized = np.vectorize(add_10)
matrix.min()
matrix.max()
np.mean(matrix)
np.var(matrix)
np.std(matrix)
np.mean(matrix, axis=1) | code |
16109895/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
type(matrix_object) | code |
16109895/cell_60 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix.min()
matrix.max()
matrix.size
matrix.reshape(6, 2)
matrix.reshape(1, -1)
matrix.reshape(-1, 1)
matrix | code |
16109895/cell_19 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
vector_row | code |
16109895/cell_50 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix.min()
matrix.max()
matrix | code |
16109895/cell_52 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix3d = np.array([[[2, 3, 4, 5], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 3, 2]]])
add_10 = lambda i: i + 10
vectorized = np.vectorize(add_10)
matrix.min()
matrix.max()
np.mean(matrix)
np.var(matrix) | code |
16109895/cell_45 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix + 10 | code |
16109895/cell_49 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix3d = np.array([[[2, 3, 4, 5], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 3, 2]]])
add_10 = lambda i: i + 10
vectorized = np.vectorize(add_10)
matrix.min()
matrix.max()
print(np.min(matrix, axis=1))
print(np.max(matrix, axis=1)) | code |
16109895/cell_51 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix3d = np.array([[[2, 3, 4, 5], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 3, 2]]])
add_10 = lambda i: i + 10
vectorized = np.vectorize(add_10)
matrix.min()
matrix.max()
np.mean(matrix) | code |
16109895/cell_62 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
vector_row | code |
16109895/cell_59 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix.min()
matrix.max()
matrix.size
matrix.reshape(6, 2)
matrix.reshape(1, -1)
matrix.reshape(-1, 1) | code |
16109895/cell_58 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix.min()
matrix.max()
matrix.size
matrix.reshape(6, 2)
matrix.reshape(1, -1) | code |
16109895/cell_28 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[:, 1:2] | code |
16109895/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix | code |
16109895/cell_15 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[1, 2] | code |
16109895/cell_16 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[0, 0] | code |
16109895/cell_38 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix3d = np.array([[[2, 3, 4, 5], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 3, 2]]])
matrix3d.ndim | code |
16109895/cell_47 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix.min()
matrix.max() | code |
16109895/cell_17 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[1, 1] | code |
16109895/cell_35 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size | code |
16109895/cell_43 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix3d = np.array([[[2, 3, 4, 5], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 3, 2]]])
add_10 = lambda i: i + 10
vectorized = np.vectorize(add_10)
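# np.vectorize applies the scalar lambda element-wise (a convenience wrapper, not a performance gain)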
vectorized(matrix) | code |
16109895/cell_31 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[:, :2] | code |
16109895/cell_46 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix.min() | code |
16109895/cell_24 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[:2, :] | code |
16109895/cell_22 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
vector_row[:3] | code |
16109895/cell_53 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim
matrix3d = np.array([[[2, 3, 4, 5], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 3, 2]]])
add_10 = lambda i: i + 10
vectorized = np.vectorize(add_10)
matrix.min()
matrix.max()
np.mean(matrix)
np.var(matrix)
np.std(matrix) | code |
16109895/cell_10 | [
"text_plain_output_1.png"
] | import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
matrix_object | code |
16109895/cell_27 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
matrix[:1] | code |
16109895/cell_36 | [
"text_plain_output_1.png"
] | from scipy import sparse
import numpy as np
vector_row = np.array([1, 2, 3])
vector_col = np.array([[1], [2], [3]])
matrix = np.array([[1, 2, 3], [4, 5, 6]])
matrix_object = np.mat([[1, 2], [3, 4]])
from scipy import sparse
matrix = np.array([[1, 0, 2, 0], [1, 0, 0, 1], [0, 0, 1, 0]])
matrix_sparse = sparse.csr_matrix(matrix)
matrix = np.array([[1, 2, 3], [1, 4, 5]])
vector_row = np.array([1, 2, 3])
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
matrix.shape
matrix.size
matrix.ndim | code |
33105736/cell_6 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
root = '../input/104-flowers-garden-of-eden'
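# index every training image into a DataFrame: one row per (folder, class, image_name)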
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
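# note: DataFrame.append was removed in pandas 2.0; on newer versions collect the frames in a list and pd.concat them once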
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
classes = list(train_df['class'].unique())
train_df['label'] = train_df['class'].apply(lambda x: classes.index(x))
val_df['label'] = val_df['class'].apply(lambda x: classes.index(x))
test_df = pd.DataFrame()
f = 'jpeg-224x224'
images = os.listdir(os.path.join(root, f, 'test'))
tmp_df = pd.DataFrame(images, columns=['image_name'])
tmp_df['class'] = 'unknown'
tmp_df['folder'] = f
tmp_df['type'] = 'test'
test_df = test_df.append(tmp_df, ignore_index=True)
print('test:', test_df.shape) | code |
33105736/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
!python pytorch-xla-env-setup.py --apt-packages libomp5 libopenblas-dev | code |
33105736/cell_8 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
classes = list(train_df['class'].unique())
train_df['label'] = train_df['class'].apply(lambda x: classes.index(x))
val_df['label'] = val_df['class'].apply(lambda x: classes.index(x))
test_df = pd.DataFrame()
f = 'jpeg-224x224'
images = os.listdir(os.path.join(root, f, 'test'))
tmp_df = pd.DataFrame(images, columns=['image_name'])
tmp_df['class'] = 'unknown'
tmp_df['folder'] = f
tmp_df['type'] = 'test'
test_df = test_df.append(tmp_df, ignore_index=True)
train_dataset = flowerDataset(train_df)
print(train_dataset.__len__())
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, drop_last=True)
train_iter = iter(train_loader)
images, labels = next(train_iter)
print(images.size())
print(labels.size())
plot_size = 32
fig = plt.figure(figsize=(25, 10))
for idx in np.arange(plot_size):
    ax = fig.add_subplot(4, plot_size // 4, idx + 1, xticks=[], yticks=[])  # integer division: subplot grid dims must be ints
    ax.imshow(np.transpose(images[idx], (1, 2, 0)))
    ax.set_title(classes[labels[idx].item()]) | code |
33105736/cell_16 | [
"text_plain_output_1.png"
] | from PIL import Image
from collections import deque
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
import torchvision.models as models
import torchvision.transforms as T
root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
classes = list(train_df['class'].unique())
train_df['label'] = train_df['class'].apply(lambda x: classes.index(x))
val_df['label'] = val_df['class'].apply(lambda x: classes.index(x))
test_df = pd.DataFrame()
f = 'jpeg-224x224'
images = os.listdir(os.path.join(root, f, 'test'))
tmp_df = pd.DataFrame(images, columns=['image_name'])
tmp_df['class'] = 'unknown'
tmp_df['folder'] = f
tmp_df['type'] = 'test'
test_df = test_df.append(tmp_df, ignore_index=True)
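# Dataset mapping one DataFrame row to a (224x224 image tensor, label) pair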
class flowerDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])
    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'], self.df.iloc[idx]['class'], self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        target_tensor = torch.tensor(self.df.iloc[idx]['label'], dtype=torch.long)
        return (img_tensor, target_tensor)
    def __len__(self):
        return len(self.df)
class testDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])
    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'], self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        return (img_tensor, self.df.iloc[idx]['image_name'][:-5])
    def __len__(self):
        return len(self.df)
train_dataset = flowerDataset(train_df)
print(train_dataset.__len__())
train_loader = DataLoader(train_dataset, batch_size = 32, shuffle = True, drop_last = True)
train_iter = iter(train_loader)
images, labels = next(train_iter)
print(images.size())
print(labels.size())
plot_size = 32
fig = plt.figure(figsize=(25, 10))
for idx in np.arange(plot_size):
    ax = fig.add_subplot(4, plot_size // 4, idx + 1, xticks=[], yticks=[])  # integer division: subplot grid dims must be ints
    ax.imshow(np.transpose(images[idx], (1, 2, 0)))
    ax.set_title(classes[labels[idx].item()])
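# per-core training entry point: each TPU process builds its own sharded loaders, and xm.optimizer_step() reduces gradients across replicas before stepping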
def train_net():
    torch.manual_seed(FLAGS['seed'])
    device = xm.xla_device()
    world_size = xm.xrt_world_size()
    train_dataset = flowerDataset(train_df)
    train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    train_loader = DataLoader(train_dataset, batch_size=FLAGS['batch_size'], sampler=train_sampler, num_workers=FLAGS['num_workers'], drop_last=True)
    val_dataset = flowerDataset(val_df)
    val_sampler = DistributedSampler(val_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=FLAGS['batch_size'], sampler=val_sampler, num_workers=FLAGS['num_workers'], drop_last=True)
    model = models.resnet18()
    model.load_state_dict(torch.load('/kaggle/input/resnet18/resnet18.pth'))
    model.fc = nn.Linear(512, 104)  # swap the final fc layer for a 104-class head
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=FLAGS['learning_rate'] * world_size, momentum=FLAGS['momentum'], weight_decay=0.0005)
    loss_fn = torch.nn.CrossEntropyLoss()
    def train_loop_fn(loader):
        tracker = xm.RateTracker()
        model.train()
        loss_window = deque(maxlen=FLAGS['log_steps'])
        for x, (data, target) in enumerate(loader):
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss_window.append(loss.item())
            loss.backward()
            xm.optimizer_step(optimizer)
            tracker.add(FLAGS['batch_size'])
    def val_loop_fn(loader):
        total_samples, correct = (0, 0)
        model.eval()
        for data, target in loader:
            with torch.no_grad():
                output = model(data)
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
            total_samples += data.size()[0]
        accuracy = 100.0 * correct / total_samples
        return accuracy
    best_accuracy = 0.0  # initialized once, before the loop, so only improving epochs are checkpointed
    for epoch in range(1, FLAGS['num_epochs'] + 1):
        para_loader = pl.ParallelLoader(train_loader, [device])
        train_loop_fn(para_loader.per_device_loader(device))
        para_loader = pl.ParallelLoader(val_loader, [device])
        accuracy = val_loop_fn(para_loader.per_device_loader(device))
        if accuracy > best_accuracy:
            xm.save(model.state_dict(), 'trained_resnet18_model.pth')
            best_accuracy = accuracy
def _mp_fn(rank, flags):
    global FLAGS
    FLAGS = flags
    torch.set_default_tensor_type('torch.FloatTensor')
    train_start = time.time()
    train_net()
    elapsed_train_time = time.time() - train_start
model = models.resnet18()
model.fc = nn.Linear(512, 104)
model.load_state_dict(torch.load('trained_resnet18_model.pth'))
device = xm.xla_device()
model.to(device)
model.eval()
batch_size = 32
test_dataset = testDataset(test_df)
test_loader = DataLoader(test_dataset, batch_size=batch_size)
n = test_dataset.__len__()
predictions = pd.DataFrame()
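# batched inference; the argmax over the 104 logits is the predicted class id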
for x, (images, names) in enumerate(test_loader):
    images = images.to(device)
    with torch.no_grad():
        output = model(images)
    pred = output.max(1)[1].cpu().numpy()
    predictions = predictions.append(pd.DataFrame(data={'id': names, 'label': pred}), ignore_index=True)
    print('\rProcess {} %'.format(round(100 * x * batch_size / n)), end='')
predictions.head() | code |
33105736/cell_14 | [
"text_plain_output_1.png"
] | from PIL import Image
from collections import deque
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
import torchvision.models as models
import torchvision.transforms as T
root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
classes = list(train_df['class'].unique())
train_df['label'] = train_df['class'].apply(lambda x: classes.index(x))
val_df['label'] = val_df['class'].apply(lambda x: classes.index(x))
test_df = pd.DataFrame()
f = 'jpeg-224x224'
images = os.listdir(os.path.join(root, f, 'test'))
tmp_df = pd.DataFrame(images, columns=['image_name'])
tmp_df['class'] = 'unknown'
tmp_df['folder'] = f
tmp_df['type'] = 'test'
test_df = test_df.append(tmp_df, ignore_index=True)
class flowerDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])
    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'], self.df.iloc[idx]['class'], self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        target_tensor = torch.tensor(self.df.iloc[idx]['label'], dtype=torch.long)
        return (img_tensor, target_tensor)
    def __len__(self):
        return len(self.df)
class testDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])
    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'], self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        return (img_tensor, self.df.iloc[idx]['image_name'][:-5])
    def __len__(self):
        return len(self.df)
train_dataset = flowerDataset(train_df)
print(train_dataset.__len__())
train_loader = DataLoader(train_dataset, batch_size = 32, shuffle = True, drop_last = True)
train_iter = iter(train_loader)
images, labels = next(train_iter)
print(images.size())
print(labels.size())
plot_size = 32
fig = plt.figure(figsize=(25, 10))
for idx in np.arange(plot_size):
    ax = fig.add_subplot(4, plot_size // 4, idx + 1, xticks=[], yticks=[])  # integer division: subplot grid dims must be ints
    ax.imshow(np.transpose(images[idx], (1, 2, 0)))
    ax.set_title(classes[labels[idx].item()])
def train_net():
    torch.manual_seed(FLAGS['seed'])
    device = xm.xla_device()
    world_size = xm.xrt_world_size()
    train_dataset = flowerDataset(train_df)
    train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    train_loader = DataLoader(train_dataset, batch_size=FLAGS['batch_size'], sampler=train_sampler, num_workers=FLAGS['num_workers'], drop_last=True)
    val_dataset = flowerDataset(val_df)
    val_sampler = DistributedSampler(val_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=FLAGS['batch_size'], sampler=val_sampler, num_workers=FLAGS['num_workers'], drop_last=True)
    model = models.resnet18()
    model.load_state_dict(torch.load('/kaggle/input/resnet18/resnet18.pth'))
    model.fc = nn.Linear(512, 104)  # swap the final fc layer for a 104-class head
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=FLAGS['learning_rate'] * world_size, momentum=FLAGS['momentum'], weight_decay=0.0005)
    loss_fn = torch.nn.CrossEntropyLoss()
    def train_loop_fn(loader):
        tracker = xm.RateTracker()
        model.train()
        loss_window = deque(maxlen=FLAGS['log_steps'])
        for x, (data, target) in enumerate(loader):
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss_window.append(loss.item())
            loss.backward()
            xm.optimizer_step(optimizer)
            tracker.add(FLAGS['batch_size'])
    def val_loop_fn(loader):
        total_samples, correct = (0, 0)
        model.eval()
        for data, target in loader:
            with torch.no_grad():
                output = model(data)
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
            total_samples += data.size()[0]
        accuracy = 100.0 * correct / total_samples
        return accuracy
    best_accuracy = 0.0  # initialized once, before the loop, so only improving epochs are checkpointed
    for epoch in range(1, FLAGS['num_epochs'] + 1):
        para_loader = pl.ParallelLoader(train_loader, [device])
        train_loop_fn(para_loader.per_device_loader(device))
        para_loader = pl.ParallelLoader(val_loader, [device])
        accuracy = val_loop_fn(para_loader.per_device_loader(device))
        if accuracy > best_accuracy:
            xm.save(model.state_dict(), 'trained_resnet18_model.pth')
            best_accuracy = accuracy
def _mp_fn(rank, flags):
    global FLAGS
    FLAGS = flags
    torch.set_default_tensor_type('torch.FloatTensor')
    train_start = time.time()
    train_net()
    elapsed_train_time = time.time() - train_start
model = models.resnet18()
model.fc = nn.Linear(512, 104)
model.load_state_dict(torch.load('trained_resnet18_model.pth'))
device = xm.xla_device()
model.to(device)
model.eval()
print(device) | code |
33105736/cell_12 | [
"text_plain_output_1.png"
] | from PIL import Image
from collections import deque
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
import torch_xla.distributed.xla_multiprocessing as xmp
import torchvision.models as models
import torchvision.transforms as T
root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
classes = list(train_df['class'].unique())
train_df['label'] = train_df['class'].apply(lambda x: classes.index(x))
val_df['label'] = val_df['class'].apply(lambda x: classes.index(x))
test_df = pd.DataFrame()
f = 'jpeg-224x224'
images = os.listdir(os.path.join(root, f, 'test'))
tmp_df = pd.DataFrame(images, columns=['image_name'])
tmp_df['class'] = 'unknown'
tmp_df['folder'] = f
tmp_df['type'] = 'test'
test_df = test_df.append(tmp_df, ignore_index=True)
class flowerDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])
    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'], self.df.iloc[idx]['class'], self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        target_tensor = torch.tensor(self.df.iloc[idx]['label'], dtype=torch.long)
        return (img_tensor, target_tensor)
    def __len__(self):
        return len(self.df)
class testDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])
    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'], self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        return (img_tensor, self.df.iloc[idx]['image_name'][:-5])
    def __len__(self):
        return len(self.df)
train_dataset = flowerDataset(train_df)
print(train_dataset.__len__())
train_loader = DataLoader(train_dataset, batch_size = 32, shuffle = True, drop_last = True)
train_iter = iter(train_loader)
images, labels = next(train_iter)
print(images.size())
print(labels.size())
plot_size = 32
fig = plt.figure(figsize=(25, 10))
for idx in np.arange(plot_size):
    ax = fig.add_subplot(4, plot_size // 4, idx + 1, xticks=[], yticks=[])  # integer division: subplot grid dims must be ints
    ax.imshow(np.transpose(images[idx], (1, 2, 0)))
    ax.set_title(classes[labels[idx].item()])
def train_net():
    torch.manual_seed(FLAGS['seed'])
    device = xm.xla_device()
    world_size = xm.xrt_world_size()
    train_dataset = flowerDataset(train_df)
    train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    train_loader = DataLoader(train_dataset, batch_size=FLAGS['batch_size'], sampler=train_sampler, num_workers=FLAGS['num_workers'], drop_last=True)
    val_dataset = flowerDataset(val_df)
    val_sampler = DistributedSampler(val_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=FLAGS['batch_size'], sampler=val_sampler, num_workers=FLAGS['num_workers'], drop_last=True)
    model = models.resnet18()
    model.load_state_dict(torch.load('/kaggle/input/resnet18/resnet18.pth'))
    model.fc = nn.Linear(512, 104)  # swap the final fc layer for a 104-class head
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=FLAGS['learning_rate'] * world_size, momentum=FLAGS['momentum'], weight_decay=0.0005)
    loss_fn = torch.nn.CrossEntropyLoss()
    def train_loop_fn(loader):
        tracker = xm.RateTracker()
        model.train()
        loss_window = deque(maxlen=FLAGS['log_steps'])
        for x, (data, target) in enumerate(loader):
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss_window.append(loss.item())
            loss.backward()
            xm.optimizer_step(optimizer)
            tracker.add(FLAGS['batch_size'])
    def val_loop_fn(loader):
        total_samples, correct = (0, 0)
        model.eval()
        for data, target in loader:
            with torch.no_grad():
                output = model(data)
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
            total_samples += data.size()[0]
        accuracy = 100.0 * correct / total_samples
        return accuracy
    best_accuracy = 0.0  # initialized once, before the loop, so only improving epochs are checkpointed
    for epoch in range(1, FLAGS['num_epochs'] + 1):
        para_loader = pl.ParallelLoader(train_loader, [device])
        train_loop_fn(para_loader.per_device_loader(device))
        para_loader = pl.ParallelLoader(val_loader, [device])
        accuracy = val_loop_fn(para_loader.per_device_loader(device))
        if accuracy > best_accuracy:
            xm.save(model.state_dict(), 'trained_resnet18_model.pth')
            best_accuracy = accuracy
def _mp_fn(rank, flags):
    global FLAGS
    FLAGS = flags
    torch.set_default_tensor_type('torch.FloatTensor')
    train_start = time.time()
    train_net()
    elapsed_train_time = time.time() - train_start
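# flags handed to every spawned process; nprocs=8 matches the eight cores of Kaggle's TPU v3-8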
FLAGS = {}
FLAGS['seed'] = 1
FLAGS['num_workers'] = 4
FLAGS['num_cores'] = 8
FLAGS['num_epochs'] = 10
FLAGS['log_steps'] = 50
FLAGS['batch_size'] = 16
FLAGS['learning_rate'] = 0.0001
FLAGS['momentum'] = 0.9
xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=FLAGS['num_cores'], start_method='fork') | code |
33105736/cell_5 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
print('train:', train_df.shape)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
print('val:', val_df.shape)
classes = list(train_df['class'].unique())
print('num class:', len(classes))
train_df['label'] = train_df['class'].apply(lambda x: classes.index(x))
val_df['label'] = val_df['class'].apply(lambda x: classes.index(x)) | code |
16119155/cell_9 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import os
df = pd.read_csv('../input/rural_urban.csv')
df = df.drop(df.index[:7])
df.groupby('area')['transgender'].agg('sum').sort_values(ascending=False).head(10).plot(kind='bar')
df.groupby('area')['average annual growth rate'].agg('sum').sort_values(ascending=False).head(10).plot(kind='bar')
df1 = df.loc[df['female'] >= df['male']]
df1['no of women greater'] = df1['female'] - df1['male']
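# surplus of women over men per area; the ten largest surpluses are plotted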
df1.groupby('area')['no of women greater'].agg('sum').sort_values(ascending=False).head(10).plot(kind='bar') | code |
16119155/cell_1 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import os
df = pd.read_csv('../input/rural_urban.csv')
df.head(10) | code |
16119155/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import os
df = pd.read_csv('../input/rural_urban.csv')
df = df.drop(df.index[:7])
df.groupby('area')['transgender'].agg('sum').sort_values(ascending=False).head(10).plot(kind='bar')
df.groupby('area')['average annual growth rate'].agg('sum').sort_values(ascending=False).head(10).plot(kind='bar') | code |
16119155/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import os
df = pd.read_csv('../input/rural_urban.csv')
df = df.drop(df.index[:7])
df.head() | code |
16119155/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import seaborn as sns
import os
df = pd.read_csv('../input/rural_urban.csv')
df = df.drop(df.index[:7])
df.groupby('area')['transgender'].agg('sum').sort_values(ascending=False).head(10).plot(kind='bar') | code |
89126682/cell_21 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
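# X_train/X_test/y_train/y_test come from a train/test split in a cell not captured here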
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
sns.heatmap(cm, annot=True, fmt='d') | code |
89126682/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
plt.figure(figsize=(10, 8))
sns.histplot(df['Salary'], kde=True, bins=40)
plt.show() | code |
89126682/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
df.head() | code |
89126682/cell_34 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
KNN = KNeighborsClassifier(n_neighbors=2).fit(X_train, y_train)
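# k=2 neighbours; train accuracy is optimistic because each training point can match itself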
y_pred_train = KNN.predict(X_train)
y_pred_test = KNN.predict(X_test)
print('Accuracy on Train Data: ' + str(accuracy_score(y_train, y_pred_train) * 100) + ' %')
print('Accuracy on Test Data: ' + str(accuracy_score(y_test, y_pred_test) * 100) + ' %') | code |
89126682/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
y_pred_test = lr.predict(X_test)
print(classification_report(y_test, y_pred_test)) | code |
89126682/cell_30 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
print('Accuracy on Training: ' + str(accuracy_score(y_train, y_pred_train))) | code |
89126682/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
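# caution: LinearRegression predicts continuous values, while confusion_matrix expects discrete class labels, so the raw output needs thresholding first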
cm = confusion_matrix(y_train, y_pred_train)
sns.heatmap(cm, annot=True, fmt='d') | code |
89126682/cell_39 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
KNN = KNeighborsClassifier(n_neighbors=2).fit(X_train, y_train)
y_pred_train = KNN.predict(X_train)
y_pred_test = KNN.predict(X_test)
cm = confusion_matrix(y_train, y_pred_train)
cm = confusion_matrix(y_test, y_pred_test)
sns.heatmap(cm, annot=True, fmt='d') | code |
89126682/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
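# mean of the raw regression outputs, i.e. roughly the average predicted click probability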
sum(y_pred_train) / len(y_pred_train) | code |
89126682/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
df.columns | code |
89126682/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
cm = confusion_matrix(y_train, y_pred_train)
sns.heatmap(cm, annot=True, fmt='d') | code |
89126682/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
clicked = df[df['Clicked'] == 1]
no_clicked = df[df['Clicked'] == 0]
print('Total=', len(df))
print('Number of customers clicked = ', len(clicked))
print('Number of customers not clicked = ', len(no_clicked)) | code |
89126682/cell_18 | [
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train | code |
89126682/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
y_pred_train = (y_pred_train > 0.51).astype(int)  # threshold the continuous outputs; confusion_matrix rejects continuous values
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
print('Accuracy: ' + str(accuracy_score(y_test, y_pred_test) * 100) + '%')
print(classification_report(y_test, y_pred_test)) | code |
89126682/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
sum(y_pred_train) / len(y_pred_train) | code |
89126682/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
sns.scatterplot(data=df, x='Time Spent on Site', y='Salary', hue='Clicked')
plt.show() | code |
89126682/cell_17 | [
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train) | code |
89126682/cell_35 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
y_pred_train = (y_pred_train > 0.51).astype(int)  # threshold the continuous outputs; confusion_matrix rejects continuous values
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
KNN = KNeighborsClassifier(n_neighbors=2).fit(X_train, y_train)
y_pred_train = KNN.predict(X_train)
y_pred_test = KNN.predict(X_test)
print('Classification Report:')
print(classification_report(y_test, y_pred_test)) | code |
89126682/cell_31 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
y_pred_train = (y_pred_train > 0.51).astype(int)  # threshold the continuous outputs; confusion_matrix rejects continuous values
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
print(y_pred_test)
cm = confusion_matrix(y_test, y_pred_test)
sns.heatmap(cm, annot=True, fmt='d') | code |
89126682/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
df.columns
df = df.drop(['Names', 'emails'], axis=1)
df = df.drop(['Country'], axis=1)
df.head() | code |
89126682/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
y_pred_test = lr.predict(X_test)
print('Accuracy: ' + str(accuracy_score(y_test, y_pred_test) * 100) + '%') | code |
89126682/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
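# convert the continuous regression outputs into 0/1 class labels with a 0.51 cutoff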
for i in range(0, len(y_pred_train)):
if y_pred_train[i] > 0.51:
y_pred_train[i] = 1
else:
y_pred_train[i] = 0
print(y_pred_train) | code |
89126682/cell_37 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/facebook-ads/Facebook_Ads_2.csv', encoding='ISO-8859-1')
lr = LogisticRegression(random_state=0)
lr.fit(X_train, y_train)
y_pred_train = lr.predict(X_train)
y_pred_train
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
linR = LinearRegression().fit(X_train, y_train)
y_pred_train = linR.predict(X_train)
y_pred_train = (y_pred_train > 0.51).astype(int)  # threshold the continuous outputs; confusion_matrix rejects continuous values
cm = confusion_matrix(y_train, y_pred_train)
y_pred_test = lr.predict(X_test)
cm = confusion_matrix(y_test, y_pred_test)
KNN = KNeighborsClassifier(n_neighbors=2).fit(X_train, y_train)
y_pred_train = KNN.predict(X_train)
y_pred_test = KNN.predict(X_test)
cm = confusion_matrix(y_train, y_pred_train)
sns.heatmap(cm, annot=True, fmt='d') | code |
311174/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.location.value_counts()[:30].plot(kind='bar', figsize=(12, 7))
plt.title('Number of locations reported - Top 30') | code |
311174/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sbn
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
311174/cell_3 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.head(3) | code |
311174/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
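# the table is long-format: filter rows by data_field and overlay the two series on one axis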
df[df.data_field == 'confirmed_male'].value.plot()
df[df.data_field == 'confirmed_female'].value.plot().legend(('Male', 'Female'), loc='best')
plt.title('Confirmed Male vs Female cases') | code |
2008393/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
sales = pd.read_csv('../input/transactions.csv', parse_dates=['date'], dtype={'store_nbr': np.uint8, 'transactions': np.uint16})
items = pd.read_csv('../input/items.csv', dtype={'item_nbr': np.uint32, 'class': np.uint16, 'perishable': bool})  # np.bool was removed from NumPy; the builtin bool is the correct dtype
stores = pd.read_csv('../input/stores.csv')
oil = pd.read_csv('../input/oil.csv', parse_dates=['date'], dtype={'dcoilwtico': np.float16})
holidays = pd.read_csv('../input/holidays_events.csv', parse_dates=['date'])
train = pd.read_csv('../input/train.csv', nrows=6000000, parse_dates=['date'], dtype={'id': np.uint32, 'store_nbr': np.uint8, 'item_nbr': np.uint32, 'onpromotion': bool, 'unit_sales': np.float32})
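# count items per product family and plot the distribution as a bar chart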
items.groupby(['family']).size().plot(kind='bar', stacked=True, figsize=(13, 6), grid=False) | code |
2008393/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import math
import numpy as np
import pandas as pd
sales = pd.read_csv('../input/transactions.csv', parse_dates=['date'], dtype={'store_nbr': np.uint8, 'transactions': np.uint16})
items = pd.read_csv('../input/items.csv', dtype={'item_nbr': np.uint32, 'class': np.uint16, 'perishable': bool})
stores = pd.read_csv('../input/stores.csv')
oil = pd.read_csv('../input/oil.csv', parse_dates=['date'], dtype={'dcoilwtico': np.float16})
holidays = pd.read_csv('../input/holidays_events.csv', parse_dates=['date'])
train = pd.read_csv('../input/train.csv', nrows=6000000, parse_dates=['date'], dtype={'id': np.uint32, 'store_nbr': np.uint8, 'item_nbr': np.uint32, 'onpromotion': bool, 'unit_sales': np.float32})
train_items = train.merge(items, right_on='item_nbr', left_on='item_nbr', how='left')
train_items_stores = train_items.merge(stores, right_on='store_nbr', left_on='store_nbr', how='left')
train_items_stores_sales = train_items.merge(sales, right_on=['store_nbr', 'date'], left_on=['store_nbr', 'date'], how='left')
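# the last merge joins daily store transaction counts onto the item-level training rows by (store_nbr, date)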
def calc_percent(row):
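    """Return each count in the row as a percentage of the row total; NaN counts become 0.0."""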
total = row.sum()
percents = []
for sales in row:
if math.isnan(sales):
percents.append(0.0)
else:
percents.append(sales / total * 100)
return percents
train_items_stores.groupby(['type', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False) | code |
2008393/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import math
import numpy as np
import pandas as pd
sales = pd.read_csv('../input/transactions.csv', parse_dates=['date'], dtype={'store_nbr': np.uint8, 'transactions': np.uint16})
items = pd.read_csv('../input/items.csv', dtype={'item_nbr': np.uint32, 'class': np.uint16, 'perishable': bool})
stores = pd.read_csv('../input/stores.csv')
oil = pd.read_csv('../input/oil.csv', parse_dates=['date'], dtype={'dcoilwtico': np.float16})
holidays = pd.read_csv('../input/holidays_events.csv', parse_dates=['date'])
train = pd.read_csv('../input/train.csv', nrows=6000000, parse_dates=['date'], dtype={'id': np.uint32, 'store_nbr': np.uint8, 'item_nbr': np.uint32, 'onpromotion': bool, 'unit_sales': np.float32})
train_items = train.merge(items, right_on='item_nbr', left_on='item_nbr', how='left')
train_items_stores = train_items.merge(stores, right_on='store_nbr', left_on='store_nbr', how='left')
train_items_stores_sales = train_items.merge(sales, right_on=['store_nbr', 'date'], left_on=['store_nbr', 'date'], how='left')
def calc_percent(row):
total = row.sum()
percents = []
for sales in row:
if math.isnan(sales):
percents.append(0.0)
else:
percents.append(sales / total * 100)
return percents
train_items_stores.groupby(['type', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['cluster', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['state', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['type', 'family']).size().unstack().drop('GROCERY I', axis=1).apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['cluster', 'family']).size().unstack().drop('GROCERY I', axis=1).apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False) | code |
2008393/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import math
import numpy as np
import pandas as pd
sales = pd.read_csv('../input/transactions.csv', parse_dates=['date'], dtype={'store_nbr': np.uint8, 'transactions': np.uint16})
items = pd.read_csv('../input/items.csv', dtype={'item_nbr': np.uint32, 'class': np.uint16, 'perishable': bool})
stores = pd.read_csv('../input/stores.csv')
oil = pd.read_csv('../input/oil.csv', parse_dates=['date'], dtype={'dcoilwtico': np.float16})
holidays = pd.read_csv('../input/holidays_events.csv', parse_dates=['date'])
train = pd.read_csv('../input/train.csv', nrows=6000000, parse_dates=['date'], dtype={'id': np.uint32, 'store_nbr': np.uint8, 'item_nbr': np.uint32, 'onpromotion': bool, 'unit_sales': np.float32})
train_items = train.merge(items, right_on='item_nbr', left_on='item_nbr', how='left')
train_items_stores = train_items.merge(stores, right_on='store_nbr', left_on='store_nbr', how='left')
train_items_stores_sales = train_items.merge(sales, right_on=['store_nbr', 'date'], left_on=['store_nbr', 'date'], how='left')
def calc_percent(row):
total = row.sum()
percents = []
for sales in row:
if math.isnan(sales):
percents.append(0.0)
else:
percents.append(sales / total * 100)
return percents
train_items_stores.groupby(['type', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['cluster', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['state', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['type', 'family']).size().unstack().drop('GROCERY I', axis=1).apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['cluster', 'family']).size().unstack().drop('GROCERY I', axis=1).apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['state', 'family']).size().unstack().drop('GROCERY I', axis=1).apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False) | code |
2008393/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import math
import numpy as np
import pandas as pd
sales = pd.read_csv('../input/transactions.csv', parse_dates=['date'], dtype={'store_nbr': np.uint8, 'transactions': np.uint16})
items = pd.read_csv('../input/items.csv', dtype={'item_nbr': np.uint32, 'class': np.uint16, 'perishable': bool})
stores = pd.read_csv('../input/stores.csv')
oil = pd.read_csv('../input/oil.csv', parse_dates=['date'], dtype={'dcoilwtico': np.float16})
holidays = pd.read_csv('../input/holidays_events.csv', parse_dates=['date'])
train = pd.read_csv('../input/train.csv', nrows=6000000, parse_dates=['date'], dtype={'id': np.uint32, 'store_nbr': np.uint8, 'item_nbr': np.uint32, 'onpromotion': bool, 'unit_sales': np.float32})
train_items = train.merge(items, right_on='item_nbr', left_on='item_nbr', how='left')
train_items_stores = train_items.merge(stores, right_on='store_nbr', left_on='store_nbr', how='left')
train_items_stores_sales = train_items.merge(sales, right_on=['store_nbr', 'date'], left_on=['store_nbr', 'date'], how='left')
def calc_percent(row):
total = row.sum()
percents = []
for sales in row:
if math.isnan(sales):
percents.append(0.0)
else:
percents.append(sales / total * 100)
return percents
train_items_stores.groupby(['type', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['cluster', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['state', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['type', 'family']).size().unstack().drop('GROCERY I', axis=1).apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False) | code |
2008393/cell_10 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import math
import numpy as np
import pandas as pd
sales = pd.read_csv('../input/transactions.csv', parse_dates=['date'], dtype={'store_nbr': np.uint8, 'transactions': np.uint16})
items = pd.read_csv('../input/items.csv', dtype={'item_nbr': np.uint32, 'class': np.uint16, 'perishable': bool})
stores = pd.read_csv('../input/stores.csv')
oil = pd.read_csv('../input/oil.csv', parse_dates=['date'], dtype={'dcoilwtico': np.float16})
holidays = pd.read_csv('../input/holidays_events.csv', parse_dates=['date'])
train = pd.read_csv('../input/train.csv', nrows=6000000, parse_dates=['date'], dtype={'id': np.uint32, 'store_nbr': np.uint8, 'item_nbr': np.uint32, 'onpromotion': bool, 'unit_sales': np.float32})
train_items = train.merge(items, right_on='item_nbr', left_on='item_nbr', how='left')
train_items_stores = train_items.merge(stores, right_on='store_nbr', left_on='store_nbr', how='left')
train_items_stores_sales = train_items.merge(sales, right_on=['store_nbr', 'date'], left_on=['store_nbr', 'date'], how='left')
def calc_percent(row):
total = row.sum()
percents = []
for sales in row:
if math.isnan(sales):
percents.append(0.0)
else:
percents.append(sales / total * 100)
return percents
train_items_stores.groupby(['type', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['cluster', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False) | code |
2008393/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import math
import numpy as np
import pandas as pd
sales = pd.read_csv('../input/transactions.csv', parse_dates=['date'], dtype={'store_nbr': np.uint8, 'transactions': np.uint16})
items = pd.read_csv('../input/items.csv', dtype={'item_nbr': np.uint32, 'class': np.uint16, 'perishable': bool})
stores = pd.read_csv('../input/stores.csv')
oil = pd.read_csv('../input/oil.csv', parse_dates=['date'], dtype={'dcoilwtico': np.float16})
holidays = pd.read_csv('../input/holidays_events.csv', parse_dates=['date'])
train = pd.read_csv('../input/train.csv', nrows=6000000, parse_dates=['date'], dtype={'id': np.uint32, 'store_nbr': np.uint8, 'item_nbr': np.uint32, 'onpromotion': bool, 'unit_sales': np.float32})
train_items = train.merge(items, right_on='item_nbr', left_on='item_nbr', how='left')
train_items_stores = train_items.merge(stores, right_on='store_nbr', left_on='store_nbr', how='left')
train_items_stores_sales = train_items.merge(sales, right_on=['store_nbr', 'date'], left_on=['store_nbr', 'date'], how='left')
def calc_percent(row):
total = row.sum()
percents = []
for sales in row:
if math.isnan(sales):
percents.append(0.0)
else:
percents.append(sales / total * 100)
return percents
train_items_stores.groupby(['type', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['cluster', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False)
train_items_stores.groupby(['state', 'family']).size().unstack().apply(calc_percent, axis=1).plot(kind='bar', stacked=True, colormap='tab20c', figsize=(12, 10), grid=False) | code |
128010282/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
Data = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
Data.shape
Data.columns
Data.isnull().sum().sum()
Data.corr()
feature_name = list(Data.columns[:-1])
Data.drop('NOX', axis=1, inplace=True)
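# SimpleImputer defaults to strategy='mean', so each remaining column's NaNs are filled with that column's mean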
Data_copy = SimpleImputer().fit_transform(Data)
Data = pd.DataFrame(Data_copy, columns=Data.columns)
Data.isnull().sum().sum() | code |
128010282/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
Data = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
Data.shape
Data.columns
Data.isnull().sum().sum()
Data.corr()
sns.lineplot(x=Data['NOX'], y=Data['MEDV'], c='r') | code |
128010282/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
Data = pd.read_csv('/kaggle/input/boston-housing-dataset/HousingData.csv')
Data.shape
Data.columns
Data.isnull().sum().sum() | code |