path: stringlengths 13–17
screenshot_names: sequencelengths 1–873
code: stringlengths 0–40.4k
cell_type: stringclasses (1 value)
16111583/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import pandas as pd
from collections import defaultdict
import time
import unittest

t = unittest.TestCase()

SPACE_DIMENSIONS = 2

class Points(np.ndarray):
    """ndarray sized (SPACE_DIMENSIONS, ...) with named coordinates x, y"""

    @staticmethod
    def of(coords):
        p = np.asarray(coords).view(Points)
        assert p.shape[0] == SPACE_DIMENSIONS
        return p

    @property
    def x(self):
        return self[0]

    @property
    def y(self):
        return self[1]

class Lines(np.ndarray):
    """ndarray shaped (3, ...) with named line parameters a, b, c"""

    @staticmethod
    def of(abc):
        lp = np.asarray(abc).view(Lines)
        assert lp.shape[0] == 3
        return lp

    @property
    def a(self):
        return self[0]

    @property
    def b(self):
        return self[1]

    @property
    def c(self):
        return self[2]

    def intersections(self, hyperplanes) -> Points:
        """
        https://stackoverflow.com/a/20679579/2082707
        answered Dec 19 '13 at 10:46 by rook
        Adapted for numpy matrix operations by Subota

        Intersection points of lines from the first set with hyperplanes from
        the second set. Currently only 2D space is supported, i.e. the second
        set consists of lines, too.

        @hyperplanes parametric equation coeffs. For 2D it is also Lines
        @return array of intersection coordinates as Points, sized:
            - SPACE_DIMENSIONS for intersection coordinates
            - n1 for the number of lines passed in L1
            - n2 for the number of lines passed in L2
        """
        l1 = np.reshape(self, (*self.shape, 1))
        l2 = hyperplanes
        # Cramer's rule on a*x + b*y = c for every pair of lines
        d = l1.a * l2.b - l1.b * l2.a
        dx = l1.c * l2.b - l1.b * l2.c
        dy = l1.a * l2.c - l1.c * l2.a
        d[d == 0.0] = np.nan  # parallel lines: no intersection, yield NaN
        x = dx / d
        y = dy / d
        return Points.of((x, y))

class LineSegments(np.ndarray):
    """Wrapper around ndarray((2, SPACE_DIMENSIONS)) to access endPoint1,
    endPoint2 and coordinates x, y by names"""

    @staticmethod
    def of(point_coords):
        ls = np.asarray(point_coords).view(LineSegments)
        assert ls.shape[0] == 2
        assert ls.shape[1] == SPACE_DIMENSIONS
        return ls

    @property
    def endPoint1(self):
        return Points.of(self[0])

    @property
    def endPoint2(self):
        return Points.of(self[1])

    @property
    def x(self):
        return self[:, 0]

    @property
    def y(self):
        return self[:, 1]

    def length(self) -> np.array:
        dif = self.endPoint1 - self.endPoint2
        return np.sqrt(dif.x * dif.x + dif.y * dif.y).view(np.ndarray)

    def lines(self) -> Lines:
        """
        https://stackoverflow.com/a/20679579/2082707
        answered Dec 19 '13 at 10:46 by rook
        Adapted for numpy matrix operations by Subota

        Calculates the line equation A*x + B*y - C = 0, given two points on a
        line. Horizontal and vertical lines are OK.

        @return an array of Lines parameters sized:
            - 3 for the parameters A, B, and C
            - n for the number of lines calculated
        """
        p1, p2 = (self.endPoint1, self.endPoint2)
        a = p1.y - p2.y
        b = p2.x - p1.x
        c = -(p1.x * p2.y - p2.x * p1.y)
        return Lines.of((a, b, c))

    def intersections(self, other):
        """
        Returns intersection points for two segment sets, along with a
        true/false matrix telling whether each intersection lies within the
        segments or not.

        @other LineSegments to find intersections with. Sized:
            - 2 for the endPoint1 and endPoint2
            - SPACE_DIMENSIONS
            - n1 for the number of segments in the first set
            Generally speaking these must be hyperplanes in N-dimensional space
        @return a tuple with two elements:
            0. boolean matrix sized (n1, n2), True for an intersection that
               falls within the segments, False otherwise
            1. intersection Points sized (SPACE_DIMENSIONS, n1, n2)
        """
        s1, s2 = (self, other)
        l1, l2 = (self.lines(), other.lines())
        il = l1.intersections(l2)
        s1 = s1.reshape((2, SPACE_DIMENSIONS, -1, 1))
        s1p1, s1p2 = (s1.endPoint1, s1.endPoint2)
        s2p1, s2p2 = (s2.endPoint1, s2.endPoint2)
        ROUNDING_THRESHOLD = np.array(1e-10)
        # The crossing counts only if it lies inside both segments' bounding
        # boxes, up to the rounding threshold.
        which_intersect = (
            (il.x <= np.maximum(s1p1.x, s1p2.x) + ROUNDING_THRESHOLD)
            & (il.x >= np.minimum(s1p1.x, s1p2.x) - ROUNDING_THRESHOLD)
            & (il.y <= np.maximum(s1p1.y, s1p2.y) + ROUNDING_THRESHOLD)
            & (il.y >= np.minimum(s1p1.y, s1p2.y) - ROUNDING_THRESHOLD)
            & (il.x <= np.maximum(s2p1.x, s2p2.x) + ROUNDING_THRESHOLD)
            & (il.x >= np.minimum(s2p1.x, s2p2.x) - ROUNDING_THRESHOLD)
            & (il.y <= np.maximum(s2p1.y, s2p2.y) + ROUNDING_THRESHOLD)
            & (il.y >= np.minimum(s2p1.y, s2p2.y) - ROUNDING_THRESHOLD)
        )
        return (which_intersect, il)

t.assertTrue(np.allclose(LineSegments.of([[[-1.0], [-1]], [[1], [1]]]).lines().flat, np.array([-2, 2, 0])))
t.assertTrue(np.allclose(LineSegments.of([[[0.0], [-1]], [[0], [1]]]).lines().flat, np.array([-2, 0, 0])))
t.assertTrue(np.allclose(LineSegments.of([[[3.0], [1]], [[-4], [1]]]).lines().flat, np.array([0, -7, -7])))
t.assertEqual(LineSegments.of([Points.of([0, 0]), Points.of([3, 4])]).length(), 5)

def demo_intersect_lines():
    seg1 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 2), random_state=19))
    seg2 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 3), random_state=15) + 1)
    l1, l2 = (seg1.lines(), seg2.lines())
    i = l1.intersections(l2)
    plt.plot(seg1.x, seg1.y, '-', c='green')
    plt.plot(seg2.x, seg2.y, '-', c='blue')
    plt.plot(i.x, i.y, '+', c='red', markersize=20)
    plt.title('Extended Line Intersections')
    plt.axis('off')

def demo_intersect_segments():
    seg1 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 4), random_state=1))
    seg2 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 5), random_state=2))
    plt.plot(seg1.x, seg1.y, '-', c='black')
    plt.plot(seg2.x, seg2.y, '-', c='lightgrey')
    w, i = seg1.intersections(seg2)
    plt.plot(i.x[w], i.y[w], '+', c='red', markersize=20)
    plt.title('Segment Intersections')
    plt.axis('off')

f, ax = plt.subplots(ncols=2)
f.set_size_inches(12, 4)
plt.sca(ax[0])
demo_intersect_lines()
plt.sca(ax[1])
demo_intersect_segments()

SEGMENT_ENDPOINTS = 2
NUM_WALLS = 7
SOURCE = Points.of((0.0, 0.0))
DETECTOR = LineSegments.of(((8.0, -1), (8.0, +1)))
walls = LineSegments.of(np.zeros((SEGMENT_ENDPOINTS, SPACE_DIMENSIONS, NUM_WALLS)))
SLIT_WIDTH, SLITS_APART = (0.05, 0.5)
# The wall with the slits
walls[:, :, 1] = ((6.0, +1.0), (6.0, +SLITS_APART / 2 + SLIT_WIDTH))  # above the slits
walls[:, :, 2] = ((6.0, -SLITS_APART / 2), (6.0, +SLITS_APART / 2))  # between the slits
walls[:, :, 3] = ((6.0, -1.0), (6.0, -SLITS_APART / 2 - SLIT_WIDTH))  # below the slits
# The surrounding box
walls[:, :, 4] = ((-1, -1), (-1, +1))  # left wall
walls[:, :, 5] = ((-1, +1), (+8.1, +1))  # top
walls[:, :, 6] = ((+8.1, +1), (+8.1, -1))  # right
walls[:, :, 0] = ((+8.1, -1), (-1, -1))  # bottom

def plot_experimet_setup(walls, detector, source):
    plt.plot(*source, 'o', color='red', label='Source')
    wall_lines = plt.plot(walls.x, walls.y, '-', c='black', linewidth=1)
    wall_lines[1].set_label('Walls')
    plt.plot(detector.x, detector.y, '-', c='green', linewidth=4, label='Detector')
    plt.gcf().set_size_inches(12, 5)
    plt.legend(loc='upper center')

plot_experimet_setup(walls, DETECTOR, SOURCE)
code
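The cell above stores a line through points p1 and p2 as coefficients (A, B, C) of A*x + B*y = C, and crosses two such lines by Cramer's rule on the resulting 2x2 system. A minimal standalone sketch of that arithmetic (plain Python; the names line and intersect are chosen here for illustration, they are not the notebook's API):

def line(p, q):
    # Coefficients (A, B, C) of A*x + B*y = C through points p and q.
    a = p[1] - q[1]
    b = q[0] - p[0]
    c = -(p[0] * q[1] - q[0] * p[1])
    return (a, b, c)

def intersect(l1, l2):
    a1, b1, c1 = l1
    a2, b2, c2 = l2
    d = a1 * b2 - b1 * a2  # determinant; zero means the lines are parallel
    if d == 0:
        return None
    return ((c1 * b2 - b1 * c2) / d, (a1 * c2 - c1 * a2) / d)  # Cramer's rule

print(intersect(line((0, 0), (1, 1)), line((0, 1), (1, 0))))  # (0.5, 0.5)

The notebook's Lines.intersections does exactly this, but vectorized over an (n1, n2) grid of line pairs, with the parallel case mapped to NaN instead of None.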
16111583/cell_10
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import pandas as pd
from collections import defaultdict
import time
import unittest

t = unittest.TestCase()

SPACE_DIMENSIONS = 2

class Points(np.ndarray):
    """ndarray sized (SPACE_DIMENSIONS, ...) with named coordinates x, y"""

    @staticmethod
    def of(coords):
        p = np.asarray(coords).view(Points)
        assert p.shape[0] == SPACE_DIMENSIONS
        return p

    @property
    def x(self):
        return self[0]

    @property
    def y(self):
        return self[1]

class Lines(np.ndarray):
    """ndarray shaped (3, ...) with named line parameters a, b, c"""

    @staticmethod
    def of(abc):
        lp = np.asarray(abc).view(Lines)
        assert lp.shape[0] == 3
        return lp

    @property
    def a(self):
        return self[0]

    @property
    def b(self):
        return self[1]

    @property
    def c(self):
        return self[2]

    def intersections(self, hyperplanes) -> Points:
        """
        https://stackoverflow.com/a/20679579/2082707
        answered Dec 19 '13 at 10:46 by rook
        Adapted for numpy matrix operations by Subota

        Intersection points of lines from the first set with hyperplanes from
        the second set. Currently only 2D space is supported, i.e. the second
        set consists of lines, too.

        @hyperplanes parametric equation coeffs. For 2D it is also Lines
        @return array of intersection coordinates as Points, sized:
            - SPACE_DIMENSIONS for intersection coordinates
            - n1 for the number of lines passed in L1
            - n2 for the number of lines passed in L2
        """
        l1 = np.reshape(self, (*self.shape, 1))
        l2 = hyperplanes
        # Cramer's rule on a*x + b*y = c for every pair of lines
        d = l1.a * l2.b - l1.b * l2.a
        dx = l1.c * l2.b - l1.b * l2.c
        dy = l1.a * l2.c - l1.c * l2.a
        d[d == 0.0] = np.nan  # parallel lines: no intersection, yield NaN
        x = dx / d
        y = dy / d
        return Points.of((x, y))

class LineSegments(np.ndarray):
    """Wrapper around ndarray((2, SPACE_DIMENSIONS)) to access endPoint1,
    endPoint2 and coordinates x, y by names"""

    @staticmethod
    def of(point_coords):
        ls = np.asarray(point_coords).view(LineSegments)
        assert ls.shape[0] == 2
        assert ls.shape[1] == SPACE_DIMENSIONS
        return ls

    @property
    def endPoint1(self):
        return Points.of(self[0])

    @property
    def endPoint2(self):
        return Points.of(self[1])

    @property
    def x(self):
        return self[:, 0]

    @property
    def y(self):
        return self[:, 1]

    def length(self) -> np.array:
        dif = self.endPoint1 - self.endPoint2
        return np.sqrt(dif.x * dif.x + dif.y * dif.y).view(np.ndarray)

    def lines(self) -> Lines:
        """
        https://stackoverflow.com/a/20679579/2082707
        answered Dec 19 '13 at 10:46 by rook
        Adapted for numpy matrix operations by Subota

        Calculates the line equation A*x + B*y - C = 0, given two points on a
        line. Horizontal and vertical lines are OK.

        @return an array of Lines parameters sized:
            - 3 for the parameters A, B, and C
            - n for the number of lines calculated
        """
        p1, p2 = (self.endPoint1, self.endPoint2)
        a = p1.y - p2.y
        b = p2.x - p1.x
        c = -(p1.x * p2.y - p2.x * p1.y)
        return Lines.of((a, b, c))

    def intersections(self, other):
        """
        Returns intersection points for two segment sets, along with a
        true/false matrix telling whether each intersection lies within the
        segments or not.

        @other LineSegments to find intersections with. Sized:
            - 2 for the endPoint1 and endPoint2
            - SPACE_DIMENSIONS
            - n1 for the number of segments in the first set
            Generally speaking these must be hyperplanes in N-dimensional space
        @return a tuple with two elements:
            0. boolean matrix sized (n1, n2), True for an intersection that
               falls within the segments, False otherwise
            1. intersection Points sized (SPACE_DIMENSIONS, n1, n2)
        """
        s1, s2 = (self, other)
        l1, l2 = (self.lines(), other.lines())
        il = l1.intersections(l2)
        s1 = s1.reshape((2, SPACE_DIMENSIONS, -1, 1))
        s1p1, s1p2 = (s1.endPoint1, s1.endPoint2)
        s2p1, s2p2 = (s2.endPoint1, s2.endPoint2)
        ROUNDING_THRESHOLD = np.array(1e-10)
        # The crossing counts only if it lies inside both segments' bounding
        # boxes, up to the rounding threshold.
        which_intersect = (
            (il.x <= np.maximum(s1p1.x, s1p2.x) + ROUNDING_THRESHOLD)
            & (il.x >= np.minimum(s1p1.x, s1p2.x) - ROUNDING_THRESHOLD)
            & (il.y <= np.maximum(s1p1.y, s1p2.y) + ROUNDING_THRESHOLD)
            & (il.y >= np.minimum(s1p1.y, s1p2.y) - ROUNDING_THRESHOLD)
            & (il.x <= np.maximum(s2p1.x, s2p2.x) + ROUNDING_THRESHOLD)
            & (il.x >= np.minimum(s2p1.x, s2p2.x) - ROUNDING_THRESHOLD)
            & (il.y <= np.maximum(s2p1.y, s2p2.y) + ROUNDING_THRESHOLD)
            & (il.y >= np.minimum(s2p1.y, s2p2.y) - ROUNDING_THRESHOLD)
        )
        return (which_intersect, il)

t.assertTrue(np.allclose(LineSegments.of([[[-1.0], [-1]], [[1], [1]]]).lines().flat, np.array([-2, 2, 0])))
t.assertTrue(np.allclose(LineSegments.of([[[0.0], [-1]], [[0], [1]]]).lines().flat, np.array([-2, 0, 0])))
t.assertTrue(np.allclose(LineSegments.of([[[3.0], [1]], [[-4], [1]]]).lines().flat, np.array([0, -7, -7])))
t.assertEqual(LineSegments.of([Points.of([0, 0]), Points.of([3, 4])]).length(), 5)

def demo_intersect_lines():
    seg1 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 2), random_state=19))
    seg2 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 3), random_state=15) + 1)
    l1, l2 = (seg1.lines(), seg2.lines())
    i = l1.intersections(l2)
    plt.plot(seg1.x, seg1.y, '-', c='green')
    plt.plot(seg2.x, seg2.y, '-', c='blue')
    plt.plot(i.x, i.y, '+', c='red', markersize=20)
    plt.title('Extended Line Intersections')
    plt.axis('off')

def demo_intersect_segments():
    seg1 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 4), random_state=1))
    seg2 = LineSegments.of(st.uniform.rvs(size=(2, SPACE_DIMENSIONS, 5), random_state=2))
    plt.plot(seg1.x, seg1.y, '-', c='black')
    plt.plot(seg2.x, seg2.y, '-', c='lightgrey')
    w, i = seg1.intersections(seg2)
    plt.plot(i.x[w], i.y[w], '+', c='red', markersize=20)
    plt.title('Segment Intersections')
    plt.axis('off')

f, ax = plt.subplots(ncols=2)
f.set_size_inches(12, 4)
plt.sca(ax[0])
demo_intersect_lines()
plt.sca(ax[1])
demo_intersect_segments()

SEGMENT_ENDPOINTS = 2
NUM_WALLS = 7
SOURCE = Points.of((0.0, 0.0))
DETECTOR = LineSegments.of(((8.0, -1), (8.0, +1)))
walls = LineSegments.of(np.zeros((SEGMENT_ENDPOINTS, SPACE_DIMENSIONS, NUM_WALLS)))
SLIT_WIDTH, SLITS_APART = (0.05, 0.5)
# The wall with the slits
walls[:, :, 1] = ((6.0, +1.0), (6.0, +SLITS_APART / 2 + SLIT_WIDTH))  # above the slits
walls[:, :, 2] = ((6.0, -SLITS_APART / 2), (6.0, +SLITS_APART / 2))  # between the slits
walls[:, :, 3] = ((6.0, -1.0), (6.0, -SLITS_APART / 2 - SLIT_WIDTH))  # below the slits
# The square box around the experiment
walls[:, :, 4] = ((-1, -1), (-1, +1))  # left wall
walls[:, :, 5] = ((-1, +1), (+8.1, +1))  # top
walls[:, :, 6] = ((+8.1, +1), (+8.1, -1))  # right
walls[:, :, 0] = ((+8.1, -1), (-1, -1))  # bottom

def plot_experimet_setup(walls, detector, source):
    plt.plot(*source, 'o', color='red', label='Source')
    wall_lines = plt.plot(walls.x, walls.y, '-', c='black', linewidth=1)
    wall_lines[1].set_label('Walls')
    plt.plot(detector.x, detector.y, '-', c='green', linewidth=4, label='Detector')
    plt.gcf().set_size_inches(12, 5)
    plt.legend(loc='upper center')

plot_experimet_setup(walls, DETECTOR, SOURCE)

detections = []
np.random.seed(1254785)
MIN_STEPS_TO_DETECTION = 2
BATCH_SIZE = 50000

def shifter_uniform_destination(r0: Points):
    """Shift is such that a photon arrives at a uniformly picked location in
    the test setup area, regardless of its current position"""
    target_x = st.uniform(loc=-1, scale=1 + 6.0).rvs(r0.shape[1])
    target_y = st.uniform(loc=-1, scale=1 + 1.0).rvs(r0.shape[1])
    return np.array([target_x, target_y]) - r0

photons = Points.of(np.zeros((SPACE_DIMENSIONS, BATCH_SIZE)))
lengths = np.zeros(BATCH_SIZE)
steps = np.zeros(BATCH_SIZE, dtype='B')
start = time.monotonic()
last_reported = len(detections)
epoch = 0
while len(detections) < 1000000:
    epoch += 1
    if last_reported <= len(detections) - 50000:
        last_reported = round(len(detections) / 1000) * 1000
        print(len(detections), end=', ')
    steps += 1
    # Early hops target a uniform point in the box; later hops aim at the detector
    randomInBox = Points.of(st.uniform().rvs(photons.shape))
    randomInBox.x[...] *= 9
    randomInBox.x[...] -= 1
    randomInBox.y[...] *= 2
    randomInBox.y[...] -= 1
    randomInDetector = Points.of(st.uniform().rvs(photons.shape))
    randomInDetector.x[...] = 8
    randomInDetector.y[...] *= 2
    randomInDetector.y[...] -= 1
    newLoc = np.where(steps < MIN_STEPS_TO_DETECTION, randomInBox, randomInDetector)
    moves = LineSegments.of((photons, newLoc))
    lengths += moves.length()
    photons = moves.endPoint2
    # Any photon whose move segment crosses a wall restarts from the source
    colliders, _ = moves.intersections(walls)
    colliders = np.logical_or.reduce(colliders, axis=1)
    photons[:, colliders] = 0
    steps[colliders] = 0
    lengths[colliders] = 0
    # Photons that survived enough steps are recorded as detections
    detected = steps >= MIN_STEPS_TO_DETECTION
    for i in np.where(detected)[0]:
        detections += [(*photons[:, i], lengths[i])]
    photons[:, detected] = 0
    steps[detected] = 0
    lengths[detected] = 0
print('Time total: %.1f sec' % (time.monotonic() - start))
code
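The sampling loop in cell_10 above keeps a whole batch of photons as column-parallel NumPy arrays (positions, path lengths, step counts) and, wherever a move segment hits a wall, resets those columns to the source in one masked assignment. A toy sketch of that reset-on-collision bookkeeping with made-up 1-D positions (illustrative only, not the notebook's geometry):

import numpy as np

pos = np.array([0.3, 1.2, 0.7, 2.5])       # toy photon positions
steps = np.array([1, 3, 2, 4], dtype='B')  # steps taken per photon
collided = pos > 1.0                       # stand-in for the wall-crossing test
pos[collided] = 0                          # collided photons restart at the source
steps[collided] = 0
print(pos, steps)                          # [0.3 0.  0.7 0. ] [1 0 2 0]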
50243774/cell_13
[ "text_plain_output_1.png" ]
from mmdet.apis import init_detector, inference_detector
from tqdm import tqdm
import mmcv
import os
import pandas as pd
import torch

def format_prediction_string(boxes, scores):
    pred_strings = []
    for j in zip(scores, boxes):
        pred_strings.append('{0:.4f} {1} {2} {3} {4}'.format(j[0], j[1][0], j[1][1], j[1][2], j[1][3]))
    return ' '.join(pred_strings)

CONFIG_FILE = './config.py'
CHECKPOINT_PATH = './model.pth'
TEST_IMG_DIR = '../input/global-wheat-detection/test'

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
config = mmcv.Config.fromfile(CONFIG_FILE)
config.model.pretrained = None
config.model.neck.rfp_backbone.pretrained = None
if False:  # disabled: optional override of the test-time pipeline
    img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
    config.data.test.pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='MultiScaleFlipAug',
             img_scale=(1024, 1024),
             flip=False,
             transforms=[
                 dict(type='Resize', keep_ratio=True),
                 dict(type='RandomFlip'),
                 dict(type='Normalize', **img_norm_cfg),
                 dict(type='Pad', size_divisor=32),
                 dict(type='ImageToTensor', keys=['img']),
                 dict(type='Collect', keys=['img']),
             ]),
    ]
model = init_detector(config, CHECKPOINT_PATH, device=device)
model.eval()

results = []
with torch.no_grad():
    for img_name in tqdm(os.listdir(TEST_IMG_DIR)):
        img_pth = os.path.join(TEST_IMG_DIR, img_name)
        result = inference_detector(model, img_pth)
        boxes = result[0][:, :4]
        scores = result[0][:, 4]
        if len(boxes) > 0:
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]  # x2 -> width
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]  # y2 -> height
        result = {'image_id': img_name[:-4], 'PredictionString': format_prediction_string(boxes, scores)}
        results.append(result)

test_df = pd.DataFrame(results, columns=['image_id', 'PredictionString'])
test_df.to_csv('submission.csv', index=False)
test_df.head()
code
50243774/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
! pip install ../input/mmdetectionv260/addict-2.4.0-py3-none-any.whl
! pip install ../input/mmdetectionv260/mmcv_full-latesttorch1.6.0cu102-cp37-cp37m-manylinux1_x86_64.whl
! pip install ../input/mmdetectionv260/mmpycocotools-12.0.3-cp37-cp37m-linux_x86_64.whl
! pip install ../input/mmdetection-package/mmdet-2.7.0-py3-none-any.whl
code
50243774/cell_11
[ "text_plain_output_1.png" ]
from mmdet.apis import init_detector, inference_detector
import mmcv
import torch

CONFIG_FILE = './config.py'
CHECKPOINT_PATH = './model.pth'
TEST_IMG_DIR = '../input/global-wheat-detection/test'

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
config = mmcv.Config.fromfile(CONFIG_FILE)
config.model.pretrained = None
config.model.neck.rfp_backbone.pretrained = None
if False:  # disabled: optional override of the test-time pipeline
    img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
    config.data.test.pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='MultiScaleFlipAug',
             img_scale=(1024, 1024),
             flip=False,
             transforms=[
                 dict(type='Resize', keep_ratio=True),
                 dict(type='RandomFlip'),
                 dict(type='Normalize', **img_norm_cfg),
                 dict(type='Pad', size_divisor=32),
                 dict(type='ImageToTensor', keys=['img']),
                 dict(type='Collect', keys=['img']),
             ]),
    ]
model = init_detector(config, CHECKPOINT_PATH, device=device)
model.eval()
code
50243774/cell_3
[ "text_html_output_1.png" ]
! pip install ../input/mmdetection-package/torch-1.6.0-cp37-cp37m-linux_x86_64.whl
code
50243774/cell_12
[ "text_plain_output_1.png" ]
from mmdet.apis import init_detector, inference_detector
from tqdm import tqdm
import mmcv
import os
import torch

def format_prediction_string(boxes, scores):
    pred_strings = []
    for j in zip(scores, boxes):
        pred_strings.append('{0:.4f} {1} {2} {3} {4}'.format(j[0], j[1][0], j[1][1], j[1][2], j[1][3]))
    return ' '.join(pred_strings)

CONFIG_FILE = './config.py'
CHECKPOINT_PATH = './model.pth'
TEST_IMG_DIR = '../input/global-wheat-detection/test'

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
config = mmcv.Config.fromfile(CONFIG_FILE)
config.model.pretrained = None
config.model.neck.rfp_backbone.pretrained = None
if False:  # disabled: optional override of the test-time pipeline
    img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
    config.data.test.pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='MultiScaleFlipAug',
             img_scale=(1024, 1024),
             flip=False,
             transforms=[
                 dict(type='Resize', keep_ratio=True),
                 dict(type='RandomFlip'),
                 dict(type='Normalize', **img_norm_cfg),
                 dict(type='Pad', size_divisor=32),
                 dict(type='ImageToTensor', keys=['img']),
                 dict(type='Collect', keys=['img']),
             ]),
    ]
model = init_detector(config, CHECKPOINT_PATH, device=device)
model.eval()

results = []
with torch.no_grad():
    for img_name in tqdm(os.listdir(TEST_IMG_DIR)):
        img_pth = os.path.join(TEST_IMG_DIR, img_name)
        result = inference_detector(model, img_pth)
        boxes = result[0][:, :4]
        scores = result[0][:, 4]
        if len(boxes) > 0:
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]  # x2 -> width
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]  # y2 -> height
        result = {'image_id': img_name[:-4], 'PredictionString': format_prediction_string(boxes, scores)}
        results.append(result)
code
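The inference cells above rewrite detector output from corner format (x1, y1, x2, y2) into the (x, y, width, height) layout the competition's PredictionString expects, then render it with format_prediction_string. A small self-contained sketch of just that conversion (NumPy only; the detection values are made up for illustration):

import numpy as np

result = np.array([[10.0, 20.0, 50.0, 80.0, 0.9]])  # one fake box plus its score
boxes, scores = (result[:, :4].copy(), result[:, 4])
boxes[:, 2] -= boxes[:, 0]  # width  = x2 - x1
boxes[:, 3] -= boxes[:, 1]  # height = y2 - y1
print(' '.join('{0:.4f} {1} {2} {3} {4}'.format(s, *b) for s, b in zip(scores, boxes)))
# -> 0.9000 10.0 20.0 40.0 60.0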
73059955/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]

plt.figure(figsize=(20, 10))
ax = sns.violinplot(data=train_data[cont_features], inner=None, palette='viridis')
plt.title('Continuous features distribution')

train_data.var()
train_data.std()

cat = train_data.select_dtypes(include='object').columns.tolist()
idx = 0
f, axes = plt.subplots(5, 2, sharex=True, figsize=(12, 14))
plt.suptitle('Categorical features distribution', size=16, y=0.94)
for row in range(5):
    for col in range(2):
        data = train_data[cat[idx]].value_counts()
        sns.barplot(x=data.index, y=data.values, palette='viridis', ax=axes[row, col])
        axes[row, col].set_title(cat[idx])
        idx += 1
code
73059955/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
train_data.head(3)
code
73059955/cell_25
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]

plt.figure(figsize=(20, 10))
ax = sns.violinplot(data=train_data[cont_features], inner=None, palette='viridis')
plt.title('Continuous features distribution')

train_data.var()
train_data.std()

cat = train_data.select_dtypes(include='object').columns.tolist()
idx = 0
f, axes = plt.subplots(5, 2, sharex=True, figsize=(12, 14))
plt.suptitle('Categorical features distribution', size=16, y=0.94)
for row in range(5):
    for col in range(2):
        data = train_data[cat[idx]].value_counts()
        sns.barplot(x=data.index, y=data.values, palette='viridis', ax=axes[row, col])
        axes[row, col].set_title(cat[idx])
        idx += 1

corrMatrix = train_data.corr(method='pearson', min_periods=1)
corrMatrix
plt.figure(figsize=(25, 20))
ax = sns.heatmap(corrMatrix, cmap='viridis', annot=True)
code
73059955/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import os, glob
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='whitegrid')
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, Normalizer, MaxAbsScaler
from sklearn.preprocessing import StandardScaler, PowerTransformer, QuantileTransformer, LabelEncoder, OneHotEncoder, OrdinalEncoder
from xgboost import XGBRegressor
print('set up complete')
code
73059955/cell_29
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]

plt.figure(figsize=(20, 10))
ax = sns.violinplot(data=train_data[cont_features], inner=None, palette='viridis')
plt.title('Continuous features distribution')

train_data.var()
train_data.std()

cat = train_data.select_dtypes(include='object').columns.tolist()
idx = 0
f, axes = plt.subplots(5, 2, sharex=True, figsize=(12, 14))
plt.suptitle('Categorical features distribution', size=16, y=0.94)
for row in range(5):
    for col in range(2):
        data = train_data[cat[idx]].value_counts()
        sns.barplot(x=data.index, y=data.values, palette='viridis', ax=axes[row, col])
        axes[row, col].set_title(cat[idx])
        idx += 1

corrMatrix = train_data.corr(method='pearson', min_periods=1)
corrMatrix
# heatmap
plt.figure(figsize=(25, 20))
ax = sns.heatmap(corrMatrix, cmap='viridis', annot=True)

plt.figure(figsize=(12, 5))
ax = sns.boxplot(train_data['target'], orient='h')
ax.set_title('Target variable boxplot')
code
73059955/cell_2
[ "image_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73059955/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
print('Info about train data: ')
train_data.info()
code
73059955/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]
train_data.var()
train_data.std()
code
73059955/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]

plt.figure(figsize=(20, 10))
ax = sns.violinplot(data=train_data[cont_features], inner=None, palette='viridis')
plt.title('Continuous features distribution')

train_data.var()
train_data.std()

cat = train_data.select_dtypes(include='object').columns.tolist()
idx = 0
f, axes = plt.subplots(5, 2, sharex=True, figsize=(12, 14))
plt.suptitle('Categorical features distribution', size=16, y=0.94)
for row in range(5):
    for col in range(2):
        data = train_data[cat[idx]].value_counts()
        sns.barplot(x=data.index, y=data.values, palette='viridis', ax=axes[row, col])
        axes[row, col].set_title(cat[idx])
        idx += 1

corrMatrix = train_data.corr(method='pearson', min_periods=1)
corrMatrix
# heatmap
plt.figure(figsize=(25, 20))
ax = sns.heatmap(corrMatrix, cmap='viridis', annot=True)

plt.figure(figsize=(12, 5))
sns.distplot(train_data['target'], color='maroon', kde=True, bins=120, label='target')
plt.title('target values Distribution ')
code
73059955/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]

plt.figure(figsize=(20, 10))
ax = sns.violinplot(data=train_data[cont_features], inner=None, palette='viridis')
plt.title('Continuous features distribution')
code
73059955/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]
train_data.var()
code
73059955/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]

plt.figure(figsize=(20, 10))
ax = sns.violinplot(data=train_data[cont_features], inner=None, palette='viridis')
plt.title('Continuous features distribution')

train_data.var()
train_data.std()

cat = train_data.select_dtypes(include='object').columns.tolist()
idx = 0
f, axes = plt.subplots(5, 2, sharex=True, figsize=(12, 14))
plt.suptitle('Categorical features distribution', size=16, y=0.94)
for row in range(5):
    for col in range(2):
        data = train_data[cat[idx]].value_counts()
        sns.barplot(x=data.index, y=data.values, palette='viridis', ax=axes[row, col])
        axes[row, col].set_title(cat[idx])
        idx += 1

corrMatrix = train_data.corr(method='pearson', min_periods=1)
corrMatrix
code
73059955/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]
train_data.describe().T.style.bar().background_gradient(cmap='viridis')
code
73059955/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
print(f'Number of rows: {train_data.shape[0]}; Number of columns: {train_data.shape[1]}; No of missing values: {sum(train_data.isna().sum())}')
code
73059955/cell_27
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]

plt.figure(figsize=(20, 10))
ax = sns.violinplot(data=train_data[cont_features], inner=None, palette='viridis')
plt.title('Continuous features distribution')

train_data.var()
train_data.std()

cat = train_data.select_dtypes(include='object').columns.tolist()
idx = 0
f, axes = plt.subplots(5, 2, sharex=True, figsize=(12, 14))
plt.suptitle('Categorical features distribution', size=16, y=0.94)
for row in range(5):
    for col in range(2):
        data = train_data[cat[idx]].value_counts()
        sns.barplot(x=data.index, y=data.values, palette='viridis', ax=axes[row, col])
        axes[row, col].set_title(cat[idx])
        idx += 1

corrMatrix = train_data.corr(method='pearson', min_periods=1)
corrMatrix
print('target column basic statistics:')
train_data['target'].describe()
code
73059955/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('../input/30-days-of-ml/train.csv')
test_data = pd.read_csv('../input/30-days-of-ml/test.csv')
sample = pd.read_csv('../input/30-days-of-ml/sample_submission.csv', index_col='id')
cat_features = [feature for feature in train_data.columns if 'cat' in feature]
cont_features = [feature for feature in train_data.columns if 'cont' in feature]
print(f'categorical features are : {cat_features}; numerical features are : {cont_features}')
code
50233728/cell_13
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)

data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')

plt.figure(figsize=(15, 6))
quality_count = data['quality'].value_counts().sort_values(ascending=False).to_frame()
quality_count = quality_count.rename(columns={'quality': 'Count'})
ax = sns.barplot(x=quality_count.index, y='Count', data=quality_count, palette='ch:.25')
for p in ax.patches:
    ax.annotate(format(p.get_height(), '.1f'),
                (p.get_x() + p.get_width() / 2.0, p.get_height()),
                ha='center', va='center', xytext=(0, 9), textcoords='offset points')
plt.xlabel('Quality', fontsize=15)
plt.ylabel('Count', fontsize=15)
plt.title('Bar plot of Quality', fontsize=20)
plt.show()

table_nan = data.isna().sum().to_frame().style.background_gradient(cmap=cmap)
table_nan

feature_desc = [
    'most acids involved with wine or fixed or nonvolatile (do not evaporate readily)',
    'the amount of acetic acid in wine, which at too high of levels can lead to an unpleasant, vinegar taste',
    'found in small quantities, citric acid can add freshness and flavor to wines',
    "the amount of sugar remaining after fermentation stops, it's rare to find wines with less than 1 gram/liter and wines with greater than 45 grams/liter are considered sweet",
    'the amount of salt in the wine',
    'the free form of SO2 exists in equilibrium between molecular SO2 (as a dissolved gas) and bisulfite ion; it prevents microbial growth and the oxidation of wine',
    'amount of free and bound forms of SO2; in low concentrations, SO2 is mostly undetectable in wine, but at free SO2 concentrations over 50 ppm, SO2 becomes evident in the nose and taste of wine',
    'the density of wine is close to that of water depending on the percent alcohol and sugar content',
    'describes how acidic or basic a wine is on a scale from 0 (very acidic) to 14 (very basic); most wines are between 3-4 on the pH scale',
    'a wine additive which can contribute to sulfur dioxide gas (SO2) levels, which acts as an antimicrobial and antioxidant',
    '-',
    'score between 0 and 10',
]
feature_desc = pd.DataFrame(feature_desc, columns=['Description'], index=data.columns)
data_desc = data.describe().T
data_description = pd.concat([feature_desc, data_desc], axis=1)

f, ax = plt.subplots(3, 4, figsize=(25, 15))
sns.despine(left=True)
sns.boxplot(data['fixed acidity'], ax=ax[0, 0])
sns.boxplot(data['volatile acidity'], ax=ax[0, 1])
sns.boxplot(data['citric acid'], ax=ax[0, 2])
sns.boxplot(data['residual sugar'], ax=ax[0, 3])
sns.boxplot(data['chlorides'], ax=ax[1, 0])
sns.boxplot(data['density'], ax=ax[1, 1])
sns.boxplot(data['pH'], ax=ax[1, 2])
sns.boxplot(data['sulphates'], ax=ax[1, 3])
sns.boxplot(data['alcohol'], ax=ax[2, 0])
sns.boxplot(data['total sulfur dioxide'], ax=ax[2, 1])
sns.boxplot(data['free sulfur dioxide'], ax=ax[2, 2])
sns.boxplot(data['quality'], ax=ax[2, 3])
plt.show()

density_mean = np.mean(data['density'])
density_median = np.median(data['density'])
density_mode = data['density'].mode()[0]
q1 = data['density'].quantile(0.25)
q3 = data['density'].quantile(0.75)
density_IQR = q3 - q1

f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={'height_ratios': (0.2, 1)})
sns.boxplot(data['density'], ax=ax_box)
ax_box.axvline(density_mean, color='r', linestyle='--')
ax_box.axvline(density_median, color='g', linestyle='-')
ax_box.axvline(density_mode, color='b', linestyle='-')
sns.distplot(data['density'], ax=ax_hist, fit=stats.norm)
ax_hist.axvline(density_mean, color='r', linestyle='--')
ax_hist.axvline(density_median, color='g', linestyle='-')
ax_hist.axvline(density_mode, color='b', linestyle='-')
plt.legend({'Mean': density_mean, 'Median': density_median, 'Mode': density_mode})
ax_box.set(xlabel='')
plt.show()
code
50233728/cell_9
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)

data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')

table_nan = data.isna().sum().to_frame().style.background_gradient(cmap=cmap)
table_nan

feature_desc = [
    'most acids involved with wine or fixed or nonvolatile (do not evaporate readily)',
    'the amount of acetic acid in wine, which at too high of levels can lead to an unpleasant, vinegar taste',
    'found in small quantities, citric acid can add freshness and flavor to wines',
    "the amount of sugar remaining after fermentation stops, it's rare to find wines with less than 1 gram/liter and wines with greater than 45 grams/liter are considered sweet",
    'the amount of salt in the wine',
    'the free form of SO2 exists in equilibrium between molecular SO2 (as a dissolved gas) and bisulfite ion; it prevents microbial growth and the oxidation of wine',
    'amount of free and bound forms of SO2; in low concentrations, SO2 is mostly undetectable in wine, but at free SO2 concentrations over 50 ppm, SO2 becomes evident in the nose and taste of wine',
    'the density of wine is close to that of water depending on the percent alcohol and sugar content',
    'describes how acidic or basic a wine is on a scale from 0 (very acidic) to 14 (very basic); most wines are between 3-4 on the pH scale',
    'a wine additive which can contribute to sulfur dioxide gas (SO2) levels, which acts as an antimicrobial and antioxidant',
    '-',
    'score between 0 and 10',
]
feature_desc = pd.DataFrame(feature_desc, columns=['Description'], index=data.columns)
data_desc = data.describe().T
data_description = pd.concat([feature_desc, data_desc], axis=1)
data_description
code
50233728/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)

data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')

plt.figure(figsize=(15, 6))
quality_count = data['quality'].value_counts().sort_values(ascending=False).to_frame()
quality_count = quality_count.rename(columns={'quality': 'Count'})
ax = sns.barplot(x=quality_count.index, y='Count', data=quality_count, palette='ch:.25')
for p in ax.patches:
    ax.annotate(format(p.get_height(), '.1f'),
                (p.get_x() + p.get_width() / 2.0, p.get_height()),
                ha='center', va='center', xytext=(0, 9), textcoords='offset points')
plt.xlabel('Quality', fontsize=15)
plt.ylabel('Count', fontsize=15)
plt.title('Bar plot of Quality', fontsize=20)
plt.show()
code
50233728/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)

data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')

table_nan = data.isna().sum().to_frame().style.background_gradient(cmap=cmap)
table_nan
code
50233728/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)

data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')

plt.figure(figsize=(15, 6))
quality_count = data['quality'].value_counts().sort_values(ascending=False).to_frame()
quality_count = quality_count.rename(columns={'quality': 'Count'})
ax = sns.barplot(x=quality_count.index, y='Count', data=quality_count, palette='ch:.25')
for p in ax.patches:
    ax.annotate(format(p.get_height(), '.1f'),
                (p.get_x() + p.get_width() / 2.0, p.get_height()),
                ha='center', va='center', xytext=(0, 9), textcoords='offset points')
plt.xlabel('Quality', fontsize=15)
plt.ylabel('Count', fontsize=15)
plt.title('Bar plot of Quality', fontsize=20)
plt.show()

table_nan = data.isna().sum().to_frame().style.background_gradient(cmap=cmap)
table_nan

feature_desc = [
    'most acids involved with wine or fixed or nonvolatile (do not evaporate readily)',
    'the amount of acetic acid in wine, which at too high of levels can lead to an unpleasant, vinegar taste',
    'found in small quantities, citric acid can add freshness and flavor to wines',
    "the amount of sugar remaining after fermentation stops, it's rare to find wines with less than 1 gram/liter and wines with greater than 45 grams/liter are considered sweet",
    'the amount of salt in the wine',
    'the free form of SO2 exists in equilibrium between molecular SO2 (as a dissolved gas) and bisulfite ion; it prevents microbial growth and the oxidation of wine',
    'amount of free and bound forms of SO2; in low concentrations, SO2 is mostly undetectable in wine, but at free SO2 concentrations over 50 ppm, SO2 becomes evident in the nose and taste of wine',
    'the density of wine is close to that of water depending on the percent alcohol and sugar content',
    'describes how acidic or basic a wine is on a scale from 0 (very acidic) to 14 (very basic); most wines are between 3-4 on the pH scale',
    'a wine additive which can contribute to sulfur dioxide gas (SO2) levels, which acts as an antimicrobial and antioxidant',
    '-',
    'score between 0 and 10',
]
feature_desc = pd.DataFrame(feature_desc, columns=['Description'], index=data.columns)
data_desc = data.describe().T
data_description = pd.concat([feature_desc, data_desc], axis=1)

f, ax = plt.subplots(3, 4, figsize=(25, 15))
sns.despine(left=True)
sns.boxplot(data['fixed acidity'], ax=ax[0, 0])
sns.boxplot(data['volatile acidity'], ax=ax[0, 1])
sns.boxplot(data['citric acid'], ax=ax[0, 2])
sns.boxplot(data['residual sugar'], ax=ax[0, 3])
sns.boxplot(data['chlorides'], ax=ax[1, 0])
sns.boxplot(data['density'], ax=ax[1, 1])
sns.boxplot(data['pH'], ax=ax[1, 2])
sns.boxplot(data['sulphates'], ax=ax[1, 3])
sns.boxplot(data['alcohol'], ax=ax[2, 0])
sns.boxplot(data['total sulfur dioxide'], ax=ax[2, 1])
sns.boxplot(data['free sulfur dioxide'], ax=ax[2, 2])
sns.boxplot(data['quality'], ax=ax[2, 3])
plt.show()
code
50233728/cell_15
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)

data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')

plt.figure(figsize=(15, 6))
quality_count = data['quality'].value_counts().sort_values(ascending=False).to_frame()
quality_count = quality_count.rename(columns={'quality': 'Count'})
ax = sns.barplot(x=quality_count.index, y='Count', data=quality_count, palette='ch:.25')
for p in ax.patches:
    ax.annotate(format(p.get_height(), '.1f'),
                (p.get_x() + p.get_width() / 2.0, p.get_height()),
                ha='center', va='center', xytext=(0, 9), textcoords='offset points')
plt.xlabel('Quality', fontsize=15)
plt.ylabel('Count', fontsize=15)
plt.title('Bar plot of Quality', fontsize=20)
plt.show()

table_nan = data.isna().sum().to_frame().style.background_gradient(cmap=cmap)
table_nan

feature_desc = [
    'most acids involved with wine or fixed or nonvolatile (do not evaporate readily)',
    'the amount of acetic acid in wine, which at too high of levels can lead to an unpleasant, vinegar taste',
    'found in small quantities, citric acid can add freshness and flavor to wines',
    "the amount of sugar remaining after fermentation stops, it's rare to find wines with less than 1 gram/liter and wines with greater than 45 grams/liter are considered sweet",
    'the amount of salt in the wine',
    'the free form of SO2 exists in equilibrium between molecular SO2 (as a dissolved gas) and bisulfite ion; it prevents microbial growth and the oxidation of wine',
    'amount of free and bound forms of SO2; in low concentrations, SO2 is mostly undetectable in wine, but at free SO2 concentrations over 50 ppm, SO2 becomes evident in the nose and taste of wine',
    'the density of wine is close to that of water depending on the percent alcohol and sugar content',
    'describes how acidic or basic a wine is on a scale from 0 (very acidic) to 14 (very basic); most wines are between 3-4 on the pH scale',
    'a wine additive which can contribute to sulfur dioxide gas (SO2) levels, which acts as an antimicrobial and antioxidant',
    '-',
    'score between 0 and 10',
]
feature_desc = pd.DataFrame(feature_desc, columns=['Description'], index=data.columns)
data_desc = data.describe().T
data_description = pd.concat([feature_desc, data_desc], axis=1)

f, ax = plt.subplots(3, 4, figsize=(25, 15))
sns.despine(left=True)
sns.boxplot(data['fixed acidity'], ax=ax[0, 0])
sns.boxplot(data['volatile acidity'], ax=ax[0, 1])
sns.boxplot(data['citric acid'], ax=ax[0, 2])
sns.boxplot(data['residual sugar'], ax=ax[0, 3])
sns.boxplot(data['chlorides'], ax=ax[1, 0])
sns.boxplot(data['density'], ax=ax[1, 1])
sns.boxplot(data['pH'], ax=ax[1, 2])
sns.boxplot(data['sulphates'], ax=ax[1, 3])
sns.boxplot(data['alcohol'], ax=ax[2, 0])
sns.boxplot(data['total sulfur dioxide'], ax=ax[2, 1])
sns.boxplot(data['free sulfur dioxide'], ax=ax[2, 2])
sns.boxplot(data['quality'], ax=ax[2, 3])
plt.show()

density_mean = np.mean(data['density'])
density_median = np.median(data['density'])
density_mode = data['density'].mode()[0]
q1 = data['density'].quantile(0.25)
q3 = data['density'].quantile(0.75)
density_IQR = q3 - q1

f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={'height_ratios': (0.2, 1)})
sns.boxplot(data['density'], ax=ax_box)
ax_box.axvline(density_mean, color='r', linestyle='--')
ax_box.axvline(density_median, color='g', linestyle='-')
ax_box.axvline(density_mode, color='b', linestyle='-')
sns.distplot(data['density'], ax=ax_hist, fit=stats.norm)
ax_hist.axvline(density_mean, color='r', linestyle='--')
ax_hist.axvline(density_median, color='g', linestyle='-')
ax_hist.axvline(density_mode, color='b', linestyle='-')
plt.legend({'Mean': density_mean, 'Median': density_median, 'Mode': density_mode})
ax_box.set(xlabel='')
plt.show()

normal = stats.normaltest(data['density'])
normal

f, (ax_box, ax_dist) = plt.subplots(2, sharex=True, gridspec_kw={'height_ratios': (0.2, 1)})
mean = np.mean(data['pH'])
median = np.median(data['pH'])
mode = data['pH'].mode()[0]
q1 = data['pH'].quantile(0.25)
q3 = data['pH'].quantile(0.75)
IQR = q3 - q1
print('Mean : {}'.format(mean))
print('Median : {}'.format(median))
print('Mode : {}'.format(mode))
print('Inter Quantile Range : {}'.format(IQR))
sns.boxplot(data['pH'], ax=ax_box)
ax_box.axvline(mean, color='r', linestyle='--')
ax_box.axvline(median, color='g', linestyle='--')
ax_box.axvline(mode, color='b', linestyle='--')
sns.distplot(data['pH'], ax=ax_dist, fit=stats.norm)
ax_dist.axvline(mean, color='r', linestyle='--')
ax_dist.axvline(median, color='g', linestyle='--')
ax_dist.axvline(mode, color='b', linestyle='--')
plt.legend({'Mean': mean, 'Median': median, 'Mode': mode})
ax_box.set(xlabel='')
plt.show()
code
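The cell above closes with scipy.stats.normaltest (the D'Agostino-Pearson test, which combines skew and kurtosis into one statistic), and the next cell runs it again on pH. It returns a (statistic, pvalue) pair; a small sketch of the usual reading of that pair on synthetic data (the 0.05 threshold is the conventional choice, not something the notebook fixes):

import numpy as np
import scipy.stats as stats

stat, p = stats.normaltest(np.random.default_rng(0).normal(size=1000))
# A small p-value rejects the null hypothesis that the sample is normal.
print('looks normal' if p > 0.05 else 'not normal', p)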
50233728/cell_16
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)

data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')

plt.figure(figsize=(15, 6))
quality_count = data['quality'].value_counts().sort_values(ascending=False).to_frame()
quality_count = quality_count.rename(columns={'quality': 'Count'})
ax = sns.barplot(x=quality_count.index, y='Count', data=quality_count, palette='ch:.25')
for p in ax.patches:
    ax.annotate(format(p.get_height(), '.1f'),
                (p.get_x() + p.get_width() / 2.0, p.get_height()),
                ha='center', va='center', xytext=(0, 9), textcoords='offset points')
plt.xlabel('Quality', fontsize=15)
plt.ylabel('Count', fontsize=15)
plt.title('Bar plot of Quality', fontsize=20)
plt.show()

table_nan = data.isna().sum().to_frame().style.background_gradient(cmap=cmap)
table_nan

feature_desc = [
    'most acids involved with wine or fixed or nonvolatile (do not evaporate readily)',
    'the amount of acetic acid in wine, which at too high of levels can lead to an unpleasant, vinegar taste',
    'found in small quantities, citric acid can add freshness and flavor to wines',
    "the amount of sugar remaining after fermentation stops, it's rare to find wines with less than 1 gram/liter and wines with greater than 45 grams/liter are considered sweet",
    'the amount of salt in the wine',
    'the free form of SO2 exists in equilibrium between molecular SO2 (as a dissolved gas) and bisulfite ion; it prevents microbial growth and the oxidation of wine',
    'amount of free and bound forms of SO2; in low concentrations, SO2 is mostly undetectable in wine, but at free SO2 concentrations over 50 ppm, SO2 becomes evident in the nose and taste of wine',
    'the density of wine is close to that of water depending on the percent alcohol and sugar content',
    'describes how acidic or basic a wine is on a scale from 0 (very acidic) to 14 (very basic); most wines are between 3-4 on the pH scale',
    'a wine additive which can contribute to sulfur dioxide gas (SO2) levels, which acts as an antimicrobial and antioxidant',
    '-',
    'score between 0 and 10',
]
feature_desc = pd.DataFrame(feature_desc, columns=['Description'], index=data.columns)
data_desc = data.describe().T
data_description = pd.concat([feature_desc, data_desc], axis=1)

f, ax = plt.subplots(3, 4, figsize=(25, 15))
sns.despine(left=True)
sns.boxplot(data['fixed acidity'], ax=ax[0, 0])
sns.boxplot(data['volatile acidity'], ax=ax[0, 1])
sns.boxplot(data['citric acid'], ax=ax[0, 2])
sns.boxplot(data['residual sugar'], ax=ax[0, 3])
sns.boxplot(data['chlorides'], ax=ax[1, 0])
sns.boxplot(data['density'], ax=ax[1, 1])
sns.boxplot(data['pH'], ax=ax[1, 2])
sns.boxplot(data['sulphates'], ax=ax[1, 3])
sns.boxplot(data['alcohol'], ax=ax[2, 0])
sns.boxplot(data['total sulfur dioxide'], ax=ax[2, 1])
sns.boxplot(data['free sulfur dioxide'], ax=ax[2, 2])
sns.boxplot(data['quality'], ax=ax[2, 3])
plt.show()

density_mean = np.mean(data['density'])
density_median = np.median(data['density'])
density_mode = data['density'].mode()[0]
q1 = data['density'].quantile(0.25)
q3 = data['density'].quantile(0.75)
density_IQR = q3 - q1

f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={'height_ratios': (0.2, 1)})
sns.boxplot(data['density'], ax=ax_box)
ax_box.axvline(density_mean, color='r', linestyle='--')
ax_box.axvline(density_median, color='g', linestyle='-')
ax_box.axvline(density_mode, color='b', linestyle='-')
sns.distplot(data['density'], ax=ax_hist, fit=stats.norm)
ax_hist.axvline(density_mean, color='r', linestyle='--')
ax_hist.axvline(density_median, color='g', linestyle='-')
ax_hist.axvline(density_mode, color='b', linestyle='-')
plt.legend({'Mean': density_mean, 'Median': density_median, 'Mode': density_mode})
ax_box.set(xlabel='')
plt.show()

normal = stats.normaltest(data['density'])
normal

f, (ax_box, ax_dist) = plt.subplots(2, sharex=True, gridspec_kw={'height_ratios': (0.2, 1)})
mean = np.mean(data['pH'])
median = np.median(data['pH'])
mode = data['pH'].mode()[0]
q1 = data['pH'].quantile(0.25)
q3 = data['pH'].quantile(0.75)
IQR = q3 - q1
print('Mean : {}'.format(mean))
print('Median : {}'.format(median))
print('Mode : {}'.format(mode))
print('Inter Quantile Range : {}'.format(IQR))
sns.boxplot(data['pH'], ax=ax_box)
ax_box.axvline(mean, color='r', linestyle='--')
ax_box.axvline(median, color='g', linestyle='--')
ax_box.axvline(mode, color='b', linestyle='--')
sns.distplot(data['pH'], ax=ax_dist, fit=stats.norm)
ax_dist.axvline(mean, color='r', linestyle='--')
ax_dist.axvline(median, color='g', linestyle='--')
ax_dist.axvline(mode, color='b', linestyle='--')
plt.legend({'Mean': mean, 'Median': median, 'Mode': mode})
ax_box.set(xlabel='')
plt.show()

normal = stats.normaltest(data['pH'])
normal
code
50233728/cell_3
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)

data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
data.head(4)
code
50233728/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import seaborn as sns

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)
data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')

plt.figure(figsize=(15, 6))
quality_count = data['quality'].value_counts().sort_values(ascending=False).to_frame()
quality_count = quality_count.rename(columns={'quality': 'Count'})
ax = sns.barplot(x=quality_count.index, y='Count', data=quality_count, palette="ch:.25")
for p in ax.patches:
    ax.annotate(format(p.get_height(), '.1f'), (p.get_x() + p.get_width() / 2., p.get_height()), ha='center', va='center', xytext=(0, 9), textcoords='offset points')
plt.xlabel("Quality", fontsize=15)
plt.ylabel("Count", fontsize=15)
plt.title("Bar plot of Quality", fontsize=20)
plt.show()

table_nan = data.isna().sum().to_frame().style.background_gradient(cmap=cmap)
table_nan

feature_desc = ['most acids involved with wine or fixed or nonvolatile (do not evaporate readily)',
                'the amount of acetic acid in wine, which at too high of levels can lead to an unpleasant, vinegar taste',
                'found in small quantities, citric acid can add freshness and flavor to wines',
                "the amount of sugar remaining after fermentation stops, it's rare to find wines with less than 1 gram/liter and wines with greater than 45 grams/liter are considered sweet",
                'the amount of salt in the wine',
                'the free form of SO2 exists in equilibrium between molecular SO2 (as a dissolved gas) and bisulfite ion; it prevents microbial growth and the oxidation of wine',
                'amount of free and bound forms of S02; in low concentrations, SO2 is mostly undetectable in wine, but at free SO2 concentrations over 50 ppm, SO2 becomes evident in the nose and taste of wine',
                'the density of wine is close to that of water depending on the percent alcohol and sugar content',
                'describes how acidic or basic a wine is on a scale from 0 (very acidic) to 14 (very basic); most wines are between 3-4 on the pH scale',
                'a wine additive which can contribute to sulfur dioxide gas (S02) levels, which acts as an antimicrobial and antioxidant',
                '-',
                'score between 0 and 10']
feature_desc = pd.DataFrame(feature_desc, columns=['Description'], index=data.columns)
data_desc = data.describe().T
data_description = pd.concat([feature_desc, data_desc], axis=1)

f, ax = plt.subplots(3, 4, figsize=(25, 15))
sns.despine(left=True)
sns.boxplot(data['fixed acidity'], ax=ax[0, 0])
sns.boxplot(data['volatile acidity'], ax=ax[0, 1])
sns.boxplot(data['citric acid'], ax=ax[0, 2])
sns.boxplot(data['residual sugar'], ax=ax[0, 3])
sns.boxplot(data['chlorides'], ax=ax[1, 0])
sns.boxplot(data['density'], ax=ax[1, 1])
sns.boxplot(data['pH'], ax=ax[1, 2])
sns.boxplot(data['sulphates'], ax=ax[1, 3])
sns.boxplot(data['alcohol'], ax=ax[2, 0])
sns.boxplot(data['total sulfur dioxide'], ax=ax[2, 1])
sns.boxplot(data['free sulfur dioxide'], ax=ax[2, 2])
sns.boxplot(data['quality'], ax=ax[2, 3])
plt.show()

density_mean = np.mean(data['density'])
density_median = np.median(data['density'])
density_mode = data['density'].mode()[0]
q1 = data['density'].quantile(0.25)
q3 = data['density'].quantile(0.75)
density_IQR = q3 - q1

f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={'height_ratios': (0.2, 1)})
sns.boxplot(data["density"], ax=ax_box)
ax_box.axvline(density_mean, color='r', linestyle='--')
ax_box.axvline(density_median, color='g', linestyle='-')
ax_box.axvline(density_mode, color='b', linestyle='-')
sns.distplot(data["density"], ax=ax_hist, fit=stats.norm)
ax_hist.axvline(density_mean, color='r', linestyle='--')
ax_hist.axvline(density_median, color='g', linestyle='-')
ax_hist.axvline(density_mode, color='b', linestyle='-')
plt.legend({'Mean': density_mean, 'Median': density_median, 'Mode': density_mode})
ax_box.set(xlabel='')
plt.show()

normal = stats.normaltest(data['density'])
normal
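# Editor's sketch (not part of the original cell): normaltest returns a
# (statistic, p-value) pair; under the null hypothesis the sample is Gaussian,
# so a small p-value indicates departure from normality. Using the `normal`
# result computed above:
stat, p_value = normal
print('p = {:.4g} -> {}'.format(p_value, 'looks normal' if p_value > 0.05 else 'not normal at the 5% level'))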
code
50233728/cell_12
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import scipy.stats as stats
import matplotlib.pyplot as plt

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)
data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')

table_nan = data.isna().sum().to_frame().style.background_gradient(cmap=cmap)
table_nan

feature_desc = ['most acids involved with wine or fixed or nonvolatile (do not evaporate readily)',
                'the amount of acetic acid in wine, which at too high of levels can lead to an unpleasant, vinegar taste',
                'found in small quantities, citric acid can add freshness and flavor to wines',
                "the amount of sugar remaining after fermentation stops, it's rare to find wines with less than 1 gram/liter and wines with greater than 45 grams/liter are considered sweet",
                'the amount of salt in the wine',
                'the free form of SO2 exists in equilibrium between molecular SO2 (as a dissolved gas) and bisulfite ion; it prevents microbial growth and the oxidation of wine',
                'amount of free and bound forms of S02; in low concentrations, SO2 is mostly undetectable in wine, but at free SO2 concentrations over 50 ppm, SO2 becomes evident in the nose and taste of wine',
                'the density of wine is close to that of water depending on the percent alcohol and sugar content',
                'describes how acidic or basic a wine is on a scale from 0 (very acidic) to 14 (very basic); most wines are between 3-4 on the pH scale',
                'a wine additive which can contribute to sulfur dioxide gas (S02) levels, which acts as an antimicrobial and antioxidant',
                '-',
                'score between 0 and 10']
feature_desc = pd.DataFrame(feature_desc, columns=['Description'], index=data.columns)
data_desc = data.describe().T
data_description = pd.concat([feature_desc, data_desc], axis=1)

density_mean = np.mean(data['density'])
density_median = np.median(data['density'])
density_mode = data['density'].mode()[0]
q1 = data['density'].quantile(0.25)
q3 = data['density'].quantile(0.75)
density_IQR = q3 - q1
print('Mean : {}'.format(density_mean))
print('Median : {}'.format(density_median))
print('Mode : {}'.format(density_mode))
print('Inter Quantile Range : {}'.format(density_IQR))
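# Editor's sketch (assumption: the conventional 1.5*IQR multiplier, not the
# author's code): the quartiles above give the usual Tukey outlier fences
# for `density`:
lower_fence = q1 - 1.5 * density_IQR
upper_fence = q3 + 1.5 * density_IQR
print('Outlier fences: [{:.5f}, {:.5f}]'.format(lower_fence, upper_fence))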
code
50233728/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt

pd.set_option('display.max_columns', None)
sns.set_style(style='darkgrid')
sns.set_palette(palette='pastel')
pd.options.display.max_colwidth = 300
cmap = sns.diverging_palette(220, 10, as_cmap=True)
data = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
data.info()
code
89122282/cell_1
[ "text_plain_output_1.png" ]
import os
import math as m
from decimal import Decimal
from datetime import datetime, timedelta, date

import numpy as np
import pandas as pd
import netCDF4
from netCDF4 import Dataset

files = os.listdir('/kaggle/input/models/wrf-chem')
code
89122282/cell_8
[ "text_html_output_1.png" ]
from netCDF4 import Dataset
import netCDF4
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

def geo_idx(dd, dd_array):
    """
    Search for the nearest decimal degree in an array of decimal degrees
    and return the index. np.argmin returns the indices of the minimum
    value along an axis, so subtract dd from all values in dd_array, take
    the absolute value, and find the index of the minimum.
    """
    geo_idx = np.abs(dd_array - dd).argmin()
    return geo_idx

rootdir = '/kaggle/input/models/wrf-chem/'
curr_date = 20220218
cycle = '12'
ncfile = Dataset(rootdir + str(curr_date) + cycle + '_NOA-WRF-CHEM.nc')
times = ncfile.variables['time']
times_convert = np.array(netCDF4.num2date(times[:], times.long_name), dtype='datetime64[s]')
nTimes = len(times)
d = pd.to_datetime(np.datetime_as_string(times_convert, timezone='UTC', unit='s'))
print(d.strftime('%d/%m/%Y %H:%M'))
sconc_dust = ncfile.variables['SCONC_DUST'][:]
lats = ncfile.variables['latitude'][:]
lons = ncfile.variables['longitude'][:]
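# Editor's sketch (hypothetical point, not from the original cell): geo_idx()
# is a nearest-neighbour lookup on each coordinate axis, so a site can be
# mapped onto the model grid and its dust series extracted, assuming the
# usual (time, lat, lon) axis order of SCONC_DUST:
athens_lat_idx = geo_idx(37.98, lats)  # assumed example coordinates for Athens
athens_lon_idx = geo_idx(23.72, lons)
dust_series = sconc_dust[:, athens_lat_idx, athens_lon_idx]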
code
89122282/cell_10
[ "text_plain_output_1.png" ]
from netCDF4 import Dataset
import netCDF4
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

def geo_idx(dd, dd_array):
    """
    Search for the nearest decimal degree in an array of decimal degrees
    and return the index. np.argmin returns the indices of the minimum
    value along an axis, so subtract dd from all values in dd_array, take
    the absolute value, and find the index of the minimum.
    """
    geo_idx = np.abs(dd_array - dd).argmin()
    return geo_idx

rootdir = '/kaggle/input/models/wrf-chem/'
curr_date = 20220218
cycle = '12'
ncfile = Dataset(rootdir + str(curr_date) + cycle + '_NOA-WRF-CHEM.nc')
times = ncfile.variables['time']
times_convert = np.array(netCDF4.num2date(times[:], times.long_name), dtype='datetime64[s]')
nTimes = len(times)
d = pd.to_datetime(np.datetime_as_string(times_convert, timezone='UTC', unit='s'))
sconc_dust = ncfile.variables['SCONC_DUST'][:]
lats = ncfile.variables['latitude'][:]
lons = ncfile.variables['longitude'][:]

pois = pd.read_csv(rootdir + 'GREECE_POIS_NEW.csv', sep='\t')
pois = pois.dropna(how='all', axis='columns')
poi_name = pois['NAME']
poi_lat = pois['LATITUDE']
poi_lon = pois['LONGITUDE']
poi_alt = pois['ELEVATION']
npois = poi_name.count()
poi_lat_int = poi_lat.astype(int)
poi_lat_dec = (poi_lat - poi_lat_int) * (100.0 / 60.0)
poi_lat_final = poi_lat_int + poi_lat_dec
poi_lon_int = poi_lon.astype(int)
poi_lon_dec = (poi_lon - poi_lon_int) * (100.0 / 60.0)
poi_lon_final = poi_lon_int + poi_lon_dec
pois.head()
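# Editor's note (sketch): the block above interprets LATITUDE/LONGITUDE as
# degrees.minutes (e.g. 37.30 means 37 deg 30 min), so the fractional part
# is rescaled by 100/60 to obtain decimal degrees. Quick sanity check on an
# assumed value:
assert abs((37 + (37.30 - 37) * (100.0 / 60.0)) - 37.5) < 1e-9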
code
89135552/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

sns.set_style('whitegrid')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
x = train.corr()
plt.xticks(rotation=90)

df = pd.DataFrame(train['facility_type'].value_counts().head(10))
df['facility'] = df.index
df.rename(columns={'facility_type': 'count'}, inplace=True)
df['%share'] = df['count'] / df['count'].sum()
df.drop(columns=['count'], inplace=True)
df.reset_index(drop=True)
plt.figure(figsize=(12, 6))
ax = sns.barplot(y='facility', x='%share', data=df)
for i in ax.containers:
    ax.bar_label(i)
plt.title("Distribution of Top 10 Facilities in Buildings in %")
plt.show()

df = pd.DataFrame(train[train['building_class'] == 'Commercial'][['site_eui', 'floor_area', 'year_built', 'ELEVATION', 'State_Factor', 'facility_type']].sort_values(by=['site_eui']).tail(200))
df.dropna()
code
89135552/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
plt.figure(figsize=(18, 6))
plt.subplot(1, 2, 1)
train['State_Factor'].value_counts().plot(kind='pie', autopct='%1.1f%%')
plt.subplot(1, 2, 2)
test['State_Factor'].value_counts().plot(kind='pie', autopct='%1.1f%%')
code
89135552/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
train.info()
code
89135552/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.head()
code
89135552/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

sns.set_style('whitegrid')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
x = train.corr()
plt.xticks(rotation=90)
len(train[train['building_class'] == 'Residential'])
code
89135552/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.head(2)
code
89135552/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

sns.set_style('whitegrid')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
sns.countplot(x='State_Factor', hue='building_class', data=train, order=train['State_Factor'].value_counts().index)
code
89135552/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

sns.set_style('whitegrid')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
x = train.corr()
plt.xticks(rotation=90)

df = pd.DataFrame(train['facility_type'].value_counts().head(10))
df['facility'] = df.index
df.rename(columns={'facility_type': 'count'}, inplace=True)
df['%share'] = df['count'] / df['count'].sum()
df.drop(columns=['count'], inplace=True)
df.reset_index(drop=True)
plt.figure(figsize=(12, 6))
ax = sns.barplot(y='facility', x='%share', data=df)
for i in ax.containers:
    ax.bar_label(i)
plt.title('Distribution of Top 10 Facilities in Buildings in %')
plt.show()
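# Editor's note (sketch): value_counts(normalize=True) returns shares of the
# whole column in one step; the block above instead normalises by the top-10
# subtotal, so the two figures differ slightly:
top10_share = train['facility_type'].value_counts(normalize=True).head(10)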
code
89135552/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89135552/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
code
89135552/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

sns.set_style('whitegrid')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
x = train.corr()
plt.xticks(rotation=90)
train.describe()
code
89135552/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
code
89135552/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

sns.set_style('whitegrid')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
x = train.corr()
plt.figure(figsize=(8, 6))
sns.heatmap(x)
code
89135552/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

sns.set_style('whitegrid')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
x = train.corr()
plt.figure(figsize=(24, 6))
sns.countplot(x='year_built', data=train[train.year_built > 1920])
plt.xticks(rotation=90)
plt.show()
code
89135552/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

sns.set_style('whitegrid')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
x = train.corr()
plt.xticks(rotation=90)

df = pd.DataFrame(train['facility_type'].value_counts().head(10))
df['facility'] = df.index
df.rename(columns={'facility_type': 'count'}, inplace=True)
df['%share'] = df['count'] / df['count'].sum()
df.drop(columns=['count'], inplace=True)
df.reset_index(drop=True)
plt.figure(figsize=(12, 6))
ax = sns.barplot(y='facility', x='%share', data=df)
for i in ax.containers:
    ax.bar_label(i)
plt.title("Distribution of Top 10 Facilities in Buildings in %")
plt.show()

plt.figure(figsize=(12, 8))
sns.histplot(data=train, x='site_eui', hue='State_Factor')
plt.show()
code
89135552/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/widsdatathon2022/train.csv')
test = pd.read_csv('../input/widsdatathon2022/test.csv')
submission = pd.read_csv('../input/widsdatathon2022/sample_solution.csv')
train = pd.read_csv('../input/widsdatathon2022/train.csv')
train.drop(columns=['id'], axis=1, inplace=True)
train.isna().sum()
train.shape
train.head()
code
106202729/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

start_table_df = pd.read_csv('../input/big-data-derby-2022/nyra_start_table.csv')
start_table_df.columns = ['track_id', 'race_date', 'race_number', 'program_number', 'weight_carried', 'jockey', 'odds', 'position_at_finish']
plt.bar(x=start_table_df['track_id'].value_counts().index, height=start_table_df['track_id'].value_counts())
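# Editor's sketch (an alternative, not the author's call): pandas can draw
# the same frequency chart directly from the value_counts Series:
start_table_df['track_id'].value_counts().plot(kind='bar')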
code
106202729/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

start_table_df = pd.read_csv('../input/big-data-derby-2022/nyra_start_table.csv')
start_table_df.columns = ['track_id', 'race_date', 'race_number', 'program_number', 'weight_carried', 'jockey', 'odds', 'position_at_finish']
start_table_df.head()
code
106202729/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

start_table_df = pd.read_csv('../input/big-data-derby-2022/nyra_start_table.csv')
start_table_df.columns = ['track_id', 'race_date', 'race_number', 'program_number', 'weight_carried', 'jockey', 'odds', 'position_at_finish']
start_table_df.info()
code
106202729/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

start_table_df = pd.read_csv('../input/big-data-derby-2022/nyra_start_table.csv')
start_table_df.columns = ['track_id', 'race_date', 'race_number', 'program_number', 'weight_carried', 'jockey', 'odds', 'position_at_finish']
start_table_df['track_id'].value_counts()
code
106202729/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

start_table_df = pd.read_csv('../input/big-data-derby-2022/nyra_start_table.csv')
start_table_df.columns = ['track_id', 'race_date', 'race_number', 'program_number', 'weight_carried', 'jockey', 'odds', 'position_at_finish']
start_table_df.groupby(['race_date'])['race_date'].count()
code
106202729/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

start_table_df = pd.read_csv('../input/big-data-derby-2022/nyra_start_table.csv')
start_table_df.columns = ['track_id', 'race_date', 'race_number', 'program_number', 'weight_carried', 'jockey', 'odds', 'position_at_finish']
start_table_df.groupby(['race_date'])['race_date'].count()
start_table_df.head()
code
128020406/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
len(train_df.columns)
len(train_df.columns[train_df.isnull().any()])
train_df.head()
code
128020406/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
print(dataframe.to_markdown())
code
128020406/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
null_test = df_test.isnull().sum() / df_test.shape[0] * 100
null_test
dataframe_null_test = pd.DataFrame(null_test.sort_values(ascending=False))
col_to_drop_test = null_test[null_test > 50].keys()
test_df = df_test.drop(col_to_drop, axis=1)
len(test_df.columns)
test_df.fillna(test_df.mode().iloc[0], inplace=True)
test_df.isnull().values.sum()
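# Editor's note (sketch): DataFrame.mode() can return several rows when values
# tie, and .iloc[0] above takes the first. A per-dtype strategy (median for
# numeric columns, mode for the rest) is a common refinement -- an assumption,
# not the author's method. It is a no-op here since NaNs were already filled:
num_cols = test_df.select_dtypes('number').columns
test_df[num_cols] = test_df[num_cols].fillna(test_df[num_cols].median())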
code
128020406/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
df_train.head()
code
128020406/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
len(train_df.columns)
len(train_df.columns[train_df.isnull().any()])
train_df.info()
code
128020406/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
print(df_train.columns)
print(len(df_train.columns), 'features present in the training dataset')
code
128020406/cell_29
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
null_test = df_test.isnull().sum() / df_test.shape[0] * 100
null_test
dataframe_null_test = pd.DataFrame(null_test.sort_values(ascending=False))
col_to_drop_test = null_test[null_test > 50].keys()
test_df = df_test.drop(col_to_drop, axis=1)
len(train_df.columns)
len(test_df.columns)
len(train_df.columns[train_df.isnull().any()])
train_df.isnull().values.sum()
test_df.fillna(test_df.mode().iloc[0], inplace=True)
test_df.isnull().values.sum()
train_df.corr()
corr = train_df.corr()
high_corr_features = corr.index[abs(corr['SalePrice']) > 0.5]
train_df = pd.get_dummies(train_df, drop_first=True)
test_df = pd.get_dummies(test_df, drop_first=True)
print(f'Test shape: {test_df.shape}')
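# Editor's sketch (not the author's next step): one-hot encoding train and
# test separately can yield mismatched dummy columns. pandas' align
# (assumption: left join on the train columns, new test columns filled with 0)
# keeps the two matrices compatible; note the aligned test frame would then
# carry a zero-filled SalePrice column that should be dropped before modelling:
train_df, test_df = train_df.align(test_df, join='left', axis=1, fill_value=0)
print(f'Aligned test shape: {test_df.shape}')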
code
128020406/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
len(train_df.columns)
len(train_df.columns[train_df.isnull().any()])
train_df.isnull().values.sum()
train_df.corr()
code
128020406/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
code
128020406/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
len(train_df.columns[train_df.isnull().any()])
code
128020406/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128020406/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
print(df_test.columns)
print(len(df_test.columns), 'features present in the testing dataset')
code
128020406/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
null_test = df_test.isnull().sum() / df_test.shape[0] * 100
null_test
dataframe_null_test = pd.DataFrame(null_test.sort_values(ascending=False))
col_to_drop_test = null_test[null_test > 50].keys()
test_df = df_test.drop(col_to_drop, axis=1)
len(test_df.columns)
code
128020406/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
null_test = df_test.isnull().sum() / df_test.shape[0] * 100
null_test
dataframe_null_test = pd.DataFrame(null_test.sort_values(ascending=False))
len(train_df.columns)
len(train_df.columns[train_df.isnull().any()])
train_df.isnull().values.sum()
train_df.corr()
corr = train_df.corr()
high_corr_features = corr.index[abs(corr['SalePrice']) > 0.5]
train_df = pd.get_dummies(train_df, drop_first=True)
print(f'Train shape: {train_df.shape}')
code
128020406/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
print(df_test.shape, 'shape of testing dataset')
print(df_train.shape, 'shape of training dataset')
code
128020406/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
null_test = df_test.isnull().sum() / df_test.shape[0] * 100
null_test
dataframe_null_test = pd.DataFrame(null_test.sort_values(ascending=False))
print(dataframe_null_test.to_markdown())
code
128020406/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
len(train_df.columns)
code
128020406/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
len(train_df.columns)
len(train_df.columns[train_df.isnull().any()])
train_df.isnull().values.sum()
code
128020406/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null_test = df_test.isnull().sum() / df_test.shape[0] * 100
null_test
code
128020406/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
print(dataframe.to_markdown())
code
128020406/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
col_to_drop = null[null > 50].keys()
train_df = df_train.drop(col_to_drop, axis=1)
len(train_df.columns)
len(train_df.columns[train_df.isnull().any()])
train_df.isnull().values.sum()
train_df.corr()
corr = train_df.corr()
high_corr_features = corr.index[abs(corr['SalePrice']) > 0.5]
print(f'highly correlated feature:\n', high_corr_features)
print(f'No. of highly correlated features:', len(high_corr_features))
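# Editor's sketch (an assumption, not the author's next step): the selected
# index can be used to build a reduced feature matrix, with SalePrice kept
# in as the target:
train_high_corr = train_df[high_corr_features]
print(train_high_corr.shape)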
code
128020406/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dataframe = pd.DataFrame(df_train.isnull().sum().sort_values(ascending=False))
dataframe = pd.DataFrame(df_test.isnull().sum().sort_values(ascending=False))
null = df_train.isnull().sum() / df_train.shape[0] * 100
null
dataframe_null = pd.DataFrame(null.sort_values(ascending=False))
print(dataframe_null.to_markdown())
code
128020406/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
df_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
df_test.head()
code
105185927/cell_13
[ "text_plain_output_1.png" ]
from collections import defaultdict
from functools import lru_cache
from itertools import count
from re import T
from geopy.geocoders import Nominatim
from tqdm.auto import tqdm
import numpy as np
import pandas as pd
import pycountry
import pycountry_convert as pc
import re
import spacy

countries_only = pd.read_json('../input/precise-location/countries.json')
country_ex = pd.read_csv('../input/precise-location/country_iso_codes_expanded.csv')
country_ex = country_ex.fillna(' ')

# Collect every country name in every available language.
contriesSet = []
for i in tqdm(range(len(countries_only))):
    contriesSet.append(countries_only['name'][i])
    contriesSet += list(countries_only['translations'][i].values())
contriesSet = list(set(contriesSet))

# Map every translated/alternative country name to its canonical English name.
dictMultiLangCountry = {}
for i in tqdm(range(len(countries_only))):
    dictMultiLangCountry[countries_only['name'][i]] = countries_only['name'][i]
    mc = list(countries_only['translations'][i].values())
    for c in mc:
        dictMultiLangCountry[c] = countries_only['name'][i]
for i in tqdm(range(len(country_ex))):
    for col in range(26):
        if country_ex[f'alternative_country_name_{col}'][i] != ' ':
            dictMultiLangCountry[country_ex[f'alternative_country_name_{col}'][i]] = country_ex['country'][i]
dictMultiLangCountry['China'] = 'China'
dictMultiLangCountry['SIngapore'] = 'Singapore'
dictMultiLangCountry.pop('')

# Map every city name (including alternate spellings) to its candidate countries.
geoname_cities = pd.read_csv('../input/precise-location/geonames-all-cities-with-a-population-1000.csv', sep=';')
dictAllUnicodeCities = dict()
for i in tqdm(range(len(geoname_cities))):
    if geoname_cities.Name[i] in dictAllUnicodeCities:
        dictAllUnicodeCities[geoname_cities.Name[i]].append(geoname_cities['Country name EN'][i])
        dictAllUnicodeCities[geoname_cities.Name[i]] = list(set(dictAllUnicodeCities[geoname_cities.Name[i]]))
    else:
        dictAllUnicodeCities[geoname_cities.Name[i]] = [geoname_cities['Country name EN'][i]]
    if geoname_cities['Alternate Names'][i] is not np.NaN:
        alt_ = geoname_cities['Alternate Names'][i].split(',')
        for city in alt_:
            if city in dictAllUnicodeCities:
                dictAllUnicodeCities[city].append(geoname_cities['Country name EN'][i])
                dictAllUnicodeCities[city] = list(set(dictAllUnicodeCities[city]))
            else:
                dictAllUnicodeCities[city] = [geoname_cities['Country name EN'][i]]

# Manual fixes for places the gazetteer misses or resolves badly.
dictAllUnicodeCities['Dhaka'] = ['Bangladesh']
dictAllUnicodeCities['Changi'] = ['Singapore']
dictAllUnicodeCities['Admiralty'] = ['Singapore']
dictAllUnicodeCities['North Bridge'] = ['Singapore']
dictAllUnicodeCities['Greater Bay'] = ['China']
dictAllUnicodeCities['Philadelphia'] = ['United States']
dictAllUnicodeCities['Manila'] = ['Philippines']
dictAllUnicodeCities['Irving'] = ['Singapore']
dictAllUnicodeCities['HDB Hub'] = ['Singapore']
dictAllUnicodeCities['s Heerenberg'] = ['Netherlands']
dictAllUnicodeCities['S Heerenberg'] = ['Netherlands']
dictAllUnicodeCities['Durham'] = ['United States']
dictAllUnicodeCities['dhabi'] = ['United Arab Emirates']
dictAllUnicodeCities['Scotland'] = ['United Kingdom']
dictAllUnicodeCities['Middle East'] = ['United Arab Emirates']
dictAllUnicodeCities['Alberta'] = ['Canada']
dictAllUnicodeCities['Colombo'] = ['Sri Lanka']
dictAllUnicodeCities['Phoenix'] = ['United States']
# Drop generic tokens that collide with real place names.
for key in ['Ahmad', 'Road', 'Street', '30', 'North', 'West', 'Bridge', 'Model', 'Spa', 'Park',
            'Bay', 'Home', 'List', 'China', 'Court', 'Wing', 'HDB', 'Al', 'Bin', 'A',
            'Vista', 'Aria', 'No', 'I']:
    dictAllUnicodeCities.pop(key)

countries_cities = pd.read_json('../input/precise-location/countriescities.json')
citiesList = []
for i in range(len(countries_cities)):
    for j in range(len(countries_cities['cities'][i])):
        citiesList.append(countries_cities['cities'][i][j].get('name'))

states_only = pd.read_json('../input/precise-location/states.json')
statesCountriesDict = {}
for i in range(len(states_only)):
    statesCountriesDict[states_only['name'][i]] = states_only['country_name'][i]

countries_states = pd.read_json('../input/precise-location/countriesstates.json')
statesList = []
statesNameToStateCode = defaultdict(list)
statesCodeToStatesName = defaultdict(list)
for i in range(len(countries_states)):
    for j in range(len(countries_states['states'][i])):
        statesList.append(countries_states['states'][i][j].get('name'))
        statesNameToStateCode[countries_states['states'][i][j].get('name')].append(countries_states['states'][i][j].get('state_code'))
        statesCodeToStatesName[countries_states['states'][i][j].get('state_code')].append(countries_states['states'][i][j].get('name'))

def get_ngram(text, WordsToCombine=2):
    text = text.replace(',', '')
    words = text.split()
    output = []
    for i in range(len(words) - WordsToCombine + 1):
        output.append(' '.join(words[i:i + WordsToCombine]))
    return output

def detect_language(text):
    nlp = spacy.blank('xx')
    nlp.add_pipe('language_detector')
    doc = nlp(text)
    return doc._.language

cid_mapper = {country.name: country.alpha_2 for country in pycountry.countries}

@lru_cache(maxsize=128)
def preproc(locstr):
    pattern = re.compile('#[0-9]+\\-[0-9]+|Headquarters|HQ|Town|Court|Access|via|City|Head|Bank|Center|Remote|Building|Office|City|Of| And|Transportation|D.C|feild|Metro|Tn.|Health Care|Health|Care|STE 100|Sector|Tower|[-&/|;:\\"]+|Area|Surrounding|[\\(\\)\\{\\}\\[\\]]|📍 |🌎', flags=re.IGNORECASE)
    return pattern.sub(' ', locstr).strip().rstrip('.')

road_re = re.compile('^.*?(road|street)(?!\\w)', flags=re.IGNORECASE)

def get_road(locstr):
    m = road_re.match(locstr)
    if m:
        return (m.group(), m.group(1))
    else:
        return (locstr, '')

def get_road_idn(locstr):
    if locstr.split(' ')[0] in ('Jl', 'Jl.', 'JL', 'Jalan', 'jl', 'Jln', 'Jln.'):
        return True
    for i in locstr.split(' '):
        for j in ['Jl', 'Jl.', 'JL', 'jl']:
            if i == j:
                return True

@lru_cache(maxsize=128)
def get_all_geoname(locstr):
    try:
        geolocator = Nominatim(user_agent='geopy25', timeout=3)
        location = geolocator.geocode(locstr)
        location = geolocator.reverse('{}, {}'.format(str(location.raw['lat']), str(location.raw['lon'])), exactly_one=True)
        address = location.raw['address']
        city = address.get('city', '')
        state = address.get('state', '')
        country = address.get('country', '')
        return (city, state, country)
    except:
        pass

@lru_cache(maxsize=128)
def get_geoname_from_road(roadstr):
    try:
        geolocator = Nominatim(user_agent='geopy2', timeout=3)
        location = geolocator.geocode(roadstr)
        addr = location.address
        addr = addr.split(',')
        country = addr[-1]
        state = addr[-3]
        city = addr[-4]
        return (city, state, country)
    except:
        pass

def get_result(countries):
    countries = str(countries)
    if countries != '':
        country_id = cid_mapper.get(countries.strip())
    else:
        country_id = ''
    if country_id:
        try:
            continent_code = pc.country_alpha2_to_continent_code(country_id.strip())
        except:
            continent_code = ''
    else:
        continent_code = ''
    if continent_code:
        continent_name = pc.convert_continent_code_to_continent_name(continent_code.strip())
    else:
        continent_name = ''
    return {'country': countries, 'country_code': country_id, 'region': continent_name, 'region_code': continent_code}

def get_locations(locstr):
    if locstr == "":
        return {"city": "", "state": "", "country": "", "country_code": "", "region": "", "region_code": ""}
    states, cities, countries, country_id, continent_code, continent_name = ("", "", "", "", "", "")
    if detect_language(locstr) not in ['ja', 'ko', 'zh']:
        locstr = preproc(locstr)
        locstr = re.sub(r' , ', ', ', locstr)
        locstr = re.sub(r',', ' ', locstr)
        locstr = re.sub(r'\s\s+', ' ', locstr)
        # print(locstr)
        if locstr.isupper() or locstr.islower():
            locstr = locstr.title()
            locstr = ' '.join(re.sub(r"([A-Z]+[a-z])", r" \1", locstr).split())
        # print(locstr)
    # Single-token lookups, scanning from the rightmost word.
    for i in locstr.replace(',', '').split(' ')[::-1]:
        if i in dictMultiLangCountry:
            countries = dictMultiLangCountry[i]
            if (countries != "") or (countries is not None):
                # print('5 ' + countries)
                return get_result(countries)
    for i in locstr.replace(',', '').split(' ')[::-1]:
        if i in dictAllUnicodeCities:
            countries = dictAllUnicodeCities[i]
            if len(countries) == 1:
                countries = countries[0]
                if (countries != "") or (countries is not None):
                    # print('6 ' + countries)
                    return get_result(countries)
            else:
                countries = ""
    # Tri-gram and bi-gram lookups for multi-word place names.
    if countries == "":
        trigram = get_ngram(locstr, 3)
        if countries == "":
            for gram in trigram:
                if gram in dictMultiLangCountry:
                    countries = dictMultiLangCountry[gram]
                    if (countries != "") or (countries is not None):
                        # print('1 ' + countries)
                        return get_result(countries)
                if gram in dictAllUnicodeCities:
                    countries = dictAllUnicodeCities[gram]
                    if len(countries) == 1:
                        countries = countries[0]
                        if (countries != "") or (countries is not None):
                            # print('2 ' + countries)
                            return get_result(countries)
                    else:
                        countries = ""
        bigram = get_ngram(locstr)
        if countries == "":
            for gram in bigram:
                if gram in dictMultiLangCountry:
                    countries = dictMultiLangCountry[gram]
                    if (countries != "") or (countries is not None):
                        # print('3 ' + countries)
                        return get_result(countries)
                if gram in dictAllUnicodeCities:
                    countries = dictAllUnicodeCities[gram]
                    if len(countries) == 1:
                        countries = countries[0]
                        if (countries != "") or (countries is not None):
                            # print('4 ' + countries)
                            return get_result(countries)
                    else:
                        countries = ""
    ############################################################################
    # ROAD REFORMATING (ex: 159, Sin Ming Road # 07-02 Lobby 2 Amtech Building
    #                   --> 159, Sin Ming Road)
    ############################################################################
    locstr, road = get_road(locstr)
    if len(locstr.split(" ")[0]) == 1:
        locstr = locstr[2:]
    if get_road_idn(locstr):
        countries = "Indonesia"
        return get_result(countries)
    ############################################################################
    # ROAD
    ############################################################################
    if re.findall('[0-9]+', locstr) or road:
        try:
            cities, states, countries = get_geoname_from_road(locstr)
            countries = countries.strip()
            if countries != "":
                if countries in dictMultiLangCountry:
                    return get_result(countries)
            if cities:
                countries = dictAllUnicodeCities[cities]
                if len(countries) == 1:
                    countries = countries[0]
                else:
                    countries = ""
                if countries != "":
                    if countries in dictMultiLangCountry:
                        return get_result(countries)
            if states:
                countries = statesCountriesDict[states]
                if countries != "":
                    if countries in dictMultiLangCountry:
                        return get_result(countries)
        except:
            pass
    ############################################################################
    # THIS CODE TO SOLVE CITY - STATE CODE FORMAT (ex: Bonney Lake, WA)
    ############################################################################
    loc_split_by_comma = locstr.split(",")
    if len(loc_split_by_comma) == 2:
        if len(loc_split_by_comma[-1].strip()):
            if statesCodeToStatesName.get(loc_split_by_comma[-1].strip()) and len(statesCodeToStatesName.get(loc_split_by_comma[-1].strip())) > 1:
                if loc_split_by_comma[0] in set(citiesList):
                    cities = loc_split_by_comma[0]
                    try:
                        cities, states, countries = get_all_geoname(locstr)
                        if countries != "":
                            if countries in dictMultiLangCountry:
                                countries = dictMultiLangCountry[countries]
                                return get_result(countries)
                    except:
                        states, countries = "", ""
                    if type(states) == list:
                        states = states[0]
                    if states:
                        countries = statesCountriesDict[states]
                        if countries != "":
                            if countries in dictMultiLangCountry:
                                countries = dictMultiLangCountry[countries]
                                return get_result(countries)
            elif statesCodeToStatesName.get(loc_split_by_comma[-1].strip()) and len(statesCodeToStatesName.get(loc_split_by_comma[-1].strip())) == 1:
                cities = loc_split_by_comma[0]
                states = statesCodeToStatesName.get(loc_split_by_comma[-1].strip())
                if type(states) == list:
                    states = states[0]
                if states:
                    countries = statesCountriesDict[states]
                    if countries != "":
                        if countries in dictMultiLangCountry:
                            countries = dictMultiLangCountry[countries]
                            return get_result(countries)
    else:
        locstr = locstr
    #########################################################################
    # IF CURRENT RESULT JUST HAS A CITY
    #########################################################################
    try:
        if (cities != '') and (states == '') and (countries == ''):
            _, states, countries = get_all_geoname(cities)
            if countries != "":
                if countries in dictMultiLangCountry:
                    countries = dictMultiLangCountry[countries]
                    return get_result(countries)
    except:
        country_id, countries = "", ""
    #########################################################################
    # IF ALL KEY IS NULL / EMPTY STRING
    #########################################################################
    try:
        if ((cities == "") and (states == "") and (countries != '')) or (countries == ''):
            _, _, countries = get_all_geoname(locstr)
            if countries != "":
                if countries in dictMultiLangCountry:
                    countries = dictMultiLangCountry[countries]
                    return get_result(countries)
    except:
        country_id, countries = "", ""
    return {"country": "", "country_code": "", "region": "", "region_code": ""}

sample = 'City of Fond du Lac'
loc = get_locations(sample)
sample = '3501 NW Lowell Street Suite 202, Silverdale, WA 98383'
loc = get_locations(sample)
print(loc)
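# Editor's note (sketch, not the author's code): the public Nominatim endpoint
# is rate-limited to about one request per second. The lru_cache decorators
# above avoid repeat lookups; geopy's RateLimiter is the usual extra safeguard:
from geopy.extra.rate_limiter import RateLimiter
safe_geocode = RateLimiter(Nominatim(user_agent='geopy25', timeout=3).geocode, min_delay_seconds=1)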
code
105185927/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from tqdm.auto import tqdm
import numpy as np
import pandas as pd

countries_only = pd.read_json('../input/precise-location/countries.json')
country_ex = pd.read_csv('../input/precise-location/country_iso_codes_expanded.csv')
country_ex = country_ex.fillna(' ')

# Collect every country name in every available language.
contriesSet = []
for i in tqdm(range(len(countries_only))):
    contriesSet.append(countries_only['name'][i])
    contriesSet += list(countries_only['translations'][i].values())
contriesSet = list(set(contriesSet))

# Map every translated/alternative country name to its canonical English name.
dictMultiLangCountry = {}
for i in tqdm(range(len(countries_only))):
    dictMultiLangCountry[countries_only['name'][i]] = countries_only['name'][i]
    mc = list(countries_only['translations'][i].values())
    for c in mc:
        dictMultiLangCountry[c] = countries_only['name'][i]
for i in tqdm(range(len(country_ex))):
    for col in range(26):
        if country_ex[f'alternative_country_name_{col}'][i] != ' ':
            dictMultiLangCountry[country_ex[f'alternative_country_name_{col}'][i]] = country_ex['country'][i]
dictMultiLangCountry['China'] = 'China'
dictMultiLangCountry['SIngapore'] = 'Singapore'
dictMultiLangCountry.pop('')

# Map every city name (including alternate spellings) to its candidate countries.
geoname_cities = pd.read_csv('../input/precise-location/geonames-all-cities-with-a-population-1000.csv', sep=';')
dictAllUnicodeCities = dict()
for i in tqdm(range(len(geoname_cities))):
    if geoname_cities.Name[i] in dictAllUnicodeCities:
        dictAllUnicodeCities[geoname_cities.Name[i]].append(geoname_cities['Country name EN'][i])
        dictAllUnicodeCities[geoname_cities.Name[i]] = list(set(dictAllUnicodeCities[geoname_cities.Name[i]]))
    else:
        dictAllUnicodeCities[geoname_cities.Name[i]] = [geoname_cities['Country name EN'][i]]
    if geoname_cities['Alternate Names'][i] is not np.NaN:
        alt_ = geoname_cities['Alternate Names'][i].split(',')
        for city in alt_:
            if city in dictAllUnicodeCities:
                dictAllUnicodeCities[city].append(geoname_cities['Country name EN'][i])
                dictAllUnicodeCities[city] = list(set(dictAllUnicodeCities[city]))
            else:
                dictAllUnicodeCities[city] = [geoname_cities['Country name EN'][i]]

# Manual fixes for places the gazetteer misses or resolves badly.
dictAllUnicodeCities['Dhaka'] = ['Bangladesh']
dictAllUnicodeCities['Changi'] = ['Singapore']
dictAllUnicodeCities['Admiralty'] = ['Singapore']
dictAllUnicodeCities['North Bridge'] = ['Singapore']
dictAllUnicodeCities['Greater Bay'] = ['China']
dictAllUnicodeCities['Philadelphia'] = ['United States']
dictAllUnicodeCities['Manila'] = ['Philippines']
dictAllUnicodeCities['Irving'] = ['Singapore']
dictAllUnicodeCities['HDB Hub'] = ['Singapore']
dictAllUnicodeCities['s Heerenberg'] = ['Netherlands']
dictAllUnicodeCities['S Heerenberg'] = ['Netherlands']
dictAllUnicodeCities['Durham'] = ['United States']
dictAllUnicodeCities['dhabi'] = ['United Arab Emirates']
dictAllUnicodeCities['Scotland'] = ['United Kingdom']
dictAllUnicodeCities['Middle East'] = ['United Arab Emirates']
dictAllUnicodeCities['Alberta'] = ['Canada']
dictAllUnicodeCities['Colombo'] = ['Sri Lanka']
dictAllUnicodeCities['Phoenix'] = ['United States']
# Drop generic tokens that collide with real place names.
for key in ['Ahmad', 'Road', 'Street', '30', 'North', 'West', 'Bridge', 'Model', 'Spa', 'Park',
            'Bay', 'Home', 'List', 'China', 'Court', 'Wing', 'HDB', 'Al', 'Bin', 'A',
            'Vista', 'Aria', 'No', 'I']:
    dictAllUnicodeCities.pop(key)
code
105185927/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import nltk
nltk.download('punkt')
nltk.download('words')
nltk.download('maxent_ne_chunker')
nltk.download('averaged_perceptron_tagger')
!python -m spacy download en_core_web_sm

import re
import spacy
import string
import pycountry
import locationtagger
import spacy_fastlang
from rapidfuzz import fuzz
import pycountry_convert as pc
from deep_translator import GoogleTranslator
from geopy.geocoders import Nominatim
from multiprocessing.pool import ThreadPool as Pool
from functools import lru_cache
import warnings
warnings.filterwarnings('ignore')

cid_mapper = {country.name: country.alpha_2 for country in pycountry.countries}
code
105185927/cell_14
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from collections import defaultdict
from functools import lru_cache
from itertools import count
from re import T
from geopy.geocoders import Nominatim
from tqdm.auto import tqdm
import numpy as np
import pandas as pd
import pycountry
import pycountry_convert as pc
import re
import spacy

countries_only = pd.read_json('../input/precise-location/countries.json')
country_ex = pd.read_csv('../input/precise-location/country_iso_codes_expanded.csv')
country_ex = country_ex.fillna(' ')

# Collect every country name in every available language.
contriesSet = []
for i in tqdm(range(len(countries_only))):
    contriesSet.append(countries_only['name'][i])
    contriesSet += list(countries_only['translations'][i].values())
contriesSet = list(set(contriesSet))

# Map every translated/alternative country name to its canonical English name.
dictMultiLangCountry = {}
for i in tqdm(range(len(countries_only))):
    dictMultiLangCountry[countries_only['name'][i]] = countries_only['name'][i]
    mc = list(countries_only['translations'][i].values())
    for c in mc:
        dictMultiLangCountry[c] = countries_only['name'][i]
for i in tqdm(range(len(country_ex))):
    for col in range(26):
        if country_ex[f'alternative_country_name_{col}'][i] != ' ':
            dictMultiLangCountry[country_ex[f'alternative_country_name_{col}'][i]] = country_ex['country'][i]
dictMultiLangCountry['China'] = 'China'
dictMultiLangCountry['SIngapore'] = 'Singapore'
dictMultiLangCountry.pop('')

# Map every city name (including alternate spellings) to its candidate countries.
geoname_cities = pd.read_csv('../input/precise-location/geonames-all-cities-with-a-population-1000.csv', sep=';')
dictAllUnicodeCities = dict()
for i in tqdm(range(len(geoname_cities))):
    if geoname_cities.Name[i] in dictAllUnicodeCities:
        dictAllUnicodeCities[geoname_cities.Name[i]].append(geoname_cities['Country name EN'][i])
        dictAllUnicodeCities[geoname_cities.Name[i]] = list(set(dictAllUnicodeCities[geoname_cities.Name[i]]))
    else:
        dictAllUnicodeCities[geoname_cities.Name[i]] = [geoname_cities['Country name EN'][i]]
    if geoname_cities['Alternate Names'][i] is not np.NaN:
        alt_ = geoname_cities['Alternate Names'][i].split(',')
        for city in alt_:
            if city in dictAllUnicodeCities:
                dictAllUnicodeCities[city].append(geoname_cities['Country name EN'][i])
                dictAllUnicodeCities[city] = list(set(dictAllUnicodeCities[city]))
            else:
                dictAllUnicodeCities[city] = [geoname_cities['Country name EN'][i]]

# Manual fixes for places the gazetteer misses or resolves badly.
dictAllUnicodeCities['Dhaka'] = ['Bangladesh']
dictAllUnicodeCities['Changi'] = ['Singapore']
dictAllUnicodeCities['Admiralty'] = ['Singapore']
dictAllUnicodeCities['North Bridge'] = ['Singapore']
dictAllUnicodeCities['Greater Bay'] = ['China']
dictAllUnicodeCities['Philadelphia'] = ['United States']
dictAllUnicodeCities['Manila'] = ['Philippines']
dictAllUnicodeCities['Irving'] = ['Singapore']
dictAllUnicodeCities['HDB Hub'] = ['Singapore']
dictAllUnicodeCities['s Heerenberg'] = ['Netherlands']
dictAllUnicodeCities['S Heerenberg'] = ['Netherlands']
dictAllUnicodeCities['Durham'] = ['United States']
dictAllUnicodeCities['dhabi'] = ['United Arab Emirates']
dictAllUnicodeCities['Scotland'] = ['United Kingdom']
dictAllUnicodeCities['Middle East'] = ['United Arab Emirates']
dictAllUnicodeCities['Alberta'] = ['Canada']
dictAllUnicodeCities['Colombo'] = ['Sri Lanka']
dictAllUnicodeCities['Phoenix'] = ['United States']
# Drop generic tokens that collide with real place names.
for key in ['Ahmad', 'Road', 'Street', '30', 'North', 'West', 'Bridge', 'Model', 'Spa', 'Park',
            'Bay', 'Home', 'List', 'China', 'Court', 'Wing', 'HDB', 'Al', 'Bin', 'A',
            'Vista', 'Aria', 'No', 'I']:
    dictAllUnicodeCities.pop(key)

countries_cities = pd.read_json('../input/precise-location/countriescities.json')
citiesList = []
for i in range(len(countries_cities)):
    for j in range(len(countries_cities['cities'][i])):
        citiesList.append(countries_cities['cities'][i][j].get('name'))

states_only = pd.read_json('../input/precise-location/states.json')
statesCountriesDict = {}
for i in range(len(states_only)):
    statesCountriesDict[states_only['name'][i]] = states_only['country_name'][i]

countries_states = pd.read_json('../input/precise-location/countriesstates.json')
statesList = []
statesNameToStateCode = defaultdict(list)
statesCodeToStatesName = defaultdict(list)
for i in range(len(countries_states)):
    for j in range(len(countries_states['states'][i])):
        statesList.append(countries_states['states'][i][j].get('name'))
        statesNameToStateCode[countries_states['states'][i][j].get('name')].append(countries_states['states'][i][j].get('state_code'))
        statesCodeToStatesName[countries_states['states'][i][j].get('state_code')].append(countries_states['states'][i][j].get('name'))

def get_ngram(text, WordsToCombine=2):
    text = text.replace(',', '')
    words = text.split()
    output = []
    for i in range(len(words) - WordsToCombine + 1):
        output.append(' '.join(words[i:i + WordsToCombine]))
    return output

def detect_language(text):
    nlp = spacy.blank('xx')
    nlp.add_pipe('language_detector')
    doc = nlp(text)
    return doc._.language

cid_mapper = {country.name: country.alpha_2 for country in pycountry.countries}

@lru_cache(maxsize=128)
def preproc(locstr):
    pattern = re.compile('#[0-9]+\\-[0-9]+|Headquarters|HQ|Town|Court|Access|via|City|Head|Bank|Center|Remote|Building|Office|City|Of| And|Transportation|D.C|feild|Metro|Tn.|Health Care|Health|Care|STE 100|Sector|Tower|[-&/|;:\\"]+|Area|Surrounding|[\\(\\)\\{\\}\\[\\]]|📍 |🌎', flags=re.IGNORECASE)
    return pattern.sub(' ', locstr).strip().rstrip('.')

road_re = re.compile('^.*?(road|street)(?!\\w)', flags=re.IGNORECASE)

def get_road(locstr):
    m = road_re.match(locstr)
    if m:
        return (m.group(), m.group(1))
    else:
        return (locstr, '')

def get_road_idn(locstr):
    if locstr.split(' ')[0] in ('Jl', 'Jl.', 'JL', 'Jalan', 'jl', 'Jln', 'Jln.'):
        return True
    for i in locstr.split(' '):
        for j in ['Jl', 'Jl.', 'JL', 'jl']:
            if i == j:
                return True

@lru_cache(maxsize=128)
def get_all_geoname(locstr):
    try:
        geolocator = Nominatim(user_agent='geopy25', timeout=3)
        location = geolocator.geocode(locstr)
        location = geolocator.reverse('{}, {}'.format(str(location.raw['lat']), str(location.raw['lon'])), exactly_one=True)
        address = location.raw['address']
        city = address.get('city', '')
        state = address.get('state', '')
        country = address.get('country', '')
        return (city, state, country)
    except:
        pass

@lru_cache(maxsize=128)
def get_geoname_from_road(roadstr):
    try:
        geolocator = Nominatim(user_agent='geopy2', timeout=3)
        location = geolocator.geocode(roadstr)
        addr = location.address
        addr = addr.split(',')
        country = addr[-1]
        state = addr[-3]
        city = addr[-4]
        return (city, state, country)
    except:
        pass

def get_result(countries):
    countries = str(countries)
    if countries != '':
        country_id = cid_mapper.get(countries.strip())
    else:
        country_id = ''
    if country_id:
        try:
            continent_code = pc.country_alpha2_to_continent_code(country_id.strip())
        except:
            continent_code = ''
    else:
        continent_code = ''
    if continent_code:
        continent_name = pc.convert_continent_code_to_continent_name(continent_code.strip())
    else:
        continent_name = ''
    return {'country': countries, 'country_code': country_id, 'region': continent_name, 'region_code': continent_code}

def get_locations(locstr):
    if locstr == "":
        return {"city": "", "state": "", "country": "", "country_code": "", "region": "", "region_code": ""}
    states, cities, countries, country_id, continent_code, continent_name = ("", "", "", "", "", "")
    if detect_language(locstr) not in ['ja', 'ko', 'zh']:
        locstr = preproc(locstr)
        locstr = re.sub(r' , ', ', ', locstr)
        locstr = re.sub(r',', ' ', locstr)
        locstr = re.sub(r'\s\s+', ' ', locstr)
        # print(locstr)
        if locstr.isupper() or locstr.islower():
            locstr = locstr.title()
            locstr = ' '.join(re.sub(r"([A-Z]+[a-z])", r" \1", locstr).split())
        # print(locstr)
    # Single-token lookups, scanning from the rightmost word.
    for i in locstr.replace(',', '').split(' ')[::-1]:
        if i in dictMultiLangCountry:
            countries = dictMultiLangCountry[i]
            if (countries != "") or (countries is not None):
                # print('5 ' + countries)
                return get_result(countries)
    for i in locstr.replace(',', '').split(' ')[::-1]:
        if i in dictAllUnicodeCities:
            countries = dictAllUnicodeCities[i]
            if len(countries) == 1:
                countries = countries[0]
                if (countries != "") or (countries is not None):
                    # print('6 ' + countries)
                    return get_result(countries)
            else:
                countries = ""
    # Tri-gram and bi-gram lookups for multi-word place names.
    if countries == "":
        trigram = get_ngram(locstr, 3)
        if countries == "":
            for gram in trigram:
                if gram in dictMultiLangCountry:
                    countries = dictMultiLangCountry[gram]
                    if (countries != "") or (countries is not None):
                        # print('1 ' + countries)
                        return get_result(countries)
                if gram in dictAllUnicodeCities:
                    countries = dictAllUnicodeCities[gram]
                    if len(countries) == 1:
                        countries = countries[0]
                        if (countries != "") or (countries is not None):
                            # print('2 ' + countries)
                            return get_result(countries)
                    else:
                        countries = ""
        bigram = get_ngram(locstr)
        if countries == "":
            for gram in bigram:
                if gram in dictMultiLangCountry:
                    countries = dictMultiLangCountry[gram]
                    if (countries != "") or (countries is not None):
                        # print('3 ' + countries)
                        return get_result(countries)
                if gram in dictAllUnicodeCities:
                    countries = dictAllUnicodeCities[gram]
                    if len(countries) == 1:
                        countries = countries[0]
                        if (countries != "") or (countries is not None):
                            # print('4 ' + countries)
                            return get_result(countries)
                    else:
                        countries = ""
    ############################################################################
    # ROAD REFORMATING (ex: 159, Sin Ming Road # 07-02 Lobby 2 Amtech Building
    #                   --> 159, Sin Ming Road)
    ############################################################################
    locstr, road = get_road(locstr)
    if len(locstr.split(" ")[0]) == 1:
        locstr = locstr[2:]
    if get_road_idn(locstr):
        countries = "Indonesia"
        return get_result(countries)
    ############################################################################
    # ROAD
    ############################################################################
    if re.findall('[0-9]+', locstr) or road:
        try:
            cities, states, countries = get_geoname_from_road(locstr)
            countries = countries.strip()
            if countries != "":
                if countries in dictMultiLangCountry:
                    return get_result(countries)
            if cities:
                countries = dictAllUnicodeCities[cities]
                if len(countries) == 1:
                    countries = countries[0]
                else:
                    countries = ""
                if countries != "":
                    if countries in dictMultiLangCountry:
                        return get_result(countries)
            if states:
                countries = statesCountriesDict[states]
                if countries != "":
                    if countries in dictMultiLangCountry:
                        return get_result(countries)
        except:
            pass
    ############################################################################
    # THIS CODE TO SOLVE CITY - STATE CODE FORMAT (ex: Bonney Lake, WA)
    ############################################################################
    loc_split_by_comma = locstr.split(",")
    if len(loc_split_by_comma) == 2:
        if len(loc_split_by_comma[-1].strip()):
            if statesCodeToStatesName.get(loc_split_by_comma[-1].strip()) and len(statesCodeToStatesName.get(loc_split_by_comma[-1].strip())) > 1:
                if loc_split_by_comma[0] in set(citiesList):
                    cities = loc_split_by_comma[0]
                    try:
                        cities, states, countries = get_all_geoname(locstr)
                        if countries != "":
                            if countries in dictMultiLangCountry:
                                countries = dictMultiLangCountry[countries]
                                return get_result(countries)
                    except:
                        states, countries = "", ""
                    if type(states) == list:
                        states = states[0]
                    if states:
                        countries = statesCountriesDict[states]
                        if countries != "":
                            if countries in dictMultiLangCountry:
                                countries = dictMultiLangCountry[countries]
                                return get_result(countries)
            elif statesCodeToStatesName.get(loc_split_by_comma[-1].strip()) and len(statesCodeToStatesName.get(loc_split_by_comma[-1].strip())) == 1:
                cities = loc_split_by_comma[0]
                states = statesCodeToStatesName.get(loc_split_by_comma[-1].strip())
                if type(states) == list:
                    states = states[0]
                if states:
                    countries = statesCountriesDict[states]
                    if countries != "":
                        if countries in dictMultiLangCountry:
                            countries = dictMultiLangCountry[countries]
                            return get_result(countries)
    else:
        locstr = locstr
    #########################################################################
    # IF CURRENT RESULT JUST HAS A CITY
    #########################################################################
    try:
        if (cities != '') and (states == '') and (countries == ''):
            _, states, countries = get_all_geoname(cities)
            if countries != "":
                if countries in dictMultiLangCountry:
                    countries = dictMultiLangCountry[countries]
                    return get_result(countries)
    except:
        country_id, countries = "", ""
    #########################################################################
    # IF ALL KEY IS NULL / EMPTY STRING
    #########################################################################
    try:
        if ((cities == "") and (states == "") and (countries != '')) or (countries == ''):
            _, _, countries = get_all_geoname(locstr)
            if countries != "":
                if countries in dictMultiLangCountry:
                    countries = dictMultiLangCountry[countries]
                    return get_result(countries)
    except:
        country_id, countries = "", ""
    return {"country": "", "country_code": "", "region": "", "region_code": ""}

sample = 'City of Fond du Lac'
loc = get_locations(sample)
sample = '3501 NW Lowell Street Suite 202, Silverdale, WA 98383'
loc = get_locations(sample)
sample = '8A Admiralty Road'
loc = get_locations(sample)
print(loc)
code
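The cell above resolves free-text location strings through dictionary lookups with a geopy Nominatim fallback. A minimal sketch of that fallback pattern, assuming network access and an installed geopy; the user_agent string and the helper name lookup_country are illustrative, not part of the original notebook:

from geopy.geocoders import Nominatim

def lookup_country(query):
    # Forward-geocode a free-text string and read the parsed country field
    geolocator = Nominatim(user_agent='location-demo', timeout=3)
    location = geolocator.geocode(query, addressdetails=True)
    if location is None:
        return ''
    return location.raw.get('address', {}).get('country', '')

# Example (result depends on the live Nominatim service):
# lookup_country('8A Admiralty Road')  ->  'Singapore'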
105185927/cell_12
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from collections import defaultdict
from functools import lru_cache
from geopy.geocoders import Nominatim
from tqdm.auto import tqdm
import numpy as np
import pandas as pd
import pycountry
import pycountry_convert as pc
import re
import spacy

# Multilingual country-name lookup built from the reference files
countries_only = pd.read_json('../input/precise-location/countries.json')
country_ex = pd.read_csv('../input/precise-location/country_iso_codes_expanded.csv')
country_ex = country_ex.fillna(' ')
countriesSet = []
for i in tqdm(range(len(countries_only))):
    countriesSet.append(countries_only['name'][i])
    countriesSet += list(countries_only['translations'][i].values())
countriesSet = list(set(countriesSet))
dictMultiLangCountry = {}
for i in tqdm(range(len(countries_only))):
    dictMultiLangCountry[countries_only['name'][i]] = countries_only['name'][i]
    mc = list(countries_only['translations'][i].values())
    for c in mc:
        dictMultiLangCountry[c] = countries_only['name'][i]
for i in tqdm(range(len(country_ex))):
    for col in range(26):
        if country_ex[f'alternative_country_name_{col}'][i] != ' ':
            dictMultiLangCountry[country_ex[f'alternative_country_name_{col}'][i]] = country_ex['country'][i]
dictMultiLangCountry['China'] = 'China'
dictMultiLangCountry['SIngapore'] = 'Singapore'
dictMultiLangCountry.pop('')

# GeoNames cities (and their alternate spellings) mapped to candidate countries
geoname_cities = pd.read_csv('../input/precise-location/geonames-all-cities-with-a-population-1000.csv', sep=';')
dictAllUnicodeCities = dict()
for i in tqdm(range(len(geoname_cities))):
    if geoname_cities.Name[i] in dictAllUnicodeCities:
        dictAllUnicodeCities[geoname_cities.Name[i]].append(geoname_cities['Country name EN'][i])
        dictAllUnicodeCities[geoname_cities.Name[i]] = list(set(dictAllUnicodeCities[geoname_cities.Name[i]]))
    else:
        dictAllUnicodeCities[geoname_cities.Name[i]] = [geoname_cities['Country name EN'][i]]
    if pd.notna(geoname_cities['Alternate Names'][i]):
        alt_ = geoname_cities['Alternate Names'][i].split(',')
        for city in alt_:
            if city in dictAllUnicodeCities:
                dictAllUnicodeCities[city].append(geoname_cities['Country name EN'][i])
                dictAllUnicodeCities[city] = list(set(dictAllUnicodeCities[city]))
            else:
                dictAllUnicodeCities[city] = [geoname_cities['Country name EN'][i]]

# Manual additions for locations the GeoNames dump misses or mislabels
dictAllUnicodeCities['Dhaka'] = ['Bangladesh']
dictAllUnicodeCities['Changi'] = ['Singapore']
dictAllUnicodeCities['Admiralty'] = ['Singapore']
dictAllUnicodeCities['North Bridge'] = ['Singapore']
dictAllUnicodeCities['Greater Bay'] = ['China']
dictAllUnicodeCities['Philadelphia'] = ['United States']
dictAllUnicodeCities['Manila'] = ['Philippines']
dictAllUnicodeCities['Irving'] = ['Singapore']
dictAllUnicodeCities['HDB Hub'] = ['Singapore']
dictAllUnicodeCities['s Heerenberg'] = ['Netherlands']
dictAllUnicodeCities['S Heerenberg'] = ['Netherlands']
dictAllUnicodeCities['Durham'] = ['United States']
dictAllUnicodeCities['dhabi'] = ['United Arab Emirates']
dictAllUnicodeCities['Scotland'] = ['United Kingdom']
dictAllUnicodeCities['Middle East'] = ['United Arab Emirates']
dictAllUnicodeCities['Alberta'] = ['Canada']
dictAllUnicodeCities['Colombo'] = ['Sri Lanka']
dictAllUnicodeCities['Phoenix'] = ['United States']
# Drop city keys that collide with common address words
dictAllUnicodeCities.pop('Ahmad')
dictAllUnicodeCities.pop('Road')
dictAllUnicodeCities.pop('Street')
dictAllUnicodeCities.pop('30')
dictAllUnicodeCities.pop('North')
dictAllUnicodeCities.pop('West')
dictAllUnicodeCities.pop('Bridge')
dictAllUnicodeCities.pop('Model')
dictAllUnicodeCities.pop('Spa')
dictAllUnicodeCities.pop('Park')
dictAllUnicodeCities.pop('Bay')
dictAllUnicodeCities.pop('Home')
dictAllUnicodeCities.pop('List')
dictAllUnicodeCities.pop('China')
dictAllUnicodeCities.pop('Court')
dictAllUnicodeCities.pop('Wing')
dictAllUnicodeCities.pop('HDB')
dictAllUnicodeCities.pop('Al')
dictAllUnicodeCities.pop('Bin')
dictAllUnicodeCities.pop('A')
dictAllUnicodeCities.pop('Vista')
dictAllUnicodeCities.pop('Aria')
dictAllUnicodeCities.pop('No')
dictAllUnicodeCities.pop('I')

countries_cities = pd.read_json('../input/precise-location/countriescities.json')
citiesList = []
for i in range(len(countries_cities)):
    for j in range(len(countries_cities['cities'][i])):
        citiesList.append(countries_cities['cities'][i][j].get('name'))

states_only = pd.read_json('../input/precise-location/states.json')
statesCountriesDict = {}
for i in range(len(states_only)):
    statesCountriesDict[states_only['name'][i]] = states_only['country_name'][i]

countries_states = pd.read_json('../input/precise-location/countriesstates.json')
statesList = []
statesNameToStateCode = defaultdict(list)
statesCodeToStatesName = defaultdict(list)
for i in range(len(countries_states)):
    for j in range(len(countries_states['states'][i])):
        statesList.append(countries_states['states'][i][j].get('name'))
        statesNameToStateCode[countries_states['states'][i][j].get('name')].append(countries_states['states'][i][j].get('state_code'))
        statesCodeToStatesName[countries_states['states'][i][j].get('state_code')].append(countries_states['states'][i][j].get('name'))

def get_ngram(text, WordsToCombine=2):
    text = text.replace(',', '')
    words = text.split()
    output = []
    for i in range(len(words) - WordsToCombine + 1):
        output.append(' '.join(words[i:i + WordsToCombine]))
    return output

def detect_language(text):
    nlp = spacy.blank('xx')
    nlp.add_pipe('language_detector')
    doc = nlp(text)
    return doc._.language

cid_mapper = {country.name: country.alpha_2 for country in pycountry.countries}

@lru_cache(maxsize=128)
def preproc(locstr):
    pattern = re.compile('#[0-9]+\\-[0-9]+|Headquarters|HQ|Town|Court|Access|via|City|Head|Bank|Center|Remote|Building|Office|City|Of| And|Transportation|D.C|feild|Metro|Tn.|Health Care|Health|Care|STE 100|Sector|Tower|[-&/|;:\\"]+|Area|Surrounding|[\\(\\)\\{\\}\\[\\]]|📍 |🌎', flags=re.IGNORECASE)
    return pattern.sub(' ', locstr).strip().rstrip('.')

road_re = re.compile('^.*?(road|street)(?!\\w)', flags=re.IGNORECASE)

def get_road(locstr):
    m = road_re.match(locstr)
    if m:
        return (m.group(), m.group(1))
    else:
        return (locstr, '')

def get_road_idn(locstr):
    # 'Jalan'/'Jl' prefixes mark Indonesian street addresses
    if locstr.split(' ')[0] in ('Jl', 'Jl.', 'JL', 'Jalan', 'jl', 'Jln', 'Jln.'):
        return True
    for i in locstr.split(' '):
        for j in ['Jl', 'Jl.', 'JL', 'jl']:
            if i == j:
                return True
    return False

@lru_cache(maxsize=128)
def get_all_geoname(locstr):
    try:
        geolocator = Nominatim(user_agent='geopy25', timeout=3)
        location = geolocator.geocode(locstr)
        location = geolocator.reverse('{}, {}'.format(str(location.raw['lat']), str(location.raw['lon'])), exactly_one=True)
        address = location.raw['address']
        city = address.get('city', '')
        state = address.get('state', '')
        country = address.get('country', '')
        return (city, state, country)
    except:
        pass

@lru_cache(maxsize=128)
def get_geoname_from_road(roadstr):
    try:
        geolocator = Nominatim(user_agent='geopy2', timeout=3)
        location = geolocator.geocode(roadstr)
        addr = location.address
        addr = addr.split(',')
        country = addr[-1]
        state = addr[-3]
        city = addr[-4]
        return (city, state, country)
    except:
        pass

def get_result(countries):
    countries = str(countries)
    if countries != '':
        country_id = cid_mapper.get(countries.strip())
    else:
        country_id = ''
    if country_id:
        try:
            continent_code = pc.country_alpha2_to_continent_code(country_id.strip())
        except:
            continent_code = ''
    else:
        continent_code = ''
    if continent_code:
        continent_name = pc.convert_continent_code_to_continent_name(continent_code.strip())
    else:
        continent_name = ''
    return {'country': countries, 'country_code': country_id, 'region': continent_name, 'region_code': continent_code}

def get_locations(locstr):
    if locstr == '':
        return {'city': '', 'state': '', 'country': '', 'country_code': '', 'region': '', 'region_code': ''}
    states, cities, countries, country_id, continent_code, continent_name = ('', '', '', '', '', '')
    if detect_language(locstr) not in ['ja', 'ko', 'zh']:
        locstr = preproc(locstr)
        locstr = re.sub(r' , ', ', ', locstr)
        locstr = re.sub(r',', ' ', locstr)
        locstr = re.sub(r'\s\s+', ' ', locstr)
        # print(locstr)
        if locstr.isupper() or locstr.islower():
            locstr = locstr.title()
        locstr = ' '.join(re.sub(r"([A-Z]+[a-z])", r" \1", locstr).split())
        # print(locstr)
    # Token-level lookups, scanning from the end of the string
    for i in locstr.replace(',', '').split(' ')[::-1]:
        if i in dictMultiLangCountry:
            countries = dictMultiLangCountry[i]
            if countries != '' and countries is not None:
                # print('5 ' + countries)
                return get_result(countries)
    for i in locstr.replace(',', '').split(' ')[::-1]:
        if i in dictAllUnicodeCities:
            countries = dictAllUnicodeCities[i]
            if len(countries) == 1:
                countries = countries[0]
                if countries != '' and countries is not None:
                    # print('6 ' + countries)
                    return get_result(countries)
            else:
                countries = ''
    # N-gram lookups for multi-word names
    if countries == '':
        trigram = get_ngram(locstr, 3)
        for gram in trigram:
            if gram in dictMultiLangCountry:
                countries = dictMultiLangCountry[gram]
                if countries != '' and countries is not None:
                    # print('1 ' + countries)
                    return get_result(countries)
            if gram in dictAllUnicodeCities:
                countries = dictAllUnicodeCities[gram]
                if len(countries) == 1:
                    countries = countries[0]
                    if countries != '' and countries is not None:
                        # print('2 ' + countries)
                        return get_result(countries)
                else:
                    countries = ''
        bigram = get_ngram(locstr)
        if countries == '':
            for gram in bigram:
                if gram in dictMultiLangCountry:
                    countries = dictMultiLangCountry[gram]
                    if countries != '' and countries is not None:
                        # print('3 ' + countries)
                        return get_result(countries)
                if gram in dictAllUnicodeCities:
                    countries = dictAllUnicodeCities[gram]
                    if len(countries) == 1:
                        countries = countries[0]
                        if countries != '' and countries is not None:
                            # print('4 ' + countries)
                            return get_result(countries)
                    else:
                        countries = ''
    ############################################################################
    # ROAD REFORMATTING (ex: 159, Sin Ming Road #07-02 Lobby 2 Amtech Building
    #                     --> 159, Sin Ming Road)
    ############################################################################
    locstr, road = get_road(locstr)
    if len(locstr.split(' ')[0]) == 1:
        locstr = locstr[2:]
    if get_road_idn(locstr):
        countries = 'Indonesia'
        return get_result(countries)
    ############################################################################
    # ROAD
    ############################################################################
    if re.findall('[0-9]+', locstr) or road:
        try:
            cities, states, countries = get_geoname_from_road(locstr)
            countries = countries.strip()
            if countries != '':
                if countries in dictMultiLangCountry:
                    return get_result(countries)
            if cities:
                countries = dictAllUnicodeCities[cities]
                if len(countries) == 1:
                    countries = countries[0]
                else:
                    countries = ''
                if countries != '':
                    if countries in dictMultiLangCountry:
                        return get_result(countries)
            if states:
                countries = statesCountriesDict[states]
                if countries != '':
                    if countries in dictMultiLangCountry:
                        return get_result(countries)
        except:
            pass
    ############################################################################
    # CITY + STATE-CODE FORMAT (ex: Bonney Lake, WA)
    ############################################################################
    loc_split_by_comma = locstr.split(',')
    if len(loc_split_by_comma) == 2:
        if len(loc_split_by_comma[-1].strip()):
            if statesCodeToStatesName.get(loc_split_by_comma[-1].strip()) and len(statesCodeToStatesName.get(loc_split_by_comma[-1].strip())) > 1:
                # Ambiguous state code: geocode the whole string
                if loc_split_by_comma[0] in set(citiesList):
                    cities = loc_split_by_comma[0]
                try:
                    cities, states, countries = get_all_geoname(locstr)
                    if countries != '':
                        if countries in dictMultiLangCountry:
                            countries = dictMultiLangCountry[countries]
                            return get_result(countries)
                except:
                    states, countries = ('', '')
                if type(states) == list:
                    states = states[0]
                if states:
                    countries = statesCountriesDict[states]
                    if countries != '':
                        if countries in dictMultiLangCountry:
                            countries = dictMultiLangCountry[countries]
                            return get_result(countries)
            elif statesCodeToStatesName.get(loc_split_by_comma[-1].strip()) and len(statesCodeToStatesName.get(loc_split_by_comma[-1].strip())) == 1:
                # Unambiguous state code: map it straight to its state and country
                cities = loc_split_by_comma[0]
                states = statesCodeToStatesName.get(loc_split_by_comma[-1].strip())
                if type(states) == list:
                    states = states[0]
                if states:
                    countries = statesCountriesDict[states]
                    if countries != '':
                        if countries in dictMultiLangCountry:
                            countries = dictMultiLangCountry[countries]
                            return get_result(countries)
    #########################################################################
    # IF THE CURRENT RESULT ONLY HAS A CITY
    #########################################################################
    try:
        if cities != '' and states == '' and countries == '':
            _, states, countries = get_all_geoname(cities)
            if countries != '':
                if countries in dictMultiLangCountry:
                    countries = dictMultiLangCountry[countries]
                    return get_result(countries)
    except:
        country_id, countries = ('', '')
    #########################################################################
    # IF ALL KEYS ARE STILL NULL / EMPTY
    #########################################################################
    try:
        if (cities == '' and states == '' and countries != '') or countries == '':
            _, _, countries = get_all_geoname(locstr)
            if countries != '':
                if countries in dictMultiLangCountry:
                    countries = dictMultiLangCountry[countries]
                    return get_result(countries)
    except:
        country_id, countries = ('', '')
    return {'country': '', 'country_code': '', 'region': '', 'region_code': ''}

sample = 'City of Fond du Lac'
loc = get_locations(sample)
print(loc)
code
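Both cells above rely on pycountry and pycountry_convert to turn a resolved country name into ISO and continent codes. A short sketch of that chain, assuming both packages are installed; the helper name country_to_region is illustrative:

import pycountry
import pycountry_convert as pc

def country_to_region(name):
    # Country name -> ISO alpha-2 -> continent code -> continent name
    country = pycountry.countries.get(name=name)
    if country is None:
        return ('', '')
    continent_code = pc.country_alpha2_to_continent_code(country.alpha_2)
    continent_name = pc.convert_continent_code_to_continent_name(continent_code)
    return (country.alpha_2, continent_name)

# country_to_region('Singapore')  ->  ('SG', 'Asia')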
17133813/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import pandas as pd

df = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
sns.set_style('whitegrid')
g = sns.catplot(x='Purchase', y='Gender', col='Age', data=df.sort_values(by=['Age']), col_wrap=3, orient='h', height=2, aspect=3, palette='Set3', kind='violin', dodge=True, bw=0.2)

df_target = df.groupby('Product_ID')['Product_ID'].count().reset_index(name='count').sort_values(['count'], ascending=False).head(10).reset_index(drop=True)
sns.set(style='whitegrid')
ax = sns.barplot(x='Product_ID', y='count', data=df_target)
ax.set_xlabel('Produtos')
ax.set_ylabel('Total vendido')
for item in ax.get_xticklabels():
    item.set_rotation(90)
for i in range(len(df_target['Product_ID'])):
    plt.text(x=i - 0.3, y=df_target.loc[i, 'count'] + 20, s=df_target.loc[i, 'count'], size=8, color='Blue')
plt.show()

occupation_order = list(df['Occupation'].value_counts().head(5).index)
df_target = df[df['Occupation'].isin(occupation_order)].sort_values(by='Age')
plt.figure(figsize=(20, 10))
g = sns.boxplot(x='Occupation', y='Purchase', hue='Age', data=df_target)
plt.title('Valores gastos por faixa etária associados às 5 ocupações mais frequentes\n', fontsize=16)
plt.xlabel('Ocupação')
plt.ylabel('Valor gasto')
plt.legend(loc=1, title='Idade')
plt.ylim(0, 35000)
plt.show()

df_target = df[df['Purchase'] > 9000].groupby(['Marital_Status', 'Occupation'])['Purchase'].count().reset_index(name='count').reset_index(drop=True)
g = sns.catplot(x='Marital_Status', y='count', col='Occupation', col_wrap=9, data=df_target, kind='bar', height=3, aspect=0.6)
g.set_axis_labels('', 'Estado Civil').despine(left=True)
code
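The cell above draws one violin panel per age band, split by gender, straight from the BlackFriday data. A self-contained sketch of the same seaborn catplot pattern on synthetic data, so it runs without the Kaggle input files:

import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(0)
toy = pd.DataFrame({
    'Purchase': rng.normal(9000, 2500, 600).clip(min=0),
    'Gender': rng.choice(['F', 'M'], 600),
    'Age': rng.choice(['0-17', '18-25', '26-35'], 600),
})
# One horizontal violin panel per age band, as in the notebook cell
sns.catplot(x='Purchase', y='Gender', col='Age', data=toy,
            kind='violin', col_wrap=3, height=2, aspect=3)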
17133813/cell_4
[ "image_output_1.png" ]
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import pandas as pd

df = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
sns.set_style('whitegrid')
sns.violinplot(x='Age', y='Purchase', cut=0, scale='count', data=df.sort_values(by=['Age']))
code
17133813/cell_6
[ "image_output_1.png" ]
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import pandas as pd

df = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
sns.set_style('whitegrid')
g = sns.catplot(x='Purchase', y='Gender', col='Age', data=df.sort_values(by=['Age']), col_wrap=3, orient='h', height=2, aspect=3, palette='Set3', kind='violin', dodge=True, bw=0.2)
code
17133813/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import pandas as pd

df = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
sns.set_style('whitegrid')
g = sns.catplot(x='Purchase', y='Gender', col='Age', data=df.sort_values(by=['Age']), col_wrap=3, orient='h', height=2, aspect=3, palette='Set3', kind='violin', dodge=True, bw=0.2)

df_target = df.groupby('Product_ID')['Product_ID'].count().reset_index(name='count').sort_values(['count'], ascending=False).head(10).reset_index(drop=True)
sns.set(style='whitegrid')
ax = sns.barplot(x='Product_ID', y='count', data=df_target)
ax.set_xlabel('Produtos')
ax.set_ylabel('Total vendido')
for item in ax.get_xticklabels():
    item.set_rotation(90)
for i in range(len(df_target['Product_ID'])):
    plt.text(x=i - 0.3, y=df_target.loc[i, 'count'] + 20, s=df_target.loc[i, 'count'], size=8, color='Blue')
plt.show()
code
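The bar-annotation loop above positions each count label by hand with plt.text. A compact sketch of the same idiom on a small frame; ax.text with ha='center' avoids the manual x-offset used in the original:

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

counts = pd.DataFrame({'Product_ID': ['P1', 'P2', 'P3'], 'count': [120, 95, 60]})
ax = sns.barplot(x='Product_ID', y='count', data=counts)
for i, value in enumerate(counts['count']):
    ax.text(x=i, y=value + 2, s=str(value), ha='center', size=8)
plt.show()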
17133813/cell_3
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np

df = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
df.head()
code
17133813/cell_10
[ "text_html_output_1.png" ]
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import pandas as pd

df = pd.read_csv('../input/BlackFriday.csv', delimiter=',')
sns.set_style('whitegrid')
g = sns.catplot(x='Purchase', y='Gender', col='Age', data=df.sort_values(by=['Age']), col_wrap=3, orient='h', height=2, aspect=3, palette='Set3', kind='violin', dodge=True, bw=0.2)

df_target = df.groupby('Product_ID')['Product_ID'].count().reset_index(name='count').sort_values(['count'], ascending=False).head(10).reset_index(drop=True)
sns.set(style='whitegrid')
ax = sns.barplot(x='Product_ID', y='count', data=df_target)
ax.set_xlabel('Produtos')
ax.set_ylabel('Total vendido')
for item in ax.get_xticklabels():
    item.set_rotation(90)
for i in range(len(df_target['Product_ID'])):
    plt.text(x=i - 0.3, y=df_target.loc[i, 'count'] + 20, s=df_target.loc[i, 'count'], size=8, color='Blue')
plt.show()

occupation_order = list(df['Occupation'].value_counts().head(5).index)
df_target = df[df['Occupation'].isin(occupation_order)].sort_values(by='Age')
plt.figure(figsize=(20, 10))
g = sns.boxplot(x='Occupation', y='Purchase', hue='Age', data=df_target)
plt.title('Valores gastos por faixa etária associados às 5 ocupações mais frequentes\n', fontsize=16)
plt.xlabel('Ocupação')
plt.ylabel('Valor gasto')
plt.legend(loc=1, title='Idade')
plt.ylim(0, 35000)
plt.show()
code
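The occupation filter above keeps only the five most frequent categories via value_counts().head(5).index. The same idiom in isolation:

import pandas as pd

s = pd.Series(['a', 'b', 'a', 'c', 'a', 'b', 'd'])
top = list(s.value_counts().head(2).index)  # ['a', 'b']
filtered = s[s.isin(top)]                   # rows restricted to the top categories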
130004668/cell_21
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd

df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
df_reviews_raw.dtypes
df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True)
df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)]
df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True)
df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True)
df_reviews['geo_loc'] = OrdinalEncoder().fit_transform(df_reviews.geo_loc.values.reshape(-1, 1))
df_reviews['tld'] = OrdinalEncoder().fit_transform(df_reviews.tld.values.reshape(-1, 1))
df_reviews['who_is'] = OrdinalEncoder().fit_transform(df_reviews.who_is.values.reshape(-1, 1))
df_reviews['https'] = OrdinalEncoder().fit_transform(df_reviews.https.values.reshape(-1, 1))
df_reviews['label'] = OrdinalEncoder().fit_transform(df_reviews.label.values.reshape(-1, 1))
df_reviews['url'] = df_reviews.url.apply(lambda x: ' '.join(x.split('://')[1].strip('www.').replace('.', '/').split('/')))
tqdm.pandas()
stop = stopwords.words()
df_reviews.content = df_reviews.content.str.replace('[^\\w\\s]', '').str.lower()
df_reviews.content = df_reviews.content.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop]))
df_reviews.url = df_reviews.url.str.replace('[^\\w\\s]', '').str.lower()
df_reviews.url = df_reviews.url.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop]))

tfidf = TfidfVectorizer(min_df=5, max_df=0.95, max_features=8000, stop_words='english')
tfidf.fit(df_reviews.url)
url_tfidf = tfidf.transform(df_reviews.url)
tfidf.fit(df_reviews.content)
content_tfidf = tfidf.transform(df_reviews.content)

def find_optimal_clusters(data, max_k):
    k_list = range(2, max_k + 1)
    sse = []
    for k in k_list:
        sse.append(MiniBatchKMeans(n_clusters=k, init_size=1024, batch_size=2048, random_state=20).fit(data).inertia_)
    plt.style.use('dark_background')
    f, ax = plt.subplots(1, 1)
    ax.plot(k_list, sse, marker='o')
    ax.set_xlabel('Cluster Centers')
    ax.set_xticks(k_list)
    ax.set_xticklabels(k_list)
    ax.set_ylabel('SSE')
    ax.set_title('SSE by Cluster Center Plot')

find_optimal_clusters(url_tfidf, 20)
code
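The cell above applies the elbow method: fit MiniBatchKMeans for a range of k and look for where the inertia (SSE) curve flattens. A self-contained sketch on synthetic blobs, assuming only scikit-learn and matplotlib:

import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=2000, centers=5, random_state=42)
ks = range(2, 11)
sse = [MiniBatchKMeans(n_clusters=k, n_init=3, random_state=20).fit(X).inertia_ for k in ks]
plt.plot(ks, sse, marker='o')  # the 'elbow' suggests a reasonable k
plt.xlabel('Cluster Centers')
plt.ylabel('SSE')
plt.show()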
130004668/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
df_reviews_raw.dtypes
df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True)
df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)]
df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True)
df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True)
df_reviews[['geo_loc', 'tld', 'who_is', 'https', 'label']].describe()
code
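The 0/1 summary statistics above come from OrdinalEncoder, which maps each categorical column to integer codes (categories sorted lexicographically). The encoder in isolation:

import numpy as np
from sklearn.preprocessing import OrdinalEncoder

labels = np.array(['good', 'bad', 'good']).reshape(-1, 1)
encoded = OrdinalEncoder().fit_transform(labels)
# encoded -> [[1.], [0.], [1.]] because 'bad' < 'good' alphabetically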
130004668/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
df_reviews_raw.dtypes
df_reviews_raw.label.describe()
code
130004668/cell_34
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
from sklearn.tree import DecisionTreeClassifier
from tqdm import tqdm
import pandas as pd

df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
df_reviews_raw.dtypes
df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True)
df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)]
df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True)
df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True)
df_reviews['geo_loc'] = OrdinalEncoder().fit_transform(df_reviews.geo_loc.values.reshape(-1, 1))
df_reviews['tld'] = OrdinalEncoder().fit_transform(df_reviews.tld.values.reshape(-1, 1))
df_reviews['who_is'] = OrdinalEncoder().fit_transform(df_reviews.who_is.values.reshape(-1, 1))
df_reviews['https'] = OrdinalEncoder().fit_transform(df_reviews.https.values.reshape(-1, 1))
df_reviews['label'] = OrdinalEncoder().fit_transform(df_reviews.label.values.reshape(-1, 1))
df_reviews['url'] = df_reviews.url.apply(lambda x: ' '.join(x.split('://')[1].strip('www.').replace('.', '/').split('/')))
tqdm.pandas()
stop = stopwords.words()
df_reviews.content = df_reviews.content.str.replace('[^\\w\\s]', '').str.lower()
df_reviews.content = df_reviews.content.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop]))
df_reviews.url = df_reviews.url.str.replace('[^\\w\\s]', '').str.lower()
df_reviews.url = df_reviews.url.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop]))

# Decision tree baseline; X_train/y_train/X_test/y_test are built in an earlier cell of this notebook
param_grid = [{'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random']}]
grid = GridSearchCV(estimator=DecisionTreeClassifier(random_state=42), param_grid=param_grid, cv=5)
grid.fit(X_train, y_train)
grid.best_params_
grid.score(X_train, y_train)
grid.score(X_test, y_test)

# Random forest with a grid over ensemble size and split criterion
param_grid = [{'n_estimators': [x for x in range(10, 120, 10)], 'criterion': ['gini', 'entropy']}]
grid = GridSearchCV(estimator=RandomForestClassifier(random_state=42), param_grid=param_grid, cv=5)
grid.fit(X_train, y_train)
code
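The cell above tunes a RandomForestClassifier with 5-fold GridSearchCV over n_estimators and criterion. A self-contained sketch on synthetic data, since the notebook's X_train/y_train are built in an earlier cell:

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, train_test_split

X, y = make_classification(n_samples=500, n_features=10, random_state=42)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=42)
param_grid = [{'n_estimators': [10, 50, 100], 'criterion': ['gini', 'entropy']}]
grid = GridSearchCV(RandomForestClassifier(random_state=42), param_grid, cv=5)
grid.fit(X_tr, y_tr)
print(grid.best_params_, grid.score(X_te, y_te))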
130004668/cell_23
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd

df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
df_reviews_raw.dtypes
df_reviews_untrimmed_sample = df_reviews_raw.groupby('label').apply(lambda x: x.sample(25000, random_state=42)).reset_index(drop=True)
df_reviews_trimmed = df_reviews_untrimmed_sample[df_reviews_untrimmed_sample.content.str.split().str.len().ge(60)]
df_reviews_sampled = df_reviews_trimmed.groupby('label').apply(lambda x: x.sample(2000, random_state=42)).reset_index(drop=True)
df_reviews = df_reviews_sampled.sample(frac=1, random_state=42).reset_index(drop=True)
df_reviews['geo_loc'] = OrdinalEncoder().fit_transform(df_reviews.geo_loc.values.reshape(-1, 1))
df_reviews['tld'] = OrdinalEncoder().fit_transform(df_reviews.tld.values.reshape(-1, 1))
df_reviews['who_is'] = OrdinalEncoder().fit_transform(df_reviews.who_is.values.reshape(-1, 1))
df_reviews['https'] = OrdinalEncoder().fit_transform(df_reviews.https.values.reshape(-1, 1))
df_reviews['label'] = OrdinalEncoder().fit_transform(df_reviews.label.values.reshape(-1, 1))
df_reviews['url'] = df_reviews.url.apply(lambda x: ' '.join(x.split('://')[1].strip('www.').replace('.', '/').split('/')))
tqdm.pandas()
stop = stopwords.words()
df_reviews.content = df_reviews.content.str.replace('[^\\w\\s]', '').str.lower()
df_reviews.content = df_reviews.content.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop]))
df_reviews.url = df_reviews.url.str.replace('[^\\w\\s]', '').str.lower()
df_reviews.url = df_reviews.url.progress_apply(lambda x: ' '.join([item for item in x.split() if item not in stop]))

tfidf = TfidfVectorizer(min_df=5, max_df=0.95, max_features=8000, stop_words='english')
tfidf.fit(df_reviews.url)
url_tfidf = tfidf.transform(df_reviews.url)
tfidf.fit(df_reviews.content)
content_tfidf = tfidf.transform(df_reviews.content)

def find_optimal_clusters(data, max_k):
    k_list = range(2, max_k + 1)
    sse = []
    for k in k_list:
        sse.append(MiniBatchKMeans(n_clusters=k, init_size=1024, batch_size=2048, random_state=20).fit(data).inertia_)
    plt.style.use('dark_background')
    f, ax = plt.subplots(1, 1)
    ax.plot(k_list, sse, marker='o')
    ax.set_xlabel('Cluster Centers')
    ax.set_xticks(k_list)
    ax.set_xticklabels(k_list)
    ax.set_ylabel('SSE')
    ax.set_title('SSE by Cluster Center Plot')

find_optimal_clusters(content_tfidf, 20)
code
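Both elbow cells vectorize text with TfidfVectorizer before clustering. The fit/transform pattern in isolation, on two toy documents:

from sklearn.feature_extraction.text import TfidfVectorizer

docs = ['free casino bonus click now', 'quarterly earnings report attached']
vec = TfidfVectorizer(stop_words='english')
X = vec.fit_transform(docs)  # sparse matrix, shape (n_docs, n_terms)
print(vec.get_feature_names_out())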
130004668/cell_30
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.tree import DecisionTreeClassifier

# X_train/y_train are built in an earlier cell of this notebook
param_grid = [{'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random']}]
grid = GridSearchCV(estimator=DecisionTreeClassifier(random_state=42), param_grid=param_grid, cv=5)
grid.fit(X_train, y_train)
grid.best_params_
code
130004668/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df_reviews_raw = pd.read_csv('/kaggle/input/dataset-of-malicious-and-benign-webpages/Webpages_Classification_train_data.csv/Webpages_Classification_train_data.csv').drop(['Unnamed: 0'], axis=1)
df_reviews_raw.isna().sum()
df_reviews_raw.dtypes
code