| seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, ⌀) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 4034762044 |
"""
Utilities for point clouds.
Code modified from Jiayuan Gu.
"""
import numpy as np
def pad_or_clip(array: np.array, n: int, fill_value=0):
"""Pad or clip an array with constant values.
It is usually used for sampling a fixed number of points.
"""
if array.shape[0] >= n:
return array[:n]
else:
pad = np.full((n - array.shape[0],) + array.shape[1:], fill_value, dtype=array.dtype)
return np.concatenate([array, pad], axis=0)
def pad_or_clip_v2(array: np.array, n: int):
"""Pad or clip an array with the first item.
It is usually used for sampling a fixed number of points (PointNet and variants).
"""
if array.shape[0] >= n:
return array[:n]
else:
pad = np.repeat(array[0:1], n - array.shape[0], axis=0)
return np.concatenate([array, pad], axis=0)
def pad(array: np.array, n: int, fill_value=None):
"""Pad an array with a constant or the first item."""
assert array.shape[0] <= n
if fill_value is None:
pad = np.repeat(array[0:1], n - array.shape[0], axis=0)
else:
pad = np.full((n - array.shape[0],) + array.shape[1:], fill_value, dtype=array.dtype)
return np.concatenate([array, pad], axis=0)
# ---------------------------------------------------------------------------- #
# Unit test
# ---------------------------------------------------------------------------- #
def test_pad_or_clip():
test_cases = [(np.array([1, 2, 1]), 5), (np.array([0.5, -0.1, 1.0]), 2), (np.array([0, 1, 1], bool), 4)]
expected_list = [np.array([1, 2, 1, 0, 0]), np.array([0.5, -0.1]), np.array([0, 1, 1, 0], bool)]
test_cases += [(np.empty([0, 3]), 5)]
expected_list += [np.zeros([5, 3])]
for test_case, expected in zip(test_cases, expected_list):
actual = pad_or_clip(*test_case)
np.testing.assert_almost_equal(actual, expected)
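# --- Editor's usage sketch (not part of the original file) ---
# A minimal, illustrative demo of the three padding helpers above on a toy
# (N, 3) point array; the sizes are arbitrary.
if __name__ == "__main__":
    points = np.arange(12, dtype=np.float32).reshape(4, 3)
    print(pad_or_clip(points, 6).shape)       # (6, 3): padded with zeros
    print(pad_or_clip(points, 2).shape)       # (2, 3): clipped
    print(pad_or_clip_v2(points, 6)[-1])      # trailing rows repeat points[0]
    print(pad(points, 6, fill_value=-1)[-1])  # padded with the constant -1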
| hansongfang/CompNet | common_3d/utils/pc_utils.py | pc_utils.py | py | 1,881 | python | en | code | 33 | github-code | 6 |
| 25050908902 |
import threading
from threading import*
import time
#dictionary for storing key value data
dict={}
def create(key,value,timeout=0):
if key in dict:
print("error: this key already exists")
else:
if(key.isalpha()):
#checking the size of file and JSON object
if len(dict)<(1024*1020*1024) and value<=(16*1024*1024):
if timeout==0:
l=[value,timeout]
else:
l=[value,time.time()+timeout]
if len(key)<=32:
dict[key]=l
else:
print("error: Memory limit exceeded!! ")#error message2
else:
print("error: Invalind key_name!! key_name must contain only alphabets and no special characters or numbers")
def read(key):
if key not in dict:
print("error: given key does not exist in database")
else:
b=dict[key]
if b[1]!=0:
if time.time()<b[1]:
stri=str(key)+":"+str(b[0])
return stri
else:
print("error: time_to_live",key,"has expired")
else:
stri=str(key)+":"+str(b[0])
return stri
def delete(key):
if key not in dict:
print("error: given key does not exist in database. Please enter a valid key") #error message4
else:
b=dict[key]
if b[1]!=0:
if time.time()<b[1]: #comparing the current time with expiry time
del dict[key]
print("key is successfully deleted")
else:
print("error: time-to-live of",key,"has expired") #error message5
else:
del dict[key]
print("key is successfully deleted")
def modify(key,value):
    if key not in dict:
        print("error: given key does not exist in database. Please enter a valid key") #error message6
        return
    b=dict[key]
    #reject the update if the key has a time-to-live that has already expired
    if b[1]!=0 and time.time()>=b[1]:
        print("error: time-to-live of",key,"has expired") #error message5
        return
    dict[key]=[value,b[1]]
create("chennai",24)
read("chennai")
| Akhileshpm/file-based-key-value-store | main.py | main.py | py | 2,618 | python | en | code | 2 | github-code | 6 |
| 10423084443 |
from __future__ import annotations
import dataclasses
from typing import TYPE_CHECKING
from unittest.mock import MagicMock, PropertyMock
import pytest
from randovania.exporter import pickup_exporter
from randovania.game_description import default_database
from randovania.game_description.assignment import PickupTarget
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.games.dread.exporter.patch_data_factory import (
DreadAcquiredMemo,
DreadPatchDataFactory,
get_resources_for_details,
)
from randovania.games.dread.layout.dread_cosmetic_patches import DreadCosmeticPatches, DreadMissileCosmeticType
from randovania.games.game import RandovaniaGame
from randovania.generator.pickup_pool import pickup_creator
from randovania.interface_common.players_configuration import PlayersConfiguration
from randovania.layout.base.ammo_pickup_state import AmmoPickupState
from randovania.layout.base.pickup_model import PickupModelStyle
from randovania.layout.base.standard_pickup_state import StandardPickupState
from randovania.layout.layout_description import LayoutDescription
from randovania.lib import json_lib
if TYPE_CHECKING:
from randovania.layout.preset import Preset
@pytest.mark.parametrize(
("rdvgame_filename", "expected_results_filename", "num_of_players"),
[
("starter_preset.rdvgame", "starter_preset.json", 1), # starter preset
("crazy_settings.rdvgame", "crazy_settings.json", 1), # crazy settings
("dread_dread_multiworld.rdvgame", "dread_dread_multiworld_expected.json", 2), # dread-dread multi
("dread_prime1_multiworld.rdvgame", "dread_prime1_multiworld_expected.json", 2), # dread-prime1 multi
("elevator_rando.rdvgame", "elevator_rando.json", 1), # elevator_rando multi
],
)
def test_create_patch_data(test_files_dir, rdvgame_filename, expected_results_filename, num_of_players, mocker):
# Setup
rdvgame = test_files_dir.joinpath("log_files", "dread", rdvgame_filename)
players_config = PlayersConfiguration(0, {i: f"Player {i + 1}" for i in range(num_of_players)})
description = LayoutDescription.from_file(rdvgame)
cosmetic_patches = DreadCosmeticPatches()
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_word_hash",
new_callable=PropertyMock,
return_value="Words Hash",
)
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_hash",
new_callable=PropertyMock,
return_value="$$$$$",
)
# Run
data = DreadPatchDataFactory(description, players_config, cosmetic_patches).create_data()
# Expected Result
expected_results_path = test_files_dir.joinpath("patcher_data", "dread", expected_results_filename)
expected_data = json_lib.read_path(expected_results_path)
# Uncomment to easily view diff of failed test
# json_lib.write_path(expected_results_path, data); assert False
assert data == expected_data
def _preset_with_locked_pb(preset: Preset, locked: bool):
pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_DREAD)
preset = dataclasses.replace(
preset,
configuration=dataclasses.replace(
preset.configuration,
ammo_configuration=preset.configuration.ammo_pickup_configuration.replace_state_for_ammo(
pickup_database.ammo_pickups["Power Bomb Tank"],
AmmoPickupState(requires_main_item=locked),
),
),
)
return preset
@pytest.mark.parametrize("locked", [False, True])
def test_pickup_data_for_pb_expansion(locked, dread_game_description, preset_manager):
pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_DREAD)
resource_db = dread_game_description.resource_database
# Setup
pickup = pickup_creator.create_ammo_pickup(
pickup_database.ammo_pickups["Power Bomb Tank"],
[2],
locked,
resource_db,
)
creator = pickup_exporter.PickupExporterSolo(DreadAcquiredMemo.with_expansion_text(), RandovaniaGame.METROID_DREAD)
# Run
details = creator.export(PickupIndex(0), PickupTarget(pickup, 0), pickup, PickupModelStyle.ALL_VISIBLE)
result = get_resources_for_details(details.original_pickup, details.conditional_resources, details.other_player)
# Assert
assert result == [
[
{"item_id": "ITEM_WEAPON_POWER_BOMB_MAX", "quantity": 2},
]
if locked
else [
{"item_id": "ITEM_WEAPON_POWER_BOMB_MAX", "quantity": 2},
{"item_id": "ITEM_WEAPON_POWER_BOMB", "quantity": 1},
]
]
@pytest.mark.parametrize("locked", [False, True])
def test_pickup_data_for_main_pb(locked, dread_game_description, preset_manager):
pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_DREAD)
resource_db = dread_game_description.resource_database
# Setup
pickup = pickup_creator.create_standard_pickup(
pickup_database.standard_pickups["Power Bomb"],
StandardPickupState(included_ammo=(3,)),
resource_database=resource_db,
ammo=pickup_database.ammo_pickups["Power Bomb Tank"],
ammo_requires_main_item=locked,
)
creator = pickup_exporter.PickupExporterSolo(DreadAcquiredMemo.with_expansion_text(), RandovaniaGame.METROID_DREAD)
# Run
details = creator.export(PickupIndex(0), PickupTarget(pickup, 0), pickup, PickupModelStyle.ALL_VISIBLE)
result = get_resources_for_details(details.original_pickup, details.conditional_resources, details.other_player)
# Assert
assert result == [
[
{"item_id": "ITEM_WEAPON_POWER_BOMB", "quantity": 1},
{"item_id": "ITEM_WEAPON_POWER_BOMB_MAX", "quantity": 3},
]
]
def test_pickup_data_for_recolored_missiles(dread_game_description, preset_manager):
pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_DREAD)
resource_db = dread_game_description.resource_database
preset = preset_manager.default_preset_for_game(RandovaniaGame.METROID_DREAD).get_preset()
description = MagicMock(spec=LayoutDescription)
description.all_patches = {0: MagicMock()}
description.get_preset.return_value = preset
description.get_seed_for_player.return_value = 1000
cosmetics = DreadCosmeticPatches(missile_cosmetic=DreadMissileCosmeticType.PRIDE)
# Setup
pickup = pickup_creator.create_ammo_pickup(
pickup_database.ammo_pickups["Missile Tank"], (2,), False, resource_database=resource_db
)
factory = DreadPatchDataFactory(description, PlayersConfiguration(0, {0: "Dread"}), cosmetics)
creator = pickup_exporter.PickupExporterSolo(DreadAcquiredMemo.with_expansion_text(), RandovaniaGame.METROID_DREAD)
# Run
details = creator.export(PickupIndex(0), PickupTarget(pickup, 0), pickup, PickupModelStyle.ALL_VISIBLE)
result = factory._pickup_detail_for_target(details)
# Assert
assert result == {
"pickup_type": "actor",
"caption": "Missile Tank acquired.\nMissile capacity increased by 2.",
"resources": [[{"item_id": "ITEM_WEAPON_MISSILE_MAX", "quantity": 2}]],
"pickup_actor": {"scenario": "s010_cave", "layer": "default", "actor": "ItemSphere_ChargeBeam"},
"model": ["item_missiletank_green"],
"map_icon": {
"icon_id": "item_missiletank",
"original_actor": {"actor": "powerup_chargebeam", "layer": "default", "scenario": "s010_cave"},
},
}
def test_pickup_data_for_a_major(dread_game_description, preset_manager):
pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_DREAD)
resource_db = dread_game_description.resource_database
preset = preset_manager.default_preset_for_game(RandovaniaGame.METROID_DREAD).get_preset()
description = MagicMock(spec=LayoutDescription)
description.all_patches = {0: MagicMock()}
description.get_preset.return_value = preset
description.get_seed_for_player.return_value = 1000
# Setup
pickup = pickup_creator.create_standard_pickup(
pickup_database.standard_pickups["Speed Booster"],
StandardPickupState(),
resource_database=resource_db,
ammo=None,
ammo_requires_main_item=False,
)
factory = DreadPatchDataFactory(description, PlayersConfiguration(0, {0: "Dread"}), MagicMock())
creator = pickup_exporter.PickupExporterSolo(DreadAcquiredMemo.with_expansion_text(), RandovaniaGame.METROID_DREAD)
# Run
details = creator.export(PickupIndex(0), PickupTarget(pickup, 0), pickup, PickupModelStyle.ALL_VISIBLE)
result = factory._pickup_detail_for_target(details)
# Assert
assert result == {
"pickup_type": "actor",
"caption": "Speed Booster acquired.",
"resources": [[{"item_id": "ITEM_SPEED_BOOSTER", "quantity": 1}]],
"pickup_actor": {"scenario": "s010_cave", "layer": "default", "actor": "ItemSphere_ChargeBeam"},
"model": ["powerup_speedbooster"],
"map_icon": {
"icon_id": "powerup_speedbooster",
"original_actor": {"actor": "powerup_chargebeam", "layer": "default", "scenario": "s010_cave"},
},
}
@pytest.fixture()
def _setup_and_teardown_for_wrong_custom_spawn():
# modify the default start to have no collision_camera (asset_id) and no vanilla
# actor name for a start point
game_desc = default_database.game_description_for(RandovaniaGame.METROID_DREAD)
region = game_desc.region_list.region_with_name("Artaria")
area = region.area_by_name("Intro Room")
node = area.node_with_name("Start Point")
modified_node = dataclasses.replace(node, extra={})
area.nodes.remove(node)
area.nodes.append(modified_node)
asset_id = area.extra["asset_id"]
del area.extra["asset_id"]
yield
area.nodes.remove(modified_node)
area.nodes.append(node)
area.extra["asset_id"] = asset_id
@pytest.mark.usefixtures("_setup_and_teardown_for_wrong_custom_spawn")
def test_create_patch_with_wrong_custom_spawn(test_files_dir, mocker):
# test for a not createable spawn point
file = test_files_dir.joinpath("log_files", "dread", "starter_preset.rdvgame")
description = LayoutDescription.from_file(file)
players_config = PlayersConfiguration(0, {0: "Dread"})
cosmetic_patches = DreadCosmeticPatches()
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_word_hash",
new_callable=PropertyMock,
return_value="Words Hash",
)
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_hash",
new_callable=PropertyMock,
return_value="$$$$$",
)
patcher = DreadPatchDataFactory(description, players_config, cosmetic_patches)
with pytest.raises(
KeyError,
match="Artaria/Intro Room/Start Point has neither a start_point_actor_name nor the "
"area has a collision_camera_name for a custom start point",
):
patcher.create_data()
@pytest.fixture()
def _setup_and_teardown_for_custom_spawn():
# modify a node to be a valid start point without a vanilla spawn
game_desc = default_database.game_description_for(RandovaniaGame.METROID_DREAD)
region = game_desc.region_list.region_with_name("Artaria")
area = region.area_by_name("Charge Tutorial")
node = area.node_with_name("Start Point")
modified_node = dataclasses.replace(node, valid_starting_location=True)
area.nodes.remove(node)
area.nodes.append(modified_node)
yield
area.nodes.remove(modified_node)
area.nodes.append(node)
@pytest.mark.usefixtures("_setup_and_teardown_for_custom_spawn")
def test_create_patch_with_custom_spawn(test_files_dir, mocker):
# test for custom spawn point referenced by starting location and teleporters
file = test_files_dir.joinpath("log_files", "dread", "custom_start.rdvgame")
description = LayoutDescription.from_file(file)
players_config = PlayersConfiguration(0, {0: "Dread"})
cosmetic_patches = DreadCosmeticPatches()
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_word_hash",
new_callable=PropertyMock,
return_value="Words Hash",
)
mocker.patch(
"randovania.layout.layout_description.LayoutDescription.shareable_hash",
new_callable=PropertyMock,
return_value="$$$$$",
)
data = DreadPatchDataFactory(description, players_config, cosmetic_patches).create_data()
# Expected Result
expected_data = test_files_dir.read_json("patcher_data", "dread", "custom_start.json")
# Update the file
# json_lib.write_path(test_files_dir.joinpath("patcher_data", "dread", "custom_start.json"), data); assert False
assert data == expected_data
| randovania/randovania | test/games/dread/exporter/test_dread_patch_data_factory.py | test_dread_patch_data_factory.py | py | 12,949 | python | en | code | 165 | github-code | 6 |
| 11816055102 |
import os
import sys
from threading import Thread
case = []
for item in os.listdir(sys.argv[1]):
if os.path.isdir(sys.argv[1] + '//' + item) and item != "banira_files":
case.append(item)
num = 0
if len(case) > 3:
if len(case) % 3 == 0:
num = len(case) // 3
else:
num = len(case) // 3 + 1
for i in range(len(case)//num):
group = case[i*num:i*num+num]
group = str(group)
        # bind the current group as a default argument so the thread does not pick up a later value
        t = Thread(target=lambda g=group: os.system(f"python .\\data_fitting\\interpolation.py {sys.argv[1]} {g}"))
t.start()
if len(case) % 3 != 0:
group = case[len(case)//num*num:]
group = str(group)
t = Thread(target=lambda: os.system(f"python .\\data_fitting\\interpolation.py {sys.argv[1]} {group}"))
t.start()
else:
for item in case:
group = str([item])
        # bind the current group as a default argument so the thread does not pick up a later value
        t = Thread(target=lambda g=group: os.system(f"python .\\data_fitting\\interpolation.py {sys.argv[1]} {g}"))
t.start()
cases = 0
for item in os.listdir(sys.argv[1]):
if os.path.isdir(sys.argv[1] + '//' + item) and item != "banira_files":
cases += 1
while True:
try:
with open(f"{sys.argv[1]}/banira_files/finished.txt", "r") as f:
for step, row in enumerate(f):
assert step != cases-1
f.close()
except FileNotFoundError:
pass
except AssertionError:
with open(f"{sys.argv[1]}/banira_files/log.txt", "a") as log:
log.write(f"All finished\n")
log.close()
with open(f"{sys.argv[1]}/banira_files/finished.txt", "a") as log:
log.write(f"All finished\n{sys.argv[2]}\n")
log.close()
break
| cchenyixuan/Banira | utils/interpolation_runner.pyw | interpolation_runner.pyw | pyw | 1,685 | python | en | code | 0 | github-code | 6 |
| 37616517604 |
from time import sleep
from signal import pause
from gpiozero import LED
from gpiozero import Button
from pygame import mixer
mixer.init()
placeholder = mixer.Sound('placeholder.wav')
ph_len = placeholder.get_length()
led = LED(25)
btn = Button(4)
while True:
btn.wait_for_press()
print("Initialized")
btn.wait_for_release()
print("Starting Sequence")
# Light and Sound Sequence
placeholder.play()
led.blink(.5, .5, round(ph_len))
sleep(ph_len)
print("Sequence Complete")
| Aahil52/animatronics2022 | testscripts/soundandlightstest.py | soundandlightstest.py | py | 516 | python | en | code | 0 | github-code | 6 |
| 30023024204 |
import os
import glob
import numpy as np
from scipy.stats import norm
import json
class WarpedSpace:
# takes a prior as a dict and returns a scipy normal distribution
@staticmethod
def create_distribution(prior):
means = []
stds = []
ranges = []
for key in sorted(prior.keys()):
mean = np.inf
while mean > prior[key]['range'][1] or mean < prior[key]['range'][0]:
mean = prior[key]['params']['mean'] + norm.rvs(0, prior[key].get('noise', 0))
means.append(mean)
stds.append(prior[key]['params']['std'][0])
ranges.append(prior[key]['range'])
return norm(np.array(means).reshape(1, -1), np.array(stds).reshape(1, -1))
    # class that computes the warped space and, through the __call__ function, extrapolates
    # from the warped space up to function space - wraps around the original objective function
def __init__(self, dist, ranges, objective):
# ranges in numpy matrix, one row per dimension
# dist needs to implement elements of a scipy distribution, i.e. pdf, cdf, ppf etc.
self.dist = dist
self.param_ranges = np.zeros((len(ranges), 2))
for i, range_ in enumerate(ranges):
self.param_ranges[i, 0] = range_[0]
self.param_ranges[i, 1] = range_[1]
self.get_warped_ranges()
self.objective = objective
def get_warped_ranges(self):
        # gives the coordinates in warped (0, 1)-space where the boundaries of the original space lie
        # we want this boundary to be represented as such in the warped space too - thus, we warp the
        # space again by minmax-scaling the warped space with these boundary values. Consequently,
        # we get a limit on the warped space that (at largest) has the same boundaries as the original
        # space, and otherwise further enlarges the original search space. This makes it a truncated
        # Gaussian even if the prior is set at the very edge.
        # in the case where the entire prior fits within the search space, we need boundaries for where
        # numerical issues occur - i.e. not letting the algorithm go more than roughly 8 standard
        # deviations away for any dimension
self.boundaries = np.zeros(self.param_ranges.shape)
for i, range_ in enumerate(self.param_ranges.T):
self.boundaries[:, i] = self.dist.cdf(np.array(range_))
        # increment boundaries by the smallest possible value to avoid inverting back to infinity
self.boundaries[:, 0] = self.boundaries[:, 0] + 2e-16
self.boundaries[:, 1] = self.boundaries[:, 1] - 2e-16
def get_original_range(self, X):
        # input: an X in the range (0, 1), regardless of the problem
        # this needs to be shrunk linearly onto the range that is allowed, so it stays in range
        # thus, we take the inverse CDF of (floor + X * (ceiling of w.s. - floor of w.s.))
X_scaled = np.zeros(X.shape)
for dim in range(X.shape[1]):
X_scaled[:, dim] = self.boundaries[dim, 0] + X[:, dim] * (self.boundaries[dim, 1] - self.boundaries[dim, 0])
# this probably won't work in higher dimensions
X_unwarped = self.dist.ppf(X_scaled)
for dim in range(X.shape[1]):
assert np.all(X_unwarped[:, dim] >= self.param_ranges[dim, 0])
assert np.all(X_unwarped[:, dim] <= self.param_ranges[dim, 1])
return X_unwarped
def __call__(self, X):
X_original = self.get_original_range(X)
return self.objective(X_original)
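# --- Editor's usage sketch (not part of the original file) ---
# Shows the prior-dict layout that create_distribution above expects
# (per-dimension 'range', 'params' with 'mean' and a 'std' list, optional 'noise');
# the numbers are toy values chosen only for illustration.
if __name__ == "__main__":
    prior = {'x': {'range': [0.0, 1.0], 'params': {'mean': 0.5, 'std': [0.15]}, 'noise': 0.01},
             'y': {'range': [-2.0, 2.0], 'params': {'mean': 0.0, 'std': [0.5]}, 'noise': 0.01}}
    dist = WarpedSpace.create_distribution(prior)
    print(dist.mean(), dist.std())  # per-dimension means and stds, each of shape (1, 2)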
| piboauthors/PiBO-Spearmint | spearmint/warping.py | warping.py | py | 3,633 | python | en | code | 0 | github-code | 6 |
| 50818241 |
#
# @lc app=leetcode.cn id=754 lang=python3
#
# [754] 到达终点数字
#
# @lc code=start
class Solution:
def reachNumber(self, target: int) -> int:
# due to symmetry
target = abs(target)
x = count = 0
def dfs(x, target, count):
# base case
if x > target:
return float("inf")
if x == target:
return count
count1 = dfs(x+1, target, count+2)
count2 = dfs(x+count+1, target, count+1)
return min(count1, count2)
count = dfs(x, target, count)
return count
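# --- Editor's usage sketch (not part of the original file) ---
# Minimal check of the recursion above: target 2 needs 3 moves
# (1 - 2 + 3 = 2), and the DFS returns 3 for it.
if __name__ == "__main__":
    print(Solution().reachNumber(2))  # expected: 3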
| code-cp/leetcode | solutions/754/754.到达终点数字.py | 754.到达终点数字.py | py | 623 | python | en | code | 0 | github-code | 6 |
| 13160016876 |
from django.shortcuts import render
from subscribers.models import subscriberForm
from subscribe.settings import EMAIL_HOST_USER
from django.core.mail import send_mail,BadHeaderError
# Create your views here.
def index(request):
form=subscriberForm()
return render(request,"index.html",{"form":form})
def subscribe(request):
if request.method=="GET":
form=subscriberForm(request.GET)
if form.is_valid():
""" saving data in our database """
form.save()
""" using this cleaned data must, to get value of email"""
senderEmail=form.cleaned_data['Email']
subject="Welcome to "+senderEmail
"""here your message will be, also you send templates etc"""
message="Thanking you for subscribing me, you get every updates within a seconds..."
""" receiver email address"""
recipient=str(senderEmail)
if subject and message and recipient :
try:
""" send email is function in django to use this to send mail"""
""" if you want send bulk email add more email in [recipients]"""
send_mail(subject,message,EMAIL_HOST_USER,[recipient],fail_silently=False)
except BadHeaderError :
""" bad header error means prevent from header injection apply by hackers"""
return render(request,"subscribe.html",{'message':'Invalid header found.','class':'text-danger'})
return render(request,"subscribe.html",{'message':'Thanking you For Subscribing..','class':'text-success'})
return render(request,"subscribe.html",{"message":"Make sure all fields are entered and valid.",'class':'text-info'})
| pawankushwah850/Emailsubscriber | subscribers/views.py | views.py | py | 1,862 | python | en | code | 1 | github-code | 6 |
| 40369313075 |
while True:
try:
        # 1. Convert the entered sequence into a list
spisok = [int(c) for c in input('Введите последовательность чисел через пробел: ').split()]
except ValueError:
print("Это неправильный ввод! Введите данные согласно условий ввода!")
else:
break
while True:
try:
num = int(input('Введите число: '))
except ValueError:
print("Это неправильный ввод! Введите данные согласно условий ввода!")
else:
break
# 2. Sort the list in ascending order (define a function to implement the sorting)
def quick_merge(list1):
s = []
for i in list1:
s.append(i)
s.sort()
return s
print('Сортированный список:', *quick_merge(spisok))
# 3. Find the position of the element that is smaller than the number entered by the user, while the next element is greater than or equal to that number.
if min(spisok) <= num <= max(spisok):
spisok.append(num)
spisok.sort()
def binary_search(array, element, left, right):
if element > max(array):
return print('Нет числа, которое больше введенного')
elif element < min(array):
return print('Нет числа, которое меньше введенного')
if left > right:
return False
middle = (right + left) // 2
if array[middle] == element:
return middle - 1
elif element < array[middle]:
return binary_search(array, element, left, middle - 1)
else:
return binary_search(array, element, middle + 1, right)
print('Номер позиции элемента:', binary_search(quick_merge(spisok), num, 0, len(spisok) - 1))
| sigmaclap/PythonTasks | 17.9.1/17.9.py | 17.9.py | py | 2,120 | python | ru | code | 0 | github-code | 6 |
| 13750197452 |
from abc import ABC, abstractmethod
class Notification(ABC):
def __init__(self, msg) -> None:
self.msg = msg
@abstractmethod
def send(self) -> bool: ...
class EmailNotification(Notification):
def send(self) -> bool:
print(f'Enviando e-mail de notificação... {self.msg}')
return True
class SmsNotification(Notification):
def send(self) -> bool:
print(f'Enviando SMS de notificação... {self.msg}')
return False
def notificate(notification: Notification):
notification_sent = notification.send()
if notification_sent:
print('Notificação enviada com sucesso!')
else:
print('Não foi possível enviar a notificação.')
notif_1 = EmailNotification('testando e-mail')
notif_2 = SmsNotification('testando SMS')
notificate(notif_1)
print()
notificate(notif_2)
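# --- Editor's sketch (not part of the original file) ---
# Any new channel only needs to subclass Notification and implement send();
# notificate() stays unchanged, which is the point of this polymorphism exercise.
class PushNotification(Notification):
    def send(self) -> bool:
        print(f'Sending push notification... {self.msg}')
        return True

notificate(PushNotification('testing push'))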
| daugbit/Cursos | Python_3_Udemy/ex029_polimorfismo.py | ex029_polimorfismo.py | py | 859 | python | pt | code | 0 | github-code | 6 |
| 23156107935 |
from flask import Flask
from main import main
from mypage import mypage
from challengedetail import challengedetail
from flask import Flask, render_template, jsonify, request, session, redirect, url_for
from db import db
app = Flask(__name__)
# Secret string needed when creating JWT tokens. Any value will do.
# Only the server knows this string, so only this server can encode (create) and decode (open) tokens.
SECRET_KEY = 'SPARTA'
# Uses the JWT package (package to install: PyJWT).
import jwt
# The token needs an expiry time, so the datetime module is used as well.
import datetime
# On sign-up, it is best to hash the password before storing it in the DB.
# Otherwise the developer (= me) could see the members' passwords. ^^;
import hashlib
app.register_blueprint(main)
app.register_blueprint(mypage)
app.register_blueprint(challengedetail)
#################################
##    Routes that serve HTML   ##
#################################
@app.route('/')
def main():
token_receive = request.cookies.get('mytoken')
try:
payload = jwt.decode(token_receive, SECRET_KEY, algorithms=['HS256'])
user_info = db.user.find_one({"user_id": payload['id']})
return render_template('main.html', nickname=user_info["nick"])
except jwt.ExpiredSignatureError:
return redirect(url_for("login", msg="로그인 시간이 만료되었습니다."))
except jwt.exceptions.DecodeError:
return redirect(url_for("login", msg="로그인 정보가 존재하지 않습니다."))
@app.route('/mypage/')
def mypage():
token_receive = request.cookies.get('mytoken')
try:
payload = jwt.decode(token_receive, SECRET_KEY, algorithms=['HS256'])
user_info = db.user.find_one({"user_id": payload['id']})
return render_template('mypage.html', nickname=user_info["nick"])
except jwt.ExpiredSignatureError:
return redirect(url_for("login", msg="로그인 시간이 만료되었습니다."))
except jwt.exceptions.DecodeError:
return redirect(url_for("login", msg="로그인 정보가 존재하지 않습니다."))
@app.route('/challengedetail/')
def challengedetail():
token_receive = request.cookies.get('mytoken')
try:
payload = jwt.decode(token_receive, SECRET_KEY, algorithms=['HS256'])
user_info = db.user.find_one({"user_id": payload['id']})
return render_template('challengedetail.html', nickname=user_info["nick"])
except jwt.ExpiredSignatureError:
return redirect(url_for("login", msg="로그인 시간이 만료되었습니다."))
except jwt.exceptions.DecodeError:
return redirect(url_for("login", msg="로그인 정보가 존재하지 않습니다."))
@app.route('/login')
def login():
msg = request.args.get("msg")
return render_template('login.html', msg=msg)
@app.route('/signup')
def register():
return render_template('signup.html')
#################################
##       APIs for login        ##
#################################
# [Sign-up API]
# Receives id, pw, and nickname and stores them in MongoDB.
# Before saving, the pw is hashed with SHA-256 (one-way encryption, cannot be reversed).
@app.route('/api/register', methods=['POST'])
def api_register():
id_receive = request.form['id_give']
pw_receive = request.form['pw_give']
nickname_receive = request.form['nickname_give']
pw_hash = hashlib.sha256(pw_receive.encode('utf-8')).hexdigest()
db.user.insert_one({'user_id': id_receive, 'password': pw_hash, 'nick': nickname_receive})
return jsonify({'result': 'success'})
# [Login API]
# Receives id and pw, checks them, and issues a token if they match.
@app.route('/api/login', methods=['POST'])
def api_login():
id_receive = request.form['id_give']
pw_receive = request.form['pw_give']
print(id_receive)
print(pw_receive)
    # Hash the pw the same way as at sign-up.
pw_hash = hashlib.sha256(pw_receive.encode('utf-8')).hexdigest()
    # Look up the user by id and hashed pw.
result = db.user.find_one({'user_id': id_receive, 'password': pw_hash})
print(result)
    # If found, create and issue a JWT token.
if result is not None:
        # A JWT token needs a payload and a secret key.
        # The secret key is required to decode the token and read the payload.
        # Here the payload holds id and exp, i.e. decoding the JWT reveals the user ID.
        # exp holds the expiry time; once it has passed, decoding with the secret key raises an "expired" error.
payload = {
'id': id_receive,
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=78)
}
token = jwt.encode(payload, SECRET_KEY, algorithm='HS256')
        # Hand the token back.
return jsonify({'result': 'success', 'token': token})
    # If the user was not found
else:
return jsonify({'result': 'fail', 'msg': '아이디/비밀번호가 일치하지 않습니다.'})
# [User info check API]
# An API that only logged-in users can call.
# A valid token must be supplied to get a proper result.
# (Otherwise anyone could look at someone else's cart or personal info, right?)
@app.route('/api/nick', methods=['GET'])
def api_valid():
token_receive = request.cookies.get('mytoken')
print("받은 토큰 값")
print(token_receive)
    # try / catch statement?
    # Run the code under try; if an error occurs, control jumps to the matching except block.
try:
        # Decode the token with the secret key.
        # The payload is printed so you can see it; it is the same payload we stored at login.
payload = jwt.decode(token_receive, SECRET_KEY, algorithms=['HS256'])
print(payload)
        # The payload contains the id; use it to look up the user info.
        # As an example, the nickname is returned here.
userinfo = db.user.find_one({'user_id': payload['id']}, {'_id': 0})
return jsonify({'result': 'success', 'nickname': userinfo['nick']})
except jwt.ExpiredSignatureError:
        # If the expiry time has passed, the decode above raises an error.
return jsonify({'result': 'fail', 'msg': '로그인 시간이 만료되었습니다.'})
except jwt.exceptions.DecodeError:
return jsonify({'result': 'fail', 'msg': '로그인 정보가 존재하지 않습니다.'})
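# --- Editor's sketch (not part of the original file) ---
# Isolates the JWT round-trip used by the routes above so it can be tried without
# starting the server (PyJWT assumed, as in the imports above). Not called anywhere.
def _token_roundtrip_demo():
    payload = {'id': 'test_user',
               'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=5)}
    token = jwt.encode(payload, SECRET_KEY, algorithm='HS256')
    decoded = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])
    return decoded['id'] == 'test_user'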
if __name__ == '__main__':
app.run('0.0.0.0', port=5000, debug=True)
| cchloe0927/Mallenge | app.py | app.py | py | 6,984 | python | ko | code | 2 | github-code | 6 |
| 25969778046 |
import requests
from bs4 import BeautifulSoup
import pandas as pd
# program welcome message
print("**欢迎来到UCAR爬虫程式**")
print("\n")
# lists for storing the scraped data
titles = []
date = []
url_list = []
clicks =[]
replys = []
# user-defined search keyword and date range
x = str(input("请输入想爬取的关键字:"))
print("日期格式输入范例(YYYYMMDD):20200715")
print("\n")
start_date = int(input("请输入想爬取的起始日期:")) # format YYYYMMDD, e.g. 20180101
end_date = int(input("请输入想爬取的结束日期:")) # format YYYYMMDD, e.g. 20191231
fake_request_url= "https://forum.u-car.com.tw/forum/list?keywords=" + x
print("\n")
print("爬取中...请稍后")
# fetch every result page for the keyword the user entered
z = 0
while(True):
z += 1
real_request_url = fake_request_url + "&page=" + str(z)
#print(real_request_url)
response = requests.get(real_request_url)
response_text = response.text
soup = BeautifulSoup(response_text, "html.parser")
#print(soup)
    # check whether this listing page contains any posts (continue if yes, break if not)
    # note: .writer (rather than .title) is used to decide whether a listing page has posts, because pages with no posts still contain other .title tags, which would make the loop never terminate; .writer avoids that
    # of course another tag could be used for the check; .writer is just the example here
if soup.select(".writer"):
pass
else:
break
    # grab the content of every div with class "cell_topic" and store it in soup_find1 (a list)
soup_find1 = soup.find_all('div', 'cell_topic')
    # get the post date
    # loop over the soup_find1 list
for i in range(len(soup_find1)):
        # on page 1 the first two list items (0 and 1) are pinned ads, so skip them
if (z == 1 and i <= 1):
continue
b = soup_find1[i].find('div', 'postby margin-right-10').find('p').text
#print(b)
re_b = b[:10]
#print(re_b)
re_b_b = int(re_b.replace('/', ''))
#print(re_b_b)
#print(re_b)
        # check whether the post date is within the requested range before appending it to the list
if (start_date <= re_b_b and re_b_b <= end_date):
pass
else:
continue
date.append(re_b)
#print(re_b)
        # get the post URL
url = soup_find1[i].find('div', 'title').find('a')
#print(url)
a = 'https://forum.u-car.com.tw'
if url is not None:
url_list.append(a + url.get('href'))
else:
url_list.append("(本文已被删除)")
#print(a + url.get('href'))
        # get the title
c = soup_find1[i].find('div', 'title').find('a')
#print(c)
if c is not None:
titles.append(c.text)
else:
titles.append('(本文已被删除)')
print(c.text)
        # get the view count
click_count = soup_find1[i].find('div', 'cell_topic_view').find('p')
if click_count is not None:
clicks.append(click_count.text)
else:
clicks.append("0")
#print(click_count.text)
        # get the reply count
        replys_count = soup_find1[i].find('div', 'cell_topic_chats').find('p')
        if replys_count is not None: # bug fix: this previously checked click_count
            replys.append(replys_count.text)
        else:
            replys.append("0")
#print(replys_count.text)
#print('46行循环结束')
print("\n")
print("转档中...请稍后")
# convert the results to a DataFrame
df = pd.DataFrame(
{
'标题' : titles,
'点阅数' : clicks,
'回复数' : replys,
'发文日期' : date,
'文章连结' : url_list
}
)
# save as CSV
df.to_csv( "UCAR_" + x +"回传结果.csv", index = False, encoding = "utf_8_sig")
# end of program
len_titles = len(titles)
print("本次共爬出 {} 篇文章".format(len_titles))
print("\n")
end = input("请输入任意键结束程式:")
| weiweibro87777/UCAR_web-crawler | ucar.py | ucar.py | py | 3,533 | python | zh | code | 1 | github-code | 6 |
| 9002196810 |
import pathlib
import configparser
import shutil
output = pathlib.Path("./.out")
shutil.rmtree(output, ignore_errors=True)
output.mkdir(exist_ok=True)
book_counter = 0
cfg = configparser.ConfigParser()
cfg.read(".input.ini", encoding="utf-8")
with open(f".out/books.lua", encoding="utf-8", mode="w") as definition:
with open(f".out/books.txt", encoding="utf-8", mode="w") as text:
definition.write(
"""
--
-- Please, do not delete this comment.
-- This file is generated with ZoMeGen by DiRaven
-- https://github.com/diraven/zomegen
--
""".strip()
+ "\n\n"
)
titles = cfg["books"]["titles"]
print(titles)
for title in titles.strip().splitlines():
book_counter += 1
definition.write(
f"""
-- {title}
item book_ukrlit_{book_counter}
{{
DisplayCategory = Literature,
DisplayName = book_ukrlit_{book_counter},
Type = Literature,
Icon = Book,
Weight = 0.5,
UnhappyChange = -40,
StressChange = -40,
BoredomChange = -50,
FatigueChange = +5,
StaticModel = Book,
WorldStaticModel = BookClosedGround,
}}
""".strip()
+ "\n\n"
)
# Write text.
text.write(
f"""
DisplayName_book_ukrlit_{book_counter} = "{title}",
""".strip()
+ "\n"
)
| diraven/zomegen | books/__main__.py | __main__.py | py | 1,440 | python | en | code | 0 | github-code | 6 |
| 9658653690 |
import matplotlib.pyplot as plt
import scipy.signal as ss
import numpy as np
import math
window_size = 55
'------------------------------------------------------------------------------'
# description: computes and returns the derivative of the signal.
# function name: FUNC_diff
# parameters:
# ecg_filtered - numpy 1D array
# returns:
# diff - numpy 1D array
def FUNC_diff(ecg_filtered):
N = len(ecg_filtered)
    '''DERIVATIVE------------------------------------------------------------'''
    #diff = np.diff(ecg_filtered)
    diff = (2*ecg_filtered[5:N]+ecg_filtered[4:N-1]-ecg_filtered[2:N-3]-2*ecg_filtered[1:N-4])/8;
    '''The result vector is 5 elements shorter'''
return diff
# description: computes and returns the squared signal.
# function name: FUNC_sqr
# parameters:
# diff - numpy 1D array
# returns:
# sqr - numpy 1D array
def FUNC_sqr(diff):
    '''SQUARING--------------------------------------------------------------'''
sqr = diff**2
return sqr
# description: integrates the signal over a moving window and returns it.
# function name: FUNC_signal_integration
# parameters:
# sqr - numpy 1D array
# fs - integer
# returns:
# ecgmf - numpy 1D array
def FUNC_signal_integration(sqr, fs):
dt = 1/float(fs)
    '''MOVING-WINDOW INTEGRATION----------------------------------------------'''
    '''Create the window'''
    window = np.ones(window_size)
    '''Integration'''
    temp = ss.lfilter(window,1,sqr)
    ecgmf = ss.medfilt(temp,9)
    ecgmf = ecgmf*dt
    '''Remove the filter delay'''
    delay = math.ceil(len(window) / 2)
    ecgmf = ecgmf[delay:len(ecgmf)]
return ecgmf
# description: finds and returns the approximate QRS boundaries
# needed to locate the R wave (and the S wave).
# function name: FUNC_prepare_for_maxima_search
# parameters:
# ecgmf - numpy 1D array
# returns:
# left_T - numpy 1D array
# right_T - numpy 1D array
def FUNC_prepare_for_maxima_search(ecgmf):
    '''Find the highest amplitude'''
    max_A = max(ecgmf)
    '''Build the array to be searched'''
    threshold = 0.2
    region_poz = ecgmf>(threshold*max_A)
    region_poz = region_poz*1
    region_poz = np.array([region_poz])
    '''Pad with a zero'''
    region_poz_LR = np.insert(region_poz, 0, 0)
    region_poz_RL = np.append(region_poz, 0)
    '''SEARCH FOR MAXIMA------------------------------------------------------'''
    deltaLR = np.diff(region_poz_LR)
    deltaRL = np.diff(region_poz_RL)
    '''Determine the segment boundaries'''
left = np.where(deltaLR==1);
right = np.where(deltaRL==-1);
left_T = np.transpose(left)
right_T = np.transpose(right)
return left_T, right_T
# description: finds and returns the QRS values based on the approximate QRS boundaries.
# function name: FUNC_find_qrs_values
# parameters:
# left_T - numpy 1D array
# right_T - numpy 1D array
# ecg_filtered - numpy 1D array
# ecgmf - numpy 1D array
# returns:
# qrs_left_values - numpy 1D array
# qrs_right_values - numpy 1D array
# qrs_left_values_ecgmf - numpy 1D array
# qrs_right_values_ecgmf - numpy 1D array
def FUNC_find_qrs_values(left_T, right_T, ecg_filtered, ecgmf):
qrs_left_values = np.empty(len(left_T))
qrs_right_values = np.empty(len(left_T))
qrs_left_values_ecgmf = np.empty(len(left_T))
qrs_right_values_ecgmf = np.empty(len(left_T))
ecg_filtered[12:len(ecg_filtered)]
for i in range(0,len(left_T)):
qrs_left_values[i] = ecg_filtered[left_T[i]]
qrs_right_values[i] = ecg_filtered[right_T[i]]
qrs_left_values_ecgmf[i] = ecgmf[left_T[i]]
qrs_right_values_ecgmf[i] = ecgmf[right_T[i]]
return qrs_left_values, qrs_right_values, qrs_left_values_ecgmf, qrs_right_values_ecgmf
# description: finds local maxima within the approximate QRS boundaries.
# Returns the values and indices of the maxima found.
# function name: FUNC_find_max
# parameters:
# left_T - numpy 1D array
# right_T - numpy 1D array
# ecg_filtered - numpy 1D array
# returns:
# max_value - numpy 1D array
# max_index - numpy 1D array
def FUNC_find_max(left_T, right_T, ecg_filtered):
max_index = np.empty(len(left_T))
max_value = np.empty(len(left_T))
    #trim the start of the original signal so that indices after integration and differentiation line up with the original
    #ecg_filtered = ecg_filtered[18:len(ecg_filtered)]
    #the signal gets clipped at the end during integration
for i in range(0,len(left_T)):
start = int(left_T[i])
end = int(right_T[i])
max_value[i] = ecg_filtered[start]
max_index[i] = start
for j in range(start,end):
if ecg_filtered[j] > max_value[i]:
max_value[i] = ecg_filtered[j]
max_index[i] = j
#max_index[i] = np.argmax(ecg_filtered[left_T[i]:right_T[i]])
#max_index[i] = max_index[i]+left_T[i]
#max_value[i] = ecg_filtered[max_index[i]]
'''for i in range(0,len(left_T)):
max_index[i] = np.argmax(ecg_filtered[left_T[i]:right_T[i]])
max_index[i] = max_index[i]+left_T[i]
max_value[i] = ecg_filtered[max_index[i]]'''
return max_value, max_index
# description: finds local minima within the approximate QRS boundaries.
# Returns the values and indices of the minima found.
# function name: FUNC_find_min
# parameters:
# left_T - numpy 1D array
# right_T - numpy 1D array
# ecg_filtered - numpy 1D array
# returns:
# min_value - numpy 1D array
# min_index - numpy 1D array
def FUNC_find_min(left_T, right_T, ecg_filtered):
min_index = np.empty(len(left_T))
min_value = np.empty(len(left_T))
for i in range(0,len(left_T)):
start = int(left_T[i])
end = int(right_T[i])
min_value[i] = ecg_filtered[start]
min_index[i] = start
for j in range(start,end):
if ecg_filtered[j] < min_value[i]:
min_value[i] = ecg_filtered[j]
min_index[i] = j
'''
for i in range(0,len(left_T)):
min_index[i] = np.argmin(ecg_filtered[left_T[i]:right_T[i]])
min_index[i] = min_index[i]+left_T[i]
min_value[i] = ecg_filtered[min_index[i]]'''
return min_value, min_index
# description: R-wave detection. Returns the values and indices of the detected R waves.
# function name: FUNC_r_detection
# parameters:
# ecg_filtered - numpy 1D array
# fs - integer
# returns:
# r_value - numpy 1D array
# r_index - numpy 1D array
def FUNC_r_detection(ecg_filtered, fs):
    stateDict = {'Done': 1,
                 'Signal too short': -2,
                 'Incorrect input': -3,
                 'Error': -1}  # 'Error' added so the except branch below cannot raise a KeyError
try:
if not len(ecg_filtered):
stateFlag = stateDict['Incorrect input']
return stateFlag, [[], []]
dt = 1/float(fs)
diff = FUNC_diff(ecg_filtered)
sqr = FUNC_sqr(diff)
ecgmf = FUNC_signal_integration(sqr, dt)
left_T, right_T = FUNC_prepare_for_maxima_search(ecgmf)
r_value, r_index = FUNC_find_max(left_T, right_T, ecg_filtered)
stateFlag = stateDict['Done']
return stateFlag, [r_value, r_index]
except Exception as e:
print(f'Module R_PEAKS failed: {e}')
stateFlag = stateDict['Error']
return stateFlag, [[], []]
# description: S-wave detection, assuming the S wave is a minimum.
# Returns the values and indices of the detected S waves.
# function name: FUNC_s_detection
# parameters:
# ecg_filtered - numpy 1D array
# fs - integer
# returns:
# s_value - numpy 1D array
# s_index - numpy 1D array
def FUNC_s_detection(ecg_filtered, fs):
dt = 1/float(fs)
diff = FUNC_diff(ecg_filtered)
sqr = FUNC_sqr(diff)
ecgmf = FUNC_signal_integration(sqr, dt)
left_T, right_T = FUNC_prepare_for_maxima_search(ecgmf)
s_value, s_index = FUNC_find_min(left_T, right_T, ecg_filtered)
return s_value, s_index
# description: plots the individual signals with the detected points marked on them
# function name: PRINT_all
# parameters:
# ecg_filtered - numpy 1D array
# fs - integer
def PRINT_all(ecg_filtered, fs):
diff = FUNC_diff(ecg_filtered)
sqr = FUNC_sqr(diff)
ecgmf = FUNC_signal_integration(sqr, fs)
left_T, right_T = FUNC_prepare_for_maxima_search(ecgmf)
r_value, r_index = FUNC_find_max(left_T, right_T, ecg_filtered)
s_value, s_index = FUNC_find_min(left_T, right_T, ecg_filtered)
qrs_left_values, qrs_right_values, qrs_left_values_ekgmf, qrs_right_values_ekgmf = FUNC_find_qrs_values(left_T, right_T, ecg_filtered, ecgmf)
plt.figure(1)
    'Approximate QRS boundaries marked on the filtered signal'
plt.subplot(411)
plt.plot(ecg_filtered)
plt.plot(left_T,qrs_left_values, marker='o', color='g', ls='')
plt.plot(right_T,qrs_right_values, marker='o', color='y', ls='')
plt.ylabel('ekg_filtered')
plt.subplot(412)
plt.plot(diff)
plt.ylabel('diff')
plt.subplot(413)
plt.plot(sqr)
plt.ylabel('sqr')
    'Approximate QRS boundaries marked on the integrated signal'
plt.subplot(414)
plt.plot(ecgmf)
plt.plot(left_T,qrs_left_values_ekgmf, marker='o', color='g', ls='')
plt.plot(right_T,qrs_right_values_ekgmf, marker='o', color='y', ls='')
plt.ylabel('ecgmf')
plt.figure(2)
plt.plot(ecg_filtered)
plt.plot(r_index,r_value, marker='x', color='r', ls='')
plt.plot(s_index,s_value, marker='x', color='b', ls='')
plt.plot(left_T,qrs_left_values, marker='o', color='g', ls='')
plt.plot(right_T,qrs_right_values, marker='o', color='y', ls='')
plt.xlabel('zalamki R, S oraz przyblizone granice QRS')
plt.figure(3)
plt.plot(ecg_filtered)
plt.plot(r_index,r_value, marker='x', color='r', ls='')
plt.xlabel('zalamki R')
plt.show()
# description: marks the detected R waves on the ECG signal
# function name: PRINT_r
# parameters:
# ecg_filtered - numpy 1D array
# r_index - numpy 1D array
# r_value - numpy 1D array
def PRINT_r(ecg_filtered, r_index, r_value):
plt.figure(4)
plt.plot(ecg_filtered)
plt.plot(r_index,r_value, marker='x', color='r', ls='')
plt.xlabel('zalamki R')
plt.show()
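# --- Editor's usage sketch (not part of the original file) ---
# Runs the R-peak detector on a synthetic signal: a slow sine baseline with one
# sharp spike per second stands in for a filtered ECG. Purely illustrative.
if __name__ == "__main__":
    fs = 250
    t = np.arange(0, 4, 1.0 / fs)
    synthetic_ecg = 0.05 * np.sin(2 * np.pi * t)
    synthetic_ecg[::fs] += 1.0  # one sharp "R peak" per second
    flag, (r_value, r_index) = FUNC_r_detection(synthetic_ecg, fs)
    print(flag, r_index)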
| sebastianczuma/r_peaks | R_PEAKS_old.py | R_PEAKS_old.py | py | 9,657 | python | en | code | 1 | github-code | 6 |
| 71082529147 |
from classes.rayon import *
from processing.analysis import intersect
def rayon_direct(start_point,end_point, murs):
    # returns the direct ray (in a list)
    list_rayon = []
    nouveau_rayon = Rayon(start_point)
    nouveau_rayon.add_point_principal(end_point)
    nouveau_rayon.find_all_intersections(murs) # determine the transmission points
list_rayon.append(nouveau_rayon)
return list_rayon
| bjoukovs/PHYSRayTracing2017 | processing/direct.py | direct.py | py | 445 | python | en | code | 0 | github-code | 6 |
| 13415300522 |
# -*- coding: utf-8 -*-
"""
@author: Taoting
Converts COCO-format JSON into labelme-format annotation JSON.
"""
import json
import cv2
import numpy as np
import os
# Use one labelme-format JSON as a reference, since much of the information is identical and does not need to change.
def reference_labelme_json():
ref_json_path = './bin/25.json'
data=json.load(open(ref_json_path))
return data
def labelme_shapes(data,data_ref,id):
shapes = []
    label_num = {'box':0} # adjust this according to your data
for ann in data['annotations']:
if id == ann['image_id']:
shape = {}
class_name = [i['name'] for i in data['categories'] if i['id'] == ann['category_id']]
            # labels for each class are numbered starting from _1
label_num[class_name[0]] += 1
shape['label'] = class_name[0]
shape['points'] = []
# ~ print(ann['segmentation'])
if not type(ann['segmentation']) == list:
continue
else:
                x = ann['segmentation'][0][::2] # odd positions are the x coordinates
                y = ann['segmentation'][0][1::2] # even positions are the y coordinates
for j in range(len(x)):
shape['points'].append([x[j], y[j]])
shape['shape_type'] = data_ref['shapes'][0]['shape_type']
shape['flags'] = data_ref['shapes'][0]['flags']
shapes.append(shape)
return shapes
def Coco2labelme(json_path,data_ref):
with open(json_path,'r') as fp:
        data = json.load(fp) # load the JSON file
for img in data['images']:
id = img['id']
data_labelme={}
data_labelme['version'] = data_ref['version']
data_labelme['flags'] = data_ref['flags']
data_labelme['shapes'] = labelme_shapes(data,data_ref,id)
data_labelme['imagePath'] = img['file_name']
data_labelme['imageData'] = None
# ~ data_labelme['imageData'] = data_ref['imageData']
data_labelme['imageHeight'] = img['height']
data_labelme['imageWidth'] = img['width']
file_name = data_labelme['imagePath']
        # save the json file
json.dump(data_labelme,open('./%s.json' % file_name.split('.')[0],'w'),indent=4)
return data_labelme
if __name__ == '__main__':
root_dir = '/home/eason/PackSeg/'
json_list = os.listdir(root_dir)
    # reference json
data_ref = reference_labelme_json()
for json_path in json_list:
if json_path.split('.')[-1] == 'json':
print('当前文件: ', json_path)
data_labelme= Coco2labelme(os.path.join(root_dir,json_path), data_ref)
#file_name = data_labelme['imagePath']
            # save the json file
#json.dump(data_labelme,open('./%s.json' % file_name.split('.')[0],'w'),indent=4)
| Tommy-Bie/Logistics-Package-Separation-Software | DatasetUtils/coco2labelme.py | coco2labelme.py | py | 2,887 | python | en | code | 1 | github-code | 6 |
| 5782054342 |
from django.contrib import admin
from .models import Listing
# Customize how the Listing model is displayed in the Django admin list view.
class ListingAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'is_published', 'price', 'list_date', 'realtor') # display items
list_display_links = ('id', 'title', 'realtor') #clickable display items
list_filter = ('realtor',) # filter display items
list_editable = ('is_published', 'price') # editable list item
search_fields = ('title', 'address', 'city', 'description', 'state', 'zipcode') # search panels filter items
list_per_page = 3 # display list per page
admin.site.register(Listing, ListingAdmin)
| MonadWizard/django_HouseSellRealEstate_project | listings/admin.py | admin.py | py | 671 | python | en | code | 3 | github-code | 6 |
| 5073225844 |
import urllib, urllib.request
from datetime import datetime
def get_data(province_id): # download test data from the web page
url = 'https://www.star.nesdis.noaa.gov/smcd/emb/vci/VH/get_TS_admin.php?country=UKR&provinceID={}&year1=1981&year2=2020&type=Mean'.format(province_id)
    # The web page can be opened as follows:
webpage = urllib.request.urlopen(url)
text = webpage.read()
    # Get the current date and time
    now = datetime.now()
    # A string with the current date and time in the required format can be generated with strftime
    date_and_time_time = now.strftime("%d.%m.%Y_%H^%M^%S")
    # Create a new file with the open function
    out = open('D:\\AD\\' + 'NOAA_ID' + str(province_id) + '-' + date_and_time_time + '.csv', 'wb')
    # After opening, the variable text contains the text of the web page, which can now be written to the file
out.write(text)
out.close()
import pandas as pd
def make_header(filepath):
headers = ['Year', 'Week', 'SMN', 'SMT', 'VCI', 'TCI', 'VHI', 'empty']
dataframe = pd.read_csv(filepath, header=1, names=headers)
    dataframe = dataframe.drop(dataframe.loc[dataframe['VHI'] == -1].index) # assign the result: drop() is not in-place
return dataframe
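# --- Editor's usage sketch (not part of the original file) ---
# Hypothetical call sequence: download the series for province 1, then load the
# freshly written CSV (the exact filename depends on the timestamp, so it is a placeholder here).
# get_data(1)
# df = make_header('D:\\AD\\NOAA_ID1-<date>.csv')
# print(df.head())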
| DJeik7/lab2 | Ad1.py | Ad1.py | py | 1,504 | python | uk | code | 0 | github-code | 6 |
| 70879486268 |
from enum import Enum
class Color(Enum):
WHITE = True
BLACK = False
class Direction(Enum):
EAST = "e"
SOUTH_EAST = "se"
SOUTH_WEST = "sw"
WEST = "w"
NORTH_WEST = "nw"
NORTH_EAST = "ne"
class Coordinate:
# Using axial coordinates
# https://www.redblobgames.com/grids/hexagons/
def __init__(self, q, r):
self._q = q
self._r = r
def _as_tuple(self):
return (self._q, self._r)
def __repr__(self):
return f"(q:{self._q}, r:{self._r})"
def __hash__(self):
return hash(self._as_tuple())
def __eq__(self, other):
assert isinstance(other, Coordinate)
return self._as_tuple() == other._as_tuple()
def __add__(self, other):
assert isinstance(other, Coordinate)
return Coordinate(self._q + other._q, self._r + other._r)
def adj(self):
return {d: self + Coordinate.ADJ[d] for d in Direction}
Coordinate.ADJ = {
Direction.EAST: Coordinate(+1, 0),
Direction.SOUTH_EAST: Coordinate(0, +1),
Direction.SOUTH_WEST: Coordinate(-1, +1),
Direction.WEST: Coordinate(-1, 0),
Direction.NORTH_WEST: Coordinate(0, -1),
Direction.NORTH_EAST: Coordinate(+1, -1),
}
class HexTile:
_n = 0
def __init__(self):
self._color = Color.WHITE
self.n = HexTile._n
HexTile._n += 1
def __repr__(self):
return f"T{self.n}({self._color})"
def toggle_color(self):
self._color = Color(not self._color.value)
def get_color(self):
return self._color
class HexGrid:
ORIGIN = Coordinate(0, 0)
def __init__(self):
self._tiles = {}
self._create_tile_at(HexGrid.ORIGIN)
def flip_tile(self, directions):
pos = HexGrid.ORIGIN
for d in directions:
pos = pos + Coordinate.ADJ[d]
if pos not in self._tiles:
self._create_tile_at(pos)
self._tiles[pos].toggle_color()
def count_black_tiles(self):
return sum(t.get_color() == Color.BLACK for t in self._tiles.values())
def simulate_day(self):
        # add white tiles next to all black tiles
for pos, tile in list(self._tiles.items()):
if tile.get_color() == Color.BLACK:
for adj_pos in pos.adj().values():
if adj_pos not in self._tiles:
self._create_tile_at(adj_pos)
# determine which tiles need to be flipped
to_flip = {tile for pos, tile in self._tiles.items() if self._should_flip(tile, pos)}
# flip tiles
for tile in to_flip:
tile.toggle_color()
def _should_flip(self, tile, pos):
count = self._count_adj_black_tiles(pos)
if tile.get_color() == Color.BLACK and (count == 0 or count > 2):
return True
elif tile.get_color() == Color.WHITE and count == 2:
return True
return False
def _count_adj_black_tiles(self, pos):
count = 0
for adj_pos in pos.adj().values():
adj_tile = self._tiles.get(adj_pos)
if adj_tile is not None and adj_tile.get_color() == Color.BLACK:
count += 1
return count
def _create_tile_at(self, pos):
assert pos not in self._tiles
self._tiles[pos] = HexTile()
def parse(line):
directions = []
i = 0
while i < len(line):
c = line[i]
if c in "ew":
directions.append(Direction(c))
i += 1
elif c in "ns":
directions.append(Direction(line[i : i + 2]))
i += 2
else:
raise Exception("invalid input")
return directions
def get_grid(txt):
grid = HexGrid()
for line in txt.splitlines():
directions = parse(line)
grid.flip_tile(directions)
return grid
def parta(txt):
grid = get_grid(txt)
return grid.count_black_tiles()
def partb(txt):
grid = get_grid(txt)
for day in range(100):
grid.simulate_day()
# if day < 10 or (day + 1) % 10 == 0:
# print(f"Day {day + 1}: {grid.count_black_tiles()}")
return grid.count_black_tiles()
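# --- Editor's sketch (not part of the original file) ---
# A small sanity check of the axial-coordinate model: the path "nwwswee" walks
# back to the reference tile, so with the two lines below only two tiles end up
# black. The function is not called anywhere; run it manually if curious.
def _axial_demo():
    pos = HexGrid.ORIGIN
    for d in parse("nwwswee"):
        pos = pos + Coordinate.ADJ[d]
    assert pos == HexGrid.ORIGIN
    assert parta("esew\nnwwswee") == 2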
if __name__ == "__main__":
from aocd import data
print(f"parta: {parta(data)}")
print(f"partb: {partb(data)}")
| cj81499/advent-of-code | src/aoc_cj/aoc2020/day24.py | day24.py | py | 4,291 | python | en | code | 2 | github-code | 6 |
| 20651798683 |
import math
import random
import numpy as np
from itertools import combinations
from copy import deepcopy
class Node:
def __init__(self):
self.parent = None
self.state = []
self.children = []
self.fully_expanded = False
self.Q = 0
self.N = 0
def __str__(self):
return f"node state: {self.state}, Q: {self.Q}, N: {self.N}, fully expanded: {self.fully_expanded}"
def ripple(v_sum, f_sum, f_leaves):
return f_leaves - (f_sum - v_sum) * (f_leaves / f_sum) if f_sum != 0 else 0
def distance(v1, v2):
return np.sqrt(np.sum(np.power(v1 - v2, 2)))
def ps(df, v, f, selections):
a = np.copy(f)
for selection in selections:
v_sum = df.loc[selection, 'real'].sum()
f_sum = df.loc[selection, 'predict'].sum()
a[selection] = ripple(v_sum, f_sum, df.loc[selection, 'predict'])
score = max(1 - distance(v, a) / distance(v, f), 0)
return score
def gps(v, f, selections):
a, b = [], []
for selection in selections:
selection_v = v[selection]
selection_f = f[selection]
with np.errstate(divide='ignore', invalid='ignore'):
selection_a = f[selection] * (selection_v.sum() / selection_f.sum())
selection_a = np.nan_to_num(selection_a)
a.extend(np.abs(selection_v - selection_a))
b.extend(np.abs(selection_v - selection_f))
selection = np.logical_or.reduce(selections)
non_selection_v = v[~selection]
non_selection_f = f[~selection]
a = np.mean(a)
b = np.mean(b)
c = np.nan_to_num(np.mean(np.abs(non_selection_v - non_selection_f)))
score = 1 - ((a + c) / (b + c))
return score
def get_unqiue_elements(df, cuboid):
return {tuple(row) for row in df[cuboid].values}
def get_element_mask(df, cuboid, combination):
return [np.logical_and.reduce([df[d] == e for d, e in zip(cuboid, c)]) for c in combination]
def ucb(node, C=math.sqrt(2.0)):
best_child = None
max_score = -1
for child in node.children:
if child.N > 0 and not child.fully_expanded:
left = child.Q
right = C * math.sqrt(math.log(node.N) / child.N)
score = left + right
if score > max_score:
best_child = child
max_score = score
return best_child
def init_children(node, elements):
children = [e for e in elements if e not in set(node.state)]
for c in children:
child = Node()
child.state = node.state + [c]
child.parent = node
node.children.append(child)
def get_initial_scores(df, elements, cuboid, v, f, scoring):
element_scores = dict()
for leaf in elements:
selections = get_element_mask(df, cuboid, [leaf])
if scoring == 'ps':
element_scores[leaf] = ps(df.copy(), v, f, selections)
else:
element_scores[leaf] = gps(v, f, selections)
return element_scores
def sublist(lst1, lst2):
return set(lst1) <= set(lst2)
def selection(node, elements):
while len(node.state) < len(elements):
if len(node.children) == 0: # First time to search this node.
init_children(node, elements)
return node
q_max = 0
all_visit = True
for child in node.children:
q_max = max(q_max, child.Q)
if child.N == 0: # Not all children have been visited.
all_visit = False
if not all_visit and random.random() > q_max:
return node # Expand current node
child_node = ucb(node) # Select the best path got go deeper into the tree.
if child_node is None: # If all children are already fully expanded.
if all_visit:
node.fully_expanded = True
if node.parent is None:
return node # The tree is fully exanded.
node = node.parent # Continue again with parent node.
else:
return node # Expand current node.
else:
node = child_node
node.fully_expanded = True
return node
def expand(node, element_scores):
best_child = None
max_score = -1
for child in node.children:
if child.N == 0:
score = element_scores[child.state[-1]]
if score > max_score:
max_score = score
best_child = child
return best_child
def evaluate(df, selected_node, cuboid, v, f, scoring):
selections = get_element_mask(df, cuboid, selected_node.state)
if scoring == 'ps':
score = ps(df.copy(), v, f, selections)
else:
score = gps(v, f, selections)
return score
def backup(node, new_q):
while node is not None:
node.N += 1
node.Q = max(node.Q, new_q)
node = node.parent
def MCTS(df, elements, cuboid, v, f, pt, m, scoring):
root = Node()
max_q = -1
best_selection = Node()
element_scores = get_initial_scores(df, elements, cuboid, v, f, scoring)
for i in range(m):
node = selection(root, elements)
if not node.fully_expanded:
node = expand(node, element_scores)
if root.fully_expanded:
break
new_q = evaluate(df, node, cuboid, v, f, scoring)
backup(node, new_q)
if new_q > max_q:
max_q = root.Q
best_selection = deepcopy(node)
elif (new_q == max_q) and not sublist(node.state, best_selection.state) and len(node.state) < len(
best_selection.state):
max_q = root.Q
best_selection = deepcopy(node)
if max_q >= pt:
break
return best_selection.state, max_q
def hierarchical_pruning(elements, layer, cuboid, candidate_set):
previous_layer_candidates = [candidate for candidate in candidate_set if candidate['layer'] == layer - 1]
parent_selections = [cand['elements'] for cand in previous_layer_candidates if set(cand['cuboid']) < set(cuboid)]
for parent_selection in parent_selections:
elements = [e for e in elements if np.any([set(pe) < set(e) for pe in parent_selection])]
return elements
def get_best_candidate(candidate_set):
# Sort by score, layer, number of elements
sorted_cands = sorted(candidate_set, key=lambda c: (c['score'], -c['layer'], -len(c['elements'])), reverse=True)
return sorted_cands[0]
def hotspot(df, dimensions, pt=0.67, m=200, scoring='gps', debug=False):
assert scoring in ['ps', 'gps'], "Supported scoring is 'ps' and 'gps'."
    # Hierarchical pruning does not seem to work well when using gps scoring
use_pruning = scoring != 'gps'
v = df['real'].values
f = df['predict'].values
candidate_set = []
for layer in range(1, len(dimensions) + 1):
if debug:
print('Layer:', layer)
cuboids = [list(c) for c in combinations(dimensions, layer)]
for cuboid in cuboids:
if debug:
print('Cuboid:', cuboid)
            elements = get_unique_elements(df, cuboid)
# if debug: print('Elements:', elements)
if use_pruning and layer > 1:
elements = hierarchical_pruning(elements, layer, cuboid, candidate_set)
# if debug: print('Filtered elements:', elements)
selected_set, score = MCTS(df, elements, cuboid, v, f, pt, m, scoring)
if debug:
print('Best subset:', selected_set, 'score', score)
candidate = {
'layer': layer,
'cuboid': cuboid,
'score': score,
'elements': np.array(selected_set)
}
if candidate['score'] >= pt:
return candidate
candidate_set.append(candidate)
return get_best_candidate(candidate_set)
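# A minimal, hypothetical usage sketch of hotspot(): it assumes pandas is imported
# as `pd` higher up in this module, and the dimension values below are placeholders.
if __name__ == '__main__':
    toy = pd.DataFrame({
        'dc': ['a', 'a', 'b', 'b'],
        'isp': ['x', 'y', 'x', 'y'],
        'real': [100.0, 20.0, 30.0, 40.0],
        'predict': [50.0, 20.0, 30.0, 40.0],
    })
    # Search the two attribute dimensions for the subset that best explains the
    # gap between 'real' and 'predict'.
    result = hotspot(toy, dimensions=['dc', 'isp'], pt=0.67, m=50, scoring='gps')
    print(result)  # dict with keys 'layer', 'cuboid', 'score', 'elements'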
|
shaido987/riskloc
|
algorithms/hotspot.py
|
hotspot.py
|
py
| 8,100 |
python
|
en
|
code
| 93 |
github-code
|
6
|
37175527133
|
from jd.api.base import RestApi
class ComJdStockShopGlobalWebOpenWarehouseFacadeAddStoreRequest(RestApi):
def __init__(self,domain,port=80):
"""
"""
RestApi.__init__(self,domain, port)
self.storeName = None
self.remark = None
self.venderId = None
self.storeId = None
def getapiname(self):
return 'jingdong.com.jd.stock.shop.global.web.open.WarehouseFacade.addStore'
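# A small, hypothetical usage sketch: the domain and field values below are
# placeholders, not real JD endpoints or identifiers.
if __name__ == '__main__':
    req = ComJdStockShopGlobalWebOpenWarehouseFacadeAddStoreRequest('api.jd.com')
    req.venderId = 123456
    req.storeName = 'example store'
    req.remark = 'created via SDK'
    print(req.getapiname())  # jingdong.com.jd.stock.shop.global.web.open.WarehouseFacade.addStore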
|
PsKs/jd-sdk
|
jd/api/rest/ComJdStockShopGlobalWebOpenWarehouseFacadeAddStoreRequest.py
|
ComJdStockShopGlobalWebOpenWarehouseFacadeAddStoreRequest.py
|
py
| 405 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22093759735
|
import pandas as pd
from sklearn import svm
import statistics
data = pd.read_csv('cleaned_LaptopDataset.csv')
t = statistics.median(data['latest_price'])
h = []
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
for x in data.latest_price:
if (x >= t):
h.append(1)
else:
h.append(0)
data['latest_price'] = h
for col in data:
data[col] = le.fit_transform(data[col])
########Train-test Dataset#######
x = data.drop('latest_price', axis=1)
y = data['latest_price']
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
######## SVM Using Linear Kernel #######
classifier = svm.SVC(kernel="linear", C=1, gamma=1)
classifier.fit(x_train, y_train)
predictions = classifier.predict(x_test)
########SVM Accuracy#######
from sklearn.metrics import accuracy_score
acc = accuracy_score(y_test, predictions)
print("accuracy of SVM " , acc*100,"%")
|
mohamedezzeldeenhassanmohamed/Data-Mining-Project
|
svm.py
|
svm.py
|
py
| 1,007 |
python
|
en
|
code
| 0 |
github-code
|
6
|
720316267
|
# BFD - Bidirectional Forwarding Detection - RFC 5880, 5881
# scapy.contrib.description = BFD
# scapy.contrib.status = loads
from scapy.packet import *
from scapy.fields import *
from scapy.all import * # Otherwise failing at the UDP reference below
class BFD(Packet):
name = "BFD"
fields_desc = [
BitField("version" , 1 , 3),
BitField("diag" , 0 , 5),
BitField("sta" , 3 , 2),
FlagsField("flags", 0x00, 6, ['P', 'F', 'C', 'A', 'D', 'M']),
XByteField("detect_mult", 0x03),
XByteField("len", 24),
BitField("my_discriminator" , 0x11111111 , 32),
BitField("your_discriminator" , 0x22222222 , 32),
BitField("min_tx_interval" , 1000000000, 32),
BitField("min_rx_interval" , 1000000000, 32),
BitField("echo_rx_interval" , 1000000000, 32) ]
def mysummary(self):
        return self.sprintf("BFD (my_disc=%BFD.my_discriminator%, your_disc=%BFD.your_discriminator%)")
bind_layers(UDP, BFD, dport=3784)
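# A minimal, hypothetical usage sketch: build a BFD control packet on top of
# UDP/3784 and inspect it; the destination address and discriminators below are
# placeholder values.
if __name__ == '__main__':
    pkt = IP(dst="192.0.2.1") / UDP(sport=49152, dport=3784) / BFD(my_discriminator=1, your_discriminator=0)
    pkt.show()
    print(pkt.summary())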
|
p4lang/scapy-vxlan
|
scapy/contrib/bfd.py
|
bfd.py
|
py
| 1,129 |
python
|
en
|
code
| 33 |
github-code
|
6
|
31472363916
|
# Code adapted from https://www.codeproject.com/Articles/5297227/Deep-Learning-for-Fashion-Classification
# import tensorflow.keras as keras
import os
import matplotlib.pyplot as plt
import matplotlib.image as img
import tensorflow as tf
import keras
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))
data = '/home/lunet/conce/Downloads/Codeproject/DeepFashion Custom/DeepFashion/Train'
os.chdir(data)
batch_size = 3
def DataLoad(shape, preprocessing):
"""Create the training and validation datasets for a given image shape."""
img_data = ImageDataGenerator(
preprocessing_function=preprocessing,
horizontal_flip=True,
validation_split=0.1,
)
height, width = shape
train_dataset = img_data.flow_from_directory(
os.getcwd(),
target_size=(height, width),
classes=['Blazer', 'Blouse', 'Cardigan', 'Dress', 'Jacket',
'Jeans', 'Jumpsuit', 'Romper', 'Shorts', 'Skirts', 'Sweater', 'Sweatpants'
, 'Tank', 'Tee', 'Top'],
batch_size=batch_size,
subset='training',
)
val_dataset = img_data.flow_from_directory(
os.getcwd(),
target_size=(height, width),
classes=['Blazer', 'Blouse', 'Cardigan', 'Dress', 'Jacket',
'Jeans', 'Jumpsuit', 'Romper', 'Shorts', 'Skirts', 'Sweater',
'Sweatpants', 'Tank', 'Tee', 'Top'],
batch_size=batch_size,
subset='validation'
)
return train_dataset, val_dataset
vgg16 = keras.applications.vgg16
conv_model = vgg16.VGG16(weights='imagenet', include_top=False)
conv_model.summary()
train_dataset, val_dataset = DataLoad((224, 224), preprocessing=vgg16.preprocess_input)
# Grab one batch of training images and labels (handy for plotting them inside a Jupyter notebook)
X_train, y_train = next(train_dataset)
# Load ImageNet weights of this network, to be used during the transfer learning
conv_model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
# flatten the output of the convolutional part:
x = keras.layers.Flatten()(conv_model.output)
# three hidden layers
x = keras.layers.Dense(100, activation='relu')(x)
x = keras.layers.Dense(100, activation='relu')(x)
x = keras.layers.Dense(100, activation='relu')(x)
# final softmax layer with 15 categories
predictions = keras.layers.Dense(15, activation='softmax')(x)
# creating the full model:
full_model = keras.models.Model(inputs=conv_model.input, outputs=predictions)
full_model.summary()
for layer in conv_model.layers:
layer.trainable = False
full_model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adamax(lr=0.001), metrics=['acc'])
history = full_model.fit_generator(
train_dataset,
validation_data=val_dataset,
workers=0,
epochs=7,
)
def plot_history(history, yrange):
"""Plot loss and accuracy as a function of the epoch, for the training and validation datasets.
"""
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
# Get number of epochs
epochs = range(len(acc))
# Plot training and validation accuracy per epoch
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.ylim(yrange)
# Plot training and validation loss per epoch
plt.figure()
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
plt.show()
plot_history(history, yrange=(0.9, 1))
test_data = '/home/lunet/conce/Downloads/Codeproject/DeepFashion Custom/DeepFashion/Train'
test_datagen = ImageDataGenerator()
test_generator = test_datagen.flow_from_directory(test_data, target_size=(224, 224), batch_size=3, class_mode='categorical')
# X_test, y_test = next(test_generator)
test_results = full_model.evaluate(test_generator)
print("test loss, test acc:", test_results)
|
nnanna217/msc-image-search
|
func/my_samples/cp_fashion-classifier.py
|
cp_fashion-classifier.py
|
py
| 4,162 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10713768489
|
from scipy import stats, signal
from collections import defaultdict
import numpy as np
from tqdm.notebook import tqdm
import pandas as pd
from src import config
from src.FFT import FFTAnalysis as FFT
def _extractTimeDomainFeatures(sig):
''' Extracts time domain features from one vibration signal'''
# Get time features
features = dict()
if sig is not None:
rmsVal = np.sqrt(np.square(sig).mean())
sigMax = sig.max()
absSig = np.abs(sig)
absMean = absSig.mean()
features = {
'mean': sig.mean(),
'min': sig.min(),
'max': sigMax,
'std': np.std(sig),
'skew': stats.skew(sig),
'kurt': stats.kurtosis(sig),
'rms': rmsVal,
'p2p': sigMax - sig.min(),
'crestF': sigMax / rmsVal,
'impulseF': absSig.max() / absMean,
'shapeF': rmsVal / absMean
}
return features
def _getSubBand(FFTfreqs, FFTlevels, band):
    '''Extract the portion of the FFT corresponding to the given frequency band.'''
idxKeep = (FFTfreqs >= band[0]) & (FFTfreqs < band[1])
freqs = FFTfreqs[idxKeep]
levels = FFTlevels[idxKeep]
return freqs, levels
def _extractPeaks(freqs, levels, distance):
''' Extracts peaks from an FFT '''
peakIdx, _ = signal.find_peaks(levels, distance = distance)
peakLevels = levels[peakIdx]
peakFreqs = freqs[peakIdx]
return peakFreqs, peakLevels
def _getTopPeaks(levels, freqs, noPeaks):
''' Extract top <n> peaks from an FFT '''
# Sort peak indices from highest to lowest level
sortIdx = np.argsort(levels)
sortIdx = np.flip(sortIdx)
# Grab top <n> peaks
levels = levels[sortIdx[0:noPeaks]]
freqs = freqs[sortIdx[0:noPeaks]]
return freqs, levels
def _extractFrequencyDomainFeatures(sig, FFTsettings,
noPeaks = config.FBAND_PEAKS,
peakDist = config.PEAK_DIST,
fs = config.Fs,
fBands = config.FBANDS):
''' Extracts frequency domain features from one vibration signal'''
FFTfreqs, FFTlevels = None, None
features = defaultdict()
if sig is not None:
        FFTfreqs, FFTlevels = FFT(sig, fs, FFTsettings)
# Split in bands
for bandNo, band in enumerate(fBands):
freqs, levels = _getSubBand(FFTfreqs, FFTlevels, band)
freqs, levels = _extractPeaks(freqs, levels, peakDist)
freqs, levels = _getTopPeaks(levels, freqs, noPeaks)
# Add peaks from current band to the dictionary with the features
for peakNo in range(noPeaks):
featName = f'band_{bandNo + 1}_peak_{peakNo+1}_level'
features[featName] = levels[peakNo]
featName = f'band_{bandNo + 1}_peak_{peakNo+1}_freq'
features[featName] = freqs[peakNo]
return features
def _extractFeatures(sig, FFTsettings):
    '''Extracts time- and frequency-domain features from one vibration signal.'''
feats = _extractTimeDomainFeatures(sig)
freqFeats = _extractFrequencyDomainFeatures(sig, FFTsettings)
feats.update(freqFeats)
return feats
def extractDatasetFeatures(df, FFTsettings = config.FFT_SETTINGS):
''' Extracts features from the entire dataset '''
# Extract features from every experiment
driveFeats, fanFeats = [], []
for idx, record in tqdm(df.iterrows(), total = df.shape[0]):
FFTsettings['HPCutoffFrequency'] = 20 * record['MotorSpeed_rpm'] / 60
FFTsettings['LPCutoffFrequency'] = 10 * record['MotorSpeed_rpm'] / 60
driveFeats.append(_extractFeatures(record['DriveVibs'], FFTsettings))
fanFeats.append(_extractFeatures(record['FanVibs'], FFTsettings))
# Make dataframes with the extracted features
dfDrive = pd.DataFrame(driveFeats)
dfFan = pd.DataFrame(fanFeats)
# Add corresponding labels
dfDrive['label'] = df['DriveLabel']
dfFan['label'] = df['FanLabel']
# Remove rows with missing records for fan-end bearing
dfFan.dropna(axis = 0, how = 'any', inplace = True)
return dfDrive, dfFan
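# A small usage sketch: the time-domain extractor is self-contained, so it can be
# exercised directly on a random signal standing in for a real vibration recording.
# The full pipeline (extractDatasetFeatures) additionally expects a DataFrame with
# 'DriveVibs', 'FanVibs', 'MotorSpeed_rpm', 'DriveLabel' and 'FanLabel' columns,
# plus the settings from src.config.
if __name__ == '__main__':
    demo_signal = np.random.randn(4096)
    print(_extractTimeDomainFeatures(demo_signal))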
|
Miltos-90/Bearing_Fault_Classification
|
src/feature_extraction.py
|
feature_extraction.py
|
py
| 4,432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34668621923
|
from django.urls import path
from django.conf.urls import url
from . import views
app_name = 'choistick'
urlpatterns = [
path('', views.index, name='index'),
path('map/', views.map, name='map'),
path('join/', views.signup, name='join'),
path('pick/', views.pick, name='pick'),
path('warn/', views.warn, name='warn'),
# url(r'^login/$', views.signin, name='login'),
url(r'^login/$', views.signin, name='login'),
]
|
jaemin8852/Search_Location
|
choistick/urls.py
|
urls.py
|
py
| 421 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13300386404
|
#!/usr/bin/python3
import _thread
import re, time, cv2, serial
'''
ServoController interfaces with the arduino board to control the servo motor over
USB serial coms
'''
class ServoController:
def __init__(self):
self.ser = serial.Serial('com3', 9600, timeout=0.5)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.ser.close()
def servo_read_position(self):
self.ser.write(b'r')
pos = str(self.ser.readline())
        # The first read is always empty (sometimes the first several are),
        # so call recursively until numbers come back from the Arduino.
while pos == "b''":
pos = self.servo_read_position()
print("pos_remote_read: " + str(pos))
m = re.search('\d+', str(pos))
pos = int(m.group(0))
return pos
def servo_set_position(self, pos):
print("ad pos: " +str(pos))
self.ser.write(bytes(str(pos), 'utf8'))
self.ser.write(b'\n')
print("pos_remote: " + str(self.ser.readline()))
'''
SerialServo controls camera rotation and motion detection
Currently written to sweep the camera, wait and detect motion, only continue sweeping once no motion detected
'''
class CameraController:
def camera_frame_grab(self):
ret, frame = self.cap.read()
cv2.imshow(self.frameName, frame)
cv2.waitKey(1)
def camera_rotate(self, start, stop, step_delay=0.2):
        # If the stop angle is bigger than the start angle, increment; otherwise decrement.
        # The stop angle is clamped so the servo stays within roughly 25-165 degrees.
if start <= stop:
if stop < 25:
stop = 25
direction = 1
else:
if stop > 165:
stop = 165
direction = -1
for pos in range(start, stop, direction):
self.camera_frame_grab()
self.sc.servo_set_position(pos)
time.sleep(step_delay)
def camera_sweep(self):
self.camera_rotate(45, 180)
self.camera_rotate(180, 45)
def camera_left(self, degrees):
pos = self.sc.servo_read_position()
if pos <= 180:
self.camera_rotate(pos, pos + degrees)
def camera_right(self, degrees):
pos = self.sc.servo_read_position()
if pos > 0:
self.camera_rotate(pos, pos - degrees)
def detect_motion(self, duration):
start_time = time.time()
while (True):
ret, frame = self.cap.read()
if self.motionSense:
end_time = time.time()
                # Keep checking for motion for 'duration' seconds; stop once the time has passed and no motion is detected
if(end_time - start_time) > duration and count == 1:
print("Finished recording " + str(duration))
break
fgmask = self.fgbg.apply(frame)
# Generate and prepare the threshold image, and find contours with it.
ret, thresh = cv2.threshold(fgmask, 127, 255, 0)
thresh = cv2.dilate(thresh, None, iterations=2)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Count contours with large areas, use them for movement simple detection
count = 1
for c in contours:
if cv2.contourArea(c) < 500:
continue
count += count
# optionally draw bounding boxes around the detected contours
if self.drawRect:
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow(self.frameName, frame)
k = cv2.waitKey(60) & 0xff
if k == 27:
break
def __init__(self, camera, servo=0, drawRect=0, motionSense=1):
if servo:
self.sc = ServoController()
self.drawRect = drawRect
self.motionSense = motionSense
self.frameName = "Camera " + str(camera)
self.cap = cv2.VideoCapture(camera)
self.fgbg = cv2.createBackgroundSubtractorMOG2()
ret, frame = self.cap.read()
cv2.imshow(self.frameName, frame)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
# When everything done, release the capture and serial port
self.cap.release()
cv2.destroyAllWindows()
def dynamic_camera(cam):
with CameraController(cam, 1) as cc:
while(True):
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_right(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
cc.camera_left(25)
cc.detect_motion(20)
def static_camera(cam):
print("Static Cam - no motion detection")
with CameraController(cam, drawRect=1, motionSense=0) as cc:
while (True):
cc.detect_motion(20)
if __name__ == "__main__":
try:
_thread.start_new_thread(dynamic_camera, (1, ))
_thread.start_new_thread(static_camera, (0, ))
except:
print("Error: unable to start thread")
while 1:
pass
|
bradys/cat-cam
|
Cat_Cam.py
|
Cat_Cam.py
|
py
| 6,213 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21806540252
|
from jetracer.nvidia_racecar import NvidiaRacecar
import time
import sys
from multiprocessing import Process, Value
import zmq
import Jetson.GPIO as GPIO
pinrun = 'DAP4_SCLK' #12
pinbouton = 'SPI2_SCK' #13
pinau = 'SPI2_CS1' #16
autrepin = 'SPI2_CS0' #18
GPIO.setmode(GPIO.TEGRA_SOC)
GPIO.setup(pinrun, GPIO.OUT)
GPIO.output(pinrun, 0)
GPIO.setup(pinau, GPIO.OUT)
GPIO.output(pinau, 0)
GPIO.setup(pinbouton, GPIO.IN)
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")
currentlyRunning = Value('b', False)
def RunScript(script, data):
#print("running " + script)
GPIO.output('DAP4_SCLK', 1)
data.value = True
exec(script)
print(f"currentlyRunning from process {currentlyRunning.value}")
print("Script terminé avec succès")
data.value = False
print(f"currentlyRunning from process {currentlyRunning.value}")
GPIO.output('DAP4_SCLK', 0)
def BumperChock(data):
print("Detection pare-chocs")
if currentlyRunning.value:
runThread.terminate() # sends a SIGTERM
        currentlyRunning.value = False
car.throttle = 0.001
car.throttle = 0
GPIO.output(pinrun, 0)
GPIO.output(pinau, 1)
data.value = False
car = NvidiaRacecar()
car.steering_gain = -0.65
car.steering_offset = -0.25
if car.steering_offset != -0.25 : exit()
print("Car ready")
GPIO.add_event_detect(pinbouton, GPIO.FALLING, callback=lambda x: BumperChock(currentlyRunning), bouncetime=10)
while True:
try:
print("en attente recv...")
message = socket.recv()
GPIO.output(pinau, 0)
socket.send(b"OK")
message = message.decode("utf-8")
#print("Received request: %s" % message)
f = open("/KDesir_Tests/logging.txt", "a")
t = time.strftime('%d/%m/%Y-%H:%M:%S', time.localtime()) + ","
log = message.replace("\n", "\n" + t)
f.write(t + log + "\n")
f.close()
#print(message)
if "ArretUrgence" in message:
runThread.terminate() # sends a SIGTERM
#socket.send(b"AU_Done")
print("Arrêt d'urgence déclenché")
            currentlyRunning.value = False
raise
else:
print(f"currentlyRunning from main script {currentlyRunning.value}")
if not currentlyRunning.value:
print(f"currentlyRunning {currentlyRunning.value}")
runThread=Process(target=RunScript,args=(message, currentlyRunning))
runThread.start()
else:
print("Impossible d'exécuter le script car un autre est déjà en cours")
except Exception as e:
print(e)
car.throttle = 0.001
car.throttle = 0
GPIO.output(pinrun, 0)
GPIO.output(pinau, 1)
#finally:
# GPIO.cleanup()
sys.exit("Fin du programme")
|
SpaceLabsfr/BlockApp
|
serveur-blockapp.py
|
serveur-blockapp.py
|
py
| 2,553 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40678810913
|
import argparse
import json
import os
import platform
import subprocess
from typing import List
HOST_MAGMA_ROOT = '../../../.'
def main() -> None:
""" Run main"""
args = _parse_args()
if args.mount:
_run(['up', '-d', 'test'])
_run(['exec', 'test', 'bash'])
_down(args)
elif args.lint:
_run(['up', '-d', 'test'])
_run(['exec', 'test', 'make', 'lint'])
_down(args)
elif args.precommit:
_run(['up', '-d', 'test'])
_run(['exec', 'test', 'make', 'precommit'])
_down(args)
elif args.coverage:
_run(['up', '-d', 'test'])
_run(['exec', 'test', 'make', 'cover'])
_down(args)
elif args.tests:
_run(['up', '-d', 'test'])
_run(['exec', 'test', 'make', 'test'])
_down(args)
elif args.health:
# _set_mac_env_vars is needed to override LOG_DRIVER for mac
_set_mac_env_vars()
_run(['-f', 'docker-compose.yml', '-f', 'docker-compose.override.yml', '-f', 'docker-compose.health.override.yml', 'up', '-d'])
_run_health()
_down(args)
elif args.git:
print(json.dumps(_run_get_git_vars(), indent=4, sort_keys=True))
else:
_run(['build'] + _get_default_build_args(args))
_down(args)
def _run(cmd: List[str]) -> None:
""" Run the required docker compose command """
cmd = ['docker', 'compose', '--compatibility'] + cmd
print("Running '%s'..." % ' '.join(cmd))
try:
subprocess.run(cmd, check=True) # noqa: S603
except subprocess.CalledProcessError as err:
exit(err.returncode)
def _down(args: argparse.Namespace) -> None:
if args.down:
_run(['down'])
def _get_default_build_args(args: argparse.Namespace) -> List[str]:
ret = []
git_info = _run_get_git_vars()
for arg, val in git_info.items():
ret.append("--build-arg")
ret.append("{0}={1}".format(arg, val))
if args.nocache:
ret.append('--no-cache')
return ret
def _run_get_git_vars():
try:
cmd = "tools/get_version_info.sh"
cmd_res = \
subprocess.run(cmd, check=True, capture_output=True) # noqa: S603
except subprocess.CalledProcessError as err:
print("Error _run_get_git_vars")
exit(err.returncode)
return json.loads(cmd_res.stdout)
def _run_health():
try:
cmd = "tools/docker_ps_healthcheck.sh"
subprocess.run(cmd, check=True) # noqa: S603
except subprocess.CalledProcessError as err:
print("Error _run_health")
exit(err.returncode)
def _set_mac_env_vars():
if (platform.system().lower() == "darwin"):
os.environ['LOG_DRIVER'] = "json-file"
def _parse_args() -> argparse.Namespace:
""" Parse the command line args """
# There are multiple ways to invoke finer-grained control over which
# images are built.
#
# (1) How many images to build
#
# all: all images
# default: images required for minimum functionality
# - excluding metrics images
# - including postgres, proxy, etc
#
# (2) Of the core orc8r images, which modules to build
#
# Defaults to all modules, but can be further specified by targeting a
# deployment type.
parser = argparse.ArgumentParser(description='Orc8r build tool')
# Run something
parser.add_argument(
'--tests', '-t',
action='store_true',
help='Run unit tests',
)
parser.add_argument(
'--mount', '-m',
action='store_true',
help='Mount the source code and create a bash shell',
)
parser.add_argument(
'--precommit', '-c',
action='store_true',
help='Mount the source code and run pre-commit checks',
)
parser.add_argument(
'--coverage', '-o',
action='store_true',
help='Generate test coverage statistics',
)
parser.add_argument(
'--lint', '-l',
action='store_true',
help='Run lint test',
)
parser.add_argument(
'--health', '-e',
action='store_true',
help='Run health test',
)
# Run something
parser.add_argument(
'--git', '-g',
action='store_true',
help='Get git info',
)
# How to do it
parser.add_argument(
'--nocache', '-n',
action='store_true',
help='Build the images with no Docker layer caching',
)
parser.add_argument(
'--down', '-down',
action='store_true',
default=False,
        help='Bring the containers down after running',
)
return parser.parse_args()
if __name__ == '__main__':
main()
|
magma/magma
|
feg/gateway/docker/build.py
|
build.py
|
py
| 4,696 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
27213803395
|
def solution(n, words):
word_set = {words[0]}
n_cnt = [0] * (n + 1)
n_cnt[1] += 1
for i in range(1, len(words)):
num = i % n + 1
if words[i][0] != words[i - 1][-1] or words[i] in word_set:
return [num, n_cnt[num] + 1]
else:
word_set.add(words[i])
n_cnt[num] += 1
return [0, 0]
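# A quick sanity check on a standard example for this problem: with three players,
# the repeated word "tank" knocks out player 3 on their third turn, so the expected
# answer is [3, 3].
if __name__ == '__main__':
    words = ["tank", "kick", "know", "wheel", "land", "dream", "mother", "robot", "tank"]
    print(solution(3, words))  # [3, 3]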
|
hammii/Algorithm
|
Programmers_python/영어_끝말잇기.py
|
영어_끝말잇기.py
|
py
| 360 |
python
|
en
|
code
| 2 |
github-code
|
6
|
34861772057
|
from statuspage.forms import StatusPageModelForm
from utilities.forms import StaticSelect
from ..models import UptimeRobotMonitor
__all__ = (
'UptimeRobotMonitorForm',
)
class UptimeRobotMonitorForm(StatusPageModelForm):
fieldsets = (
('UptimeRobot Monitor', (
'component', 'metric', 'paused',
)),
)
class Meta:
model = UptimeRobotMonitor
fields = (
'component', 'metric', 'paused',
)
widgets = {
'component': StaticSelect(),
'metric': StaticSelect(),
}
|
Status-Page/Status-Page
|
statuspage/sp_uptimerobot/forms/models.py
|
models.py
|
py
| 578 |
python
|
en
|
code
| 45 |
github-code
|
6
|
5308571260
|
n, c = map(int, input().split())
location = []
for _ in range(n):
location.append(int(input()))
location.sort()
# minimum gap
start = location[1]-location[0]
# maximum gap
end = location[-1] - location[0]
result = 0
while (start <= end):
    # middle of the current gap range
mid = (start + end) // 2
value = location[0]
cnt = 1
    # try installing routers with gap = mid
for i in range(1, n):
        # if the next house is at least mid away from the current position, a new router must be installed
if location[i] >= value + mid:
            # update the current position
value = location[i]
cnt += 1
    # if the number of routers installed is at least the number required
if cnt >= c:
start = mid + 1
result = mid
else:
end = mid - 1
print(result)
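# Worked example (as a comment, since the script reads from stdin): for input
#   5 3
#   1 2 8 4 9
# the sorted positions are [1, 2, 4, 8, 9]; with gap 3 routers fit at 1, 4 and 8
# (count 3), while gap 4 only fits 2, so the binary search prints 3.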
|
louisuss/Algorithms-Code-Upload
|
Python/DongbinBook/binary_search/find_router.py
|
find_router.py
|
py
| 810 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
36076011155
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request,jsonify, redirect,url_for
from json import dumps
import celery , sys
from celeryconfig import appcelery
from Buscador import tasks
import time, json
app = Flask(__name__)
@app.route('/datos', methods=['GET', 'POST'])
def recibirInformacion():
resultados=[]
expedientes=[]
expedientes=convertirAstring(request.args.get('expedientes'))
informacion = tratamientosDatos(expedientes,request.args.get('array'))
resultados=informacion.unirVectores()
return jsonify(resultados)
def convertirAstring(argumentos):
argumentos=argumentos.replace(' ','')
argumentos=argumentos.replace('"','')
argumentos=argumentos.replace(']','')
argumentos=argumentos.replace('[','')
argumentos=argumentos.split(',',argumentos.count(','))
return argumentos
class tratamientosDatos():
def __init__(self, resultados_ncbi, resultados_array):
self.datos_ncbi=resultados_ncbi[:]
self.resultados_array=resultados_array
def almacenar_datos_visualizacion_array(self):
visualizacion_array=[]
for i in appcelery.AsyncResult(self.resultados_array).get()['experiments']['experiment']:
visualizacion_array.append({'id': i['id'],
'accession': i['accession'],
'name': i['name'], 'releasedate': i['releasedate'],
'description': i['description'][0]['text'],'bd': 'arrayexpress', 'descarga': "null" })
return visualizacion_array
def almacenar_datos_visualizacion_ncbi(self):
tam_list=len(self.datos_ncbi)
visualizacion_ncbi=[]
for j in range(tam_list):
i=appcelery.AsyncResult(self.datos_ncbi[j])
identificador=i.get()['result']['uids'][0]
visualizacion_ncbi.append({'id': identificador,
'accession':i.get()['result'][identificador]['accession'],
'name': i.get()['result'][identificador]['title'],
'releasedate': i.get()['result'][identificador]['pdat'],
'description': i.get()['result'][identificador]['summary'],
'bd': 'ncbi_gds', 'descarga': i.get()['result'][identificador]['ftplink'] })
return visualizacion_ncbi
def unirVectores(self):
vector_ncbi=[]
vector_array=[]
vector_ncbi=self.almacenar_datos_visualizacion_ncbi()
vector_array=self.almacenar_datos_visualizacion_array()
for i in vector_array:
vector_ncbi.append(i)
return vector_ncbi
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=True)
|
AntonioAlcM/tfg_ugr
|
backend/tratamientoDatos.py
|
tratamientoDatos.py
|
py
| 2,620 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
44633342253
|
from typing import List
from torch import optim
from torch.optim.optimizer import Optimizer
from torch_geometric.data.data import Data
from src.dataset import citeSeer
from src.model import GAT
import torch
import torch.nn.functional as F
from torch_geometric.data import Dataset
EPOCH = 200
# --- dataloader
'''
The whole graph is used, so a dataloader is not needed.
'''
citeSeer_ds = citeSeer('data')
citeseer = citeSeer_ds[0]
# --- Model
gat_model = GAT(citeSeer_ds.num_features,
num_layers = 2,
hidden_dim = [64],
num_heads = [8],
output_dim = citeSeer_ds.num_classes)
# --- Setting
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
gat_model.to(device)
citeseer = citeseer.to(device)
optimizer = torch.optim.Adam(gat_model.parameters(), lr = 0.005, weight_decay = 5e-4)
criterion = F.nll_loss
# --- training
def train(data : Dataset) -> None:
gat_model.train()
optimizer.zero_grad()
out = gat_model(data.x, data.edge_index)
loss = criterion(out[data.train_mask], data.y[data.train_mask])
loss.backward()
optimizer.step()
@torch.no_grad()
def test(data : Dataset) -> dict:
gat_model.eval()
out= gat_model(data.x, data.edge_index)
acc_dic = {}
for name, mask in data('train_mask', 'test_mask', 'val_mask'):
acc = float((out[mask].argmax(-1) == data.y[mask]).sum() / mask.sum())
acc_dic[name[:-5]] = acc
return acc_dic
for epoch in range(EPOCH):
train(citeseer)
acc_dic = test(citeseer)
print(f"Epoch : {epoch+1:03d}, Train : {acc_dic['train']:.4f}, Test : {acc_dic['test']:.4f}")
|
February24-Lee/gnn_research
|
test_gat_exmaple.py
|
test_gat_exmaple.py
|
py
| 1,645 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18732949012
|
import json
import numpy as np
import util
class AudfprintAligner:
matches = {}
def __init__(self, matchfile):
with open(matchfile) as f:
for x, ys in json.load(f).iteritems():
for y, m in ys.iteritems():
m = m[0]
if "Matched" in m:
d = float(self.between(m, "Matched ", " s "))
t1 = float(self.between(m, " at ", " s "))
f1 = self.between(m, " in ", " to ")
t2 = float(self.between(m, " time ", " s "))
f2 = self.between(m, " in ", " with ", 1)
if f1 not in self.matches:
self.matches[f1] = {}
self.matches[f1][f2] = [t1, t2, d]
def between(self, string, s1, s2, index=0):
string = string.split(s1)[index+1]
return string[:string.find(s2)]
def get_alignment_points(self, file, reffile):
if file in self.matches:
if reffile in self.matches[file]:
t1, t2, d = self.matches[file][reffile]
filedur = util.get_duration(file)
refdur = util.get_duration(reffile)
delta_start = t2-t1
delta_end = delta_start+filedur #assume slope 1
return [delta_start, delta_end], 1
return None, 0
|
grateful-dead-live/meta-alignment
|
audfprint_aligner.py
|
audfprint_aligner.py
|
py
| 1,407 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24150609056
|
import requests
import random
from utils.others import get_atitle, get_genre, get_t_from_u, get_urls
from utils.anilist import Anilist
from utils.techzapi import TechZApi
def get_genre_html(li):
x = """<a>{}</a>"""
html = ""
for i in li:
html += x.format(i.strip())
return html
def get_eps_html(data=None, api: TechZApi = None, anime=None):
if not data:
anime = api.gogo_search(anime)[0].get("id").strip()
data = api.gogo_anime(anime).get("episodes")
x = """<a class="ep-btn" href="{}">{}</a>"""
html = ""
pos = 1
for i in data:
i = i.replace("-episode-", "/")
html += x.format(f"/episode/{i}", str(pos))
pos += 1
if api:
return html, anime
return html
def get_eps_html2(data):
x = """<a class="ep-btn" href="{}">{}</a>"""
html = ""
pos = 1
for i in data:
i = i.replace("-episode-", "/")
html += x.format(f"/episode/{i}", str(pos))
pos += 1
return html
ANIME_POS = """<a href="{}"><div class="poster la-anime"> <div id="shadow1" class="shadow"> <div class="dubb">{}</div><div class="dubb dubb2">{}</div></div><div id="shadow2" class="shadow"> <img class="lzy_img" src="https://cdn.jsdelivr.net/gh/TechShreyash/AnimeDex@main/static/img/loading.gif" data-src="{}"> </div><div class="la-details"> <h3>{}</h3> <div id="extra"> <span>{}</span> <span class="dot"></span> <span>{}</span> </div></div></div></a>"""
ANIME_POS2 = """<a href="{}"><div class="poster la-anime"> <div id="shadow1" class="shadow"> <div class="dubb">{}</div></div><div id="shadow2" class="shadow"> <img class="lzy_img" src="https://cdn.jsdelivr.net/gh/TechShreyash/AnimeDex@main/static/img/loading.gif" data-src="{}"> </div><div class="la-details"> <h3>{}</h3> <div id="extra"> <span>{}</span> </div></div></div></a>"""
def animeRecHtml(data):
if not data:
return "Not Available"
if len(data) == 0:
return "Not Available"
html = ""
for i in data.get("recommendations").get("edges"):
i = i.get("node").get("mediaRecommendation")
img = i.get("coverImage")
if img:
img = img.get("medium").replace("small", "medium")
else:
img = i.get("bannerImage")
title = get_atitle(i.get("title"))
url = get_urls(title)
x = ANIME_POS.format(
url,
str(i.get("meanScore")).strip() + " / 100",
"Ep " + str(i.get("episodes")).strip(),
img,
title,
i.get("format"),
i.get("status"),
)
if x not in html:
html += x
return html
def animeRecHtml2(data):
if not data:
return "Not Available"
if len(data) == 0:
return "Not Available"
html = ""
for i in data:
i = i.get("node").get("mediaRecommendation")
img = i.get("coverImage")
if img:
img = img.get("medium").replace("small", "medium")
else:
img = i.get("bannerImage")
title = get_atitle(i.get("title"))
url = get_urls(title)
x = ANIME_POS.format(
url,
str(i.get("meanScore")).strip() + " / 100",
"Ep " + str(i.get("episodes")).strip(),
img,
title,
i.get("format"),
i.get("status"),
)
if x not in html:
html += x
return html
def get_trending_html(data):
html = ""
for id, i in data:
try:
img = i[5]
title = i[0]
url = get_urls(id)
x = ANIME_POS.format(url, i[1], "Ep " + str(i[2]), img, title, i[3], i[4])
html += x
except:
pass
return html
def get_search_html(data):
html = ""
for i in data:
if "dub" in i.get("id").lower():
d = "DUB"
else:
d = "SUB"
x = ANIME_POS2.format(
"/anime/" + i.get("id"),
d,
i.get("img"),
i.get("title"),
"Released: " + i.get("year"),
)
html += x
return html
def get_recent_html(data):
html = ""
for i in data:
url = i.get("id").split("-episode-")[0]
x = ANIME_POS.format(
f"/anime/{url}",
i.get("lang"),
"Ep " + str(i.get("episode")),
i.get("img"),
i.get("title"),
f"Latest {i.get('lang')}",
"HD",
)
html += x
return html
def get_selector_btns(url, current, episodes):
if episodes < 2:
return ""
selector = ""
if current == 1:
x = """<a class="btns" href="usrl"><button class="sbtn inline-flex text-white bg-indigo-500 border-0 py-2 px-6 focus:outline-none hover:bg-indigo-600 rounded text-lg ">Episode NEXT<i style="margin-left:10px; margin-right: auto;" class="fa fa-arrow-circle-right"></i></button></a>"""
selector += x.replace("usrl", url + str(current + 1)).replace(
"NEXT", str(current + 1)
)
elif current == episodes:
x = """<a class="btns" href="usrl"><button class="sbtn inline-flex text-white bg-indigo-500 border-0 py-2 px-6 focus:outline-none hover:bg-indigo-600 rounded text-lg "><i class="fa fa-arrow-circle-left"></i>Episode PREV</button></a>"""
selector += x.replace("usrl", url + str(current - 1)).replace(
"PREV", str(current - 1)
)
else:
x = """<a class="btns" href="usrl"><button class="sbtn inline-flex text-white bg-indigo-500 border-0 py-2 px-6 focus:outline-none hover:bg-indigo-600 rounded text-lg "><i class="fa fa-arrow-circle-left"></i>Episode PREV</button></a>"""
selector += x.replace("usrl", url + str(current - 1)).replace(
"PREV", str(current - 1)
)
x = """<a class="btns" href="usrl"><button class="sbtn inline-flex text-white bg-indigo-500 border-0 py-2 px-6 focus:outline-none hover:bg-indigo-600 rounded text-lg ">Episode NEXT<i style="margin-left:10px; margin-right: auto;" class="fa fa-arrow-circle-right"></i></button></a>"""
selector += x.replace("usrl", url + str(current + 1)).replace(
"NEXT", str(current + 1)
)
return selector
SLIDER_HTML = """<div class="mySlides fade"> <div class="data-slider"> <p class="spotlight">{}</p><h1>{}</h1> <div class="extra1"> <span class="year"><i class="fa fa-play-circle"></i>{}</span> <span class="year year2"><i class="fa fa-calendar"></i>{}</span> <span class="cbox cbox1">{}</span> <span class="cbox cbox2">HD</span> </div><p class="small-synop">{}</p><div id="watchh"> <a href="{}" class="watch-btn"> <i class="fa fa-play-circle"></i> Watch Now </a> <a href="{}" class="watch-btn watch-btn2"> <i class="fa fa-info-circle"></i> Details<i class="fa fa-angle-right"></i> </a> </div></div><div class="shado"> <a href="{}"></a> </div><img src="{}"> </div>"""
def slider_gen():
data = Anilist().trending()
random.shuffle(data)
html = ""
pos = 1
for i in data:
img = i.get("bannerImage")
if not img:
img = (
i.get("coverImage")
.get("medium")
.replace("small", "large")
.replace("medium", "large")
)
title = get_atitle(i.get("title"))
url = get_urls(title)
temp = SLIDER_HTML.format(
f"#{pos} Spotlight",
title,
i.get("type"),
i.get("status"),
get_genre(i.get("genres")),
i.get("description"),
url.replace("/anime/", "/episode/") + "/1",
url,
url,
img,
)
html += temp
pos += 1
return html
def episodeHtml(episode, title, dl=True):
isSub = episode.get("SUB")
isDub = episode.get("DUB")
DL = episode.get("DL")
sub = dub = dlsub = dldub = ""
defa = 0
s, d = 1, 1
if isSub:
for i in isSub:
if defa == 0:
defa = f"/embed?url={i}&title={title}"
sub += f"""<div class="sitem"> <a class="sobtn sactive" onclick="selectServer(this)" data-value="/embed?url={i}&title={title}">Server{s}</a> </div>"""
else:
sub += f"""<div class="sitem"> <a class="sobtn" onclick="selectServer(this)" data-value="/embed?url={i}&title={title}">Server{s}</a> </div>"""
s += 1
if isDub:
for i in isDub:
if defa == 0:
defa = f"/embed?url={i}&title={title}"
dub += f"""<div class="sitem"> <a class="sobtn sactive" onclick="selectServer(this)" data-value="/embed?url={i}&title={title}">Server{d}</a> </div>"""
else:
dub += f"""<div class="sitem"> <a class="sobtn" onclick="selectServer(this)" data-value="/embed?url={i}&title={title}">Server{d}</a> </div>"""
d += 1
if DL:
link = DL.get("SUB")
if link:
for n, l in link.items():
dlsub += f"""<div class="sitem"> <a class="sobtn download" target="_blank" href="{l}"><i class="fa fa-download"></i>{n}</a> </div>"""
link = DL.get("DUB")
if link:
for n, l in link.items():
dldub += f"""<div class="sitem"> <a class="sobtn download" target="_blank" href="{l}"><i class="fa fa-download"></i>{n}</a> </div>"""
if sub != "":
t4 = f"""<div class="server"> <div class="stitle"> <i class="fa fa-closed-captioning"></i>SUB: </div><div class="slist">{sub}</div></div>"""
else:
t4 = ""
if dub != "":
t5 = f""" <div class="server sd"> <div class="stitle"> <i class="fa fa-microphone-alt"></i>DUB: </div><div class="slist">{dub}</div></div>"""
else:
t5 = ""
if dlsub != "":
t6 = f""" <div class="server"> <div class="stitle"> <i class="fa fa-closed-captioning"></i>SUB: </div><div class="slist">{dlsub}</div></div>"""
else:
t6 = ""
if dldub != "":
t7 = f""" <div class="server sd"> <div class="stitle"> <i class="fa fa-microphone-alt"></i>DUB: </div><div class="slist">{dldub}</div></div>"""
else:
t7 = ""
t8 = f"""<a id="showdl" onclick="showDownload()"><i class="fa fa-download"></i>Download</a><div id="dldiv" class="dldiv"><h4 id="download">Download Links:</h4>{t6}{t7}</div>"""
html = t4 + t5
if dl:
html += t8
return html, defa
|
TechShreyash/AnimeDex
|
utils/html_gen.py
|
html_gen.py
|
py
| 10,463 |
python
|
en
|
code
| 186 |
github-code
|
6
|
5991002670
|
from collections import OrderedDict
from itertools import chain
from .types import Vsn, MatrixID, PacketClass
from .patches import patch
from .cache import from_page, get_page
from .sources import version_urls
from .parsers import pre_versions, pre_packets, rel_version, rel_packets
from .parsers import first_heading
__all__ = ('version_packet_ids',)
# Returns matrix with matrix[version][packet_class] = matrix_id
@from_page(dep=(first_heading,pre_versions,pre_packets,rel_version,rel_packets),
rdoc='Recalculate the packet ID matrix. Give if the version_urls dict\n'
'or the code of version_packet_ids() have been changed.', doc_order=-2)
def version_packet_ids():
"""Return a dict mapping `Vsn' instances to dicts mapping `PacketClass'
instances to `MatrixID' instances, giving the matrix of packet IDs as
they vary across packets and across protocol versions."""
used_patches = set()
packet_classes = {}
matrix = OrderedDict()
prev_v = None
for v, url in reversed(version_urls.items()):
with get_page(url) as page:
heading = first_heading(page)
if heading == 'Pre-release protocol':
vdiff = pre_versions(page, v)
if (v, vdiff) in patch:
used_patches.add((v, vdiff))
vdiff = patch[v, vdiff]
from_v, to_v = vdiff
assert v == to_v, '%r != %r' % (v, to_v)
matrix[v] = {}
matrix.move_to_end(v, last=False)
seen_names = {}
all_pre_packets = pre_packets(page, v)
if (v, None) in patch:
all_pre_packets = chain(all_pre_packets, patch[(v, None)])
used_patches.add((v, None))
for packet in all_pre_packets:
if (v, packet) in patch:
used_patches.add((v, packet))
packet = packet.patch(patch[v, packet])
if packet is None: continue
assert packet.name not in seen_names, \
'[%s] Duplicate packet name:\n%s\n%s' % \
(v.name, seen_names[packet.name], packet)
seen_names[packet.name] = packet
packet_class = PacketClass(
name=packet.name, state=packet.state, bound=packet.bound)
if packet.name not in packet_classes:
packet_classes[packet.name] = packet_class
assert packet_class == packet_classes[packet.name], \
'[%s] %r != %r' % (v.name, packet_class, packet_classes[packet.name])
if packet.old_id is None:
assert packet_class not in matrix[from_v], \
'[%s] %r in matrix[%r]' % (v.name, packet_class, from_v)
else:
if packet_class not in matrix[from_v]:
msg = '[%s] [0x%02X] %r not in matrix[%r]' % (
v.name, packet.old_id, packet_class, from_v)
for from_pcls, from_mid in matrix[from_v].items():
if (from_pcls.state, from_pcls.bound, from_mid.id) \
== (packet_class.state, packet_class.bound, packet.old_id):
msg += '\n(however, matrix[%r][%r].id == 0x%02X)' % (
from_v, from_pcls, packet.old_id)
break
raise AssertionError(msg)
assert packet.old_id == matrix[from_v][packet_class].id, \
'[%s] 0x%02X != matrix[%r][%r].id == 0x%02X' % (
v.name, packet.old_id, from_v, packet_class,
matrix[from_v][packet_class].id)
if packet.url is not None:
url = packet.url
elif not packet.changed and from_v and packet_class in matrix[from_v]:
url = matrix[from_v][packet_class].url
else:
url = None
if packet.new_id is not None:
matrix[v][packet_class] = MatrixID(
id=packet.new_id, base_ver=from_v,
changed=packet.changed, html=packet.html, url=url)
for packet_class, id in matrix[from_v].items():
if packet_class.name in seen_names: continue
matrix[v][packet_class] = id._replace(
base_ver=from_v, changed=False)
elif heading == 'Protocol':
rel_v = rel_version(page)
if rel_v.name is None:
rel_v = Vsn(v.name, rel_v.protocol)
assert v == rel_v, '%r != %r' % (v, rel_v)
matrix[v] = {}
seen_names = {}
all_rel_packets = rel_packets(page, v)
if (v, None) in patch:
all_rel_packets = chain(all_rel_packets, patch[(v, None)])
used_patches.add((v, None))
for packet in all_rel_packets:
if (v, packet) in patch:
used_patches.add((v, packet))
packet = packet.patch(patch[v, packet])
if packet is None: continue
assert packet.name not in seen_names, \
'[%s] Duplicate packet name:\n%s\n%s.' \
% (v.name, seen_names[packet.name], packet)
seen_names[packet.name] = packet
packet_class = PacketClass(
name=packet.name, state=packet.state, bound=packet.bound)
if packet.name not in packet_classes:
packet_classes[packet.name] = packet_class
assert packet_classes[packet.name] == packet_class, \
'[%s] %r != %r' % (v.name,
packet_classes[packet.name], packet_class)
matrix[v][packet_class] = MatrixID(
id=packet.id, base_ver=v, changed=False, url=packet.url,
html=packet.html)
else:
raise AssertionError('Unrecognised article title: %r' % heading)
state_bound_ids = {}
for packet_class, matrix_id in matrix[v].items():
key = (packet_class.state, packet_class.bound, matrix_id.id)
assert key not in state_bound_ids, '[%s] Duplicate packet ID: ' \
'%s is used by packets %r and %r.' % (v.name,
'(%s, %s, 0x%02X)' % key, state_bound_ids[key], packet_class.name)
state_bound_ids[key] = packet_class.name
unused_patches = set(k for k in patch.keys() if k[0] == v and k not in used_patches)
if unused_patches:
raise AssertionError('Unused patches:\n'
+ '\n'.join('%r -> %r' % (p, patch[p]) for p in unused_patches))
prev_v = v
unused_patches = set(k for k in patch.keys() if k not in used_patches)
if unused_patches:
raise AssertionError('Unused patches:\n'
+ '\n'.join('%s -> %s' % (p, patch[p]) for p in unused_patches))
return matrix
|
joodicator/mc-dev-data
|
mcdevdata/matrix.py
|
matrix.py
|
py
| 7,498 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34161150799
|
from ninjaopenfoam import Case, Gnuplot, GmtPlot, GmtPlotCopyCase, PDFLaTeXFigure
import os
class SchaerWaves:
def __init__(self):
self.linearUpwindW()
self.cubicFitW()
self.charneyPhillipsW()
def linearUpwindW(self):
self.btf300dzLinearUpwind = GmtPlotCopyCase(
'schaerWaves-btf-300dz-linearUpwind',
source='$atmostests_builddir',
target='$builddir',
plots=['src/thesis/cp/schaerWaves/w.gmtdict'],
files=['18000/Uf'])
btf300dzLinearUpwindCase = Case('schaerWaves-btf-300dz-linearUpwind')
self.btf300dzLinearUpwindW = GmtPlot(
'schaerWaves-btf-300dz-linearUpwind-w',
plot='w',
case=btf300dzLinearUpwindCase,
time=18000,
data=['18000/Uf'])
self.btf300dzLinearUpwindWFigure = PDFLaTeXFigure(
'schaerWaves-btf-300dz-linearUpwind-w-figure',
output=os.path.join('thesis/cp/schaerWaves/fig-btf-300dz-linearUpwind-w'),
figure=os.path.join('src/thesis/cp/schaerWaves/fig-btf-300dz-linearUpwind-w'),
components=self.btf300dzLinearUpwindW.outputs()
)
def cubicFitW(self):
self.btf300dzCubicFit = GmtPlotCopyCase(
'schaerWaves-btf-300dz-cubicFit',
source='$atmostests_builddir',
target='$builddir',
plots=['src/thesis/cp/schaerWaves/wS.gmtdict'],
files=['18000/Uf'])
btf300dzCubicFitCase = Case('schaerWaves-btf-300dz-cubicFit')
self.btf300dzCubicFitW = GmtPlot(
'schaerWaves-btf-300dz-cubicFit-w',
plot='wS',
case=btf300dzCubicFitCase,
time=18000,
data=['18000/Uf'])
self.btf300dzCubicFitWFigure = PDFLaTeXFigure(
'schaerWaves-btf-300dz-cubicFit-w-figure',
output=os.path.join('thesis/cp/schaerWaves/fig-btf-300dz-cubicFit-w'),
figure=os.path.join('src/thesis/cp/schaerWaves/fig-btf-300dz-cubicFit-w'),
components=self.btf300dzCubicFitW.outputs()
)
def charneyPhillipsW(self):
self.btf300dzCharneyPhillips = GmtPlotCopyCase(
'schaerWavesCP-btf-300dz',
source='$atmostests_builddir',
target='$builddir',
plots=['src/thesis/cp/schaerWaves/w.gmtdict'],
files=['18000/Uf'])
btf300dzCharneyPhillipsCase = Case('schaerWavesCP-btf-300dz')
self.btf300dzCharneyPhillipsW = GmtPlot(
'schaerWavesCP-btf-300dz-w',
plot='w',
case=btf300dzCharneyPhillipsCase,
time=18000,
data=['18000/Uf'])
self.btf300dzCharneyPhillipsWFigure = PDFLaTeXFigure(
'schaerWaves-btf-300dz-cp-w-figure',
output=os.path.join('thesis/cp/schaerWaves/fig-btf-300dz-cp-w'),
figure=os.path.join('src/thesis/cp/schaerWaves/fig-btf-300dz-cp-w'),
components=self.btf300dzCharneyPhillipsW.outputs()
)
def outputs(self):
return ['src/thesis/cp/schaerWaves/melvin2010-w-mass-conserving-sisl.png'] \
+ self.btf300dzLinearUpwindWFigure.outputs() \
+ self.btf300dzCubicFitWFigure.outputs() \
+ self.btf300dzCharneyPhillipsWFigure.outputs()
def addTo(self, build):
build.add(self.btf300dzLinearUpwind)
build.add(self.btf300dzCubicFit)
build.add(self.btf300dzCharneyPhillips)
build.add(self.btf300dzLinearUpwindW)
build.add(self.btf300dzLinearUpwindWFigure)
build.add(self.btf300dzCubicFitW)
build.add(self.btf300dzCubicFitWFigure)
build.add(self.btf300dzCharneyPhillipsW)
build.add(self.btf300dzCharneyPhillipsWFigure)
|
hertzsprung/thesis
|
generators/schaerWaves.py
|
schaerWaves.py
|
py
| 3,873 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73526486907
|
import os.path
import pandas as pd
def loadEgPed():
'''Load example pedigree data.
'''
basepath = os.path.abspath(__file__)
folder = os.path.dirname(basepath)
data_path = os.path.join(folder, 'data/ped.txt')
text = pd.read_table(data_path,header=0)
return text
def loadEgGeno():
'''Load example genotype data.
'''
basepath = os.path.abspath(__file__)
folder = os.path.dirname(basepath)
data_path = os.path.join(folder, 'data/geno.traw')
return data_path
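# A minimal usage sketch: both loaders resolve their paths relative to this file,
# so they can be called with no arguments once the package's bundled data files
# are installed alongside it.
if __name__ == '__main__':
    ped = loadEgPed()
    print(ped.head())
    print(loadEgGeno())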
|
zhaow-01/PyAGH
|
PyAGH/loaddata.py
|
loaddata.py
|
py
| 512 |
python
|
en
|
code
| 4 |
github-code
|
6
|
74281107067
|
from typing import Any, List
from fastapi import APIRouter, HTTPException, Depends
from apps.auth.model import User
from apps.bank.cruds import invoice
from apps.bank.schemas.invoice import InvoiceUpdate, InvoiceView, InvoiceCreate, InvoiceViewFull
from core.security import current_user_is_banker, get_current_user
router = APIRouter(prefix='/invoices', tags=['Invoices'])
@router.get('/', response_model=List[InvoiceViewFull],
dependencies=[Depends(current_user_is_banker)])
async def list_invoices(skip: int = 0, limit: int = 100) -> Any:
results = await invoice.get_list(skip=skip, limit=limit)
return results
@router.get('/my', response_model=List[InvoiceViewFull])
async def list_my_invoices(user: User = Depends(get_current_user),
skip: int = 0, limit: int = 100) -> Any:
results = await invoice.get_list(user=user, skip=skip, limit=limit)
return results
@router.get('/{obj_id}', response_model=InvoiceViewFull)
async def get_invoice(obj_id: int, user: User = Depends(get_current_user)) -> Any:
result = await invoice.get(id=obj_id, user=user)
if not result:
raise HTTPException(status_code=404, detail='Invoice not found!')
return result
@router.post('/create', response_model=InvoiceView, status_code=201)
async def create_invoice(item: InvoiceCreate, user: User = Depends(get_current_user)) -> Any:
result = await invoice.create_invoice(obj_in=item, user=user)
return result
@router.put('/{obj_id}', response_model=InvoiceView)
async def update_invoice(obj_id: int, item: InvoiceUpdate,
user: User = Depends(get_current_user)) -> Any:
obj_db = await invoice.get(id=obj_id, user=user)
if not obj_db:
raise HTTPException(status_code=404, detail='Invoice not found!')
result = await invoice.update_invoice(obj_db=obj_db, obj_in=item, user=user)
return result
@router.delete('/{obj_id}')
async def delete_invoice(obj_id: int, user: User = Depends(get_current_user)) -> Any:
result = await invoice.remove(id=obj_id, user=user)
return result
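# A hypothetical wiring sketch: one way these endpoints might be mounted on an
# application object elsewhere in the project (the module layout and prefix here
# are assumptions, not taken from the repository).
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router, prefix='/api/v1')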
|
MojsaKirill/CRUD
|
app/api/api_v1/endpoints/invoices.py
|
invoices.py
|
py
| 2,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73084758907
|
import pandas as pd
# import requests
import sys
import collections
# import urllib.request
import json
# url = 'http://loterias.caixa.gov.br/wps/portal/loterias/landing/lotofacil/!ut/p/a1/04_Sj9CPykssy0xPLMnMz0vMAfGjzOLNDH0MPAzcDbz8vTxNDRy9_Y2NQ13CDA0sTIEKIoEKnN0dPUzMfQwMDEwsjAw8XZw8XMwtfQ0MPM2I02-AAzgaENIfrh-FqsQ9wBmoxN_FydLAGAgNTKEK8DkRrACPGwpyQyMMMj0VAcySpRM!/dl5/d5/L2dBISEvZ0FBIS9nQSEh/pw/Z7_HGK818G0K85260Q5OIRSC42046/res/id=historicoHTML/c=cacheLevelPage/=/'
# url = 'https://servicebus2.caixa.gov.br/portaldeloterias/api/resultados?modalidade=Lotofácil'
# url = sys.argv[1]
file = sys.argv[1] #'resultados.json'
# r = requests.get(url)
# r.text
# r.text = r.text
r = open(file, encoding="utf8")
data = json.load(r)
# data = json.load(urllib.request.urlopen(url))
r_text = data['html'].replace('\\r\\n', '')
r_text = r_text.replace('"\r\n}','')
r_text = r_text.replace('{\r\n "html:','')
r_text
df = pd.read_html(r_text)
type(df)
type(df[0])
df1 = df
df = df[0].copy()
df = df[df['Bola1'] == df['Bola1']]
df.head()
nr_pop = list(range(1,26))
nr_par = []
nr_impar = []
nr_primo = []
for n in nr_pop:
if n % 2 == 0:
nr_par.append(n)
elif n % 2 == 1:
nr_impar.append(n)
    # primality check: n is prime if it is > 1 and has no divisor in [2, n)
    if n > 1 and all(n % i != 0 for i in range(2, n)):
        nr_primo.append(n)
comb = []
v_cont = []
for n in nr_pop:
v_cont.append([n, 0])
'''v01 = 0
v02 = 0
v03 = 0
v04 = 0
v05 = 0
v06 = 0
v07 = 0
v08 = 0
v09 = 0
v10 = 0
v11 = 0
v12 = 0
v13 = 0
v14 = 0
v15 = 0
v16 = 0
v17 = 0
v18 = 0
v19 = 0
v20 = 0
v21 = 0
v22 = 0
v23 = 0
v24 = 0
v25 = 0'''
cols = ['Bola1', 'Bola2', 'Bola3', 'Bola4', 'Bola5', 'Bola6', 'Bola7', 'Bola8', 'Bola9', 'Bola10', 'Bola11', 'Bola12', 'Bola13', 'Bola14', 'Bola15']
for idx, row in df.iterrows():
v_par = 0
v_impar = 0
v_primo = 0
for c in cols:
if row[c] in nr_par:
v_par += 1
elif row[c] in nr_impar:
v_impar += 1
if row[c] in nr_primo:
v_primo += 1
for n in nr_pop:
if row[c] == n:
v_cont[n-1][1] += 1
'''if row[c] == 1:
v01 += 1
elif row[c] == 2:
v02 += 1
elif row[c] == 3:
v03 += 1
elif row[c] == 4:
v04 += 1
elif row[c] == 5:
v05 += 1
elif row[c] == 6:
v06 += 1
elif row[c] == 7:
v07 += 1
elif row[c] == 8:
v08 += 1
elif row[c] == 9:
v09 += 1
elif row[c] == 10:
v10 += 1
elif row[c] == 11:
v11 += 1
elif row[c] == 12:
v12 += 1
elif row[c] == 13:
v13 += 1
elif row[c] == 14:
v14 += 1
elif row[c] == 15:
v15 += 1
elif row[c] == 16:
v16 += 1
elif row[c] == 17:
v17 += 1
elif row[c] == 18:
v18 += 1
elif row[c] == 19:
v19 += 1
elif row[c] == 20:
v20 += 1
elif row[c] == 21:
v21 += 1
elif row[c] == 22:
v22 += 1
elif row[c] == 23:
v23 += 1
elif row[c] == 24:
v24 += 1
elif row[c] == 25:
v25 += 1'''
comb.append(str(v_par) + 'p-' + str(v_impar) + 'i-' + str(v_primo) + 'np')
freq_nr = v_cont
freq_nr.sort(key=lambda tup: tup[1])
counter_comb = collections.Counter(comb)
resultado = pd.DataFrame(counter_comb.items(), columns=['combination','frequency'])
resultado['p_freq'] = resultado.frequency / resultado.frequency.sum()
resultado.sort_values('p_freq', inplace=True)
print('''
O número mais frequente é: {}
O número menos frequente é: {}
A combinação mais frequente é {}, com a frequência de {}%.
'''.format(freq_nr[-1][0], freq_nr[0][0], resultado['combination'].values[-1], int(resultado['p_freq'].values[-1]*100*100)/100)
)
|
daklima/bootcamp-engdados-oct22
|
A001/main.py
|
main.py
|
py
| 3,954 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26596833071
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# Import libraries
import matplotlib.pyplot as plt
import matplotlib.animation as animate
import matplotlib.lines as mlines
import agentframework
import csv
# Request input from user for number of heroes and enemies
print("Welcome to the hero agent game. \nPlease set the number of hero and agents by inputting into the console below...")
num_heroes = int(input("Set the number of heroes: "))
num_enemies = int(input("Set the number of enemies: "))
# Declare variables
heroes = []
enemies =[]
winners = []
environment = []
rowlist = []
# Declare figure for plot
fig = plt.figure(num= 1, figsize=(7, 5))
carry_on = True
# DECLARE FUNCTIONS
# Keeps animation running as long as carry_on = true
def gen_function():
global carry_on
a = 0
while carry_on:
yield a
a = a + 1
# Writes end environment to file end_environment.txt
def end_game():
print('End game function called')
with open('end_environment.txt', 'w') as e:
e.write("END ENVIRONMENT: \n")
for row in environment:
e.write(" ".join(str(value) for value in row) +"\n")
e.write("DOCUMENT END \n")
e.close()
with open('stores_record.txt', 'a') as s:
s.write("GAME STARTS with {} heroes and {} enemies: \n".format(num_heroes, num_enemies))
for hero in heroes:
s.write("Hero {} finishes with a total store of {}. \n".format(hero.identity, hero.store))
s.write("GAME ENDS \n")
s.close()
# Sets updates to figure per iteration
def update(frame_number):
# Creates figures and axes:
fig.clear() # Clears figure so that updated markers and environment can be applied at each iteration
axes = plt.gca() # Points to axis
# Sets ranges of axes:
axes.set_xlim([0,300])
axes.set_ylim([0,300])
# Adds environment and colour scale key:
plt.imshow(environment)
plt.colorbar(ax = axes, orientation= 'horizontal', extend = 'min', spacing = 'proportional', shrink = 0.5).set_label('Environment density')
# Plots and actions for heroes:
for hero in heroes: # Loops through heroes
print(hero) # Heroes prints location and store to console
global carry_on # Access to stopping condition variable
# Plots heroes according to status:
if hero.store >= 3000: # First hero to reach store of 3000 wins and is plotted as winner
winners.append(hero)
plt.scatter(winners[0].x, winners[0].y, marker="D", c= "Orange")
plt.text((winners[0].x + 25), (winners[0].y - 1), "{} is the winner!".format(winners[0].identity), fontsize=8, color='White', backgroundcolor='Black')
print("We have a winner! Hero {} wins with a store of {}".format(winners[0].identity, winners[0].store) + "\n Remaining heroes:" )
carry_on = False
end_game()
elif hero.store >= 2500: # Fast heroes plotted
plt.scatter(hero.x, hero.y, c= 'Purple', label='Fast')
plt.text((hero.x + 8), (hero.y - 1), str(hero.identity), fontsize=8, color='White')
elif hero.store >= 1000: # Medium heroes plotted
plt.scatter(hero.x, hero.y, c= 'Pink', label= 'Average')
plt.text((hero.x + 8), (hero.y - 1), str(hero.identity), fontsize=8, color='White')
        elif hero.store < 1000: # Slow heroes plotted
plt.scatter(hero.x, hero.y, c= 'Grey', label= 'Slow')
plt.text((hero.x + 8), (hero.y - 1), str(hero.identity), fontsize=8, color='White')
# Actions for heroes (movement and sharing and eating of environment)
hero.move()
hero.eat()
hero.share_with_neighbours()
# Creates key for hero markers
enemy = mlines.Line2D([], [], color='Black', marker='x', linestyle='None', label='Enemy')
key_slow = mlines.Line2D([], [], color='Grey', marker='o', linestyle='None', label='Slow hero')
key_medium = mlines.Line2D([], [], color='Pink', marker='o', linestyle='None', label='Average hero')
key_fast = mlines.Line2D([], [], color='Purple', marker='o', linestyle='None', label='Fast hero')
plt.legend(handles=[key_slow, key_medium, key_fast, enemy], bbox_to_anchor=(1,1), bbox_transform=plt.gcf().transFigure, title='Agent key')
# Plots and actions for enemies
for enemy in enemies:
enemy.move()
enemy.eat()
plt.scatter(enemy.x, enemy.y, marker="x", c= 'Black')
for hero in heroes:
            enemy.eat_neighbours(hero) # enemy eats neighbouring heroes' stores
# Creates environment array from data file
f = open('environment.txt', newline='')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
for value in row:
rowlist.append(int(value))
environment.append(rowlist)
rowlist = []
f.close()
# Creates heroes (as many as inputted into console by user) and adds them to heroes list
for identity in range(num_heroes):
heroes.append(agentframework.Agent(environment, heroes, (identity + 1), enemies))
# Creates enemies (as many as inputted into console by user) and adds them to enemies list
for identity in range(num_enemies):
enemies.append(agentframework.Agent(environment, heroes, (identity + 1), enemies))
# Animates plot
animation = animate.FuncAnimation(fig, update, interval=1, frames=gen_function, repeat=False,)
|
emilyjcoups/Agent_Based_Model
|
model.py
|
model.py
|
py
| 5,535 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16986332004
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator
from numpy.lib.shape_base import split
import math
import cmath
def get_MSI (matrix, f, tau, iterCnt): #method of simple iterations
n = np.size(f)
B = np.diagflat([1] * n) - tau * matrix
f = tau * f
x = f
for i in range (iterCnt):
x = B.dot(x) + f
return x
def get_MJ (matrix, f, iterCnt): # method of Jacobi
u = f
n = np.size(f)
for cnt in range(iterCnt):
uNext = np.zeros(n)
for i in range (n):
            # Jacobi update: exclude the diagonal term from the dot product
            uNext[i] = (f[i] - matrix[i].dot(u) + matrix[i][i] * u[i]) / matrix[i][i]
u = uNext
return u
def get_MZ (matrix, f, iterCnt):
u = f
n = np.size(f)
answ = np.zeros((n, iterCnt))
for cnt in range(iterCnt):
uNext = np.zeros(n)
for i in range (n):
            # Gauss-Seidel update: already-updated entries for j < i, old entries for j > i
            uNext[i] = (f[i] -
                        matrix[i][:i].dot(uNext[:i]) -
                        matrix[i][i + 1:].dot(u[i + 1:])) / matrix[i][i]
u = uNext
answ[:, cnt] = uNext
return answ[:, -1]
eigenValuesFilename = "eigenValues15.dat"
# Problem input
N = 15 # number of grid points
h = 1 / (N - 1)
matrix = np.zeros (((N - 2) * (N - 2), N * N))
# build the matrix
def elem (i, j):
return N * j + i
for j in range (1, N - 1):
for i in range (1, N - 1):
        eqID = (j - 1) * (N - 2) + i - 1 # equationID - row index
if (i > 1):
matrix[eqID][elem(i - 1, j)] = -1
if (j > 1):
matrix[eqID][elem(i, j - 1)] = -1
if (i < N - 2):
matrix[eqID][elem(i + 1, j)] = -1
if (j < N - 2):
matrix[eqID][elem(i, j + 1)] = -1
matrix[eqID][elem(i, j)] = 4
print ("half")
# Remove the empty (boundary) columns
zeroColumns = []
for i in range (N * N - 1, -1, -1):
if i % N == 0 or \
i < N or \
i >= N * (N - 1) or \
(i + 1) % N == 0:
zeroColumns.append (i)
otherColumns = [x for x in range (N * N - 1) if x not in zeroColumns]
matrix = matrix[:, otherColumns]
print ("matrix is done")
# plt.spy(matrix)
# plt.show()
# compute the eigenvalues
# eigenValues = np.linalg.eigvals(matrix)
# print ("eigenvalues are computed")
# with open (eigenValuesFilename, "w") as file:
# for val in eigenValues:
# file.write(str(val) + "\n")
minVal = 1e9
maxVal = -1e9
with open(eigenValuesFilename, 'r') as file:
for line in file:
val = cmath.polar(complex(line))[0]
if (val > maxVal):
maxVal = val
        if val < minVal:
minVal = val
# Build the right-hand-side vector
v = np.zeros((N - 2) * (N - 2))
v[(N // 2) * (N - 2) + N // 2 - 1] = h**2
# v[N * N // 3 : N * N // 2] += h ** 2
# Solve the system with different methods
# u = get_MSI(matrix, v, 2 / (maxVal + minVal), 100)
# u = get_MSI(matrix, v, 1, 100)
# print (matrix)
# u = get_MJ(matrix, v, 0)
u = get_MZ(matrix, v, 10)
# u = np.linalg.solve(matrix, v)
# Fill matrix Z for plotting
Z = np.zeros((N, N))
for (uVal, oldID) in zip(u, otherColumns):
Z[oldID % N][oldID // N] = uVal
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
w = v = np.linspace(0, 1, N)
X, Y = np.meshgrid(w, v)
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
# A StrMethodFormatter is used automatically
# ax.zaxis.set_major_formatter('{x:.02f}')
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.savefig("pic.png")
# plt.show()
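
# Editor's note: a minimal, hypothetical sanity check for the iterative solvers above.
# The 2x2 system and the tau value are illustrative choices (not part of the original
# script); the matrix is diagonally dominant so Jacobi and Gauss-Seidel converge, and
# tau = 0.25 < 2/lambda_max keeps the simple-iteration scheme stable. Not called anywhere.
def _demo_iterative_solvers():
    A = np.array([[4.0, 1.0],
                  [1.0, 3.0]])
    f = np.array([1.0, 2.0])
    exact = np.linalg.solve(A, f)                 # reference solution
    x_msi = get_MSI(A, f, tau=0.25, iterCnt=200)  # method of simple iterations
    x_jacobi = get_MJ(A, f, iterCnt=200)
    x_seidel = get_MZ(A, f, iterCnt=200)
    print("exact        :", exact)
    print("simple iter. :", x_msi)
    print("Jacobi       :", x_jacobi)
    print("Gauss-Seidel :", x_seidel)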
|
Nevtod/Labs
|
ComputingMath/lab1/Computing_math.py
|
Computing_math.py
|
py
| 3,874 |
python
|
en
|
code
| 0 |
github-code
|
6
|
650459657
|
#! /bin/python
import os
import sys
import json
import luigi
import nifty.tools as nt
import elf.skeleton.io as skelio
from elf.skeleton import skeletonize as skel_impl, get_method_names
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
from cluster_tools.utils.task_utils import DummyTask
#
# skeletonize tasks
#
class SkeletonizeBase(luigi.Task):
""" Skeletonize base class
"""
task_name = 'skeletonize'
src_file = os.path.abspath(__file__)
allow_retry = False
# input and output volumes
input_path = luigi.Parameter()
input_key = luigi.Parameter()
morphology_path = luigi.Parameter()
morphology_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
number_of_labels = luigi.IntParameter()
resolution = luigi.ListParameter()
size_threshold = luigi.IntParameter(default=None)
method = luigi.Parameter(default='thinning')
dependency = luigi.TaskParameter(default=DummyTask())
methods = get_method_names()
# expose skeletonization parameter if we support more parameter
@staticmethod
def default_task_config():
# we use this to get also get the common default config
config = LocalTask.default_task_config()
config.update({'chunk_len': 1000, 'method_kwargs': {}})
return config
def requires(self):
return self.dependency
def _prepare_output(self, config):
# make the blocking
block_len = min(self.number_of_labels, config.get('chunk_len', 1000))
block_list = vu.blocks_in_volume((self.number_of_labels,),
(block_len,))
n_jobs = min(len(block_list), self.max_jobs)
# require output dataset
with vu.file_reader(self.output_path) as f:
f.require_dataset(self.output_key, shape=(self.number_of_labels,),
chunks=(1,), compression='gzip', dtype='uint64')
# update the config
config.update({'number_of_labels': self.number_of_labels,
'block_len': block_len})
return config, n_jobs, block_list
def run_impl(self):
assert self.method in self.methods,\
"Method %s is not supported, must be one of %s" % (self.method, str(self.methods))
# TODO support roi
# get the global config and init configs
shebang, block_shape, _, _ = self.global_config_values()
self.init(shebang)
# load the skeletonize config
# update the config with input and output paths and keys
config = self.get_task_config()
config.update({'input_path': self.input_path, 'input_key': self.input_key,
'morphology_path': self.morphology_path,
'morphology_key': self.morphology_key,
'output_path': self.output_path, 'output_key': self.output_key,
'resolution': self.resolution, 'size_threshold': self.size_threshold,
'method': self.method})
config, n_jobs, block_list = self._prepare_output(config)
# prime and run the jobs
self.prepare_jobs(n_jobs, block_list, config)
self.submit_jobs(n_jobs)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs)
class SkeletonizeLocal(SkeletonizeBase, LocalTask):
"""
skeletonize on local machine
"""
pass
class SkeletonizeSlurm(SkeletonizeBase, SlurmTask):
"""
skeletonize on slurm cluster
"""
pass
class SkeletonizeLSF(SkeletonizeBase, LSFTask):
"""
skeletonize on lsf cluster
"""
pass
#
# Implementation
#
# not parallelized for now
def _skeletonize_id_block(blocking, block_id, ds_in, ds_out,
sizes, bb_min, bb_max, resolution, size_threshold,
method):
fu.log("start processing block %i" % block_id)
block = blocking.getBlock(block_id)
id_begin, id_end = block.begin[0], block.end[0]
# we don't compute the skeleton for id 0, which is reserved for the ignore label
id_begin = 1 if id_begin == 0 else id_begin
# we increase the bounding box with a small halo, otherwise there
    # seem to be boundary inconsistencies
halo = (2, 2, 2)
shape = ds_in.shape
# skeletonize ids in range and serialize skeletons
for seg_id in range(id_begin, id_end):
if size_threshold is not None:
if sizes[seg_id] < size_threshold:
continue
bb = tuple(slice(max(int(mi - ha), 0),
min(int(ma + ha), sh)) for mi, ma, sh, ha in zip(bb_min[seg_id],
bb_max[seg_id],
shape, halo))
fu.log("skeletonize id %i from bb %s" % (seg_id, str(bb)))
obj = ds_in[bb] == seg_id
# try to skeletonize the object, skip if any exception is thrown
try:
nodes, edges = skel_impl(obj, resolution=resolution, method=method)
except Exception:
continue
offsets = [b.start * res for b, res in zip(bb, resolution)]
skelio.write_n5(ds_out, seg_id, nodes, edges, offsets)
fu.log_block_success(block_id)
def skeletonize(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
with open(config_path, 'r') as f:
config = json.load(f)
    # read the input config
input_path = config['input_path']
input_key = config['input_key']
morphology_path = config['morphology_path']
morphology_key = config['morphology_key']
output_path = config['output_path']
output_key = config['output_key']
size_threshold = config['size_threshold']
resolution = config['resolution']
method = config['method']
# morphology feature-columns
# 0 = label-id
# 1 = pixel size
# 2:5 = center of mass
# 5:8 = min coordinate
# 8:11 = max coordinate
with vu.file_reader(morphology_path) as f:
morpho = f[morphology_key][:]
sizes = morpho[:, 1].astype('uint64')
bb_min = morpho[:, 5:8].astype('uint64')
bb_max = morpho[:, 8:11].astype('uint64') + 1
block_list = config['block_list']
block_len = config['block_len']
n_labels = config['number_of_labels']
blocking = nt.blocking([0], [n_labels], [block_len])
# skeletonize this id block
with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
ds_in = f_in[input_key]
ds_out = f_out[output_key]
for block_id in block_list:
_skeletonize_id_block(blocking, block_id, ds_in, ds_out,
sizes, bb_min, bb_max, resolution, size_threshold,
method)
# log success
fu.log_job_success(job_id)
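
# Editor's note: a hypothetical sketch of the per-job config file consumed by
# skeletonize() above; the paths and values are illustrative only, the keys follow
# the config entries read in skeletonize().
_EXAMPLE_JOB_CONFIG = {
    "input_path": "/path/to/segmentation.n5",
    "input_key": "volumes/segmentation",
    "morphology_path": "/path/to/morphology.n5",
    "morphology_key": "morphology",
    "output_path": "/path/to/skeletons.n5",
    "output_key": "skeletons",
    "resolution": [40, 4, 4],       # assumed z, y, x resolution
    "size_threshold": 1000,         # skip objects smaller than this (or None)
    "method": "thinning",
    "block_list": [0, 1, 2],        # id-blocks handled by this job
    "block_len": 1000,
    "number_of_labels": 2500,
}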
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
skeletonize(job_id, path)
|
constantinpape/cluster_tools
|
cluster_tools/skeletons/skeletonize.py
|
skeletonize.py
|
py
| 7,301 |
python
|
en
|
code
| 32 |
github-code
|
6
|
35632930544
|
"""
ID: detrime1
LANG: PYTHON3
TASK: friday
"""
import sys,os.path
from collections import *
if os.path.exists('friday.in'):
sys.stdin = open('friday.in', 'r')
sys.stdout = open('friday.out', 'w')
def detri():
n = int(input())
weekDays = [0]*7
monthDays = [31,28,31,30,31,30,31,31,30,31,30,31]
day = 2
for year in range(1900, 1900+n):
if str(year)[2:]=="00":
if not year%400: monthDays[1] = 29
elif not year%4: monthDays[1] = 29
else: monthDays[1] = 28
for monthDay in monthDays:
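            # the 13th is 12 days after the 1st of the month, so (day + 12) % 7 is its
            # weekday index; the starting offset of 2 makes index 0 land on Saturday
            # (Jan 13, 1900), matching the required Saturday-through-Friday output order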
day += 12
day %= 7
weekDays[day] += 1
day += monthDay-12
print(*weekDays)
# WORKING GARBAGE WACK BRUTE FORCE
"""days = ["SAT","SUN","MON","TUE","WED","THU","FRI"]
months = {
"JAN":31,
"FEB":(28,29),
"MAR":31,
"APR":30,
"MAY":31,
"JUN":30,
"JUL":31,
"AUG":31,
"SEP":30,
"OCT":31,
"NOV":30,
"DEC":31,
}
ans = {day:0 for day in days}
n = int(input())
day = 0
for year in range(1900, 1900+n):
leap = False
if str(year)[2:]=="00":
if not year%400:
leap = True
else:
if not year%4:
leap = True
months["FEB"] = 29 if leap else 28
for month in months:
for i in range(1,months[month]+1):
day = (day+1)%7
if i==13:
ans[days[day]] += 1
check = []
for val in ans.values():
check.append(val)
check[:] = check[6:]+check[:6]
print(*check)"""
if __name__ == "__main__":
detri()
|
llxSKyWALKeRxll/USACO_Training
|
Chapter 1/Friday The Thirteenth/friday.py
|
friday.py
|
py
| 1,458 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37169471952
|
from flask import request
from flask_restx import Resource
from ..service.auth_service import Auth
from ..util.decorator import admin_token_required
from ..service.user_service import save_new_user, get_a_user
from ..util.dto import AuthDto
api = AuthDto.api
user_auth = AuthDto.user_auth
user_token = AuthDto.user_token
@api.route('/register')
class UserRegister(Resource):
"""
User Register Resource
"""
@api.doc('user register')
@api.expect(user_auth, validate=True)
def post(self):
# get the post data
data = request.json
return save_new_user(data=data)
@api.route('/login')
class UserLogin(Resource):
"""
User Login Resource
"""
@api.doc('user login')
@api.expect(user_auth, validate=True)
def post(self):
# get the post data
post_data = request.json
return Auth.login_user(data=post_data)
@api.route('/logout')
class LogoutAPI(Resource):
"""
Logout Resource
"""
@api.doc('logout a user')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
def post(self):
"""
logout user
"""
# get auth token
auth_header = request.headers.get('Authorization')
return Auth.logout_user(data=auth_header)
@api.route('/force-logout/<id>')
class ForceLogoutAPI(Resource):
"""
Force Logout Resource
"""
@api.doc('force logout a user')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
@api.response(404, 'User not found.')
@admin_token_required
def post(self, id):
"""
force logout a user and blacklist all the tokens
"""
        user = get_a_user(id)
if not user:
api.abort(404)
return Auth.force_logout_user(user)
@api.route('/expire-token')
class ExpireTokenAPI(Resource):
"""
Expire Token Resource
"""
@api.doc('expire a user token')
@api.doc(params={'Authorization': {'in': 'header', 'description': 'An authorization token'}})
@api.expect(user_token, validate=True)
@admin_token_required
def post(self):
"""
expire a token passed in post body, admin only authorized
"""
token = request.json['token']
return Auth.expire_token(token)
|
miteshnath/flask-admin-jwt
|
app/main/controller/auth_controller.py
|
auth_controller.py
|
py
| 2,367 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70835853948
|
import csv
import argparse
import os
import sys
import numpy as np
import torch
import torch.cuda
from PIL import Image
from torch.autograd import Variable
from torchvision.transforms import transforms
from my.yolov3.easy.net.load_net import load_net
from PIL import Image
image_size = (96, 96)
test_transformations = transforms.Compose([
transforms.ToTensor()
])
def load_trained_net(model_path):
print("Begin to load pre-trained net ... ", end="")
net = load_net("resnet152")
checkpoint = torch.load(model_path)
net.load_state_dict(checkpoint['state_dict'])
net.eval()
print("Finished.")
return net
def predict(net, ims: list):
# Define transformations for the image
transformation = test_transformations
images_tensor_list = []
for im in ims:
        w = max(im.size) # width of the square crop
        im = im.crop((0, 0, w, w)).resize(image_size) # pad to a square, then resize
image = np.asarray(im)
image_tensor = transformation(image)
images_tensor_list.append(image_tensor)
images_tensor = torch.stack(images_tensor_list)
    if torch.cuda.is_available():
        # note: .cuda() returns a new tensor; without assigning the result (and moving
        # the net to the GPU as well) this call has no effect and inference stays on CPU
        images_tensor.cuda()
    # wrap the input in a Variable
    input = Variable(images_tensor)
    # predict the image class
output = net(input)
index = output.data.numpy().argmax(axis=1)
return index + 1 # [0, C-1] -> [1, C]
if __name__ == '__main__':
net = load_trained_net("model/model-87-8.477896466274615e-05.pth")
image_paths = ["../data/images/0a0bf7bc-e0d7-4f20-abec-039136663d85.jpg",
"../data/images/0a0c27d7-2e2a-4817-a715-8182cf07ec9b.jpg",
"../data/images/0a00c2a3-a498-452a-ba88-6b9ef514e201.jpg",
"../data/images/0a1a5d35-1b30-43ff-87bc-9acdab1567c1.jpg"]
ims = []
for image_path in image_paths:
im = Image.open(image_path)
ims.append(im)
results = predict(net, ims)
print(results)
|
NJUCoders/commodity-classification-hard
|
easy/predict.py
|
predict.py
|
py
| 1,931 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20044628025
|
houses = [[3,7],[1,9],[2,0],[5,15],[4,30]]
# houses = [[1,2],[2,3],[3,1],[4,20]]
d = {}
for i in houses:
d[i[0]] = i[1]
SortedDict = {k: v for k, v in sorted(d.items(), key=lambda item: item[1])}
print(SortedDict)
fList = []
finalList = []
for j in SortedDict:
fList.append(j)
print(fList)
finalList = fList[-2:]
finalList.sort()
print(finalList)
# d = {1:'a',3:'b',2:'c',5:'u'}
# print(d)
# print(d.items())
|
Elevenv/Placement-Stuff
|
house.py
|
house.py
|
py
| 418 |
python
|
en
|
code
| 1 |
github-code
|
6
|
811036516
|
'''Process Restricted Friend Requests
https://leetcode.com/problems/process-restricted-friend-requests/
You are given an integer n indicating the number of people in a network. Each person is labeled from 0 to n - 1.
You are also given a 0-indexed 2D integer array restrictions, where restrictions[i] = [xi, yi] means that person xi
and person yi cannot become friends, either directly or indirectly through other people.
Initially, no one is friends with each other. You are given a list of friend requests as a 0-indexed 2D integer array
requests, where requests[j] = [uj, vj] is a friend request between person uj and person vj.
A friend request is successful if uj and vj can be friends. Each friend request is processed in the given order (i.e.,
requests[j] occurs before requests[j + 1]), and upon a successful request, uj and vj become direct friends for all
future friend requests.
Return a boolean array result, where each result[j] is true if the jth friend request is successful or false if it is
not.
Note: If uj and vj are already direct friends, the request is still successful.
Example 1:
Input: n = 3, restrictions = [[0,1]], requests = [[0,2],[2,1]]
Output: [true,false]
Explanation:
Request 0: Person 0 and person 2 can be friends, so they become direct friends.
Request 1: Person 2 and person 1 cannot be friends since person 0 and person 1 would be indirect friends (1--2--0).
'''
from typing import List

from collections import defaultdict, deque
class Solution:
def friendRequests(self, n: int, restrictions: List[List[int]], requests: List[List[int]]) -> List[bool]:
def bfs(node):
q = deque([node])
visited = set()
visited.add(node)
relations = set()
while q:
node = q.popleft()
relations.add(node)
for neighbor in friend_mapping[node]:
if neighbor not in visited:
visited.add(neighbor)
q.append(neighbor)
return relations
banned_mapping = defaultdict(set)
for u, v in restrictions:
banned_mapping[u].add(v)
banned_mapping[v].add(u)
result = [False] * len(requests)
friend_mapping = defaultdict(set)
for index, req in enumerate(requests):
u, v = req[0], req[1]
if v in banned_mapping[u]:
result[index] = False
else:
set1 = bfs(u)
set2 = bfs(v)
banned = False
for key in set1:
for bannedId in banned_mapping[key]:
if bannedId in set2:
result[index] = False
banned = True
if not banned:
result[index] = True
friend_mapping[u].add(v)
friend_mapping[v].add(u)
return result
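
# Editor's note: a small sanity check (not part of the original snippet) using Example 1
# from the problem statement above; it assumes the class is run outside the LeetCode harness.
if __name__ == "__main__":
    assert Solution().friendRequests(3, [[0, 1]], [[0, 2], [2, 1]]) == [True, False]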
|
Saima-Chaity/Leetcode
|
Graph/Process Restricted Friend Requests.py
|
Process Restricted Friend Requests.py
|
py
| 2,945 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36255805376
|
import boto3
import json
import os
dynamodb = boto3.resource('dynamodb')
client = boto3.client('dynamodb')
USERS_TABLE = dynamodb.Table(os.environ['USERS_TABLE'])
def delete_user_service(event, context):
try:
response = USERS_TABLE.update_item(
Key={
'userId': event['pathParameters']['id']
},
ConditionExpression='attribute_exists(userId)',
UpdateExpression='SET active = :active',
ExpressionAttributeValues={':active': False}
)
print('[GET RESPONSE]:', response)
return {
'statusCode': 200,
'body': json.dumps('user deleted.')
}
except Exception as e:
print("Error deleting user:")
print(e)
return {
'statusCode': 400,
'body': json.dumps('Error deleting the user')
}
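
# Editor's note: a hypothetical example of the API Gateway proxy event this handler
# expects; only pathParameters.id is read above, the remaining fields are illustrative.
_EXAMPLE_EVENT = {
    "resource": "/users/{id}",
    "httpMethod": "DELETE",
    "pathParameters": {"id": "user-123"},
}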
|
Glendid/glendid-app-users
|
src/services/DeleteUser.py
|
DeleteUser.py
|
py
| 875 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4388139380
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for blip.tv
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import urllib
try:
from core import scrapertools
from core import logger
except:
from Code.core import scrapertools
from Code.core import logger
# Resolves the blip.tv videos used in the embed
#
def geturl(bliptv_url,recurse=True):
logger.info("[bliptv.py] bliptv_url="+bliptv_url)
devuelve = ""
if bliptv_url.startswith("http://blip.tv/play"):
redirect = scrapertools.getLocationHeaderFromResponse(bliptv_url)
logger.info("[bliptv.py] redirect="+redirect)
patron='file\=(.*?)$'
matches = re.compile(patron).findall(redirect)
logger.info("[bliptv.py] matches1=%d" % len(matches))
if len(matches)==0:
patron='file\=([^\&]+)\&'
matches = re.compile(patron).findall(redirect)
logger.info("[bliptv.py] matches2=%d" % len(matches))
if len(matches)>0:
url = matches[0]
logger.info("[bliptv.py] url="+url)
url = urllib.unquote(url)
logger.info("[bliptv.py] url="+url)
data = scrapertools.cache_page(url)
logger.info(data)
patron = '<media\:content url\="([^"]+)" blip\:role="([^"]+)".*?type="([^"]+)"[^>]+>'
matches = re.compile(patron).findall(data)
scrapertools.printMatches(matches)
for match in matches:
logger.info("url="+str(match[0]))
devuelve = match[0]
return devuelve
|
TuxRneR/pelisalacarta-personal-fork
|
tags/xbmc-addons/plugin.video.pelisalacarta/servers/bliptv.py
|
bliptv.py
|
py
| 1,763 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17007776735
|
import requests
import bs4
import urllib
def spider(max_pages):
for page in range(1, max_pages + 1):
        # note: this urlencoded query is built but never used; the URL below hard-codes
        # the same search term ('대선후보') percent-encoded in EUC-KR instead
        query = urllib.parse.urlencode({'query':u'대선후보'})
        url = 'http://news.naver.com/main/search/search.nhn?query=' + '%B4%EB%BC%B1%C8%C4%BA%B8'
source_code = requests.get(url)
plain_text = source_code.text
soup = bs4.BeautifulSoup(plain_text, 'html.parser')
content = soup.find(id='search_div')
for result in content.select('ul > li > div'):
print('############# Title')
print(result.a.text)
print('############# Content')
print(result.p.text)
spider(1)
|
masonHong/INU-Study
|
C Team(Hong, Heo)/Crowaling/Practice 1.py
|
Practice 1.py
|
py
| 668 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72112922749
|
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
# import snntorch
import pandas as pd
import tqdm
import argparse
from . import p_snu_layer
class SNN_Net(torch.nn.Module):
def __init__(self, inputs_num = 4, hidden_num = 4, outputs_num = 3 ,l_tau = 0.8,num_time = 100, batch_size = 80 ,soft = False, rec = False, power = False, gpu = True):
super().__init__()
self.num_time = num_time
self.batch_size = batch_size
self.rec = rec
self.power = power
        # parameters
# self.neuron_0 = 4
# self.neuron_1 = 24
# self.neuron_2 = 24
# self.neuron_3 = 4
#my #hidden num = 24
# self.l1 = p_snu_layer.P_SNU(inputs_num, hidden_num, l_tau = l_tau, soft = soft, gpu = gpu)
# self.l2 = p_snu_layer.P_SNU(hidden_num, hidden_num, l_tau = l_tau, soft = soft, gpu = gpu)
# self.l3 = p_snu_layer.P_SNU(hidden_num, hidden_num, l_tau = l_tau, soft = soft, gpu = gpu)
# self.l4 = p_snu_layer.P_SNU(hidden_num, outputs_num, l_tau = l_tau, soft = soft, gpu = gpu)
#my2 hidden num = 4
self.l1 = p_snu_layer.P_SNU(inputs_num, hidden_num, l_tau = l_tau, soft = soft, gpu = gpu)
self.l2 = p_snu_layer.P_SNU(hidden_num, hidden_num, l_tau = l_tau, soft = soft, gpu = gpu)
self.l3 = p_snu_layer.P_SNU(hidden_num, outputs_num, l_tau = l_tau, soft = soft, gpu = gpu)
# for 1 layer test
# self.l4 = p_snu_layer.P_SNU(inputs_num, outputs_num, l_tau = l_tau, soft = soft, gpu = gpu)
def reset_state(self):
self.l1.reset_state()
self.l2.reset_state()
self.l3.reset_state()
# self.l4.reset_state()
def forward(self,x,y):
# y = torch.tensor(y)
losse = None
accuracy = None
        sum_out = None # accumulate the spike count over time steps
        out_list = [] # output spikes per time step (100 ms) for each sample (120x4), kept in time order
out_total_list = []
membrane_out = torch.empty(100,3)
mem1_out = torch.empty(100,4)
mem2_out = torch.empty(100,4)
spikes_ = torch.empty(100,4)
self.reset_state()
        for time in range(self.num_time): # num_time defaults to 100
# spike_encoded_neuron = x[time]
# target_ = torch.reshape(y[time],(1,3))
# spike_encoded_neuron = torch.reshape(x[time],(4,1))
#4→4→3(network)
spike_encoded_neuron = x[time]
h1,mem1,u1 = self.l1(spike_encoded_neuron)
h2,mem2,u2 = self.l2(h1)
# out,mem = self.l3(h2)
# 1 layer test 4→3(network)
            # check the membrane potential and the input/output spikes
# out,thresh,spike = self.l4(spike_encoded_neuron)
#normal
# out = self.l4(spike_encoded_neuron)
# sum_out = out if sum_out is None else sum_out + out
mem1_out[time] = mem1
mem2_out[time] = mem2
# membrane_out[time] = out
spikes_[time] = h1
            # check the output
        # return sum_out,y
        # for batch training
        # criterion = nn.CrossEntropyLoss()
        # losse = criterion(sum_out,y)
        # accuracy
        # predicted_label = torch.argmax(sum_out)
        # accuracy = 1 if predicted_label == y else 0
        return spikes_,mem1_out,mem2_out
        # unreachable alternative returns kept from earlier experiments:
        # return mem_out
        # return sum_out,losse,accuracy
|
GTAKAGI/PSNN
|
snn_model/network.py
|
network.py
|
py
| 3,693 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71271557629
|
from typing import List

class Solution:
def findMaxForm(self, strs: List[str], m: int, n: int) -> int:
dp = [[0] * (n+1) for _ in range(m+1)]
counter=[[s.count("0"), s.count("1")] for s in strs]
for zeroes, ones in counter:
for i in range(m, zeroes-1, -1):
for j in range(n, ones-1, -1):
dp[i][j] = max(dp[i][j], 1+dp[i-zeroes][j-ones])
return dp[-1][-1]
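
# Editor's note: a tiny check added for illustration (not part of the original snippet).
# With m=1 zero and n=1 one available, only "0" and "1" fit in the budget, so the answer is 2.
if __name__ == "__main__":
    assert Solution().findMaxForm(["10", "0", "1"], 1, 1) == 2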
|
anubhavsrivastava10/Leetcode-HackerEarth-Solution
|
Leetcode/May 2022/23)474. Ones and Zeroes.py
|
23)474. Ones and Zeroes.py
|
py
| 451 |
python
|
en
|
code
| 9 |
github-code
|
6
|
27390116761
|
# 2SUM
# http://rosalind.info/problems/2sum/
from utilities import get_file, get_answer_file
def two_sum(num_array):
minus_set = set(-i for i in num_array)
for i, value in enumerate(num_array):
if value in minus_set:
try:
j = num_array.index(-value, i+1)
except ValueError:
continue
return (i+1, j+1)
return (-1, -1)
with get_file() as file:
k, n = map(int, file.readline().split())
num_arrays = [list(map(int, line.split())) for line in file.readlines()]
with get_answer_file() as file:
for num_array in num_arrays:
p, q = two_sum(num_array)
if p == -1:
print(-1, file=file)
else:
print(p, q, file=file)
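
# Editor's note (illustrative trace, not from the original file):
# two_sum([2, -3, 4, 3]) finds -3 at position 2 and 3 at position 4 (1-based) and
# returns (2, 4); an array with no such pair returns (-1, -1).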
|
Delta-Life/Bioinformatics
|
Rosalind/Algorithmic Heights/code/2SUM.py
|
2SUM.py
|
py
| 761 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11547913275
|
# This file is part of RADAR.
# Copyright (C) 2019 Cole Daubenspeck
#
# RADAR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RADAR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RADAR. If not, see <https://www.gnu.org/licenses/>.
import re
from typing import Tuple
# used if anything wants to sort/use numeric values instead of strings
VALUE_STANDARD_CONVERSIONS = {
"unknown": 0,
"low": 1,
"medium": 2,
"high": 3,
"very-high": 4
}
# format
# device_type_string
# - list of services (used when the service might not appear on consistent ports) (THESE ARE REGULAR EXPRESSIONS!)
# - list of ports that correspond to hosts of that type (preferred unless the service tends to run on different ports)
HOST_TYPE = {
"webserver": {
"value": "high",
"service_names": [
"werkzeug",
"httpd",
"nginx",
"apache"
],
"ports": [
80,
443,
3000, # node webserver
8000,
8443
]
},
"database": {
"value": "very-high",
"service_names": [
],
"ports": [
1433, # mssql
3306, # mysql
6379, # redis
27017, # mongo
]
},
"fileserver": {
"value": "high",
"service_names": [
],
"ports": [
21,
990
]
},
"mailserver": {
"value": "medium",
"service_names": [
],
"ports": [
25, # smtp ports
468,
587,
2525,
110, # pop3 ports
993,
143, # imap ports
995
]
},
"ics": { # industrial control system
"value": "very-high",
"service_names": [
"modbus"
],
"ports": [
502
]
},
"domain_controller": {
"value": "very-high",
"service_names": [
],
"ports": [
88 # kerberos
]
}
}
def get_info(target: dict) -> Tuple[str, str]:
""" For a given target, returns information about the priority and best-guess type of host
arguments:
target: a dictionary that conforms to RADAR target specifications
returns:
a tuple of strings (priority, type). First string is the value of the device (e.g. "high"), second is the type of device (e.g. "webserver").
        Multiple device types will be separated with a semicolon (e.g. 'webserver;database').
"""
services = target.get("services")
if not services: # no running services, we don't care
return "unknown", "generic"
device_value = "unknown"
device_type = ""
global HOST_TYPE
global VALUE_STANDARD_CONVERSIONS
# for every service on the target
for service in services:
port = int(service.get("port"))
name = service.get("service")
# check if any of the host types matches the target...
for host_type, details in HOST_TYPE.items():
# skip checking the type if it's already flagged (e.g. it has multiple services related to being a webserver)
if host_type in device_type:
continue
type_value = details.get("value")
# by seeing if the port is in one of the lists
if port in details.get("ports"):
device_value = device_value if VALUE_STANDARD_CONVERSIONS[type_value] < VALUE_STANDARD_CONVERSIONS[device_value] else type_value
device_type += f";{host_type}"
# or by seeing if any of the patterns matches
else:
for check_names in details.get("service_names", []):
if re.search(check_names, name):
device_value = device_value if VALUE_STANDARD_CONVERSIONS[type_value] < VALUE_STANDARD_CONVERSIONS[device_value] else type_value
device_type += f";{host_type}"
break
return device_value, device_type[1:] or "unknown"
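
# Editor's note: a hypothetical usage sketch; the target dict below is illustrative and
# only follows the fields get_info() reads ("services" entries with "port" and "service").
if __name__ == "__main__":
    example_target = {
        "services": [
            {"port": 443, "service": "nginx"},    # matches the webserver port list
            {"port": 3306, "service": "mysql"},   # matches the database port list
        ]
    }
    # expected result: ("very-high", "webserver;database")
    print(get_info(example_target))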
|
Sevaarcen/RADAR
|
cyber_radar/helpers/target_prioritizer.py
|
target_prioritizer.py
|
py
| 4,715 |
python
|
en
|
code
| 2 |
github-code
|
6
|
23468677797
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tapp', '0005_comment_end_i'),
]
operations = [
migrations.AlterModelOptions(
name='essay',
options={'permissions': (('view_essay', 'View essay'),)},
),
]
|
rihakd/textAnalyticsDjango
|
TA/tapp/migrations/0006_auto_20151119_1941.py
|
0006_auto_20151119_1941.py
|
py
| 386 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26225231093
|
import sys
from functools import reduce
from collections import defaultdict

import numpy as np

# the flood fill below recurses deeply on a 128x128 grid
sys.setrecursionlimit(20000)

s = open('Day14.txt').read().strip()  # strip the trailing newline so the key string is exact
def twist(a, n):
return a[:n][::-1] + a[n:]
def skip(a, k):
return a[k:] + a[:k]
def knot(a, x):
p = k = 0
for i in x:
a = skip(twist(skip(a, p), i), -p)
p = (p + i + k) % len(a)
k += 1
return a
def densehash(a):
return list(map(
lambda x: reduce(int.__xor__, x),
zip(*(a[i::16] for i in range(16)))
))
def knothash(x):
a = list(range(256))
suffix = [17, 31, 73, 47, 23]
y = list(map(ord, x)) + suffix
dh = densehash(knot(a, y * 64))
return ''.join('{:02x}'.format(n) for n in dh)
def kh(x):
i = lambda h: int(h, 16)
b = '{:04b}'.format
f = lambda h: b(i(h))
return ''.join(map(f, knothash(x)))
def view(x):
return kh(x)[:8].replace('1', '#').replace('0', '.')
arr = np.array(list(map(int, ''.join(map(kh, (s + '-' + i for i in map(str, range(128))))))))
print(arr.sum())
a = arr.reshape(128, -1)
seen = set()
available = set((i, j) for i in range(128) for j in range(128))
groups = defaultdict(set)
def path(i, j, g):
if (i >= 0) and (j >= 0):
seen.add((i, j))
available.difference_update([(i, j)])
if a[i, j] == 1:
groups[g].add((i, j))
moves = set([
(i - 1, j), (i + 1, j),
(i, j - 1), (i, j + 1)
])
for i, j in (moves & available):
path(i, j, g)
g = 0
while available:
i, j = next(iter(available))
path(i, j, g)
g += 1
print(len(groups.keys()))
print(
    '\n'.join(map(view, (s + '-{}'.format(i) for i in range(8))))  # 's' is the puzzle input read above
)
|
pirsquared/Advent-of-Code
|
2017/Day14.py
|
Day14.py
|
py
| 1,621 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73549612348
|
import pandas as pd
import numpy as np
import io
import requests
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn import metrics
from beta_encoder import BetaEncoder
import category_encoders as ce
from utils import *
import csv
import xgboost as xgb
def run_ls_experiments():
print("Loading Data")
df = load_data()
continuous = ['company_size', 'interested_desks']
categorical = ['industry','location', 'lead_source']
#columns:
print("continuous columns: ",continuous)
print("categorical columns: ",categorical)
# plot increasing dimensionality va computation time
sample_sizes = [2000,
4000,
6000,
8000,
10000,
12000,
14000,
16000,
18000,
20000,
22000,
24000,
26000,
28000,
30000,
32000,
34000,
36000,
38000,
40000,
42000,
44000,
46000,
48000,
50000]
sample_sizes = [42000,
44000,
46000,
48000,
50000]
results = [['model','Encoder','Accuracy','STD','Training Time','Sparsity','Dimensions','sample_size']]
for sample_size in sample_sizes:
print("")
print("----------------------")
print("Sample Size: ",sample_size)
print("----------------------")
if not sample_size < len(df):
sample_size = len(df)
sample = df.sample(sample_size)
X = sample[continuous+categorical]
y = sample[['converted']]
successes = y.sum()[0]
alpha_prior = float(successes / len(y))
model = xgb.XGBClassifier(n_jobs=4) #[GradientBoostingClassifier(max_depth=8, n_estimators=64)]
#BetaEncoder (mean)
print("Beta Encoder (mean) Results:")
acc, std, time, sparsity, dimensions = cv_lead_scoring_classification(model, X, y, continuous, categorical, encoder=BetaEncoder(alpha=alpha_prior, beta=1-alpha_prior))
results.append([type(model), 'BetaEncoder (m)', acc, std, time, sparsity, dimensions,sample_size])
#OneHotEncoder
print("OneHotEncoder Results:")
acc, std, time, sparsity, dimensions = cv_lead_scoring_classification(model, X, y, continuous, categorical, encoder=OneHotEncoder(handle_unknown='ignore', sparse=False))
results.append([type(model), 'OneHotEncoder', acc, std, time, sparsity, dimensions,sample_size])
file = 'lead_scoring_experiments_comp_time_2.csv'
with open(file, "w") as output:
writer = csv.writer(output, lineterminator='\n')
writer.writerows(results)
try:
upload_file(file)
except:
print("File Not Uploaded")
def load_data():
df = pd.read_csv('lead_scoring_1mil.csv')
df = df.fillna('null')
industries = df.industry.str.split(',', n=-1, expand=True)
df['industry'] = industries[0]
#training_df['sector'] = industries[1]
return df
if __name__ == '__main__':
run_ls_experiments()
|
aslakey/CBM_Encoding
|
lead_scoring_computation_time.py
|
lead_scoring_computation_time.py
|
py
| 3,159 |
python
|
en
|
code
| 18 |
github-code
|
6
|
75132007548
|
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split # classes and functions for splitting data during model selection
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, SGDRegressor, LogisticRegression # linear regression
# "externals" here means external/extension utilities
from sklearn.externals import joblib # saving and loading models ==> https://blog.csdn.net/YZXnuaa/article/details/80694372
from sklearn.metrics import mean_squared_error, classification_report # mean squared error metric
import pandas as pd
import numpy as np
# Predict house prices with linear regression
# (With a small dataset LinearRegression - the normal-equation form of least squares - can find the error minimum directly, but the computation is very expensive, so it is not recommended for large datasets.)
# This is similar to solving for k and b in the linear equations taught in high school.
def mylinear():
    # load the data
    lb = load_boston()
    # convert features to numeric values
    # split the dataset
    x_train, x_test, y_train, y_test = train_test_split(lb.data, lb.target, test_size=0.25)
    # standardize (normalize) the data
    """The feature array and the target array have different shapes, so they cannot share one scaler; create a separate scaler for each."""
    std_x = StandardScaler()
    x_train = std_x.fit_transform(x_train)
    x_test = std_x.transform(x_test)
    # for the target values
    std_y = StandardScaler()
    y_train = std_y.fit_transform(y_train.reshape(-1, 1))
    y_test = std_y.transform(y_test.reshape(-1, 1))
    # build the model
    lr = LinearRegression()
    lr.fit(x_train, y_train)
    print(lr.coef_)
    # save the model; it can be loaded elsewhere later to keep training or to predict
    joblib.dump(lr, './通过joblib保存线性回归模型.pkl')
    # evaluate the predictions
    y_predict = lr.predict(x_test)
    # the quality of a linear regression model is measured with the mean squared error
    print("产生的均方误差为:", mean_squared_error(y_test, y_predict))
    # transform the data back to its pre-standardization scale
    y_predict_orign = std_y.inverse_transform(y_predict)
    print(y_predict, "==================", y_predict_orign) # ==> produced MSE: 0.42782165417388635
# Predict house prices with gradient descent; recommended when the dataset is large
def sgd_regression():
    # the lines below are copied directly from the function above
    # load the data
    lb = load_boston()
    # convert features to numeric values
    # split the dataset
    x_train, x_test, y_train, y_test = train_test_split(lb.data, lb.target, test_size=0.25)
    # standardize (normalize) the data
    """The feature array and the target array have different shapes, so they cannot share one scaler; create a separate scaler for each."""
    std_x = StandardScaler()
    x_train = std_x.fit_transform(x_train)
    x_test = std_x.transform(x_test)
    # for the target values
    std_y = StandardScaler()
    y_train = std_y.fit_transform(y_train.reshape(-1, 1))
    y_test = std_y.transform(y_test.reshape(-1, 1))
    # build the model (SGDRegressor estimates the parameters with stochastic gradient descent)
    sgd = SGDRegressor()
    # predict and evaluate
    sgd.fit(x_train, y_train)
    y_predict = sgd.predict(x_test)
    print(mean_squared_error(y_test, y_predict)) # ==> produced MSE: 0.21901470588593194
# "Ridge regression" (actually logistic regression here)
# It only solves binary classification problems and is a discriminative model (as opposed to the generative models covered earlier, which derive certain probabilities from historical data)
# Though I feel the discriminative/generative distinction here is still not quite right
# How logistic regression handles binary classification ==> https://blog.csdn.net/weixin_39445556/article/details/83930186
# First fit the regression curve, then pick a reasonable value on the y axis as the threshold (cut-off); predictions with y above the threshold fall in one class, those below it in the other
# The threshold can be chosen from the proportion of one class in the total sample
def LingHuiGui():
    # cancer prediction
    # read the data
    column = ['Sample code number','Clump Thickness', 'Uniformity of Cell Size','Uniformity of Cell Shape','Marginal Adhesion', 'Single Epithelial Cell Size','Bare Nuclei','Bland Chromatin','Normal Nucleoli','Mitoses','Class']
    data = pd.read_csv(r"E:\chrome下载的东西\breast-cancer-wisconsin.data", names=column) # this way of specifying the column names is worth remembering
    print(data)
    # handle missing values
    data = data.replace("?", np.nan)
    data = data.dropna()
    # split the data  # column is the label-name list defined above
    x_train, x_test, y_train, y_test = train_test_split(data[column[1:10]], data[column[10]], test_size=0.25)
    # standardize (normalize) the data
    std_x = StandardScaler()
    x_train = std_x.fit_transform(x_train)
    x_test = std_x.transform(x_test)
    # the logistic regression used here is a binary classification method
    # std_y = StandardScaler()
    # y_train = std_y.fit_transform(y_train)
    # y_test = std_y.transform(y_test)
    # build the model
    lg = LogisticRegression(C=1.0) # C is the regularization parameter (regularization mitigates overfitting by reducing model complexity)
    lg.fit(x_train, y_train)
    y_predict = lg.predict(x_test)
    # evaluate the error
    print("精确率:", lg.score(x_test, y_test))
    # map the class values 2 and 4 to the names benign (良性) and malignant (恶性)
    print("召回率", classification_report(y_test, y_predict, labels=[2, 4], target_names=["良性", "恶性"]))
    print("加油!")
# mylinear()
# sgd_regression()
LingHuiGui()
|
hahahei957/NewProject_Opencv2
|
机器学习/19_线性回归.py
|
19_线性回归.py
|
py
| 5,679 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
8694936867
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
path("newListing", views.newForm, name="new"),
path("createListing", views.createNewListing, name="create"),
path("<str:title>Listing", views.listing_view, name="listing"),
path("Watchlist", views.watchlist, name="watchlist"),
path("AddToWatchlist/<str:title>", views.add_to_watchlist, name="add"),
path("RemoveFromWatchlist/<str:title>", views.remove_from_watchlist, name="remove"),
path("placeBid/<str:title>", views.place_bid, name="bid"),
path("closeListing/<str:title>", views.close_listing, name="close"),
path("postComment/<str:title>", views.post_comment, name="postComment"),
path("categories", views.categories_view, name="categories"),
path("showCategory/<str:category>", views.single_category_view, name="showCategory")
]
|
SHorne41/Project-2-Commerce
|
auctions/urls.py
|
urls.py
|
py
| 1,046 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35379120975
|
import math
import boto3
from aws_cdk import (
core,
aws_ec2 as ec2,
aws_ecs as ecs,
aws_cloudwatch as cw
)
from cdklocust.locust_container import locustContainer
class CdklocustStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self.get_cdk_context()
self.vpc=vpc
#ECS cluster for the loadgen
self.loadgen_cluster = ecs.Cluster(
self, "Loadgen-Cluster",
vpc=self.vpc
)
#Just using base ENI count, not caring about having ENI trunking turned on
client = boto3.client('ec2')
response = client.describe_instance_types(InstanceTypes=[self.ecs_instance_type])
eni_per_instance = response['InstanceTypes'][0]['NetworkInfo']['MaximumNetworkInterfaces']
number_of_instances = math.ceil((self.number_of_workers + 1) / (eni_per_instance-1))
self.loadgen_cluster.add_capacity("AsgSpot",
max_capacity=number_of_instances * 2,
min_capacity=number_of_instances,
instance_type=ec2.InstanceType(self.ecs_instance_type),
spot_price="0.07",
spot_instance_draining=True
)
        #cloudmap for service discovery so workers can look up the master via dns
self.loadgen_cluster.add_default_cloud_map_namespace(name = self.cloudmap_namespace)
#Create a graph widget to track reservation metrics for our cluster
ecs_widget = cw.GraphWidget(
left=[self.loadgen_cluster.metric_cpu_reservation()],
right=[self.loadgen_cluster.metric_memory_reservation()],
title="ECS - CPU and Memory Reservation",
)
#CloudWatch dashboard to monitor our stuff
self.dashboard = cw.Dashboard(self, "Locustdashboard")
self.dashboard.add_widgets(ecs_widget)
if not self.distributed_locust:
role = "standalone"
locustContainer(self, "locust" + role, self.vpc, self.loadgen_cluster, role, self.target_url)
else:
role = "master"
master_construct = locustContainer(self, "locust" + role, self.vpc,
self.loadgen_cluster, role, self.target_url)
lb_widget = cw.GraphWidget(
left=[master_construct.lb.metric_active_connection_count(),
master_construct.lb.metric_target_response_time()],
right=[master_construct.lb.metric_request_count()],
title="Load Balancer")
self.dashboard.add_widgets(lb_widget)
role = "worker"
worker_construct = locustContainer(self, "locust" + role, self.vpc,
self.loadgen_cluster, role, self.target_url,
self.number_of_workers)
worker_construct.node.add_dependency(master_construct)
def get_cdk_context(self):
# grab stuff from context
self.number_of_workers = int(self.node.try_get_context("number_of_workers"))
self.ecs_instance_type = self.node.try_get_context("ecs_instance_type")
self.vpc_cidr = self.node.try_get_context("vpc_cidr")
self.distributed_locust = self.node.try_get_context("distributed_locust")
self.cloudmap_namespace = self.node.try_get_context("cloudmap_namespace")
self.target_url = self.node.try_get_context("target_url")
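
# Editor's note: a hypothetical example of the cdk.json "context" values that
# get_cdk_context() reads above; the key names come from the try_get_context calls,
# the values are illustrative only.
_EXAMPLE_CDK_CONTEXT = {
    "number_of_workers": 4,
    "ecs_instance_type": "c5.large",
    "vpc_cidr": "10.0.0.0/16",
    "distributed_locust": True,
    "cloudmap_namespace": "loadgen.local",
    "target_url": "http://example.com",
}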
|
tynooo/cdklocust
|
cdklocust/cdklocust_stack.py
|
cdklocust_stack.py
|
py
| 3,687 |
python
|
en
|
code
| 3 |
github-code
|
6
|
26345256198
|
n = map(float, input().split(' '))
counts = {}
for x in n:
if x in counts:
counts[x] += 1
else:
counts[x] = 1
for x in sorted(counts):
print("{} -> {} times".format(x, counts[x]))
|
YovchoGandjurov/Python-Fundamentals
|
02. Lists and Dictionaries/Dictionaries/02.Count_Real_Numbers.py
|
02.Count_Real_Numbers.py
|
py
| 211 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9487674486
|
# -*- coding: utf-8 -*-
import nengo
import numpy as np
from nengo_ssp.vector_generation import HexagonalBasis, GridCellEncoders
#from nengo_ssp.utils import ssp_vectorized
class PathIntegrator(nengo.Network):
def __init__(self, n_neurons, n_gridcells, scale_fac=1.0, basis=None,xy_rad=10, **kwargs):
kwargs.setdefault("label", "PathIntegrator")
super().__init__(**kwargs)
if basis is None:
X, Y, myK = HexagonalBasis(5,5)
d = X.v.shape[0]
else:
X = basis[0]
Y = basis[1]
d = X.v.shape[0]
myK = np.vstack([np.angle(np.fft.fftshift(np.fft.fft(X.v)))[0:d//2],
np.angle(np.fft.fftshift(np.fft.fft(Y.v)))[0:d//2]]).T
n_oscs = d//2
real_ids = np.arange(1,n_oscs*3,3)
imag_ids = np.arange(2,n_oscs*3,3)
S_ids = np.zeros(n_oscs*2 + 1, dtype=int)
S_ids[0:d//2] = real_ids
S_ids[d//2:(n_oscs*2)] = imag_ids
S_ids[-1] = n_oscs*3
i_S_ids = np.argsort(S_ids)
G_encoders,G_sorts = GridCellEncoders(n_gridcells, X,Y, xy_rad)
taus = 0.1*np.ones(n_oscs)
with self:
to_SSP = self.get_to_SSP_mat(d)
i_to_SSP = self.get_from_SSP_mat(d)
self.input_vel = nengo.Node(size_in=2, label="input_vel")
self.input_SSP = nengo.Node(size_in=d, label="input_initial_SSP")
#self.input_FSSP = nengo.Node(size_in=d-1, label="input_initial_FSSP")
#nengo.Node(self.input_SSP, self.input_FSSP, )
self.output = nengo.Node(size_in=d, label="output")
self.velocity = nengo.Ensemble(n_neurons, dimensions=2,label='velocity')
zero_freq_term = nengo.Node([1,0,0])
self.osc = nengo.networks.EnsembleArray(n_neurons, n_oscs + 1,
ens_dimensions = 3,radius=np.sqrt(3), label="osc")
self.osc.output.output = lambda t, x: x
self.grid_cells = nengo.Ensemble(n_gridcells, dimensions=d, encoders = G_encoders,
radius=np.sqrt(2), label="grid_cells")
def feedback(x, tau):
w = x[0]/scale_fac
r = np.maximum(np.sqrt(x[1]**2 + x[2]**2), 1e-5)
dx1 = x[1]*(1-r**2)/r - x[2]*w
dx2 = x[2]*(1-r**2)/r + x[1]*w
return 0, tau*dx1 + x[1], tau*dx2 + x[2]
nengo.Connection(self.input_vel, self.velocity, transform = scale_fac)
for i in np.arange(n_oscs):
nengo.Connection(self.velocity, self.osc.ea_ensembles[i][0], transform = myK[i,:].reshape(1,-1),
synapse=taus[i])
S_back_mat = i_to_SSP[i_S_ids[2*i:(2*i+2)],:]
nengo.Connection(self.input_SSP, self.osc.ea_ensembles[i][1:], transform=S_back_mat) #initialize
nengo.Connection(self.osc.ea_ensembles[i], self.osc.ea_ensembles[i],
function= lambda x: feedback(x, taus[i]),
synapse=taus[i])
#nengo.Connection(self.grid_cells, self.osc.ea_ensembles[i][1:], transform=S_back_mat, synapse=taus[i])
nengo.Connection(zero_freq_term, self.osc.ea_ensembles[-1])
nengo.Connection(self.osc.output[S_ids], self.grid_cells, transform = to_SSP, synapse=taus[0])
#nengo.Connection(self.input_initial_SSP, self.grid_cells)
nengo.Connection(self.grid_cells, self.output)
def get_to_SSP_mat(self,D):
W = np.fft.ifft(np.eye(D))
W1 = W.real @ np.fft.ifftshift(np.eye(D),axes=0)
W2 = W.imag @ np.fft.ifftshift(np.eye(D),axes=0)
shiftmat1 = np.vstack([np.eye(D//2), np.zeros((1,D//2)), np.flip(np.eye(D//2), axis=0)])
shiftmat2 = np.vstack([np.eye(D//2), np.zeros((1,D//2)), -np.flip(np.eye(D//2), axis=0)])
shiftmat = np.vstack([ np.hstack([shiftmat1, np.zeros(shiftmat2.shape)]),
np.hstack([np.zeros(shiftmat2.shape), shiftmat2])])
shiftmat = np.hstack([shiftmat, np.zeros((shiftmat.shape[0],1))])
shiftmat[D//2,-1] = 1
tr = np.hstack([W1, -W2]) @ shiftmat
return tr
def get_from_SSP_mat(self,D):
W = np.fft.fft(np.eye(D))
W1 = np.fft.fftshift(np.eye(D),axes=0) @ W.real
W2 = np.fft.fftshift(np.eye(D),axes=0) @ W.imag
shiftmat1 = np.hstack([np.eye(D//2), np.zeros((D//2, 2*(D//2) + D//2 + 2))])
shiftmat2 = np.hstack([np.zeros((D//2, 2*(D//2) + 1)), np.eye(D//2), np.zeros((D//2, D//2 + 1))])
shiftmat = np.vstack([ shiftmat1,shiftmat2])
tr = shiftmat @ np.vstack([W1, W2])
return tr
|
nsdumont/nengo_ssp
|
nengo_ssp/networks.py
|
networks.py
|
py
| 5,032 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10422637903
|
from __future__ import annotations
import dataclasses
from typing import TYPE_CHECKING
from randovania.bitpacking import bitpacking
from randovania.bitpacking.bitpacking import BitPackDecoder, BitPackValue
from randovania.game_description import default_database
if TYPE_CHECKING:
from collections.abc import Iterator
from randovania.game_description.pickup.ammo_pickup import AmmoPickupDefinition
@dataclasses.dataclass(frozen=True)
class AmmoPickupState(BitPackValue):
ammo_count: tuple[int, ...] = (0,)
pickup_count: int = 0
requires_main_item: bool = True
def check_consistency(self, ammo: AmmoPickupDefinition):
db = default_database.resource_database_for(ammo.game)
if len(self.ammo_count) != len(ammo.items):
raise ValueError(f"Ammo state has {len(self.ammo_count)} ammo counts, expected {len(ammo.items)}")
for count, ammo_name in zip(self.ammo_count, ammo.items):
ammo_item = db.get_item(ammo_name)
minimum_count = -ammo_item.max_capacity if ammo.allows_negative else 0
if not (minimum_count <= count <= ammo_item.max_capacity):
raise ValueError(
f"Ammo count for item {ammo_name} of value {count} is not "
f"in range [{minimum_count}, {ammo_item.max_capacity}]."
)
if self.pickup_count < 0:
raise ValueError(f"Pickup count must be at least 0, got {self.pickup_count}")
def bit_pack_encode(self, metadata) -> Iterator[tuple[int, int]]:
ammo: AmmoPickupDefinition = metadata["ammo"]
db = default_database.resource_database_for(ammo.game)
for count, ammo_name in zip(self.ammo_count, ammo.items):
ammo_item = db.get_item(ammo_name)
yield from bitpacking.encode_int_with_limits(
abs(count),
(ammo_item.max_capacity // 2, ammo_item.max_capacity + 1),
)
if ammo.allows_negative:
yield from bitpacking.encode_bool(count < 0) # Negative?
yield from bitpacking.encode_big_int(self.pickup_count)
if ammo.unlocked_by is not None:
yield from bitpacking.encode_bool(self.requires_main_item)
@classmethod
def bit_pack_unpack(cls, decoder: BitPackDecoder, metadata) -> AmmoPickupState:
ammo: AmmoPickupDefinition = metadata["ammo"]
db = default_database.resource_database_for(ammo.game)
# Ammo Count
ammo_count = []
for ammo_name in ammo.items:
ammo_item = db.get_item(ammo_name)
count = bitpacking.decode_int_with_limits(
decoder,
(ammo_item.max_capacity // 2, ammo_item.max_capacity + 1),
)
if ammo.allows_negative and bitpacking.decode_bool(decoder): # Negative?
count *= -1
ammo_count.append(count)
# Pickup Count
pickup_count = bitpacking.decode_big_int(decoder)
# Require Main Item
requires_main_item = True
if ammo.unlocked_by is not None:
requires_main_item = bitpacking.decode_bool(decoder)
return cls(
ammo_count=tuple(ammo_count),
pickup_count=pickup_count,
requires_main_item=requires_main_item,
)
@property
def as_json(self) -> dict:
result: dict = {}
for field in dataclasses.fields(self):
value = getattr(self, field.name)
result[field.name] = value
result["ammo_count"] = list(result["ammo_count"])
return result
@classmethod
def from_json(cls, value: dict) -> AmmoPickupState:
kwargs = {}
for field in dataclasses.fields(cls):
if field.name in value:
kwargs[field.name] = value[field.name]
if "ammo_count" in kwargs:
kwargs["ammo_count"] = tuple(kwargs["ammo_count"])
return cls(**kwargs)
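
# Editor's note: an illustrative round trip through the JSON helpers above; the values
# are made up and no game data is needed for these two methods.
#   state = AmmoPickupState.from_json({"ammo_count": [10], "pickup_count": 2})
#   state.as_json  ->  {"ammo_count": [10], "pickup_count": 2, "requires_main_item": True}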
|
randovania/randovania
|
randovania/layout/base/ammo_pickup_state.py
|
ammo_pickup_state.py
|
py
| 3,973 |
python
|
en
|
code
| 165 |
github-code
|
6
|
10294402912
|
#-*-python-*-
from warn import *
from Rnaseq import *
from Rnaseq.command import *
from sqlalchemy import *
from sqlalchemy.orm import mapper, sessionmaker
# usage: provenance load <readset pipeline>
# This is sort of a test command, probably won't be used in production
class Load(Command):
def description(self):
return "load a pipeline and quit (debugging tool)"
def usage(self):
return "usage: load -p <pipeline> -r <readset>"
def run(self, *argv, **args):
try:
config=args['config']
readset_file=config['readset_file']
pipeline_name=config['pipeline_name']
except KeyError as e:
raise MissingArgError(str(e))
# have to create session before creating any objects that session adds, etc:
readset=Readset(name=readset_file).load()
pipeline=Pipeline(name=pipeline_name, readset=readset)
pipeline.update(RnaseqGlobals.config)
pipeline.description='desc for juan'
session=RnaseqGlobals.get_session()
session.add(pipeline)
session.commit()
#print __file__, "checking in"
|
phonybone/Rnaseq
|
lib/Rnaseq/cmds/rnaseq/load.py
|
load.py
|
py
| 1,138 |
python
|
en
|
code
| 3 |
github-code
|
6
|
8352755533
|
from __future__ import absolute_import, unicode_literals
import base64
import json
import random
import warnings
import websocket
from c8 import constants
from c8.api import APIWrapper
from c8.apikeys import APIKeys
from c8.c8ql import C8QL
from c8.collection import StandardCollection
from c8.exceptions import (
CollectionCreateError,
CollectionDeleteError,
CollectionFindError,
CollectionListError,
CollectionPropertiesError,
EventCreateError,
EventGetError,
FabricCreateError,
FabricDeleteError,
FabricGetMetadataError,
FabricListError,
FabricPropertiesError,
FabricSetMetadataError,
FabricUpdateMetadataError,
GetAPIKeys,
GetDcDetailError,
GetDcListError,
GetLocalDcError,
GraphCreateError,
GraphDeleteError,
GraphListError,
RestqlCreateError,
RestqlCursorError,
RestqlDeleteError,
RestqlExecuteError,
RestqlImportError,
RestqlListError,
RestqlUpdateError,
RestqlValidationError,
ServerConnectionError,
ServerVersionError,
SpotRegionAssignError,
SpotRegionUpdateError,
StreamAppGetSampleError,
StreamCommunicationError,
StreamConnectionError,
StreamCreateError,
StreamDeleteError,
StreamListError,
StreamPermissionError,
)
from c8.executor import AsyncExecutor, BatchExecutor, DefaultExecutor
from c8.graph import Graph
from c8.keyvalue import KV
from c8.request import Request
from c8.search import Search
from c8.stream_apps import StreamApps
from c8.stream_collection import StreamCollection
__all__ = [
"StandardFabric",
"AsyncFabric",
"BatchFabric",
]
ENDPOINT = "/streams"
def raise_timeout(signum, frame):
raise TimeoutError
class Fabric(APIWrapper):
"""Base class for Fabric API wrappers.
:param connection: HTTP connection.
:type connection: c8.connection.Connection
:param executor: API executor.
:type executor: c8.executor.Executor
"""
def enum(**enums):
return type("Enum", (), enums)
SPOT_CREATION_TYPES = enum(
AUTOMATIC="automatic", NONE="none", SPOT_REGION="spot_region"
)
def __init__(self, connection, executor):
self.url = connection.url
self.header = connection.headers
self.stream_port = constants.STREAM_PORT
super(Fabric, self).__init__(connection, executor)
def __getitem__(self, name):
"""Return the collection API wrapper.
:param name: Collection name.
:type name: str | unicode
:returns: Collection API wrapper.
:rtype: c8.collection.StandardCollection
"""
return self.collection(name)
@property
def name(self):
"""Return fabric name.
:returns: Fabric name.
:rtype: str | unicode
"""
return self.fabric_name
@property
def c8ql(self):
"""Return C8QL (C8Db Query Language) API wrapper.
:returns: C8QL API wrapper.
:rtype: c8.c8ql.C8QL
"""
return C8QL(self._conn, self._executor)
@property
def key_value(self):
"""Return KV (Key Value) API wrapper.
:returns: KV API wrapper.
:rtype: c8.keyvalue.KV
"""
return KV(self._conn, self._executor)
def on_change(self, collection, callback, timeout=60):
"""Execute given input function on receiving a change.
:param collection: Collection name(s) regex to listen for
:type collection: str
:param timeout: timeout value
:type timeout: int
:param callback: Function to execute on a change
:type callback: function
"""
if not callback:
raise ValueError("You must specify a callback function")
if not collection:
raise ValueError(
"You must specify a collection on which realtime "
"data is to be watched!"
)
namespace = constants.STREAM_LOCAL_NS_PREFIX + self.fabric_name
subscription_name = "%s-%s-subscription-%s" % (
self.tenant_name,
self.fabric_name,
str(random.randint(1, 1000)),
)
url = self.url.split("//")[1].split(":")[0]
topic = "wss://{}/_ws/ws/v2/consumer/persistent/{}/{}/{}/{}".format(
url, self.tenant_name, namespace, collection, subscription_name
)
ws = websocket.create_connection(topic, header=self.header, timeout=timeout)
try:
# "pyC8 Realtime: Begin monitoring realtime updates for " + topic
while True:
msg = json.loads(ws.recv())
data = base64.b64decode(msg["payload"])
ws.send(json.dumps({"messageId": msg["messageId"]}))
callback(data)
except websocket.WebSocketTimeoutException:
pass
except Exception as e:
raise Exception(e)
finally:
ws.close()
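
    # Editor's note: an illustrative (hypothetical) way to consume realtime updates with
    # on_change(); the collection name and the callback are placeholders.
    #
    #   def handle_update(payload):
    #       print("received:", payload)
    #
    #   fabric.on_change("my_collection", callback=handle_update, timeout=30)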
def properties(self):
"""Return fabric properties.
:returns: Fabric properties.
:rtype: dict
:raise c8.exceptions.FabricPropertiesError: If retrieval fails.
"""
request = Request(
method="get",
endpoint="/database/current",
)
def response_handler(resp):
if not resp.is_success:
raise FabricPropertiesError(resp, request)
result = resp.body["result"]
result["system"] = result.pop("isSystem")
return result
return self._execute(request, response_handler)
def update_spot_region(self, tenant, fabric, new_dc):
"""Updates spot primary region for the geo-fabric
:param tenant: tenant name
:type tenant: str
:param fabric: fabric name
:type fabric: str
:param new_dc: New spot region
:type new_dc: str
        :returns: True if request successful, False otherwise
        :rtype: bool
        :raise c8.exceptions.SpotRegionUpdateError: If the update fails.
"""
request = Request(
method="put", endpoint="/_fabric/{}/database/{}".format(fabric, new_dc)
)
def response_handler(resp):
if not resp.is_success:
raise SpotRegionUpdateError(resp, request)
return True
return self._execute(request, response_handler)
def fabrics_detail(self):
request = Request(method="get", endpoint="/database/user")
def response_handler(resp):
if not resp.is_success:
raise FabricListError(resp, request)
return [
{"name": col["name"], "options": col["options"]}
for col in map(dict, resp.body["result"])
]
return self._execute(request, response_handler)
def version(self):
"""Return C8Db server version.
:returns: Server version.
:rtype: str | unicode
:raise c8.exceptions.ServerVersionError: If retrieval fails.
"""
request = Request(method="get", endpoint="/version", params={"details": False})
def response_handler(resp):
if not resp.is_success:
raise ServerVersionError(resp, request)
return resp.body["version"]
return self._execute(request, response_handler)
def ping(self):
"""Ping the C8Db server by sending a test request.
:returns: Response code from server.
:rtype: int
:raise c8.exceptions.ServerConnectionError: If ping fails.
"""
request = Request(
method="get",
endpoint="/collection",
)
def response_handler(resp):
code = resp.status_code
if code in {401, 403}:
raise ServerConnectionError("bad username and/or password")
if not resp.is_success:
raise ServerConnectionError(resp.error_message or "bad server response")
return code
return self._execute(request, response_handler)
#########################
# Datacenter Management #
#########################
def dclist(self, detail=False):
"""Return the list of names of Datacenters
        :param detail: Detailed list of DCs if set to True, else only DC names.
        :type detail: bool
:returns: DC List.
:rtype: [str | unicode ]
:raise c8.exceptions.GetDcListError: If retrieval fails.
"""
properties = self.properties()
if not detail:
return properties["options"]["dcList"].split(",")
tenant_name = properties["options"]["tenant"]
request = Request(
method="get", endpoint="/datacenter/_tenant/{}".format(tenant_name)
)
def response_handler(resp):
if not resp.is_success:
raise GetDcListError(resp, request)
dc_list = resp.body[0]["dcInfo"]
for dc in dc_list:
if dc["name"] not in properties["options"]["dcList"]:
dc_list.remove(dc)
return dc_list
return self._execute(request, response_handler, custom_prefix="")
def localdc(self, detail=True):
"""Fetch data for a local/regional the data center
:param detail: Details of local DC if set to true else only DC name.
:type: boolean
:returns: Local DC details.
:rtype: str | dict
:raise c8.exceptions.GetLocalDcError: If retrieval fails.
"""
request = Request(method="get", endpoint="/datacenter/local")
def response_handler(resp):
if not resp.is_success:
raise GetLocalDcError(resp, request)
if detail:
return resp.body
return resp.body["name"]
return self._execute(request, response_handler, custom_prefix="")
def get_dc_detail(self, dc):
"""Fetch data for data center, identified by dc-name
:param dc: DC name
:type: str
:returns: DC details.
:rtype: dict
:raise c8.exceptions.GetDcDetailError: If retrieval fails.
"""
request = Request(method="get", endpoint="/datacenter/{}".format(dc))
def response_handler(resp):
if not resp.is_success:
raise GetDcDetailError(resp, request)
return resp.body
return self._execute(request, response_handler, custom_prefix="")
def dclist_all(self):
"""Fetch data about all the data centers
:returns: DC List.
:rtype: [str | unicode ]
:raise c8.exceptions.GetDcListError: If retrieval fails.
"""
request = Request(method="get", endpoint="/datacenter/all")
def response_handler(resp):
if not resp.is_success:
raise GetDcListError(resp, request)
return resp.body
return self._execute(request, response_handler, custom_prefix="")
def assign_dc_spot(self, dc, spot_region=False):
"""Assigns spot region of a fed
:param dc: dc name
:type dc: str
:param spot_region: If True, makes the region a spot region
:type spot_region: bool
:returns: True if request successful, False otherwise
:rtype: bool
:raise c8.exceptions.SpotRegionAssignError: If assignment fails.
"""
data = json.dumps(spot_region)
request = Request(method="put", endpoint="/datacenter/{}/{}".format(dc, data))
def response_handler(resp):
if not resp.is_success:
raise SpotRegionAssignError(resp, request)
return True
return self._execute(request, response_handler, custom_prefix="")
#######################
# Fabric Management #
#######################
def fabrics(self):
"""Return the names all fabrics.
:returns: Fabric names.
:rtype: [str | unicode]
:raise c8.exceptions.FabricListError: If retrieval fails.
"""
request = Request(method="get", endpoint="/database")
def response_handler(resp):
if not resp.is_success:
raise FabricListError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def has_fabric(self, name):
"""Check if a fabric exists.
:param name: Fabric name.
:type name: str | unicode
:returns: True if fabric exists, False otherwise.
:rtype: bool
"""
return name in self.fabrics()
def create_fabric(
self,
name,
spot_dc=None,
users=None,
dclist=None,
spot_creation_type=SPOT_CREATION_TYPES.AUTOMATIC,
):
"""Create a new fabric.
:param name: Fabric name.
:type name: str | unicode
:param spot_creation_type: Specifying the mode of creating geo-fabric.
If you use AUTOMATIC, a random spot region
will be assigned by the system. If you
specify NONE, a geo-fabric is created
without the spot properties. If you specify
                                   SPOT_REGION, pass the corresponding spot
                                   region in the spot_dc parameter.
        :type spot_creation_type: Enum containing spot region creation types
        :param spot_dc: Spot region name, required if spot_creation_type is
                        set to SPOT_REGION.
        :type spot_dc: str
:param users: List of users with access to the new fabric
:type users: [str | unicode]
:param dclist: list of strings of datacenters
:type dclist: [str | unicode]
:returns: True if fabric was created successfully.
:rtype: bool
:raise c8.exceptions.FabricCreateError: If create fails.
"""
data = {"name": name}
if users is not None:
data["users"] = users
options = {}
dcl = ""
if dclist:
# Process dclist param (type list) to build up comma-separated
# string of DCs
for dc in dclist:
if len(dcl) > 0:
dcl += ","
dcl += dc
options["dcList"] = dcl
if spot_creation_type == self.SPOT_CREATION_TYPES.NONE:
options["spotDc"] = ""
elif spot_creation_type == self.SPOT_CREATION_TYPES.SPOT_REGION and spot_dc:
options["spotDc"] = spot_dc
data["options"] = options
request = Request(method="post", endpoint="/database", data=data)
def response_handler(resp):
if not resp.is_success:
raise FabricCreateError(resp, request)
return True
return self._execute(request, response_handler)
def get_fabric_metadata(self):
"""Fetch information about a GeoFabric.
:returns: Fabric information.
:rtype: dict
:raise c8.exceptions.FabricGetMetadataError: If retrieval fails.
"""
request = Request(method="get", endpoint="/database/metadata")
def response_handler(resp):
if not resp.is_success:
raise FabricGetMetadataError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def set_fabric_metadata(self, metadata):
"""Set the GeoFabric Metadata.
:param metadata: Fabric metadata.
:type metadata: dict
:returns: True if metadata was set successfully.
:rtype: bool
:raise c8.exceptions.FabricSetMetadataError: If set fails.
"""
data = {"metadata": metadata}
request = Request(method="put", endpoint="/database/metadata", data=data)
def response_handler(resp):
if not resp.is_success:
raise FabricSetMetadataError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def update_fabric_metadata(self, metadata):
"""Modfiy the GeoFabric metadata.
:param metadata: Fabric metadata.
:type metadata: dict
:returns: True if metadata was set successfully.
:rtype: bool
:raise c8.exceptions.FabricUpdateMetadataError: If update fails.
"""
data = {"metadata": metadata}
request = Request(method="patch", endpoint="/database/metadata", data=data)
def response_handler(resp):
if not resp.is_success:
raise FabricUpdateMetadataError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def delete_fabric(self, name, ignore_missing=False):
"""Delete the fabric.
:param name: Fabric name.
:type name: str | unicode
:param ignore_missing: Do not raise an exception on missing fabric.
:type ignore_missing: bool
:returns: True if fabric was deleted successfully, False if fabric
was not found and **ignore_missing** was set to True.
:rtype: bool
:raise c8.exceptions.FabricDeleteError: If delete fails.
"""
request = Request(method="delete", endpoint="/database/{}".format(name))
def response_handler(resp):
if resp.error_code == 1228 and ignore_missing:
return False
if not resp.is_success:
raise FabricDeleteError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
#########################
# Collection Management #
#########################
def collection(self, name):
"""Return the standard collection API wrapper.
:param name: Collection name.
:type name: str | unicode
:returns: Standard collection API wrapper.
:rtype: c8.collection.StandardCollection
"""
if self.has_collection(name):
return StandardCollection(self._conn, self._executor, name)
else:
raise CollectionFindError("Collection not found")
def has_collection(self, name):
"""Check if collection exists in the fabric.
:param name: Collection name.
:type name: str | unicode
:returns: True if collection exists, False otherwise.
:rtype: bool
"""
return any(col["name"] == name for col in self.collections())
def collections(self, collectionModel=None):
"""Return the collections in the fabric.
:returns: Collections in the fabric and their details.
:rtype: [dict]
:raise c8.exceptions.CollectionListError: If retrieval fails.
"""
request = Request(method="get", endpoint="/collection")
def response_handler(resp):
if not resp.is_success:
raise CollectionListError(resp, request)
if collectionModel is not None:
docs = [
col
for col in map(dict, resp.body["result"])
if col["collectionModel"] == collectionModel
]
else:
docs = [col for col in map(dict, resp.body["result"])]
collections = []
for col in docs:
c = {
"id": col["id"],
"name": col["name"],
"system": col["isSystem"],
"type": StandardCollection.types[col["type"]],
"status": StandardCollection.statuses[col["status"]],
"collectionModel": col["collectionModel"],
}
if "isSpot" in col.keys():
c["isSpot"] = col["isSpot"]
else:
c["strongConsistency"] = col.get("strongConsistency", False)
collections.append(c)
return collections
return self._execute(request, response_handler)
def create_collection(
self,
name,
sync=False,
edge=False,
user_keys=True,
key_increment=None,
key_offset=None,
key_generator="traditional",
shard_fields=None,
index_bucket_count=None,
sync_replication=None,
enforce_replication_factor=None,
strong_consistency=None,
local_collection=False,
is_system=False,
stream=False,
*,
spot_collection=None,
):
"""Create a new collection.
:param name: Collection name.
:type name: str | unicode
:param sync: If set to True, document operations via the collection
will block until synchronized to disk by default.
:type sync: bool
:param edge: If set to True, an edge collection is created.
:type edge: bool
:param key_generator: Used for generating document keys. Allowed values
are "traditional" or "autoincrement".
:type key_generator: str | unicode
:param user_keys: If set to True, users are allowed to supply document
keys. If set to False, the key generator is solely responsible for
supplying the key values.
:type user_keys: bool
:param key_increment: Key increment value. Applies only when value of
**key_generator** is set to "autoincrement".
:type key_increment: int
:param key_offset: Key offset value. Applies only when value of
**key_generator** is set to "autoincrement".
:type key_offset: int
:param shard_fields: Field(s) used to determine the target shard.
:type shard_fields: [str | unicode]
:param index_bucket_count: Number of buckets into which indexes using
hash tables are split. The default is 16, and this number has to be
a power of 2 and less than or equal to 1024. For large collections,
one should increase this to avoid long pauses when the hash table
has to be initially built or re-sized, since buckets are re-sized
individually and can be initially built in parallel. For instance,
64 may be a sensible value for 100 million documents.
:type index_bucket_count: int
:param sync_replication: If set to True, server reports success only
when collection is created in all replicas. You can set this to
False for faster server response, and if full replication is not a
concern.
:type sync_replication: bool
:param enforce_replication_factor: Check if there are enough replicas
available at creation time, or halt the operation.
:type enforce_replication_factor: bool
:param strong_consistency: If True, strong consistency is enabled
:type strong_consistency: bool
:param is_system: If True, able to create system collections
:type is_system: bool
:param stream: If True, create a local stream for collection.
:type stream: bool
:param spot_collection: If True, it is a spot collection.
            Deprecated. Use strong_consistency instead.
:type spot_collection: bool
:returns: Standard collection API wrapper.
:rtype: c8.collection.StandardCollection
:raise c8.exceptions.CollectionCreateError: If create fails.
"""
        # Newer versions of GDN have renamed the isSpot property to strongConsistency.
        # We are using a keyword-only argument to keep backward compatibility of the SDK.
        if spot_collection is not None and strong_consistency is not None:
            raise TypeError("create_collection received both spot_collection and strong_consistency")
elif spot_collection is not None:
warnings.simplefilter("once", DeprecationWarning)
warnings.warn("spot_collection is deprecated. Use strong_consistency instead.", DeprecationWarning, 2)
warnings.simplefilter("default", DeprecationWarning)
isSpot = spot_collection
elif strong_consistency is not None:
isSpot = strong_consistency
else:
isSpot = False
key_options = {"type": key_generator, "allowUserKeys": user_keys}
if key_increment is not None:
key_options["increment"] = key_increment
if key_offset is not None:
key_options["offset"] = key_offset
if spot_collection and local_collection:
return "Collection can either be spot or local"
else:
# Both strong_consistency and isSpot is added to the request body.
# Correct value will be picked by the GDN depending on the supported property.
data = {
"name": name,
"waitForSync": sync,
"keyOptions": key_options,
"type": 3 if edge else 2,
"isSpot": isSpot,
"strongConsistency": isSpot,
"isLocal": local_collection,
"isSystem": is_system,
"stream": stream,
}
if shard_fields is not None:
data["shardKeys"] = shard_fields
if index_bucket_count is not None:
data["indexBuckets"] = index_bucket_count
params = {}
if sync_replication is not None:
params["waitForSyncReplication"] = sync_replication
if enforce_replication_factor is not None:
params["enforceReplicationFactor"] = enforce_replication_factor
request = Request(
method="post", endpoint="/collection", params=params, data=data
)
def response_handler(resp):
if resp.is_success:
return self.collection(name)
raise CollectionCreateError(resp, request)
return self._execute(request, response_handler)
def update_collection_properties(
self, collection_name, has_stream=None, wait_for_sync=None
):
"""Changes the properties of a collection.
Note: except for waitForSync and hasStream, collection properties cannot be changed once a collection is created.
:param collection_name: Collection name.
:type collection_name: str | unicode
:param has_stream: True if creating a live collection stream.
:type has_stream: bool
:param wait_for_sync: True if all data must be synced to storage before operation returns.
:type wait_for_sync: bool
"""
data = {}
if has_stream is not None:
data["hasStream"] = has_stream
if wait_for_sync is not None:
data["waitForSync"] = wait_for_sync
request = Request(
method="put",
endpoint="/collection/{}/properties".format(collection_name),
data=data,
)
def response_handler(resp):
if resp.is_success:
return resp.body
raise CollectionPropertiesError(resp, request)
return self._execute(request, response_handler)
def delete_collection(self, name, ignore_missing=False, system=None):
"""Delete the collection.
:param name: Collection name.
:type name: str | unicode
:param ignore_missing: Do not raise an exception on missing collection.
:type ignore_missing: bool
:param system: Whether the collection is a system collection.
:type system: bool
:returns: True if collection was deleted successfully, False if
collection was not found and **ignore_missing** was set to True.
:rtype: bool
:raise c8.exceptions.CollectionDeleteError: If delete fails.
"""
params = {}
if system is not None:
params["isSystem"] = system
request = Request(
method="delete", endpoint="/collection/{}".format(name), params=params
)
def response_handler(resp):
if resp.error_code == 1203 and ignore_missing:
return False
if not resp.is_success:
raise CollectionDeleteError(resp, request)
return True
return self._execute(request, response_handler)
####################
# Graph Management #
####################
def graph(self, name):
"""Return the graph API wrapper.
:param name: Graph name.
:type name: str | unicode
:returns: Graph API wrapper.
:rtype: c8.graph.Graph
"""
return Graph(self._conn, self._executor, name)
def has_graph(self, name):
"""Check if a graph exists in the fabric.
:param name: Graph name.
:type name: str | unicode
:returns: True if graph exists, False otherwise.
:rtype: bool
"""
for graph in self.graphs():
if graph["name"] == name:
return True
return False
def graphs(self):
"""List all graphs in the fabric.
:returns: Graphs in the fabric.
:rtype: [dict]
:raise c8.exceptions.GraphListError: If retrieval fails.
"""
request = Request(method="get", endpoint="/graph")
def response_handler(resp):
if not resp.is_success:
raise GraphListError(resp, request)
return [
{
"id": body["_id"],
"name": body["_key"],
"revision": body["_rev"],
"orphan_collections": body["orphanCollections"],
"edge_definitions": [
{
"edge_collection": definition["collection"],
"from_vertex_collections": definition["from"],
"to_vertex_collections": definition["to"],
}
for definition in body["edgeDefinitions"]
],
"shard_count": body.get("numberOfShards"),
"replication_factor": body.get("replicationFactor"),
}
for body in resp.body["graphs"]
]
return self._execute(request, response_handler)
def create_graph(
self, name, edge_definitions=None, orphan_collections=None, shard_count=None
):
"""Create a new graph.
:param name: Graph name.
:type name: str | unicode
:param edge_definitions: List of edge definitions, where each edge
definition entry is a dictionary with fields "edge_collection",
"from_vertex_collections" and "to_vertex_collections" (see below
for example).
:type edge_definitions: [dict]
:param orphan_collections: Names of additional vertex collections that
are not in edge definitions.
:type orphan_collections: [str | unicode]
:param shard_count: Number of shards used for every collection in the
graph. To use this, parameter **smart** must be set to True and
every vertex in the graph must have the smart field. This number
cannot be modified later once set. Applies only to enterprise
version of C8Db.
:type shard_count: int
:returns: Graph API wrapper.
:rtype: c8.graph.Graph
:raise c8.exceptions.GraphCreateError: If create fails.
Here is an example entry for parameter **edge_definitions**:
.. code-block:: python
{
'edge_collection': 'teach',
'from_vertex_collections': ['teachers'],
'to_vertex_collections': ['lectures']
}
"""
data = {"name": name}
if edge_definitions is not None:
data["edgeDefinitions"] = [
{
"collection": definition["edge_collection"],
"from": definition["from_vertex_collections"],
"to": definition["to_vertex_collections"],
}
for definition in edge_definitions
]
if orphan_collections is not None:
data["orphanCollections"] = orphan_collections
if shard_count is not None: # pragma: no cover
data["numberOfShards"] = shard_count
request = Request(method="post", endpoint="/graph", data=data)
def response_handler(resp):
if resp.is_success:
return Graph(self._conn, self._executor, name)
raise GraphCreateError(resp, request)
return self._execute(request, response_handler)
def delete_graph(self, name, ignore_missing=False, drop_collections=None):
"""Drop the graph of the given name from the fabric.
:param name: Graph name.
:type name: str | unicode
:param ignore_missing: Do not raise an exception on missing graph.
:type ignore_missing: bool
:param drop_collections: Drop the collections of the graph also. This
is only if they are not in use by other graphs.
:type drop_collections: bool
:returns: True if graph was deleted successfully, False if graph was not
found and **ignore_missing** was set to True.
:rtype: bool
:raise c8.exceptions.GraphDeleteError: If delete fails.
"""
params = {}
if drop_collections is not None:
params["dropCollections"] = drop_collections
request = Request(
method="delete", endpoint="/graph/{}".format(name), params=params
)
def response_handler(resp):
if resp.error_code == 1924 and ignore_missing:
return False
if not resp.is_success:
raise GraphDeleteError(resp, request)
return True
return self._execute(request, response_handler)
########################
# Async Job Management #
########################
# Pratik: APIs not supported in documentation. Waiting for verification
# def async_jobs(self, status, count=None):
# """Return IDs of async jobs with given status.
#
# :param status: Job status (e.g. "pending", "done").
# :type status: str | unicode
# :param count: Max number of job IDs to return.
# :type count: int
# :returns: List of job IDs.
# :rtype: [str | unicode]
# :raise c8.exceptions.AsyncJobListError: If retrieval fails.
# """
# params = {}
# if count is not None:
# params['count'] = count
#
# request = Request(
# method='get',
# endpoint='/job/{}'.format(status),
# params=params
# )
#
# def response_handler(resp):
# if resp.is_success:
# return resp.body
# raise AsyncJobListError(resp, request)
#
# return self._execute(request, response_handler)
#
# def clear_async_jobs(self, threshold=None):
# """Clear async job results from the server.
#
# Async jobs that are still queued or running are not stopped.
#
# :param threshold: If specified, only the job results created prior to
# the threshold (a unix timestamp) are deleted. Otherwise, all job
# results are deleted.
# :type threshold: int
# :returns: True if job results were cleared successfully.
# :rtype: bool
# :raise c8.exceptions.AsyncJobClearError: If operation fails.
# """
# if threshold is None:
# url = '/job/all'
# params = None
# else:
# url = '/job/expired'
# params = {'stamp': threshold}
#
# request = Request(
# method='delete',
# endpoint=url,
# params=params
# )
#
# def response_handler(resp):
# if resp.is_success:
# return True
# raise AsyncJobClearError(resp, request)
#
# return self._execute(request, response_handler)
########################
# Streams Management #
########################
def stream(self, operation_timeout_seconds=30):
"""Return the stream collection API wrapper.
:returns: stream collection API wrapper.
:rtype: c8.stream_collection.StreamCollection
"""
return StreamCollection(
self,
self._conn,
self._executor,
self.url,
self.stream_port,
operation_timeout_seconds,
)
def streams(self, local=None):
"""Get list of all streams under given fabric
:returns: List of streams under given fabric.
:rtype: json
:raise c8.exceptions.StreamListError: If retrieving streams fails.
"""
if local is False:
url_endpoint = "/streams?global=true"
elif local is True:
url_endpoint = "/streams?global=false"
elif local is None:
url_endpoint = "/streams"
request = Request(method="get", endpoint=url_endpoint)
def response_handler(resp):
code = resp.status_code
if resp.is_success:
return [
{
"name": col["topic"],
"topic": col["topic"],
"local": col["local"],
"db": col["db"],
"tenant": col["tenant"],
"type": StreamCollection.types[col["type"]],
"status": "terminated"
if "terminated" in col
else "active", # noqa
}
for col in map(dict, resp.body["result"])
]
elif code == 403:
raise StreamPermissionError(resp, request)
raise StreamListError(resp, request)
return self._execute(request, response_handler)
def has_stream(self, stream, isCollectionStream=False, local=False):
"""Check if the list of streams has a stream with the given name.
:param stream: The name of the stream for which to check in the list
of all streams.
:type stream: str | unicode
:returns: True=stream found; False=stream not found.
:rtype: bool
"""
if isCollectionStream is False:
if local is False and "c8globals" not in stream:
stream = "c8globals." + stream
elif local is True and "c8locals" not in stream:
stream = "c8locals." + stream
return any(mystream["name"] == stream for mystream in self.streams(local=local))
def create_stream(self, stream, local=False):
"""
Create the stream under the given fabric
:param stream: name of stream
:param local: Operate on a local stream instead of a global one.
:returns: 200, OK if operation successful
:raise: c8.exceptions.StreamCreateError: If creating streams fails.
"""
if local is True:
endpoint = "{}/{}?global=False".format(ENDPOINT, stream)
elif local is False:
endpoint = "{}/{}?global=True".format(ENDPOINT, stream)
request = Request(method="post", endpoint=endpoint)
def response_handler(resp):
code = resp.status_code
if resp.is_success:
return resp.body["result"]
elif code == 502:
raise StreamCommunicationError(resp, request)
raise StreamCreateError(resp, request)
return self._execute(request, response_handler)
def delete_stream(self, stream, force=False):
"""
Delete the streams under the given fabric
:param stream: name of stream
        :param force: If set to True, force-delete the stream.
:returns: 200, OK if operation successful
:raise: c8.exceptions.StreamDeleteError: If deleting streams fails.
"""
endpoint = f"{ENDPOINT}/{stream}"
if force:
endpoint = endpoint + "?force=true"
request = Request(method="delete", endpoint=endpoint)
def response_handler(resp):
code = resp.status_code
if resp.is_success:
return True
elif code == 403:
raise StreamPermissionError(resp, request)
elif code == 412:
raise StreamDeleteError(resp, request)
raise StreamConnectionError(resp, request)
return self._execute(request, response_handler)
#####################
# Restql Management #
#####################
def save_restql(self, data):
"""Save restql by name.
:param data: data to be used for restql POST API
:type data: dict
:returns: Results of restql API
:rtype: dict
:raise c8.exceptions.RestqlCreateError: if restql operation failed
"""
query_name = data["query"]["name"]
if " " in query_name:
raise RestqlValidationError("White Spaces not allowed in Query " "Name")
request = Request(method="post", endpoint="/restql", data=data)
def response_handler(resp):
if not resp.is_success:
raise RestqlCreateError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def import_restql(self, queries, details=False):
"""Import custom queries.
:param queries: queries to be imported
:type queries: [dict]
:param details: Whether to include details
:type details: bool
:returns: Results of importing restql
:rtype: dict
:raise c8.exceptions.RestqlImportError: if restql operation failed
"""
data = {"queries": queries, "details": details}
request = Request(method="post", endpoint="/restql/import", data=data)
def response_handler(resp):
if not resp.is_success:
raise RestqlImportError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def execute_restql(self, name, data=None):
"""Execute restql by name.
:param name: restql name
:type name: str | unicode
:param data: restql data (optional)
:type data: dict
:returns: Results of execute restql
:rtype: dict
:raise c8.exceptions.RestqlExecuteError: if restql execution failed
"""
if data is None or not ("bindVars" in data or "batchSize" in data):
data = {}
request = Request(
method="post", data=data, endpoint="/restql/execute/{}".format(name)
)
def response_handler(resp):
if not resp.is_success:
raise RestqlExecuteError(resp, request)
return resp.body
return self._execute(request, response_handler)
def read_next_batch_restql(self, id):
"""Read next batch from query worker cursor.
:param id: the cursor-identifier
:type id: int
:returns: Results of execute restql
:rtype: dict
:raise c8.exceptions.RestqlCursorError: if fetch next batch failed
"""
request = Request(method="put", endpoint="/restql/fetch/{}".format(id))
def response_handler(resp):
if not resp.is_success:
raise RestqlCursorError(resp, request)
return resp.body
return self._execute(request, response_handler)
def get_all_restql(self):
"""Get all restql associated for user.
:returns: Details of all restql
:rtype: list
:raise c8.exceptions.RestqlListError: if getting restql failed
"""
request = Request(method="get", endpoint="/restql/user")
def response_handler(resp):
if not resp.is_success:
raise RestqlListError(resp, request)
return resp.body["result"]
return self._execute(request, response_handler)
def update_restql(self, name, data):
"""Update restql by name.
:param name: name of restql
:type name: str | unicode
:param data: restql data
:type data: dict
:returns: True if restql is updated
:rtype: bool
:raise c8.exceptions.RestqlUpdateError: if query update failed
"""
request = Request(method="put", data=data, endpoint="/restql/" + name)
def response_handler(resp):
if not resp.is_success:
raise RestqlUpdateError(resp, request)
return True
return self._execute(request, response_handler)
def delete_restql(self, name):
"""Delete restql by name.
:param name: restql name
:type name: str | unicode
:returns: True if restql is deleted
:rtype: bool
:raise c8.exceptions.RestqlDeleteError: if restql deletion failed
"""
request = Request(method="delete", endpoint="/restql/" + name)
def response_handler(resp):
if not resp.is_success:
raise RestqlDeleteError(resp, request)
return True
return self._execute(request, response_handler)
########################
# Events #
########################
def create_event(self, payload):
"""Create an event.
:param payload: Payload to create event
:type payload: dict
:returns: Dictionary containing the event id
:rtype: dict
:raise c8.exceptions.EventCreateError: if event creation failed
Here is an example entry for parameter **payload**:
.. code-block:: python
{
"action": "string",
"attributes": {},
"description": "string",
"details": "string",
"entityName": "string",
"entityType": "string",
"status": "string"
}
"""
request = Request(method="post", endpoint="/events", data=payload)
def response_handler(resp):
if not resp.is_success:
raise EventCreateError(resp, request)
return resp.body
return self._execute(request, response_handler)
def delete_event(self, eventIds):
"""Delete an event/s.
:param eventIds: The event id for which you want to fetch the event details
:type eventId: list of strings(event Ids)
:returns: List containig all the information of existing events
:rtype: list
:raise c8.exceptions.EventDeleteError: if event creation failed
"""
data = json.dumps((eventIds))
request = Request(method="delete", endpoint="/events", data=data)
def response_handler(resp):
if not resp.is_success:
raise EventGetError(resp, request)
return True
return self._execute(request, response_handler)
def get_all_events(self):
"""Create an event.
:returns: List containig all the information of existing events
:rtype: list
:raise c8.exceptions.EventGetError: if event creation failed
"""
request = Request(method="get", endpoint="/events/tenant")
def response_handler(resp):
if not resp.is_success:
raise EventGetError(resp, request)
return resp.body
return self._execute(request, response_handler)
def get_event_by_Id(self, eventId):
"""Create an event.
:param eventId: The event id for which you want to fetch the event details
:returns: List containig all the information of existing events
:rtype: list
:raise c8.exceptions.EventGetError: if event creation failed
"""
request = Request(method="get", endpoint="/events/" + str(eventId))
def response_handler(resp):
if not resp.is_success:
raise EventGetError(resp, request)
return resp.body
return self._execute(request, response_handler)
########################
# Stream Apps #
########################
def stream_app(self, name):
return StreamApps(self._conn, self._executor, name)
def validate_stream_app(self, data):
"""validates a stream app by given data
:param data: stream app defination string
"""
body = {"definition": data}
req = Request(
method="post", endpoint="/streamapps/validate", data=json.dumps(body)
)
def response_handler(resp):
if resp.is_success is True:
return True
return False
return self._execute(req, response_handler)
def retrieve_stream_app(self):
"""retrieves all the stream apps of a fabric"""
req = Request(
method="get",
endpoint="/streamapps",
)
def response_handler(resp):
if resp.is_success is True:
return resp.body
return False
return self._execute(req, response_handler)
def get_samples_stream_app(self):
"""gets samples for stream apps"""
req = Request(
method="get",
endpoint="/streamapps/samples",
)
def response_handler(resp):
if resp.is_success is not True:
raise StreamAppGetSampleError(resp, req)
return resp.body["streamAppSample"]
return self._execute(req, response_handler)
def create_stream_app(self, data, dclist=[]):
"""Creates a stream application by given data
:param data: stream app definition
:param dclist: regions where stream app has to be deployed
"""
# create request body
req_body = {"definition": data, "regions": dclist}
# create request
req = Request(method="post", endpoint="/streamapps", data=json.dumps(req_body))
# create response handler
def response_handler(resp):
if resp.is_success is True:
return True
return False
# call api
return self._execute(req, response_handler)
########################
# APIKeys #
########################
def api_keys(self, keyid):
"""Return the API keys API wrapper.
:param keyid: API Key id
        :type keyid: str | unicode
        :returns: API keys API wrapper.
        :rtype: APIKeys
"""
return APIKeys(self._conn, self._executor, keyid)
def list_all_api_keys(self):
"""List the API keys.
        :returns: List of API keys.
:raise c8.exceptions.GetAPIKeys: If request fails
"""
request = Request(
method="get",
endpoint="/key",
)
# create response handler
def response_handler(resp):
if not resp.is_success:
raise GetAPIKeys(resp, request)
else:
return resp.body["result"]
return self._execute(request, response_handler, custom_prefix="/_api")
##############################
# Search, View and Analyzers #
##############################
def search(self):
"""Returns the Search APIWrapper
:returns: Search API Wrapper
:rtype: c8.search.Search
"""
return Search(self._conn, self._executor)
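# --- Illustrative usage sketch (not part of the library) ---
# Shows how a callback can be wired into Fabric.on_change(), assuming an
# authenticated fabric wrapper instance `fabric` obtained elsewhere; the
# collection name "demo_docs" is a placeholder.
def _on_change_example(fabric):
    def handle_update(payload):
        # `payload` is the base64-decoded message body received on the stream.
        print("received change:", payload)

    # Blocks until `timeout` seconds pass without receiving a message.
    fabric.on_change("demo_docs", callback=handle_update, timeout=10)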
class StandardFabric(Fabric):
"""Standard fabric API wrapper.
:param connection: HTTP connection.
:type connection: c8.connection.Connection
"""
def __init__(self, connection):
super(StandardFabric, self).__init__(
connection=connection, executor=DefaultExecutor(connection)
)
def __repr__(self):
return "<StandardFabric {}>".format(self.name)
def begin_async_execution(self, return_result=True):
"""Begin async execution.
:param return_result: If set to True, API executions return instances
of :class:`c8.job.AsyncJob`, which you can use to retrieve
results from server once available. If set to False, API executions
return None and no results are stored on server.
:type return_result: bool
:returns: Fabric API wrapper built specifically for async execution.
:rtype: c8.fabric.AsyncFabric
"""
return AsyncFabric(self._conn, return_result)
def begin_batch_execution(self, return_result=True):
"""Begin batch execution.
:param return_result: If set to True, API executions return instances
of :class:`c8.job.BatchJob` that are populated with results on
commit. If set to False, API executions return None and no results
are tracked client-side.
:type return_result: bool
:returns: Fabric API wrapper built specifically for batch execution.
:rtype: c8.fabric.BatchFabric
"""
return BatchFabric(self._conn, return_result)
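# --- Illustrative usage sketch (not part of the library) ---
# A minimal sketch of batch execution, assuming an authenticated StandardFabric
# instance `fabric`; only read-only calls defined in this module are queued.
def _batch_example(fabric):
    with fabric.begin_batch_execution(return_result=True) as batch:
        batch.fabrics()       # queued client-side, returns a BatchJob handle
        batch.collections()   # queued as well
    # Leaving the `with` block commits the batch in a single API request.
    return batch.queued_jobs()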
class AsyncFabric(Fabric):
"""Fabric API wrapper tailored specifically for async execution.
See :func:`c8.fabric.StandardFabric.begin_async_execution`.
:param connection: HTTP connection.
:type connection: c8.connection.Connection
:param return_result: If set to True, API executions return instances of
:class:`c8.job.AsyncJob`, which you can use to retrieve results
from server once available. If set to False, API executions return None
and no results are stored on server.
:type return_result: bool
"""
def __init__(self, connection, return_result):
super(AsyncFabric, self).__init__(
connection=connection, executor=AsyncExecutor(connection, return_result)
)
def __repr__(self):
return "<AsyncFabric {}>".format(self.name)
class BatchFabric(Fabric):
"""Fabric API wrapper tailored specifically for batch execution.
See :func:`c8.fabric.StandardFabric.begin_batch_execution`.
:param connection: HTTP connection.
:type connection: c8.connection.Connection
:param return_result: If set to True, API executions return instances of
:class:`c8.job.BatchJob` that are populated with results on commit.
If set to False, API executions return None and no results are tracked
client-side.
:type return_result: bool
"""
def __init__(self, connection, return_result):
super(BatchFabric, self).__init__(
connection=connection, executor=BatchExecutor(connection, return_result)
)
def __repr__(self):
return "<BatchFabric {}>".format(self.name)
def __enter__(self):
return self
def __exit__(self, exception, *_):
if exception is None:
self._executor.commit()
def queued_jobs(self):
"""Return the queued batch jobs.
:returns: Queued batch jobs or None if **return_result** parameter was
set to False during initialization.
:rtype: [c8.job.BatchJob] | None
"""
return self._executor.jobs
def commit(self):
"""Execute the queued requests in a single batch API request.
If **return_result** parameter was set to True during initialization,
:class:`c8.job.BatchJob` instances are populated with results.
:returns: Batch jobs, or None if **return_result** parameter was set to
False during initialization.
:rtype: [c8.job.BatchJob] | None
:raise c8.exceptions.BatchStateError: If batch state is invalid
(e.g. batch was already committed or the response size did not
match expected).
:raise c8.exceptions.BatchExecuteError: If commit fails.
"""
return self._executor.commit()
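# --- Illustrative usage sketch (not part of the library) ---
# Ties together the collection and RestQL helpers defined above, assuming an
# authenticated StandardFabric instance `fabric`. The collection name, query
# name and query text are placeholders, and the exact RestQL payload shape
# ({"query": {"name": ..., "value": ...}}) is an assumption.
def _collection_and_restql_example(fabric):
    if not fabric.has_collection("demo_docs"):
        fabric.create_collection("demo_docs", strong_consistency=True)
    fabric.save_restql(
        {"query": {"name": "allDemoDocs", "value": "FOR d IN demo_docs RETURN d"}}
    )
    result = fabric.execute_restql("allDemoDocs")
    fabric.delete_restql("allDemoDocs")
    return result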
| Macrometacorp/pyC8 | c8/fabric.py | fabric.py | py | 56,104 | python | en | code | 6 | github-code | 6 |
| 33215300998 |
#
# @lc app=leetcode.cn id=15 lang=python3
#
# [15] 三数之和 (3Sum)
#
# @lc code=start
from typing import List


class Solution:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        result = []
        n = len(nums)
        # Sort so duplicates sit next to each other and the two-pointer scan works.
        nums.sort()
        for i, first in enumerate(nums):
            # Skip duplicate choices for the first element.
            if i > 0 and first == nums[i - 1]:
                continue
            k = n - 1
            for j, second in enumerate(nums[i+1:], i + 1):
                # Skip duplicate choices for the second element.
                if j > i + 1 and second == nums[j - 1]:
                    continue
                # Shrink the right pointer while the pair sum exceeds -first.
                while second + nums[k] > -first and j < k:
                    k -= 1
                if j == k:
                    break
                if second + nums[k] == -first:
                    result.append((first, second, nums[k]))
        return result
# @lc code=end
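# Quick self-check outside the submitted region (illustrative only): for the
# classic example nums = [-1, 0, 1, 2, -1, -4] the solution above yields the
# two unique triplets (-1, -1, 2) and (-1, 0, 1).
if __name__ == "__main__":
    print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))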
| P4Peemo/Leetcode | 15.三数之和.py | 15.三数之和.py | py | 807 | python | en | code | 0 | github-code | 6 |
| 8600407692 |
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .routers import post, user, auth, vote
############################################
#models.Base.metadata.create_all(bind=engine)
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(post.router)
app.include_router(user.router)
app.include_router(auth.router)
app.include_router(vote.router)
@app.get("/")
def root():
return {"message": "Hello World"}
"""
my_posts = [{"title": "title of post 1", "content": "content of post 1", "id":1},
{"title": "favorite foods", "content": "I like pizza", "id":2}]
def find_post(id):
for p in my_posts:
if p["id"] == id:
return p
def find_index_post(id):
for i, p in enumerate(my_posts):
if p['id'] == id:
return i
"""
"""
#Path Operations
@app.get("/")
#THIS IS THE DECORATOR: it creates the endpoint for the function. It is called on the already created FastAPI INSTANCE (app)
#Within the parentheses we pass the PATH, i.e. the path we must access from the URL
#the get method is one of the possible HTTP methods
def root():
    1) `async` is optional (so we can remove it)
    2) The name of the function should be as descriptive as possible
    3) RETURN: the message that is returned back to the user
return {"message": "Hello World"}
"""
| Mattia921/example-fastapi | app/main.py | main.py | py | 1,524 | python | en | code | 0 | github-code | 6 |
| 18849850963 |
import os
import random
import math
import seaborn
import matplotlib.pyplot as plt
num_train_samples = 1
threshold = 0.25
dtw_window = 50
# thresholds: 0.15, 0.2, ...
def read_gesture(path):
with open(path, "r") as file:
lines = [line.rstrip() for line in file]
gesture = [[float(value) for value in data.split(',')] for data in lines]
return gesture
labels = ['circle_ccw', 'circle_cw', 'heart_cw', 'square_ccw', 'triangle_cw', 'junk']
paths = os.listdir('gestures')
circle_ccw = [('circle_ccw', read_gesture('gestures/' + path)) for path in paths if path.startswith('circle_ccw')]
circle_cw = [('circle_cw', read_gesture('gestures/' + path)) for path in paths if path.startswith('circle_cw')]
heart_cw = [('heart_cw', read_gesture('gestures/' + path)) for path in paths if path.startswith('heart_cw')]
square_ccw = [('square_ccw', read_gesture('gestures/' + path)) for path in paths if path.startswith('square_ccw')]
triangle_cw = [('triangle_cw', read_gesture('gestures/' + path)) for path in paths if path.startswith('triangle_cw')]
junk = [('junk', read_gesture('gestures/' + path)) for path in paths if path.startswith('junk')]
def fir_lowpass_first(a):
q = 0.95
b = [a[0]]
for i in range(1, len(a)):
x = (1.0 - q) * a[i - 1][0] + q * a[i][0]
y = (1.0 - q) * a[i - 1][1] + q * a[i][1]
z = (1.0 - q) * a[i - 1][2] + q * a[i][2]
b.append([x, y, z])
return b
def calc_distance(a, b) -> float:
ax = a[0]
ay = a[1]
az = a[2]
bx = b[0]
by = b[1]
bz = b[2]
dir = (ax * bx + ay * by + az * bz) / (normalize(ax, ay, az) * normalize(bx, by, bz) + 0.0000001)
return (1.0 - 0.5 * dir) * normalize(ax - bx, ay - by, az - bz)
def normalize(x, y, z) -> float:
return math.sqrt(x * x + y * y + z * z)
def calc_dtw(a, b) -> float:
a = fir_lowpass_first(a)
b = fir_lowpass_first(b)
dtw = [[0.0 for _ in range(50)] for _ in range(50)]
dtw[0][0] = calc_distance(a[0], b[0])
for i in range(1, 50):
dtw[i][0] = calc_distance(a[i], b[0]) + dtw[i - 1][0]
dtw[0][i] = calc_distance(a[0], b[i]) + dtw[0][i - 1]
for i in range(1, 50):
for j in range(1, 50):
dtw[i][j] = calc_distance(a[i], b[j]) + min(dtw[i - 1][j], dtw[i][j - 1], dtw[i - 1][j - 1])
i = 49
j = 49
distance = [0.0 for _ in range(100)]
length = 0
while i > 0 and j > 0:
if dtw[i - 1][j] <= dtw[i][j - 1] and dtw[i - 1][j] <= dtw[i - 1][j - 1] and (j - i) <= dtw_window:
distance[length] = dtw[i][j] - dtw[i - 1][j]
i -= 1
elif dtw[i][j - 1] < dtw[i - 1][j - 1] and (i - j) <= dtw_window:
distance[length] = dtw[i][j] - dtw[i][j - 1]
j -= 1
else:
distance[length] = dtw[i][j] - dtw[i - 1][j - 1]
i -= 1
j -= 1
length += 1
while i > 0:
distance[length] = dtw[i][0] - dtw[i - 1][0]
i -= 1
length += 1
while j > 0:
distance[length] = dtw[0][j] - dtw[0][j - 1]
j -= 1
length += 1
distance[length] = dtw[0][0]
length += 1
mean = 0.0
for i in range(length):
mean += distance[i]
mean = mean / float(length)
return mean
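# --- Illustrative sanity check (not part of the original script) ---
# calc_dtw() expects two gestures of 50 [x, y, z] samples each. A gesture
# compared against itself should give a (near-)zero mean path distance, while
# two different constant gestures should give a clearly larger value. The
# helper below is not called anywhere; it only documents the expected behaviour.
def dtw_sanity_check():
    flat = [[0.0, 0.0, 1.0]] * 50
    tilted = [[1.0, 0.0, 0.0]] * 50
    print("self distance:", calc_dtw(flat, flat))
    print("cross distance:", calc_dtw(flat, tilted))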
confusion_matrix = {}
num_trails = {}
for true_label in labels:
confusion_matrix[true_label] = {}
num_trails[true_label] = 0.0
for predicted_label in labels:
confusion_matrix[true_label][predicted_label] = 0.0
for _ in range(25):
random.shuffle(circle_ccw)
random.shuffle(circle_cw)
random.shuffle(heart_cw)
random.shuffle(square_ccw)
random.shuffle(triangle_cw)
circle_ccw_train = circle_ccw[:num_train_samples]
circle_ccw_test = circle_ccw[num_train_samples:]
circle_cw_train = circle_cw[:num_train_samples]
circle_cw_test = circle_cw[num_train_samples:]
heart_cw_train = heart_cw[:num_train_samples]
heart_cw_test = heart_cw[num_train_samples:]
square_ccw_train = square_ccw[:num_train_samples]
square_ccw_test = square_ccw[num_train_samples:]
triangle_cw_train = triangle_cw[:num_train_samples]
triangle_cw_test = triangle_cw[num_train_samples:]
train = circle_ccw_train + circle_cw_train + heart_cw_train + square_ccw_train + triangle_cw_train
test = circle_ccw_test + circle_cw_test + heart_cw_test + square_ccw_test + triangle_cw_test + junk
for (predicted_label, gesture) in test:
means = [(a, calc_dtw(gesture, tr)) for (a, tr) in train]
means.sort(key=lambda x:x[1])
true_label = means[0][0]
mean = means[0][1]
if mean > threshold:
true_label = 'junk'
confusion_matrix[true_label][predicted_label] += 1.0
num_trails[predicted_label] += 1.0
cf_plot = [[0 for _ in labels] for _ in labels]
for (i, true_label) in enumerate(['circle_ccw', 'circle_cw', 'heart_cw', 'square_ccw', 'triangle_cw', 'junk']):
for (j, predicted_label) in enumerate(['circle_ccw', 'circle_cw', 'heart_cw', 'square_ccw', 'triangle_cw', 'junk']):
cf_plot[j][i] = confusion_matrix[true_label][predicted_label] / num_trails[predicted_label]
plt.tick_params(labeltop=True, labelbottom=False)
seaborn.heatmap(cf_plot, cmap='rocket_r', annot=True, vmin=0.0, vmax=1.0, xticklabels=labels, yticklabels=labels, cbar=False)
plt.show()
| xrgman/ColorMatchingBracelet | arduino/GestureRecorder/evaluate.py | evaluate.py | py | 5,392 | python | en | code | 2 | github-code | 6 |
| 2055718392 |
# USAGE
# python knn.py --dataset ../../SolutionDL4CV/SB_code/datasets/animals
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from pyimagesearch.preprocessing import SimplePreprocessor
from pyimagesearch.datasets import SimpleDatasetLoader
from imutils import paths
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dataset', required=True, help='path to input dataset')
ap.add_argument('-k', '--neighbors', type=int, default=1, help='# of nearest neighbors for classification')
ap.add_argument('-j', '--jobs', type=int, default=-1, help='# of jobs for k-NN distance (-1 uses all available cores)')
args = vars(ap.parse_args())
print('[INFO] loading images...')
img_paths = list(paths.list_images(args['dataset']))
sp = SimplePreprocessor(32, 32)
sdl = SimpleDatasetLoader(preprocessors=[sp])
data, labels = sdl.load(img_paths, verbose=500)
data = data.reshape((len(data), -1))
print(f'[INFO] feature matrix: {data.nbytes/(1024*1000.0):.1f}MB')
# Encode the labels as integer
le = LabelEncoder()
labels = le.fit_transform(labels)
# Split to train and test set
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.25, random_state=42)
print("[INFO] evaluating k-NN classifier...")
model = KNeighborsClassifier(n_neighbors=args['neighbors'], n_jobs=args['jobs'])
model.fit(X_train, y_train)
print(classification_report(y_test, model.predict(X_test), target_names=le.classes_))
| lykhahaha/Mine | StarterBundle/chapter07-first_image_classifier/knn.py | knn.py | py | 1,583 | python | en | code | 0 | github-code | 6 |
| 4709876834 |
import numpy as np
import matplotlib.pyplot as plt
import activation_functions as acfunc
inp_a = np.arange(-1.0, 1.0, 0.2)
inp_b = np.arange(-1.0, 1.0, 0.2)
outputs = np.zeros((10, 10))
weight_a = 2.5
weight_b = 3
bias = 0.1
for i in range(10):
for j in range(10):
u_single = inp_a[i] * weight_a + inp_b[j] * weight_b + bias
outputs[i][j] = acfunc.sigmoid_func(u_single)
plt.imshow(outputs, "gray", vmin=0.0, vmax=1.0)
plt.colorbar()
plt.show()
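# --- Worked example (not part of the original script) ---
# For the grid point inp_a = 0.8, inp_b = 0.8 the weighted sum is
# u = 0.8 * 2.5 + 0.8 * 3 + 0.1 = 4.5, so the sigmoid output is
# 1 / (1 + e^(-4.5)) ≈ 0.989, i.e. one of the bright cells in the plotted map.
# Recomputed with math.exp so it does not depend on the local
# activation_functions module.
import math
print("check sigmoid(4.5):", 1.0 / (1.0 + math.exp(-4.5)))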
| tsubamon55/pyailesson | single_neuron.py | single_neuron.py | py | 471 | python | en | code | 0 | github-code | 6 |
| 14478011852 |
'''
Loss functions.
'''
import copy
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import utils
class NLLLoss(nn.Module):
"""Self-Defined NLLLoss Function
Args:
weight: Tensor (num_class, )
"""
def __init__(self, weight):
super(NLLLoss, self).__init__()
self.weight = weight
def forward(self, prob, target):
"""
Args:
prob: (N, C)
target : (N, )
"""
N = target.size(0)
C = prob.size(1)
weight = Variable(self.weight).view((1, -1))
weight = weight.expand(N, C) # (N, C)
if prob.is_cuda:
weight = weight.cuda()
prob = weight * prob
one_hot = torch.zeros((N, C))
if prob.is_cuda:
one_hot = one_hot.cuda()
one_hot.scatter_(1, target.data.view((-1,1)), 1)
one_hot = one_hot.type(torch.ByteTensor)
one_hot = Variable(one_hot)
if prob.is_cuda:
one_hot = one_hot.cuda()
loss = torch.masked_select(prob, one_hot)
return -torch.sum(loss)
class GANLoss(nn.Module):
"""Reward-Refined NLLLoss Function for adversial training of Generator"""
def __init__(self):
super(GANLoss, self).__init__()
def forward_reinforce(self, prob, target, reward, cuda=False):
"""
Forward function used in the SeqGAN implementation.
Args:
prob: (N, C), torch Variable
target : (N, ), torch Variable
reward : (N, ), torch Variable
"""
N = target.size(0)
C = prob.size(1)
one_hot = torch.zeros((N, C))
if cuda:
one_hot = one_hot.cuda()
one_hot.scatter_(1, target.data.view((-1,1)), 1)
one_hot = one_hot.type(torch.ByteTensor)
one_hot = Variable(one_hot)
if cuda:
one_hot = one_hot.cuda()
loss = torch.masked_select(prob, one_hot)
loss = loss * reward
loss = -torch.sum(loss)
return loss
def forward_reward(self, i, samples, prob, rewards, BATCH_SIZE, g_sequence_len, VOCAB_SIZE, cuda=False):
"""
Returns what is used to get the gradient contribution of the i-th term of the batch.
"""
conditional_proba = Variable(torch.zeros(BATCH_SIZE, VOCAB_SIZE))
if cuda:
conditional_proba = conditional_proba.cuda()
for j in range(BATCH_SIZE):
conditional_proba[j, int(samples[j, i])] = 1
conditional_proba[j, :] = - (rewards[j]/BATCH_SIZE * conditional_proba[j, :])
return conditional_proba
def forward_reward_grads(self, samples, prob, rewards, g, BATCH_SIZE, g_sequence_len, VOCAB_SIZE, cuda=False):
"""
Returns a list of gradient contribution of every term in the batch
"""
conditional_proba = Variable(torch.zeros(BATCH_SIZE, g_sequence_len, VOCAB_SIZE))
batch_grads = []
if cuda:
conditional_proba = conditional_proba.cuda()
for j in range(BATCH_SIZE):
for i in range(g_sequence_len):
conditional_proba[j, i, int(samples[j, i])] = 1
conditional_proba[j, :, :] = - (rewards[j] * conditional_proba[j, :, :])
for j in range(BATCH_SIZE):
j_grads = []
# since we want to isolate each contribution, we have to zero the generator's gradients here.
g.zero_grad()
prob[j, :, :].backward(conditional_proba[j, :, :], retain_graph=True)
for p in g.parameters():
j_grads.append(p.grad.clone())
batch_grads.append(j_grads)
return batch_grads
class VarianceLoss(nn.Module):
"""Loss for the control variate annex network"""
def __init__(self):
super(VarianceLoss, self).__init__()
def forward(self, grad, cuda = False):
"""
Used to get the gradient of the variance.
"""
bs = len(grad)
ref = 0
for j in range(bs):
for i in range(len(grad[j])):
ref += torch.sum(grad[j][i]**2).item()
total_loss = np.array([ref/bs])
total_loss = Variable(torch.Tensor(total_loss), requires_grad=True)
if cuda:
total_loss = total_loss.cuda()
return total_loss
def forward_variance(self, grad, cuda=False):
"""
Used to get the variance of one single parameter.
        In this case, we look at the last layer, then take the variance of the first parameter of this last layer in main.py.
"""
bs = len(grad)
n_layers = len(grad[0])
square_term = torch.zeros((grad[0][n_layers-1].size()))
normal_term = torch.zeros((grad[0][n_layers-1].size()))
if cuda:
square_term = square_term.cuda()
normal_term = normal_term.cuda()
for j in range(bs):
square_term = torch.add(square_term, grad[j][n_layers-1]**2)
normal_term = torch.add(normal_term, grad[j][n_layers-1])
square_term /= bs
normal_term /= bs
normal_term = normal_term ** 2
return square_term - normal_term
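# --- Illustrative usage sketch (not part of the original module) ---
# A tiny CPU-only check of GANLoss.forward_reinforce: `prob` holds
# log-probabilities of shape (N, C), `target` the sampled token ids and
# `reward` one scalar reward per sample, matching the docstring above. The
# values are arbitrary; depending on the installed torch version the ByteTensor
# mask used internally may emit a deprecation warning.
if __name__ == "__main__":
    N, C = 4, 6
    prob = torch.log_softmax(torch.randn(N, C), dim=1)
    target = torch.randint(0, C, (N,))
    reward = torch.rand(N)
    print("GAN loss:", GANLoss().forward_reinforce(prob, target, reward, cuda=False))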
| TalkToTheGAN/REGAN | loss.py | loss.py | py | 5,235 | python | en | code | 42 | github-code | 6 |
| 8310998664 |
def makeForms(verb):
character = ('o', 'ch', 's', 'sh', 'x', 'z')
if verb.endswith("y"):
new = verb[:-1] + "ies"
elif verb.endswith(character):
new = verb + "es"
else:
new = verb + "s"
return new
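# Illustrative sanity checks (not part of the original exercise); they mirror
# what makeForms() above actually produces for a few sample verbs.
assert makeForms("study") == "studies"
assert makeForms("watch") == "watches"
assert makeForms("go") == "goes"
assert makeForms("run") == "runs"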
def main():
verb = input("Enter your word: ")
print("The third person singular form is", makeForms(verb))
main()
| angelinekaren/Programming-Exercises | Exercise 4/answer12/main.py | main.py | py | 378 | python | en | code | 0 | github-code | 6 |
| 71994815868 |
"""Control implementation for assignment 1.
The controller used the simulation in file `aer1216_fall2020_hw1_sim.py`.
Example
-------
To run the simulation, type in a terminal:
$ python aer1216_fall2020_hw1_sim.py
Notes
-----
Tune the PD coefficients in `HW1Control.__init__()`.
"""
import numpy as np
from gym_pybullet_drones.envs.BaseAviary import BaseAviary
class HW1Control():
"""Control class for assignment 1."""
################################################################################
def __init__(self,
env: BaseAviary
):
""" Initialization of class HW1Control.
Parameters
----------
env : BaseAviary
The PyBullet-based simulation environment.
"""
self.g = env.G
"""float: Gravity acceleration, in meters per second squared."""
self.mass = env.M
"""float: The mass of quad from environment."""
self.timestep = env.TIMESTEP
"""float: Simulation and control timestep."""
self.kf_coeff = env.KF
"""float: RPMs to force coefficient."""
self.km_coeff = env.KM
"""float: RPMs to torque coefficient."""
############################################################
############################################################
#### HOMEWORK CODE (START) #################################
############################################################
############################################################
self.p_coeff_position = 0.7 * 0.7
"""float: Proportional coefficient for position control."""
self.d_coeff_position = 2 * 0.7 * 0.7
"""float: Derivative coefficient for position control."""
############################################################
############################################################
#### HOMEWORK CODE (END) ###################################
############################################################
############################################################
self.reset()
################################################################################
def reset(self):
""" Resets the controller counter."""
self.control_counter = 0
################################################################################
def compute_control(self,
current_position,
current_velocity,
target_position,
target_velocity=np.zeros(3),
target_acceleration=np.zeros(3),
):
"""Compute the propellers' RPMs for the target state, given the current state.
Parameters
----------
current_position : ndarray
(3,)-shaped array of floats containing global x, y, z, in meters.
current_velocity : ndarray
(3,)-shaped array of floats containing global vx, vy, vz, in m/s.
target_position : ndarray
(3,)-shaped array of float containing global x, y, z, in meters.
        target_velocity : ndarray, optional
            (3,)-shaped array of floats containing global vx, vy, vz, in m/s.
        target_acceleration : ndarray, optional
            (3,)-shaped array of floats containing global ax, ay, az, in m/s^2.
Returns
-------
ndarray
(4,)-shaped array of ints containing the desired RPMs of each propeller.
"""
self.control_counter += 1
############################################################
############################################################
#### HOMEWORK CODE (START) #################################
############################################################
############################################################
##### Calculate position and velocity errors ###############
current_pos_error = target_position[2] - current_position[2]
current_vel_error = target_velocity[2] - current_velocity[2]
#### Calculate input with a PD controller ##################
# u = desired_acceleration + Kv * velocity_error + Kp * position_error
u = target_acceleration[2] \
+ self.d_coeff_position * current_vel_error \
+ self.p_coeff_position * current_pos_error
##### Calculate propeller turn rates given the PD input ####
# turn_rate = sqrt( (m*u + m*g) / (4*Kf) )
propellers_rpm = np.sqrt((u*self.mass + self.g*self.mass) / (4 * self.kf_coeff))
# For up-down motion, assign the same turn rates to all motors
propellers_0_and_3_rpm, propellers_1_and_2_rpm = propellers_rpm, propellers_rpm
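# Worked example of the formula above (hypothetical numbers, not read from env):
# at hover u = 0, so with m = 0.027 kg, g = 9.8 m/s^2 and Kf = 3.16e-10 each
# motor turn rate is sqrt(m*g / (4*Kf)) = sqrt(0.2646 / 1.264e-9) ~= 14468.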
############################################################
############################################################
#### HOMEWORK CODE (END) ###################################
############################################################
############################################################
#### Print relevant output #################################
if self.control_counter%(1/self.timestep) == 0:
print("current_position", current_position)
print("current_velocity", current_velocity)
print("target_position", target_position)
print("target_velocity", target_velocity)
print("target_acceleration", target_acceleration)
return np.array([propellers_0_and_3_rpm, propellers_1_and_2_rpm,
propellers_1_and_2_rpm, propellers_0_and_3_rpm])
|
kaustubhsridhar/Constrained_Models
|
Drones/gym-pybullet-drones/assignments/aer1216_fall2020_hw1_ctrl.py
|
aer1216_fall2020_hw1_ctrl.py
|
py
| 5,709 |
python
|
de
|
code
| 15 |
github-code
|
6
|
71570292029
|
from django import forms
from django.core.exceptions import ValidationError
from semester.models import Semester, CourseOffered, CourseDistribution, DistributedSectionDetail
from tempus_dominus.widgets import TimePicker, DatePicker
from django.contrib import messages
from django.shortcuts import redirect
class SemesterForm(forms.ModelForm):
start_effect_date = forms.DateField(
widget=DatePicker(
options={
'collapse': False,
'format': 'L',
},
attrs={
'append': 'fa fa-calendar',
'icon_toggle': True,
'input_toggle': True,
'placeholder': 'Start Effect Date',
'required': 'true',
'autocomplete': 'off',
},
)
)
end_effect_date = forms.DateField(
widget=DatePicker(
options={
'collapse': False,
'format': 'L',
},
attrs={
'append': 'fa fa-calendar',
'icon_toggle': True,
'input_toggle': True,
'placeholder': 'End Effect Date',
'required': 'true',
'autocomplete': 'off',
},
)
)
class Meta:
model = Semester
fields = ('name', 'year', 'start_effect_date', 'end_effect_date')
class CourseOfferingForm(forms.ModelForm):
class Meta:
model = CourseOffered
fields = '__all__'
class CourseDistributionForm(forms.ModelForm):
starting_id = forms.CharField()
ending_id = forms.CharField()
class Meta:
model = CourseDistribution
fields = ('offered', 'section', 'teacher', 'parent_dist', 'starting_id', 'ending_id')
def clean(self):
cd = self.cleaned_data
qs = CourseDistribution.objects.filter(offered=cd.get('offered'), section=cd.get('section'))
if qs:
for obj in qs:
try:
ms = DistributedSectionDetail.objects.get(distribution=obj)
if ms.starting_id == "*" or cd.get('starting_id') == "*":
raise ValidationError("Course already distributed to Section.")
else:
if not (cd.get('ending_id') < ms.starting_id or cd.get('starting_id') > ms.ending_id):
raise ValidationError("Course already distributed to Section.")
# raise ValidationError("Course already distributed to Section.")
except:
return redirect("/distribution/create/")
parent = cd.get('parent_dist')
if parent:
while parent.parent_dist != parent:
cd['parent_dist'] = parent.parent_dist
parent = cd.get('parent_dist')
if parent and cd.get('teacher') != parent.teacher:
raise ValidationError("Merged sections has different teacher")
return cd
def save(self, commit=True):
cd = self.cleaned_data
instance = super(CourseDistributionForm, self).save(commit=True)
if not instance.parent_dist:
instance.parent_dist = instance
instance.save()
DistributedSectionDetail.objects.update(
distribution=instance,
starting_id=cd.get('starting_id'),
ending_id=cd.get('ending_id')
)
return instance
class CourseDistributionUpdateForm(forms.ModelForm):
class Meta:
model = CourseDistribution
fields = ('offered', 'section', 'teacher', 'parent_dist')
def clean(self):
cd = self.cleaned_data
parent = cd.get('parent_dist')
if parent:
while parent.parent_dist != parent:
cd['parent_dist'] = parent.parent_dist
parent = cd.get('parent_dist')
if parent and cd.get('teacher') != parent.teacher:
raise ValidationError("Merged sections has different teacher")
return cd
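# A minimal illustration (not used by the forms above) of the overlap rule in
# CourseDistributionForm.clean(): two ID ranges clash unless one ends before the
# other starts. The helper name and sample values below are purely illustrative.
def _ranges_overlap(start_a, end_a, start_b, end_b):
    """Return True when [start_a, end_a] and [start_b, end_b] intersect."""
    return not (end_a < start_b or start_a > end_b)
# _ranges_overlap(1, 30, 31, 60) -> False ; _ranges_overlap(1, 30, 25, 40) -> True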
|
Emad-ahmed/luRoutine
|
semester/forms.py
|
forms.py
|
py
| 4,085 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40182196672
|
from anomaly_detection import Amean
from multiprocessing import Process, Queue
from database import DataBase
from datetime import datetime, timedelta
import time
import traceback
class AnomalyDomain (Process):
# Initialize data
def __init__(self, name, host) :
super(AnomalyDomain, self).__init__()
self.name = name
self.db = DataBase(host=host, database="anomaly", collection="data")
self.last_update = ""
self.last_value = 0
self.timeline = []
for i in range(24) :
hour_detection = Amean()
self.timeline.append(hour_detection)
self.timeline_weekend = []
for i in range(24) :
hour_detection = Amean()
self.timeline_weekend.append(hour_detection)
return
def __predict (self, hour=0, current=0, angle=0, date="") :
"""predict and return value for new data point"""
date = datetime.strptime(date, "%Y-%m-%d")
code = self.timeline[hour].predict(current=current, angle=angle)
if code == '' :
return 10
else :
return code
def __log (self, msg) :
with open("log.txt","a") as f :
f.write(msg + '\n')
return
def __check_new_data (self, name) :
"""check if there is new data in repo
if yes, return all new data"""
# check if repo is null (start-date = null)
if self.last_update == "" :
start_date = self.db.get_start_date(name=name)
if start_date != '' :
self.last_update = start_date
# check last update
# go to database and get last_update, then update data in anomaly class (this class)
db_last_update = self.db.get_last_update(name=name)
print("db_last_update: ",db_last_update)
if db_last_update == '' or not db_last_update:
return []
else :
db_last_update = datetime.strptime(db_last_update, "%Y-%m-%d %H")
last_update = datetime.strptime(self.last_update, "%Y-%m-%d %H")
result = []
while last_update < db_last_update :
print("db_last_update: ", name," ", db_last_update)
last_update += timedelta(seconds=3600)
print("check last update :", last_update)
date = last_update.strftime("%Y-%m-%d")
hour = last_update.hour
data_value = self.db.get_data_by_hour(name=name, date=date, hour=hour)
self.__log(date + ' ' + str(hour) + ' ' + str(data_value))
data = {'angle':float(data_value)-float(self.last_value),
'current':data_value,
'date':date,
'hour':hour}
result.append(data)
self.last_value = data_value
self.last_update = datetime.strftime(last_update, '%Y-%m-%d %H')
return result
def __save_result (self, name, date, result) :
self.db.insert_result(name=name, date=date, value=result)
return
#========================= RUN ==============================
# Run process method
# start per process by calling run()
def run(self) :
name = self.name
try :
while True :
time.sleep(10)
data = self.__check_new_data (name)
# data :
# [] : no new data
# [ {date:, hour:, current:, angle:)]
print("--------------AnomalyDomain is running1--------------")
if data != [] :
print("--------------AnomalyDomain is running2--------------")
# predict new data
for hour_data in data :
result_prediction = self.__predict(hour=hour_data['hour'],
current=hour_data['current'],
angle=hour_data['angle'],
date=hour_data["date"])
# save result to db
self.__save_result(name=name,
date=hour_data['date']+' '+str(hour_data['hour']),
result=result_prediction)
#continue waiting
except Exception as e:
with open("log.txt","a") as f :
f.write(str(e) + '\n')
traceback.print_exc()
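# Hedged sketch (not part of the class) of the hour-by-hour catch-up loop used
# in __check_new_data: every hour between the last processed timestamp and the
# newest one in the database is visited exactly once. Timestamps are made up;
# it reuses the datetime/timedelta imports at the top of this module.
if __name__ == "__main__":
    last = datetime.strptime("2023-01-01 05", "%Y-%m-%d %H")
    newest = datetime.strptime("2023-01-01 08", "%Y-%m-%d %H")
    while last < newest:
        last += timedelta(seconds=3600)
        print(last.strftime("%Y-%m-%d"), last.hour)  # hours 6, 7, 8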
|
DUCQUAN7850/warning_service_master
|
warning_service-master/anomaly_domain.py
|
anomaly_domain.py
|
py
| 4,642 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42174278814
|
# import transformers
# import datasets
# from pprint import pprint
# # with pipeline
# model = transformers.AutoModelForSequenceClassification.from_pretrained("") # load model from local directory
# tokenizer = transformers.AutoTokenizer.from_pretrained("TurkuNLP/bert-base-finnish-cased-v1")
# test_pipe = transformers.pipeline(task="text-classification", model=model, tokenizer=tokenizer, function_to_apply="sigmoid", top_k=None) # return_all_scores=True is deprecated
# test = [""] # add examples to test
# results = test_pipe(test)
# for zipped in zip(test, results):
# pprint(zipped)
import transformers
import torch
import numpy as np
import argparse
from pprint import PrettyPrinter
import json
import datasets
import pandas as pd
import csv
""" This script is meant for looking at multi-label predictions for raw text data and saving the probabilities with id. """
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--model', required=True,
help="the model name")
parser.add_argument('--data', required=True,
help="the file name of the raw text to use for predictions")
parser.add_argument('--tokenizer', required=True,
help="the tokenizer to use for tokenizing new text")
parser.add_argument('--filename', required=True,
help="the file name to give file resulting from the predictions")
args = parser.parse_args()
print(args)
pprint = PrettyPrinter(compact=True).pprint
# read the data in
data = args.data
if ".json" in data:
with open(data, 'r') as json_file:
json_list = list(json_file)
lines = [json.loads(jline) for jline in json_list]
# use pandas to look at each column
df=pd.DataFrame(lines)
# # TODO might have to change this depending on the data type
# elif ".tsv" in data:
# with open(data, "rt", encoding="utf-8") as f:
# lines = f.readlines()
# lines = lines[1:]
# for i in range(len(lines)):
# lines[i] = lines[i].replace("\n", "")
# lines[i] = lines[i].split("\t")
# assert len(lines[i]) == 3
# df=pd.DataFrame(lines, columns = ['id', 'label', 'text'])
elif ".tsv" in data:
with open(data, "rt", encoding="utf-8") as f:
lines = f.readlines()
lines = lines[1:]
for i in range(len(lines)):
lines[i] = lines[i].replace("\n", "")
lines[i] = lines[i].split("\t")
assert len(lines[i]) == 2
df=pd.DataFrame(lines, columns = ['label', 'text'])
# instantiate model, this is pretty simple
model=transformers.AutoModelForSequenceClassification.from_pretrained(args.model)
trainer = transformers.Trainer(
model=model
)
tokenizer = transformers.AutoTokenizer.from_pretrained(args.tokenizer)
def tokenize(example):
return tokenizer(
example["text"],
padding='max_length', # this got it to work, data_collator could have helped as well?
max_length=512,
truncation=True,
)
dataset = datasets.Dataset.from_pandas(df)
#map all the examples
dataset = dataset.map(tokenize)
labels = dataset["label"]
# oh right I would have to change the labels for the test set to match the upper ones if I wanted easily readable results
dataset = dataset.remove_columns("label")
texts = dataset["text"]
#ids = dataset["id"]
# see how the labels are predicted
test_pred = trainer.predict(dataset)
predictions = test_pred.predictions # these are the logits
sigmoid = torch.nn.Sigmoid()
probs = sigmoid(torch.Tensor(predictions))
probs = probs.numpy()
unique_labels = ["IN", "NA", "HI", "LY", "IP", "SP", "ID", "OP", "QA_NEW"] # upper labels plus qa_new
with open(args.filename, 'w') as outfile:
header = ["text", "gold_labels", *unique_labels] #maybe I should put the text last
writer = csv.writer(outfile, delimiter="\t")
writer.writerow(header)
for i in range(len(texts)):
text = texts[i]
gold = labels[i]
line = [text, gold]
pred_list = [str(val) for val in probs[i]]
line = [*line, *pred_list]
writer.writerow(line)
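# Follow-up sketch (not in the original script): turning one row of sigmoid
# probabilities into a predicted label set with a hypothetical 0.5 threshold.
# It reuses numpy and unique_labels defined above; the numbers are invented.
example_probs = np.array([0.8, 0.1, 0.05, 0.6, 0.2, 0.1, 0.3, 0.55, 0.02])
example_predicted = [lab for lab, p in zip(unique_labels, example_probs) if p >= 0.5]
# example_predicted == ["IN", "LY", "OP"]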
|
TurkuNLP/register-qa
|
predict.py
|
predict.py
|
py
| 4,037 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36920706174
|
#-------------------------------------------------------------------------
# Python script that connects to a Microsoft Azure Event Hub resource and
# reads all available messages, while leaving a checkpoint of what it has
# read so that messages are not repeated the next time the program starts.
#
# Instructions for use:
# 1-. Have Python 3.4 or higher.
# 2-. Have the "pip" package installer.
# 3-. Run the command "pip install azure-eventhub"
#
# Author: Noé Amador Campos Castillo.
# E-mail: [email protected]
#--------------------------------------------------------------------------
import os
import sys
import json
import signal
import logging
import asyncio
import functools
from azure.eventprocessorhost import (
AbstractEventProcessor,
AzureStorageCheckpointLeaseManager,
EventHubConfig,
EventProcessorHost,
EPHOptions)
class EventProcessor(AbstractEventProcessor):
# Constructor of an Event Processor
def __init__(self, params=None):
super().__init__(params)
self._msg_counter = 0
# Function that can be overridden
# Used to initialize an Event Processor
async def open_async(self, context):
print("Connection established {}".format(context.partition_id))
# Function that can be overridden
# Used to stop the Event Processor.
async def close_async(self, context, reason):
print("Connection closed (reason {}, id {}, offset {}, sq_number {})".format(
reason,
context.partition_id,
context.offset,
context.sequence_number))
# Function that can be overridden
"""
Called when the EPH receives a new batch of events.
This is where the actions to perform are programmed.
Parameters:
context = Information about the partition
messages = The batch of events to process
"""
async def process_events_async(self, context, messages):
# For each event...
for Event in messages:
# Print the sequence number
print("Mensaje: {}".format(Event.sequence_number))
# Parse the JSON received in the event message
parsedMessage = json.loads(Event.body_as_str())
# Print it in a prettier, indented format
print(json.dumps(parsedMessage, indent=2, sort_keys=True))
# Leave a checkpoint for the received event
await context.checkpoint_async()
# Function that can be overridden
"""
Called whenever the client experiences an error while receiving events.
The Event Processor Host recovers by resuming from where it left off
(unless the program was killed).
Parameters:
context = Information about the partition
error = The error that was raised
"""
async def process_error_async(self, context, error):
print("Event Processor Error {!r}".format(error))
# Receive events for a while (60 seconds here) and then shut the host down
async def wait_and_close(host):
await asyncio.sleep(60)
await host.close_async()
# Connect and receive messages
try:
# Get an asynchronous event loop
ephLoop = asyncio.get_event_loop()
# Storage Account name
stgName = "-"
# Storage key
stgKey = "-"
# Blob name
blobName = "-"
# Event Hubs namespace name
ehNamespace = "-"
# Event Hub name
ehName = "-"
# Event Hub SAS policy name
SASUser = "-"
# Event Hub SAS policy key
SASKey = "-"
"""
Configuración del Event Hub
Párametros:
sb_name = Nombre del namespace de Event Hubs
eh_name = Nombre del Event Hub
policy = Nombre del SAS Policy
key = Llave de la SAS Policy
"""
ehConfig = EventHubConfig(ehNamespace, ehName, SASUser, SASKey)
# Default options
ehOptions = EPHOptions()
# Set some options
ehOptions.release_pump_on_timeout = True
ehOptions.debug_trace = False
"""
Configuración del Storage
Párametros:
storage_account_name = Nombre del storage
storage_account_key = Llave del storage
lease_container_name = Nombre del contenedor
"""
stgManager = AzureStorageCheckpointLeaseManager(
stgName, stgKey, blobName)
# Event Hub Processor host
ehHost = EventProcessorHost(
EventProcessor,
ehConfig,
stgManager,
ep_params = ["param1", "param2"],
eph_options = ehOptions,
loop = ephLoop)
# Prepare the coroutines to run in the loop
ephTasks = asyncio.gather(
ehHost.open_async(),
wait_and_close(ehHost))
# Run the loop
ephLoop.run_until_complete(ephTasks)
# In case a keyboard interrupt occurs
except KeyboardInterrupt:
# Cancel the tasks and the loop
for task in asyncio.Task.all_tasks():
task.cancel()
ephLoop.run_forever()
ephTasks.exception()
# Close the loop
finally:
ephLoop.stop()
|
NoeCampos22/Ejercicio_Azure_Databricks
|
Mini-Ejercicios/1_Enviar_Recibir_Eventos_EventHub/EPH.py
|
EPH.py
|
py
| 5,351 |
python
|
es
|
code
| 0 |
github-code
|
6
|
2500816506
|
from bokeh.layouts import column
from bokeh.models.widgets import RadioButtonGroup,Select, Div, Button,PreText
from bokeh.models import TextInput, RadioGroup
from bokeh.plotting import curdoc
button_group = RadioButtonGroup(labels=["Physical parameters", "Geometric parameters", "Initial conditions"], active=1)
## Add the elements inside each radio button
slope = TextInput(value='25', title="slope:")
###### Drop down menus
Coefficient = Select(title="Coefficient:", value="chezy", options=["Manning", "chezy"])
Coefficient_value1 = TextInput(value=" ", title="chezy:" )
Coefficient_value2 = TextInput(value=" ", title="Manning:" )
#### Add geometric parameters
length = TextInput(value=" ", title="Length:")
width = TextInput(value=" ", title="Width:")
#LABELS = ["km", "m"]
#radio_group = RadioGroup(labels=LABELS, active=0)
#### Add ginitial conditiions
depth = TextInput(value=" ", title="Depth(m):")
Q = TextInput(value=" ", title="Discharge(m3/sec):")
texto = PreText(text="""Please click here""",width=500, height=100)
layout = ()
def button_group_change(active):
ch = active
if ch == 0:
slope.visible=True
Coefficient.visible=True
Coefficient_value1.visible=True
Coefficient_value2.visible=True
length.visible=False
width.visible=False
depth.visible=False
Q.visible=False
# layout= column(slope, Coefficient,Coefficient_value1, Coefficient_value2)
elif ch == 1:
length.visible=True
width.visible=True
slope.visible=False
Coefficient.visible=False
Coefficient_value1.visible=False
Coefficient_value2.visible=False
depth.visible=False
Q.visible=False
# layout= column(length, width)
elif ch == 2:
depth.visible=True
Q.visible=True
slope.visible=False
Coefficient.visible=False
Coefficient_value1.visible=False
Coefficient_value2.visible=False
length.visible=False
width.visible=False
# layout= column(length, width )
texto.text ='text' #str(layout)
#curdoc().add_root(column(button_group,slope, Coefficient))
#curdoc().add_root(row(button_group,length))
layout= column(slope, Coefficient,Coefficient_value1, Coefficient_value2,length, width)
button_group.on_click(button_group_change)
###show
curdoc().add_root(column(texto,button_group,layout))
#bokeh serve --show BWC.py
|
sduarte09/Module5
|
Exercise/Group5/BWC.py
|
BWC.py
|
py
| 2,455 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39688374544
|
# Time: O(n)
# Space: O(1)
from typing import List  # needed for the List[int] annotation below
class Solution:
def rob(self, nums: List[int]) -> int:
"""
"""
if not nums:
return 0
if len(nums)==1:
return nums[0]
return max(self.util(nums[:-1]), self.util(nums[1:]))
def util(self, nums):
# Linear Space
# max_arr = [0]*len(nums)
# max_arr[0] = nums[0]
# Constant Space
prev_max = nums[0]
pp_max = 0
for i in range(1, len(nums)):
cur_max = max(nums[i] + pp_max, prev_max)
pp_max = prev_max
prev_max = cur_max
# max_arr[i] = max(nums[i] + max_arr[i-2], max_arr[i-1])  # linear-space recurrence, kept as a comment
return prev_max
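# Example usage (comment only): the houses form a circle, so the first and
# last house can never both be robbed.
#   Solution().rob([2, 3, 2])    -> 3
#   Solution().rob([1, 2, 3, 1]) -> 4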
|
cmattey/leetcode_problems
|
Python/lc_213_house_robber_ii.py
|
lc_213_house_robber_ii.py
|
py
| 704 |
python
|
en
|
code
| 4 |
github-code
|
6
|
38453788242
|
import sys
input = sys.stdin.readline
T = int(input())
for tc in range(1,T+1):
L = list(map(int,input().split()))
L.sort()
a,b,c = L
ans = "NO"
if a**2 + b**2 == c**2:
ans = "YES"
print(f"Case #{tc}: {ans}")
|
LightPotato99/baekjoon
|
math/geometry/triangle/pythagoras/rightTri.py
|
rightTri.py
|
py
| 240 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17137130653
|
import requests
import re
from bs4 import BeautifulSoup
from openpyxl import load_workbook
DIRECTORY_URL = "https://directory.tufts.edu/searchresults.cgi"
WORKBOOK_NAME = "DirectoryResults_2017-2018.xlsx"
NAME_SHEET = "DirectoryResults"
# This script works on Excel Sheets with a single column in the A column of
# a unique listing of names. It searches for every name in the tufts directory,
# then strips out a department if it is found and places it in the B column next
# to the search query.
def getDirectoryPage(name):
rawPage = requests.post(DIRECTORY_URL, data={"type": "Faculty", "search": name})
soup = BeautifulSoup(rawPage.text, "html.parser")
for child in soup.find_all(href=re.compile("department.cgi")):
return child.contents[0].strip()
dataBook = load_workbook(WORKBOOK_NAME)
nameSheet = dataBook[NAME_SHEET]
for index in range(2, nameSheet.max_row + 1):
currentName = nameSheet["A{0}".format(index)].value
affiliation = getDirectoryPage(currentName)
nameSheet["B{0}".format(index)] = affiliation
# Rudimentary backup every 100 entries
if index % 100 == 0:
print("Progress: {:.2%}".format(index / (nameSheet.max_row + 1)))
dataBook.save(WORKBOOK_NAME)
dataBook.save(WORKBOOK_NAME)
|
jGowgiel/fec-donation-aggregator
|
scripts/DirectoryScrape.py
|
DirectoryScrape.py
|
py
| 1,261 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24502646621
|
from utils import *
from fastapi import FastAPI, Query, Path, Body, Cookie, Header
from pydantic import BaseModel, Required, Field, HttpUrl
app = FastAPI()
@app.get('/')
def read_root():
return {'Hello': 'World'}
# Examples with path params
class ModelName(str, Enum):
name1 = 'Phelipe'
name2 = 'Marcos'
name3 = 'Fran'
@app.get('/items/{item_id}')
def read_item(item_id: int, q: Union[str, None] = None):
return {'item_id': item_id, 'q': q}
@app.get('/names/{name}')
def get_name(name: ModelName):
response = {'model_name': name}
if name.value == 'Phelipe':
response.update({'message': 'Bad name.'})
else:
response.update({'Message': 'Cool name.'})
return response
# Example with query params
fake_items_db = [{'item_name': 'Foo'}, {'item_name': 'Bar'}, {'item_name': 'Baz'}]
_lmt = len(fake_items_db) - 1
@app.get('/items/')
async def read_item(skip: int = 0, limit: int = _lmt):
return fake_items_db[skip : skip + limit]
# Example with an optional query param
@app.get('/cats/')
async def get_cat(name: str = None):
cats = [
'zoe',
'zulke',
'zara',
'miuda',
'frajola',
'cruel',
'mafalda',
'jade',
'maria',
]
if name:
if name in cats:
return {'Valor aleatorio': False, 'Nome do gato': name}
return {'Valor aleatorio': True, 'Nome do gato': cats[random.randint(0, len(cats) - 1)]}
# Example with a Pydantic model
class Item(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
@app.post('/items_s/')
async def create_item(item: Item):
if item.tax:
price = item.price + item.price * item.tax
item.price = price
return item
"""
Examplo usando Query como valor padrão, validadores e sobrescrita de documentação
Old way - async def create_dog(name: str, age: int, description: Union[None, str] = None):
"""
class Dog(BaseModel):
level: int
name: str
@app.post('/dogs/')
async def create_dog(
brothers: Union[List[Dog], None],
age: int,
name: str = Query(
default=..., title='Nome', description='Esse é um nome', regex='^love'
),
description: Union[None, str] = Query(
default=None,
title='Descrição',
description='Essa é uma descrição',
min_length=5,
max_length=10,
deprecated=True,
),
hidden_query: Union[str, None] = Query(default=None, include_in_schema=False),
):
dog = {'name': name, 'age': age, 'brothers': {}}
if description:
dog.update({'description': description})
if brothers:
list(map(lambda d: dog['brothers'].update({d.name: d.level}), brothers))
return dog
"""
Exemplo usando Path pydantic
"""
@app.get('/memories/{person_id}')
def create_memories(
*, person_id: int = Path(..., title='Uma pessoa existente.', gt=0, le=1000)
):
people = {1: {'name': 'amourir'}, 2: {'name': 'joão'}}
if person_id in people.keys():
return people.get(person_id)
return {}
"""
Exemplo usando Body e multiplos parâmetros
"""
class City(BaseModel):
name: str
country: int
@app.post('/cities/{country_id}')
def create_cities(
*,
country_id: int = Path(..., title='Id de um país existente.', gt=1, le=5),
city: City = Body(..., embed=False),
person_name: str = Body(..., regex='^mar', embed=False)
):
countries = {1: 'Brazil', 2: 'Russia', 3: 'Senegal', 4: 'Marrocos', 5: 'Irã'}
city_item = {'name': city.name}
country_name = countries.get(country_id)
city_item.update({'country': country_name})
return city_item
"""
Exemplo usando Body Field
"""
class PersonalComputer(BaseModel):
name: str = Field(
regex='^pc', default=None, description='Nome do pc', max_length=10
)
serie: int = Field(gt=1, description='Passe uma série válida')
@app.put('/pcs/{pc_id}')
async def update_pc(
*,
pc_id: int = Path(..., description='Passe um válido.'),
pc: PersonalComputer = Body(..., embed=True, description='Uma maquina')
):
if pc_id // 2 == 0:
return {'message': 'Inválido'}
return pc
"""
Exemplo com subtipos como BaseModel
"""
class Image(BaseModel):
url: HttpUrl
size: int
class Product(BaseModel):
name: str = Field(..., example='Produto base')
price: float
images: list[Union[Image, None]] = None
class Store(BaseModel):
products: Union[list[Product], None]
name: str = Field(..., description='Nome da loja')
class Config:
schema_extra = {
'example': {
'products': [
{
'name': 'Computador',
'price': 12.4,
'images': [{'url': 'http://test.com', 'size': 1}],
}
],
'name': 'Loja dos fundos',
}
}
@app.post('/products/')
def create_products(store: Store = Body(..., embed=True)):
return store
@app.get('/products/{product_id}')
def retrive_products(*, product_id: UUID = Path(..., description='Produto existente')):
return {}
"""
Exemplo com Cookie e Header
"""
@app.get('/params/')
def request_params(
user_agent: str = Header(default=None),
ads_id: Union[str, None] = Cookie(default=None),
):
return {'cookie': ads_id, 'user_agent': user_agent}
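# Hedged sketch (not part of the original app): exercising the /cities/{country_id}
# endpoint above with FastAPI's TestClient. With several Body(...) parameters the
# request body is keyed by parameter name; all values below are made up.
if __name__ == "__main__":
    from fastapi.testclient import TestClient
    client = TestClient(app)
    response = client.post(
        "/cities/4",
        json={"city": {"name": "Rabat", "country": 4}, "person_name": "marcos"},
    )
    # expected: 200 {'name': 'Rabat', 'country': 'Marrocos'}
    print(response.status_code, response.json())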
|
williamelias/Fast-Api-Quiz
|
code/app/main.py
|
main.py
|
py
| 5,446 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
27456097150
|
from pathlib import Path
from sphinx.directives import SphinxDirective
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.logging import getLogger
import yaml
import json
logger = getLogger(__name__)
class PyConfig(SphinxDirective):
has_content = True
def run(self):
if self.content:
data = "\n".join(self.content)
data = json.dumps(yaml.safe_load(data), indent=2)
return [nodes.raw("", f'<py-config type="json">\n{data}\n</py-config>\n', format="html")]
class PyRepl(SphinxDirective):
has_content = True
option_spec = {
"auto-generate": directives.unchanged,
"output": directives.unchanged,
"src": directives.path,
}
def run(self):
attrs: str = ""
code: str = ""
for key, value in self.options.items():
attrs += f' {key}="{value}"'
if self.content:
code = "\n".join(self.content)
py_repl = f'''
<py-repl {attrs}>
{code}
</py-repl>
'''
return [nodes.raw("", py_repl, format="html")]
class PyScript(SphinxDirective):
has_content = True
option_spec = {
"file": directives.path,
"output": directives.unchanged
}
def run(self):
if "file" in self.options:
path = self.env.relfn2path(self.options['file'])[1]
try:
with open(path, 'r') as f:
code = f.read()
self.env.note_dependency(path)
except Exception as err:
logger.warn('reading error: %s, %s', path, err)
return []
elif self.content:
code = "\n".join(self.content)
else:
raise self.error("Must provide either content or the 'file' option")
return [nodes.raw("", f"<py-script>\n{code}\n</py-script>\n", format="html")]
class PyTerminal(SphinxDirective):
option_spec = {
"auto": directives.flag,
"false": directives.flag
}
def run(self):
attrs: str = ""
for key, _ in self.options.items():
attrs += f' {key}'
py_terminal = f'''
<py-terminal {attrs}></py-terminal>
'''
return [nodes.raw("", py_terminal, format="html")]
|
yoblee/docs
|
sphext/sphinx_pyscript/pys_directives/__init__.py
|
__init__.py
|
py
| 2,360 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41774127803
|
import hearts.model.game as m
from hearts.game_master import GameMaster
import logging
class GameBackend(object):
def __init__(self, player_svc):
self._next_game_id = 1
self._game_masters = {}
self._players = {}
self._player_mapping = {}
self._player_svc = player_svc
self.logger = logging.getLogger(__name__)
def create_game(self, players):
game_id = self._next_game_id
self._next_game_id += 1
model = m.HeartsGame()
model.start()
master = GameMaster(model, game_id)
self._game_masters[game_id] = master
master.add_observer(self)
self._players[game_id] = list(players)
for idx, player_id in enumerate(players):
self._player_mapping[player_id] = (game_id, idx)
return game_id
def get_game_master(self, game_id):
return self._game_masters[game_id]
def try_get_player_game(self, player_id):
data = self._player_mapping.get(player_id)
if data is None:
return None
return data[0]
def try_get_game_info(self, player_id):
data = self._player_mapping.get(player_id)
if data is None:
return None
return data
def is_in_game(self, player_id):
return player_id in self._player_mapping
def on_game_finished(self, game_id):
self.logger.info("Game %d has finished.", game_id)
self._destruct_game(game_id)
def on_game_abandoned(self, game_id):
self.logger.info("Game %d has been abandoned.", game_id)
self._destruct_game(game_id)
def _destruct_game(self, game_id):
for player in self._players[game_id]:
del self._player_mapping[player]
self._player_svc.remove_player(player)
del self._players[game_id]
del self._game_masters[game_id]
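# Hedged usage sketch (comment only, since it depends on hearts.model internals):
#   backend = GameBackend(player_svc)            # player_svc needs remove_player(id)
#   game_id = backend.create_game(["a", "b", "c", "d"])
#   backend.try_get_game_info("a")               # -> (game_id, 0)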
|
MHeasell/hearts-server
|
hearts/game_backend.py
|
game_backend.py
|
py
| 1,880 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31329871933
|
#%%
import pandas as pd
import numpy as np
import datetime as dt
import xarray as xr
import cftime
import dask
from glob import glob
#%%
'''SUBSET RH DATA'''
data = pd.read_csv("preprocessing/inputdata/AMF_US-MBP_BASE_HH_2-5.csv",
skiprows = 2,
na_values = -9999)
data['TIMESTAMP_START'] = pd.to_datetime(data.TIMESTAMP_START, format = '%Y%m%d%H%M')
data['TIMESTAMP_END'] = pd.to_datetime(data.TIMESTAMP_END, format = '%Y%m%d%H%M')
data = data.rename(columns = {'TIMESTAMP_START':'TIMESTAMP'})
'''
#TRY EDI Data
data = pd.read_csv('preprocessing/inputdata/BogLake_Met_data_30min.csv', skiprows = 1, sep = ",",
names=["TIMESTAMP", "Air_TempC_Avg", "RH", "Soil_TempC_5cm", "Soil_TempC_10cm", "Soil_TempC_20cm",
"Soil_TempC_30cm", "Soil_TempC_40cm", "Soil_TempC_50cm", "Soil_TempC_100cm", "Soil_TempC_200cm",
"WS_Tot", "WindDir_D", "WindDir_SD", "PAR_Den_Avg"],
parse_dates = ['TIMESTAMP'],
na_values = {'RH':['NA',]})
'''
# Subset to the right variables
RH = data[['TIMESTAMP', 'RH']]
# Sort into Year-Month combos
RH['Year'] = RH.TIMESTAMP.dt.year
RH['Month'] = RH.TIMESTAMP.dt.month
RH['Year-Month'] = RH.TIMESTAMP.dt.strftime('%Y') + '-' + RH.TIMESTAMP.dt.strftime('%m')
#Subset to proper years
RH = RH[(RH.Year > 2010) & (RH.Year < 2018)]
#Patch long NA period in August 2017 with data from the NADP site in the EDI data
patch_data = pd.read_csv('preprocessing/inputdata/NADP_Met_data_30min.csv', skiprows = 1, sep = ",",
names=["TIMESTAMP", "Air_TempC_Avg", "RH", "Soil_TempC_Avg", "WS_Tot", "WindDir_D", "WindDir_SD", "PAR_Den_Avg", "Soil_VWC_Avg"],
parse_dates = ['TIMESTAMP'],
na_values = {'RH':['NA',]})
RH_patch = patch_data[['TIMESTAMP', 'RH']]
RH_patch = RH_patch[(RH_patch.TIMESTAMP.dt.year == 2017) & (RH_patch.TIMESTAMP.dt.month == 8)]
# %%
'''OPEN ALL NCDF DATA'''
vals = set(RH['Year-Month'])
for val in vals:
#Open sample data set
test = xr.open_mfdataset(val + '.nc', decode_times = False)
#Select RH data:
if val == '2017-08':
test_RH = RH_patch
else:
test_RH = RH[RH['Year-Month'] == val]
#Subset netdcf spatially
test2 = test.drop_sel(lon = 1)
#If leap Feb and leap year remove Feb29 data
r = test_RH['RH']
if (val.endswith('02')) & (len(r) > len(test2.time)):
#remove Feb 29
r = r[:len(test2.time)]
#print(val + ": " + str(min(r)))
#print("NEW" + val + ": " + str(min(r)))
# Interpolate
r3 = pd.Series(r).interpolate(method = "linear")
print("NAN" + val + ": " + str(any(np.isnan(r3))))
#Reshape
r4 = np.reshape(list(r3),(-1,1,1))
#Add RH Data
test2['RH'] = xr.DataArray(r4,
dims = ['time', 'lat', 'lon'],
attrs = {'FillValue': np.NaN,
'long_name': 'relative humidity at the lowest atm level (RH)',
'units': '%' })
#Write sample
test2.to_netcdf('preprocessing/forcings-modified/' + val + '.nc')
# %%
'''CHECK'''
#Open file to check
dat = xr.open_mfdataset('preprocessing/forcings-modified/2017-08.nc')
# %%
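# %%
'''Hedged mini-example of the reshape used above: a flat series of N half-hourly
RH values becomes an (N, 1, 1) cube so it lines up with the (time, lat, lon)
dimensions of the forcing files. The numbers here are invented.'''
example_rh = np.reshape([55.0, 60.0, 62.5], (-1, 1, 1))
print(example_rh.shape)  # (3, 1, 1)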
|
mwdjones/clm_frost
|
preprocessing/forcings/Add_RH_to_Forcings.py
|
Add_RH_to_Forcings.py
|
py
| 3,217 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8898957294
|
import telebot
import config
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import json
from telebot import types
import pyowm
owm = pyowm.OWM('c8548689b28b1916f78403fb9c92e4f3', language='ru')
bot = telebot.TeleBot(config.TOKEN)
authenticator = IAMAuthenticator('9n-ZTrznhrAKV0YAJIWIM-fwico0pbNeHp9Wek67nt6V')
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url('https://api.eu-gb.language-translator.watson.cloud.ibm.com/instances'
'/1bec2e12-6251-4b94-8d80-fcead8ec6d68')
languages = language_translator.list_languages().get_result()
user_dict = {}
# Build a class to store the variables entered by the user!
class User:
def __init__(self, name):
self.name = name
self.application = None
self.writer_lang = None
self.phrases = None
self.place = None
# Handle /start and /help
@bot.message_handler(commands=['start', 'help'])
def command_start(message):
chat_id = message.chat.id
text = message.text
msg = bot.send_message(chat_id,
"Здравствуйте, я полезный бот! Мое предназначение помогать в тех функциях которые в меня "
"встроенные! "
"В них входит перевод текста на необходимый Вам язык! Подскажите как я могу к Вам обращатся?")
bot.register_next_step_handler(msg, process_name_step)
# The bot offers its services/capabilities!
@bot.message_handler(content_types=['text'])
def process_name_step(message):
chat_id = message.chat.id
name = message.text
user = User(name)
user_dict[chat_id] = user
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик', 'Погода', 'Список покупок')
msg = bot.send_message(chat_id, 'Приятно познакомится ' + user.name + '! Выберите приложение, которым хотите '
'воспользоваться!', reply_markup=markup)
bot.register_next_step_handler(msg, how_can_i_help)
# This action runs when an application is selected!
@bot.message_handler(content_types=['text'])
def how_can_i_help(message):
chat_id = message.chat.id
application = message.text
if application == '/start' or application == '/help':
return command_start
if application == u'Переводчик':
user = user_dict[chat_id]
user.application = application
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('английский', 'русский', 'испанский', 'немецкий', 'итальянский', 'французский', 'китайский',
'Погода')
msg = bot.send_message(chat_id, "Укажите на каком языке будете писать (если такого языка нет в выпадающем "
"списке, напишите этот язык в сообщении!): ", reply_markup=markup)
bot.register_next_step_handler(msg, translater_func1)
elif application == u'Погода':
user = user_dict[chat_id]
user.application = application
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик', 'Погода')
msg = bot.send_message(chat_id, "Введите в каком городе/стране?: ", reply_markup=markup)
bot.register_next_step_handler(msg, weather_bot)
elif application == u'Список покупок':
pass
# The text-translation application starts here!
@bot.message_handler(content_types=['text'])
def translater_func1(message):
global lang1 # Создаем переменную lang1 (язык ввода текста)
try: # Создаем исключение для того чтобы различать кнопки и команду /start
chat_id = message.chat.id
writer_lang = message.text
if writer_lang == u'Погода': # Создаем условие нажатия на кнопку: если это погода выполняем код - ниже!
user = user_dict[chat_id]
user.writer_lang = writer_lang
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик')
msg = bot.send_message(chat_id, "Введите в каком городе/стране?: ", reply_markup=markup) # Спрашиваем в
# каком городе проверить и при этом крепим кнопку переводчика!
bot.register_next_step_handler(msg, weather_bot)
# The conditions below handle language selection via the buttons! If one of the buttons below was chosen:
if writer_lang == u'английский':
lang1 = 'en'
elif writer_lang == u'русский':
lang1 = 'ru'
elif writer_lang == u'украинский':
lang1 = 'uk'
elif writer_lang == u'испанский':
lang1 = 'es'
elif writer_lang == u'немецкий':
lang1 = 'de'
elif writer_lang == u'итальянский':
lang1 = 'it'
elif writer_lang == u'французский':
lang1 = 'fr'
elif writer_lang == u'китайский':
lang1 = 'zh'
user = user_dict[chat_id]
user.writer_lang = writer_lang
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('английский', 'русский', 'испанский', 'немецкий', 'итальянский', 'французский', 'китайский')
# Then ask which language to translate into and attach buttons with the corresponding languages
msg = bot.send_message(chat_id, "На какой язык переводить (если такого языка нет в выпадающем "
"списке, напишите этот язык в сообщении!): ", reply_markup=markup)
bot.register_next_step_handler(msg, translater_func2)
except:
if writer_lang == '/start' or writer_lang == '/help': # Если написали команду старт, то возвращаемся в начало кода!
msg = bot.send_message(chat_id,
"Здравствуйте, я полезный бот! Мое предназначение помогать в тех функциях которые "
"в меня "
"встроенные! "
"В них входит перевод текста на необходимый Вам язык! Подскажите как я могу к Вам "
"обращатся?")
bot.register_next_step_handler(msg, command_start)
else:
msg = bot.send_message(chat_id, "Oooops!")
bot.register_next_step_handler(msg, translater_func1)
def translater_func2(message):
global lang # Создаем глобальную переменную для языка на который будет осуществлятся перевод!
try:
chat_id = message.chat.id
translation_lang = message.text
if translation_lang == u'Погода': # Создаем условие нажатия на кнопку: если это погода выполняем код - ниже!
user = user_dict[chat_id]
user.translation_lang = translation_lang
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик')
msg = bot.send_message(chat_id, "Введите в каком городе/стране?: ", reply_markup=markup) # Спрашиваем в
# каком городе проверить и при этом крепим кнопку переводчика!
bot.register_next_step_handler(msg, weather_bot)
# Conditions for choosing the target language of the translation!
if translation_lang == u'английский':
lang = 'en'
elif translation_lang == u'русский':
lang = 'ru'
elif translation_lang == u'украинский':
lang = 'uk'
elif translation_lang == u'испанский':
lang = 'es'
elif translation_lang == u'немецкий':
lang = 'de'
elif translation_lang == u'итальянский':
lang = 'it'
elif translation_lang == u'французский':
lang = 'fr'
elif translation_lang == u'китайский':
lang = 'zh'
user = user_dict[chat_id]
user.translation_lang = translation_lang
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Смена языков для перевода', 'Смена языка для перевода', 'Погода')
msg = bot.send_message(chat_id, "Введите фразу для перевода : ", reply_markup=markup) # После определения
# языков просим ввести текст для перевода. А так же крепим кнопки!
bot.register_next_step_handler(msg, translate_phrases)
except:
if translation_lang == '/start' or translation_lang == '/help': # Если написали команду старт, то возвращаемся в начало кода!
msg = bot.send_message(chat_id,
"Здравствуйте, я полезный бот! Мое предназначение помогать в тех функциях которые "
"в меня "
"встроенные! "
"В них входит перевод текста на необходимый Вам язык! Подскажите как я могу к Вам "
"обращатся?")
bot.register_next_step_handler(msg, command_start)
else:
msg = bot.send_message(chat_id, 'Oooops')
bot.register_next_step_handler(msg, translater_func2)
def translate_phrases(message):
#global translation
chat_id = message.chat.id
phrases = message.text
# Set up a loop: if a button is pressed the text is not translated; instead we switch applications or change languages!
while phrases == u'Погода' or phrases == u'Смена языков для перевода' or phrases == u'Смена языка для перевода':
# These are the conditions that run when one of the buttons was pressed!
if phrases == u'Погода':
user = user_dict[chat_id]
user.phrases = phrases
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик')
msg = bot.send_message(chat_id, "Введите в каком городе/стране?: ", reply_markup=markup)
bot.register_next_step_handler(msg, weather_bot)
elif phrases == u'Смена языков для перевода':
user = user_dict[chat_id]
user.phrases = phrases
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('английский', 'русский', 'испанский', 'немецкий', 'итальянский', 'французский', 'китайский',
'Погода')
msg = bot.send_message(chat_id, "Укажите на каком языке будете писать (если такого языка нет в выпадающем "
"списке, напишите этот язык в сообщении!): ", reply_markup=markup)
bot.register_next_step_handler(msg, translater_func1)
elif phrases == u'Смена языка для перевода':
user = user_dict[chat_id]
user.phrases = phrases
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('английский', 'русский', 'испанский', 'немецкий', 'итальянский', 'французский', 'китайский',
'Погода')
msg = bot.send_message(chat_id, "На какой язык переводить (если такого языка нет в выпадающем "
"списке, напишите этот язык в сообщении!): ", reply_markup=markup)
bot.register_next_step_handler(msg, translater_func2)
break
else:
translation = language_translator.translate(
phrases,
source=lang1, target=lang).get_result()
msg = bot.send_message(chat_id, json.dumps(translation, indent=2, ensure_ascii=False))
bot.register_next_step_handler(msg, translate_phrases)
# The weather application starts here!
@bot.message_handler(content_types=['text'])
def weather_bot(message):
global place
try:
chat_id = message.chat.id
place = message.text
user = user_dict[chat_id]
user.place = place
observation = owm.weather_at_place(place)
w = observation.get_weather()
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('Переводчик')
temp = w.get_temperature('celsius')["temp"]
temp = round(temp)
msg = bot.send_message(chat_id,
'В городе ' + place + ' сейчас ' + w.get_detailed_status() + ' Температура в этом '
'городе: ' + str(
temp), reply_markup=markup)
if temp < 10 and temp >= 0:
msg = bot.send_message(chat_id, 'Сейчас пипец как холодно, одевайся как танк!')
elif temp >= 10 and temp < 20:
msg = bot.send_message(chat_id, 'Тепло конечно, но загорать еще рано!')
elif temp >= 20 and temp < 25:
msg = bot.send_message(chat_id, 'Ну еще чуть чуть и загорать можно идти!')
elif temp > 25:
msg = bot.send_message(chat_id, 'Можно смело загорать!')
else:
msg = bot.send_message(chat_id, 'Снеговики наступааааают!!!')
bot.register_next_step_handler(msg, weather_bot)
except:
if place == u'Переводчик':
user = user_dict[chat_id]
user.place = place
markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add('английский', 'русский', 'испанский', 'немецкий', 'итальянский', 'французский', 'китайский',
'Погода')
msg = bot.send_message(chat_id, 'Да, давайте переводить. Укажите на каком языке будете писать!', reply_markup=markup)
bot.register_next_step_handler(msg, translater_func1)
elif place == '/start' or place == '/help':
msg = bot.send_message(chat_id,
"Здравствуйте, я полезный бот! Мое предназначение помогать в тех функциях которые в меня "
"встроенные! "
"В них входит перевод текста на необходимый Вам язык! Подскажите как я могу к Вам обращатся?")
bot.register_next_step_handler(msg, command_start)
else:
msg = bot.send_message(chat_id, 'Такого города нет... Уточните пожалуйста название!')
bot.register_next_step_handler(msg, weather_bot)
bot.polling()
while True:
pass
|
IgorSopronyuk/translate_IBM_bot
|
translater_IBM_bot.py
|
translater_IBM_bot.py
|
py
| 17,771 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
21682006700
|
from tkinter import *
from sudoku_api import get_grid,solve
from sudoku_solver import solver,if_poss
import tkinter.messagebox
root=Tk()
entries=user_inps=[[0 for i in range(9)] for j in range(9)]
canvas=Canvas(root,height=500,width=450)
canvas.pack()
board=[[0 for i in range(9)] for j in range(9)]
def get(event):
return (event.widget.get())
#Create GUI
def create_board(grid):
global canvas
global root
global board
board=grid
global entries
canvas.delete("all")
for i in range(9):
canvas.create_line(0,i*50,450,i*50)
canvas.create_line(i*50,0,i*50,450)
for i in range(9):
for j in range(9):
if grid[i][j]==0:
entry=Entry(root)
entries[i][j]=entry
canvas.create_window(((25+50*j),(25+50*i)),height=50,width=50,window=entry)
else:
canvas.create_text(((25+50*j),(25+50*i)),text=str(grid[i][j]))
check_button=Button(canvas,text="CHECK!!!",font="Times 20 bold")
solve_button=Button(canvas,text="SOLVE!!!",font="Times 20 bold")
canvas.create_window((122,475),height=50,width=225,window=solve_button)
canvas.create_window((347,475),height=50,width=225,window=check_button)
solve_button.bind("<Button-1>",get_solved)
check_button.bind("<Button-1>",check_correct)
def check_correct(event):
global board
global entries
global canvas
for i in range(9):
for j in range(9):
if board[i][j]==0:
if entries[i][j].get():
board[i][j]=int(entries[i][j].get())
else:
board[i][j]=0
if is_correct(board):
tkinter.messagebox.showinfo("Congratulations!!!!","You have completed the sudoku!!!")
canvas.delete("all")
for i in range(9):
canvas.create_line(0,i*50,450,i*50)
canvas.create_line(i*50,0,i*50,450)
for i in range(9):
for j in range(9):
canvas.create_text(((25+50*j),(25+50*i)),text=str(board[i][j]))
check_button=Button(canvas,text="CHECK!!!",font="Times 20 bold")
solve_button=Button(canvas,text="SOLVE!!!",font="Times 20 bold")
canvas.create_window((122,475),height=50,width=225,window=solve_button)
canvas.create_window((347,475),height=50,width=225,window=check_button)
else:
tkinter.messagebox.showinfo("SORRY!!!!","That is not correct!!!")
def is_correct(grid):
for i in range(9):
for j in range(9):
if grid[i][j]==0:
return False
if not if_poss(grid,i,j,grid[i][j]):
return False
return True
def get_solved(event):
global board
global canvas
solver(board)
canvas.delete("all")
for i in range(9):
canvas.create_line(0,i*50,450,i*50)
canvas.create_line(i*50,0,i*50,450)
for i in range(9):
for j in range(9):
canvas.create_text(((25+50*j),(25+50*i)),text=str(board[i][j]))
def easy(event):
grid=get_grid(1)
create_board(grid)
def medium(event):
grid=get_grid(2)
create_board(grid)
def hard(event):
grid=get_grid(3)
create_board(grid)
#Select Difficult
canvas.create_text((220,75),text="Select Difficulty",font="Times 20 bold")
easy_button=Button(canvas,text="Easy",font="Times 20 bold")
med_button=Button(canvas,text="Medium",font="Times 20 bold")
hard_button=Button(canvas,text="Hard",font="Times 20 bold")
canvas.create_window((220,125),height=40,width=200,window=easy_button)
canvas.create_window((220,205),height=40,width=200,window=med_button)
canvas.create_window((220,285),height=40,width=200,window=hard_button)
easy_button.bind("<Button-1>",easy)
med_button.bind("<Button-1>",medium)
hard_button.bind("<Button-1>",hard)
root.mainloop()
|
m-mukund/Sudoko_Solver
|
sudoku_inter.py
|
sudoku_inter.py
|
py
| 3,397 |
python
|
en
|
code
| 0 |
github-code
|
6
|
69869460347
|
# Standard-library and third-party imports used by the helpers below.
import os
import sys
import math
import inspect
import hashlib
import datetime
import functools
import numpy as np
import pandas as pd
def Mark(
x=None, text='', color='',
bold=True, underline=False):
""" This function prints an object x and adds a description text.
It is useful for for debugging. """
start = ''
end = ''
if color != '' or bold or underline:
end='\033[0m'
colorDict = {
'': '',
None: '',
'purple' : '\033[95m',
'cyan' : '\033[96m',
'darkcyan' : '\033[36m',
'blue' : '\033[94m',
'green' : '\033[92m',
'yellow' : '\033[93m',
'red' : '\033[91m',
'bold' : '\033[1m',
'underline' : '\033[4m',
'end' : '\033[0m'}
if bold:
start = start + colorDict['bold']
if underline:
start = start + colorDict['underline']
start = start + colorDict[color]
print("*** " + start + text + end)
if x is not None:
print(x)
'''
## examples
Mark(2*10, 'xxx', bold=False, underline=True)
Mark(2*10, 'xxx', bold=False, underline=True, color='red')
Mark(x='', text='xxx', bold=True, underline=True, color='green')
'''
## These are the default functions to communicate with OS
# re-write them if needed
FileExists = os.path.exists
OpenFile = open
ListDir = os.listdir
def CustomMarkFcn(fn=None, logTime=True, color=''):
""" This functions returns a custom function which prints x,
with description text.
It also saves x in a file if fn is not None. """
fileExists = False
if fn != None:
fileExists = FileExists(fn)
if fileExists:
appendWrite = 'a' # append if already exists
else:
appendWrite = 'w' # make a new file if not
# define the Marking Fcn here
def F(x=None, text='', color=color, bold=True, underline=False):
timeStr = str(datetime.datetime.now())[:19]
if fn is not None:
orig_stdout = sys.stdout
f = OpenFile(fn, appendWrite)
sys.stdout = f
if logTime:
Mark(text='This was run at this time:' + timeStr,
bold=False, underline=False, color='')
Mark(x=x, text=text, color='', bold=False, underline=False)
f.close()
sys.stdout = orig_stdout
if logTime:
Mark(text='This was run at this time:' + timeStr)
Mark(x=x, text=text, color=color, bold=bold, underline=underline)
return F
'''
fn = 'log.txt'
CustomMark = CustomMarkFcn(fn=fn)
CustomMark(x=2, text='NO')
'''
# to print a function definition
PrintFcnContent = inspect.getsourcelines
## mapping a dictionary via map
#def MapDict(f, dic):
# return dict(map(lambda (k,v): (k, f(v)), dic.iteritems()))
def BitOr(x):
"""bitwise OR: same as BIT_OR in SQL."""
return functools.reduce(lambda a,b: (a|b), x)
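# Example: BitOr([1, 2, 4]) == 7 and BitOr([5, 3]) == 7, matching SQL's BIT_OR.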
def Signif(n):
""" Builds a function for rounding up to n number of significant digits."""
def F(x):
if math.isnan(x):
return x
if x == 0:
return 0
out = round(np.absolute(x),
-int(math.floor(math.log10(np.absolute(x))) + (-n+1)))
if x < 0:
out = -out
return out
return F
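# Examples: Signif(3)(0.0012345) -> 0.00123 ; Signif(2)(-1234) -> -1200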
### Reading / Writing Data
## read csv
def ReadCsv(
fn,
sep=',',
nrows=None,
typeDict={},
header='infer',
engine='c',
error_bad_lines=False,
printLog=False):
with OpenFile(fn, 'r') as f:
df = pd.read_csv(
f, sep=sep, nrows=nrows, dtype=typeDict, header=header,
engine=engine, error_bad_lines=error_bad_lines)
if printLog:
print(fn + ' was read.')
return df
## write csv (or tsv)
def WriteCsv(
fn,
df,
sep=',',
append=False,
index=False,
printLog=False):
wa = 'w'
header = list(df.columns)
if append:
wa = 'a'
header = False
with OpenFile(fn, wa) as f:
df.to_csv(f, sep=sep, index=index, mode=wa, header=header)
if printLog:
print(fn + ' was written.')
return None
## reads multiple data files according to a pattern given.
## Filters them and then row binds them.
## the pattern is given in three lists: prefix, middle, suffix
def ReadMultipleDf(
prefix,
middle,
suffix,
ReadF=ReadCsv,
DfFilterF=None):
n = max([len(prefix), len(middle), len(suffix)])
def FillList(x):
if len(x) < n:
x = x*n
x = x[:n]
return x
prefix = FillList(prefix)
suffix = FillList(suffix)
middle = FillList(middle)
df = pd.DataFrame({'prefix': prefix, 'middle': middle, 'suffix': suffix})
fileList = (df['prefix'] + df['middle'] + df['suffix']).values
#dfList = list()
for i in range(len(fileList)):
f = fileList[i]
df = ReadF(f)
if DfFilterF != None:
df = DfFilterF(df)
if i == 0:
dfAll = df
else:
dfAll = dfAll.append(df, ignore_index=True)
return dfAll
## Read all files in a dir with same columns
# and concatenating them
def ReadDirData(
path, ListDirF=ListDir, ReadF=ReadCsv,
WriteF=WriteCsv, writeFn=None, DfFilterF=None):
print(path)
fileList = ListDirF(path)
print(fileList)
#dfList = list()
outDf = None
for i in range(len(fileList)):
f = path + fileList[i]
print("*** opening: " + f)
df = ReadF(f)
print("data shape for this partition:")
print(df.shape)
if DfFilterF != None:
df = DfFilterF(df)
print("data shape for this partition after filtering:")
print(df.shape)
## we either row bind data or we write data if writeFn is not None
if writeFn == None:
if i == 0:
outDf = df
else:
outDf = outDf.append(df, ignore_index=True)
else:
if i == 0:
WriteF(fn=writeFn, df=df, sep=',', append=False)
else:
WriteF(fn=writeFn, df=df, sep=',', append=True)
print("First rows of data:")
print(df.iloc[:5])
return outDf
## Read all files in a dir with same columns
# and concatenating them
def ReadDirData_parallel(
path, ListDirF=ListDir, ReadF=ReadCsv,
WriteF=WriteCsv, writeFn=None,
DfFilterF=None, returnDfDict=False,
limitFileNum=None):
print(path)
fileList = ListDirF(path)
print(fileList)
if limitFileNum is not None:
k = min(limitFileNum, len(fileList))
fileList = fileList[:k]
outDf = None
dfDict = {}
def F(i):
f = path + fileList[i]
Mark(text="opening: partition " + str(i) + '; ' + f)
df = ReadF(f)
Mark(df.shape, text="data shape for partition " + str(i))
if DfFilterF != None:
df = DfFilterF(df)
Mark(
df.shape,
text="data shape for partition " + str(i) + " after filtering:")
dfDict[i] = df
return None
[F(x) for x in range(len(fileList))]
if returnDfDict:
return dfDict
## we either row bind data or we write data if writeFn is not None
if writeFn is None:
'''
for i in range(len(fileList)):
if i == 0:
outDf = dfDict[i]
else:
outDf = outDf.append(dfDict[i], ignore_index=True)
'''
outDf = pd.concat(dfDict.values())
else:
for i in range(len(fileList)):
if i == 0:
WriteF(fn=writeFn, df=dfDict[i], sep=',', append=False)
else:
WriteF(fn=writeFn, df=dfDict[i], sep=',', append=True)
Mark(outDf.iloc[:10], text="First rows of data:")
return outDf
def Write_shardedData_parallel(
df, fnPrefix, path, fnExten=".csv",
partitionCol=None,
shardNum=100, WriteF=WriteCsv,
limitFileNum=None):
""" write sharded data wrt a partition column
the data is written in parallel for speed purposes
also at read time we can read data faster"""
if partitionCol is None:
partitionCol = "dummy_col"
df["dummy_col"] = range(len(df))
def Bucket(s):
return int(hashlib.sha1(str(s).encode("utf-8")).hexdigest(), 16) % (shardNum)
df["shard"] = df[partitionCol].map(Bucket)
if partitionCol == "dummy_col":
del df["dummy_col"]
def Write(bucket):
df0 = df[df["shard"] == bucket]
fn = path + fnPrefix + "_" + str(bucket) + ".csv"
WriteF(fn=fn, df=df0, sep=',', append=False)
print(fn + " was written")
buckets = list(set(df["shard"].values))
if limitFileNum is not None:
k = min(limitFileNum, len(buckets))
buckets = buckets[:k]
[Write(bucket) for bucket in buckets]
return None
"""
df = GenUsageDf_forTesting()
path = ""
Write_shardedData_parallel(
df=df, fnPrefix="test", path=path, fnExten=".csv",
partitionCol="user_id",
WriteF=WriteCsv)
"""
############### Part 1: Data frame and data wrangling functions
## generate a data frame manually for testing data frame functions
# and usage metrics
def GenUsageDf_forTesting():
df = pd.DataFrame(columns=[
'country', 'user_id', 'expt', 'date', 'time',
'end_time', 'prod', 'form_factor'])
df.loc[len(df)] = ['US', '0', 'base', '2017-04-12', '2017-04-12 00:03:00',
'2017-04-12 00:04:00', 'PresFeat', 'COMP']
df.loc[len(df)] = ['US', '0', 'base', '2017-04-12', '2017-04-12 00:04:01',
'2017-04-12 00:05:03', 'photoFeat', 'COMP']
df.loc[len(df)] = ['US', '0', 'base', '2017-04-12', '2017-04-12 00:05:05',
'2017-04-12 00:06:04', 'PresFeat', 'PHN']
df.loc[len(df)] = ['US', '0', 'base', '2017-04-12', '2017-04-12 00:06:05',
'2017-04-12 00:06:08', 'PresFeat', 'PHN']
df.loc[len(df)] = ['US', '0', 'base', '2017-04-12', '2017-04-12 00:06:30',
'2017-04-12 00:06:45', 'exploreFeat', 'COMP']
df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:07:00',
'2017-04-12 00:07:50', 'editingFeat', 'PHN']
df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:14:00',
'2017-04-12 00:14:10', 'photoFeat', 'COMP']
df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:16:00',
'2017-04-12 00:17:09', 'locFeat', 'COMP']
df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:18:00',
'2017-04-12 00:18:30', 'browsingFeat', 'COMP']
df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:20:00',
'2017-04-12 00:21:00', 'locFeat', 'COMP']
df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:22:00',
'2017-04-12 00:22:00', 'browsingFeat', 'PHN']
df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:03:00',
'2017-04-12 00:04:00', 'PresFeat', 'COMP']
df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:04:01',
'2017-04-12 00:05:03', 'photoFeat', 'COMP']
df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:05:05',
'2017-04-12 00:06:04', 'PresFeat', 'PHN']
df.loc[len(df)] = ['US', '2', 'base', '2017-04-12', '2017-04-12 00:06:05',
'2017-04-12 00:06:08', 'PresFeat', 'PHN']
df.loc[len(df)] = ['US', '2', 'base', '2017-04-12', '2017-04-12 00:06:30',
'2017-04-12 00:06:45', 'exploreFeat', 'COMP']
df.loc[len(df)] = ['US', '2', 'base', '2017-04-12', '2017-04-12 00:07:00',
'2017-04-12 00:07:50', 'editingFeat', 'PHN']
df.loc[len(df)] = ['JP', '3', 'test', '2017-04-12', '2017-04-12 00:14:00',
'2017-04-12 00:14:10', 'photoFeat', 'COMP']
df.loc[len(df)] = ['JP', '3', 'test', '2017-04-12', '2017-04-12 00:14:20',
'2017-04-12 00:18:59', 'locFeat', 'COMP']
df.loc[len(df)] = ['JP', '3', 'test', '2017-04-12', '2017-04-12 00:19:00',
'2017-04-12 00:20:00', 'locFeat', 'COMP']
df.loc[len(df)] = ['JP', '3', 'test', '2017-04-12', '2017-04-12 00:20:20',
'2017-04-12 00:22:00', 'browsingFeat', 'PHN']
df.loc[len(df)] = ['US', '4', 'test', '2017-04-14', '2017-04-14 00:03:10',
'2017-04-14 00:04:00', 'PresFeat', 'COMP']
df.loc[len(df)] = ['US', '4', 'test', '2017-04-14', '2017-04-14 00:04:10',
'2017-04-14 00:05:03', 'photoFeat', 'COMP']
df.loc[len(df)] = ['US', '4', 'test', '2017-04-14', '2017-04-14 00:05:15',
'2017-04-14 00:06:04', 'PresFeat', 'PHN']
df.loc[len(df)] = ['US', '4', 'test', '2017-04-14', '2017-04-14 00:06:01',
'2017-04-14 00:06:08', 'PresFeat', 'PHN']
df.loc[len(df)] = ['US', '4', 'test', '2017-04-14', '2017-04-14 00:06:35',
'2017-04-14 00:06:45', 'PresFeat', 'COMP']
df.loc[len(df)] = ['US', '5', 'test', '2017-04-14', '2017-04-14 00:03:07',
'2017-04-14 00:04:00', 'PresFeat', 'COMP']
df.loc[len(df)] = ['US', '5', 'test', '2017-04-14', '2017-04-14 00:04:04',
'2017-04-14 00:05:03', 'photoFeat', 'COMP']
df.loc[len(df)] = ['US', '5', 'test', '2017-04-14', '2017-04-14 00:05:04',
'2017-04-14 00:06:04', 'PresFeat', 'PHN']
df.loc[len(df)] = ['US', '5', 'test', '2017-04-14', '2017-04-14 00:06:03',
'2017-04-14 00:06:08', 'PresFeat', 'PHN']
df.loc[len(df)] = ['US', '5', 'test', '2017-04-14', '2017-04-14 00:06:28',
'2017-04-14 00:06:45', 'PresFeat', 'COMP']
df.loc[len(df)] = ['JP', '6', 'test', '2017-04-14', '2017-04-14 00:14:01',
'2017-04-14 00:14:10', 'photoFeat', 'COMP']
df.loc[len(df)] = ['JP', '6', 'test', '2017-04-14', '2017-04-14 00:14:19',
'2017-04-14 00:18:59', 'locFeat', 'COMP']
df.loc[len(df)] = ['JP', '6', 'test', '2017-04-14', '2017-04-14 00:19:10',
'2017-04-14 00:20:00', 'locFeat', 'COMP']
df.loc[len(df)] = ['JP', '6', 'test', '2017-04-14', '2017-04-14 00:20:11',
'2017-04-14 00:22:00', 'browsingFeat', 'PHN']
df.loc[len(df)] = ['JP', '7', 'base', '2017-04-15', '2017-04-15 00:14:11',
'2017-04-15 00:14:10', 'photoFeat', 'COMP']
df.loc[len(df)] = ['JP', '7', 'base', '2017-04-15', '2017-04-15 00:14:22',
'2017-04-15 00:18:59', 'locFeat', 'COMP']
df.loc[len(df)] = ['JP', '7', 'base', '2017-04-15', '2017-04-15 00:19:57',
'2017-04-15 00:20:00', 'locFeat', 'COMP']
df.loc[len(df)] = ['JP', '7', 'base', '2017-04-15', '2017-04-15 00:21:56',
'2017-04-15 00:22:00', 'browsingFeat', 'PHN']
df.loc[len(df)] = ['FR', '8', 'base', '2017-04-12', '2017-04-12 00:03:00',
'2017-04-12 00:04:00', 'PresFeat', 'COMP']
df.loc[len(df)] = ['FR', '8', 'base', '2017-04-12', '2017-04-12 00:04:01',
'2017-04-12 00:05:03', 'photoFeat', 'COMP']
df.loc[len(df)] = ['FR', '8', 'base', '2017-04-12', '2017-04-12 00:05:05',
'2017-04-12 00:06:04', 'PresFeat', 'PHN']
df.loc[len(df)] = ['FR', '8', 'base', '2017-04-12', '2017-04-12 00:06:05',
'2017-04-12 00:06:08', 'PresFeat', 'PHN']
df.loc[len(df)] = ['FR', '8', 'base', '2017-04-12', '2017-04-12 00:06:30',
'2017-04-12 00:06:45', 'exploreFeat', 'COMP']
df.loc[len(df)] = ['FR', '9', 'test', '2017-04-15', '2017-04-15 00:14:11',
'2017-04-15 00:14:10', 'photoFeat', 'COMP']
df.loc[len(df)] = ['FR', '9', 'test', '2017-04-15', '2017-04-15 00:14:22',
'2017-04-15 00:18:59', 'locFeat', 'COMP']
df.loc[len(df)] = ['FR', '9', 'test', '2017-04-15', '2017-04-15 00:19:57',
'2017-04-15 00:20:00', 'locFeat', 'COMP']
df.loc[len(df)] = ['FR', '9', 'test', '2017-04-15', '2017-04-15 00:21:56',
'2017-04-15 00:22:00', 'browsingFeat', 'PHN']
df.loc[len(df)] = ['NG', '10', 'test', '2017-04-16', '2017-04-15 00:21:56',
'2017-04-15 00:22:00', 'StorageFeat', 'PHN']
df.loc[len(df)] = ['IR', '11', 'test', '2017-04-12', '2017-04-15 00:21:56',
'2017-04-15 00:22:00', 'browsingFeat', 'PHN']
df.loc[len(df)] = ['IR', '12', 'base', '2017-04-16', '2017-04-15 00:21:56',
'2017-04-15 00:22:00', 'watchFeat', 'PHN']
df.loc[len(df)] = ['IR', '13', 'base', '2017-04-12', '2017-04-12 00:03:00',
'2017-04-12 00:04:00', 'PresFeat', 'COMP']
df.loc[len(df)] = ['RU', '14', 'base', '2017-04-12', '2017-04-12 00:03:00',
'2017-04-12 00:04:00', 'PresFeat', 'COMP']
df.loc[len(df)] = ['RU', '15', 'base', '2017-04-13', '2017-04-13 00:03:00',
'2017-04-13 00:04:00', 'PresFeat', 'COMP']
df.loc[len(df)] = ['RU', '16', 'base', '2017-04-14', '2017-04-14 00:03:00',
'2017-04-14 00:04:00', 'PresFeat', 'COMP']
df['user_id'] = 'id' + df['user_id']
def F(x):
return(datetime.datetime.strptime(x[:19], "%Y-%m-%d %H:%M:%S"))
for col in ['time', 'end_time']:
df[col] = df[col].map(F)
df['duration'] = (df['end_time'] - df['time']) / np.timedelta64(1, 's')
df['value'] = np.random.uniform(low=1.0, high=5.0, size=df.shape[0])
return df
def BuildCondInd(df, condDict):
""" subsets a df according to values given in the dict: condDict
the data columns are given in the dictionary keys
the possible values (a list of values) for each column are
given in the dict values """
  cols = list(condDict.keys())
  n = df.shape[0]
  ind = pd.Series([True] * n)
  for i in range(len(cols)):
    col = cols[i]
    valueList = condDict[col]
    if valueList is not None and valueList != []:
      ind0 = (df[col].isin(valueList))
      ind = ind * ind0
return ind
'''
df = pd.DataFrame({
'a':[2, 1, 3, 2, 2, 2],
'b':['A', 'A', 'B', 'C', 'C', 'C'],
'c':['11','22','22','22', '22', '22']})
ind = BuildCondInd(df=df, condDict={'a':[1, 2], 'b':['A', 'B']})
df[ind]
ind = BuildCondInd(df=df, condDict={'a':[1, 2], 'b':None})
df[ind]
'''
## get the subset df directly, given the conditions in condDict
def SubDf_withCond(df, condDict, resetIndex=True):
if (condDict is None) or (len(condDict) == 0):
return df
df = df.reset_index(drop=True)
ind = BuildCondInd(df=df, condDict=condDict)
df2 = df[ind].copy()
if resetIndex:
df2 = df2.reset_index(drop=True)
return df2
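## a hypothetical usage sketch for SubDf_withCond (not from the original source);
# it reuses the same kind of toy df as the BuildCondInd example above
'''
df = pd.DataFrame({
    'a':[2, 1, 3, 2, 2, 2],
    'b':['A', 'A', 'B', 'C', 'C', 'C'],
    'c':['11', '22', '22', '22', '22', '22']})
SubDf_withCond(df=df, condDict={'a':[1, 2], 'b':['A', 'B']})
'''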
## subset df based on regex filters on string columns
# each key of regDict is a column name and its value is a list of regexes
def BuildRegexInd(df, regDict):
  cols = list(regDict.keys())
  n = df.shape[0]
  ind = pd.Series([True] * n)
  for i in range(len(cols)):
    col = cols[i]
    valueList = regDict[col]
    if valueList is not None and valueList != []:
      ind0 = pd.Series([False] * n)
      for value in valueList:
        ind0 = ind0 + df[col].map(str).str.contains(value)
      ind = ind * ind0
return ind
'''
df = pd.DataFrame(
{'a':[24, 12, 63, 2, 3312, 2],
'b':['A', 'A', 'BBAA', 'CD', 'CE', 'CF'],
'c':['11','22','22','23', '22', '22']})
ind = BuildRegexInd(df=df, regDict={'a':['1', '2'], 'b':['A', 'B']})
Mark(df[ind])
ind = BuildRegexInd(df=df, regDict={'a':['1', '3'], 'b':None})
Mark(df[ind])
ind = BuildRegexInd(df=df, regDict={'b':['B', 'C'], 'b':['^(?:(?!CE).)*$']})
Mark(df[ind])
## column b does not include CE but it includes A or B.
ind = BuildRegexInd(df=df, regDict={'b':['^(?!.*CE).*B.*$', '^(?!.*CE).*A.*$']})
Mark(df[ind])
'''
## builds a regex which requires both strings to be present
def Regex_includesBothStr(s1, s2):
out = '^(?=.*' + s1 + ')(?=.*' + s2 + ').*$'
return out
'''
reg = Regex_includesBothStr(' cat ', ' dog ')
print(reg)
print(pd.Series(['cat-dog', ' cat hates dog ', 'tiger']).str.contains(reg))
'''
## rehashing a column (col)
# the input is a dictionary of data frames which all have that column
# we make sure the rehashing is consistent across the data frames
def RehashCol_dfDict(dfDict, col, newCol=None, omitCol=False):
if newCol == None:
newCol = col + '_hashed'
dfNames = dfDict.keys()
values = []
for key in dfNames:
df0 = dfDict[key]
values0 = df0[col].values
values = list(set(values + list(values0)))
dfHash = pd.DataFrame({col: values, 'tempCol': range(len(values))})
newDfDict = {}
for key in dfNames:
df0 = dfDict[key]
dfNew = pd.merge(df0, dfHash, on=[col], how='left')
dfNew[newCol] = dfNew['tempCol']
del dfNew['tempCol']
if omitCol:
del dfNew[col]
newDfDict[key] = dfNew
return newDfDict
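## a hypothetical usage sketch for RehashCol_dfDict (not from the original source);
# the same user_id should map to the same hashed value in both frames
'''
dfDict = {
    'x': pd.DataFrame({'user_id': ['u1', 'u2', 'u3'], 'value': [1, 2, 3]}),
    'y': pd.DataFrame({'user_id': ['u2', 'u4'], 'value': [5, 6]})}
newDfDict = RehashCol_dfDict(dfDict=dfDict, col='user_id', newCol='user_num')
print(newDfDict['x'])
print(newDfDict['y'])
'''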
# it converts a float or string date to datetime
def FloatOrStr_toDate(x, format="%Y%m%d"):
if (x == None) or (x == 'nan') or (x == np.nan):
return pd.NaT
if (type(x).__name__ == 'float') and math.isnan(x):
return pd.NaT
s = str(x)
if s == 'nan':
return pd.NaT
import re
s = re.sub('_', '', s)
s = re.sub('-', '', s)
s = re.sub(':', '', s)
s = s[:8]
return datetime.datetime.strptime(s, format)
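## a hypothetical usage sketch for FloatOrStr_toDate (not from the original source)
'''
print(FloatOrStr_toDate(20170412.0))
print(FloatOrStr_toDate('2017-04-12'))
print(FloatOrStr_toDate(float('nan')))
'''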
## convert to datetime
def ConvertToDateTime(x, dateTimeFormat="%Y-%m-%d %H:%M:%S", strLen=19):
if (x == None) or (x == 'nan') or (x == np.nan):
return pd.NaT
if (type(x).__name__ == 'float') and math.isnan(x):
return pd.NaT
s = str(x)
if s == 'nan':
return pd.NaT
return datetime.datetime.strptime(x[:strLen], dateTimeFormat)
## also lets define a function generator version for easy mapping
def ConvertToDateTimeFcn(dateTimeFormat="%Y-%m-%d %H:%M:%S", strLen=19):
def F(x):
return ConvertToDateTime(x, dateTimeFormat=dateTimeFormat, strLen=strLen)
return F
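## a hypothetical usage sketch for ConvertToDateTime / ConvertToDateTimeFcn
# (not from the original source)
'''
ConvertToDateTime('2017-04-12 00:03:00')
F = ConvertToDateTimeFcn()
pd.Series(['2017-04-12 00:03:00', '2017-04-14 10:30:00']).map(F)
'''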
## convert weekday returned by isoweekday() to string
def WeekDayToStr(x):
d = {1:'Mon', 2:'Tue', 3:'Wed', 4:'Thu', 5:'Fri', 6:'Sat', 7:'Sun'}
if x in d.keys():
return d[x]
return 'nan'
'''
x = datetime.datetime(2017, 1, 1)
u = x.isoweekday()
WeekDayToStr(u)
'''
## casts object-typed columns to string
# and assigns "nan" to missing values
def PrepareDf(df):
# colTypes = [str(df[col].dtype) for col in df.columns]
for col in df.columns:
if str(df[col].dtype) == "object":
df[col].fillna("nan", inplace=True)
df[col] = df[col].astype(str)
return df
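## a hypothetical usage sketch for PrepareDf (not from the original source);
# only the object (string) column gets "nan" filled in
'''
df0 = pd.DataFrame({'a': ['x', None, 'y'], 'b': [1.0, 2.0, None]})
PrepareDf(df0)
'''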
# variable type
def Type(x):
return type(x).__name__
## short hashing
def ShortHash(s, length=8):
s = str(s).encode('utf-8')
hasher = hashlib.sha1(s)
  # decode so a str (not bytes) is returned in python 3
  return base64.urlsafe_b64encode(hasher.digest()[:length]).decode("utf-8")
"""
ShortHash("asas")
"""
## integrates the columns given in integOutCols out of a data frame.
# It uses integFcn for the aggregation
# and only keeps valueCols as the aggregated values
def IntegOutDf(df, integFcn, integOutCols, valueCols=None):
cols = list(df.columns)
if valueCols == None:
valueCols = list(filter(lambda x: x not in integOutCols, cols))
gCols = list(filter(lambda x: x not in (integOutCols + valueCols), cols))
if len(gCols) > 0:
cols = gCols + valueCols
df = df[cols]
if len(gCols) == 0:
gCols = ['tempCol']
df['tempCol'] = 1
g = df.groupby(by=gCols)
dfAgg = g.aggregate(integFcn)
dfAgg = dfAgg.reset_index()
if 'tempCol' in dfAgg.columns:
del dfAgg['tempCol']
return dfAgg
'''
size = 10
df = pd.DataFrame({
'categ1':np.random.choice(
a=['a', 'b', 'c', 'd', 'e'], size=size, replace=True),
'categ2':np.random.choice(a=['A', 'B'], size=size, replace=True),
'col1':np.random.uniform(low=0.0, high=100.0, size=size),
'col2':np.random.uniform(low=0.0, high=100.0, size=size)
})
print(df)
IntegOutDf(df, integFcn=sum, integOutCols=['categ1'], valueCols=['col1', 'col2'])
'''
## aggregates df with different agg fcns for multiple columns
# gCols is optional; if omitted it is assumed to be
# (all columns - the columns being aggregated)
def AggWithDict(df, aggDict, gCols=None):
cols = list(df.columns)
valueCols = aggDict.keys()
if gCols == None:
gCols = list(filter(lambda x: x not in (valueCols), cols))
g = df.groupby(gCols)
dfAgg = g.aggregate(aggDict)
dfAgg = dfAgg.reset_index()
return dfAgg
'''
size = 10
df = pd.DataFrame({
'categ1':np.random.choice(
a=['a', 'b', 'c', 'd', 'e'],
size=size,
replace=True),
'categ2':np.random.choice(a=['A', 'B'], size=size, replace=True),
'col1':np.random.uniform(low=0.0, high=100.0, size=size),
'col2':np.random.uniform(low=0.0, high=100.0, size=size)
})
df = df.sort_values(['categ2', 'categ1', 'col1', 'col2'])
print(df)
aggDf0 = AggWithDict(df=df, aggDict={'col1':sum, 'col2':min})
aggDf1 = AggWithDict(df=df, aggDict={'col1':sum, 'col2':min}, gCols=['categ2'])
print(aggDf0)
print(aggDf1)
'''
## find rows which have repeated values on some cols
def FindRepRows(df, cols):
return pd.concat(g for _, g in df.groupby(cols) if len(g) > 1)
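## a hypothetical usage sketch for FindRepRows (not from the original source)
'''
df = pd.DataFrame({
    'categ': ['a', 'a', 'b', 'c', 'c', 'c'],
    'value': [1, 2, 3, 4, 5, 6]})
FindRepRows(df=df, cols=['categ'])
'''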
## slice df by sliceCol and with given values in sliceValues
def DfSliceDict(df, sliceCol, sliceValues=None):
if sliceValues == None:
sliceValues = list(set(df[sliceCol].values))
dfDict = {}
for i in range(len(sliceValues)):
v = sliceValues[i]
dfDict[v] = df[df[sliceCol] == v]
return dfDict
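## a hypothetical usage sketch for DfSliceDict (not from the original source)
'''
df = pd.DataFrame({
    'country': ['US', 'US', 'JP', 'FR'],
    'value': [1, 2, 3, 4]})
dfDict = DfSliceDict(df=df, sliceCol='country')
dfDict['JP']
'''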
## merge dfDict
def MergeDfDict(dfDict, onCols, how='outer', naFill=None):
  keys = list(dfDict.keys())
for i in range(len(keys)):
key = keys[i]
df0 = dfDict[key]
cols = list(df0.columns)
valueCols = list(filter(lambda x: x not in (onCols), cols))
df0 = df0[onCols + valueCols]
df0.columns = onCols + [(s + '_' + key) for s in valueCols]
if i == 0:
outDf = df0
else:
outDf = pd.merge(outDf, df0, how=how, on=onCols)
if naFill != None:
outDf = outDf.fillna(naFill)
return outDf
'''
def GenDf(size):
df = pd.DataFrame({
'categ1':np.random.choice(
a=['a', 'b', 'c', 'd', 'e'], size=size, replace=True),
'categ2':np.random.choice(a=['A', 'B'], size=size, replace=True),
'col1':np.random.uniform(low=0.0, high=100.0, size=size),
'col2':np.random.uniform(low=0.0, high=100.0, size=size)
})
df = df.sort_values(['categ2', 'categ1', 'col1', 'col2'])
return(df)
size = 5
dfDict = {'US':GenDf(size), 'IN':GenDf(size), 'GER':GenDf(size)}
MergeDfDict(dfDict=dfDict, onCols=['categ1', 'categ2'], how='outer', naFill=0)
'''
## split data based on values of a column: col
def SplitDfByCol(df, col):
  # unique values of the splitting column
  uniqueNames = df[col].unique()
  # a data frame dictionary to store one data frame per value
dfDict = {elem : pd.DataFrame for elem in uniqueNames}
for key in dfDict.keys():
dfDict[key] = df[df[col] == key]
return dfDict
## calculates value_counts() aka freq for combination of cols
def CombinFreqDf(
df, cols=None, countColName='cnt', propColName='prop (%)'):
if Type(df) == "Series":
df = pd.DataFrame(df)
if cols == None:
cols = list(df.columns)
if len(cols) < 2:
cols.append('dummy')
df['dummy'] = 'NA'
outDf = df[cols].groupby(cols).agg(len).reset_index()
outDf.columns = list(outDf.columns[:len(outDf.columns)-1]) + [countColName]
outDf[propColName] = 100.0 * outDf[countColName] / outDf[countColName].sum()
outDf = outDf.sort_values([countColName], ascending=[0])
if 'dummy' in cols:
del outDf['dummy']
outDf = outDf.reset_index(drop=True)
return outDf
'''
df0 = pd.DataFrame({
'app':['fb', 'fb', 'mailingFeat', 'mailingFeat'],
'party':['1P', '1P', '3P', '3P']})
CombinFreqDf(df=df0, cols=['app', 'party'])
'''
## maps a categorical variable with too many labels to fewer labels.
def Remap_lowFreqCategs(
df,
cols,
newLabels="nan",
otherLabelsToReMap=None,
freqThresh=5,
propThresh=0.1,
labelsNumMax=None):
df2 = df.copy()
k = len(cols)
if Type(freqThresh) == 'int':
freqThresh = [freqThresh] * k
if Type(propThresh) in ['int', 'float']:
propThresh = [propThresh] * k
if Type(newLabels) == 'str':
newLabels = [newLabels] * k
if (labelsNumMax is not None) and Type(labelsNumMax) == 'int':
labelsNumMax = [labelsNumMax] * k
def GetFreqLabels(i):
col = cols[i]
freqDf = CombinFreqDf(df[col])
ind = (freqDf["cnt"] > freqThresh[i]) & (freqDf["prop (%)"] > propThresh[i])
freqLabels = list(freqDf.loc[ind][col].values)
if labelsNumMax is not None:
maxNum = min(len(freqLabels), labelsNumMax[i])
freqLabels = freqLabels[0:(maxNum)]
if otherLabelsToReMap is not None:
freqLabels = list(set(freqLabels) - set(otherLabelsToReMap))
return freqLabels
freqLabelsList = [GetFreqLabels(x) for x in range(k)]
freqLabelsDict = dict(zip(cols, freqLabelsList))
def F(df):
for i in range(len(cols)):
col = cols[i]
newLabel = newLabels[i]
ind = [x not in freqLabelsDict[col] for x in df[col]]
if max(ind):
df.loc[ind, col] = newLabel
return df
return {"df":F(df2), "F":F, "freqLabelsDict":freqLabelsDict}
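## a hypothetical usage sketch for Remap_lowFreqCategs (not from the original source);
# the rare labels "c" and "d" should be re-mapped to "OTHER"
'''
df = pd.DataFrame({
    'categ': ['a'] * 50 + ['b'] * 40 + ['c'] * 2 + ['d'],
    'value': range(93)})
res = Remap_lowFreqCategs(
    df=df, cols=['categ'], newLabels='OTHER', freqThresh=5, propThresh=0.1)
Mark(res['df']['categ'].value_counts())
Mark(res['freqLabelsDict'])
'''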
## this function works on a data frame with two categorical columns
## one is the category column
## one is the label column
## for each category it creates a distribution for the labels
def CalcFreqTablePerCateg(df, categCol, valueCol):
def AggFcnBuild(categValue):
def F(x):
return sum(x == categValue)/(1.0)
return F
df = df.fillna('NA')
labels = list(set(df[valueCol]))
def G(value):
AggFcn = AggFcnBuild(value)
dfAgg = df.groupby([categCol])[[valueCol]].agg(lambda x: AggFcn(x))
dfAgg = dfAgg.reset_index()
return dfAgg
value = labels[0]
dfAgg = G(value)
for i in range(1, len(labels)):
value = labels[i]
dfAgg1 = G(value)
dfAgg = pd.merge(dfAgg, dfAgg1, how='left', on=[categCol])
dfAgg.columns = [categCol] + labels
return {'df': dfAgg, 'labels': labels}
'''
size = 20
df0 = pd.DataFrame({
'categ':np.random.choice(a=['a', 'b', 'c'], size=size, replace=True),
'value':np.random.choice(a=['AA', 'BB', 'CC'], size=size, replace=True),
'col2':np.random.uniform(low=0.0, high=100.0, size=size),
'col3':np.random.uniform(low=0.0, high=100.0, size=size),
'col4':np.random.uniform(low=0.0, high=100.0, size=size)})
CalcFreqTablePerCateg(df=df0, categCol='categ', valueCol='value')['df']
'''
## merges a dict of tables
def MergeTablesDict(tabDict):
  keys = list(tabDict.keys())
#print(keys)
n = len(keys)
for i in range(n):
key = keys[i]
tab = tabDict[key]
df = PropDfTab(tab)
df = df[['categ', 'freq', 'prop']]
df.columns = ['categ', 'freq_' + key, 'prop_' + key]
if i == 0:
outDf = df
else:
outDf = pd.merge(outDf, df, on=['categ'], how='outer')
outDf = outDf.reset_index(drop=True)
outDf = outDf.fillna(value=0)
return outDf
## creating a single string column using multiple columns (cols)
# and adding that to the data frame
def Concat_stringColsDf(df, cols, colName=None, sepStr='-'):
x = ''
if colName == None:
colName = sepStr.join(cols)
for i in range(len(cols)):
col = cols[i]
x = (x + df[col].map(str))
    if (i < len(cols) - 1):
      x = x + sepStr
df[colName] = x
return df
'''
df = pd.DataFrame({'a':range(3), 'b':['rr', 'gg', 'gg'], 'c':range(3)})
Concat_stringColsDf(df=df, cols=['a', 'b', 'c'], colName=None, sepStr='-')
'''
## flatten a column (listCol) of df with multiple values
def Flatten_RepField(df, listCol, sep=None):
  if sep is not None:
    df = df.assign(**{listCol: df[listCol].str.split(sep)})
outDf = pd.DataFrame({
col: np.repeat(df[col].values, df[listCol].str.len())
for col in df.columns.difference([listCol])
}).assign(
**{listCol: np.concatenate(df[listCol].values)})[df.columns.tolist()]
return outDf
'''
df = pd.DataFrame({'var1': ['a,b,c', 'd,e,f'], 'var2': [1, 2], 'var3':[5, 6]})
print(df)
Flatten_RepField(df, listCol='var1', sep=',')
'''
### tables p-value
## for a given table of frequencies and for each category calculates
## the total count of other categs (complement)
def TabCategCompl(tab, categCol, freqCol, complementCol=None):
categs = tab[categCol].values
s = tab[freqCol].sum()
complement = list()
for i in range(tab.shape[0]):
categ = categs[i]
tab0 = tab[(tab[categCol] == categ)]
x = tab0[freqCol].values[0]
c = s - x
complement.append(c)
if complementCol == None:
complementCol = freqCol + '_compl'
tab[complementCol] = complement
return tab
## does above for multiple columns
def TabCategComplMulti(tab, categCol, freqCols):
complementCols = []
for i in range(len(freqCols)):
freqCol = freqCols[i]
tab = TabCategCompl(tab=tab, categCol=categCol, freqCol=freqCol,
complementCol=None)
complementCol = freqCol + '_compl'
complementCols.append(complementCol)
cols = freqCols + complementCols
tab = tab[[categCol] + cols]
return tab
## adds a p-value per categ for comparing two frequencies
def TabComplPvalue(tab, categCol, freqCols):
tab = TabCategComplMulti(tab, categCol, freqCols)
n = tab.shape[0]
pvalueList = []
for i in range(n):
r = tab.iloc[i]
    d = pd.DataFrame({'col1': [r.iloc[1], r.iloc[2]], 'col2': [r.iloc[3], r.iloc[4]]})
pvalue = scipy.stats.fisher_exact(table=d, alternative='two-sided')[1]
pvalueList.append(Signif(3)(pvalue))
tab['p-value'] = pvalueList
return tab
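## a hypothetical usage sketch for TabCategComplMulti / TabComplPvalue
# (not from the original source); it adds a Fisher exact p-value per category
'''
tab = pd.DataFrame({
    'categ': ['a', 'b', 'c'],
    'freq_base': [100, 50, 10],
    'freq_test': [90, 70, 12]})
TabComplPvalue(tab=tab, categCol='categ', freqCols=['freq_base', 'freq_test'])
'''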
#### Useful functions for mapping a string column to another string column
## using a pattern string
## function for classification mapping
## it uses patterns to map to categories (general purpose)
def LabelByPattern(
x, patternDf, patternCol='pattern', categCol='category',
exactMatch=False):
  # x: a series of strings to be labeled
  # patternDf: has a pattern column and a category column
# remove duplicate rows
import re
patternDf = patternDf.drop_duplicates(keep='first')
patterns = patternDf[patternCol]
categs = patternDf[categCol]
y = ['']*len(x)
outDf = pd.DataFrame({'x':x, 'y':y})
for i in range(len(patterns)):
pattern = patterns[i]
categ = categs[i]
hasCateg = x.str.contains(pattern)
    if exactMatch:
      hasCateg = (x == pattern)
ind = np.where(hasCateg > 0)[0].tolist()
for j in ind:
if not bool(re.search(categ, y[j])):
y[j] = y[j] + categ
outDf['y'] = y
outDf.columns = ['signal', categCol]
return outDf
## label a data frame based on patternDf
# which includes pattern column and category column
def LabelByPatternDf(
df, signalCol, patternDf, patternCol, categCol,
newColName='mapped_category'):
patternDf = patternDf[[patternCol, categCol]]
patternDf = patternDf.drop_duplicates(keep='first')
patternDf = patternDf.reset_index(drop=True)
x = df[signalCol]
df2 = LabelByPattern(x=x, patternDf=patternDf, patternCol=patternCol,
categCol=categCol)
df[newColName] = df2[categCol]
return df
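## a hypothetical usage sketch for LabelByPattern / LabelByPatternDf
# (not from the original source); signals matching several patterns get
# the concatenation of the corresponding categories
'''
patternDf = pd.DataFrame({
    'pattern': ['cat', 'dog'],
    'category': ['feline', 'canine']})
df = pd.DataFrame({'signal': ['the cat sat', 'a dog ran', 'cat and dog', 'fish']})
LabelByPatternDf(
    df=df, signalCol='signal', patternDf=patternDf,
    patternCol='pattern', categCol='category')
'''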
######################### Graphical/Plotting Functions ######################
## bar charts for multiple columns (yCols), with different colors
## x axis labels come from the xCol
def BarPlotMultiple(df, xCol, yCols, rotation=45, pltTitle=''):
x = range(len(df[xCol]))
colorList = ['r', 'm', 'g', 'y', 'c']
x = 8*np.array(x)
for i in range(len(yCols)):
col = yCols[i]
x1 = x + 1*i
plt.bar(x1, df[col], color=colorList[i], alpha=0.6, label=col)
locs, labels = plt.xticks()
plt.xticks(x1, df[xCol], rotation=rotation)
plt.setp(labels, rotation=rotation, fontsize=10)
plt.title(pltTitle + ': ' + xCol)
plt.legend()
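## a hypothetical usage sketch for BarPlotMultiple (not from the original source)
'''
df = pd.DataFrame({
    'categ': ['a', 'b', 'c'],
    'col1': [1.0, 2.0, 3.0],
    'col2': [2.0, 1.0, 0.5]})
BarPlotMultiple(df=df, xCol='categ', yCols=['col1', 'col2'])
'''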
import matplotlib.scale as mscale
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.ticker as ticker
import numpy as np
class SquareRootScale(mscale.ScaleBase):
"""
ScaleBase class for generating square root scale.
"""
name = 'squareroot'
def __init__(self, axis, **kwargs):
mscale.ScaleBase.__init__(self)
def set_default_locators_and_formatters(self, axis):
axis.set_major_locator(ticker.AutoLocator())
axis.set_major_formatter(ticker.ScalarFormatter())
axis.set_minor_locator(ticker.NullLocator())
axis.set_minor_formatter(ticker.NullFormatter())
def limit_range_for_scale(self, vmin, vmax, minpos):
return max(0., vmin), vmax
class SquareRootTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
def transform_non_affine(self, a):
return np.array(a)**0.5
def inverted(self):
return SquareRootScale.InvertedSquareRootTransform()
class InvertedSquareRootTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
def transform(self, a):
return np.array(a)**2
def inverted(self):
return SquareRootScale.SquareRootTransform()
def get_transform(self):
return self.SquareRootTransform()
mscale.register_scale(SquareRootScale)
## compares the freq of usages given in labelCol
# and creates a joint distribution of labelCol given in x-axis
# and compareCol given with colors
# it either shows the joint probability (prop (%)) on y-axis
# or it will show the freq divided by denomConstant if prop == False
# labelCol: categorical var denoted in x-axis
# compareCol: categorical var denoted by colors
# countDistinctCols: what should be counted once, e.g. unit_id will count
# each item with given unit_id once
# prop = True will calculate proportions; otherwise we divide counts by
# denomConstant,
# and if denomCountCols is given, we use it to count the number of items
# and divide by (denomConstant * itemCount)
def PltCompare_bivarCategFreq(
df, labelCol, compareCol=None, countDistinctCols=None,
rotation=90, pltTitle='', compareOrder=None, limitNum=None,
prop=True, denomConstant=1.0, denomCountCols=None,
newColName="value", yScale=None):
if countDistinctCols is not None:
keepCols = [labelCol] + countDistinctCols
if compareCol is not None:
keepCols = keepCols + [compareCol]
if denomCountCols is not None:
keepCols = keepCols + denomCountCols
df = df[keepCols].drop_duplicates().reset_index()
if compareCol is None:
combinDf = CombinFreqDf(df[labelCol])
else:
combinDf = CombinFreqDf(df[[labelCol, compareCol]])
hue = compareCol
  if limitNum is not None:
    combinDf = combinDf[:limitNum]
respCol = "prop (%)"
#Mark(denomConstant, "denomConstant")
if denomCountCols is not None:
itemCount = len(df[denomCountCols].drop_duplicates().reset_index())
denomConstant = 1.0 * denomConstant * itemCount
#Mark(denomConstant, "denomConstant")
if prop is False:
combinDf[newColName] = combinDf["cnt"] / denomConstant
respCol = newColName
  if compareCol is None:
    sns.barplot(data=combinDf, x=labelCol, y=respCol)
  else:
    # pass compareOrder through so the hue ordering requested by the caller is used
    sns.barplot(
        data=combinDf, x=labelCol, hue=hue, y=respCol, hue_order=compareOrder)
locs, labels = plt.xticks()
out = plt.setp(labels, rotation=rotation, fontsize=10)
plt.legend(loc='upper right')
if yScale is not None:
plt.yscale(yScale)
return combinDf
"""
df = pd.DataFrame({
"label":["cat", "dog", "cat", "dog", "dog", "cat", "cat", "dog"],
"gender":["M", "F", "M", "F", "F", "F", "F", "M"]})
PltCompare_bivarCategFreq(
df=df, labelCol="label", compareCol="gender")
PltCompare_bivarCategFreq(
df=df, labelCol="label", compareCol="gender",
prop=False, denomConstant=1.0, newColName="cnt per day")
"""
## make a boxplot for multiple columns side by side (SbS); the mean is marked with a star
def BoxPlt_dfColsSbS(
df, cols=None, pltTitle='', xlab='', ylab='value',
    boxColors=['darkkhaki', 'royalblue', 'r', 'g', 'y', 'orange', 'b'],
ylim=None):
from matplotlib.patches import Polygon
data = []
if cols is None:
cols = df.columns
for i in range(len(cols)):
col = cols[i]
data.append(df[col])
fig, ax1 = plt.subplots(figsize=(10, 6))
fig.canvas.set_window_title('')
plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = plt.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide these grid behind plot objects
ax1.set_axisbelow(True)
ax1.set_title(pltTitle, fontsize=20)
ax1.set_xlabel(xlab)
ax1.set_ylabel(ylab)
# Now fill the boxes with desired colors
numBoxes = len(data)
medians = list(range(numBoxes))
for i in range(numBoxes):
box = bp['boxes'][i]
boxX = []
boxY = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = list(zip(boxX, boxY))
boxPolygon = Polygon(boxCoords, facecolor=boxColors[i], label=cols[i])
ax1.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
plt.plot(medianX, medianY, 'k')
medians[i] = medianY[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
plt.plot([np.average(med.get_xdata())], [np.average(data[i])],
color='w', marker='*', markeredgecolor='k')
if ylim is not None:
ax1.set_ylim(ylim)
plt.legend()
def CustomSortDf(df, col, orderedValues):
values = set(df[col].values)
remainingValues = list(set(df[col].values) - set(orderedValues))
orderedValues = orderedValues + remainingValues
df2 = pd.DataFrame(
{col:orderedValues, "dummy_order":range(len(orderedValues))})
df3 = pd.merge(df, df2, how="left", on=[col])
df3 = df3.sort_values(["dummy_order"])
df3 = df3.reset_index(drop=True)
del df3["dummy_order"]
return df3
"""
n = 10
df = pd.DataFrame({
'categ':np.random.choice(a=['a', 'b', 'c', 'd'], size=n, replace=True),
'col1':np.random.uniform(low=0.0, high=100.0, size=n),
'col2':np.random.uniform(low=0.0, high=100.0, size=n),
'col3':np.random.uniform(low=0.0, high=100.0, size=n),
'col4':np.random.uniform(low=0.0, high=100.0, size=n)})
col = "categ"
orderedValues = ["c", "a", "b"]
CustomSortDf(df=df, col=col, orderedValues=orderedValues)
"""
## it plots all columns wrt index
# it uses colors to compare them side by side.
def PltCols_wrtIndex(
df, cols=None, categCol=None, pltTitle='', ymin=None,
ymax=None, yLabel='', xLabel='', colorList=None,
orderedValues=None, alphaList=None, sciNotation=False,
ExtraFcn=None, orient='v',
sizeAlpha=0.75, legendColAlpha=2):
df2 = df.copy()
if cols is None:
cols = list(df2.columns)
if categCol is not None:
df2.index = df2[categCol]
if (categCol in cols):
cols = list(set(cols) - set([categCol]))
# cols = cols.remove(categCol)
# print(cols)
# Mark(categs)
if orderedValues is not None:
df2 = CustomSortDf(df=df2, col=categCol, orderedValues=orderedValues)
df2.index = df2[categCol]
categs = df2.index
num = len(categs)
x = range(num)
if colorList is None:
colorList = [
'r', 'g', 'm', 'y', 'c', 'darkkhaki', 'royalblue',
'darkred', 'crimson', 'darkcyan', 'gold', 'lime', 'black',
'navy', 'deepskyblue', 'k']
if alphaList is None:
alphaList = [0.7] * len(cols)
stretch = 4 * len(cols)
x = stretch * np.array(x)
if orient == 'v':
fig, ax = plt.subplots(figsize=(15*sizeAlpha, 10*sizeAlpha),
dpi=1200, facecolor='w', edgecolor='k')
for i in range(len(cols)):
col = df2[cols[i]]
plt.bar(x + 2*i, col.values, alpha=alphaList[i], label=cols[i],
color=colorList[i], width=2, edgecolor='black',
linewidth=2.0*sizeAlpha)
plt.title(pltTitle, fontsize=20, fontweight='bold')
if sciNotation:
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
#labels = [item.get_text() for item in ax.get_xticklabels()]
labels = categs
ax.set_xticklabels(labels)
locs, labels = plt.xticks(x, categs)
plt.setp(labels, rotation=15, fontsize=17*sizeAlpha, fontweight='bold')
locs2, labels2 = plt.yticks()
plt.setp(labels2, rotation=0, fontsize=17*sizeAlpha, fontweight='bold')
    ncol = max(1, int(len(cols) / legendColAlpha))
plt.legend(
loc='best',
fontsize=17,
prop={'weight': 'semibold',
'size': 17 * sizeAlpha},
ncol=ncol)
axes = plt.gca()
axes.set_ylim([ymin, ymax])
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
if ExtraFcn is not None:
ExtraFcn(ax)
if orient == 'h':
fig, ax = plt.subplots(figsize=(10*sizeAlpha, 15*sizeAlpha), dpi=1200,
facecolor='black', edgecolor='black')
for i in range(len(cols)):
col = df2[cols[i]]
plt.barh(
x + 2 * (i - 1),
col.values,
alpha=alphaList[i],
label=cols[i],
color=colorList[i],
height=2,
edgecolor='black',
linewidth=2.0 * sizeAlpha)
plt.title(pltTitle, fontsize=20*sizeAlpha, fontweight='bold')
if sciNotation:
plt.ticklabel_format(
style='sci', axis='x', scilimits=(0, 0), prop={'weight': 'bold'})
labels = categs
ax.set_yticklabels(labels)
locs, labels = plt.yticks(x, categs)
plt.setp(labels, rotation=0, fontsize=17*sizeAlpha, fontweight='bold')
locs2, labels2 = plt.xticks()
plt.setp(labels2, rotation=20, fontsize=17*sizeAlpha, fontweight='bold')
    ncol = max(1, int(len(cols) / legendColAlpha))
plt.legend(
loc='best',
ncol=ncol,
prop={'weight': 'semibold',
'size': 17 * sizeAlpha})
axes = plt.gca()
axes.set_xlim([ymin, ymax])
ax.set_xlabel(yLabel)
ax.set_ylabel(xLabel)
if ExtraFcn != None:
ExtraFcn(ax)
plt.gca().invert_yaxis()
return {'fig': fig, 'ax': ax}
'''
n = 3
df = pd.DataFrame({
'categ':np.random.choice(
a=['a', 'b', 'c', 'd', 'e', 'f'],
size=n,
replace=False),
'col1':np.random.uniform(low=0.0, high=100.0, size=n),
'col2':np.random.uniform(low=0.0, high=100.0, size=n),
'col3':np.random.uniform(low=0.0, high=100.0, size=n),
'col4':np.random.uniform(low=0.0, high=100.0, size=n)})
orderedValues = ["c", "a", "b", "d", "f", "e"]
PltCols_wrtIndex(
df=df,
cols=['col1', 'col2', 'col3', 'col4'],
categCol='categ',
orderedValues=orderedValues,
orient='v',
sciNotation=True)
'''
## this function creates a plot with each bar representing the distribution
# for a category (given in categCol)
# each distribution is defined on a set of labels
# the distributions are given in each column
def Plt_stackedDist_perCateg(
df, categCol, cols=None, labels=None,
sortCols=None, figsize=(10, 5), mainText=''):
import colorsys
  if cols is None:
    cols = list(df.columns[1:len(df.columns)])
  if labels is None:
    labels = cols
  if sortCols is None:
    sortCols = cols
  df = df.sort_values(sortCols, ascending=False)
m = len(cols)
HSV_tuples = [(x*1.0/m, 0.5, 0.5) for x in range(m)]
RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]
n = df.shape[0]
x = pd.Series(n*[0])
bar_locations = np.arange(n)
fig, ax = plt.subplots(figsize=figsize)
for i in range(len(cols)):
col = cols[i]
y = df[col].values
ax.bar(bar_locations, y, bottom=x, color=RGB_tuples[i], label=labels[i])
x = x + y
plt.legend(loc="best")
bar_locations2 = np.arange(n) + 0.5
plt.xticks(bar_locations2, df[categCol].values, rotation='vertical')
plt.title(mainText)
print(bar_locations)
print(df.loc[0].values)
fig.show()
'''
df0 = pd.DataFrame({'country':['JP', 'US', 'FR'],
'col1':np.random.uniform(low=0.0, high=100.0, size=3),
'col2':np.random.uniform(low=0.0, high=100.0, size=3),
'col3':np.random.uniform(low=0.0, high=100.0, size=3),
'col4':np.random.uniform(low=0.0, high=100.0, size=3)})
Plt_stackedDist_perCateg(
df=df0, categCol='country', cols=['col1', 'col2', 'col3', 'col4'], labels=None,
sortCols=None, figsize=(10, 5), mainText='')
'''
## compares the values (valueCol) for the index (pivotIndCol)
# for various classes given in compareCol
# first it pivots the data and then plots side by side
def PivotPlotWrt(
df, pivotIndCol, compareCol, valueCol,
cols=None, pltTitle='', sizeAlpha=0.75):
dfPivot = df.pivot(index=pivotIndCol, columns=compareCol, values=valueCol)
dfPivot = dfPivot.fillna(0)
dfPivot[pivotIndCol] = dfPivot.index
if cols is None:
cols = list(set(df[compareCol].values))
p = PltCols_wrtIndex(
df=dfPivot,
cols=cols,
categCol=pivotIndCol,
orient='h',
pltTitle=pltTitle,
sizeAlpha=sizeAlpha)
return {'df':dfPivot, 'plt':p}
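## a hypothetical usage sketch for PivotPlotWrt (not from the original source)
'''
df = pd.DataFrame({
    'country': ['US', 'US', 'JP', 'JP', 'FR', 'FR'],
    'expt': ['base', 'test', 'base', 'test', 'base', 'test'],
    'value': [1.0, 2.0, 3.0, 2.5, 0.5, 1.5]})
res = PivotPlotWrt(
    df=df, pivotIndCol='country', compareCol='expt', valueCol='value')
res['df']
'''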
## creating quantiles for a continuous variable and removing repetitions
def Qbins(x, num=10):
qs = list(
np.percentile(a=x,
q=list(100 * np.linspace(
start=0,
stop=(1 - 1.0/num),
num=num))))
qs = list(set([float("-inf")] + qs + [float("inf")]))
qs.sort()
return qs
# cuts uniformly
def Ubins(x, num=10):
b = np.linspace(start=min(x), stop=max(x), num=num)
b = [Signif(2)(x) for x in b]
b = list(set(b))
b = [float("-inf")] + b + [float("inf")]
b.sort()
return b
## cuts using quantiles
def CutQ(x, num=10):
qs = Qbins(x, num)
discX = pd.cut(x, bins=qs)
return(discX)
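## a hypothetical usage sketch for Qbins / Ubins / CutQ (not from the original source)
'''
x = list(np.random.normal(loc=50.0, scale=20.0, size=200))
Mark(Qbins(x, num=5))
Mark(Ubins(x, num=5))
CutQ(x, num=5)[:10]
'''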
## make a bar plot
def BarPlot(y, yLabels, ylim=None, pltTitle='', figSize=[5, 5]):
n = len(y)
x = pd.Series(n*[0])
bar_locations = np.arange(n)
fig, ax = plt.subplots()
fig.set_size_inches(figSize[0], figSize[1])
ax.bar(bar_locations, y, bottom=x, color='r')
plt.legend(loc="best")
bar_locations2 = np.arange(n) + 0.5
plt.xticks(bar_locations2, yLabels, rotation='vertical')
axes = plt.gca()
axes.set_ylim(ylim)
plt.title(pltTitle)
fig.show()
## creates a cut column with NA being an explicit category
def ExplicitNa_cutCol(df, col, cuts, newCol=None):
if newCol is None:
newCol = col + '_range'
df[newCol] = pd.cut(df[col], cuts)
df[newCol] = df[newCol].cat.add_categories(["NA"])
df[newCol] = df[newCol].fillna("NA")
return df
'''
z = np.random.normal(loc=50.0, scale=20.0, size=10)
z = np.insert(z, 0, float('nan'))
df0 = pd.DataFrame({'z':z})
ExplicitNa_cutCol(
df=df0,
col='z',
cuts=[-20, 0, 20, 40, 60, 80, 100, 120, 140, float('inf')], newCol=None)
'''
## order df based on a cut column
def OrderDf_cutCol(df, cutCol, orderCol='order'):
def F(s):
x = re.search(r'.*?\((.*),.*', s)
if x is None:
return(float('-inf'))
return(float(x.group(1)))
df['order'] = df[cutCol].map(F)
df = df.sort_values('order')
return df
'''
z = np.random.normal(loc=50.0, scale=20.0, size=10)
z = np.insert(z, 0, float('nan'))
df0 = pd.DataFrame({'z':z})
u = pd.cut(z, [-20, 0, 20, 40, 60, 80, 100, 120, 140, float('inf')])
df0['col'] = u
df0['col'] = df0['col'].cat.add_categories(["NA"])
df0['col'] = df0['col'].fillna("NA")
OrderDf_cutCol(df=df0, cutCol="col")
'''
## for a variable generated with pd.cut it makes a barplot
# it orders the labels based on their values (rather than freq)
def FreqPlot_cutCol(u, pltTitle='', figSize=[5, 5]):
tab = u.value_counts()
df0 = pd.DataFrame(tab)
df0['label'] = list(df0.index)
df0 = OrderDf_cutCol(df=df0, cutCol='label')
df0.columns = ['value', 'label', 'order']
df0 = df0.sort_values('order')
BarPlot(
y=df0['value'], yLabels=df0['label'], pltTitle=pltTitle, figSize=figSize)
return df0
'''
z = np.random.normal(loc=50.0, scale=20.0, size=1000)
u = pd.cut(z, [-20, 0, 20, 40, 60, 80, 100, 120, 140, float('inf')])
FreqPlot_cutCol(u)
'''
def PropDfTab(tab, ylim=None, categCol='categ', pltIt=False, pltTitle=''):
d = pd.DataFrame(tab)
d.columns = ['freq']
e = (100.0 * d.values) / sum(d.values)
e = [Signif(5)(x) for x in e]
d['prop'] = e
d[categCol] = d.index
if pltIt:
BarPlot(y=e, yLabels=list(d.index), ylim=ylim, pltTitle=pltTitle)
return d
## cut continuous var
def CutConti(x, num=10, method='quantile'):
if method == 'quantile':
b = Qbins(x, num=num)
elif (method == 'uniform'):
b = Ubins(x, num=num)
z = pd.cut(x, bins=b)
return z
## gets a continuous var x, partitions the real line based on quantiles
# or bins of x
# then generates a function which assigns levels to any new value/values
def CutContiFcn(
x,
num=10,
method='quantile',
intervalColName='interval',
levelColName='level',
levelsType='int',
levelPrefix='Lev',
rawValueColName='raw'):
if method == 'quantile':
b = Qbins(x, num=num)
print(b)
elif (method == 'uniform'):
b = Ubins(x, num=num)
intervals = sorted(set(pd.cut(x + b[1:], bins=b)))
  levels = [levelPrefix + str(i) for i in range(len(intervals))]
  levDf = pd.DataFrame({intervalColName: intervals, levelColName: levels})
def F(u):
z = pd.cut(u, bins=b)
df0 = pd.DataFrame({rawValueColName:u, intervalColName: z})
outDf = pd.merge(df0, levDf, on=[intervalColName], how='left')
return(outDf)
return F
'''
x = [1, 3, 4, 5, 66, 77, 88]
F = CutContiFcn(
x, num=10, method='quantile', intervalColName='interval',
levelColName='level', levelPrefix='Lev', rawValueColName='raw')
F(x)
F(x + [5, 1, 3, 100, -1 , 90, 2.2])
'''
## cuts continuous data and creates a bar plot
def CutBarPlot(x, num=10, method='quantile', pltTitle='', figSize=[5, 5]):
z = CutConti(x, num=num, method=method)
u = z.value_counts()
#print(u)
d = pd.DataFrame(u)
d = 100.0 * (d / d.sum())
d = d.sort_index()
BarPlot(y=d.values, yLabels=d.index, ylim=None, pltTitle=pltTitle,
figSize=figSize)
## returns a function which calculates the quantiles given in q (a list of percentiles)
def QuantileFcn(q):
def F(x):
return(np.percentile(a=x, q=q))
return F
def Plt_quantilesPerSlice(
df, sliceCol, respCol, gridNum=100.0, pltTitle=''):
slices = list(set(df[sliceCol].values))
outDict = {}
for sl in slices:
x = df[df[sliceCol] == sl][respCol].values
grid = list(
gridNum * np.linspace(
start=1.0/float(gridNum),
stop=(1 - 1.0/float(gridNum)),
num=int(gridNum)))
q = QuantileFcn(grid)(x)
outDict[sl] = q
plt.plot(grid, q, label=sl)
plt.legend()
plt.title(pltTitle)
return pd.DataFrame(outDict)
'''
df = pd.DataFrame({
'value':[1, 1, 1, 2, 3, 4, 2, 2, 2],
'categ':['a', 'a', 'b', 'b', 'b', 'a', 'a', 'b', 'a']})
Plt_quantilesPerSlice(df=df, sliceCol='categ', respCol='value', pltTitle='')
'''
## takes a vector of labels, eg pandas series
# it returns a freq table with props in dataframe format
def GenFreqTable(x, rounding=None):
freqTab = x.value_counts()
distbnTab = 100.0 * x.value_counts() / freqTab.sum()
labels = freqTab.keys()
freqValues = list(freqTab)
propValues = list(distbnTab)
if rounding is not None:
propValues = [Signif(rounding)(x) for x in propValues]
outDict = {'label':labels, 'freq':freqValues, 'prop':propValues}
outDf = pd.DataFrame(outDict)
return outDf[['label', 'freq', 'prop']]
'''
x = pd.Series(['a', 'a', 'b', 'b', 'c'])
print(GenFreqTable(x))
'''
## builds a categ distbn for each combination after groupby indCols
def CategDistbnDf(df, indCols, categCol, rounding=None):
def F1(x):
return tuple(GenFreqTable(x)['label'].values)
def F2(x):
return tuple(GenFreqTable(x)['freq'].values)
def F3(x):
return tuple(GenFreqTable(x, rounding=4)['prop'].values)
df = df[indCols + [categCol]].copy()
df[categCol + '_freq'] = df[categCol]
df[categCol + '_prop'] = df[categCol]
g = df.groupby(indCols)
outDf = g.aggregate({categCol:F1 , categCol + '_freq':F2,
categCol + '_prop':F3})
outDf = outDf.reset_index()
return outDf[BringElemsToFront(outDf.columns, indCols + [categCol])]
'''
df = pd.DataFrame({
'user_id':[1, 1, 1, 2, 2, 2, 2, 2, 1, 1],
'interface':['A', 'A', 'A', 'B', 'B', 'B', 'A', 'A', 'A', 'B'],
'categ':['a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'b', 'b']})
dnDf = CategDistbnDf(
df=df, indCols=['user_id', 'interface'], categCol='categ', rounding=None)
'''
## compare label distribution across slices
def LabelDistbn_acrossSlice(
df,
sliceCol,
labelCol,
slices=None,
orderedValues=None,
pltIt=True,
pltTitle='',
orderIntervals=False,
sortBySlice=False,
limitLabelNum=20):
def F(group):
return CombinFreqDf(group[[sliceCol, labelCol]])
g = df.groupby([sliceCol], as_index=False)
outDf = g.apply(F).reset_index(drop=True)
if slices is None:
slices = list(set(df[sliceCol].values))
horizDf = None
for s in slices:
s = str(s)
subDf = outDf[outDf[sliceCol].map(str) == s][[labelCol, 'cnt', 'prop (%)']]
subDf.columns = [labelCol, s + '_cnt', s + '_prop (%)']
#Mark(subDf[:2])
if horizDf is None:
horizDf = subDf
horizDf['total_cnt'] = subDf[s + '_cnt']
else:
horizDf = pd.merge(horizDf, subDf, on=labelCol, how='outer')
horizDf['total_cnt'] = horizDf['total_cnt'] + horizDf[s + '_cnt']
#Mark(subDf, 'subDf')
#Mark(horizDf, 'horizDf')
print(horizDf)
horizDf = horizDf.sort_values(['total_cnt'], ascending=[0])
if orderIntervals:
horizDf = OrderDf_cutCol(df=horizDf, cutCol=labelCol, orderCol='order')
if sortBySlice:
horizDf.sort_values([sliceCol])
if limitLabelNum is not None:
horizDf = horizDf[:limitLabelNum]
p = None
if pltIt:
p = PltCols_wrtIndex(
df=horizDf,
cols=[str(x) + '_prop (%)' for x in slices],
categCol=labelCol,
orderedValues=orderedValues,
orient='h',
pltTitle=pltTitle)
return {'outDf':outDf, 'horizDf':horizDf, 'p':p}
'''
df = GenUsageDf_forTesting()
Mark(df[:2])
res = LabelDistbn_acrossSlice(
df=df, sliceCol='expt', labelCol='prod', pltIt=True)
res['p']
res = LabelDistbn_acrossSlice(
df=df,
sliceCol='expt',
labelCol='prod',
orderedValues=[],
pltIt=True)
'''
# make a single label distbn
def LabelDistbn(
df,
labelCol,
orderIntervals=False,
pltTitle="",
CustomOrder=None,
figSize=[10, 8]):
out = CombinFreqDf(df[[labelCol]])
del out['cnt']
out['prop (%)'] = out['prop (%)'].map(Signif(3))
if orderIntervals:
out = OrderDf_cutCol(df=out, cutCol=labelCol, orderCol='order')
del out['order']
if CustomOrder is not None:
out = CustomOrder(out)
fig, ax = plt.subplots();
fig.set_size_inches(figSize[0], figSize[1])
plt.bar(range(len(out)), out['prop (%)'])
plt.xticks(np.array(range(len(out))) + 0.5, out[labelCol], rotation=90)
plt.grid(False)
plt.grid(axis='y', linewidth=1, color='red', alpha=0.5)
if pltTitle == "":
pltTitle = labelCol + " distbn"
plt.title(pltTitle, fontsize=20, fontweight='bold')
return out
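## a hypothetical usage sketch for LabelDistbn (not from the original source);
# it reuses the testing df generated at the top of this part
'''
df = GenUsageDf_forTesting()
LabelDistbn(df=df, labelCol='prod')
'''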
##
def LabelDistbn_perSlice(
df,
sliceCol,
labelCol,
pltIt=True,
pltTitle='',
orderIntervals=False,
sortBySlice=False,
labels=None,
sizeAlpha=0.75):
def F(group):
return CombinFreqDf(group[[sliceCol, labelCol]])
g = df.groupby([labelCol], as_index=False)
outDf = g.apply(F).reset_index(drop=True)
if labels is None:
labels = list(set(df[labelCol].values))
horizDf = None
for l in labels:
l = str(l)
subDf = outDf[outDf[labelCol].map(str) == l][[sliceCol, 'cnt', 'prop (%)']]
subDf.columns = [sliceCol, l + '_cnt', l + '_prop (%)']
if horizDf is None:
horizDf = subDf
horizDf['total_cnt'] = subDf[l + '_cnt']
else:
horizDf = pd.merge(horizDf, subDf, on=sliceCol, how='outer')
horizDf = horizDf.fillna(0)
horizDf['total_cnt'] = horizDf['total_cnt'] + horizDf[l + '_cnt']
horizDf = horizDf.sort_values(['total_cnt'], ascending=[0])
if orderIntervals:
horizDf = OrderDf_cutCol(df=horizDf, cutCol=sliceCol, orderCol='order')
if sortBySlice:
horizDf = horizDf.sort_values([sliceCol])
horizDf = horizDf[:20]
p = None
for l in labels:
horizDf[l + '_%'] = 100 * (horizDf[l + '_cnt'] / horizDf['total_cnt'])
if pltIt:
p = PltCols_wrtIndex(
df=horizDf,
cols=[str(x) + '_%' for x in labels],
categCol=sliceCol,
orient='h',
pltTitle=pltTitle,
sizeAlpha=sizeAlpha)
return {'outDf':outDf, 'horizDf':horizDf, 'p':p}
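## a hypothetical usage sketch for LabelDistbn_perSlice (not from the original source)
'''
df = GenUsageDf_forTesting()
res = LabelDistbn_perSlice(df=df, sliceCol='expt', labelCol='prod')
res['horizDf']
'''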
## interpolate missing categ values wrt certain columns
# condDict will determine which subset of data should be used to make predictions
# replacedValues are the values which need replacement/interpolation
# dropUnassigned determines whether rows which remain unassigned are dropped
def InterpCategColWrt(
df, yCol, xCols, condDict={}, replacedValues=None, dropUnassigned=True):
df2 = df.copy()
if len(condDict) > 0:
ind = BuildCondInd(df=df2, condDict=condDict)
df2 = df2[ind].copy()
if replacedValues is not None:
df2 = df2[~df2[yCol].isin(replacedValues)].copy()
predDf = CategDistbnDf(df=df2, indCols=xCols, categCol=yCol)
predDf[yCol + '_pred'] = predDf[yCol].map(lambda x: x[0])
ind = df[yCol].isin(replacedValues)
df3 = df[ind].copy()
df4 = pd.merge(df3, predDf[xCols + [yCol + '_pred']], on=xCols, how='left')
df.loc[ind, yCol] = df4[yCol + '_pred'].values
if dropUnassigned:
df = df.dropna(subset=[yCol])
return df
'''
df = pd.DataFrame({
'user_id':[1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3],
'os':['and', 'and', 'and', 'randSurface', 'randSurface', 'randSurface', 'randSurface', 'and', 'and', 'and', 'randSurface', 'randSurface', 'randSurface', 'randSurface', 'randSurface'],
'y':[None, 'c', 'b', 'b', 'b', 'b', 'a', 'a', 'a', 'b', 'b', 'nan', 'b', 'b', None],
'country': ['us', 'us', 'us', 'us', 'us', 'jp', 'us', 'jp', 'us', 'us', 'us', 'us', 'jp', 'us', 'us']})
print(df)
InterpCategColWrt(df=df,
yCol='y',
xCols=['user_id', 'os'],
condDict={'country':['us']},
replacedValues=['nan', None],
dropUnassigned=False)
'''
## for a df with sliceCols, it groups by sliceCols
# and for each categCols combination,
# it adds a total count column for the valueCol
# for example sliceCols: country, categCols=[sequence, event_1st, event_2nd]
# and valueCol=sequence_count
# we can figure out the total frequency of each sequence in each country
# as well as the frequency of the first event for the same country (sliceCols)
# we also agg a grand total for the valueCols for each combination of sliceCols
def AddTotalsDf(
df, categCols, valueCols, sliceCols=[], aggFnDict=sum,
integOutOther=False):
## if there are no sliceCols, we generate a tempCol to be sliceCol
## then we delete it at the end
l = len(sliceCols)
if l == 0:
sliceCols = ['tempCol']
df['tempCol'] = 1
## integrates out wrt sliceCols + categCols first.
## so other columns will be dropped
## and the valueCols will be integrated out across,
## when there are repeated sliceCols + categCol even if there is no extra col
if integOutOther:
df = df[sliceCols + categCols + valueCols]
g = df.groupby(sliceCols + categCols)
df = g.agg(aggFnDict)
df = df.reset_index()
df0 = df[sliceCols + categCols + valueCols]
outDf = df.copy()
for categCol in categCols:
g = df0.groupby(sliceCols + [categCol])
aggDf = g.agg(aggFnDict)
aggDf= aggDf.reset_index()
aggDf.columns = (sliceCols +
[categCol] +
[categCol + '_' + x + '_agg' for x in valueCols])
outDf = pd.merge(outDf, aggDf, on=sliceCols + [categCol], how='left')
# add slice (sliceCols slice) totals: same as above but we drop the categCol
df0 = df[sliceCols + valueCols]
g = df0.groupby(sliceCols)
aggDf = g.agg(aggFnDict)
aggDf= aggDf.reset_index()
aggDf.columns = sliceCols + [x + '_slice_total' for x in valueCols]
outDf = pd.merge(outDf, aggDf, on=sliceCols, how='left')
# reorder the columns
cols = (sliceCols +
sorted(categCols) +
valueCols +
list(sorted(set(outDf) - set(sliceCols + categCols + valueCols))))
outDf = outDf[cols]
## remove extra column if it was created
if l == 0:
del outDf['tempCol']
return outDf
'''
df = pd.DataFrame({
'country':['JP', 'JP', 'JP', 'BR', 'BR', 'BR', 'JP', 'JP', 'JP', 'BR'],
'seq':['a>b', 'a>b', 'b>a', 'b>a', 'a>b', 'a>b', 'a>c', 'a>c', 'b>c', 'c>b'],
'1st':['a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'b', 'c'],
'2nd':['b', 'b', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'b'],
'count':[10, 11, 1, 20, 2, 2, 2, 200, 1, 1],
'utility':[-10, -11, 1, 20, 2, 2, 2, -200, 1, 1],})
sliceCols = ['country']
categCols = ['seq', '1st', '2nd']
valueCols = ['count', 'utility']
aggFnDict = {'count':sum, 'utility':np.mean}
AddTotalsDf(
df=df, categCols=categCols, valueCols=valueCols,
sliceCols=sliceCols, aggFnDict=sum)
AddTotalsDf(
df=df, categCols=categCols, valueCols=valueCols,
sliceCols=sliceCols, aggFnDict=aggFnDict)
AddTotalsDf(
df=df, categCols=categCols, valueCols=valueCols,
sliceCols=sliceCols, aggFnDict=aggFnDict, integOutOther=True)
AddTotalsDf(
df=df, categCols=categCols, valueCols=valueCols,
sliceCols=[], aggFnDict=aggFnDict, integOutOther=True)
'''
## for a data frame with a countCol, we do bootstrap
def BsWithCounts(df, countCol=None):
if countCol == None:
n = df.shape[0]
ind = np.random.choice(a=n, size=n, replace=True, p=None)
df2 = df.iloc[ind]
return(df2)
df = df.reset_index(drop=True)
rowInd = list(range(len(df)))
counts = df[countCol].values
longInd = []
for a, b in zip(rowInd, counts):
longInd.extend([a] * b)
bsLongInd = np.random.choice(
a=longInd, size=len(longInd), replace=True, p=None)
bsIndDf = pd.DataFrame(pd.Series(bsLongInd).value_counts())
bsRowInd = list(bsIndDf.index)
bsCounts = bsIndDf[0].values
df2 = df.iloc[bsRowInd]
df2[countCol] = bsCounts
df2 = df2.reset_index(drop=True)
return df2
'''
df = pd.DataFrame({
'a':['cats', 'horses', 'dogs', 'wolves'],
'count':[2, 10, 4, 1]})
Mark(df, 'original df')
countCol = 'count'
Mark(BsWithCounts(df, countCol), ' using counts')
Mark(BsWithCounts(df, countCol=None), ' not using counts')
'''
## get a sublist with unique elements
# while preserving order
def UniqueList(l):
seen = set()
seen_add = seen.add
return [x for x in l if not (x in seen or seen_add(x))]
'''
x = [1, 2, 1, 1, 2, 3] + list(range(100000)) + [1, 2, 1, 1, 2, 3]
tic = time.perf_counter()
UniqueList(x)
toc = time.perf_counter()
Mark((toc-tic)*100)
tic = time.perf_counter()
set(x)
toc = time.perf_counter()
Mark((toc-tic)*100)
'''
## bring certain elements (subList) of a list (l) to the front
# without re-ordering the others
def BringElemsToFront(l, subList):
front = []
for k in range(len(subList)):
front = front + [j for i,j in enumerate(l) if j == subList[k]]
end = [j for i,j in enumerate(l) if not (j in subList)]
return front + end
'''
BringElemsToFront(l=[1, 2, 3, 1], subList=[1])
BringElemsToFront(l=[1, 2, 3, 1, 4, 5,], subList=[1, 4, 5])
'''
## get a fcn which returns a color grid of size n
def GetColorGridFcn(n):
'''Returns a function that maps each index in 0, 1, ... N-1 to a distinct
RGB color.'''
color_norm = matplotlib.colors.Normalize(vmin=0, vmax=n-1)
scalar_map = matplotlib.cm.ScalarMappable(norm=color_norm, cmap='hsv')
def map_index_to_rgb_color(index):
return scalar_map.to_rgba(index)
return map_index_to_rgb_color
'''
def main():
n = 5
fig=plt.figure()
ax=fig.add_subplot(111)
plt.axis('scaled')
ax.set_xlim([ 0, n])
ax.set_ylim([-0.5, 0.5])
cmap = GetColorGridFcn(n)
for i in range(n):
col = cmap(i)
rect = plt.Rectangle((i, -0.5), 1, 1, facecolor=col)
ax.add_artist(rect)
ax.set_yticks([])
plt.show()
if __name__=='__main__':
main()
'''
## takes a dictionary of lists to one string
def DictOfLists_toString(
d,
dictElemSepr='__',
listElemSepr='_',
keyValueSepr=':',
noneString=''):
  if d is None or d == {}:
    return(noneString)
  keys = sorted(map(str, d.keys()))
out = ''
for key in keys:
if (d[key] != None):
l = [str(x) for x in d[key]]
value = str(listElemSepr.join(l))
if out != '':
out = out + dictElemSepr
out = out + key + keyValueSepr + value
return out
'''
d = {'z':[2], 'd':[1], 'e':[2]}
DictOfLists_toString(d)
d = {'z':[2], 'd':[1], 'e':None}
DictOfLists_toString(d)
condDict = {'form_factor':['PHN']}
condDict = {'form_factor':None}
condDict = {'form_factor':['PHN'], 'country':['JP']}
condDict = None
condStr = DictOfLists_toString(condDict, dictElemSepr='__', listElemSepr='_')
'''
## plots confidence intervals given in each row
# rows are labeled using labelCol if it is given
def PlotCI(df, colUpper, colLower, y=None, col=None, ciHeight=0.5,
color='grey', labelCol=None, pltLabel=''):
if y is None:
y = range(len(df))
minCiWidth = (df[colUpper] - df[colLower]).min()
if col is not None:
## following was troubling in log scale,
# the width of the lines were changing in visualization (not desired)
'''
p = plt.barh(
bottom=y,
width=np.array([minCiWidth]*len(y)),
left=df[col],
height = ciHeight,
color='green',
alpha=1,
label=None)
'''
for i in range(len(y)):
plt.plot(
[df[col].values[i],
df[col].values[i]],
[y[i], y[i] + ciHeight],
color=color,
linestyle='-',
alpha=0.7,
linewidth=4)
plt.plot(
[df[col].values[i],
df[col].values[i]],
[y[i], y[i] + ciHeight],
color="black",
linestyle='-',
alpha=0.5,
linewidth=2,
dashes=[6, 2])
if int(matplotlib.__version__[0]) < 3:
p = plt.barh(
bottom=y,
width=(df[colUpper]-df[colLower]).values,
left=df[colLower],
color=color,
edgecolor='black',
height=ciHeight,
alpha=0.6,
label=pltLabel)
else:
p = plt.barh(
y=y,
width=(df[colUpper]-df[colLower]).values,
left=df[colLower],
align="edge",
color=color,
edgecolor='black',
height=ciHeight,
alpha=0.6,
label=pltLabel)
if labelCol is not None:
plt.yticks(y, df[labelCol].values, rotation='vertical');
'''
df0 = pd.DataFrame({'med':[1, 2, 3, 10], 'upper':[2, 5, 6, 12],
'lower':[-1, -2, -3, 4], 'categ':['a', 'b', 'c', 'd']})
PlotCI(df=df0, colUpper='upper', colLower='lower', y=None, col='med',
ciHeight=0.5, color='grey', labelCol='categ', pltLabel='')
'''
## compares the CIs for the available labels in labelCol
# each slice is plotted in a different color for comparison
def PlotCIWrt(
df, colUpper, colLower, sliceCols, labelCol, col=None,
ciHeight=0.5, rotation = 0, addVerLines=[], logScale=False,
lowerLim=None, pltTitle='', figSize=[5, 20]):
df2 = Concat_stringColsDf(
df=df.copy(),
cols=sliceCols,
colName='slice_comb',
sepStr='-')
labelSet = UniqueList(df2[labelCol].values)
labelIndDf = pd.DataFrame({labelCol: labelSet})
labelIndDf = labelIndDf.sort_values([labelCol])
labelIndDf['labelInd'] = range(len(labelSet))
n = len(labelIndDf)
## groupby each slice
slicesSet = set(df2['slice_comb'])
g = df2.groupby(['slice_comb'])
sliceNum = len(g)
sliceNames = list(g.groups.keys())
sliceNames.sort()
ColorFcn = GetColorGridFcn(sliceNum + 2)
plt.figure(1);
fig, ax = plt.subplots();
fig.set_size_inches(figSize[0], figSize[1]*(n/20.0))
for i in range(sliceNum):
sliceName = sliceNames[i]
df3 = g.get_group(sliceName)
df3 = pd.merge(df3, labelIndDf, on=[labelCol], how='outer')
df3 = df3.sort_values([labelCol])
df3 = df3.fillna(0)
ciHeight = 1.0 / sliceNum
shift = ciHeight * i
y = [(float(x) + shift) for x in range(n)]
assert (len(df3) == len(y)),("len(y) must be the same as merged df (df3)." +
" This might be because of repeated rows in df3")
PlotCI(
df=df3, colUpper=colUpper, colLower=colLower, y=y, col=col,
ciHeight=ciHeight, color=ColorFcn(i + 1), labelCol=labelCol,
pltLabel=sliceName)
for j in range(n + 1):
plt.axhline(y=j, color='grey', alpha=0.95)
labels = [item.get_text() for item in ax.get_xticklabels()]
labels = list(labelIndDf[labelCol].values)
ax.set_yticklabels(labels)
locs, labels = plt.yticks([(float(x) + 0.5) for x in range(n)], labels)
plt.setp(labels, rotation=rotation, fontweight='bold', fontsize="large")
for x in addVerLines:
plt.axvline(x=x, color='orange', alpha=0.5)
if logScale:
plt.xscale('log')
if len(addVerLines) > 0:
#labels = [item.get_text() for item in ax.get_xticklabels()]
#ax.set_xticklabels(map(str, addVerLines))
ax = plt.gca() # grab the current axis
ax.set_xticks(addVerLines) # choose which x locations to have ticks
ax.set_xticklabels(addVerLines) # set the labels to display at those ticks
#plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# ncol=2, mode="expand", borderaxespad=0.)
plt.legend(
bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
prop={'weight':'bold', 'size':'large'})
  plt.xlim((lowerLim, None))
plt.title(
pltTitle, fontname="Times New Roman",fontweight="bold",
fontsize="x-large")
return fig
#plt.show()
'''
## Example
df0 = pd.DataFrame({
'med':[1, 2, 3, 10, 11, 12, 1, 2],
'upper':[2, 5, 6, 12, 13, 16, 5, 6],
'lower':[-1, -2, -3, 4, 2, 2, 1, 2],
'categ':['a', 'b', 'c', 'd', 'a', 'c', 'd', 'a'],
'country':['JP', 'JP', 'JP', 'US', 'US', 'US', 'BR', 'BR']})
res = PlotCIWrt(
df=df0,
colUpper='upper',
colLower='lower',
sliceCols=['country'],
labelCol='categ',
col='med',
ciHeight=0.5,
rotation = 0,
    pltTitle="CI comparison across countries",
figSize=[10, 30])
'''
## this function partitions a df by key columns (keyCols)
# a key combination passes if at least one of its rows
# (which may involve other columns) satisfies the conditions in condDict;
# any key combination which passes at least once is labeled as passing
def PartDf_byKeyCols_wrtCond(
df, keyCols, condDict, passColName='passesCond'):
keyDfUnique = df[keyCols].drop_duplicates()
ind = BuildCondInd(df=df, condDict=condDict)
passDf = df[ind].copy()
passKeyDf = passDf[keyCols].drop_duplicates()
passKeyDf[passColName] = True
keyDfLabeled = pd.merge(keyDfUnique, passKeyDf, on=keyCols, how='left')
keyDfLabeled = keyDfLabeled.fillna(False)
dfLabeled = pd.merge(df, keyDfLabeled, on=keyCols, how='left')
return {'dfLabeled':dfLabeled, 'keyDfLabeled':keyDfLabeled}
'''
df = pd.DataFrame({
'user_id':[1, 1, 2, 2, 3, 3, 4, 4],
'device':['pixel', 'sams', 'lg', 'lg', 'sams', 'pixel', 'nex', 'pixel'],
'country':['us', 'us', 'jp', 'jp', 'kr', 'kr', 'in', 'in']})
outDict = PartDf_byKeyCols_wrtCond(
df=df, keyCols=['user_id'], condDict={'device':['pixel'],
'country':['us', 'in']}, passColName='passesCond')
Mark(df)
Mark(outDict['dfLabeled'])
Mark(outDict['keyDfLabeled'])
'''
## create good pandas boxplots
def PandasBoxPlt(
df, col, by, ylim=None, yscale=None, pltTitle=None, figSize=None):
# demonstrate how to customize the display different elements:
boxprops = dict(linestyle='-', linewidth=4, color='k')
medianprops = dict(linestyle='-', linewidth=4, color='k')
bp = df.boxplot(
column=col, by=by,
showfliers=False, showmeans=True,
boxprops=boxprops, medianprops=medianprops)
if yscale is not None:
plt.yscale(yscale)
[ax_tmp.set_xlabel('') for ax_tmp in np.asarray(bp).reshape(-1)]
fig = np.asarray(bp).reshape(-1)[0].get_figure()
if figSize is not None:
fig.set_size_inches(figSize[0], figSize[1])
plt.xticks(rotation=45)
axes = plt.gca()
if pltTitle is not None:
plt.title(pltTitle)
if ylim is not None:
axes.set_ylim(ylim)
return plt.show()
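## A hedged usage sketch for PandasBoxPlt, added for illustration only;
## the data frame and its 'value'/'country' columns are made up.
"""
df0 = pd.DataFrame({
    'value': [1.0, 2.5, 3.2, 10.0, 11.5, 9.8],
    'country': ['JP', 'JP', 'JP', 'US', 'US', 'US']})
PandasBoxPlt(
    df=df0, col='value', by='country', ylim=[0, None],
    pltTitle='value by country', figSize=[6, 4])
"""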
def Plt_compareUsageSet(
df, unitCol, usageCol, compareCol=None, excludeValues=[],
mapToOther=["UNKNOWN", "MOBILE_UNKNOWN"], removeOther=True,
setLabelsNumMax=15, bpPltTitle=None):
if compareCol is None:
compareCol = "..."
df[compareCol] = "..."
if len(excludeValues) > 0:
df = df[~df[usageCol].isin(excludeValues)]
df2 = df[[unitCol, compareCol, usageCol]].copy()
res = Remap_lowFreqCategs(
df=df2, cols=[usageCol], newLabels="OTHER",
otherLabelsToReMap=(["", "nan"] + mapToOther),
freqThresh=10, labelsNumMax=30)
df2 = res["df"]
if removeOther:
df2 = df2[df2[usageCol] != "OTHER"]
g = df2.groupby([unitCol, compareCol], as_index=False)
dfSet = g.agg({usageCol:lambda x: tuple(sorted(set(x)))})
res = Remap_lowFreqCategs(
df=dfSet, cols=[usageCol], newLabels="OTHER",
otherLabelsToReMap=["", "nan"],
freqThresh=5, labelsNumMax=setLabelsNumMax)
dfSet = res["df"]
if removeOther:
dfSet = dfSet[dfSet[usageCol] != "OTHER"]
pltTitle = usageCol + " set distbn " + " across " + unitCol + "s"
res = LabelDistbn_acrossSlice(
df=dfSet, sliceCol=compareCol, labelCol=usageCol,
pltIt=True, pltTitle=pltTitle)
dfCount = g.agg({usageCol:lambda x: len(set(x))})
res["dfCount"] = dfCount
if bpPltTitle is None:
bpPltTitle = "# of " + usageCol + " across " + unitCol + "s"
PandasBoxPlt(
df=dfCount, col=usageCol, by=compareCol,
ylim=[0, None], pltTitle=bpPltTitle)
return res
def BirthYear_toAgeCateg(x, currentYear=None):
if currentYear is None:
currentYear = datetime.datetime.now().year
if x is None or x == "" or x == 0 or math.isnan(x):
return "other"
x = float(x)
age = currentYear - x
if age <= 17:
return "<18"
if age <= 25:
return "18-25"
if age <= 35:
return "26-35"
if age <= 50:
return "36-50"
return ">51"
def BirthYear_toAge(x, currentYear=None, minBirthYear=1940):
if currentYear is None:
currentYear = datetime.datetime.now().year
if x is None or x == "" or x == 0 or math.isnan(x):
return None
if x < minBirthYear or x > currentYear:
return None
x = float(x)
return (currentYear - x)
"""
BirthYear_toAgeCateg(1900)
"""
def Plt_compareDensity(
df, compareCol, valueCol, compareValues=None):
if compareValues is None:
compareValues = set(df[compareCol].values)
# Iterate through the five airlines
for value in compareValues:
# Subset to the airline
subset = df[df[compareCol] == value]
# Draw the density plot
sns.distplot(
subset[valueCol], hist=False, kde=True,
kde_kws={'linewidth': 3, "alpha": 0.75},
label=value)
# Plot formatting
plt.legend(prop={'size': 8}, title=compareCol)
plt.title('Compare Density Plot for Multiple ' + compareCol)
plt.xlabel(valueCol)
plt.ylabel('Density')
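## A minimal, assumed usage sketch for Plt_compareDensity (not from the original
## code); the 'country' and 'score' columns are hypothetical.
"""
df0 = pd.DataFrame({
    'country': ['JP'] * 50 + ['US'] * 50,
    'score': list(np.random.normal(0, 1, 50)) + list(np.random.normal(1, 1, 50))})
Plt_compareDensity(df=df0, compareCol='country', valueCol='score')
"""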
## drops (multiple) ending vowels from a string
def DropEndingVowels(s, minLength=2):
cond = True
while cond and len(s) > minLength:
if s[len(s)-1].lower() in ["a", "o", "e", "u", "i"]:
s = s[0:(len(s)-1)]
else:
cond = False
return s
def DropEndingChars(s, chars, minLength=2):
cond = True
while cond and len(s) > minLength:
if s[len(s)-1].lower() in chars:
s = s[0:(len(s)-1)]
else:
cond = False
return s
## abbreviates a string.
# first we abbreviate each word in a string (phrase)
# then we concat them back and abbreviate the whole phrase
def AbbrString(
s,
wordLength=6,
replaceList=["/", "&", " and ", "-", ",", ";"],
sep="-",
totalLength=None,
wordNumLimit=None):
for char in replaceList:
s = s.replace(char, " ")
sList = s.split(" ")
sList = [s[0:wordLength] for s in sList]
sList = [x for x in sList if x not in ["", " ", " ", " "]]
sList = [DropEndingVowels(s) for s in sList]
sList = list(collections.OrderedDict.fromkeys(sList))
print(sList)
if wordNumLimit is not None:
sList = sList[:wordNumLimit]
s = sep.join(sList)
if totalLength is not None:
s = s[0:totalLength]
s = DropEndingVowels(s)
s = DropEndingChars(s=s, chars=["/", "&", " and ", "-", sep, " ", ",", ";"])
return s
"""
s = "language books/"
AbbrString(s, sep="-")
"""
## replace in pandas is slow
def ReplaceValues_dfCols_viaReplace(
df, cols, values, newValues, newCols=None):
if newCols is None:
newCols = cols
mappingDict = dict(zip(values, newValues))
df[newCols] = df[cols].replace(mappingDict)
return df
def ReplaceValues_dfCols(df, cols, values, newValues, newCols=None):
if newCols is None:
newCols = cols
m = pd.Series(newValues, values)
df[newCols] = df[cols].stack().map(m).unstack()
return df
"""
import datetime
import pandas as pd
import numpy as np
import string
n = 10000
m = 500
df = pd.DataFrame(
pd.DataFrame(
np.random.choice(list(string.letters), n * m * 3) \
.reshape(3, -1)).sum().values.reshape(n, -1))
cols = [0, 1]
u = np.unique(df[cols])
fromSeries = pd.Series(u)
toSeries = fromSeries + "XXX"
fromValues = fromSeries.values
toValues = toSeries.values
a = datetime.datetime.now()
df0 = ReplaceValues_dfCols(
df=df.copy(), cols=cols, values=fromValues, newValues=toValues)
b = datetime.datetime.now()
time1 = b-a
print(time1)
a = datetime.datetime.now()
df1 = ReplaceValues_dfCols_viaReplace(
df=df.copy(), cols=cols, values=fromValues,
newValues=toValues, newCols=None)
b = datetime.datetime.now()
time2 = b-a
print(time2)
print(time2.total_seconds() / time1.total_seconds())
"""
def AbbrStringCols(
df, cols, newCols=None, wordLength=6,
replaceList=["/", "&", " and ", "-"], sep="-",
totalLength=None, wordNumLimit=None):
values = np.unique(df[cols])
def Abbr(s):
return AbbrString(
s=s, wordLength=wordLength, replaceList=replaceList,
sep=sep, totalLength=totalLength, wordNumLimit=wordNumLimit)
abbrValues = [Abbr(s) for s in values]
mapDf = pd.DataFrame({"value":values, "abbr_values": abbrValues})
df = ReplaceValues_dfCols(
df=df, cols=cols, values=values, newValues=abbrValues, newCols=newCols)
return {"df":df, "mapDf":mapDf}
"""
df = pd.DataFrame({
"col":["life is beautiful", "i like mountains", "ok", "cool"]})
#AbbrStringCols(df, cols=["col"])
AbbrStringCols(df=df, cols=["col"], totalLength=10, wordNumLimit=None)
"""
## convert data.frame to code
def ConvertDf_toCode(df):
s = (
"df = pd.DataFrame( %s )"
% (str(df.to_dict()).replace(" nan"," float('nan')")))
return s
"""
df = pd.DataFrame({"a":[1, 2, 3], "b":[1, 2, 3]})
ConvertDf_toCode(df)
"""
|
google/expt-analysis
|
python/data_analysis.py
|
data_analysis.py
|
py
| 82,173 |
python
|
en
|
code
| 6 |
github-code
|
6
|
1805360050
|
#! /usr/bin/env python3
count = {}
def char_count(str):
for char in str:
c = count.get(char)
if c is None:
count[char] = 1
else:
count[char] += 1
print(count)
if __name__ == '__main__':
s = input('Enter a string')
char_count(s)
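# Added note: a sketch of the same character tally using collections.Counter,
# shown only as a comparison with the dict-based approach above.
"""
from collections import Counter

def char_count_counter(s):
    # Counter builds the per-character counts in a single pass
    print(dict(Counter(s)))
"""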
|
wffh/project
|
count_str_fast.py
|
count_str_fast.py
|
py
| 297 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23435779102
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import sys
import json
import mainwindow, mystock, recepe, compute
class MyMainWindow(mainwindow.Ui_MainWindow):
def setupUi(self, mw, database):
super().setupUi(mw)
self.tabWidget = QtWidgets.QTabWidget()
mw.setCentralWidget(self.tabWidget)
self.ms = QtWidgets.QWidget()
self.mystock = mystock.MyStock()
self.mystock.setupUi(self.ms, database["bernard"]["stock"])
self.tabWidget.addTab(self.ms, "STOCK")
self.mr = QtWidgets.QWidget()
self.myRecepe = recepe.Ui_TabRecepe()
self.myRecepe.setupUi(self.mr)
self.tabWidget.addTab(self.mr, "Recepe")
self.mc = QtWidgets.QWidget()
self.myCompute = compute.Ui_TabCompute()
self.myCompute.setupUi(self.mc)
self.tabWidget.addTab(self.mc, "Compute")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mw = QtWidgets.QMainWindow()
ui = MyMainWindow()
ui.setupUi(mw)
mw.show()
sys.exit(app.exec_())
|
bernard169/open-breware
|
mymainwindow.py
|
mymainwindow.py
|
py
| 1,084 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14872333572
|
import torch
from torch import nn
import torch.nn.functional as F
from torch import optim
from torchvision import datasets, transforms, models
from workspace_utils import active_session
from collections import OrderedDict
import numpy as np
from PIL import Image
import argparse
import json
parser = argparse.ArgumentParser(description='Inference for classification')
parser.add_argument('-i','--image_path',type=str, metavar='', required=True, help='path to image to predict e.g. flowers/test/class/image')
parser.add_argument('-t','--top_k', type=int, metavar='', default=1, help='print out the top K classes along with associated probabilities')
parser.add_argument('-c','--category_names', type=str, metavar='', default='cat_to_name.json', help='load a JSON file that maps the class values to other category names')
parser.add_argument('-g','--gpu',action="store_true", default=False, help='choose training the model on a GPU')
args = parser.parse_args()
with open(args.category_names, 'r') as f:
cat_to_name = json.load(f)
# a function that loads a checkpoint and rebuilds the model
def load_checkpoint(filepath):
    # Load the saved checkpoint from the given path
    checkpoint = torch.load(filepath)
architecture = checkpoint['architecture']
# Download pretrained model
model = getattr(models, architecture)(pretrained=True);
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
hidden_units = checkpoint['hidden_units']
model.class_to_idx = checkpoint['class_to_idx']
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_dict'])
#optimizer = optim.Adam(model.classifier.parameters())
#optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epochs = checkpoint['epochs']
return model
model = load_checkpoint('checkpoint.pth')
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
im = Image.open(image)
size = 256,256
im.thumbnail(size)
#Crop
left = (256-224)/2
top = (256-224)/2
right = (left + 224)
bottom = (top + 224)
im = im.crop((left, top, right, bottom))
np_image = np.array(im)
np_image = np_image / 255
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
np_image = (np_image - mean) / std
np_image = np_image.transpose(2, 0, 1)
return np_image
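# Added aside: the manual resize/crop/normalize steps above roughly mirror a
# standard torchvision transform pipeline such as the sketch below; treating the
# two as equivalent is an assumption, since Image.thumbnail() bounds both sides
# at 256 while transforms.Resize(256) scales the shorter side to 256.
"""
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
"""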
# Reversing idx to class
idx_to_class = {}
for key, value in model.class_to_idx.items():
idx_to_class[value] = key
def predict(image_path, model, topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
    # Use GPU if requested and available, otherwise fall back to the CPU
    if args.gpu:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device("cpu")
# TODO: Implement the code to predict the class from an image file
#preprocess image
image = process_image(image_path)
image = torch.from_numpy(np.array([image])).float()
# turn off dropout
model.eval()
#Load image and the model to cpu or gpu
model.to(device)
image = image.to(device)
logps = model.forward(image)
ps = torch.exp(logps)
top_p, top_class = ps.topk(topk, dim=1)
top_class = np.array(top_class)[0]
top_p = np.array(top_p.detach())[0]
# Mapping index to class
top_classes = []
for i in range(len(top_class)):
top_classes.append(idx_to_class[top_class[i]])
# Mapping class to flower name
flower_names = []
for i in range(len(top_classes)):
flower_names.append(cat_to_name[top_classes[i]])
return top_p, flower_names
probs, classes = predict(args.image_path, model, args.top_k)
print(f"class Probability: {probs}")
print(f"flower name: {classes}")
|
OmarMohy/Image-Classifier-with-Deep-Learning
|
predict.py
|
predict.py
|
py
| 3,922 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33293378254
|
'''Simple Script to Label Item Description'''
#import libraries packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import string
import re
import json
#read from csv
data = pd.read_csv('./item.csv')
item = data['Item Description']
#data cleaning (lowercase, remove whitespace, remove numbers, remove punctutions)
item = item.str.lower() #change to lowercase
#keyword for each label
medical_surgical_label = ['revision', 'replacement', 'reposition']
obstet_label = ['abortion', 'extraction', 'transplantation']
placement_label = ['removal', 'dressing', 'compression']
admin_label = ['irrigation', 'introduction']
osteo_label = ['osteopathic']
chiro_label = ['chiropractic']
imaging_label = ['fluoroscopy', 'ct scan', 'computerized tomography', 'ultrasonography']
mental_label = ['crisis', 'management', 'psychological']
radiation_label = ['cesium', 'radiation', 'photon', 'radiosurgery']
nuclear_label = ['nuclear']
#check string contains any keyword above and label accordingly
# the .str.contains checks below are vectorized over all rows, so no explicit
# loop over item.shape is needed
for row in medical_surgical_label:
    item[item.str.contains(row)] = 'Medical and Surgical'
for row in obstet_label:
    item[item.str.contains(row)] = 'Obstetrics'
for row in placement_label:
    item[item.str.contains(row)] = 'Placement'
for row in admin_label:
    item[item.str.contains(row)] = 'Administration'
for row in osteo_label:
    item[item.str.contains(row)] = 'Osteopathic'
for row in chiro_label:
    item[item.str.contains(row)] = 'Chiropractic'
for row in imaging_label:
    item[item.str.contains(row)] = 'Imaging'
for row in mental_label:
    item[item.str.contains(row)] = 'Mental Health'
for row in radiation_label:
    item[item.str.contains(row)] = 'Radiation Therapy'
for row in nuclear_label:
    item[item.str.contains(row)] = 'Nuclear Medicine'
print(item)
|
aqillakhamis/Text-Matching-Label
|
textMatching.py
|
textMatching.py
|
py
| 1,824 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4729018877
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 13 00:32:57 2018
@author: pablosanchez
"""
import tensorflow as tf
import utils.constants as const
from networks.dense_net import DenseNet
class DeconvNet(object):
def __init__(self, width, height, nchannels, reuse, transfer_fct=tf.nn.relu,
act_out=tf.nn.sigmoid, drop_rate=0., kinit=tf.contrib.layers.xavier_initializer(),
bias_init=tf.constant_initializer(0.0)):
self.width = width
self.height = height
self.nchannels = nchannels
self.transfer_fct = transfer_fct
self.act_out = act_out
self.reuse = reuse
self.drop_rate = drop_rate
self.kinit= kinit
self.bias_init = bias_init
def build(self, input_):
raise NotImplementedError
def deconv_layer(self, input_, filters, k_size, stride, padding, name, act_func=tf.nn.relu):
deconv = tf.layers.conv2d_transpose(input_,
filters,
k_size,
strides=stride,
padding=padding,
activation=act_func,
kernel_initializer=self.kinit,
bias_initializer=self.bias_init,
name=name,
reuse=self.reuse)
print('[*] Layer (',deconv.name, ') output shape:', deconv.get_shape().as_list())
return deconv
class DeconvNet3(DeconvNet):
def __init__(self, input_, width, height, nchannels, reuse, transfer_fct=tf.nn.relu,
act_out=tf.nn.sigmoid, drop_rate=0., kinit=tf.contrib.layers.xavier_initializer(),
bias_init=tf.constant_initializer(0.0)):
super().__init__(width, height, nchannels,reuse, transfer_fct, act_out, drop_rate, kinit, bias_init)
self.output = self.build(input_)
def build(self, input_):
aux_size = self.width//2//2
aux_size_2 = self.height//2//2
initial_n_channels = 64
out_dense_dim = aux_size*aux_size_2*initial_n_channels
hidden_dim = input_.get_shape()[-1].value*3
dense = DenseNet(input_=input_,
hidden_dim=hidden_dim,
output_dim=out_dense_dim,
num_layers=2,
transfer_fct=self.transfer_fct,
act_out=self.transfer_fct,
reuse=self.reuse,
kinit=self.kinit,
bias_init=self.bias_init,
drop_rate=self.drop_rate)
x = dense.output
x = tf.reshape(x, [-1,aux_size,aux_size_2,initial_n_channels])
x = self.deconv_layer(input_=x,
filters=64,
k_size=4, #[4, 4]
stride=2,
padding='SAME',
name='deconv_1',
act_func=self.transfer_fct)
x = self.deconv_layer(input_=x,
filters=32,
k_size=4, #[4, 4]
stride=2,
padding='SAME',
name='deconv_2',
act_func=self.transfer_fct)
x = self.deconv_layer(input_=x,
filters=self.nchannels,
k_size=4, #[4, 4]
stride=1,
padding='SAME',
name='deconv_3',
act_func=self.act_out)
return x
|
psanch21/VAE-GMVAE
|
networks/deconv_net.py
|
deconv_net.py
|
py
| 3,987 |
python
|
en
|
code
| 197 |
github-code
|
6
|
5223556496
|
import os
import time
import requests
import functools
from concurrent.futures import ThreadPoolExecutor
import click
import yaml
def _get_connection_urls(workload_yaml):
with open(workload_yaml) as f:
workload = yaml.safe_load(f)
uris = workload.get("EnvironmentDetails", {}).get("MongosyncConnectionURIs")
if not uris:
raise Exception(
f"This actor requires setting EnvironmentDetails: MongosyncConnectionURIs to use this script"
)
return uris
def poll(workload_yaml, predicate, key):
"""
Wait for all mongosyncs to reach a certain state (e.g. predicate returns False)
based on a value returned by the /progress endpoint
"""
connection_urls = _get_connection_urls(workload_yaml)
    def get_progress(url):
        res = requests.get(f"{url}/api/v1/progress")
        return res.json()["progress"][key]

    for url in connection_urls:
        info = get_progress(url)
        while predicate(info):
            time.sleep(1)
            print(f"Polling {url} for {key}, current value = {info}", flush=True)
            info = get_progress(url)
def _change_one_mongosync_state(route, body, url):
"""
Change state of a given mongosync running at the provided url
"""
resp = requests.post(f"{url}{route}", json=body)
print(resp.json(), flush=True)
success = resp.json()["success"]
if not success:
raise Exception(f"State change failed at route {route}")
return success
def change_state(workload_yaml, route, body):
"""
Helper function to change state of mongosync. This must
send all requests in parallel, as some commands block until
all instances recieve them
"""
connection_urls = _get_connection_urls(workload_yaml)
fn = functools.partial(_change_one_mongosync_state, route, body)
with ThreadPoolExecutor() as executor:
futures = []
# Using executor.map swallows exceptions from the task,
# using .submit and then accessing the future's .result
# will cause exceptions to be rethrown
for url in connection_urls:
futures.append(executor.submit(fn, url))
for f in futures:
f.result()
@click.group(name="MongosyncActor", context_settings=dict(help_option_names=["-h", "--help"]))
def cli():
pass
@cli.command(
"start",
help=("Issue /start to all mongosync processes"),
)
@click.argument("workload_yaml", nargs=1)
def start(workload_yaml):
change_state(workload_yaml, "/api/v1/start", {"Source": "cluster0", "Destination": "cluster1"})
@cli.command(
"poll_for_cea",
help=("Poll all available instances for the CEA stage"),
)
@click.argument("workload_yaml", nargs=1)
def poll_for_cea(workload_yaml):
poll(workload_yaml, lambda x: x != "change event application", "info")
@cli.command(
"poll_for_commit_point",
help=("Wait till all the instances canCommit = true and lagTimeSeconds < 120"),
)
@click.argument("workload_yaml", nargs=1)
def poll_for_commit_point(workload_yaml):
    # both conditions must clear; poll() returns None, so run the two checks in sequence
    poll(workload_yaml, lambda x: bool(x) == False, "canCommit")
    poll(workload_yaml, lambda x: int(x) > 120, "lagTimeSeconds")
@cli.command(
"drain_writes",
help=("Wait till all writes have been drained to the destination cluster"),
)
@click.argument("workload_yaml", nargs=1)
def drain_writes(workload_yaml):
poll(workload_yaml, lambda x: int(x) > 5, "lagTimeSeconds")
@cli.command(
"commit",
help=("Commit the migration"),
)
@click.argument("workload_yaml", nargs=1)
def commit(workload_yaml):
change_state(workload_yaml, "/api/v1/commit", {})
@cli.command(
"wait_for_commit",
help=("Wait until all mongosyncs are finished commiting the migration"),
)
@click.argument("workload_yaml", nargs=1)
def wait_for_commit(workload_yaml):
poll(workload_yaml, lambda x: x != "COMMITTED", "state")
@cli.command(
"pause",
help=("Pause the migration"),
)
@click.argument("workload_yaml", nargs=1)
def pause(workload_yaml):
change_state(workload_yaml, "/api/v1/pause", {})
@cli.command(
"resume",
help=("Resume the migration"),
)
@click.argument("workload_yaml", nargs=1)
def resume(workload_yaml):
change_state(workload_yaml, "/api/v1/resume", {})
if __name__ == "__main__":
cli()
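# Added usage sketch: assumed command-line invocations of this actor script;
# the workload path below is hypothetical.
"""
python mongosync_actor.py start ./workloads/my_workload.yml
python mongosync_actor.py poll_for_cea ./workloads/my_workload.yml
python mongosync_actor.py drain_writes ./workloads/my_workload.yml
python mongosync_actor.py commit ./workloads/my_workload.yml
python mongosync_actor.py wait_for_commit ./workloads/my_workload.yml
"""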
|
mongodb/genny
|
src/cast_python/src/mongosync_actor.py
|
mongosync_actor.py
|
py
| 4,300 |
python
|
en
|
code
| 42 |
github-code
|
6
|
30367868751
|
from numpy import linspace, sin
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
from enable.api import ComponentEditor
from traits.api import Enum, HasTraits, Instance
from traitsui.api import Item, Group, View
class PlotEditor(HasTraits):
plot = Instance(Plot)
plot_type = Enum("scatter", "line")
orientation = Enum("horizontal", "vertical")
traits_view = View(
Item("orientation", label="Orientation"),
Item("plot", editor=ComponentEditor(), show_label=False),
width=500,
height=500,
resizable=True,
)
def __init__(self, *args, **kw):
HasTraits.__init__(self, *args, **kw)
# Create the data and the PlotData object
x = linspace(-14, 14, 100)
y = sin(x) * x ** 3
plotdata = ArrayPlotData(x=x, y=y)
# Create the scatter plot
plot = Plot(plotdata)
plot.plot(("x", "y"), type=self.plot_type, color="blue")
plot.tools.append(PanTool(plot))
plot.tools.append(ZoomTool(plot))
self.plot = plot
def _orientation_changed(self):
if self.orientation == "vertical":
self.plot.orientation = "v"
else:
self.plot.orientation = "h"
self.plot.request_redraw()
# ===============================================================================
# demo object that is used by the demo.py application.
# ===============================================================================
class Demo(HasTraits):
# Scatter plot.
scatter_plot = Instance(PlotEditor)
# Line plot.
line_plot = Instance(PlotEditor)
traits_view = View(
Group(Item("@scatter_plot", show_label=False), label="Scatter"),
Group(Item("@line_plot", show_label=False), label="Line"),
title="Chaco Plot",
resizable=True,
)
def __init__(self, *args, **kws):
super(Demo, self).__init__(*args, **kws)
# Hook up the ranges.
self.scatter_plot.plot.range2d = self.line_plot.plot.range2d
def _scatter_plot_default(self):
return PlotEditor(plot_type="scatter")
def _line_plot_default(self):
return PlotEditor(plot_type="line")
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
|
enthought/chaco
|
examples/tutorials/scipy2008/ploteditor.py
|
ploteditor.py
|
py
| 2,299 |
python
|
en
|
code
| 286 |
github-code
|
6
|
36584033730
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 14:36:47 2020
@author: allison
"""
def readATS(filename):
infile = open(filename)
ats = []
for line in infile:
ats.append(line.replace("\n",""))
infile.close()
return ats
def readCouplingMatrix(filename):
infile = open(filename)
Csca = []
i = 0
for line in infile:
Csca.append([])
entries = line.split(",")
entries = entries[0:len(entries)-1]
for v in entries:
Csca[i].append(float(v))
i+=1
return Csca
def getPairwiseCouplingValues(Csca, ats):
coupling_values = []
for i in range(0, len(ats)):
for j in range(i+1, len(ats)):
coupling_values.append(Csca[i][j])
coupling_values.sort()
return coupling_values
def getCouplingInfo(pos1, pos2):
ats = readATS("Data/ats.txt")
if str(pos1) not in ats:
return (None, 1)
if str(pos2) not in ats:
return (None, 2)
Csca = readCouplingMatrix("Data/SCA_matrix.csv")
coupling_values = getPairwiseCouplingValues(Csca, ats)
index1 = ats.index(str(pos1))
index2 = ats.index(str(pos2))
coupling = Csca[index1][index2]
first_index = coupling_values.index(coupling)
last_index = len(coupling_values) - 1 - coupling_values[::-1].index(coupling)
if first_index == -1:
return (coupling, -1)
percentile = 100.0*(first_index+last_index)/(2*len(coupling_values))
return (coupling, percentile)
#position1 = 4
#position2 = 11
#(coupling, percentile) = getCouplingInfo(position1,position2)
|
allie-walker/SCA4RNA_results
|
coupling_matrix_tools.py
|
coupling_matrix_tools.py
|
py
| 1,583 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20918248522
|
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
def GetMoviesByDescription(movieName):
movie_list=[]
csv_path = "cleaned data/movies.csv"
df = pd.read_csv(csv_path)
df['description'] = df['description'].fillna('')
#create the matrix
tf = TfidfVectorizer(analyzer='word',ngram_range=(1, 2),min_df=0, stop_words='english')
tfidf_matrix = tf.fit_transform(df['description'])
#calaculate the Cosine Similarity Score
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
md = df.reset_index()
titles = df['title']
indices = pd.Series(df.index, index=df['title'])
try:
idx = indices[movieName]
sim_scores = list(enumerate(cosine_sim[idx]))
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[1:31]
movie_indices = [i[0] for i in sim_scores]
recdf = titles.iloc[movie_indices]
count = 0
for index, value in recdf.items():
count = count + 1
movie_list.append(value)
if(count == 8):
break
except:
movie_list.append("No Recommendation available.")
return movie_list
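# Added usage sketch for GetMoviesByDescription; the title below is only an
# example and must match a 'title' entry in cleaned data/movies.csv exactly,
# otherwise the except branch returns "No Recommendation available."
"""
recommended = GetMoviesByDescription("Toy Story (1995)")
for title in recommended:
    print(title)
"""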
def GetMoviesByUserRating(movieName):
movies = pd.read_csv("raw data/movies.csv")
ratings = pd.read_csv("raw data/ratings.csv")
movievsuser = ratings.pivot(index='movieId',columns='userId',values='rating')
movievsuser.fillna(0, inplace=True)
ratingsByMovie = ratings.groupby('movieId')['rating'].agg('count')
ratingsByUser = ratings.groupby('userId')['rating'].agg('count')
movievsuser = movievsuser.loc[ratingsByMovie[ratingsByMovie > 40].index,:]
csr_data = csr_matrix(movievsuser.values)
movievsuser.reset_index(inplace=True)
# Using KNN algorithm to predict similarity with cosine distance
knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=20, n_jobs=-1)
knn.fit(csr_data)
reccomendCount = 8
listing = []
movieList = movies[movies['title'].str.contains(movieName)]
if len(movieList):
movie_id= movieList.iloc[0]['movieId']
movie_id = movievsuser[movievsuser['movieId'] == movie_id].index[0]
distances , indices = knn.kneighbors(csr_data[movie_id],n_neighbors=reccomendCount+1)
recommendedMovies = sorted(list(zip(indices.squeeze().tolist(),distances.squeeze().tolist())),key=lambda x: x[1])[:0:-1]
recMoviesList = []
for val in recommendedMovies:
movie_id = movievsuser.iloc[val[0]]['movieId']
idx = movies[movies['movieId'] == movie_id].index
recMoviesList.append({'Title':movies.iloc[idx]['title'].values[0],'Distance':val[1]})
df = pd.DataFrame(recMoviesList,index=range(1,reccomendCount+1))
df['Distance'] = pd.to_numeric(df['Distance'])
df= df.sort_values('Distance')
listing = df['Title']
return listing
def GetMoviesByGenre(movieName):
file_to_load = "raw data/movies.csv"
ratings = pd.read_csv("raw data/ratings.csv")
data = pd.read_csv(file_to_load)
    data['genres'] = data['genres'].str.replace('|', ' ', regex=False)
genre_data=data[['title','genres']]
genre_data=genre_data.set_index('title')
#convert genre column to array
cv = CountVectorizer()
X = cv.fit_transform(genre_data["genres"]).toarray()
similarities = cosine_similarity(X)
movie_index = data.loc[data['title'].str.contains(movieName)].index[0]
similarity_values = pd.Series(similarities[movie_index])
#We converted list into series in order to preserve the actual indexes of dataset even after sorting
similarity_values.sort_values(ascending=False)
similar_movie_indexes = list(similarity_values.sort_values(ascending=False).index)
#Remove the already watched movie from index list
similar_movie_indexes.remove(movie_index)
movie_list=[]
for i in range(8):
movie_list.append(genre_data.index[similar_movie_indexes[i]])
return movie_list
def GetPredictionsForMovie(moviename):
complete_df = pd.read_csv("cleaned data/complete_df_with_predictions.csv")
mv = complete_df.loc[complete_df['title'].str.contains(moviename),['title']]
movie = mv.head(1)
names = movie.to_numpy()
name = names[0][0]
#based on all the users in the dataframe and their predictions what is the average rating the movie will get
movie_rating=round((complete_df.loc[complete_df['title']==name,['predicted rating']].values).mean(),2)
#from data already available what is the average of the movie
movie_gavg=round((complete_df.loc[complete_df['title']==name,['MAvg']].values).mean(),2)
percdiff = round(((movie_rating-movie_gavg)/movie_gavg*100),2)
summary = {'Predicted Rating': movie_rating, 'Actual Rating': movie_gavg ,"Percentage Difference%":percdiff}
return summary
def GetPredictions(moviename, userid):
complete_df = pd.read_csv("cleaned data/complete_df_with_predictions.csv")
#print(complete_df.head())
userid=int(userid)
try:
mv = complete_df.loc[complete_df['title'].str.contains(moviename),['title']]
movie = mv.head(1)
names = movie.to_numpy()
name = names[0][0]
#print(name)
#based on users past ratings what is the prediction for a particular movie
pred_rating=round(complete_df.loc[(complete_df['user']==userid) & (complete_df['title']==name),['predicted rating']].values[0][0],2)
#from data already available what is the average of the movie
user_rating=round(complete_df.loc[(complete_df['user']==userid) & (complete_df['title']==name),['rating']].values[0][0],2)
percdiff = round(((pred_rating-user_rating)/user_rating*100),2)
summary = {'Predicted Rating': pred_rating, 'Actual Rating': user_rating ,"Percentage Difference%":percdiff}
return summary
except:
pred_rating=0
user_rating=0
percdiff = 0
#based on all the users in the dataframe and their predictions what is the average rating a user gives
user_rating=round((complete_df.loc[complete_df['user']==userid,['predicted rating']].values).mean(),2)
#from data already available what is the average of the movie
user_uavg=round((complete_df.loc[complete_df['user']==userid,['UAvg']].values).mean(),2)
percdiff = round(((user_rating-user_uavg)/user_uavg*100),2)
print ("this user has not rated this movie")
summary = {'Status': "this user has not rated this movie, showing a prediction of what this user is likely to predict",'Predicted Rating': user_rating, 'Actual Rating': user_uavg ,"Percentage Difference%":percdiff}
return summary
|
InsiyaKanjee/Project4
|
initdb.py
|
initdb.py
|
py
| 7,073 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18677212205
|
import argparse
import logging
import random
import sys
import time
from copy import deepcopy
import numpy as np
import torch
from scipy.stats import kendalltau
from datasets.dataloader import get_dataloader
from models.cell_operations import NAS_BENCH_201
from models.supernet import Supernet201
from utils import obtain_accuracy, AverageMeter, set_seed, run_func, time_record
parser = argparse.ArgumentParser("Train 201 Supernet")
# dataset
parser.add_argument("--data_root", type=str, default='./dataset/', help="The path to dataset")
parser.add_argument("--dataset", type=str, default='cifar10', help="Dataset.")
parser.add_argument("--search_space_name", type=str, default='nas-bench-201', help="The search space name.")
parser.add_argument("--num_classes", type=int, default=10, help="Dataset Classes")
# supernet
parser.add_argument("--max_nodes", type=int, default=4, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, default=16, help="The number of channels.")
parser.add_argument("--num_cells", type=int, default=5, help="The number of cells in one stage.")
# training settings
parser.add_argument("--exp_name", type=str, default='debug_baseline', help='exp_name for saving results')
parser.add_argument("--method", type=str, default='spos', choices=['spos', 'fairnas', 'sumnas'])
parser.add_argument("--lr", type=float, default=0.05, help="Learning rate")
parser.add_argument("--inner_lr", type=float, default=0.05, help="Learning rate")
parser.add_argument("--momentum", type=float, default=0.9, help="Momentum")
parser.add_argument("--wd", type=float, default=2.5e-4, help="Weight decay")
parser.add_argument("--epochs", type=int, default=250, help="Training epochs")
parser.add_argument("--gpu_id", type=int, default=0, help="Training GPU")
parser.add_argument("--train_batch_size", type=int, default=256, help="Train batch size")
parser.add_argument("--valid_batch_size", type=int, default=512, help="Valid batch size")
parser.add_argument("--print_freq", type=int, default=50, help="print frequency when training")
parser.add_argument("--rank_print_freq", type=int, default=100, help="print frequency when ranking")
parser.add_argument("--seed", type=int, default=0, help="manual seed")
parser.add_argument("--debug", default=False, action='store_true', help="for debug")
args = parser.parse_args()
args.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
args.ckpt_path = 'checkpoints/%s.pt' % args.exp_name
args.pred_path = 'results/%s.npy' % args.exp_name
if args.debug:
args.epochs = 5
# logging config
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
logging.info(args)
set_seed(args.seed)
def mix_grad(grad_list, weight_list):
"""
calc weighted average of gradient
"""
mixed_grad = []
for g_list in zip(*grad_list):
g_list = torch.stack([weight_list[i] * g_list[i] for i in range(len(weight_list))])
mixed_grad.append(torch.sum(g_list, dim=0))
return mixed_grad
def apply_grad(model, grad):
"""
assign gradient to model(nn.Module) instance. return the norm of gradient
"""
for p, g in zip(model.parameters(), grad):
if p.grad is None:
p.grad = g
else:
p.grad += g
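# Added toy sketch of how mix_grad/apply_grad combine per-path gradients; the
# tensors below are made up and stand in for real parameter gradients.
"""
grad_a = [torch.ones(2, 2), torch.zeros(3)]
grad_b = [torch.zeros(2, 2), torch.ones(3)]
weight = torch.ones(2) / 2               # equal weights, as in the sumnas branch
mixed = mix_grad([grad_a, grad_b], weight)
# mixed == [0.5 * torch.ones(2, 2), 0.5 * torch.ones(3)]
"""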
def train(epoch, train_loader, model, criterion, optimizer, inner_optimizer=None):
train_loss = AverageMeter()
train_top1 = AverageMeter()
train_top5 = AverageMeter()
model.train()
path_list = []
num_candidate_ops = 5
candidate_ops = list(range(5))
candidate_edges = 6
for step, (inputs, targets) in enumerate(train_loader):
inputs = inputs.to(args.device)
targets = targets.to(args.device)
if args.method == 'spos':
# randomly sample an arch
sampled_arch = [
random.choice(candidate_ops) for _ in range(candidate_edges)
]
optimizer.zero_grad()
logits = model(inputs, sampled_arch)
loss = criterion(logits, targets)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
elif args.method == 'fairnas':
# shuffle the ops to get sub-models with strict fairness
for _ in range(candidate_edges):
random.shuffle(candidate_ops)
path_list.append(deepcopy(candidate_ops))
# inner loop
optimizer.zero_grad()
for _path_id in range(num_candidate_ops):
sampled_arch = [_operations[_path_id] for _operations in path_list]
logits = model(inputs, sampled_arch)
loss = criterion(logits, targets)
loss.backward()
# record training metrics
prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
train_loss.update(loss.item(), inputs.size(0))
train_top1.update(prec1.item(), inputs.size(0))
train_top5.update(prec5.item(), inputs.size(0))
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
elif args.method == 'sumnas':
# record the supernet weights
weights_before = deepcopy(model.state_dict())
grad_list = []
# shuffle the ops to get sub-models fairly
for _ in range(candidate_edges):
random.shuffle(candidate_ops)
path_list.append(deepcopy(candidate_ops))
# inner loop
for _path_id in range(num_candidate_ops):
sampled_arch = [_operations[_path_id] for _operations in path_list]
# inner optimization
for _step in range(args.adaption_steps):
inner_optimizer.zero_grad()
logits = model(inputs, sampled_arch)
loss = criterion(logits, targets)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
inner_optimizer.step()
# record training metrics
prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
train_loss.update(loss.item(), inputs.size(0))
train_top1.update(prec1.item(), inputs.size(0))
train_top5.update(prec5.item(), inputs.size(0))
# record reptile gradient
outer_grad = []
weights_after = deepcopy(model.state_dict())
for p_0, p_T in zip(weights_before.items(), weights_after.items()):
outer_grad.append(-(p_T[1] - p_0[1]).detach())
grad_list.append(outer_grad)
model.load_state_dict(weights_before)
# outer loop
optimizer.zero_grad()
weight = torch.ones(len(grad_list)) / len(grad_list)
grad = mix_grad(grad_list, weight)
apply_grad(model, grad)
optimizer.step()
else:
raise ValueError('Wrong training method for the supernet: %s' % args.method)
# record
prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
train_loss.update(loss.item(), inputs.size(0))
train_top1.update(prec1.item(), inputs.size(0))
train_top5.update(prec5.item(), inputs.size(0))
if step % args.print_freq == 0 or step + 1 == len(train_loader):
logging.info('[Training] Epoch %03d/%03d, step %03d/%03d, loss: %.3f, top1: %.3f, top5: %.3f'
% (epoch, args.epochs, step, len(train_loader), train_loss.avg, train_top1.avg, train_top5.avg))
return train_loss.avg, train_top1.avg, train_top5.avg
def valid(valid_loader, model, criterion):
val_loss, val_top1, val_top5 = AverageMeter(), AverageMeter(), AverageMeter()
model.eval()
with torch.no_grad():
for step, (val_inputs, val_targets) in enumerate(valid_loader):
val_inputs = val_inputs.to(args.device)
val_targets = val_targets.to(args.device)
# randomly sample an arch
candidate_ops = range(5)
candidate_edges = 6
sampled_arch = [
random.choice(candidate_ops) for _ in range(candidate_edges)
]
# prediction
logits = model(val_inputs, sampled_arch)
loss = criterion(logits, val_targets)
# record
prec1, prec5 = obtain_accuracy(
logits.data, val_targets.data, topk=(1, 5)
)
val_loss.update(loss.item(), val_inputs.size(0))
val_top1.update(prec1.item(), val_inputs.size(0))
val_top5.update(prec5.item(), val_inputs.size(0))
return val_loss.avg, val_top1.avg, val_top5.avg
def valid_specific_path(valid_loader, model, sampled_arch, criterion, device):
val_loss, val_top1, val_top5 = AverageMeter(), AverageMeter(), AverageMeter()
model.eval()
with torch.no_grad():
for step, (val_inputs, val_targets) in enumerate(valid_loader):
val_inputs = val_inputs.to(device)
val_targets = val_targets.to(device)
# prediction
logits = model(val_inputs, sampled_arch)
loss = criterion(logits, val_targets)
# record
prec1, prec5 = obtain_accuracy(
logits.data, val_targets.data, topk=(1, 5)
)
val_loss.update(loss.item(), val_inputs.size(0))
val_top1.update(prec1.item(), val_inputs.size(0))
val_top5.update(prec5.item(), val_inputs.size(0))
return val_loss.avg, val_top1.avg, val_top5.avg
def rank_supernet(valid_loader, model, criterion):
logging.info('---------- Start to rank on NAS-Bench-201 ----------')
nasbench201 = np.load('./dataset/nasbench201/nasbench201_dict.npy', allow_pickle=True).item()
if args.debug:
new_dict = {}
for i in range(5):
new_dict[str(i)] = deepcopy(nasbench201[str(i)])
nasbench201 = deepcopy(new_dict)
nasbench201_len = len(nasbench201)
tmp_pred = []
tmp_target = []
prediction = {}
for step, item in enumerate(nasbench201):
model_id = int(item)
operation = nasbench201[item]['operation']
target = nasbench201[item]['cifar10_test']
val_loss, val_top1, val_top5 = valid_specific_path(valid_loader, model, operation, criterion, args.device)
tmp_pred.append(val_top1)
tmp_target.append(target)
prediction[model_id] = {'id': model_id, 'model_gene': operation, 'pred': val_top1, 'target': target}
if step % args.rank_print_freq == 0 or (step + 1) == nasbench201_len:
logging.info("model_id: %d gene: %s loss: %.3f top1: %.3f target: %.3f"
% (model_id, str(operation), val_loss, val_top1, target))
logging.info("Evaluated: %05d\tWaiting: %05d\tCurrent Kendall's Tau: %.5f" %
(len(tmp_pred), nasbench201_len-len(tmp_pred), kendalltau(tmp_pred, tmp_target)[0]))
# save predictions
print('\n')
np.save(args.pred_path, prediction)
logging.info('Finish ranking and save predictions to : %s' % args.pred_path)
final_ranking = kendalltau(tmp_pred, tmp_target)[0]
logging.info("Final_pred: %05d\tFinal_target: %05d\tFinal_Kendall's Tau: %.5f" %
(len(tmp_pred), len(tmp_target), final_ranking))
return final_ranking
def main():
# time record
train_start = time.time()
# dataloader
train_loader, valid_loader = get_dataloader(args, model=None, dataset=args.dataset)
# supernet
model = Supernet201(
C=args.channel, N=args.num_cells, max_nodes=args.max_nodes,
num_classes=args.num_classes, search_space=NAS_BENCH_201
).to(args.device)
# training settings
optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.wd)
if args.method == 'sumnas':
args.adaption_steps = 2
optimizer = torch.optim.SGD(model.parameters(), 1.0, weight_decay=4e-5)
inner_optimizer = torch.optim.SGD(model.parameters(), 0.05, momentum=0.9)
else:
inner_optimizer = None
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs)
criterion = torch.nn.CrossEntropyLoss()
best_val_top1 = 0.0
logging.info('---------- Start to train supernet ----------')
for epoch in range(args.epochs):
# train supernet
train_loss, train_top1, cnn_top5 = train(epoch, train_loader, model, criterion, optimizer, inner_optimizer)
logging.info(
"[Epoch: %s/%s] train_loss=%.3f, train_top1=%.3f, train_top5=%.3f" %
(epoch, args.epochs, train_loss, train_top1, cnn_top5)
)
# valid supernet
val_loss, val_top1, val_top5 = valid(valid_loader, model, criterion)
logging.info(
"[Validation], val_loss=%.3f, val_top1=%.3f, val_top5=%.3f, best_top1=%.3f" %
(val_loss, val_top1, val_top5, best_val_top1)
)
if best_val_top1 < val_top1:
best_val_top1 = val_top1
# save latest checkpoint
torch.save(model.state_dict(), args.ckpt_path)
logging.info('Save latest checkpoint to %s' % args.ckpt_path)
# scheduler step
scheduler.step()
print('\n')
# time record
supernet_training_elapse = time_record(train_start, prefix='Supernet training')
print('\n')
# load best supernet weights
latest_pretrained_weights = torch.load(args.ckpt_path)
model.load_state_dict(latest_pretrained_weights)
model.eval()
# ranking supernet
final_ranking = rank_supernet(valid_loader, model, criterion)
# write results
with open('./results/ranking.txt', 'a') as f:
f.write("EXP: %s \t Seed: %s \t Kendall' Tau: %.6f \t Training_Elapse: %s \n"
% (args.exp_name, args.seed, final_ranking, supernet_training_elapse))
if __name__ == "__main__":
run_func(args, main)
|
ShunLu91/PA-DA
|
nasbench201/train_baselines_201.py
|
train_baselines_201.py
|
py
| 14,254 |
python
|
en
|
code
| 29 |
github-code
|
6
|