seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
20070392797
|
import importlib
import sys
from unittest import mock

class MockConfig:
    def __init__(self):
        self.bot = mock.MagicMock()
        self.state = mock.Mock()

def set_up_class(cls, *module_names):
    mock_config = MockConfig()
    sys.modules['config'] = mock_config
    try:
        for module_name in module_names:
            module = importlib.import_module(module_name)
            setattr(cls, module_name, module)
    except Exception:
        del sys.modules['config']
        raise

def tear_down_class(cls):
    del sys.modules['config']
|
raylu/sbot
|
tests/mock_config.py
|
mock_config.py
|
py
| 491 |
python
|
en
|
code
| 8 |
github-code
|
6
|
1439842893
|
import numpy as np
import numpy.random
import matplotlib.pyplot as plt
import scipy.optimize
def jj_cpr_ballistic(gamma, tau):
return np.sin(gamma) / np.sqrt(1 - tau * np.sin(gamma/2)**2)
def jj_free_energy_ballistic(gamma, tau):
return 4 / tau * (1 - np.sqrt(1 - tau * np.sin(gamma/2)**2))
# d/dγ I(γ)
def jj_diff_ballistic(gamma, tau):
nom_vals = 1 - tau * np.sin(gamma / 2)**2
return 1/4 * tau * np.sin(gamma)**2 / (nom_vals)**(3/2) + \
np.cos(gamma) / np.sqrt(nom_vals)
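# --- illustrative note added for clarity (not part of the original file) ---
# jj_cpr_ballistic is d/dγ of jj_free_energy_ballistic, and jj_diff_ballistic is
# d/dγ of jj_cpr_ballistic. A hedged sanity check with a central finite
# difference (hypothetical values, e.g. tau = 0.8) could look like:
#   g, h = np.linspace(-3, 3, 101), 1e-6
#   np.allclose(jj_diff_ballistic(g, 0.8),
#               (jj_cpr_ballistic(g + h, 0.8) - jj_cpr_ballistic(g - h, 0.8)) / (2 * h),
#               atol=1e-5)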
def _normalize_phase(phi):
phi = np.fmod(phi, 2 * np.pi)
phi = np.where(phi < 0, phi + 2 * np.pi, phi)
# normalize to (-pi, pi)
phi = np.where(phi > np.pi, phi - 2*np.pi, phi)
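# (illustrative note, not in the original: e.g. phi = 1.5*np.pi ends up as -0.5*np.pi)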
return phi
class network:
def __init__(self, Nx, Ny, diff_x=None, diff_y=None, *, cpr_x, cpr_y, free_energy_x, free_energy_y):
self.Nx = Nx
self.Ny = Ny
self.cpr_x = cpr_x
self.cpr_y = cpr_y
self.free_energy_x = free_energy_x
self.free_energy_y = free_energy_y
self.diff_x = diff_x
self.diff_y = diff_y
self.island_x_coords, self.island_y_coords = np.meshgrid(np.arange(Nx), np.arange(Ny), indexing="ij")
self.phi_matrix = np.zeros((Nx, Ny), dtype=np.float32)
self.set_frustration(0)
def reset_network(self):
self.phi_matrix *= 0
self.set_frustration(0)
def set_random_state(self):
self.phi_matrix = np.float32(2 * np.pi * numpy.random.rand(self.Nx, self.Ny))
def set_frustration(self, f):
Nx = self.Nx
Ny = self.Ny
A_x = np.linspace(0, -(Ny-1) * f, Ny) + (Ny - 1)/2 * f
A_x = np.tile(A_x, (Nx - 1, 1))
A_y = np.linspace(0, (Nx-1) * f, Nx) - (Nx-1)/2 * f
A_y = np.tile(A_y, (Ny - 1, 1)).T
self.A_x = np.float32(-np.pi * A_x)
self.A_y = np.float32(-np.pi * A_y)
def add_phase_gradient(self, d_phi):
for i in range(self.Nx):
for j in range(self.Ny):
self.phi_matrix[i,j] += d_phi * (i + 1)
def add_vortex(self, x0, y0, vorticity=1):
self.phi_matrix += vorticity * np.arctan2(self.island_y_coords - y0,
self.island_x_coords - x0)
def get_gamma_matrices(self):
Nx = self.Nx
Ny = self.Ny
gamma_x = np.zeros((Nx - 1, Ny))
gamma_y = np.zeros((Nx, Ny - 1))
gamma_x += self.A_x
gamma_y += self.A_y
phi_matrix = self.phi_matrix
gamma_x += phi_matrix[1:,:] - phi_matrix[:-1,:]
gamma_y += phi_matrix[:,1:] - phi_matrix[:,:-1]
return (gamma_x, gamma_y)
def get_current_matrices(self):
gamma_x, gamma_y = self.get_gamma_matrices()
return self.cpr_x(gamma_x), self.cpr_y(gamma_y)
def get_current(self):
I_x, I_y = self.get_current_matrices()
return np.sum(I_x[0,:])
def free_energy(self):
gamma_x, gamma_y = self.get_gamma_matrices()
return np.sum(self.free_energy_x(gamma_x)) + \
np.sum(self.free_energy_y(gamma_y))
def winding_number(self):
# integrate grad φ around the array
phi_matrix = self.phi_matrix
rv = 0
# bottom edge
rv += np.sum(_normalize_phase(phi_matrix[1:,0] - phi_matrix[:-1,0]))
# right edge
rv += np.sum(_normalize_phase(phi_matrix[-1,1:] - phi_matrix[-1,:-1]))
# top edge
rv += -np.sum(_normalize_phase(phi_matrix[1:,-1] - phi_matrix[:-1,-1]))
# left edge
rv += -np.sum(_normalize_phase(phi_matrix[0,1:] - phi_matrix[0,:-1]))
return rv
def plot_phases(self):
plt.clf()
m = self.phi_matrix.copy()
m = np.flip(m, axis=1)
m = np.swapaxes(m, 0, 1)
plt.imshow(m/np.pi, aspect='equal', cmap='gray')
plt.colorbar(format="%.1f", label='φ')
def plot_currents(self):
Nx = self.Nx
Ny = self.Ny
x_currents, y_currents = self.get_current_matrices()
x_current_xcoords, x_current_ycoords = np.meshgrid(np.arange(Nx-1), np.arange(Ny), indexing="ij")
x_current_xcoords = x_current_xcoords.astype('float64')
x_current_ycoords = x_current_ycoords.astype('float64')
x_current_xcoords += 0.5
y_current_xcoords, y_current_ycoords = np.meshgrid(np.arange(Nx), np.arange(Ny-1), indexing="ij")
y_current_xcoords = y_current_xcoords.astype('float64')
y_current_ycoords = y_current_ycoords.astype('float64')
y_current_ycoords += 0.5
plt.clf()
plt.quiver(x_current_xcoords, x_current_ycoords,
x_currents, np.zeros(x_currents.shape),
pivot='mid', units='width', scale=5*Nx, width=1/(30*Nx))
plt.quiver(y_current_xcoords, y_current_ycoords,
np.zeros(y_currents.shape), y_currents,
pivot='mid', units='width', scale=5*Nx, width=1/(30*Nx))
plt.scatter(self.island_x_coords, self.island_y_coords, marker='s', c='b', s=5)
# do not use newton solver? simple gradient descent seems to converge
# with similar speed when using an optimized ε parameter
def optimization_step_newton(self):
# phi -> phi - cpr(phi) / cpr'(phi)
Nx = self.Nx
Ny = self.Ny
phi_matrix = self.phi_matrix
A_x = self.A_x
A_y = self.A_y
cpr_x = self.cpr_x
cpr_y = self.cpr_y
diff_x = self.diff_x
diff_y = self.diff_y
for i in range(Nx):
for j in range(Ny):
I_prime = 0
I = 0
phi_i_j = phi_matrix[i,j]
# y-component
if j > 0:
gamma = phi_i_j - phi_matrix[i,j-1] + A_y[i, j-1]
I += cpr_y(gamma)
I_prime += diff_y(gamma)
if j < Ny - 1:
gamma = -phi_i_j + phi_matrix[i,j+1] + A_y[i,j]
I += -cpr_y(gamma)
I_prime += diff_y(gamma)
# x-component
if i == 0:
gamma = phi_i_j - self.phi_l + A_x[0, j]
I += cpr_x(gamma)
I_prime += diff_x(gamma)
gamma = -phi_i_j + phi_matrix[i+1, j] + A_x[1,j]
I += -cpr_x(gamma)
I_prime += diff_x(gamma)
elif i == Nx - 1:
gamma = -phi_i_j + self.phi_r + A_x[i+1, j]
I += -cpr_x(gamma)
I_prime += diff_x(gamma)
gamma = phi_i_j - phi_matrix[i-1, j] + A_x[i,j]
I += cpr_x(gamma)
I_prime += diff_x(gamma)
else:
gamma = -phi_i_j + phi_matrix[i+1,j]+ A_x[i+1, j]
I += -cpr_x(gamma)
I_prime += diff_x(gamma)
gamma = phi_i_j - phi_matrix[i-1, j]+ A_x[i,j]
I += cpr_x(gamma)
I_prime += diff_x(gamma)
new_phi = phi_i_j - I / I_prime
phi_matrix[i, j] = new_phi
return np.abs(I)
def optimization_step(self, optimize_leads=False, temp=0, fix_contacts=False, epsilon=0.4):
# minimize free energy f(phi) using gradient descent
# update all phi's in-place
# phi -> phi - ε f'(phi)
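# (note added for clarity, not in the original: the f_prime accumulated below is
# the sum of junction currents cpr(γ) attached to island (i, j), taken with signs
# such that it vanishes when current is conserved at that island)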
Nx = self.Nx
Ny = self.Ny
phi_matrix = self.phi_matrix
A_x = self.A_x
A_y = self.A_y
cpr_x = self.cpr_x
cpr_y = self.cpr_y
I_norm = 0
for i in range(Nx):
if fix_contacts and (i == 0 or i == Nx - 1):
continue
for j in range(Ny):
f_prime = 0
phi_i_j = phi_matrix[i,j]
# x-component
if i > 0:
f_prime += cpr_x(phi_i_j - phi_matrix[i-1, j]+ A_x[i-1,j])
if i < Nx - 1:
f_prime += -cpr_x(-phi_i_j + phi_matrix[i+1,j]+ A_x[i, j])
# y-component
if j > 0:
f_prime += cpr_y(phi_i_j - phi_matrix[i,j-1] + A_y[i, j-1])
if j < Ny - 1:
f_prime += -cpr_y(-phi_i_j + phi_matrix[i,j+1] + A_y[i,j])
new_phi = phi_i_j - epsilon * f_prime
if temp > 0:
new_phi += temp * numpy.random.randn()
phi_matrix[i, j] = new_phi
I_norm += np.abs(f_prime)
return I_norm / (Nx * Ny)
def find_ground_state(self, T_start=0.35, N_max=5000, *args, **kwargs):
# annealing schedule
for i in range(N_max):
temp = T_start * (N_max - i) / N_max
delta = self.optimization_step(temp=temp)
print("temp = %g\ndelta = %g" % (temp, delta))
# converge
return self.optimize(temp = 0, *args, **kwargs)
def optimize(self, maxiter=10000, delta_tol=1e-2, *args, **kwargs):
for i in range(maxiter):
delta = self.optimization_step(*args, **kwargs)
if i % 100 == 0:
print("i = %d, delta = %.3g" % (i, delta))
if delta < delta_tol:
print("i(final) = %d, delta(final) = %.3g" % (i, delta))
break
return delta
|
amba/JJA-solver
|
JJAsolver/network.py
|
network.py
|
py
| 9,572 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73268144828
|
import tweepy
import tweeting
import argparse
from time import sleep
parser = argparse.ArgumentParser(description="Tweet news stories periodically according to global priorities.")
parser.add_argument('-dbg_mode', default=False, type=bool, nargs=1, help="Run in debug mode (default = False)")
parser.add_argument('-tw_cred', default='twitter_API_keys.txt', type=str, nargs=1,
help="Contains Twitter API credentials. Must be in directory above repo.")
parser.add_argument('-news_cred', default='newsapi_key.txt', type=str, nargs=1,
help="Contains NewsAPI key. Must be in directory above repo.")
parser.add_argument('-url_path', default='url_content_lookup.csv', type=str, nargs=1,
help="Directory of the url lookup table")
parser.add_argument('-qaly_path', type=str, nargs=1, default='global_prios/global_prios.csv',
help="Path to the QALY table (default =global_prios/global_prios.csv)")
parser.add_argument('-db_filename', default='news.db', type=str, nargs=1,
help="Name of news database. Default = news.db")
parser.add_argument('-periodicity_s', default=3600, type=float, nargs=1,
help="Tweet periodicity (s). Default=3600.")
parser.add_argument('-max_time', default=7*24*3600, type=float, nargs=1,
help="Duration to tweet (s). Default=604800 (1 week).")
parser.add_argument('-tweet_time_window', default=2*7*24.0, type=float, nargs=1,
help="Time window to search into the past for news (hours). Default=336 (2 weeks).")
parser.add_argument('-news_refresh_period', default=24.0/3, type=float, nargs=1,
help="Periodicity to update news database (hours). Default = 8.")
args = parser.parse_args()
dbg_mode = args.dbg_mode
twitter_credentials_filename = args.tw_cred
news_api_filename = args.news_cred
url_path = args.url_path
qaly_path = args.qaly_path
db_filename = args.db_filename
periodicity_s = args.periodicity_s
max_time = args.max_time
tweet_time_window = args.tweet_time_window
news_refresh_period = args.news_refresh_period
credentials_dir = '../'
# Parse twitter credentials from the text file, see https://developer.twitter.com/en/apps
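# (illustrative note, not in the original: the credentials file is assumed to hold
#  one key=value pair per line, in this order, e.g.
#    consumer_token=...
#    consumer_secret=...
#    access_token=...
#    access_token_secret=...
#  only the value after '=' on each line is used)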
fp = open(credentials_dir+twitter_credentials_filename, 'r')
credentials = fp.read().splitlines()
fp.close()
consumer_token = credentials[0].split('=')[1]
consumer_secret = credentials[1].split('=')[1]
access_token = credentials[2].split('=')[1]
access_token_secret = credentials[3].split('=')[1]
# Get news API key
fp = open(credentials_dir+news_api_filename, 'r')
api_key = fp.read().split()[0]
# Set twitter credentials
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
tweepyapi = tweepy.API(auth)
while True:
tweeting.tweet_news(tweepyapi, api_key, qaly_path, url_path,
db_filename, tweet_time_window, news_refresh_period,
dbg_mode=dbg_mode)
sleep(periodicity_s)
|
jaryaman/propNews
|
main.py
|
main.py
|
py
| 3,030 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4716246754
|
# reading file name
fn = input('Enter file name: ')
# making sure user enters valid file name
try:
    fh = open(fn)
except:
    print('Invalid Name')
    quit()
# creating dictionary & list
counts = dict()
lst = list()
# reading through the file
for ln in fh:
    # finding the required line
    if ln.startswith('From '):
        ln = ln.rstrip()
        ln = ln.split()
        # extracting the hours
        ln.pop(0)
        ln.pop(0)
        ln.pop(0)
        ln.pop(0)
        ln.pop(0)
        ln.pop(1)
        hrs = ln[0]
        # extracting only the hour data
        hrs = hrs.split(':')
        hrs.pop(1)
        hrs.pop(1)
        # counting the number of hours
        for hrc in hrs:
            counts[hrc] = counts.get(hrc, 0) + 1
# sorting the count by hours and printing the data
for k, v in sorted(counts.items()):
    print(k, v)
|
sumeetkumar1/My_python_experience
|
P1/Scripts/10.2.py
|
10.2.py
|
py
| 827 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16484232797
|
# ---------------------------------------------------------------------------- #
# #
# Module: main.py #
# Author: oscarmccullough #
# Created: Thu Sep 15 2022 #
# Description: V5 project #
# #
# ---------------------------------------------------------------------------- #
# Library imports
from vex import *
# Brain should be defined by default
brain=Brain()
# define threewireport, makes defining threewireports easier
ThreeWirePort = brain.three_wire_port
#toggle variable example
pneumaticsToggle = False
# define all motors and devices here
# motors example
# name = Motor(PORT, GearSetting, reversed)
frontLeft = Motor(Ports.PORT1, GearSetting.RATIO_18_1, True)
backLeft = Motor(Ports.PORT2, GearSetting.RATIO_18_1, True)
frontRight = Motor(Ports.PORT10, GearSetting.RATIO_18_1, False)
backRight = Motor(Ports.PORT20, GearSetting.RATIO_18_1, False)
# use motor_groups to group together motors on the same side
leftDrive = MotorGroup(frontLeft, backLeft)
rightDrive = MotorGroup(frontRight, backRight)
# controller example - controller = Controller(ControllerType.PRIMARY | PARTNER )
controller = Controller(ControllerType.PRIMARY)
# inertial sensor example - imu = Inertial(PORT)
imu = Inertial(Ports.PORT15)
# pneumatics example pneu1 = Pneumatics(ThreeWirePort)
pneu1 = Pneumatics(ThreeWirePort.a)
# RotationSensor Example rot1 = Rotation(PORT)
rot1 = Rotation(Ports.PORT12)
# function callbacks - useful for button.pressed
def toggle_pneumatics():
    global pneumaticsToggle
    pneumaticsToggle = not pneumaticsToggle
# define callbacks for when a button is pressed
controller.buttonL1.pressed(toggle_pneumatics)
brain.screen.print("Hello V5\n")
# When using an inertial sensor, you need to calibrate it
def imu_calibrate():
    global imu
    # start calibrating
    imu.calibrate()
    while imu.is_calibrating():
        wait(50, MSEC)
# pre-autonomous function - this runs before a match starts and is where sensors are initialized, rezeroed, etc.
def pre_autonomous():
    brain.screen.clear_screen()
    brain.screen.print("Entering Pre-Auton\n")
    # Call our IMU Sensor Calibration
    imu_calibrate()
# autonomous function - this runs your autonomous code, the 15 seconds at the start of the match
def autonomous():
    brain.screen.clear_screen()
    brain.screen.print("Autonomous...\n")
# usercontrol - this runs your driver code, or the last 1:45 of the match
def user_control():
    brain.screen.clear_screen()
    # add a while loop to ensure everything stays running
    while True:
        # Tank Drive, independent control of left and right side
        # Axis 3 - Y-Axis on Left Joystick, Axis 2 - Y-Axis on Right Joystick
        leftDrive.spin(FORWARD, controller.axis3.position(), VelocityUnits.PERCENT)
        rightDrive.spin(FORWARD, controller.axis2.position(), VelocityUnits.PERCENT)
        # Arcade Drive - Y-Axis controls vertical movement, X-Axis controls horizontal
        # Axis 4 - X-Axis on Left Joystick, Axis 1 - X-Axis on Right Joystick
        # define variables for vertical and horizontal movement
        # uncomment these lines if you want to use arcade drive
        # vertical = controller.axis3.position()
        # horizontal = controller.axis1.position()
        # leftDrive.spin(FORWARD, vertical - horizontal, VelocityUnits.PERCENT)
        # rightDrive.spin(FORWARD, vertical + horizontal, VelocityUnits.PERCENT)
        # running pneumatics with toggle and callback
        if pneumaticsToggle:
            pneu1.open()
        else:
            pneu1.close()
        wait(10, MSEC)
# Define competition control instance
comp = Competition(user_control, autonomous)
pre_autonomous()
|
odm3/PotomacPythonTemplate
|
src/main.py
|
main.py
|
py
| 4,168 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15332380058
|
"""
Handle photobooth configuration.
"""
import os
import sys
import logging
import yaml
class Configuration:
"""
Create configuration object.
"""
def __init__(self, file):
self.logger = logging.getLogger(__name__)
self.file = file
self._get_config()
def _get_config(self):
"""
Try to extract configuration from YAML file
"""
try:
# Open configuration file and load YAML
with open(self.file, 'r', encoding="utf-8") as f_cfg:
self.logger.debug("Load %s configuration file.", self.file)
config = yaml.safe_load(f_cfg)
# Set configuration attribute
self.resolution = config['resolution']
self.pictures_directory = os.path.expanduser(config['pictures_directory'])
# Set meta for each language
for lang in config['languages'].keys():
logging.debug("Set lang [%s]", config['languages'])
setattr(self, lang, config['languages'][lang])
except KeyError as key_e:
self.logger.error("Parameters missing in configuration file: %s.", key_e, exc_info=True)
sys.exit(2)
except (OSError, yaml.YAMLError):
self.logger.error("Failed to parse configuration file", exc_info=True)
sys.exit(2)
|
diablo02000/pyphotobooth
|
pyphotobooth/libs/configuration.py
|
configuration.py
|
py
| 1,363 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27318831763
|
# @PascalPuchtler
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This class is inspired by https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/TFLite_detection_webcam.py
class ObjectDetection:
def __init__(self, objectDetectionNetwork, labelPath):
self.objectDetectionNetwork = objectDetectionNetwork
self.labels = self.getLabels(labelPath)
def analyseImage(self, image):
boxes, classes, scores = self.objectDetectionNetwork.analyseImage(image)
imH=image.shape[0]
imW=image.shape[1]
pylons = self.filterAndLabelBoxes(boxes, classes, scores, imH, imW )
return pylons
def filterAndLabelBoxes(self, boxes, classes, scores, imH, imW, minConfThreshold = 0.5):
# Loop over all detections and draw detection box if confidence is above minimum threshold
objects = []
for i in range(len(scores)):
if ((scores[i] > minConfThreshold) and (scores[i] <= 1.0)):
label = str(int(classes[i]))
if int(classes[i]) < len(self.labels):
label = self.labels[int(classes[i])]
objects.append({
'ymin': int(max(1,(boxes[i][0] * imH))),
'xmin': int(max(1,(boxes[i][1] * imW))),
'ymax': int(min(imH,(boxes[i][2] * imH))),
'xmax': int(min(imW,(boxes[i][3] * imW))),
'label': label,
'score': scores[i]
})
return objects
def getLabels(self, labelPath):
with open(labelPath, 'r') as f:
labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
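# (illustrative note, not in the original: e.g. ['???', 'person', 'bicycle', ...]
#  becomes ['person', 'bicycle', ...], so class id 0 then maps to 'person')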
if labels[0] == '???':
del(labels[0])
return labels
|
iisys-hof/autonomous-driving
|
car-controller/src/mainController/Controller/ObjectDetection/ObjectDetection.py
|
ObjectDetection.py
|
py
| 2,582 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20538778289
|
# https://leetcode.com/problems/word-search-ii/
"""
Time complexity:- O(N * M * W), where N and M are the dimensions of the board and W is the total number of characters in the words.
Space Complexity:- O(W)
"""
"""
Intuition:
The findWords method uses a Trie data structure to efficiently search for words on the board.
It iterates through the board cells and starts the search from each cell if it is a prefix in the Trie.
The find_str function performs a depth-first search (DFS) on the board to find words in the Trie.
The unique words found are stored in the res set.
"""
from collections import defaultdict
from functools import reduce
from typing import List
class Solution:
    def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
        # Create a Trie data structure
        Trie = lambda: defaultdict(Trie)
        trie = Trie()
        END = True
        # Build the trie using the given words
        for word in words:
            reduce(dict.__getitem__, word, trie)[END] = word
        # Set to store unique results
        res = set()
        def find_str(i, j, t):
            # Helper function to explore the board and find words
            if END in t:
                res.add(t[END])
            letter = board[i][j]
            board[i][j] = ""  # Mark the cell as visited
            # Check adjacent cells and continue the search
            if i > 0 and board[i - 1][j] in t:
                find_str(i - 1, j, t[board[i - 1][j]])
            if j > 0 and board[i][j - 1] in t:
                find_str(i, j - 1, t[board[i][j - 1]])
            if i < len(board) - 1 and board[i + 1][j] in t:
                find_str(i + 1, j, t[board[i + 1][j]])
            if j < len(board[0]) - 1 and board[i][j + 1] in t:
                find_str(i, j + 1, t[board[i][j + 1]])
            board[i][j] = letter  # Restore the original cell value
            return
        # Iterate through the board
        for i, row in enumerate(board):
            for j, char in enumerate(row):
                # If the current cell is a prefix in the trie, start the search
                if board[i][j] in trie:
                    find_str(i, j, trie[board[i][j]])
        return list(res)
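# --- hedged usage sketch added for illustration (not part of the original file);
# the board and word list below are the standard LeetCode example, included here
# only to show how findWords is called ---
if __name__ == "__main__":
    demo_board = [
        ["o", "a", "a", "n"],
        ["e", "t", "a", "e"],
        ["i", "h", "k", "r"],
        ["i", "f", "l", "v"],
    ]
    demo_words = ["oath", "pea", "eat", "rain"]
    print(sorted(Solution().findWords(demo_board, demo_words)))  # expected: ['eat', 'oath']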
|
Amit258012/100daysofcode
|
Day96/word_search_2.py
|
word_search_2.py
|
py
| 2,234 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9297911197
|
import os
# Specified path
s = input('Please input some path:')
path = "C:/Users/77718/Documents/Work/Pyhton/KBTU/"+ s
# Get list of directories present in the specified path
dirs = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))]
# Get list of files present in the specified path
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
# Print the list of directories and files
print("List of Directories:")
for d in dirs:
    print(d)
print("\nList of Files:")
for f in files:
    print(f)
|
DayFay1/KBTU
|
TSISVI/dir-and-files/1.py
|
1.py
|
py
| 541 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70129578749
|
#-----------------------------------------------------------------------------------------#
import torch
import matplotlib.pyplot as plt
import deepwave
from deepwave import scalar
import numpy as np
import warnings
#-----------------------------------------------------------------------------------------#
warnings.filterwarnings("ignore", category=UserWarning)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
#-----------------------------------------------------------------------------------------#
ny = 500
nx = 500
# vp = 1500 * torch.ones(ny, nx)
ny = 2301; nx = 751; dx = 4.0
v = torch.from_file('data/modeling/velocity.bin', size=ny*nx).reshape(ny, nx).to(device)
ny = 100
nx = 300
# vp = 1500 * torch.ones(ny, nx)
# vs = 1000 * torch.ones(ny, nx)
# rho = 2200 * torch.ones(ny, nx)
# 1) Create 2 layers
layer_boundary = ny // 2 # Create a layer boundary in the middle of the y-axis
# 2 & 3) Define vp for top and bottom layers
vp = torch.ones(ny, nx) # Creating vp tensor
vp[:layer_boundary, :] = 1500 # Top layer
vp[layer_boundary:, :] = 4000 # Bottom layer
#-----------------------------------------------------------------------------------------#
# NOTE in case for QC input velocity
# v = v.cpu().numpy()
# plt.imshow(np.rot90(v, 3), cmap='gray', vmin=2000, vmax=5000)
# plt.show()
#-----------------------------------------------------------------------------------------#
n_shots = 115
n_sources_per_shot = 1
d_source = 20 # 20 * 4m = 80m
first_source = 10 # 10 * 4m = 40m
source_depth = 2 # 2 * 4m = 8m
freq = 25
nt = 750
dt = 0.004
peak_time = 1.5 / freq
# source_locations
source_locations = torch.zeros(n_shots, n_sources_per_shot, 2, dtype=torch.long, device=device)
source_locations[..., 1] = source_depth
# source_locations[:, 0, 0] = (torch.arange(n_shots) * d_source +
# first_source)
# source_amplitudes
source_amplitudes = (deepwave.wavelets.ricker(freq, nt, dt, peak_time)
.repeat(n_shots, n_sources_per_shot, 1)
.to(device))
out = scalar(v, dx, dt, source_amplitudes=source_amplitudes,
source_locations=source_locations,
accuracy=8,
pml_freq=freq)
# receiver_amplitudes = out[-1]
# vmin, vmax = torch.quantile(receiver_amplitudes[0],
# torch.tensor([0.05, 0.95]).to(device))
# _, ax = plt.subplots(1, 2, figsize=(10.5, 7), sharey=True)
# ax[0].imshow(receiver_amplitudes[57].cpu().T, aspect='auto',
# cmap='gray', vmin=vmin, vmax=vmax)
# ax[1].imshow(receiver_amplitudes[:, 192].cpu().T, aspect='auto',
# cmap='gray', vmin=vmin, vmax=vmax)
# ax[0].set_xlabel("Channel")
# ax[0].set_ylabel("Time Sample")
# ax[1].set_xlabel("Shot")
# plt.tight_layout()
# plt.show()
# receiver_amplitudes.cpu().numpy().tofile('test.bin')
|
PongthepGeo/geophysics_23
|
codes/seismic/keep/test_forward.py
|
test_forward.py
|
py
| 2,872 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75006495547
|
import os
mypath = r'\Documents\Certificates' #change to correct folder path
files = []
for r, d, f in os.walk(mypath):
    for file in f:
        if '.pdf' in file: # you can change this to e.g .txt, to get any text file
            files.append(file.replace('_', ' ').replace('.pdf', '').title())
for f in range(len(files)):
    print(f+1, files[f], sep='. ')
|
djunehor/pdfs-directory
|
run.py
|
run.py
|
py
| 363 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40542696905
|
from flask import Flask, request, jsonify
from flask_mail import Mail, Message
import json
import sqlalchemy
from sqlalchemy import or_,desc
from tables import db,GDPs, Impact, ImpactPredicted
app = Flask(__name__)
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = '***@gmail.com' # enter your email he$
app.config['MAIL_DEFAULT_SENDER'] = '***@gmail.com' # enter your ema$
app.config['MAIL_PASSWORD'] = '****' # enter your password here
mail = Mail(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://doadmin:k6hjzqqbj408kaa7@covinsights-database-do-user-7582405-0.a.db.ondigitalocean.com:25060/CovInsights'
db.init_app(app)
@app.route("/", methods=['GET', 'POST'])
def test():
return 'true'
@app.route("/getGDPs", methods=['GET', 'POST'])
def getGDPs():
result = GDPs.query.all()
response = []
for row in result:
l = []
l.append(row.Country)
l.append(row.GDP)
l.append(row.Code)
response.append(l)
response = {
"data" : response
}
return response
@app.route("/getImpact", methods=['GET', 'POST'])
def getImpact():
response = { 'status' : True }
try:
content = json.loads(request.data)
country= content['country']
result = Impact.query.filter_by(Economy=country).all()
values = []
for row in result:
if row.Sector == '_All' :
response['gdp'] = row.GDP
response['Emp'] = row.Employment
else:
values.append(row.GDP)
response['values'] = values
currentGDP = GDPs.query.get(country)
response['currentGDP'] = currentGDP.GDP
print(response)
except Exception as e:
print('Exception:', e.__class__)
return {
'status': False,
'content': 'Unknown error. Please contact the developer.'
}
return response
@app.route("/getImpactPredicted", methods=['GET', 'POST'])
def getImpactPredicted():
response = { 'status' : True }
try:
content = json.loads(request.data)
country= content['country']
result = ImpactPredicted.query.filter_by(Economy=country).all()
values = []
for row in result:
if row.Sector == '_All' :
response['gdp'] = row.GDP
response['Emp'] = row.Employment
else:
values.append(row.GDP)
response['values'] = values
currentGDP = GDPs.query.get(country)
response['currentGDP'] = currentGDP.GDP
print(response)
except Exception as e:
print('Exception:', e.__class__)
return {
'status': False,
'content': 'Unknown error. Please contact the developer.'
}
return response
@app.route("/subscribe", methods=['GET', 'POST'])
def subscribe():
content = json.loads(request.data)
mailID = content['mailid']
response = {}
try:
msg = Message("Your report is here | CovInsights", recipients=[mailID])
msg.body = "Thank you for using our service!"
with app.open_resource("../CovInsights Report.pdf") as fp:
msg.attach("CovInsights Report.pdf", "application/pdf", fp.read())
mail.send(msg)
except Exception as e:
response['status'] = False
response['error'] = str(e)
return response
response['status'] = True
return response
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
|
aftex261/DigitalOcean
|
B Sudharsan - CovInsights App/Backend Files/app.py
|
app.py
|
py
| 3,625 |
python
|
en
|
code
| 5 |
github-code
|
6
|
30578248385
|
import cv2
from picamera import PiCamera
from picamera.array import PiRGBArray
import time, socket, logging, configparser, argparse, sys
from utils import Utils
parser = argparse.ArgumentParser()
parser.add_argument('--d', nargs=1, default=None)
args = parser.parse_args()
APP_DIR = args.d[0] if args.d != None else "./"
CONFIGURATIONS = APP_DIR + 'configuration.ini'
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler(APP_DIR + 'logs/video-streamer | ' + str(time.asctime()) + '.log'),
logging.StreamHandler()
]
)
config = configparser.ConfigParser()
if len(config.read(CONFIGURATIONS)) == 0:
logging.error("Could Not Read Configurations File: " + CONFIGURATIONS)
sys.exit()
DRONE_ID = config['drone']['id']
HOST_IP = config['cloud-app']['ip']
VIDEO_PORT = int( config['cloud-app']['video-port'])
GRAYSCALE = config['video']['grayscale'].lower() == 'true'
FRAMES_PER_SECOND = int( config['video']['fps'])
JPEG_QUALITY = int( config['video']['quality'])
WIDTH = int( config['video']['width'])
HEIGHT = int( config['video']['height'])
logging.info('FPS: %s Quality: %s Width %s Height %s Grayscale: %s',
str(FRAMES_PER_SECOND), str(JPEG_QUALITY), str(WIDTH), str(HEIGHT), GRAYSCALE)
logging.info('Drone ID: %s Video Recipient: %s:%s', str(DRONE_ID), str(HOST_IP), str(VIDEO_PORT))
camera = None
video_socket = None
while(True):
try:
camera = PiCamera()
camera.resolution = (WIDTH, HEIGHT)
camera.framerate = FRAMES_PER_SECOND
rawCapture = PiRGBArray(camera, size=(WIDTH, HEIGHT))
time.sleep(0.1)
logging.info("Camera module initiated")
video_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
video_socket.connect((HOST_IP, VIDEO_PORT))
logging.info("Socket Opened, Video Streaming started")
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
image_data = frame.array
image_data = cv2.rotate(image_data, cv2.ROTATE_180)
if GRAYSCALE:
image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2GRAY)
code, jpg_buffer = cv2.imencode(".jpg", image_data, [int(cv2.IMWRITE_JPEG_QUALITY), JPEG_QUALITY])
datagramMsgBytes = Utils.create_datagram_message(DRONE_ID, jpg_buffer)
video_socket.sendall(datagramMsgBytes)
rawCapture.truncate(0)
except Exception as e:
logging.error("Video Stream Ended: "+str(e))
if camera != None:
camera.close()
if video_socket != None:
video_socket.close()
time.sleep(2)
|
petkanov/drone-raspberry-py-app
|
video_streamer.py
|
video_streamer.py
|
py
| 2,824 |
python
|
en
|
code
| 2 |
github-code
|
6
|
11890081214
|
L,R = map(int,input().split())
sum = 0
if L%2 == 0:
    start = L+1
else:
    start = L
for i in range(start,R+1,2):
    sum = sum + i
print(sum)
|
syedjaveed18/codekata-problems
|
Arrays/Q135.py
|
Q135.py
|
py
| 156 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31477418429
|
#!/usr/bin/env python
import argparse
import re
import json
from os.path import isfile
def a(lines):
return len(lines)
def t(requests):
stats = {}
for dict_ in requests:
method = dict_['method']
if method not in stats.keys():
stats[method] = 0
stats[method] += 1
ret = []
for key in stats:
dict_ = {}
dict_['method'] = key
dict_['count'] = stats[key]
ret.append(dict_)
return ret
def f(requests):
requests.sort(key=lambda x: x['url'])
ans = []
flag = True
tmp = {}
for elm in requests:
if flag:
tmp['url'] = elm['url']
tmp['count'] = 0
flag = False
if elm['url'] != tmp['url']:
ans.append(tmp)
tmp = {}
tmp['url'] = elm['url']
tmp['count'] = 1
else:
tmp['count'] += 1
ans.append(tmp)
ans.sort(key=lambda x: x['count'], reverse=True)
ans = ans[:10]
ret = []
for request in ans:
dict_ = {}
dict_['url'] = request['url']
dict_['count'] = request['count']
ret.append(dict_)
return ret
def c(requests):
requests = list(filter(lambda x: x['code'] in range(400, 500), requests))
requests.sort(key=lambda x: x['len'], reverse=True)
ans = requests[:5]
ret = []
for request in ans:
dict_ = {}
dict_['url'] = request['url']
dict_['code'] = request['code']
dict_['len'] = request['len']
dict_['ip'] = request['ip']
ret.append(dict_)
return ret
def s(requests):
requests = list(filter(lambda x: x['code'] in range(500, 600), requests))
requests.sort(key=lambda x: x['ip'])
ans = []
flag = True
tmp = {}
for elm in requests:
if flag:
tmp['ip'] = elm['ip']
tmp['count'] = 0
flag = False
if elm['ip'] != tmp['ip']:
ans.append(tmp)
tmp = {}
tmp['ip'] = elm['ip']
tmp['count'] = 1
else:
tmp['count'] += 1
ans.append(tmp)
ans.sort(key=lambda x: x['count'], reverse=True)
ans = ans[:5]
ret = []
for request in ans:
dict_ = {}
dict_['ip'] = request['ip']
dict_['count'] = request['count']
ret.append(dict_)
return ret
def to_requests(lines):
requests = []
for line in lines:
dict_ = {}
splited = re.split('[ "]', line)
dict_['ip'] = splited[0]
dict_['method'] = splited[6]
dict_['url'] = splited[7]
dict_['code'] = int(splited[10])
if splited[11] == '-':
dict_['len'] = 0
else:
dict_['len'] = int(splited[11])
requests.append(dict_)
return requests
def main():
outfile = 'analyzed'
parser = argparse.ArgumentParser(usage='analyze.py [--json] a | t | f | c | s <FILE>',
epilog=f'The output file name is "{outfile}".')
parser.add_argument('task', action='store', help='see README.md', choices=['a', 't', 'f', 'c', 's'])
parser.add_argument('file', action='store', metavar='FILE', help='input file')
parser.add_argument('--json', action='store_true', help='write the output in JSON format')
args = parser.parse_args()
if isfile(outfile):
print(f"File '{outfile}' exists, overwrite? (yes/NO): ", end='')
in_ = input()
if not (in_ == 'y' or in_ == 'yes'):
raise FileExistsError()
with open(args.file) as fl:
lines = fl.read().split('\n')
if lines[-1] == '':
del lines[-1]
task = args.task
if task == 'a':
res = a(lines)
else:
requests = to_requests(lines)
if task == 't':
res = t(requests)
elif task == 'f':
res = f(requests)
elif task == 'c':
res = c(requests)
elif task == 's':
res = s(requests)
else:
raise Exception()
with open(outfile, 'w') as fl:
if args.json:
fl.write(json.dumps(res))
else:
if isinstance(res, list):
for line in res:
for key in line:
fl.write(str(line[key]) + ' ')
fl.write('\n')
elif isinstance(res, int):
fl.write(str(res))
else:
raise Exception()
if __name__ == '__main__':
main()
|
gatart/2021-1-MAILRU-SDET-Python-G-Talamanov
|
Homework_5/analyze.py
|
analyze.py
|
py
| 4,579 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3645997925
|
def is_leap(year):
    leap = False
    if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):
        # if the year is divisible by 400 or divisible by 4 but not by 100, it's a leap year
        leap = True
    return leap
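# (worked examples added for clarity, not in the original:
#  is_leap(2000) -> True, is_leap(1900) -> False, is_leap(2024) -> True)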
year = int(input("Enter a year: "))
print(is_leap(year))
|
IancuIonut/Leap_Year_Calculator
|
main.py
|
main.py
|
py
| 290 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9854004184
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
import backtrader.indicators as btind
import backtrader.feeds as btfeeds
class GoldenCross(bt.Strategy):
params = (('fast', 50), ('slow', 200),
('order_percentage', 0.95), ('ticker', 'SPY'))
def __init__(self):
self.fast_moving_average = bt.indicators.SMA(
self.data.close, period=self.params.fast, plotname='50 Day Moving Average'
)
self.slow_moving_average = bt.indicators.SMA(
self.data.close, period=self.params.slow, plotname='200 Day Moving Average'
)
self.crossover = bt.indicators.CrossOver(
self.fast_moving_average, self.slow_moving_average)
def next(self):
# If position size is 0, we own 0 shares
if self.position.size == 0:
# Crossover is 1, so Golden Cross happened
if self.crossover > 0:
amount_to_invest = (
self.params.order_percentage * self.broker.cash)
self.size = math.floor(amount_to_invest / self.data.close)
print("Buy {} share of {} at {}".format(
self.size, self.params.ticker, self.data.close[0]))
self.buy(size=self.size)
if self.position.size > 0:
if self.crossover < 0:
print("Sell {} shares of {} at {}".format(
self.size, self.params.ticker, self.data.close[0]))
self.close()
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
cerebro.addstrategy(GoldenCross)
# Datas are in a subfolder of the samples. Need to find where the script is
# because it could have been called from anywhere
modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
datapath = os.path.join(
modpath, '/Users/alfredopzr/Desktop/Coinbase-Python/Coinbase-Python/datas/SPY.csv')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 3),
# Do not pass values before this date
todate=datetime.datetime(2021, 9, 13),
# Do not pass values after this date
reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(10000.0)
# Add a FixedSize sizer according to the stake
# cerebro.addsizer(bt.sizers.AllInSizer, percents=95)
# Set the commission
cerebro.broker.setcommission(commission=0.00)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Plot the result
cerebro.plot()
|
alfredopzr/backtesting-python
|
backtrader/strategies/GoldenCross.py
|
GoldenCross.py
|
py
| 3,173 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5707632801
|
'''
Posts routes
/posts (args: position, amount)
/post (args: id)
'''
from flask_api import status
from flask import request
from flask import Blueprint
from service.upload_image import UploadImage
from databases.models.post import Post, PostStatus
from databases.models.photo import Photo
from decorators.token import token_required
from exceptions.posts import PostArgsError, APIexception
from exceptions.token import DecodeToken
from exceptions.validate import InvalidImage
from config.db import db
from config.config import BASE_COUNT_POSTS, BASE_POSITION_POSTS, SECRET_KEY, ACTIVE_POST_STATUS
from validations.routes.new_post import PostValid
from tokens.token_hendler import TokenManager
posts = Blueprint('posts', __name__, template_folder='templates')
@posts.route('/posts', endpoint='get_posts', methods=['GET'])
@token_required
def get_posts():
'''Posts slice
?position=0&amount=0
'''
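# (illustrative note, not in the original: e.g. GET /posts?position=10&amount=5
#  returns {'posts': [...up to 5 posts...], 'size': <total post count>})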
position = request.args.get('position', default=BASE_POSITION_POSTS, type=int)
amount = request.args.get('amount', default=BASE_COUNT_POSTS, type=int)
if position < 0 or amount < 0:
return PostArgsError.message, status.HTTP_400_BAD_REQUEST
slice_posts = Post.query.order_by(
Post.id.desc()).offset(position).limit(amount).all()
amount = Post.query.count()
return {'posts': slice_posts, 'size': amount}, status.HTTP_200_OK
@posts.route('/new-post', endpoint='set_post', methods=['POST'])
@token_required
def set_post():
'''Post
add new post
title: str
text: str
status: str
likes: int
view: int
shared: int
user: User
photos: Photo
'''
try:
post_form = PostValid(**request.form)
validate_date = post_form.validate()
except APIexception as error:
return error.message, status.HTTP_400_BAD_REQUEST
try:
user_id = TokenManager.get_id_user(SECRET_KEY ,request.headers['Access-Token'])
except DecodeToken as error:
return error.message, status.HTTP_400_BAD_REQUEST
img_photo = None
if request.method == 'POST' and 'file' in request.files:
try:
img_photo = UploadImage(request.files['file']).save_image()
except InvalidImage as error:
return error.message, status.HTTP_400_BAD_REQUEST
post_status = PostStatus.query.filter_by(name=ACTIVE_POST_STATUS).first()
if post_status is None:
status_post = PostStatus()
status_post.name = ACTIVE_POST_STATUS
db.session.add(status_post) # pylint: disable=no-member
db.session.commit() # pylint: disable=no-member
try:
new_post = Post()
new_post.title = validate_date['title']
new_post.text = validate_date['text']
new_post.status_id = PostStatus.query.filter_by(name=ACTIVE_POST_STATUS).first().id
new_post.user_id = user_id
db.session.add(new_post) # pylint: disable=no-member
db.session.commit() # pylint: disable=no-member
if img_photo is not None:
photo = Photo()
photo.photo = img_photo
photo.user_id = user_id
photo.post_id = new_post.id
db.session.add(photo) # pylint: disable=no-member
db.session.commit() # pylint: disable=no-member
except APIexception as error:
return error.message, status.HTTP_400_BAD_REQUEST
return {'post' : new_post}, status.HTTP_200_OK
|
Dolzhenkov-Andrii/api
|
routes/posts.py
|
posts.py
|
py
| 3,478 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25014580521
|
import os
move = input("Please enter move: ")
name = input("Please enter csv name: ")
path = f'dataset/{move}/'
files = os.listdir(path)
counter = 1
for file in files:
    _, ext = os.path.splitext(file)
    os.rename(os.path.join(path, file), os.path.join(path, name + str(counter) + ext))
    counter += 1
|
ahbarari/bachelor-project
|
rename.py
|
rename.py
|
py
| 310 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40297312420
|
import random
# <--->
from .board import Spawn, Player, Launch
from .helpers import find_shortcut_routes
from .logger import logger
# <--->
def defend_shipyards(agent: Player):
board = agent.board
need_help_shipyards = []
for sy in agent.shipyards:
if sy.action:
continue
incoming_hostile_fleets = sy.incoming_hostile_fleets
incoming_allied_fleets = sy.incoming_allied_fleets
if not incoming_hostile_fleets:
continue
incoming_hostile_power = sum(x.ship_count for x in incoming_hostile_fleets)
incoming_hostile_time = min(x.eta for x in incoming_hostile_fleets)
incoming_allied_power = sum(
x.ship_count
for x in incoming_allied_fleets
if x.eta < incoming_hostile_time
)
ships_needed = incoming_hostile_power - incoming_allied_power
if sy.ship_count > ships_needed:
sy.set_guard_ship_count(min(sy.ship_count, int(ships_needed * 1.1)))
continue
# spawn as much as possible
num_ships_to_spawn = min(
int(agent.available_kore() // board.spawn_cost), sy.max_ships_to_spawn
)
if num_ships_to_spawn:
logger.debug(f"Spawn ships to protect shipyard {sy.point}")
sy.action = Spawn(num_ships_to_spawn)
need_help_shipyards.append(sy)
for sy in need_help_shipyards:
incoming_hostile_fleets = sy.incoming_hostile_fleets
incoming_hostile_time = min(x.eta for x in incoming_hostile_fleets)
for other_sy in agent.shipyards:
if other_sy == sy or other_sy.action or not other_sy.available_ship_count:
continue
distance = other_sy.distance_from(sy)
if distance == incoming_hostile_time - 1:
routes = find_shortcut_routes(
board, other_sy.point, sy.point, agent, other_sy.ship_count
)
if routes:
logger.info(f"Send reinforcements {other_sy.point}->{sy.point}")
other_sy.action = Launch(
other_sy.available_ship_count, random.choice(routes)
)
elif distance < incoming_hostile_time - 1:
other_sy.set_guard_ship_count(other_sy.ship_count)
|
w9PcJLyb/kore-beta-bot
|
src/defense.py
|
defense.py
|
py
| 2,341 |
python
|
en
|
code
| 10 |
github-code
|
6
|
72169387389
|
"""
One table verb initializations
"""
import itertools
from .operators import DataOperator
from .expressions import Expression
__all__ = ['define', 'create', 'sample_n', 'sample_frac', 'select',
'rename', 'distinct', 'unique', 'arrange', 'group_by',
'ungroup', 'group_indices', 'summarize',
'query', 'do', 'head', 'tail', 'pull', 'slice_rows',
# Aliases
'summarise', 'mutate', 'transmute',
]
class define(DataOperator):
"""
Add column to DataFrame
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : strs, tuples, optional
Expressions or ``(name, expression)`` pairs. This should
be used when the *name* is not a valid python variable
name. The expression should be of type :class:`str` or
an *iterable* with the same number of elements as the
dataframe.
kwargs : dict, optional
``{name: expression}`` pairs.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 2, 3]})
>>> df >> define(x_sq='x**2')
x x_sq
0 1 1
1 2 4
2 3 9
>>> df >> define(('x*2', 'x*2'), ('x*3', 'x*3'), x_cubed='x**3')
x x*2 x*3 x_cubed
0 1 2 3 1
1 2 4 6 8
2 3 6 9 27
>>> df >> define('x*4')
x x*4
0 1 4
1 2 8
2 3 12
Notes
-----
If :obj:`plydata.options.modify_input_data` is ``True``,
:class:`define` will modify the original dataframe.
"""
def __init__(self, *args, **kwargs):
self.set_env_from_verb_init()
cols = []
exprs = []
for arg in args:
if isinstance(arg, str):
col = expr = arg
else:
col, expr = arg
cols.append(col)
exprs.append(expr)
_cols = itertools.chain(cols, kwargs.keys())
_exprs = itertools.chain(exprs, kwargs.values())
self.expressions = [Expression(stmt, col)
for stmt, col in zip(_exprs, _cols)]
class create(define):
"""
Create DataFrame with columns
Similar to :class:`define`, but it drops the existing columns.
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : strs, tuples, optional
Expressions or ``(name, expression)`` pairs. This should
be used when the *name* is not a valid python variable
name. The expression should be of type :class:`str` or
an *interable* with the same number of elements as the
dataframe.
kwargs : dict, optional
``{name: expression}`` pairs.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 2, 3]})
>>> df >> create(x_sq='x**2')
x_sq
0 1
1 4
2 9
>>> df >> create(('x*2', 'x*2'), ('x*3', 'x*3'), x_cubed='x**3')
x*2 x*3 x_cubed
0 2 3 1
1 4 6 8
2 6 9 27
>>> df >> create('x*4')
x*4
0 4
1 8
2 12
"""
class sample_n(DataOperator):
"""
Sample n rows from dataframe
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
n : int, optional
Number of items from axis to return.
replace : boolean, optional
Sample with or without replacement. Default = False.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
inf and -inf values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames, 1 for Panels).
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> rs = np.random.RandomState(1234567890)
>>> df = pd.DataFrame({'x': range(20)})
>>> df >> sample_n(5, random_state=rs)
x
5 5
19 19
14 14
8 8
17 17
"""
def __init__(self, n=1, replace=False, weights=None,
random_state=None, axis=None):
self.kwargs = dict(n=n, replace=replace, weights=weights,
random_state=random_state, axis=axis)
class sample_frac(DataOperator):
"""
Sample a fraction of rows from dataframe
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : boolean, optional
Sample with or without replacement. Default = False.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
inf and -inf values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames, 1 for Panels).
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> rs = np.random.RandomState(1234567890)
>>> df = pd.DataFrame({'x': range(20)})
>>> df >> sample_frac(0.25, random_state=rs)
x
5 5
19 19
14 14
8 8
17 17
"""
def __init__(self, frac=None, replace=False, weights=None,
random_state=None, axis=None):
self.kwargs = dict(
frac=frac, replace=replace, weights=weights,
random_state=random_state, axis=axis)
class select(DataOperator):
"""
Select columns by name
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
names : tuple, optional
Names of columns in the dataframe. Normally, they are strings
and can include a slice e.g :py:`slice('col2', 'col5')`.
You can also exclude columns by prepending a ``-`` e.g
:py:`select('-col1')`, which will include all columns other than
*col1*.
startswith : str or tuple, optional
All column names that start with this string will be included.
endswith : str or tuple, optional
All column names that end with this string will be included.
contains : str or tuple, optional
All column names that contain with this string will be included.
matches : str or regex or tuple, optional
All column names that match the string or a compiled regex pattern
will be included. A tuple can be used to match multiple regexs.
drop : bool, optional
If ``True``, the selection is inverted. The unspecified/unmatched
columns are returned instead. Default is ``False``.
Examples
--------
>>> import pandas as pd
>>> x = [1, 2, 3]
>>> df = pd.DataFrame({'bell': x, 'whistle': x, 'nail': x, 'tail': x})
>>> df >> select('bell', 'nail')
bell nail
0 1 1
1 2 2
2 3 3
>>> df >> select('bell', 'nail', drop=True)
whistle tail
0 1 1
1 2 2
2 3 3
>>> df >> select('whistle', endswith='ail')
whistle nail tail
0 1 1 1
1 2 2 2
2 3 3 3
>>> df >> select('bell', matches=r'\\w+tle$')
bell whistle
0 1 1
1 2 2
2 3 3
You can select column slices too. Like :meth:`~pandas.DataFrame.loc`,
the stop column is included.
>>> df = pd.DataFrame({'a': x, 'b': x, 'c': x, 'd': x,
... 'e': x, 'f': x, 'g': x, 'h': x})
>>> df
a b c d e f g h
0 1 1 1 1 1 1 1 1
1 2 2 2 2 2 2 2 2
2 3 3 3 3 3 3 3 3
>>> df >> select('a', slice('c', 'e'), 'g')
a c d e g
0 1 1 1 1 1
1 2 2 2 2 2
2 3 3 3 3 3
You can exclude columns by prepending ``-``
>>> df >> select('-a', '-c', '-e')
b d f g h
0 1 1 1 1 1
1 2 2 2 2 2
2 3 3 3 3 3
Remove and place column at the end
>>> df >> select('-a', '-c', '-e', 'a')
b d f g h a
0 1 1 1 1 1 1
1 2 2 2 2 2 2
2 3 3 3 3 3 3
Notes
-----
To exclude columns by prepending a minus, the first column
passed to :class:`select` must be prepended with minus.
:py:`select('-a', 'c')` will exclude column ``a``, while
:py:`select('c', '-a')` will not exclude column ``a``.
"""
def __init__(self, *names, startswith=None, endswith=None,
contains=None, matches=None, drop=False):
def as_tuple(obj):
if obj is None:
return tuple()
elif isinstance(obj, tuple):
return obj
elif isinstance(obj, list):
return tuple(obj)
else:
return (obj,)
self.names = names
self.startswith = as_tuple(startswith)
self.endswith = as_tuple(endswith)
self.contains = as_tuple(contains)
self.matches = as_tuple(matches)
self.drop = drop
@staticmethod
def from_columns(*columns):
"""
Create a select verb from the columns specification
Parameters
----------
*columns : list-like | select | str | slice
Column names to be gathered and whose contents will
make values.
Return
------
out : select
Select verb representation of the columns.
"""
from .helper_verbs import select_all, select_at, select_if
n = len(columns)
if n == 0:
return select_all()
elif n == 1:
obj = columns[0]
if isinstance(obj, (select, select_all, select_at, select_if)):
return obj
elif isinstance(obj, slice):
return select(obj)
elif isinstance(obj, (list, tuple)):
return select(*obj)
elif isinstance(obj, str):
return select(obj)
else:
raise TypeError(
"Unrecognised type {}".format(type(obj))
)
else:
return select(*columns)
class rename(DataOperator):
"""
Rename columns
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : tuple, optional
A single positional argument that holds
``{'new_name': 'old_name'}`` pairs. This is useful if the
*old_name* is not a valid python variable name.
kwargs : dict, optional
``{new_name: 'old_name'}`` pairs. If all the columns to be
renamed are valid python variable names, then they
can be specified as keyword arguments.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> x = np.array([1, 2, 3])
>>> df = pd.DataFrame({'bell': x, 'whistle': x,
... 'nail': x, 'tail': x})
>>> df >> rename(gong='bell', pin='nail')
gong whistle pin tail
0 1 1 1 1
1 2 2 2 2
2 3 3 3 3
>>> df >> rename({'flap': 'tail'}, pin='nail')
bell whistle pin flap
0 1 1 1 1
1 2 2 2 2
2 3 3 3 3
Notes
-----
If :obj:`plydata.options.modify_input_data` is ``True``,
:class:`rename` will modify the original dataframe.
"""
lookup = None
def __init__(self, *args, **kwargs):
lookup = args[0] if len(args) else {}
self.lookup = {v: k for k, v in lookup.items()}
self.lookup.update({v: k for k, v in kwargs.items()})
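# (note added for clarity, not in the original: the verb receives
# {new_name: old_name} pairs, but the stored lookup is inverted to
# {old_name: new_name}, which is the orientation pandas.DataFrame.rename expects)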
class distinct(DataOperator):
"""
Select distinct/unique rows
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
columns : list-like, optional
Column names to use when determining uniqueness.
keep : {'first', 'last', False}, optional
- ``first`` : Keep the first occurence.
- ``last`` : Keep the last occurence.
- False : Do not keep any of the duplicates.
Default is False.
kwargs : dict, optional
``{name: expression}`` computed columns. If specified,
these are taken together with the columns when determining
unique rows.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 1, 2, 3, 4, 4, 5],
... 'y': [1, 2, 3, 4, 5, 5, 6]})
>>> df >> distinct()
x y
0 1 1
1 1 2
2 2 3
3 3 4
4 4 5
6 5 6
>>> df >> distinct(['x'])
x y
0 1 1
2 2 3
3 3 4
4 4 5
6 5 6
>>> df >> distinct(['x'], 'last')
x y
1 1 2
2 2 3
3 3 4
5 4 5
6 5 6
>>> df >> distinct(z='x%2')
x y z
0 1 1 1
2 2 3 0
>>> df >> distinct(['x'], z='x%2')
x y z
0 1 1 1
2 2 3 0
3 3 4 1
4 4 5 0
6 5 6 1
>>> df >> define(z='x%2') >> distinct(['x', 'z'])
x y z
0 1 1 1
2 2 3 0
3 3 4 1
4 4 5 0
6 5 6 1
"""
columns = None
keep = 'first'
def __init__(self, *args, **kwargs):
self.set_env_from_verb_init()
if len(args) == 1:
if isinstance(args[0], (str, bool)):
self.keep = args[0]
else:
self.columns = args[0]
elif len(args) == 2:
self.columns, self.keep = args
elif len(args) > 2:
raise Exception("Too many positional arguments.")
# define
if kwargs:
if self.columns is None:
self.columns = []
elif not isinstance(self.columns, list):
self.columns = list(self.columns)
_cols = list(kwargs.keys())
_exprs = list(kwargs.values())
self.columns.extend(_cols)
else:
_cols = []
_exprs = []
self.expressions = [Expression(stmt, col)
for stmt, col in zip(_exprs, _cols)]
class arrange(DataOperator):
"""
Sort rows by column variables
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : tuple
Columns/expressions to sort by.
reset_index : bool, optional (default: True)
If ``True``, the index is reset to a sequential range index.
If ``False``, the original index is maintained.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame({'x': [1, 5, 2, 2, 4, 0],
... 'y': [1, 2, 3, 4, 5, 6]})
>>> df >> arrange('x')
x y
0 0 6
1 1 1
2 2 3
3 2 4
4 4 5
5 5 2
>>> df >> arrange('x', '-y')
x y
0 0 6
1 1 1
2 2 4
3 2 3
4 4 5
5 5 2
>>> df >> arrange('np.sin(y)')
x y
0 4 5
1 2 4
2 0 6
3 2 3
4 1 1
5 5 2
"""
expressions = None
def __init__(self, *args, reset_index=True):
self.set_env_from_verb_init()
self.reset_index = reset_index
name_gen = ('col_{}'.format(x) for x in range(100))
self.expressions = [
Expression(stmt, col)
for stmt, col in zip(args, name_gen)
]
class group_by(define):
"""
Group dataframe by one or more columns/variables
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : strs, tuples, optional
Expressions or ``(name, expression)`` pairs. This should
be used when the *name* is not a valid python variable
name. The expression should be of type :class:`str` or
an *iterable* with the same number of elements as the
dataframe.
add_ : bool, optional
If True, add to existing groups. Default is to create
new groups.
kwargs : dict, optional
``{name: expression}`` pairs.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 5, 2, 2, 4, 0, 4],
... 'y': [1, 2, 3, 4, 5, 6, 5]})
>>> df >> group_by('x')
groups: ['x']
x y
0 1 1
1 5 2
2 2 3
3 2 4
4 4 5
5 0 6
6 4 5
Like :meth:`define`, :meth:`group_by` creates any
missing columns.
>>> df >> group_by('y-1', xplus1='x+1')
groups: ['y-1', 'xplus1']
x y y-1 xplus1
0 1 1 0 2
1 5 2 1 6
2 2 3 2 3
3 2 4 3 3
4 4 5 4 5
5 0 6 5 1
6 4 5 4 5
Columns that are grouped on remain in the dataframe after any
verb operations that do not use the group information. For
example:
>>> df >> group_by('y-1', xplus1='x+1') >> select('y')
groups: ['y-1', 'xplus1']
y-1 xplus1 y
0 0 2 1
1 1 6 2
2 2 3 3
3 3 3 4
4 4 5 5
5 5 1 6
6 4 5 5
Notes
-----
If :obj:`plydata.options.modify_input_data` is ``True``,
:class:`group_by` will modify the original dataframe.
"""
groups = None
def __init__(self, *args, add_=False, **kwargs):
self.set_env_from_verb_init()
super().__init__(*args, **kwargs)
self.add_ = add_
self.groups = [expr.column for expr in self.expressions]
class ungroup(DataOperator):
"""
Remove the grouping variables for dataframe
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 2, 3],
... 'y': [1, 2, 3]})
>>> df >> group_by('x')
groups: ['x']
x y
0 1 1
1 2 2
2 3 3
>>> df >> group_by('x') >> ungroup()
x y
0 1 1
1 2 2
2 3 3
"""
class group_indices(group_by):
"""
Generate a unique id for each group
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : strs, tuples, optional
Expressions or ``(name, expression)`` pairs. This should
be used when the *name* is not a valid python variable
name. The expression should be of type :class:`str` or
        an *iterable* with the same number of elements as the
dataframe. As this verb returns an array, the tuples have
no added benefit over strings.
kwargs : dict, optional
``{name: expression}`` pairs. As this verb returns an
array, keyword arguments have no added benefit over
:class:`str` positional arguments.
Returns
-------
out : numpy.array
Ids for each group
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 5, 2, 2, 4, 0, 4],
... 'y': [1, 2, 3, 4, 5, 6, 5]})
>>> df >> group_by('x')
groups: ['x']
x y
0 1 1
1 5 2
2 2 3
3 2 4
4 4 5
5 0 6
6 4 5
>>> df >> group_by('x') >> group_indices()
array([1, 4, 2, 2, 3, 0, 3])
You can pass the group column(s) as parameters to
:class:`group_indices`
>>> df >> group_indices('x*2')
array([1, 4, 2, 2, 3, 0, 3])
"""
class summarize(define):
"""
Summarise multiple values to a single value
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : strs, tuples, optional
Expressions or ``(name, expression)`` pairs. This should
be used when the *name* is not a valid python variable
name. The expression should be of type :class:`str` or
        an *iterable* with the same number of elements as the
dataframe.
kwargs : dict, optional
``{name: expression}`` pairs.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame({'x': [1, 5, 2, 2, 4, 0, 4],
... 'y': [1, 2, 3, 4, 5, 6, 5],
... 'z': [1, 3, 3, 4, 5, 5, 5]})
Can take only positional, only keyword arguments or both.
>>> df >> summarize('np.sum(x)', max='np.max(x)')
np.sum(x) max
0 18 5
When summarizing after a :class:`group_by` operation
the group columns are retained.
>>> df >> group_by('y', 'z') >> summarize(mean_x='np.mean(x)')
y z mean_x
0 1 1 1.0
1 2 3 5.0
2 3 3 2.0
3 4 4 2.0
4 5 5 4.0
5 6 5 0.0
.. rubric:: Aggregate Functions
    When summarizing, the following functions can be used; they take
    an array and return a *single* number.
- ``min(x)`` - Alias of :func:`numpy.amin` (a.k.a ``numpy.min``).
- ``max(x)`` - Alias of :func:`numpy.amax` (a.k.a ``numpy.max``).
- ``sum(x)`` - Alias of :func:`numpy.sum`.
- ``cumsum(x)`` - Alias of :func:`numpy.cumsum`.
- ``mean(x)`` - Alias of :func:`numpy.mean`.
- ``median(x)`` - Alias of :func:`numpy.median`.
- ``std(x)`` - Alias of :func:`numpy.std`.
- ``first(x)`` - First element of ``x``.
- ``last(x)`` - Last element of ``x``.
- ``nth(x, n)`` - *nth* value of ``x`` or ``numpy.nan``.
    - ``n_distinct(x)`` - Number of distinct elements in ``x``.
- ``n_unique(x)`` - Alias of ``n_distinct``.
- ``n()`` - Number of elements in current group.
    The aliases of the Numpy functions save you a few keystrokes and give
    you better column names, e.g. ``min(x)`` instead of ``np.min(x)`` or
    ``numpy.min(x)`` if you have Numpy imported.
>>> df = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5],
... 'y': [0, 0, 1, 1, 2, 3]})
>>> df >> summarize('min(x)', 'max(x)', 'mean(x)', 'sum(x)',
... 'first(x)', 'last(x)', 'nth(x, 3)')
min(x) max(x) mean(x) sum(x) first(x) last(x) nth(x, 3)
0 0 5 2.5 15 0 5 3
Summarizing groups with aggregate functions
>>> df >> group_by('y') >> summarize('mean(x)')
y mean(x)
0 0 0.5
1 1 2.5
2 2 4.0
3 3 5.0
>>> df >> group_by('y') >> summarize(y_count='n()')
y y_count
0 0 2
1 1 2
2 2 1
3 3 1
You can use ``n()`` even when there are no groups.
>>> df >> summarize('n()')
n()
0 6
"""
class query(DataOperator):
"""
Return rows with matching conditions
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
expr : str
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character
like ``@a + b``. Allowed functions are `sin`, `cos`, `exp`,
`log`, `expm1`, `log1p`, `sqrt`, `sinh`, `cosh`, `tanh`,
`arcsin`, `arccos`, `arctan`, `arccosh`, `arcsinh`,
`arctanh`, `abs` and `arctan2`.
reset_index : bool, optional (default: True)
If ``True``, the index is reset to a sequential range index.
If ``False``, the original index is maintained.
kwargs : dict
See the documentation for :func:`pandas.eval` for complete
details on the keyword arguments accepted by
:meth:`pandas.DataFrame.query`.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5],
... 'y': [0, 0, 1, 1, 2, 3],
... 'z': list('aabbcd')})
>>> df >> query('x % 2 == 0')
x y z
0 0 0 a
1 2 1 b
2 4 2 c
>>> df >> query('x % 2 == 0 & y > 0')
x y z
0 2 1 b
1 4 2 c
    By default, the bitwise operators ``&`` and ``|`` have the same
precedence as the booleans ``and`` and ``or``.
>>> df >> query('x % 2 == 0 and y > 0')
x y z
0 2 1 b
1 4 2 c
``query`` works within groups
>>> df >> query('x == x.min()')
x y z
0 0 0 a
>>> df >> group_by('y') >> query('x == x.min()')
groups: ['y']
x y z
0 0 0 a
1 2 1 b
2 4 2 c
3 5 3 d
When working with strings, the values should be quoted.
>>> df >> query('z == "a"')
x y z
0 0 0 a
1 1 0 a
You can refer to variables in the environment by prefixing them
with an `@` character.
>>> w = list('rrbbst')
>>> df >> query('z == @w')
x y z
0 2 1 b
1 3 1 b
For more information see :meth:`pandas.DataFrame.query`. To query
rows and columns with ``NaN`` values, use :class:`dropna`
Notes
-----
:class:`~plydata.one_table_verbs.query` is the equivalent of
    dplyr's `filter` verb, but with slightly different python syntax
    for the expressions.
"""
expression = None
def __init__(self, expr, reset_index=True, **kwargs):
self.set_env_from_verb_init()
self.reset_index = reset_index
self.expression = expr
self.kwargs = kwargs
class do(DataOperator):
"""
Do arbitrary operations on a dataframe
Considering the *split-apply-combine* data manipulation
strategy, :class:`do` gives a window into which to place
the complex *apply* actions, and also control over the form of
    results when they are combined. This allows arbitrary, user-defined
    computations to be applied to each group of the data.
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
func : function, optional
A single function to apply to each group. *The
function should accept a dataframe and return a
dataframe*.
kwargs : dict, optional
``{name: function}`` pairs. *The function should
accept a dataframe and return an array*. The function
computes a column called ``name``.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame({'x': [1, 2, 2, 3],
... 'y': [2, 3, 4, 3],
... 'z': list('aabb')})
Define a function that uses numpy to do a least squares fit.
    It takes a dataframe as input and returns a dataframe.
``gdf`` is a dataframe that contains only rows from the current
group.
>>> def least_squares(gdf):
... X = np.vstack([gdf.x, np.ones(len(gdf))]).T
... (m, c), _, _, _ = np.linalg.lstsq(X, gdf.y, None)
... return pd.DataFrame({'intercept': c, 'slope': [m]})
Define functions that take x and y values and compute the
intercept and slope.
>>> def slope(x, y):
... return np.diff(y)[0] / np.diff(x)[0]
...
>>> def intercept(x, y):
... return y.values[0] - slope(x, y) * x.values[0]
Demonstrating do
>>> df >> group_by('z') >> do(least_squares)
groups: ['z']
z intercept slope
0 a 1.0 1.0
1 b 6.0 -1.0
We can get the same result, by passing separate functions
that calculate the columns independently.
>>> df >> group_by('z') >> do(
... intercept=lambda gdf: intercept(gdf.x, gdf.y),
... slope=lambda gdf: slope(gdf.x, gdf.y))
groups: ['z']
z intercept slope
0 a 1.0 1.0
1 b 6.0 -1.0
The functions need not return numerical values. Pandas columns can
hold any type of object. You could store result objects from more
complicated models. Each model would be linked to a group. Notice
that the group columns (``z`` in the above cases) are included in
the result.
Notes
-----
    You cannot have both a positional argument and keyword
arguments.
"""
single_function = False
def __init__(self, func=None, **kwargs):
if func is not None:
if kwargs:
raise ValueError(
"Unexpected positional and keyword arguments.")
if not callable(func):
raise TypeError(
"func should be a callable object")
if func:
self.single_function = True
self.expressions = [Expression(func, None)]
else:
stmts_cols = zip(kwargs.values(), kwargs.keys())
self.expressions = [
Expression(stmt, col) for stmt, col in stmts_cols
]
class head(DataOperator):
"""
Select the top n rows
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
n : int, optional
Number of rows to return. If the ``data`` is grouped,
then number of rows per group. Default is 5.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({
... 'x': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
... 'y': list('aaaabbcddd') })
>>> df >> head(2)
x y
0 1 a
1 2 a
Grouped dataframe
>>> df >> group_by('y') >> head(2)
groups: ['y']
x y
0 1 a
1 2 a
2 5 b
3 6 b
4 7 c
5 8 d
6 9 d
"""
def __init__(self, n=5):
self.n = n
class tail(DataOperator):
"""
Select the bottom n rows
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
n : int, optional
Number of rows to return. If the ``data`` is grouped,
then number of rows per group. Default is 5.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({
... 'x': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
... 'y': list('aaaabbcddd') })
>>> df >> tail(2)
x y
8 9 d
9 10 d
Grouped dataframe
>>> df >> group_by('y') >> tail(2)
groups: ['y']
x y
0 3 a
1 4 a
2 5 b
3 6 b
4 7 c
5 9 d
6 10 d
"""
def __init__(self, n=5):
self.n = n
class pull(DataOperator):
"""
Pull a single column from the dataframe
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
column : name
Column name or index id.
use_index : bool
Whether to pull column by name or by its integer
index. Default is False.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({
... 'x': [1, 2, 3],
... 'y': [4, 5, 6],
... 'z': [7, 8, 9]
... })
>>> df
x y z
0 1 4 7
1 2 5 8
2 3 6 9
>>> df >> pull('y')
array([4, 5, 6])
>>> df >> pull(0, True)
array([1, 2, 3])
>>> df >> pull(-1, True)
array([7, 8, 9])
Notes
-----
Always returns a numpy array.
If :obj:`plydata.options.modify_input_data` is ``True``,
    :class:`pull` will not make a copy of the original column.
"""
def __init__(self, column, use_index=False):
self.column = column
self.use_index = use_index
class slice_rows(DataOperator):
"""
Select rows
A wrapper around :class:`slice` to use when piping.
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
*args : tuple
(start, stop, step) as expected by the builtin :class:`slice`
type.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': range(10), 'y': range(100, 110)})
>>> df >> slice_rows(5)
x y
0 0 100
1 1 101
2 2 102
3 3 103
4 4 104
>>> df >> slice_rows(3, 7)
x y
3 3 103
4 4 104
5 5 105
6 6 106
>>> df >> slice_rows(None, None, 3)
x y
0 0 100
3 3 103
6 6 106
9 9 109
The above examples are equivalent to::
df[slice(5)]
df[slice(3, 7)]
df[slice(None, None, 3)]
respectively.
Notes
-----
If :obj:`plydata.options.modify_input_data` is ``True``,
    :class:`slice_rows` will not make a copy of the original dataframe.
"""
def __init__(self, *args):
self.slice = slice(*args)
# Aliases
mutate = define
transmute = create
unique = distinct
summarise = summarize
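# Editor's note: a small sketch of how these verbs compose into one pipeline
# (assumes plydata is installed and imported in the usual way):
#
#   import pandas as pd
#   from plydata import define, group_by, summarize
#   df = pd.DataFrame({'x': [1, 2, 3, 4], 'g': list('aabb')})
#   df >> define(y='x**2') >> group_by('g') >> summarize(total='sum(y)')
#   # -> one row per group: ('a', total=5) and ('b', total=25)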
|
has2k1/plydata
|
plydata/one_table_verbs.py
|
one_table_verbs.py
|
py
| 34,171 |
python
|
en
|
code
| 271 |
github-code
|
6
|
18051200716
|
# 1014
X = int(input())
Y = float(input())
# rule of three to work out how many km were driven per litre
km_per_l = X * 1 / Y
print('%.3f' % km_per_l, 'km/l')
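# Worked example (illustrative): X = 500 km on Y = 35.0 litres -> 500 / 35.0, printed as 14.286 km/l.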
|
lucastorres37/uriExercises
|
consumption.py
|
consumption.py
|
py
| 165 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
17625793432
|
from __future__ import print_function, division
import os
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Activation
from keras.layers import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU, ReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.losses import MeanAbsoluteError, BinaryCrossentropy
from keras import backend as K
import tensorflow as tf
import numpy as np
from losses import encoder_loss, generator_loss, discriminator_loss, code_discriminator_loss
class AlphaGAN():
def __init__(self, lambda_=1., lr1=0.0005, lr2=0.0001, beta1=0.9, beta2=0.999, model_save_path="./snapshots"):
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
self.model_save_path = model_save_path
self.input_dim = 29
self.x_shape = (self.input_dim, )
self.latent_dim = 16
self.base_n_count = 128
self.lambda_ = lambda_
self.lr1 = lr1
self.lr2 = lr2
self.beta1 = beta1
self.beta2 = beta2
self.bce = BinaryCrossentropy()
self.mae = MeanAbsoluteError()
        # Build the discriminator, code discriminator, generator and encoder
        # (no compile step: training uses hand-built K.function updates below)
self.discriminator = self.build_discriminator()
self.code_discriminator = self.build_code_discriminator()
self.generator = self.build_generator()
self.encoder = self.build_encoder()
x = Input(shape=self.x_shape)
x_hat = self.generator(self.encoder(x))
self.alphagan_generator = Model([x], [x_hat])
def build_encoder(self):
model = Sequential(name="Encoder")
model.add(Dense(self.base_n_count * 2))
model.add(ReLU())
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(self.base_n_count))
model.add(ReLU())
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(self.latent_dim))
model.add(Activation('tanh'))
x = Input(shape=self.x_shape)
z = model(x)
model.summary()
return Model(x, z)
def build_generator(self):
model = Sequential(name="Generator")
model.add(Dense(self.base_n_count))
model.add(ReLU())
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(self.base_n_count * 2))
model.add(ReLU())
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(self.base_n_count * 4))
model.add(ReLU())
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(self.input_dim))
model.add(Activation('tanh'))
z = Input(shape=(self.latent_dim,))
x_gen = model(z)
model.summary()
return Model(z, x_gen)
def build_discriminator(self):
model = Sequential(name="Discriminator")
model.add(Dense(self.base_n_count * 4))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(self.base_n_count * 2))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(self.base_n_count))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(1, activation='sigmoid'))
x = Input(shape=self.x_shape)
validity = model(x)
model.summary()
return Model(x, validity)
def build_code_discriminator(self):
model = Sequential(name="CodeDiscriminator")
model.add(Dense(self.base_n_count * 4))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(self.base_n_count * 2))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(self.base_n_count))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(1, activation='sigmoid'))
z = Input(shape=(self.latent_dim,))
validity = model(z)
model.summary()
return Model(z, validity)
def build_e_train(self, batch_size, lr, beta1, beta2):
x_real = K.placeholder(shape=(batch_size,) + self.x_shape)
real_labels = K.placeholder(shape=(batch_size, 1))
z_hat = self.encoder(x_real)
c_z_hat = self.code_discriminator(z_hat)
x_rec = self.generator(z_hat)
# ================== Train E ================== #
l1_loss = self.mae(x_real, x_rec)
c_hat_loss = self.bce(c_z_hat, real_labels) # - self.bce(c_z_hat, fake_labels)
e_loss = l1_loss + c_hat_loss
e_training_updates = Adam(lr=lr, beta_1=beta1, beta_2=beta2) \
.get_updates(e_loss, self.encoder.trainable_weights)
e_train = K.function([x_real, real_labels], [e_loss], updates=e_training_updates)
return e_train
def build_g_train(self, batch_size, lr, beta1, beta2):
x_real = K.placeholder(shape=(batch_size,) + self.x_shape)
z = K.placeholder(shape=(batch_size, self.latent_dim))
real_labels = K.placeholder(shape=(batch_size, 1))
fake_labels = K.placeholder(shape=(batch_size, 1))
z_hat = self.encoder(x_real)
x_rec = self.generator(z_hat)
x_gen = self.generator(z)
d_rec = self.discriminator(x_rec)
d_gen = self.discriminator(x_gen)
# ================== Train E ================== #
l1_loss = 0.2 * self.mae(x_real, x_rec)
g_rec_loss = self.bce(d_rec, real_labels) # - self.bce(d_rec, fake_labels)
g_gen_loss = self.bce(d_gen, fake_labels) # - self.bce(d_gen, fake_labels)
g_loss = l1_loss + g_rec_loss + g_gen_loss
g_training_updates = Adam(lr=lr, beta_1=beta1, beta_2=beta2) \
.get_updates(g_loss, self.generator.trainable_weights)
g_train = K.function([x_real, z, real_labels, fake_labels], [g_loss], updates=g_training_updates)
return g_train
def build_e_g_train(self, batch_size, lr, beta1, beta2):
x_real = K.placeholder(shape=(batch_size,) + self.x_shape)
z = K.placeholder(shape=(batch_size, self.latent_dim))
real_labels = K.placeholder(shape=(batch_size, 1))
fake_labels = K.placeholder(shape=(batch_size, 1))
z_hat = self.encoder(x_real)
x_rec = self.generator(z_hat)
x_gen = self.generator(z)
d_rec = self.discriminator(x_rec)
d_gen = self.discriminator(x_gen)
c_z_hat = self.code_discriminator(z_hat)
# ================== Train G and E ================== #
l1_loss = self.mae(x_real, x_rec)
c_hat_loss = self.bce(c_z_hat, real_labels) # - self.bce(c_z_hat, fake_labels)
g_rec_loss = self.bce(d_rec, real_labels) # - self.bce(d_rec, fake_labels)
g_gen_loss = self.bce(d_gen, real_labels) # - self.bce(d_gen, fake_labels)
g_loss = l1_loss + g_rec_loss + c_hat_loss + g_gen_loss
g_training_updates = Adam(lr=lr, beta_1=beta1, beta_2=beta2) \
.get_updates(g_loss, self.alphagan_generator.trainable_weights)
g_train = K.function([x_real, z, real_labels, fake_labels], [g_loss], updates=g_training_updates)
return g_train
def build_d_train(self, batch_size, lr, beta1, beta2):
x_real = K.placeholder(shape=(batch_size,) + self.x_shape)
z = K.placeholder(shape=(batch_size, self.latent_dim))
real_labels = K.placeholder(shape=(batch_size, 1))
fake_labels = K.placeholder(shape=(batch_size, 1))
z_hat = self.encoder(x_real)
x_rec = self.generator(z_hat)
x_gen = self.generator(z)
d_real = self.discriminator(x_real)
d_rec = self.discriminator(x_rec)
d_gen = self.discriminator(x_gen)
# ================== Train D ================== #
d_real_loss = self.bce(d_real, real_labels)
d_rec_loss = self.bce(d_rec, fake_labels)
d_gen_loss = self.bce(d_gen, fake_labels)
d_loss = d_real_loss + d_rec_loss + d_gen_loss
d_training_updates = Adam(lr=lr, beta_1=beta1, beta_2=beta2) \
.get_updates(d_loss, self.discriminator.trainable_weights)
d_train = K.function([x_real, z, real_labels, fake_labels], [d_loss], updates=d_training_updates)
return d_train
def build_c_train(self, batch_size, lr, beta1, beta2):
x_real = K.placeholder(shape=(batch_size,) + self.x_shape)
z = K.placeholder(shape=(batch_size, self.latent_dim))
real_labels = K.placeholder(shape=(batch_size, 1))
fake_labels = K.placeholder(shape=(batch_size, 1))
z_hat = self.encoder(x_real)
c_z_hat = self.code_discriminator(z_hat)
c_z = self.code_discriminator(z)
# ================== Train C ================== #
c_hat_loss = self.bce(c_z_hat, real_labels)
c_z_loss = self.bce(c_z, fake_labels)
c_loss = c_hat_loss + c_z_loss
c_training_updates = Adam(lr=lr, beta_1=beta1, beta_2=beta2) \
.get_updates(c_loss, self.code_discriminator.trainable_weights)
c_train = K.function([x_real, z, real_labels, fake_labels], [c_loss], updates=c_training_updates)
return c_train
def train(self, X_train, epochs, batch_size=32, output_path='.', model_save_step=10):
# if not os.path.exists(os.path.join(output_path, 'logs/')):
# os.makedirs(os.path.join(output_path, 'logs/'))
real_labels = np.ones((batch_size, 1))
fake_labels = np.zeros((batch_size, 1))
# _, _, d_train, c_train = self.build_functions(batch_size, self.lr1, self.lr2, self.beta1, self.beta2)
e_train = self.build_e_train(batch_size, lr=self.lr1, beta1=self.beta1, beta2=self.beta2)
g_train = self.build_g_train(batch_size, lr=self.lr1, beta1=self.beta1, beta2=self.beta2)
d_train = self.build_d_train(batch_size, lr=self.lr2, beta1=self.beta1, beta2=self.beta2)
c_train = self.build_c_train(batch_size, lr=self.lr2, beta1=self.beta1, beta2=self.beta2)
e_g_train = self.build_e_g_train(batch_size, lr=self.lr1, beta1=self.beta1, beta2=self.beta2)
# train_step = self.build_train_step()
# Adversarial ground truths
session = K.get_session()
init = tf.global_variables_initializer()
session.run(init)
for epoch in range(epochs):
# Generate fake code
z = np.random.normal(size=(batch_size, self.latent_dim)).astype(np.float32)
# z_K.constant(z)
# Make a batch of true samples
idx = np.random.randint(0, X_train.shape[0], batch_size)
x_real = X_train[idx].astype(np.float32)
# e_loss, g_loss, d_loss, c_loss, = train_step(x_real, z)
#e_loss = e_train([x_real, real_labels])
#g_loss = g_train([x_real, z, real_labels, fake_labels])
g_loss = e_g_train([x_real, z, real_labels, fake_labels])
d_loss = d_train([x_real, z, real_labels, fake_labels])
c_loss = c_train([x_real, z, real_labels, fake_labels])
# d_loss = d_train([x_real, z])
# c_loss = c_train([x_real, z])
# Plot the progress
if epoch % 100 == 0:
print("%d [E loss: %f] [G loss: %f] [D loss: %f] [C loss: %f]" % \
(epoch, 0, g_loss[0], d_loss[0], c_loss[0]))
if epoch % model_save_step == 0:
self.generator.save(os.path.join(self.model_save_path, '{}_G.h5'.format(epoch)))
self.encoder.save(os.path.join(self.model_save_path, '{}_E.h5'.format(epoch)))
self.discriminator.save(os.path.join(self.model_save_path, '{}_D.h5'.format(epoch)))
self.code_discriminator.save(os.path.join(self.model_save_path, '{}_C.h5'.format(epoch)))
def load_pretrained_models(self, model_path_prefix):
self.generator.load_weights('%sG.h5' % model_path_prefix)
self.encoder.load_weights('%sE.h5' % model_path_prefix)
self.discriminator.load_weights('%sD.h5' % model_path_prefix)
self.code_discriminator.load_weights('%sC.h5' % model_path_prefix)
# if __name__ == '__main__':
# alphagan = AlphaGAN()
# alphagan.train(epochs=40000, batch_size=32)
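# Editor's sketch of driving the class end-to-end. The synthetic data and the
# hyperparameters below are placeholders; the real project feeds 29-feature
# credit-card records scaled to [-1, 1] to match the tanh outputs.
# if __name__ == '__main__':
#     X_train = np.random.uniform(-1, 1, size=(1024, 29)).astype(np.float32)
#     alphagan = AlphaGAN(lr1=0.0005, lr2=0.0001)
#     alphagan.train(X_train, epochs=1000, batch_size=32, model_save_step=500)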
|
royalsalute/fraud-creditcard-detection
|
alphagan.py
|
alphagan.py
|
py
| 12,147 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16046421800
|
from tkinter import *
from tkinter.messagebox import*
import sqlite3
root4=Tk()
h,w=root4.winfo_screenheight(),root4.winfo_screenwidth()
root4.geometry('%dx%d+0+0'%(w,h))
bus4=PhotoImage(file='.\\Bus_for_project.png')
Label(root4,image=bus4).grid(row=0,column=0,columnspan=12,padx=w/2.5)
Label(root4,text='Online Bus Booking System',font='Arial 20',fg='Red',bg='Sky Blue').grid(row=1,column=0,columnspan=12)
Label(root4,text='Add Bus Route Details',font='Arial 18',fg='Green2').grid(row=2,columnspan=12,pady=20)
def add_route():
route_id=r_id.get()
start_station=s_station.get()
start_id=s_id.get()
end_station=e_station.get()
end_id=e_id.get()
con4=sqlite3.Connection('Bus_DB')
cur4=con4.cursor()
cur4.execute('create table if not exists route(r_id varchar(5) not null primary key,s_name varchar(20),s_id varchar(5),e_name varchar(20),e_id varchar(5) )')
cur4.execute('select r_id from route')
res=cur4.fetchall()
if (route_id,) in res:
showerror('ERROR',"Route id already exists")
else:
start_station=start_station.lower()
end_station=end_station.lower()
cur4.execute('insert into route(r_id,s_name,s_id,e_name,e_id) values(?,?,?,?,?)',(route_id,start_station,start_id,end_station,end_id))
con4.commit()
showinfo('Success',"Route record added successfully!!")
Label(root4, text="Route ID", font='Arial 12', fg='black').grid(row=3, column=0)
r_id=Entry(root4)
r_id.grid(row=3, column=1)
Label(root4, text="Starting station", font='Arial 12', fg='black').grid(row=3, column=2)
s_station=Entry(root4)
s_station.grid(row=3, column=3)
Label(root4, text="Station ID", font='Arial 12', fg='black').grid(row=3, column=4)
s_id=Entry(root4)
s_id.grid(row=3, column=5)
Label(root4, text="Ending station", font='Arial 12', fg='black').grid(row=4, column=1)
e_station=Entry(root4)
e_station.grid(row=4, column=2)
Label(root4, text="Ending Station ID", font='Arial 12', fg='black').grid(row=4, column=3)
e_id=Entry(root4)
e_id.grid(row=4,column=4)
Button(root4, text="Add Route", font='Arial 12 ', bg='Pale Green', fg='black',command=add_route).grid(row=4, column=8)
Button(root4, text="Delete Route", font='Arial 12', bg='Pale Green2', fg='black').grid(row=4, column=9)
def ho():
root4.destroy()
import Home
home4=PhotoImage(file='.\\home.png')
Button(root4,image=home4,bg='Pale Green',command=ho).grid(row=3,column=8,pady=50)
root4.mainloop()
|
akarshi19/Online-Bus-Booking-System
|
bus_route.py
|
bus_route.py
|
py
| 2,568 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9537972227
|
"""Slightly adapted code of Eric de Lange"""
import sys
import time
import os
from micropython import const  # const() requires an explicit import on MicroPython
DEFAULT = "Geen bericht, goed bericht"  # Dutch for "no message is a good message"
CRITICAL = const(50)
ERROR = const(40)
WARNING = const(30)
INFO = const(20)
DEBUG = const(10)
NOTSET = const(0)
_level_str = {
CRITICAL: "CRITICAL",
ERROR: "ERROR",
WARNING: "WARNING",
INFO: "INFO",
DEBUG: "DEBUG"
}
_stream = sys.stderr # default output
_filename = None # overrides stream
_level = INFO # ignore messages which are less severe
_loggers = dict()
class Logger:
MAX_FILE_SIZE = 10000
def __init__(self, name, fn = None):
self.name = name
self.level = _level
self.filename = fn
def log(self, level, message = DEFAULT, **args):
if level < self.level:
return
try:
if args:
message = message.format(**args) # message {extra_info}, {"extra_info": "this_info"}
record = dict()
record["levelname"] = _level_str.get(level, str(level))
record["level"] = level
record["message"] = message
record["name"] = self.name
tm = time.localtime()
record["asctime"] = f"{tm[0]:4}-{tm[1]}-{tm[2]} {tm[3]:2}:{tm[4]:2}:{tm[5]:2}"
log_str = "{name}:{asctime} {levelname:8}--{message}\n".format(**record)
if self.filename is None:
_ = _stream.write(log_str)
else:
with open(self.filename, "a") as fp:
fp.write(log_str)
self.check_logfile(self.filename)
except Exception as e:
print("--- Logging Error ---")
print(repr(e))
print("Message: '" + message + "'")
print("Arguments:", args)
#print("Format String: '" + _format + "'")
raise e
def setLevel(self, level):
self.level = level
def debug(self, message, **args):
self.log(DEBUG, message, **args)
def info(self, message, **args):
self.log(INFO, message, **args)
def warning(self, message, **args):
self.log(WARNING, message, **args)
    def error(self, message, **args):
        self.log(ERROR, message, **args)
def critical(self, message, **args):
self.log(CRITICAL, message, **args)
def exception(self, exception, message, **args):
self.log(ERROR, message, **args)
if _filename is None:
sys.print_exception(exception, _stream)
else:
with open(_filename, "a") as fp:
sys.print_exception(exception, fp)
self.check_logfile(_filename)
def check_logfile(self,filename, max_filesize=MAX_FILE_SIZE):
stat = os.stat(filename)
filesize = stat[6]
if filesize >= max_filesize:
backup = filename[0:-3] + 'bak'
try:
os.remove(backup)
except OSError:
pass
os.rename(filename, backup)
def getLogger(name="pylontech",filename=None):
if name not in _loggers:
_loggers[name] = Logger(name,filename)
return _loggers[name]
def basicConfig(level=INFO, filename=None, filemode='a', format=None):
global _filename, _level, _format
_filename = filename
_level = level
if format is not None:
_format = format
if filename is not None and filemode != "a":
with open(filename, "w"):
pass # clear log file
def setLevel(level):
getLogger().setLevel(level)
def debug(message, *args):
getLogger().debug(message, *args)
def info(message, *args):
getLogger().info(message, *args)
def warning(message, *args):
getLogger().warning(message, *args)
def error(message, *args):
getLogger().error(message, *args)
def critical(message, *args):
getLogger().critical(message, *args)
def exception(exception, message, *args):
getLogger().exception(exception, message, *args)
if __name__ == '__main__':
logger= getLogger('mine')
logger.critical("this problem is critical")
logger.error("this is an error")
logger.warning("a warning message")
logger.info("message plus {extra_info}", **{"extra_info": "'this_extra_info'"})
try:
3/0
except ZeroDivisionError as ex:
logger.exception(ex, ex.args[0])
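    # File-backed logger (sketch): lines are appended to 'app.log' and the file
    # is rotated to 'app.bak' once it grows past MAX_FILE_SIZE bytes.
    file_logger = getLogger('app', filename='app.log')
    file_logger.info("this message goes to app.log")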
|
emsruderer/pylontech-micropython
|
Src/logging.py
|
logging.py
|
py
| 4,493 |
python
|
en
|
code
| 2 |
github-code
|
6
|
10916312087
|
import os
import sys
import subprocess
import pandas as pd
import numpy as np
import logging
def vvpkg(out, err, pool):
df1 = pd.read_csv(out, header = None, sep =']')
df2 = pd.read_csv(err, header = None)
df1 = df1.values[0]
df2 = df2.values[0]
hashes = list()
offsets = [0]
sizes = list()
match = dict()
for j in range(len(df1)):
if isinstance(df1[j], str):
sha = df1[j][df1[j].find("\""):df1[j].rfind('\"') + 1]
hashes.append(sha)
sizes.append(int(df2[j]))
offsets.append(offsets[j] + int(df2[j]))
if sha in pool:
assert pool[sha] == int(df2[j])
match[sha] = int(df2[j])
else:
pool[sha] = int(df2[j])
offsets.pop()
return hashes, sizes, offsets, pool, match
def inspect_tar(out):
'''
order = "bash offset.sh " + tarfile + " > out 2> err"
os.system(order)
'''
files, offsets = ([] for i in range(2))
files.append('header')
offsets.append(0)
with open(out, 'r') as f:
while True:
line = f.readline()
if line == '':
break
offset, size, file = line.split()
offsets.append(int(offset))
files.append(file)
return files, offsets
def offset_match(hashes, hoffsets, files, toffsets):
rv = list()
curr_hash = ''
curr_hoffset = 0
while len(hashes) > 0 and len(files) > 0:
if hoffsets[0] < toffsets[0]:
curr_hash = hashes.pop(0)
curr_hoffset = hoffsets.pop(0)
rv[len(rv) - 1][1].append((curr_hash, curr_hoffset))
elif toffsets[0] < hoffsets[0]:
curr_file = files.pop(0)
curr_toffset = toffsets.pop(0)
rv.append(((curr_file, curr_toffset), list()))
#adding the block before
rv[len(rv) - 1][1].append((curr_hash, curr_hoffset))
elif hoffsets[0] == toffsets[0]:
curr_file = files.pop(0)
curr_toffset = toffsets.pop(0)
curr_hash = hashes.pop(0)
curr_hoffset = hoffsets.pop(0)
rv.append(((curr_file, curr_toffset), list()))
rv[len(rv) - 1][1].append((curr_hash, curr_hoffset))
else:
raise Exception('this path should not happen')
if len(hashes) == 0:
while len(files) > 0:
curr_file = files.pop(0)
curr_toffset = toffsets.pop(0)
rv.append(((curr_file, curr_toffset), list()))
rv[len(rv) - 1][1].append((curr_hash, curr_hoffset))
elif len(files) == 0:
while len(hashes) > 0:
curr_hash = hashes.pop(0)
curr_hoffset = hoffsets.pop(0)
rv[len(rv) - 1][1].append((curr_hash, curr_hoffset))
else:
raise Exception('this path should not happen')
return rv
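# Worked example (illustrative; note that offset_match consumes its input lists in place):
#   offset_match(['h1', 'h2', 'h3'], [0, 100, 250], ['header', 'a.txt'], [0, 150])
#   -> [(('header', 0), [('h1', 0), ('h2', 100)]),
#       (('a.txt', 150), [('h2', 100), ('h3', 250)])]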
def layer_match(layers: list, match: dict) -> int:
size = 0
print(match)
for item in layers:
layername = item[0][0]
if layername[-4:] != '.tar':
continue
localsize = 0
cnt = 0
for hash in item[1]:
if hash[0] in match:
localsize += hash[1]
print(cnt)
else:
print(hash[0])
print('no good: %d'%cnt)
localsize = 0
break
size += localsize
return size
def iteration(image, tag, pool):
prefix = '/home/yutan/cdmt/'
out = prefix + 'tmp/' + image + '/' + tag + '.tar.bk'
err = prefix + 'sizes/' + image + '/' + tag + '.tar.sz'
file = prefix + 'taroffset/' + image + '/' + tag + '.tar.of'
hashes, hsizes, hoffsets, pool, match = vvpkg(out, err, pool)
assert len(hashes) == len(hoffsets) and len(hashes) == len(hsizes)
files, toffsets = inspect_tar(file)
assert len(files) == len(toffsets)
total_size = sum(hsizes)
layers = offset_match(hashes, hoffsets, files, toffsets)
match_size = layer_match(layers, match)
if match_size > 0:
print(image)
print(tag)
return match_size, total_size
def read_images(filename):
rv = list()
with open(filename) as f:
while True:
line = f.readline().rstrip("\n ")
if line == '':
break
rv.append(line)
return rv
def read_order(image):
rv = list()
prefix = "/home/yutan/cdmt/order/"
with open(prefix + image + '.txt', 'r') as f:
while True:
line = f.readline().rstrip('\n ')
if line == '':
break
if line[-4:] != '.tar':
raise Exception('file format not correct')
line = line[:-4]
rv.append(line)
return rv
def main():
filename = '/home/yutan/cdmt/data.txt'
images = read_images(filename)
os.remove('/home/yutan/cdmt/results/experiment7/table2.txt')
for image in images:
print(image)
pool = dict()
tags = read_order(image)
agg_match, agg_size = 0, 0
for tag in tags:
print(tag)
match, size = iteration(image, tag, pool)
agg_match += match
agg_size += size
with open('/home/yutan/cdmt/results/experiment7/table2.txt', 'a+') as f:
f.write(image)
f.write(',')
f.write(str(agg_match/agg_size))
f.write('\n')
if __name__ == "__main__":
main()
|
depaul-dice/CDMT
|
experiment7/experiment7.py
|
experiment7.py
|
py
| 5,491 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32017653936
|
import socket
HOST = 'localhost'
PORT = 8080
def send_coins(amount):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
client_socket.connect((HOST, PORT))
client_socket.sendall(str(amount).encode())
print(f'Sent coins: {amount}')
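# A minimal sketch of the matching receiver; the real server presumably lives in
# a separate script, so the function name and buffer size here are illustrative.
def receive_coins():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:
        server_socket.bind((HOST, PORT))
        server_socket.listen(1)
        conn, _ = server_socket.accept()
        with conn:
            amount = float(conn.recv(1024).decode())
            print(f'Received coins: {amount}')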
if __name__ == '__main__':
amount = 10.5 # Amount of coins to transfer
send_coins(amount)
|
SibinThomasQuad/PYTHON_COIN_TRANSFER
|
sender.py
|
sender.py
|
py
| 383 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73510537787
|
from typing import List
class Solution:
    def duplicateZeros(self, arr: List[int]) -> None:
        """
        Do not return anything, modify arr in-place instead.
        Build a copy with every zero duplicated, then write the first
        len(arr) values of the copy back into arr.
        """
copy =[]
l=0
for i in range(len(arr)):
if arr[i]==0:
copy.append(0)
copy.append(0)
else:
copy.append(arr[i])
for i in range(len(arr)):
arr[i] = copy[i]
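# Worked example (illustrative):
#   arr = [1, 0, 2, 3, 0, 4, 5, 0]
#   Solution().duplicateZeros(arr)
#   arr is now [1, 0, 0, 2, 3, 0, 0, 4]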
|
yonaSisay/a2sv-competitive-programming
|
1089-duplicate-zeros/1089-duplicate-zeros.py
|
1089-duplicate-zeros.py
|
py
| 510 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73484336507
|
import mc_package.sleep_periods as sleep_periods
import pandas as pd
MS_PER_DAY = 1000*60*60*24
MS_PER_HOUR = 1000*60*60
def sleep_duration(part_id, start, end):
sleep_res = sleep_periods.sleep_periods(part_id, start=start, end=end)
if sleep_res is None:
return None
    res = sleep_res['values']  # reuse the result fetched above instead of querying twice
if res is not None:
on, off, qual, starts = res
res_df = pd.DataFrame(list(zip(on, off, qual, starts)), columns = ['Onset', 'Offset', 'Quality', 'Hour_Start'])
res_df['Start'] = res_df['Hour_Start']*MS_PER_HOUR + sleep_periods.time_shift(start, 17)
res_df['Duration'] = res_df['Offset'] - res_df['Onset']
else:
res_df = pd.DataFrame([[None, None, None, None, sleep_periods.time_shift(start, 17)]], columns = ['Onset', 'Offset', 'Quality', 'Duration', 'Start'])
return res_df[['Onset', 'Offset', 'Quality', 'Duration', 'Start']]
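# Note: the returned frame has one row per detected sleep period with columns
# ['Onset', 'Offset', 'Quality', 'Duration', 'Start'], where 'Start' is in
# milliseconds, derived from the hour offsets via time_shift(start, 17).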
|
carlan1/mc_package
|
sleep_duration.py
|
sleep_duration.py
|
py
| 943 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36849582573
|
import os
os.chdir("/home/ghiggi/Projects/deepsphere-weather")
import sys
sys.path.append("../")
import shutil
import argparse
import dask
import glob
import time
import torch
import zarr
import numpy as np
import xarray as xr
## DeepSphere-Weather
from modules.utils_config import read_config_file
from modules.utils_config import get_model_settings
from modules.utils_config import get_training_settings
from modules.utils_config import get_ar_settings
from modules.utils_config import get_dataloader_settings
from modules.utils_config import check_same_dict
from modules.utils_config import get_pytorch_model
from modules.utils_config import set_pytorch_settings
from modules.utils_config import load_pretrained_model
from modules.utils_config import print_tensor_info
from modules.utils_io import get_ar_model_tensor_info
from modules.predictions_autoregressive import AutoregressivePredictions
## Functions within AutoregressivePredictions
from modules.dataloader_autoregressive import remove_unused_Y
from modules.dataloader_autoregressive import get_aligned_ar_batch
from modules.dataloader_autoregressive import AutoregressiveDataset
from modules.dataloader_autoregressive import AutoregressiveDataLoader
from modules.utils_autoregressive import check_ar_settings
from modules.utils_autoregressive import check_input_k
from modules.utils_autoregressive import check_output_k
from modules.utils_io import _get_feature_order
from modules.utils_zarr import check_chunks
from modules.utils_zarr import check_rounding
from modules.utils_zarr import rechunk_Dataset
from modules.utils_zarr import write_zarr
from modules.utils_torch import check_device
from modules.utils_torch import check_pin_memory
from modules.utils_torch import check_asyncronous_gpu_transfer
from modules.utils_torch import check_prefetch_in_gpu
from modules.utils_torch import check_prefetch_factor
from modules.utils_swag import bn_update
## Project specific functions
import modules.my_models_graph as my_architectures
## Side-project utils (maybe migrating to separate packages in future)
from modules.xscaler import LoadScaler
from modules.xscaler import SequentialScaler
# -------------------------------------------------------------------------.
data_dir = "/ltenas3/DeepSphere/data/preprocessed_ds/ERA5_HRES"
model_dir = "/data/weather_prediction/experiments_GG/new/RNN-AR6-UNetSpherical-Healpix_400km-Graph_knn-k20-MaxAreaPooli/"
# -------------------------------------------------------------------------.
# Read config file
cfg_path = os.path.join(model_dir, "config.json")
cfg = read_config_file(fpath=cfg_path)
# Some special options to adjust for prediction
cfg["dataloader_settings"]["autotune_num_workers"] = False
cfg["training_settings"]["gpu_training"] = True # to run prediction in GPU if possible
##------------------------------------------------------------------------.
### Retrieve experiment-specific configuration settings
model_settings = get_model_settings(cfg)
ar_settings = get_ar_settings(cfg)
training_settings = get_training_settings(cfg)
dataloader_settings = get_dataloader_settings(cfg)
dataloader_settings["num_workers"] = 10
##------------------------------------------------------------------------.
#### Load Zarr Datasets
data_sampling_dir = os.path.join(data_dir, cfg["model_settings"]["sampling_name"])
data_dynamic = xr.open_zarr(
os.path.join(data_sampling_dir, "Data", "dynamic", "time_chunked", "dynamic.zarr")
)
data_bc = xr.open_zarr(
os.path.join(data_sampling_dir, "Data", "bc", "time_chunked", "bc.zarr")
)
data_static = xr.open_zarr(os.path.join(data_sampling_dir, "Data", "static.zarr"))
# - Select dynamic features
# data_dynamic = data_dynamic[['z500','t850']]
##------------------------------------------------------------------------.
### Prepare static data
# - Keep land-surface mask as it is
# - Keep sin of latitude and remove longitude information
data_static = data_static.drop(["sin_longitude", "cos_longitude"])
# - Scale orography between 0 and 1 (is already left 0 bounded)
data_static["orog"] = data_static["orog"] / data_static["orog"].max()
# - One Hot Encode soil type
# ds_slt_OHE = xscaler.OneHotEnconding(data_static['slt'])
# data_static = xr.merge([data_static, ds_slt_OHE])
# data_static = data_static.drop('slt')
# - Load static data
data_static = data_static.load()
##------------------------------------------------------------------------.
#### Define scaler to apply on the fly within DataLoader
# - Load scalers
dynamic_scaler = LoadScaler(
os.path.join(data_sampling_dir, "Scalers", "GlobalStandardScaler_dynamic.nc")
)
bc_scaler = LoadScaler(
os.path.join(data_sampling_dir, "Scalers", "GlobalStandardScaler_bc.nc")
)
# # - Create single scaler
scaler = SequentialScaler(dynamic_scaler, bc_scaler)
##------------------------------------------------------------------------.
### Define pyTorch settings (before PyTorch model definition)
# - Here inside is eventually set the seed for fixing model weights initialization
# - Here inside the training precision is set (currently only float32 works)
device = set_pytorch_settings(training_settings)
##------------------------------------------------------------------------.
## Retrieve dimension info of input-output Torch Tensors
tensor_info = get_ar_model_tensor_info(
ar_settings=ar_settings,
data_dynamic=data_dynamic,
data_static=data_static,
data_bc=data_bc,
)
print_tensor_info(tensor_info)
# Check that tensor_info match between model training and now
check_same_dict(model_settings["tensor_info"], tensor_info)
##------------------------------------------------------------------------.
### Define the model architecture
model = get_pytorch_model(module=my_architectures, model_settings=model_settings)
###-----------------------------------------------------------------------.
## Load a pre-trained model
load_pretrained_model(model=model, model_dir=model_dir)
###-----------------------------------------------------------------------.
### Transfer model to the device (i.e. GPU)
model = model.to(device)
###-----------------------------------------------------------------------.
## AutoregressivePredictions arguments
forecast_reference_times = np.datetime64("2016-12-26T23:00:00.000000000")
forecast_reference_times1 = np.datetime64("2016-06-26T23:00:00.000000000")
forecast_reference_times = [forecast_reference_times, forecast_reference_times1]
ar_iterations = 2 * 365 * 4
ar_iterations = 20
batch_size = 32
ar_blocks = None
forecast_zarr_fpath = None
num_workers = 10 # dataloader_settings['num_workers']
bc_generator = None
ar_batch_fun = get_aligned_ar_batch
scaler_transform = scaler
scaler_inverse = scaler
# Dataloader options
device = device
batch_size = batch_size # number of forecasts per batch
prefetch_factor = dataloader_settings["prefetch_factor"]
prefetch_in_gpu = dataloader_settings["prefetch_in_gpu"]
pin_memory = dataloader_settings["pin_memory"]
asyncronous_gpu_transfer = dataloader_settings["asyncronous_gpu_transfer"]
# Autoregressive settings
input_k = ar_settings["input_k"]
output_k = ar_settings["output_k"]
forecast_cycle = ar_settings["forecast_cycle"]
stack_most_recent_prediction = ar_settings["stack_most_recent_prediction"]
# Prediction options
forecast_reference_times = forecast_reference_times
ar_blocks = ar_blocks
ar_iterations = ar_iterations # How many time to autoregressive iterate
keep_first_prediction = True
# Save options
zarr_fpath = forecast_zarr_fpath # None --> do not write to disk
rounding = 2 # Default None. Accept also a dictionary
compressor = "auto" # Accept also a dictionary per variable
chunks = "auto"
# 1 Valid timestep : OK
forecast_reference_times = np.datetime64("2018-12-26T23:00:00.000000000")
### 2 (valid) timesteps --> OK
forecast_reference_times1 = np.datetime64("2018-12-26T22:00:00.000000000")
forecast_reference_times2 = np.datetime64("2018-12-26T23:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
## One valid, one unvalid
forecast_reference_times1 = np.datetime64("2018-12-26T23:00:00.000000000")
forecast_reference_times2 = np.datetime64("2018-12-27T00:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
## 1 Unvalid (future) timestep --> OK: raise correct error
forecast_reference_times = np.datetime64("2018-12-27T00:00:00.000000000")
## 1 Unvalid timestep (past) --> OK: raise correct error
forecast_reference_times = np.datetime64("1980-01-01T07:00:00.000000000")
forecast_reference_times = np.datetime64("1970-01-01T07:00:00.000000000")
## 2 unvalid (future) timesteps --> OK: raise correct error
forecast_reference_times1 = np.datetime64("2018-12-27T00:00:00.000000000")
forecast_reference_times2 = np.datetime64("2018-12-27T01:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
## 2 unvalid (past) timesteps --> OK: raise correct error
forecast_reference_times1 = np.datetime64("1980-01-01T07:00:00.000000000")
forecast_reference_times2 = np.datetime64("1980-01-01T06:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
# ----
### No duplicate (unvalid) timesteps --> OK raise correct error
forecast_reference_times1 = np.datetime64("2018-12-27T00:00:00.000000000")
forecast_reference_times2 = np.datetime64("2018-12-27T00:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
### No duplicate (valid) timesteps --> OK raise correct error
forecast_reference_times1 = np.datetime64("2018-12-26T23:00:00.000000000")
forecast_reference_times2 = np.datetime64("2018-12-26T23:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
## Empty list --> OK raise correct error
forecast_reference_times = []
# ----
## AutoregressivePredictions arguments
forecast_reference_times = np.datetime64("2016-12-26T23:00:00.000000000")
forecast_reference_times1 = np.datetime64("2016-06-26T23:00:00.000000000")
forecast_reference_times = [forecast_reference_times, forecast_reference_times1]
ar_iterations = 2 * 365 * 4
dask.config.set(scheduler="synchronous")
ds_forecasts = AutoregressivePredictions(
model=model,
# Data
data_dynamic=data_dynamic,
data_static=data_static,
data_bc=data_bc,
scaler_transform=scaler,
scaler_inverse=scaler,
# Dataloader options
device=device,
batch_size=batch_size, # number of forecasts per batch
num_workers=dataloader_settings["num_workers"],
prefetch_factor=dataloader_settings["prefetch_factor"],
prefetch_in_gpu=dataloader_settings["prefetch_in_gpu"],
pin_memory=dataloader_settings["pin_memory"],
asyncronous_gpu_transfer=dataloader_settings["asyncronous_gpu_transfer"],
# Autoregressive settings
input_k=ar_settings["input_k"],
output_k=ar_settings["output_k"],
forecast_cycle=ar_settings["forecast_cycle"],
stack_most_recent_prediction=ar_settings["stack_most_recent_prediction"],
# Prediction options
forecast_reference_times=forecast_reference_times,
ar_blocks=ar_blocks,
ar_iterations=ar_iterations, # How many time to autoregressive iterate
# Save options
zarr_fpath=forecast_zarr_fpath, # None --> do not write to disk
rounding=2, # Default None. Accept also a dictionary
compressor="auto", # Accept also a dictionary per variable
chunks="auto",
)
print(ds_forecasts)
ds_forecasts.to_zarr("/ltenas3/DeepSphere/tmp/2ysim.zarr")
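# To inspect the saved forecasts later (sketch): ds = xr.open_zarr("/ltenas3/DeepSphere/tmp/2ysim.zarr")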
###-----------------------------------------------------------------------.
## DEBUG Code within AutoregressivePredictions
##------------------------------------------------------------------------.
## Checks arguments
device = check_device(device)
pin_memory = check_pin_memory(
pin_memory=pin_memory, num_workers=num_workers, device=device
)
asyncronous_gpu_transfer = check_asyncronous_gpu_transfer(
asyncronous_gpu_transfer=asyncronous_gpu_transfer, device=device
)
prefetch_in_gpu = check_prefetch_in_gpu(
prefetch_in_gpu=prefetch_in_gpu, num_workers=num_workers, device=device
)
prefetch_factor = check_prefetch_factor(
prefetch_factor=prefetch_factor, num_workers=num_workers
)
##------------------------------------------------------------------------.
# Check that autoregressive settings are valid
# - input_k and output_k must be numpy arrays hereafter !
input_k = check_input_k(input_k=input_k, ar_iterations=ar_iterations)
output_k = check_output_k(output_k=output_k)
check_ar_settings(
input_k=input_k,
output_k=output_k,
forecast_cycle=forecast_cycle,
ar_iterations=ar_iterations,
stack_most_recent_prediction=stack_most_recent_prediction,
)
ar_iterations = int(ar_iterations)
##------------------------------------------------------------------------.
### Retrieve feature info of the forecast
features = _get_feature_order(data_dynamic)
##------------------------------------------------------------------------.
# Check Zarr settings
WRITE_TO_ZARR = zarr_fpath is not None
if WRITE_TO_ZARR:
# - If zarr fpath provided, create the required folder
if not os.path.exists(os.path.dirname(zarr_fpath)):
os.makedirs(os.path.dirname(zarr_fpath))
# - Set default chunks and compressors
# ---> -1 to all optional dimensions (i..e nodes, lat, lon, ens, plevels,...)
dims = list(data_dynamic.dims)
dims_optional = np.array(dims)[
np.isin(dims, ["time", "feature"], invert=True)
].tolist()
default_chunks = {dim: -1 for dim in dims_optional}
default_chunks["forecast_reference_time"] = 1
default_chunks["leadtime"] = 1
default_compressor = zarr.Blosc(cname="zstd", clevel=0, shuffle=2)
# - Check rounding settings
rounding = check_rounding(rounding=rounding, variable_names=features)
##------------------------------------------------------------------------.
# Check ar_blocks
if not isinstance(ar_blocks, (int, float, type(None))):
raise TypeError("'ar_blocks' must be int or None.")
if isinstance(ar_blocks, float):
ar_blocks = int(ar_blocks)
if not WRITE_TO_ZARR and isinstance(ar_blocks, int):
raise ValueError("If 'zarr_fpath' not specified, 'ar_blocks' must be None.")
if ar_blocks is None:
ar_blocks = ar_iterations + 1
if ar_blocks > ar_iterations + 1:
raise ValueError("'ar_blocks' must be equal or smaller to 'ar_iterations'")
PREDICT_ar_BLOCKS = ar_blocks != (ar_iterations + 1)
##------------------------------------------------------------------------.
### Define DataLoader subset_timesteps
forecast_reference_times = check_timesteps_format(forecast_reference_times)
check_no_duplicate_timesteps(
forecast_reference_times, var_name="forecast_reference_times"
)
forecast_reference_times.sort() # ensure the temporal order
subset_timesteps = None
if forecast_reference_times is not None:
if len(forecast_reference_times) == 0:
raise ValueError(
"If you don't want to specify specific 'forecast_reference_times', set it to None"
)
t_res_timedelta = np.diff(data_dynamic.time.values)[0]
subset_timesteps = forecast_reference_times + -1 * max(input_k) * t_res_timedelta
##------------------------------------------------------------------------.
### Create training Autoregressive Dataset and DataLoader
dataset = AutoregressiveDataset(
data_dynamic=data_dynamic,
data_bc=data_bc,
data_static=data_static,
bc_generator=bc_generator,
scaler=scaler_transform,
# Dataset options
subset_timesteps=subset_timesteps,
training_mode=False,
# Autoregressive settings
input_k=input_k,
output_k=output_k,
forecast_cycle=forecast_cycle,
ar_iterations=ar_iterations,
stack_most_recent_prediction=stack_most_recent_prediction,
# GPU settings
device=device,
)
dataset[0]
self = dataset
self.subset_timesteps
self.idxs
len(self)
|
deepsphere/deepsphere-weather
|
dev/w_debug_predictions.py
|
w_debug_predictions.py
|
py
| 15,927 |
python
|
en
|
code
| 56 |
github-code
|
6
|
15551615128
|
import pandas as pd
class MalformedFileFormat(Exception):
pass
def get_ratings(file):
ratings = []
with open(file, "r") as fd:
try:
raw = fd.readline().strip()
while raw:
movie_id, rating, *_ = raw.split(",")
ratings.append((int(movie_id), int(rating)))
raw = fd.readline().strip()
except Exception as e:
raise MalformedFileFormat(
f"File does not follow ratings convention: {str(e)}"
)
return ratings
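# Expected ratings file layout (illustrative): one "movie_id,rating" pair per
# line, both integers, e.g.
#   42,5
#   7,3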
def save_recommendations(
file, predictions, recommendations, rated, movies_ls, limit, lock
):
with lock:
counter = 0
iter = 0
with open(file, "w") as fd:
fd.write("Predicted rating,Movie\n")
            while counter < limit and iter < len(recommendations):
j = recommendations[iter]
if j not in rated:
record = (
f"{predictions[j]:0.2f},{movies_ls[j]}\n"
if "," not in movies_ls[j]
else f'{predictions[j]:0.2f},"{movies_ls[j]}"\n'
)
fd.write(record)
counter += 1
iter += 1
def save_model_evaluation(file, predictions, ratings, movies_ls, lock):
with lock:
with open(file, "w") as fd:
fd.write("Original prediction,Predicted rating,Movie\n")
for i in range(len(ratings)):
if ratings[i] > 0:
record = (
f"{ratings[i]},{predictions[i]:0.2f},{movies_ls[i]}\n"
if "," not in movies_ls[i]
else f'{ratings[i]},{predictions[i]:0.2f},"{movies_ls[i]}"\n'
)
fd.write(record)
def save_model_learn_history(file, history, lock):
with lock:
with open(file, "w") as fd:
fd.write("Iteration,Loss\n")
for iter, loss in history:
fd.write(f"{iter},{int(loss)}\n")
def get_model_evaluation(file, lock):
with lock:
mdeval_data = []
with open(file, "r") as fd:
_ = fd.readline() # header
data = fd.readline().strip()
while data:
original, predicted, *other = data.split(",")
mdeval_data.append((float(original), float(predicted)))
data = fd.readline().strip()
return mdeval_data
def get_model_learn_history(file, lock):
with lock:
history = []
with open(file, "r") as fd:
_ = fd.readline() # header
data = fd.readline().strip()
while data:
iteration, loss = data.split(",")
history.append((int(iteration), int(loss)))
data = fd.readline().strip()
return history
def get_recommendations(file, lock):
with lock:
df = pd.read_csv(
file,
header=0,
index_col=0,
delimiter=",",
quotechar='"',
)
return df["Movie"].to_list()
|
lukaszmichalskii/recommender-system
|
src/application/files_operations.py
|
files_operations.py
|
py
| 3,122 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12657059462
|
# level: medium
# Approach: DFS rewrites each node's value as its subtree sum, then return the subtree sums that occur most often
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import defaultdict
class Solution(object):
nodes = defaultdict(int)
def findFrequentTreeSum(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
self.nodes.clear()
self.dfs(root)
result = []
vals = []
m = -9999
# print(self.nodes)
for key, val in self.nodes.items():
if val > m: m = val
for key, val in self.nodes.items():
if val == m:
result.append(key)
return result
def dfs(self, root):
if root == None:
return
if root.left != None:
self.dfs(root.left)
root.val += root.left.val
if root.right != None:
self.dfs(root.right)
root.val += root.right.val
self.nodes[root.val] += 1
if __name__ == '__main__':
    # Quick manual check: findFrequentTreeSum expects a TreeNode root rather
    # than a plain list, so build the tree [5, 2, -3] by hand first.
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(5)
    root.left = TreeNode(2)
    root.right = TreeNode(-3)
    ans = Solution()
    print(ans.findFrequentTreeSum(root))
|
PouringRain/leetcode
|
508.py
|
508.py
|
py
| 1,214 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36884065642
|
#!/usr/bin/python3
"""
Module for Base class
"""
import json
class Base:
""" Base class """
__nb_objects = 0
def __init__(self, id=None):
""" ctor for Base Class """
self.id = id
if id is None:
Base.__nb_objects += 1
self.id = Base.__nb_objects
@staticmethod
def to_json_string(l):
""" converts list to json string """
if l is None or len(l) == 0:
return "[]"
return json.dumps(l)
@classmethod
def save_to_file(cls, l):
""" save list of objs to a json file """
with open(cls.__name__ + ".json", "w", encoding="utf-8") as f:
if l is None:
f.write(Base.to_json_string([]))
else:
li = []
for obj in l:
li.append(obj.to_dictionary())
f.write(Base.to_json_string(li))
@staticmethod
def from_json_string(json_s):
""" converts json string to python object """
if json_s is None or not json_s:
return []
else:
return json.loads(json_s)
@classmethod
def create(cls, **dictionary):
""" create a Base inheritanced object based
on dictionary"""
from models.rectangle import Rectangle
from models.square import Square
name = cls.__name__
if name == "Rectangle":
dummy = Rectangle(3, 8)
else:
dummy = Square(1)
dummy.update(**dictionary)
return dummy
@classmethod
def load_from_file(cls):
""" loads objects from a json file """
from os.path import exists
filename = cls.__name__ + ".json"
if not exists(filename):
return []
with open(filename, "r", encoding="utf-8") as f:
s = f.read()
instances = []
dics = Base.from_json_string(s)
for elem in dics:
instances.append(cls.create(**elem))
return instances
@classmethod
def save_to_file_csv(cls, list_objs):
""" saves objects to a csv file """
import csv
name = cls.__name__ + ".csv"
        with open(name, "w", encoding="utf-8", newline="") as f:
            writer = csv.writer(f)
            for obj in (list_objs or []):
                dic = obj.to_dictionary()
                # one key row followed by one value row per object
                writer.writerow(list(dic.keys()))
                writer.writerow(list(dic.values()))
@classmethod
def load_from_file_csv(cls):
""" loads objects from a csv file """
import csv
from os.path import exists
name = cls.__name__ + ".csv"
if not exists(name):
return []
f = open(name, "r", encoding="utf-8")
reader = csv.reader(f)
objs = []
rect = True if cls.__name__ == "Rectangle" else False
for row in reader:
dic = {}
keys = reader[row]
values = row[row + 1]
for i in range(len(keys)):
dic[keys[i]] = values[i]
objs.append(cls.create(**dic))
return objs
@staticmethod
def draw(list_rectangles, list_squares):
""" draws rect and square objects to a canvas """
import turtle
turt = turtle.Turtle()
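# Hedged usage sketch: exercises only what Base itself defines; the dictionary
# below mimics a Rectangle.to_dictionary() payload and is purely illustrative.
if __name__ == "__main__":
    sample = [{"id": 1, "width": 10, "height": 7, "x": 2, "y": 8}]
    as_json = Base.to_json_string(sample)
    assert Base.from_json_string(as_json) == sample
    print(as_json)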
|
Samigirum/alx-higher_level_programming
|
0x0C-python-almost_a_circle/models/base.py
|
base.py
|
py
| 3,403 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72531866429
|
from typing import Final
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
from ._common import column_created_datetime, column_modified_datetime
from .base import metadata
# Intentionally includes the term "SECRET" to avoid leaking this value on a public domain
VENDOR_SECRET_PREFIX: Final[str] = "OSPARC_VARIABLE_VENDOR_SECRET_"
services_vendor_secrets = sa.Table(
"services_vendor_secrets",
#
# - A secret is an environment value passed to the service at runtime
# - A vendor can associate secrets (e.g. a license code) to any of the services it owns
# - secrets_map
# - keys should be prefixed with OSPARC_VARIABLE_VENDOR_SECRET_ (can still normalize on read)
# - values might be encrypted
#
metadata,
sa.Column(
"service_key",
sa.String,
doc="A single environment is allowed per service",
),
sa.Column(
"service_base_version",
sa.String,
doc="Defines the minimum version (included) from which these secrets apply",
),
sa.Column(
"product_name",
sa.String,
sa.ForeignKey(
"products.name",
name="fk_services_name_products",
onupdate="CASCADE",
ondelete="CASCADE",
),
# NOTE: since this is part of the primary key this is required
# NOTE: an alternative would be to not use this as a primary key
server_default="osparc",
doc="Product Identifier",
),
sa.Column(
"secrets_map",
JSONB,
nullable=False,
server_default=sa.text("'{}'::jsonb"),
doc="Maps OSPARC_VARIABLE_VENDOR_SECRET_* identifiers to a secret value (could be encrypted) "
"that can be replaced at runtime if found in the compose-specs",
),
# TIME STAMPS ----
column_created_datetime(timezone=True),
column_modified_datetime(timezone=True),
# CONSTRAINTS --
sa.ForeignKeyConstraint(
["service_key", "service_base_version"],
["services_meta_data.key", "services_meta_data.version"],
onupdate="CASCADE",
ondelete="CASCADE",
# NOTE: this might be a problem: if a version in the metadata is deleted,
# all versions above will take the secret_map for the previous one.
),
sa.PrimaryKeyConstraint(
"service_key",
"service_base_version",
"product_name",
name="services_vendor_secrets_pk",
),
)
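# Hedged usage sketch: builds (but does not execute) an insert statement for a
# hypothetical vendor secret; the service key, version and secret name below are
# illustrative assumptions, not values defined anywhere in this schema.
_example_vendor_secret_insert = services_vendor_secrets.insert().values(
    service_key="simcore/services/dynamic/example-service",
    service_base_version="1.0.0",
    product_name="osparc",
    secrets_map={VENDOR_SECRET_PREFIX + "LICENSE_CODE": "not-a-real-secret"},
)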
|
ITISFoundation/osparc-simcore
|
packages/postgres-database/src/simcore_postgres_database/models/services_environments.py
|
services_environments.py
|
py
| 2,469 |
python
|
en
|
code
| 35 |
github-code
|
6
|
27603501009
|
import io
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from langdetect import detect
def pdf2string(path):
"""
From a given pdf path, it creates a string of the pdf.
:param path: Path to the pdf file
:return: string of the pdf file
"""
file_in = open(path, 'rb')
# Create a PDF interpreter object. (pdfminer)
retstr = io.StringIO()
rsrcmgr = PDFResourceManager()
device = TextConverter(rsrcmgr, retstr, codec='utf-8', laparams=LAParams())
interpreter = PDFPageInterpreter(rsrcmgr, device)
    # Process each page contained in the document.
    for page in PDFPage.get_pages(file_in):
        interpreter.process_page(page)
    data = retstr.getvalue()
    # release the file handle and pdfminer resources before returning
    file_in.close()
    device.close()
    retstr.close()
    return data
def string2txt(string, path):
"""
From a given string, creates a .txt file on the given path.
:param string: The string to be converted to .txt
:param path: The path of the .txt file
:return: File created
"""
    # Writes the string with the desired encoding; the context manager closes the file.
    with open(path, 'w', encoding='utf-8') as file_out:
        file_out.write(string)
def detect_language(string):
"""
    For a given string, returns the language it is written in.
:param string: the string to be analysed
:return: the language detected (string)
"""
return detect(string)
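# Hedged usage sketch: 'example.pdf' and 'example.txt' are placeholder paths.
if __name__ == '__main__':
    text = pdf2string('example.pdf')
    print('Detected language:', detect_language(text))
    string2txt(text, 'example.txt')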
|
n1ur0/Document_Clustering
|
pdfparser.py
|
pdfparser.py
|
py
| 1,484 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8217311297
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
plt.style.use("acaps")
"""
Compare households owning agricultural land in 2018 and 2019 in the host community.
"""
# Read in the data
df_2018 = pd.read_csv("../../data/processed/MSNA_Host_2018.csv")
df_2019 = pd.read_csv("../../data/processed/MSNA_Host_2019.csv")
# Calculate proportions and merge the datasets
df_counts_2018 = df_2018["hh_agri_land"].value_counts(normalize=True).reset_index().rename(columns={"index": "answer", "hh_agri_land": "percent"})
df_counts_2018["year"] = "2018"
df_counts_2019 = df_2019["agricultural_land"].value_counts(normalize=True).reset_index().rename(columns={"index": "answer", "agricultural_land": "percent"})
df_counts_2019["year"] = "2019"
df_counts = pd.concat([df_counts_2018, df_counts_2019], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
df_counts["percent"] = df_counts["percent"]*100.0
df_counts["answer"] = df_counts["answer"].replace({"yes": "Yes", "no": "No"})
# Create the plot
fig, ax = plt.subplots(figsize=(10,8))
ax = sns.barplot(x="answer", y="percent", hue="year", data=df_counts)
plt.title("Percentage of households in the host community\nwith agricultural land in 2018 and 2019", fontsize=18)
plt.legend(fontsize=16, loc="upper right")
plt.xlabel(None)
plt.ylabel("Percent of households (%)", fontsize=16)
plt.ylim([0,100])
plt.xticks(rotation=0, fontsize=14)
# Add percentages to the bars
for p in ax.patches:
width = p.get_width()
height = p.get_height() if not math.isnan(p.get_height()) else 0.0
x, y = p.get_xy()
ax.annotate('{:.0%}'.format(round(height)/100.0), (x + width/2, y + height+2), ha='center', fontsize=14)
plt.tight_layout()
plt.savefig("agricultural_land.png")
plt.close()
|
zackarno/coxs_msna_sector_analysis
|
host_community/analysis/housing_barplots/agri_land.py
|
agri_land.py
|
py
| 1,732 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34047796819
|
import argparse
from resnet import resnet
import os
import sys
from test import test
import tensorflow as tf
from read_data import fasionAI_data
def parse_args():
parser=argparse.ArgumentParser(description="test resnet for FasionAI")
parser.add_argument("--image_data",dest="image_data",\
help="the image data to test",default="image_test",type=str)
parser.add_argument('--bboxes_of_image',dest='bboxes_of_image',
        help='record of the bboxes for each image',default='bboxes_of_train_image_index.csv',type=str)
parser.add_argument("--weights_path",dest="weights_path",\
help="the .ckpt file to load",type=str)
# parser.add_argument("")
print(len(sys.argv))
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args=parser.parse_args()
return args
if __name__ == '__main__':
args=parse_args()
image_test_data=args.image_data
data_abspath=os.path.abspath("read_data.py")
bboxes_of_image=args.bboxes_of_image
data_absdir=os.path.split(data_abspath)[0]
annopath=os.path.join(data_absdir,"Annotations/label.csv")
imdb=fasionAI_data(data_absdir,annopath,image_test_data,bboxes_of_image,False)
weights_path=args.weights_path
net=resnet(is_train=False)
# saver=tf.train.Saver()
test(net,imdb,weights_path)
|
hx121071/FashionAI_resnet
|
base/test_net.py
|
test_net.py
|
py
| 1,380 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39503143836
|
number, limit = map(int, input().split())
def brute(k):
if len(k) == limit:
print(' '.join(map(str, k)))
return 0
for i in range(1, number+1):
if i in k:
continue
brute(k+[i])
brute([])
'''
a, b = map(int, input().split())
k = [0] * b
def brute(index, start, n, m):
if index == b:
for j in k:
if k.count(j) > 1:
return 0
print(' '.join(map(str, k)))
return 0
for i in range(start, len(n)):
k[index] = n[i]
brute(index+1, 0, n, m)
number = list(range(a+1))
number.remove(0)
brute(0, 0, number, b)
'''
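# Equivalent sketch using the standard library: itertools.permutations yields the
# same N-pick-M sequences in the same lexicographic order as the backtracking above.
# from itertools import permutations
# for p in permutations(range(1, number + 1), limit):
#     print(' '.join(map(str, p)))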
|
decentra1ized/baekjoon_solution
|
15649 N과 M (1).py
|
15649 N과 M (1).py
|
py
| 652 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25267249528
|
'''
You are given a 2-D array with dimensions X.
Your task is to perform the sum tool over axis 0 and then find the product of that result
'''
import numpy
N,M = input().split()
A = numpy.array([input().split() for _ in range(int(N))],int)
prodd = numpy.sum(A,axis=0)
print(numpy.prod(prodd))
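# Worked example (assumed I/O format): for the input
#   2 2
#   1 2
#   3 4
# the sums along axis 0 are [4 6], so the product printed above would be 24.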
|
malvika-chauhan/Innomatics-Internship
|
Python Programming Task/Task - 6 (Numpy - Both for Basic and Adv User)/9.py
|
9.py
|
py
| 304 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4376865590
|
import sys
import signal
import argparse
from dictmaster.util import load_plugin
last_broadcast_msg = " "
def broadcast(msg, overwrite=False):
global last_broadcast_msg
if overwrite:
sys.stdout.write("\r{}".format(" "*len(last_broadcast_msg.strip())))
msg = "\r"+msg
else:
if last_broadcast_msg[0] == "\r":
msg = "\n"+msg
msg += "\n"
last_broadcast_msg = msg
sys.stdout.write(msg)
sys.stdout.flush()
def cli_main():
parser = argparse.ArgumentParser(description='Download and convert dictionaries.')
parser.add_argument('plugin', metavar='PLUGIN', type=str, help='The plugin to use.')
parser.add_argument('--popts', action="store", nargs="+", default=[],
help=("Option string passed to the plugin."))
parser.add_argument('--reset', action="store_true", default=False,
help=("Discard data from last time."))
parser.add_argument('--force-process', action="store_true", default=False,
help=("Discard processed data from last time (keep fetched data)."))
parser.add_argument('-o', '--output', action="store", default="", type=str,
help=("Work and output directory."))
args = parser.parse_args()
plugin = load_plugin(args.plugin, popts=args.popts, dirname=args.output)
    if plugin is None: sys.exit("Plugin not found or plugin broken.")
plugin.force_process = args.force_process
if args.reset:
broadcast("Resetting plugin data in '{}'.".format(plugin.output_directory))
plugin.reset()
elif args.force_process:
plugin.stages['Processor'].reset()
broadcast("Running plugin '{}'.".format(args.plugin))
broadcast("Output will be written to '{}'.".format(plugin.output_directory))
plugin.start()
def ctrl_c(signal, frame):
broadcast("User interrupt. Stopping the plugin...")
plugin.cancel()
signal.signal(signal.SIGINT, ctrl_c)
while plugin.is_alive():
broadcast(plugin.progress(), True)
plugin.join(1)
broadcast("Plugin '{}' quit.".format(args.plugin))
if not plugin._canceled:
broadcast("Optimize data...")
plugin.optimize_data()
broadcast("Export as StarDict file...")
plugin.export()
|
tuxor1337/dictmaster
|
dictmaster/cli/main.py
|
main.py
|
py
| 2,301 |
python
|
en
|
code
| 32 |
github-code
|
6
|
26201696351
|
from dolfin import *
import math
import numpy as np
import logging
import matplotlib.pyplot as plt
from unconstrainedMinimization import InexactNewtonCG
logging.getLogger('FFC').setLevel(logging.WARNING)
logging.getLogger('UFL').setLevel(logging.WARNING)
set_log_active(False)
# Set the level of noise:
noise_std_dev = .3
# Load the image from file
data = np.loadtxt('image.dat', delimiter=',')
np.random.seed(seed=1)
noise = noise_std_dev*np.random.randn(data.shape[0], data.shape[1])
# Set up the domain and the finite element space.
Lx = float(data.shape[1])/float(data.shape[0])
Ly = 1.
mesh = RectangleMesh(Point(0,0),Point(Lx,Ly),200, 100)
V = FunctionSpace(mesh, "Lagrange",1)
# Generate the true image (u_true) and the noisy data (u_0)
class Image(Expression):
def __init__(self, Lx, Ly, data, **kwargs):
self.data = data
self.hx = Lx/float(data.shape[1]-1)
self.hy = Ly/float(data.shape[0]-1)
def eval(self, values, x):
j = int(math.floor(x[0]/self.hx))
i = int(math.floor(x[1]/self.hy))
values[0] = self.data[i,j]
trueImage = Image(Lx,Ly,data, degree=1)
noisyImage = Image(Lx,Ly,data+noise, degree=1)
u_true = interpolate(trueImage, V)
u_0 = interpolate(noisyImage, V)
plt.figure(figsize=[12,24])
plt.subplot(1,2,1)
plot(u_true, title="True Image")
plt.subplot(1,2,2)
plot(u_0, title="Noisy Image")
plt.show()
|
uvilla/inverse17
|
Assignment3/tntv.py
|
tntv.py
|
py
| 1,395 |
python
|
en
|
code
| 3 |
github-code
|
6
|
36386456035
|
import altair as alt
from vega_datasets import data
source = data.cars()
chart = alt.Chart(source).mark_circle(size=60, clip=False).transform_calculate(
x = alt.datum.Horsepower-100,
y = alt.datum.Miles_per_Gallon - 25
).encode(
x=alt.X('x:Q', axis=alt.Axis(offset=-150)),
y=alt.Y('y:Q', axis=alt.Axis(offset=-190)),
color='Origin',
).configure_axisX(
domainWidth =3
).configure_axisY(
domainWidth =3
)
# save
chart.save('debug.svg')
|
noahzhy/charts_synthetic_data
|
test.py
|
test.py
|
py
| 464 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1921452426
|
import pyomo.environ as pe
from lci import LifeCycleInventory
from superstructure import Superstructure
from utils.properties import molar_weight
from utils.utils import sum_rule
from utils.save_results import ResultManager
from utils.solve_model import Solver
import time as time
import pickle
def range_len(start, stop, len):
step = (stop - start) / (len - 1)
list = []
i = start
while i < stop:
list.append(i)
i += step
list.append(stop)
return list
def rep_solve_tcm(x):
lci = LifeCycleInventory('millgas2what')
lci.model = pe.ConcreteModel('millgas2what')
scale = 1000000000
    'Steel mill streams as parameters'
lci.model.cog_steelMill = pe.Param(initialize=39700000000 / scale) # in kg, scaled
lci.model.bfg_steelMill = pe.Param(initialize=1740550000000 / scale) # in kg, scaled
lci.model.electricity_steelMill = pe.Param(initialize=-2298568545000 / scale)
lci.model.heat_steelMill = pe.Param(initialize=-894991475000 / scale)
    'Definition of the connection points'
connect_list = ['Mill gas COG [kg]', 'Mill gas BFG/BOFG [kg]', 'Electricity [MJ]', 'Heat [MJ]']
connector_lp = {}
lci.model.connect_lp = pe.Var(connect_list, initialize=0, bounds=(-10000, 10000))
for c in connect_list:
connector_lp[c] = lci.model.connect_lp[c]
    'Overall balances'
lci.model.cog_balance = pe.Constraint(
expr=0 == - lci.model.cog_steelMill + lci.model.connect_lp['Mill gas COG [kg]'])
lci.model.bfg_balance = pe.Constraint(
expr=0 == - lci.model.bfg_steelMill + lci.model.connect_lp['Mill gas BFG/BOFG [kg]'])
lci.model.electricity_balance = pe.Constraint(
expr=0 == - lci.model.electricity_steelMill + lci.model.connect_lp['Electricity [MJ]'])
lci.model.heat_balance = pe.Constraint(expr=0 == - lci.model.heat_steelMill + lci.model.connect_lp['Heat [MJ]'])
    'From here on the model is assembled with the LCI class'
lci.import_from_excel('Life Cycle Inventory_v19.xlsx', 'A-Matrix', 'End of life')
lci.set_up_lp(scale)
    lci.import_connector(connector_lp) # Deactivating this line restricts the analysis to the chemical industry only
# lci.activate_scenario('Electricity Today')
# lci.activate_scenario('Electricity Best Case')
lci.activate_scenario('Electricity user-defined', x)
    lci.activate_scenario('Separation GDP') # Switches off all linear processes for mill gas separation
# lci.deactivate_process('CARBON DIOXIDE as emission to air')
# lci.deactivate_process('AMMONIA FROM NATURAL GAS BY STEAM REFORMING BY ICI "AMV" PROCESS incl CO2 capture')
lci.activate_scenario('CCU high TRL only')
# lci.activate_scenario('No high TRL CCU')
lci.deactivate_process('CARBON DIOXIDE - ammonia plant')
lci.deactivate_process('Water gas shift reaction')
lci.lp.ammonia_constraint = pe.Constraint(
expr=lci.lp.s['AMMONIA FROM NATURAL GAS BY STEAM REFORMING BY ICI "AMV" PROCESS incl CO2 capture'] - 228.9 <= 0)
lci.deactivate_process('TDI - neue Route_v2 exklusive Methylformate production')
lci.deactivate_process('Polycarbonate - neue Route')
lci.deactivate_process('Methylformate productionaus TDI neue Route v2')
lci.construct_demand_constraints()
lci.construct_objective()
lci.model.add_component('lp', lci.lp)
    'Solve and display the results'
# lci.model.pprint()
solver = Solver()
solver.solve_lp(lci, 'glpk')
    # solver.test_feasibility() # Must be adapted to the new demand constraints
results = {'x': x, 'z': pe.value(lci.objective) * lci.scale}
return results
def rep_solve_gdp(x):
""" Separation System / Flowsheet construction """
# COG Separation
s = Superstructure("Combined Model")
s.initial_stream(31, 0, 300, 1, 'COG')
s.create_unit('Compressor', 'C3', 31, 32)
s.create_unit('PSA', 'PSA3', 32, 33, 34, 'H2')
s.create_unit('Splitter', 'S5', 34, 35, 36)
s.create_streams(37)
s.model.p_35 = pe.Constraint(expr=s.model.p[37] == 1)
s.model.t_35 = pe.Constraint(expr=s.model.t[37] == 300)
s.model.cc_35 = pe.Constraint(expr=1 == sum_rule(s.streams[37].y, s.streams[37].substances))
s.mix_streams('M3', 36, 37, 38)
s.create_unit('Heat Exchanger', 'HE5', 38, 39)
s.create_disjunct_reactor('methane_reforming', 'por', 'R2_POR', 39, 40, 'POR')
s.create_disjunct_reactor('methane_reforming', 'cdr', 'R2_CDR', 39, 40, 'CDR')
s.model.cdr.y_37 = pe.Constraint(expr=s.model.y[37, 'CO2'] == 1)
s.model.cdr.add = pe.Constraint(expr=s.model.n[37] == 1 * s.model.n[36] * s.model.y[36, 'CH4'])
s.model.cdr.q_constr = pe.Constraint(expr=s.model.q['R2_POR'] == 0)
s.model.por.y_37 = pe.Constraint(expr=s.model.y[37, 'O2'] == 1)
s.model.por.add = pe.Constraint(expr=s.model.n[37] == 0.48 * s.model.n[36] * s.model.y[36, 'CH4'])
s.model.por.q_constr = pe.Constraint(expr=s.model.q['R2_CDR'] == 0)
# B(O)FG Separation
s.initial_stream(1, 0, 300, 1, 'B(O)FG')
s.create_unit('Heat Exchanger', 'HE1', 1, 2)
s.create_unit('TSA', 'TSA1', 2, 3, 4, 'CO')
s.create_unit('Splitter', 'S1', 3, 20, 21)
s.initial_stream(22, 0, 400, 1, {'H2O': 1})
s.model.add_h2o = pe.Constraint(expr=s.model.n[22] == s.model.n[21])
s.mix_streams('M1', 21, 22, 23)
s.create_unit('Heat Exchanger', 'HE3', 23, 24)
s.create_reactor('R1', 24, 25, 'WGSR')
s.mix_streams('M2', 25, 4, 5)
s.create_unit('Heat Exchanger', 'HE4', 5, 6)
s.create_unit('Compressor', 'C1', 6, 7)
s.create_disjunct_unit('co2_bofg', 'cca_1', 'CCA', 'CCA1', 7, 8, 9, 'CO2')
s.create_disjunct_unit('co2_bofg', 'psa_1', 'PSA', 'PSA1', 7, 8, 9, 'CO2')
s.model.psa_1.q_cca = pe.Constraint(expr=s.model.q['CCA1'] == 0)
s.model.psa_1.t6_constraint = pe.Constraint(expr=s.model.t[6] >= 273.15 + 30)
# s.model.t6_constraint = pe.Constraint(expr=s.model.t[6] >= 273.15 + 30)
# s.create_unit('PSA', 'PSA1', 7, 8, 9, 'CO2')
# s.create_unit('CCA', 'CCA1', 7, 8, 9, 'CO2')
s.create_unit('Compressor', 'C2', 9, 10)
s.create_unit('PSA', 'PSA2', 10, 11, 12, 'H2')
scale = 1000000000
    'Steel mill streams as parameters'
s.model.cog_steelMill = pe.Param(initialize=39700000000 / scale) # in kg, scaled
s.model.bfg_steelMill = pe.Param(initialize=1740550000000 / scale) # in kg, scaled
s.model.electricity_steelMill = pe.Param(initialize=-2298568545000 / scale)
s.model.heat_steelMill = pe.Param(initialize=-894991475000 / scale)
s.connect_list = ['Mill gas COG [kg]', 'Hydrogen (H2) [kg]', 'Electricity [MJ]', 'Heat [MJ]', 'SYNTHESIS GAS (1:1)',
'SYNTHESIS GAS (2:1)', 'Carbon dioxide (CO2) [kg]', 'Methane (CH4) [kg]', 'Oxygen (O2) [kg]',
'Mill gas BFG/BOFG [kg]', 'Carbon monoxide (CO) [kg]', 'CO2 to atm [kg]', 'STEAM [kg]'
]
connector_lp = {}
s.model.connect_lp = pe.Var(s.connect_list, initialize=0, bounds=(-10000, 10000))
for c in s.connect_list:
connector_lp[c] = s.model.connect_lp[c]
    # Here you can set which share of the mill gases may be utilised
    # x = 0.0001 # NOTE: x = 0 leads to an infeasible model
# s.model.force_cog = pe.Constraint(expr=s.model.n[31] <= x * s.model.cog_steelMill / molar_weight('COG'))
# s.model.force_bfg = pe.Constraint(expr=s.model.n[1] <= x * s.model.bfg_steelMill / molar_weight('B(O)FG'))
s.model.cog_balance = pe.Constraint(
expr=0 == - s.model.cog_steelMill + s.model.connect_lp['Mill gas COG [kg]'] + s.model.n[31] * molar_weight(
'COG'))
s.model.bfg_balance = pe.Constraint(
expr=0 == - s.model.bfg_steelMill + s.model.connect_lp['Mill gas BFG/BOFG [kg]'] + s.model.n[1] * molar_weight(
'B(O)FG'))
s.model.el_balance = pe.Constraint(expr=0 == - s.model.connect_lp['Electricity [MJ]'] - sum_rule(s.model.w,
s.model.workSet) + s.model.electricity_steelMill)
s.model.heat_balance = pe.Constraint(
expr=0 == - s.model.connect_lp['Heat [MJ]'] - sum_rule(s.model.q, s.model.heatSet) + s.model.heat_steelMill)
s.model.co2_atm_balance = pe.Constraint(
expr=0 == s.model.connect_lp['CO2 to atm [kg]'] - s.model.n[12] * molar_weight('CO2') * (
s.model.y[12, 'CO2'] + s.model.y[12, 'CO']))
# s.model.co2_atm_balance = pe.Constraint(expr=0 == s.model.connect_lp['CO2 to atm [kg]'] - s.model.n[12] * molar_weight('CO2') * (s.model.y[12, 'CO2'] + s.model.y[12, 'CO']) * 0.05)
# s.model.co2_atm_balance = pe.Constraint(expr=0 == s.model.connect_lp['CO2 to atm [kg]'])
s.model.steam_balance = pe.Constraint(
expr=0 == s.model.connect_lp['STEAM [kg]'] + s.model.n[22] * molar_weight('H2O'))
s.model.co_balance = pe.Constraint(
expr=0 == s.model.connect_lp['Carbon monoxide (CO) [kg]'] - s.model.n[20] * molar_weight('CO'))
s.model.ch4_balance = pe.Constraint(
expr=s.model.connect_lp['Methane (CH4) [kg]'] == s.model.n[35] * s.model.y[35, 'CH4'] * molar_weight('CH4'))
s.model.h2_balance = pe.Constraint(
expr=s.model.connect_lp['Hydrogen (H2) [kg]'] == s.model.n[33] * molar_weight('H2') + s.model.n[
11] * molar_weight('H2'))
    # s.model.cdr.syngas11_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (1:1)'] == s.model.n[40] * molar_weight({'H2': 0.5, 'CO': 0.5})) # N2 is assumed to count as syngas
s.model.cdr.syngas11_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (1:1)'] == s.model.n[40] * (
s.model.y[40, 'H2'] * molar_weight('H2') + s.model.y[40, 'CO'] * molar_weight('CO')))
s.model.por.syngas11_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (1:1)'] == 0)
    # s.model.por.syngas21_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (2:1)'] == s.model.n[40] * molar_weight({'H2': 0.67, 'CO': 0.33})) # N2 is assumed to count as syngas
s.model.por.syngas21_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (2:1)'] == s.model.n[40] * (
s.model.y[40, 'H2'] * molar_weight('H2') + s.model.y[40, 'CO'] * molar_weight('CO')))
s.model.cdr.syngas21_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (2:1)'] == 0)
s.model.cdr.co2_balance = pe.Constraint(
expr=s.model.connect_lp['Carbon dioxide (CO2) [kg]'] == - s.model.n[37] * molar_weight('CO2') + s.model.n[
8] * molar_weight('CO2'))
s.model.por.co2_balance = pe.Constraint(
expr=s.model.connect_lp['Carbon dioxide (CO2) [kg]'] == s.model.n[8] * molar_weight('CO2'))
# s.model.cdr.co2_balance = pe.Constraint(expr=s.model.connect_lp['Carbon dioxide (CO2) [kg]'] == 0)
# s.model.por.co2_balance = pe.Constraint(expr=s.model.connect_lp['Carbon dioxide (CO2) [kg]'] == 0)
s.model.por.o2_balance = pe.Constraint(
expr=s.model.connect_lp['Oxygen (O2) [kg]'] == - s.model.n[37] * molar_weight('O2'))
s.model.cdr.o2_balance = pe.Constraint(expr=s.model.connect_lp['Oxygen (O2) [kg]'] == 0)
# s.model.no_co2_sep = pe.Constraint(expr=s.model.n[8] * molar_weight('CO2') == 147)
# s.model.no_h2_sep = pe.Constraint(expr=s.model.n[11] == 0)
""" Set up TCM """
lci = LifeCycleInventory('millgas2what')
lci.import_from_excel('Life Cycle Inventory_v19.xlsx', 'A-Matrix', 'End of life')
lci.set_up_lp(scale)
    lci.import_connector(connector_lp)  # Deactivating this line restricts the analysis to the chemical industry only
# lci.activate_scenario('Electricity Today')
# lci.activate_scenario('Electricity Best Case')
lci.activate_scenario('Electricity user-defined', x)
    lci.activate_scenario('Separation GDP')  # Switches off all linear processes for mill gas separation
# lci.deactivate_process('CARBON DIOXIDE as emission to air')
# lci.deactivate_process('AMMONIA FROM NATURAL GAS BY STEAM REFORMING BY ICI "AMV" PROCESS incl CO2 capture')
lci.activate_scenario('CCU high TRL only')
lci.deactivate_process('CARBON DIOXIDE - ammonia plant')
lci.deactivate_process('Water gas shift reaction')
lci.lp.ammonia_constraint = pe.Constraint(
expr=lci.lp.s['AMMONIA FROM NATURAL GAS BY STEAM REFORMING BY ICI "AMV" PROCESS incl CO2 capture'] - 228.9 <= 0)
lci.deactivate_process('TDI - neue Route_v2 exklusive Methylformate production')
lci.deactivate_process('Polycarbonate - neue Route')
lci.deactivate_process('Methylformate productionaus TDI neue Route v2')
lci.construct_demand_constraints()
lci.construct_objective()
s.import_lci(lci)
s.create_disjunctions()
""" Solve overall model """
solver = Solver()
""" Save values in dict"""
ind_var = {}
c_val = {}
rec_val = {}
heat_val = {}
el_val = {}
s_dict = {}
cog_dict = {}
bfg_dict = {}
    # LCI processes that should be tracked
s_dict_list = ['CARBON DIOXIDE as emission to air', 'INC Carbon monoxide',
'treatment of blast furnace gas, in power plant, DE', 'Verbrennung COG in BHKW',
'AMMONIA FROM NATURAL GAS BY STEAM REFORMING BY ICI "AMV" PROCESS incl CO2 capture',
'CARBON DIOXIDE - air capture', 'Electricity, user-defined']
try:
solver.solve_gdp(s)
obj = pe.value(s.objective)
for d in s.disjuncts.keys():
ind_var[d] = pe.value(s.disjuncts[d].indicator_var)
for c in s.connect_list:
c_val[c] = pe.value(connector_lp[c]) * lci.scale
for c in s.model.zetaSet:
rec_val[c] = pe.value(s.model.zeta[c])
for c in s.model.heatSet:
heat_val[c] = pe.value(s.model.q[c]) * lci.scale
for c in s.model.workSet:
el_val[c] = pe.value(s.model.w[c]) * lci.scale
for i in s_dict_list:
s_dict[i] = pe.value(s.model.utilization.s[i]) * lci.scale
    'Diagrams for COG / B(O)FG'
cog_dict['H2'] = pe.value(s.model.n[33]) * molar_weight({'H2': 1}) * lci.scale
cog_dict['N2'] = pe.value(s.model.n[35]) * pe.value(s.model.y[35, 'N2']) * molar_weight({'N2': 1}) * lci.scale
bfg_dict['H2'] = pe.value(s.model.n[11]) * molar_weight({'H2': 1}) * lci.scale
cog_dict['N2'] = pe.value(s.model.n[12]) * pe.value(s.model.y[12, 'N2']) * molar_weight({'N2': 1}) * lci.scale
bfg_dict['CO2'] = pe.value(s.model.n[8]) * molar_weight({'CO2': 1}) * lci.scale
except ValueError:
obj = 0
return {'x': x, 'z': obj * lci.scale, 'i': ind_var, 'c': c_val, 'rec': rec_val, 'q': heat_val, 'w': el_val,
's': s_dict, 'cog': cog_dict, 'bfg': bfg_dict}
x_vector = range_len(0.002, 0.2, 30)
results_tcm = {}
results_gdp = {}
n = 0
while n < len(x_vector):
results_tcm[n] = rep_solve_tcm(x_vector[n])
print('TCM solved', n + 1)
results_gdp[n] = rep_solve_gdp(x_vector[n])
print('GDP solved', n + 1)
n += 1
results = {'tcm': results_tcm, 'gdp': results_gdp}
def save_object(obj, filename):
with open(filename, 'wb') as output: # Overwrites any existing file.
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
# name = input("enter file name")
save_object(results, '20200825_v19_tcm')
|
jkleinekorte/millgas2what
|
src/repeated_solving_el_impact.py
|
repeated_solving_el_impact.py
|
py
| 15,332 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10094748311
|
# coding=utf-8
import MeCab
import sys
if len(sys.argv) == 1:
print("mkdir.py <file> [univ]\n")
sys.exit(1)
if len(sys.argv) == 3 and sys.argv[2] == 'univ':
dictype = '固有名詞'
nauntype = '組織'
else:
dictype = '名詞'
nauntype = '一般'
tagger = MeCab.Tagger('-Oyomi')
out = sys.argv[1].replace(".txt", ".csv")
fo = open(out, 'w')
fi = open(sys.argv[1], 'r')
line = fi.readline()
while line:
naun = line.replace('\n', '')
yomi = tagger.parse(naun).replace('\n', '')
fo.write('{naun},*,*,1000,名詞,{dictype},{nauntype},*,*,*,{naun},{yomi},{yomi}\n'.format(naun=naun, dictype=dictype, nauntype=nauntype, yomi=yomi))
line = fi.readline()
fi.close()
fo.close()
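# Worked example (reading assumed for illustration): with 'univ' set, an input line
# "東京大学" whose MeCab -Oyomi reading is "トウキョウダイガク" would produce the CSV row:
# 東京大学,*,*,1000,名詞,固有名詞,組織,*,*,*,東京大学,トウキョウダイガク,トウキョウダイガク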
|
l-plantarum/chiebukuro
|
mkdic.py
|
mkdic.py
|
py
| 716 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8034052289
|
#Prototype 4
# importing the necessary libraries
import os
import cv2
import numpy as np
# defining the crack detector function
# here weak_th and strong_th are thresholds for
# double thresholding step
def PCD(img, weak_th = None, strong_th = None):
# conversion of image to grayscale
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Noise reduction step
img = cv2.GaussianBlur(img, (5, 5), 1.6)
# Calculating the gradients
gx = cv2.Sobel(np.float32(img), cv2.CV_64F, 1, 0, 3)
gy = cv2.Sobel(np.float32(img), cv2.CV_64F, 0, 1, 3)
# Conversion of Cartesian coordinates to polar
mag, ang = cv2.cartToPolar(gx, gy, angleInDegrees = True)
# setting the minimum and maximum thresholds
# for double thresholding
mag_max = np.max(mag)
if not weak_th:weak_th = mag_max * 0.1
if not strong_th:strong_th = mag_max * 0.5
# getting the dimensions of the input image
height, width = img.shape
# Looping through every pixel of the grayscale
# image
for i_x in range(width):
for i_y in range(height):
grad_ang = ang[i_y, i_x]
grad_ang = abs(grad_ang-180) if abs(grad_ang)>180 else abs(grad_ang)
# selecting the neighbours of the target pixel
# according to the gradient direction
# In the x axis direction
if grad_ang<= 22.5:
neighb_1_x, neighb_1_y = i_x-1, i_y
neighb_2_x, neighb_2_y = i_x + 1, i_y
# top right (diagonal-1) direction
elif grad_ang>22.5 and grad_ang<=(22.5 + 45):
neighb_1_x, neighb_1_y = i_x-1, i_y-1
neighb_2_x, neighb_2_y = i_x + 1, i_y + 1
# In y-axis direction
elif grad_ang>(22.5 + 45) and grad_ang<=(22.5 + 90):
neighb_1_x, neighb_1_y = i_x, i_y-1
neighb_2_x, neighb_2_y = i_x, i_y + 1
# top left (diagonal-2) direction
elif grad_ang>(22.5 + 90) and grad_ang<=(22.5 + 135):
neighb_1_x, neighb_1_y = i_x-1, i_y + 1
neighb_2_x, neighb_2_y = i_x + 1, i_y-1
# Now it restarts the cycle
elif grad_ang>(22.5 + 135) and grad_ang<=(22.5 + 180):
neighb_1_x, neighb_1_y = i_x-1, i_y
neighb_2_x, neighb_2_y = i_x + 1, i_y
# Non-maximum suppression step
if width>neighb_1_x>= 0 and height>neighb_1_y>= 0:
if mag[i_y, i_x]<mag[neighb_1_y, neighb_1_x]:
mag[i_y, i_x]= 0
continue
if width>neighb_2_x>= 0 and height>neighb_2_y>= 0:
if mag[i_y, i_x]<mag[neighb_2_y, neighb_2_x]:
mag[i_y, i_x]= 0
weak_ids = np.zeros_like(img)
strong_ids = np.zeros_like(img)
ids = np.zeros_like(img)
# double thresholding step
for i_x in range(width):
for i_y in range(height):
grad_mag = mag[i_y, i_x]
if grad_mag<weak_th:
mag[i_y, i_x]= 0
elif strong_th>grad_mag>= weak_th:
ids[i_y, i_x]= 1
else:
ids[i_y, i_x]= 2
# finally returning the magnitude of
# gradients of edges
return mag
# Creating a VideoCapture object to read the video
cap = cv2.VideoCapture('assets/sample.mp4')  # forward slash avoids the invalid "\s" escape
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('assets/Out.mp4', fourcc, 20.0, (640,480))
# Loop until the end of the video
while (cap.isOpened()):
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        # end of the video (or a read failure): leave the loop cleanly
        break
    frame = cv2.resize(frame, (540, 380), fx = 0, fy = 0,
            interpolation = cv2.INTER_CUBIC)
# Display the resulting frame
cv2.imshow('Frame', frame)
# conversion of BGR to grayscale is necessary to apply this operation
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
crack_frame = PCD(frame)
blur = cv2.blur(crack_frame,(3,3))
#img_log = np.array(blur,dtype=np.uint8)
# Morphological Closing Operator
#kernel = np.ones((5,5),np.uint8)
#closing = cv2.morphologyEx(blur, cv2.MORPH_CLOSE, kernel)
# Create feature detecting method
# sift = cv2.xfeatures2d.SIFT_create()
# surf = cv2.xfeatures2d.SURF_create()
# orb = cv2.ORB_create(nfeatures=150)
# Make featured Image
# keypoints, descriptors = orb.detectAndCompute(closing, None)
# featuredImg = cv2.drawKeypoints(closing, keypoints, None)
# adaptive thresholding to use different threshold
# values on different regions of the frame.
#Thresh = cv2.adaptiveThreshold(crack_frame, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
#cv2.THRESH_BINARY_INV, 11, 2)
cv2.imshow('C_frame', blur)
out.write(blur)
# define q as the exit button
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# release the video capture object
cap.release()
# Closes all the windows currently opened.
cv2.destroyAllWindows()
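# Hedged sketch: PCD() classifies weak (1) and strong (2) pixels in `ids` but only
# returns the magnitude; a classic hysteresis pass would keep a weak pixel only if
# an 8-connected neighbour is strong. PCD would need to return `ids` for this to run.
# def hysteresis(mag, ids):
#     out = np.zeros_like(mag)
#     out[ids == 2] = mag[ids == 2]                 # strong edges always kept
#     ys, xs = np.where(ids == 1)                   # weak-edge candidates
#     for y, x in zip(ys, xs):
#         if (ids[max(y-1, 0):y+2, max(x-1, 0):x+2] == 2).any():
#             out[y, x] = mag[y, x]
#     return out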
|
thanhtung48c/AUV-Crack-Detection-Model
|
script.py
|
script.py
|
py
| 5,431 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18941943109
|
# Dictionary operations
# Finding the minimum, the maximum, and sorting
prices = {
'ACME': 45.23,
'AAPL': 612.78,
'IBM': 205.55,
'HPQ': 37.20,
'FB': 10.75
}
min_price= min(zip(prices.values(),prices.keys())) # zip() swaps the order so values come before keys
print(min_price) # (10.75, 'FB')
max_price= max(zip(prices.values(),prices.keys()))
print(max_price) # (612.78, 'AAPL')
# Sort prices from low to high
prices_sorted= sorted(zip(prices.values(),prices.keys()))
print(prices_sorted)
# [(10.75, 'FB'), (37.2, 'HPQ'),(45.23, 'ACME'), (205.55, 'IBM'),(612.78, 'AAPL')]
min(prices,key=lambda k: prices[k]) # 'FB'
max(prices,key=lambda k: prices[k]) # 'AAPL'
# When multiple keys share the same value, the key decides which entry is returned
prices1= {'AAA': 45.23, 'BBB': 45.23}  # both values must be floats; mixing str and float breaks the comparison
min_price1= min(zip(prices1.values(),prices1.keys())) # zip() swaps the order so values come before keys
print(min_price1) # (45.23, 'AAA')
max_price1= max(zip(prices1.values(),prices1.keys()))
print(max_price1) # (45.23, 'BBB')
|
DoubleBlock/PythonCookbook
|
Chapter1/1.8.py
|
1.8.py
|
py
| 949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27810749441
|
__author__ = 'Matt Clarke-Lauer'
__email__ = '[email protected]'
__credits__ = ['Matt Clarke-Lauer']
__date__ = '8/1/13'  # as a string; the bare expression evaluated to a float
__version__ = '0.1'
__status__ = 'Development'
import log
name = "libraryApisUsed"
description = "Gets the used library apis"
result = []
def getName():
"return analysis name"
return name
def getDescription():
"return analysis description"
return description
def getResults(results):
results["Library Apis Used"] = result
return results
def run(classes, dependencies, sharedobjs):
global result
log.info("Analysis: Library Api Check")
result = dependencies["internal"]
|
mclarkelauer/AndroidAnalyzer
|
Analysis/plugins/libraryApisUsed/__init__.py
|
__init__.py
|
py
| 639 |
python
|
en
|
code
| 2 |
github-code
|
6
|
2966396264
|
'''
@File : ImageReward.py
@Time : 2023/02/28 19:53:00
@Author : Jiazheng Xu
@Contact : [email protected]
@Description: ImageReward reward model.
'''
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from config.options import *
from config.utils import *
from models.blip_pretrain import blip_pretrain
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
class MLP(nn.Module):
def __init__(self, input_size):
super().__init__()
self.input_size = input_size
self.layers = nn.Sequential(
nn.Linear(self.input_size, 1024),
#nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(1024, 128),
#nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(128, 64),
#nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(64, 16),
#nn.ReLU(),
nn.Linear(16, 1)
)
# initial MLP param
for name, param in self.layers.named_parameters():
if 'weight' in name:
nn.init.normal_(param, mean=0.0, std=1.0/(self.input_size+1))
if 'bias' in name:
nn.init.constant_(param, val=0)
def forward(self, input):
return self.layers(input)
class ImageReward(nn.Module):
def __init__(self, device='cpu'):
super().__init__()
self.device = device
self.blip = blip_pretrain(pretrained=config['blip_path'], image_size=config['BLIP']['image_size'], vit=config['BLIP']['vit'])
self.preprocess = _transform(config['BLIP']['image_size'])
self.mlp = MLP(config['ImageReward']['mlp_dim'])
if opts.fix_base:
self.blip.requires_grad_(False)
for name, parms in self.blip.named_parameters():
if '_proj' in name:
parms.requires_grad_(False)
# fix certain ratio of layers
self.image_layer_num = 24 if config['BLIP']['vit'] == 'large' else 12
if opts.fix_rate > 0:
text_fix_num = "layer.{}".format(int(12 * opts.fix_rate))
image_fix_num = "blocks.{}".format(int(self.image_layer_num * opts.fix_rate))
for name, parms in self.blip.text_encoder.named_parameters():
parms.requires_grad_(False)
if text_fix_num in name:
break
for name, parms in self.blip.visual_encoder.named_parameters():
parms.requires_grad_(False)
if image_fix_num in name:
break
def loose_layer(self, fix_rate):
text_layer_id = [f"layer.{id}" for id in range(int(12 * fix_rate), 13)]
image_layer_id = [f"blocks.{id}" for id in range(int(24 * fix_rate), 25)]
for name, parms in self.blip.text_encoder.named_parameters():
for text_id in text_layer_id:
if text_id in name:
parms.requires_grad_(True)
for name, parms in self.blip.visual_encoder.named_parameters():
for image_id in image_layer_id:
if image_id in name:
parms.requires_grad_(True)
def forward(self, batch_data):
# encode data
if opts.rank_pair:
batch_data = self.encode_pair(batch_data)
else:
batch_data = self.encode_data(batch_data)
# forward
emb_better, emb_worse = batch_data['emb_better'], batch_data['emb_worse']
reward_better = self.mlp(emb_better)
reward_worse = self.mlp(emb_worse)
reward = torch.concat((reward_better, reward_worse), dim=1)
return reward
def encode_pair(self, batch_data):
text_ids, text_mask, img_better, img_worse = batch_data['text_ids'], batch_data['text_mask'], batch_data['img_better'], batch_data['img_worse']
text_ids = text_ids.view(text_ids.shape[0], -1).to(self.device) # [batch_size, seq_len]
text_mask = text_mask.view(text_mask.shape[0], -1).to(self.device) # [batch_size, seq_len]
img_better = img_better.to(self.device) # [batch_size, C, H, W]
img_worse = img_worse.to(self.device) # [batch_size, C, H, W]
# encode better emb
image_embeds_better = self.blip.visual_encoder(img_better)
image_atts_better = torch.ones(image_embeds_better.size()[:-1], dtype=torch.long).to(self.device)
emb_better = self.blip.text_encoder(text_ids,
attention_mask = text_mask,
encoder_hidden_states = image_embeds_better,
encoder_attention_mask = image_atts_better,
return_dict = True,
).last_hidden_state # [batch_size, seq_len, feature_dim]
emb_better = emb_better[:, 0, :].float()
# encode worse emb
image_embeds_worse = self.blip.visual_encoder(img_worse)
image_atts_worse = torch.ones(image_embeds_worse.size()[:-1], dtype=torch.long).to(self.device)
emb_worse = self.blip.text_encoder(text_ids,
attention_mask = text_mask,
encoder_hidden_states = image_embeds_worse,
encoder_attention_mask = image_atts_worse,
return_dict = True,
).last_hidden_state
emb_worse = emb_worse[:, 0, :].float()
# get batch data
batch_data = {
'emb_better': emb_better,
'emb_worse': emb_worse,
}
return batch_data
def encode_data(self, batch_data):
txt_better, txt_worse = [], []
for item in batch_data:
text_input = self.blip.tokenizer(item["prompt"], padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(self.device)
txt_set = []
for generations in item["generations"]:
# image encode
img_path = os.path.join(config['image_base'], generations)
pil_image = Image.open(img_path)
image = self.preprocess(pil_image).unsqueeze(0).to(self.device)
image_embeds = self.blip.visual_encoder(image)
# text encode cross attention with image
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(self.device)
text_output = self.blip.text_encoder(text_input.input_ids,
attention_mask = text_input.attention_mask,
encoder_hidden_states = image_embeds,
encoder_attention_mask = image_atts,
return_dict = True,
)
txt_set.append(text_output.last_hidden_state[:,0,:])
labels = item["ranking"]
for id_l in range(len(labels)):
for id_r in range(id_l+1, len(labels)):
if labels[id_l] < labels[id_r]:
txt_better.append(txt_set[id_l])
txt_worse.append(txt_set[id_r])
elif labels[id_l] > labels[id_r]:
txt_better.append(txt_set[id_r])
txt_worse.append(txt_set[id_l])
# torch.Size([sample_num, feature_dim])
txt_better = torch.cat(txt_better, 0).float()
txt_worse = torch.cat(txt_worse, 0).float()
batch_data = {
'emb_better': txt_better,
'emb_worse': txt_worse,
}
return batch_data
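# Hedged sketch (an assumption about downstream training, not defined in this file):
# the [batch_size, 2] tensor returned by forward() is typically consumed by a
# pairwise ranking loss of the form -log(sigmoid(r_better - r_worse)).
def pairwise_ranking_loss(reward):
    # column 0 holds the "better" sample's score, column 1 the "worse" one
    return -F.logsigmoid(reward[:, 0] - reward[:, 1]).mean()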
|
THUDM/ImageReward
|
train/src/ImageReward.py
|
ImageReward.py
|
py
| 8,579 |
python
|
en
|
code
| 761 |
github-code
|
6
|
36797461084
|
"""Utility for ROC-AUC Visualization"""
import matplotlib.pyplot as plt
from src.utils import infoutils
def plot_signle_roc_auc(cfg, auc, fpr, tpr):
"""
Plots signel ROC Curve
Args:
cfg (cfgNode): Model configurations
auc (float): Area under the ROC curve
fpr (list): False positive rates
tpr (list): True positive rates
"""
plt.figure()
plt.plot(fpr, tpr, color='darkorange',
lw=cfg.VISUALIZE.PLIT_LINEWIDTH, label='ROC curve (area = %0.6f)' % auc)
plt.plot([0, 1], [0, 1], color='navy',
lw=cfg.VISUALIZE.PLIT_LINEWIDTH, label='Random Classifier ROC (area = 0.5)', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.01])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC)' + '\n' + \
infoutils.get_dataset_features_name(cfg) + '\n' + infoutils.get_full_model_without_features(cfg))
plt.legend(loc="lower right")
plt.show()
|
KhaledElTahan/Real-World-Anomaly-Detection
|
src/visualization/roc_auc.py
|
roc_auc.py
|
py
| 1,016 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8173760199
|
# Get Networks in Org
# Meraki API Reference:
# https://developer.cisco.com/meraki/api-latest/#!list-the-networks-that-the-user-has-privileges-on-in-an-organization
import tokens
import requests
import json
base_url = "https://api.meraki.com/api/v1"
resource_path = f"/organizations/{tokens.ORG_ID}/networks"
url = base_url + resource_path
payload = None
headers = {
"Accept": "application/json",
"X-Cisco-Meraki-API-Key": tokens.API_KEY
}
response = requests.request('GET', url, headers=headers, data = payload)
#print(response.text.encode('utf8'))
json_data = response.json()
print(json.dumps(json_data, indent=2))
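# Hedged follow-up sketch: the "id"/"name" fields follow the Meraki v1 networks
# response; a non-2xx status would normally be checked before relying on the body.
if response.ok:
    for network in json_data:
        print(network.get("id"), network.get("name"))
else:
    print("Request failed with status", response.status_code)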
|
jtsu/meraki_python
|
merakiScripts/2_getNetworks.py
|
2_getNetworks.py
|
py
| 633 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40071000142
|
import collections
class Solution(object):
def canConstruct(self, ransomNote, magazine):
"""
:type ransomNote: str
:type magazine: str
:rtype: bool
"""
res = collections.Counter(ransomNote) - collections.Counter(magazine)
return not res #(res == collections.Counter())
def main():
solution = Solution()
a = "bcjefgecdabaa"
b = "hfebdiicigfjahdddiahdajhaidbdgjihdbhgfbbccfdfggdcacccaebh"
print ('Output:', solution.canConstruct(a,b))
if __name__ == '__main__':
main()
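# Worked example of the Counter trick: subtraction keeps only positive counts, so an
# empty result means every ransom-note letter is covered by the magazine.
#   Counter("aab") - Counter("ab")  -> Counter({'a': 1})  (missing an 'a', not constructible)
#   Counter("ab")  - Counter("aab") -> Counter()          (constructible)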
|
lucy9215/leetcode-python
|
383_ransomNote.py
|
383_ransomNote.py
|
py
| 554 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73363651708
|
# private access only
import requests
import munch
# get the list of domains
#i.ivanov
headers = { 'PddToken': 'F4KTVIPLCFULRWCDFGJIKNGUPEWQUEMSKDG7DDFJZDPILB3JXLOQ'}
#x@yandex
headers = { 'PddToken': 'E7UIU2AHR33EOXDJ5W6R2Q2WRNW4TGCI5MZ2U6DOX5YKBEJW334A' }
url = 'https://pddimp.yandex.ru/api2/admin/domain/domains?'
r=requests.get(url,headers=headers)
obj = munch.munchify(r.json())
print(r.json())
# add a mailbox
url = 'https://pddimp.yandex.ru/api2/admin/email/add'
payload = {'domain': 'bellatrix.xyz', 'login': 'test2', 'password' : 'hardpass'}
headers = { 'PddToken': 'F4KTVIPLCFULRWCDFGJIKNGUPEWQUEMSKDG7DDFJZDPILB3JXLOQ'}
r=requests.post(url,data=payload,headers=headers)
# domain bellatrix.xyz
# block a mailbox
headers = { 'PddToken': 'F4KTVIPLCFULRWCDFGJIKNGUPEWQUEMSKDG7DDFJZDPILB3JXLOQ'}
payload = {'domain': 'bellatrix.xyz', 'login': 'test2', 'enabled': 'no' }
url = 'https://pddimp.yandex.ru/api2/admin/email/edit'
r=requests.post(url,data=payload,headers=headers)
# add a deputy domain administrator
url = 'https://pddimp.yandex.ru/api2/admin/deputy/list?domain=mrtkt74.ru'
# get the list of deputies
r = requests.get(url,headers=headers)
# add a deputy
url = 'https://pddimp.yandex.ru//api2/admin/deputy/add'
payload = {'domain': 'mrtkt74.ru', 'login': 'i.ivanov'}
headers = { 'PddToken': 'E7UIU2AHR33EOXDJ5W6R2Q2WRNW4TGCI5MZ2U6DOX5YKBEJW334A' }
r=requests.post(url,data=payload,headers=headers)
|
expo-lux/scripts
|
python/x_createuser.py
|
x_createuser.py
|
py
| 1,573 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
42602973405
|
import random
nombre = str(input("Hola por favor ingrese su nombre: "))
print("=====================================================")
print(f"Bienvenido {nombre} al juego 'ADIVINA EL NÚMERO'\n")
print("Reglas del juego: \n"
"a) El usuario debera ingresar un numero entero.\n"
"b) Debera adivinar el número entre 1 al 100.\n"
"c) Tendra solo 8 intentos para adivinar el número.")
print("=====================================================")
print(" Buena Suerte!!!")
intentos = 0
numero_rand = random.randint(1,100)
print(numero_rand) # TESTER for the random number ("REMOVE THIS LATER")
while(intentos < 8):
numero = (input("Ingrese el numero aquí: "))
if numero.isdigit():
numero = int(numero)
intentos = intentos + 1
if numero < numero_rand and intentos < 8:
print(f"El numero a adivinar es mayor, intentos restantes: {8 - intentos}")
elif numero > numero_rand and intentos <8:
print(f"EL numero a adivinar es menor, intentos restantes: {8 - intentos}")
elif numero == numero_rand:
print(f"Felicidades {nombre} has adivinado el numero '{numero_rand}' en solo {intentos} intentos!!!")
break
else:
print(f"Game Over, superaste los intentos fallidos!!!. El número a divinar era: '{numero_rand}'")
else:
print("Has ingresado una letra o un numero decimal .Por favor ingresa un numero entero!!!")
# GROUP 1
# Suarez German Daniel
# Schaab Medina Nelson
# Abson Lucas Bautista
# Durnbeck Federico
# Landriel Lautaro
# Giroldi Lucas Ezequiel
# Sanso Pedro
# Zabala Villalba Daiana Jacqueline
# Nuñez Juan
# Neumann Lucas
# Barboza Lucas David
|
NelsonSCH/PythonInformatorio2023
|
clase3/d3_g1.py
|
d3_g1.py
|
py
| 1,751 |
python
|
es
|
code
| 0 |
github-code
|
6
|
35245234184
|
from flask import render_template, url_for, flash, redirect, request, make_response, send_from_directory
from os import path
from csaptitude import app, db, bcrypt
from csaptitude.forms import TestResultsForm, TestRegistrationForm, TestLoginForm
from csaptitude.models import User, TestResult, QuestionResponse
from flask_login import login_user, current_user, logout_user, login_required
from sqlalchemy import desc
# Indexes of correct answers to test questions
correctAnswers = [2, 0, 5, 1, 4, 5, 2, 2, 1, 3, 4, 0, 4, 2, 5, 3, 0, 2, 1, 5, 0, 1, 5, 4, 0, 1, 5, 2, 1]
@app.route('/')
def home():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('about.html', title='About the Test')
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
    form = TestRegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(student_id=form.studentId.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash('Your account has been created!', 'success')
login_user(user)
next = request.args.get('next')
return redirect(next or url_for('test'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('test'))
form = TestLoginForm()
if form.validate_on_submit():
user = User.query.filter_by(student_id=form.studentId.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('test'))
else:
flash('Login Unsuccessful. Please check the Student ID and password', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/test', methods = ['GET', 'POST'])
@login_required
def test():
form = TestResultsForm()
if form.validate_on_submit():
#print (request.user_agent.version)
score = 0
answers = form.answers.data.split(',')
elapsedTimes = form.questionTimes.data.split(',')
test = TestResult(
user_id=current_user.id,
elapsed_time_ms=int(form.elapsedTime.data),
platform=request.user_agent.platform,
browser=request.user_agent.browser,
browser_version=request.user_agent.version,
language=request.user_agent.language)
db.session.add(test)
db.session.flush()
for index, ans in enumerate(answers):
if not not ans:
correct = correctAnswers[index]==int(ans)
quest = QuestionResponse(
test_result_id=test.id,
is_example=index < 3,
question_num=index - 3,
response=int(ans),
correct=correct,
elapsed_time_ms = (0 if elapsedTimes[index] == "NaN" else int(elapsedTimes[index])))
db.session.add(quest)
if correct and index >= 3:
score += 1
db.session.commit()
flash(f'Test Submitted! Your score is {score}', 'success')
return redirect(url_for('test_results'))
return render_template('test.html', form=form)
@app.route("/account")
@login_required
def account():
testResult = TestResult.query.filter_by(user_id=current_user.id).order_by(desc(TestResult.id)).first()
score = (QuestionResponse.query
.filter_by(test_result_id=testResult.id)
.filter_by(is_example=False)
.filter_by(correct=True)
.order_by('question_num')
.count()
if testResult else None)
date = testResult.created_at.strftime("%B %d, %Y at %H:%M UTC") if testResult else None
return render_template('account.html', title='Account', score=score, date=date)
@app.route("/results")
@login_required
def test_results():
testResult = TestResult.query.filter_by(user_id=current_user.id).order_by(desc(TestResult.id)).first()
score = None
answered = None
correct = None
if testResult:
answered = (QuestionResponse.query
.filter_by(test_result_id=testResult.id)
.filter_by(is_example=False)
.order_by('question_num').all())
answered = [a.question_num + 1 for a in answered]
correct = (QuestionResponse.query
.filter_by(test_result_id=testResult.id)
.filter_by(is_example=False)
.filter_by(correct=True)
.order_by('question_num').all())
score = len(correct)
correct = [a.question_num + 1 for a in correct]
correct = [c in correct for c in list(range(1, 27))]
return render_template('results.html', title="Test Results", answered=answered,
correct=correct, score=score)
@app.route("/data/byquest-wide")
@login_required
def by_quest_wide():
if not current_user.is_admin:
flash('You do not have access to this information.', 'danger')
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('account'))
query = (db.session.query(User,TestResult,QuestionResponse)
.filter(User.id == TestResult.user_id)
.filter(TestResult.id == QuestionResponse.test_result_id)
.filter(QuestionResponse.is_example == False)
.order_by(User.id, TestResult.id, QuestionResponse.question_num))
#print(query.statement.compile())
query = query.all()
data = 'id,email,test,date,elapsed_time_ms,q.' + ',q.'.join(str(e) for e in range(1,27))
prev_test = None
next_quest = 0
for (user, test, quest) in query:
if (test.id != prev_test):
prev_test = test.id
next_quest = 0
data += '\n'
data +=f'{user.student_id},{user.email},{test.id},{test.created_at},{test.elapsed_time_ms}'
for num in range (next_quest, quest.question_num):
data += ','
next_quest = quest.question_num + 1
data += f',{quest.correct + 0}'
#print (f'{user.student_id}, {test.id}, {quest.question_num}, {quest.correct}')
response = make_response(data)
response.headers["Content-Disposition"] = "attachment; filename=export.csv"
response.headers["Content-type"] = "text/csv"
return response
@app.route('/favicon.ico')
def favicon():
return send_from_directory(path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
|
DoctorHayes/AptitudeTest-CS
|
csaptitude/routes.py
|
routes.py
|
py
| 6,245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70107497149
|
from bs4 import BeautifulSoup
import requests
from pprint import pprint
def main():
url = 'https://remote.co'
remote_co_html = requests.get(url)
soup = BeautifulSoup(remote_co_html.content,"html.parser")
#the_main_class = soup.find("body",class_="home blog remote-co").main.find("div",class_="container pt-4").find("div",class_="row").find("div",class_="col").find_all("div",class_="card bg-light mb-3").find("div")
the_main_class = soup.main.find("div",class_="container pt-4").find_all("div",class_="card bg-light mb-3")[1].find("div",class_="card-body").find_all('h6')#
#pprint(the_main_class)
jobs = []
for eachheader in the_main_class:
jobs.append(eachheader.string)
pprint(jobs)
#for eachmarker in the_main_class:
# each_card = eachmarker.find_all('div',class_='card')
# for each_job in each_card:
# print(each_job.img['alt'])
#for each_entry in each_card:
# each_job = each_entry.find('div',class_='card-body').img['alt']
# print(each_job)
#pprint(the_job)
#[0].find('div',class_='card-body').img['alt']
#pprint(the_main_class)
if __name__ == '__main__':
main()
|
cthacker-udel/Python-WebScraper
|
remoteCoScraper.py
|
remoteCoScraper.py
|
py
| 1,208 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8770897157
|
import asyncio
import logging
import sys
import time
import pyautogui
import pydirectinput
import qasync
from PySide6 import QtWidgets, QtCore
from front import Ui_MainWindow
def use_quick_access_inventory():
print("Use quick access of inventory")
pydirectinput.keyDown('1')
time.sleep(0.1)
pydirectinput.keyUp('1')
pydirectinput.keyDown('2')
time.sleep(0.1)
pydirectinput.keyUp('2')
pydirectinput.keyDown('3')
time.sleep(0.1)
pydirectinput.keyUp('3')
pydirectinput.keyDown('4')
time.sleep(0.1)
pydirectinput.keyUp('4')
def logout():
print("Logout")
time.sleep(0.1)
pydirectinput.leftClick(1264, 967)
time.sleep(1)
pydirectinput.leftClick(655, 573)
@qasync.asyncSlot()
async def terminate_spot(time_attack):
pydirectinput.keyDown('1')
await asyncio.sleep(0.1)
pydirectinput.keyUp('1')
await asyncio.sleep(0.1)
pydirectinput.keyDown('space')
await asyncio.sleep(time_attack)
pydirectinput.keyUp('space')
pydirectinput.keyDown('z')
await asyncio.sleep(0.1)
pydirectinput.keyUp('z')
@qasync.asyncSlot()
async def change_chanel(chanel, time_change):
if chanel < 0 or chanel > 4:
print(f'Chanel cant be {chanel}')
return
get_window("Insomnia Helper", False)
get_window('InsomniaMT2', True)
print(f'Chanel is changing to chanel {chanel}')
if chanel == 1:
await asyncio.sleep(0.1)
pydirectinput.leftClick(1156, 81)
await asyncio.sleep(time_change)
return
if chanel == 2:
await asyncio.sleep(0.1)
pydirectinput.leftClick(1154, 102)
await asyncio.sleep(time_change)
return
if chanel == 3:
await asyncio.sleep(0.1)
pydirectinput.leftClick(1160, 127)
await asyncio.sleep(time_change)
return
if chanel == 4:
await asyncio.sleep(0.1)
pydirectinput.leftClick(1174, 147)
await asyncio.sleep(time_change)
return
def use_main_inventory_slots(number):
if number < 1 or number > 5:
return False
print(f'Use slot {number} of main inventory')
if number == 1:
time.sleep(0.1)
pydirectinput.leftClick(1137, 645)
return True
if number == 2:
time.sleep(0.1)
pydirectinput.leftClick(1168, 645)
return True
if number == 3:
time.sleep(0.1)
pydirectinput.leftClick(1200, 645)
return True
if number == 4:
time.sleep(0.1)
pydirectinput.leftClick(1234, 645)
return True
if number == 5:
time.sleep(0.1)
pydirectinput.leftClick(1264, 645)
return True
def open_main_inventory():
print("Opening main inventory")
time.sleep(0.1)
pydirectinput.leftClick(1192, 970)
def get_window(title, client):
while True:
hwnd = pyautogui.getWindowsWithTitle(title)
if len(hwnd) <= 0:
return None
if len(hwnd) >= 2:
print(f'Number of finding windows is {len(hwnd)}')
window_to_remove = pyautogui.getWindowsWithTitle("InsomniaMT2 Klient")
window_to_remove[0].maximize()
decison = pyautogui.confirm(text='Remove this window?', title='Remove', buttons=['OK', 'Cancel'])
if decison == 'Cancel':
return None
if decison == 'OK':
window_to_remove[0].close()
time.sleep(0.1)
if len(hwnd) == 1:
hwnd[0].activate()
if client:
hwnd[0].moveTo(0, 0)
else:
hwnd[0].moveTo(1280, 0)
return hwnd
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setupUi(self)
self.setWindowTitle("Insomnia Helper")
self.stackedWidget.setCurrentIndex(0)
# Variable
self.can_running = False
self.counter_chanel = 1
# Connect push button
self.autoDropping.clicked.connect(self.auto_dropping)
self.bossHelpper.clicked.connect(self.boss_helper)
self.back_button.clicked.connect(self.back_start_page)
self.back_button_1.clicked.connect(self.back_start_page)
self.start_dropping.clicked.connect(self.start_dropping_fun)
def auto_dropping(self):
self.stackedWidget.setCurrentIndex(1)
def boss_helper(self):
self.stackedWidget.setCurrentIndex(2)
def back_start_page(self):
self.stackedWidget.setCurrentIndex(0)
def start_dropping_fun(self):
print("button clicked")
self.can_running = not self.can_running
if self.can_running:
self.main()
def keyPressEvent(self, event):
key = event.key()
# if key == 61:
# self.can_running = True
# print("Key started is clicked")
# self.main()
# return
#
# if key == 45:
# print("Key stopped is clicked")
# self.can_running = False
# return
        print(f"Found key {key} --> no action bound to this key")
@qasync.asyncSlot()
async def main(self):
print("Dropping is started")
try:
while True:
print(self.can_running)
if not self.can_running:
print("Dropping is stopped")
break
await self.auto_dropping_fun()
        except Exception:
            print("Error with dropping, try again")
@qasync.asyncSlot()
async def auto_dropping_fun(self):
if not self.ch1_check_box.isChecked() and self.counter_chanel == 1:
self.counter_chanel += 1
if not self.ch2_check_box.isChecked() and self.counter_chanel == 2:
self.counter_chanel += 1
if not self.ch3_check_box.isChecked() and self.counter_chanel == 3:
self.counter_chanel += 1
if not self.ch4_check_box.isChecked() and self.counter_chanel == 4:
self.counter_chanel += 1
if self.counter_chanel > 4:
self.counter_chanel = 1
await asyncio.sleep(0.1)
return
await terminate_spot(self.atact_time.value())
await change_chanel(self.counter_chanel, self.change_chanel_time.value())
self.counter_chanel += 1
def start_application(): # Start Application with qasync
app = QtWidgets.QApplication(sys.argv)
loop = qasync.QEventLoop(app)
window = MainWindow()
window.show()
logging.info("Starting application Insomnia bot ...")
with loop:
loop.run_forever()
start_application()
|
arekszatan/botClicker
|
InsomniaBot.py
|
InsomniaBot.py
|
py
| 6,684 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13209639303
|
import PIL
import pyautogui
def popper():
while True:
try:
box = pyautogui.locateOnScreen("C:/Users/Bryan/Documents/GitHub/Cuhacking/decline.png", confidence = 0.55)
loc = pyautogui.center(box)
print(loc)
pyautogui.click(loc.x, loc.y)
break
        except Exception:
            # keep polling until the button shows up on screen
            print("Button not found yet, retrying...")
|
RogerLamTd/Cuhacking
|
AutomatedQueuePopper/League.py
|
League.py
|
py
| 374 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7606759311
|
import datetime
import time
from .. import emails
from ..database import get_sql_connection
from ..models import Account, Session as SqlSession
__description__ = 'Send out summary emails.'
def send_out_emails():
session = SqlSession()
today = datetime.date.today()
accounts = session.query(Account) \
.filter(Account.receive_summary_email == True) # noqa
for account in accounts:
try:
email = emails.Summary(account, today)
except RuntimeError: # no tasks
continue
with emails.Mailer() as mailer:
mailer.send(email)
def command(args):
get_sql_connection()
if args.forever:
while True:
tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1)
tomorrow = tomorrow.replace(hour=4, minute=0)
diff = tomorrow - datetime.datetime.utcnow()
time.sleep(diff.total_seconds())
send_out_emails()
else:
send_out_emails()
def add_subparser(subparsers):
parser = subparsers.add_parser('send-summary-emails', help=__description__)
parser.add_argument('--forever', action='store_true')
parser.set_defaults(func=command)
|
thomasleese/gantt-charts
|
ganttcharts/cli/send_summary_emails.py
|
send_summary_emails.py
|
py
| 1,214 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6123307630
|
import os
import sys
import shutil
import logging
import argparse
import warnings
import re
from pathlib import Path
from ..backend_funcs.convert import parse_validator
import subprocess as sub
import pandas as pd
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('fw-heudiconv-validator')
def escape_ansi(line):
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
return ansi_escape.sub('', line)
def find_all(regex, text):
match_list = []
while True:
match = re.search(regex, text)
if match:
match_list.append(match.group(0))
text = text[match.end():]
else:
return match_list
def validate_local(path, verbose, tabulate='.'):
logger.info("Launching bids-validator...")
command = ['bids-validator', path]
if verbose:
command.extend(['--verbose'])
p = sub.Popen(command, stdout=sub.PIPE, stdin=sub.PIPE, stderr=sub.PIPE, universal_newlines=True)
output, error = p.communicate()
logger.info(output)
if p.returncode != 0:
logger.info(error)
if os.path.exists(tabulate):
logger.info("Parsing issues and writing to issues.csv")
command = ['bids-validator', path, '--json', '--verbose']
with open(tabulate + '/issues.json', "w") as outfile:
p2 = sub.run(command, stdout=outfile)
if p2.returncode == 0:
issues_df_full = parse_validator(tabulate + '/issues.json')
issues_df_full.to_csv(tabulate + '/issues.csv', index=False)
return p.returncode
def fw_heudiconv_export(proj, subjects=None, sessions=None, destination="tmp", name="bids_directory", key=None):
logger.info("Launching fw-heudiconv-export...")
command = ['fw-heudiconv-export', '--project', ' '.join(proj), '--destination', destination, '--directory-name', name]
if subjects:
command.extend(['--subject'] + subjects)
if sessions:
command.extend(['--session'] + sessions)
if key:
command.extend(['--api-key'] + [key])
p = sub.Popen(command, stdout=sub.PIPE, stdin=sub.PIPE, stderr=sub.PIPE, universal_newlines=True)
output, error = p.communicate()
logger.info(output)
if p.returncode != 0:
logger.info(error)
return p.returncode
def get_parser():
parser = argparse.ArgumentParser(
description="Validate BIDS-curated data on Flywheel. A simple wrapper around the original BIDS Validator https://github.com/bids-standard/bids-validator")
parser.add_argument(
"--directory",
help="Temp space used for validation",
default=".",
required=False,
type=str
)
parser.add_argument(
"--project",
help="The project on Flywheel",
nargs="+"
)
parser.add_argument(
"--subject",
help="The subject(s) on Flywheel to validate",
nargs="+",
default=None,
type=str
)
parser.add_argument(
"--session",
help="The session(s) on Flywheel to validate",
nargs="+",
default=None,
type=str
)
parser.add_argument(
"--verbose",
help="Pass on <VERBOSE> flag to bids-validator",
default=False,
action='store_true'
)
parser.add_argument(
"--tabulate",
default=".",
required=False,
type=str,
help="Directory to save tabulation of errors"
)
parser.add_argument(
"--api-key",
help="API Key",
action='store',
default=None
)
parser.add_argument(
"--dry-run",
help=argparse.SUPPRESS,
action='store_false',
default=None
)
return parser
def main():
logger.info("{:=^70}\n".format(": fw-heudiconv validator starting up :"))
parser = get_parser()
args = parser.parse_args()
exit = 1
if not args.project:
logger.error("No project on Flywheel specified!")
sys.exit(exit)
success = fw_heudiconv_export(proj=args.project, subjects=args.subject, sessions=args.session, destination=args.directory, name='bids_directory', key=args.api_key)
if success == 0:
path = Path(args.directory, 'bids_directory')
exit = validate_local(path, args.verbose, args.tabulate)
shutil.rmtree(path)
else:
logger.error("There was a problem downloading the data to a temp space for validation!")
logger.info("Done!")
logger.info("{:=^70}".format(": Exiting fw-heudiconv validator :"))
sys.exit(exit)
if __name__ == "__main__":
main()
|
PennLINC/fw-heudiconv
|
fw_heudiconv/cli/validate.py
|
validate.py
|
py
| 4,603 |
python
|
en
|
code
| 5 |
github-code
|
6
|
75241216826
|
from typing import Any, Type
from aiohttp import BasicAuth
from ..internal.gateway import Gateway
from ..internal.http import HTTPClient
from .cache import CacheStore, Store
_BASE_MODELS: dict[Any, Any] = {}
class State:
"""The central bot cache."""
def __init__(
self,
token: str,
# cache-options
max_messages: int,
max_members: int,
intents: int,
# "advanced" options
base_url: str = "https://discord.com/api/v10",
proxy: str | None = None,
proxy_auth: BasicAuth | None = None,
# classes
store_class: Type[Store] = Store,
model_classes: dict[Any, Any] = _BASE_MODELS,
) -> None:
self._token = token
self.cache = CacheStore(store_class)
self.cache["messages"] = max_messages
self.cache["members"] = max_members
self.max_members = max_members
self.intents = intents
self.http = HTTPClient(token)
self.gateway = Gateway(self)
|
VincentRPS/pycv3
|
pycord/state/core.py
|
core.py
|
py
| 1,011 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14959398109
|
from mock import Mock
from yoti_python_sandbox.doc_scan.check import SandboxZoomLivenessCheckBuilder
from yoti_python_sandbox.doc_scan.check.report.breakdown import SandboxBreakdown
from yoti_python_sandbox.doc_scan.check.report.recommendation import (
SandboxRecommendation,
)
def test_zoom_liveness_check_should_set_correct_liveness_type():
recommendation_mock = Mock(spec=SandboxRecommendation)
breakdown_mock = Mock(spec=SandboxBreakdown)
check = (
SandboxZoomLivenessCheckBuilder()
.with_recommendation(recommendation_mock)
.with_breakdown(breakdown_mock)
.build()
)
assert check.liveness_type == "ZOOM"
def test_zoom_liveness_check_build_result_object():
recommendation_mock = Mock(spec=SandboxRecommendation)
breakdown_mock = Mock(spec=SandboxBreakdown)
check = (
SandboxZoomLivenessCheckBuilder()
.with_recommendation(recommendation_mock)
.with_breakdown(breakdown_mock)
.build()
)
assert check.result.report.recommendation is not None
assert check.result.report.recommendation == recommendation_mock
assert len(check.result.report.breakdown) == 1
assert check.result.report.breakdown[0] == breakdown_mock
|
getyoti/yoti-python-sdk-sandbox
|
yoti_python_sandbox/tests/doc_scan/check/test_sandbox_liveness_check.py
|
test_sandbox_liveness_check.py
|
py
| 1,243 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21884345926
|
import cv2
def distance(p1, p2):
# D8 distance
return max(abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
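# gamma() below builds a colour auto-correlogram-like table: for every exact colour it counts the
# pixel pairs of that colour lying at each D8 distance d (0..7) and normalises by the total number
# of pixels; the tables of the two images are then compared at the bottom of the script.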
def gamma(img):
totalpixels = img.shape[0] * img.shape[1]
color_dict = {}
for i in range(len(img)):
for j in range(len(img[i])):
tc = tuple(img[i][j])
if (tc in color_dict):
color_dict[tc].append((i, j))
else:
color_dict[tc] = [(i, j)]
probability = {}
for d in range(8):
for color in color_dict:
count = 0
k = color_dict[color]
for p1 in range(len(k)):
for p2 in range(p1, len(k)):
if (distance(k[p1], k[p2]) == d):
count += 1
if color not in probability:
probability[color] = [0 for i in range(8)]
probability[color][d] = float(count) / totalpixels
return probability
img = cv2.imread("bed.jpg")
img2 = cv2.imread("img2.jpg")
g1 = gamma(img)
g2 = gamma(img2)
s = 0
m = 0
for color in g1:
if color not in g2:
continue
m += 1
for d in range(8):
s += abs(g1[color][d] - g2[color][d]) / float(1 + g1[color][d] + g2[color][d])
s /= float(m)
print(s)
|
NitigyaPant/MCA_assignment
|
test.py
|
test.py
|
py
| 1,025 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19700703411
|
import discord
from discord.ext import commands
import urllib.parse, urllib.request
import requests
import googlesearch
import re
import json
bot = commands.Bot(description = "Im just a Kid", command_prefix ="@")
@bot.event
async def on_ready():
print("IM READYYY")
@bot.command(pass_context=True)
async def search(ctx, *args):
sites = [" Stadium Goods"]
urllist = []
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
await ctx.send("working on your request")
#SX
keywords = ''
for word in args:
keywords += word + '%20'
json_string = json.dumps({"params": f"query={keywords}&hitsPerPage=20&facets=*"})
byte_payload = bytes(json_string, 'utf-8')
algolia = {"x-algolia-agent": "Algolia for vanilla JavaScript 3.32.0", "x-algolia-application-id": "XW7SBCT9V6", "x-algolia-api-key": "6bfb5abee4dcd8cea8f0ca1ca085c2b3"}
with requests.Session() as session:
r = session.post("https://xw7sbct9v6-dsn.algolia.net/1/indexes/products/query", params=algolia, verify=False, data=byte_payload, timeout=30)
results = r.json()["hits"][0]
    apiurl = f"https://stockx.com/api/products/{results['url']}?includes=market,360&currency=USD"
header = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9,ja-JP;q=0.8,ja;q=0.7,la;q=0.6',
'appos': 'web',
'appversion': '0.1',
'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
}
response = requests.get(apiurl, verify=False, headers=header)
prices = response.json()
general = prices['Product']
market = prices['Product']['market']
sizes = prices['Product']['children']
embed = discord.Embed(title='StockX', color=0x43dd36)
embed.set_thumbnail(url=results['thumbnail_url'])
embed.add_field(name=general['title'], value='https://stockx.com/' + general['urlKey'], inline=False)
embed.add_field(name='SKU/PID:', value=general['styleId'], inline=True)
embed.add_field(name='Colorway:', value=general['colorway'], inline=True)
embed.add_field(name='Retail Price:', value=f"${general['retailPrice']}", inline=False)
for size in sizes:
if len(sizes[size]['market']) != 0:
if (sizes[size]['market']['lowestAsk'] != 0 and sizes[size]['market']['highestBid'] != 0):
embed.add_field(name = f"Size: {sizes[size]['shoeSize']}", value=f"Low Ask: $ {sizes[size]['market']['lowestAsk']}\n High Bid: $ {sizes[size]['market']['highestBid']}", inline=True)
embed.set_footer(text='GitHub: TestingYG', icon_url ="https://image.shutterstock.com/image-photo/cute-little-chicken-isolated-on-260nw-520818805.jpg")
sku = general['styleId']
for x in sites:
for i in (googlesearch.search(sku+x ,tld='co.in',lang='en',num=10,stop=1,pause=2)):
urllist.append(str(i))
#SG
req = urllib.request.Request(urllist[0], headers=headers)
resp = urllib.request.urlopen(req)
respdata = str(resp.read())
find = re.findall('"sizeLabel":.*?."f', respdata)
find = ''.join(find)
size = re.findall('"sizeLabel".*?,', find)
size = ''.join(size)
size = re.sub('sizeLabel":"', "", size)
size = re.sub('"', "", size)
size = size[:-1]
size = size.split(",")
price = re.findall('"price":.*?"f', find)
price = ''.join(price)
price = re.sub('"f', "", price)
price = re.sub('"price":', " ", price)
price = re.sub(", ", " ", price)
price = price[:-1]
price = re.sub('"', "", price)
price = re.sub('null', '0', price)
price = price.split(" ")
price = price[1:]
StadiumGoods = dict(zip(size, price))
embedSG = discord.Embed(title='Stadium Goods', color=0xd1d8d0)
embedSG.set_thumbnail(url=results['thumbnail_url'])
embedSG.add_field(name=general['title'], value= urllist[0], inline=False)
embedSG.add_field(name='SKU/PID:', value=general['styleId'], inline=True)
embedSG.add_field(name='Colorway:', value=general['colorway'], inline=True)
embedSG.add_field(name='Retail Price:', value=f"${general['retailPrice']}", inline=False)
for k,v in StadiumGoods.items():
if v != '0':
embedSG.add_field(name=f'Size: {k}', value= f"$ {v}", inline=True)
embedSG.set_footer(text='GitHub: TestingYG', icon_url ="https://image.shutterstock.com/image-photo/cute-little-chicken-isolated-on-260nw-520818805.jpg")
#GOAT
keywords = f"{general['styleId']}%20"
json_string = json.dumps({"params": f"facets=%2A&hitsPerPage=20&query={keywords}"})
byte_payload = bytes(json_string, 'utf-8')
algolia = {"x-algolia-agent": "Algolia for vanilla JavaScript 3.32.0", "x-algolia-application-id": "2FWOTDVM2O", "x-algolia-api-key": "ac96de6fef0e02bb95d433d8d5c7038a"}
with requests.Session() as session:
r = session.post("https://2fwotdvm2o-dsn.algolia.net/1/indexes/product_variants_v2/query", params=algolia, verify=False, data=byte_payload, timeout=30)
results1 = r.json()["hits"][0]
apiurl = f"https://www.goat.com/web-api/v1/product_variants?productTemplateId={results1['slug']}"
url = f"https://www.goat.com/sneakers/{results1['slug']}/available-sizes"
header = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9,ja-JP;q=0.8,ja;q=0.7,la;q=0.6',
'appos': 'web',
'appversion': '0.1',
'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
}
response = requests.get(apiurl, verify=False, headers=header)
prices = response.json()
dic = {}
for x in range(len(prices)):
if (prices[x]["shoeCondition"] == "new_no_defects" and prices[x]["boxCondition"] == "good_condition"):
reduce = prices[x]["lowestPriceCents"]["amount"] / 100
dic[prices[x]["size"]] = int(reduce)
embedG = discord.Embed(title='Goat', color=0x000000)
embedG.set_thumbnail(url=results['thumbnail_url'])
embedG.add_field(name=general['title'], value= url, inline=False)
embedG.add_field(name='SKU/PID:', value=general['styleId'], inline=True)
embedG.add_field(name='Colorway:', value=general['colorway'], inline=True)
embedG.add_field(name='Retail Price:', value=f"${general['retailPrice']}", inline=False)
if len(dic) != 0:
for k,v in dic.items():
embedG.add_field(name = f"Size: {k}", value=f"$ {v}", inline=True)
embedG.set_footer(text='GitHub: TestingYG', icon_url ="https://image.shutterstock.com/image-photo/cute-little-chicken-isolated-on-260nw-520818805.jpg")
await ctx.send(embed=embed)
await ctx.send(embed=embedSG)
await ctx.send(embed=embedG)
bot.run("")
|
TestingYG/ProjectDumButt
|
DumButtv2.py
|
DumButtv2.py
|
py
| 7,169 |
python
|
en
|
code
| 1 |
github-code
|
6
|
75188756666
|
# Possible improvements to the code below:
# use a 2D list and add conditions on the coordinates so that positions which do not exist are skipped,
# or use try-except
def pop_reverse_st(n):
    for i in range(n):
if reverse_st[i]:
result.append(reverse_st[i].pop())
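# A rough sketch of the try-except approach suggested in the header comment
# (kept only as a reference; the loop below does not call it):
def read_vertically(rows):
    chars = []
    longest = max(len(row) for row in rows)
    for col in range(longest):
        for row in rows:
            try:
                chars.append(row[col])
            except IndexError:
                continue
    return ''.join(chars)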
T = int(input())
for test_case in range(1, T+1):
st = [list(input()) for _ in range(5)]
reverse_st = []
    result = []  # stores the characters read column by column
maxLen = 0
    # build reversed lists so they can be popped like a stack
for i in range(5):
reverse_st.append(st[i][::-1])
    # length of the longest row
for arr in reverse_st:
temp = len(arr)
if temp > maxLen:
maxLen = temp
    # pop column by column, up to the longest length
for i in range(maxLen):
pop_reverse_st(5)
ans = ''.join(result)
print(f'#{test_case} {ans}')
|
zacinthepark/Problem-Solving-Notes
|
swea/0819_의석이의세로로말해요.py
|
0819_의석이의세로로말해요.py
|
py
| 941 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
38088951612
|
# -*- coding: utf-8 -*-
"""
Production Mapper
Michael Troyer
[email protected]
"""
import datetime
import os
import traceback
from collections import defaultdict
import arcpy
arcpy.env.addOutputsToMap = False
arcpy.env.overwriteOutput = True
##---Functions-------------------------------------------------------------------------------------
def build_where_clause(table, field, valueList):
"""
Takes a list of values and constructs a SQL WHERE
clause to select those values within a given field and table.
"""
# Add DBMS-specific field delimiters
fieldDelimited = arcpy.AddFieldDelimiters(arcpy.Describe(table).path, field)
# Determine field type
fieldType = arcpy.ListFields(table, field)[0].type
# Add single-quotes for string field values
if str(fieldType) == 'String':
valueList = ["'%s'" % value for value in valueList]
# Format WHERE clause in the form of an IN statement
whereClause = "%s IN(%s)" % (fieldDelimited, ', '.join(map(str, valueList)))
return whereClause
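# Example (illustrative layer/field names): build_where_clause(counties_fc, "STATE", ["CO", "NM"])
# returns a clause of the form  "STATE" IN('CO', 'NM')  (the exact field delimiter depends on the
# workspace reported by arcpy.AddFieldDelimiters).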
def intersect_and_get_attributes(source_layer, intersect_layer, intersect_field):
arcpy.SelectLayerByLocation_management(intersect_layer, 'INTERSECT', source_layer)
    if not arcpy.Describe(intersect_layer).FIDSet:  # empty FIDSet string means nothing was selected
return []
with arcpy.da.SearchCursor(intersect_layer, intersect_field) as cur:
values = [row[0] for row in cur]
arcpy.SelectLayerByAttribute_management(intersect_layer, "CLEAR_SELECTION")
return values
class Toolbox(object):
def __init__(self):
self.label = "Production Mapper"
self.alias = "Production_Mapper"
# List of tool classes associated with this toolbox
self.tools = [ProductionMapper]
class ProductionMapper(object):
def __init__(self):
self.label = "ProductionMapper"
self.description = ""
self.canRunInBackground = True
def getParameterInfo(self):
input_fc=arcpy.Parameter(
displayName="Input Feature Class",
name="Input Feature Class",
datatype="Feature Class",
parameterType="Required",
direction="Input",
)
project_id=arcpy.Parameter(
displayName="Project ID",
name="Project ID",
datatype="String",
parameterType="Optional",
)
title=arcpy.Parameter(
displayName="Project Title",
name="Project Title",
datatype="String",
parameterType="Optional",
)
author=arcpy.Parameter(
displayName="Author",
name="Author",
datatype="String",
parameterType="Optional",
)
template=arcpy.Parameter(
displayName="Select Map Template",
name="Select Map Template",
datatype="DEMapDocument",
parameterType="Required",
direction="Input",
)
output_mxd=arcpy.Parameter(
displayName="Output Map Document",
name="Output Map Document",
datatype="DEMapDocument",
parameterType="Required",
direction="Output",
)
return [input_fc, project_id, title, author, template, output_mxd]
def isLicensed(self):
return True
def updateParameters(self, params):
params[0].filter.list = ["Polygon"]
return
def updateMessages(self, params):
return
def execute(self, params, messages):
input_fc, project_id, title, author, template, output_mxd = params
try:
# for param in params:
# arcpy.AddMessage('{} [Value: {}]'.format(param.name, param.value))
layer = arcpy.MakeFeatureLayer_management(input_fc.value, "in_memory\\tmp")
mxd = arcpy.mapping.MapDocument(template.valueAsText)
df = arcpy.mapping.ListDataFrames(mxd)[0]
database = r'.\Production_Mapper.gdb'
counties_layer = arcpy.MakeFeatureLayer_management(os.path.join(database, 'Counties'), r'in_memory\Counties')
quads_layer = arcpy.MakeFeatureLayer_management(os.path.join(database, 'Quad_Index_24k'), r'in_memory\Quads')
plss_layer = arcpy.MakeFeatureLayer_management(os.path.join(database, 'PLSS_FirstDivision'), r'in_memory\PLSS')
utm_zone_layer = arcpy.MakeFeatureLayer_management(os.path.join(database, 'UTM_Zones'), r'in_memory\UTM_Zone')
counties = intersect_and_get_attributes(layer, counties_layer, 'LABEL')
plss = intersect_and_get_attributes(layer, plss_layer, 'FRSTDIVID')
quads = intersect_and_get_attributes(layer, quads_layer, 'QUADNAME')
utm_zone = intersect_and_get_attributes(layer, utm_zone_layer, 'UTM_Zone')
# Counties
county_text = 'County(s):\n{}'.format(', '.join(counties))
arcpy.AddMessage(county_text)
# Quads
quad_text = "7.5' Quad(s):\n{}".format(', '.join(quads))
arcpy.AddMessage(quad_text)
# PLSS
plss_data = defaultdict(list)
for row in plss:
pm = int(row[2:4])
tw = row[5:7] + row[8]
rg = row[10:12] + row[13]
sn = int(row[17:19])
plss_data[(pm, tw, rg)].append(sn)
plss_text = '\n'.join(
[
'PM {} | Twn {} | Rng {} \nSections: {}'.format(
pm, tw, rg, ', '.join([str(s) for s in sorted(secs)])
)
for (pm, tw, rg), secs in plss_data.items()
]
)
arcpy.AddMessage(plss_text)
# UTM Coordinates
dissolve = arcpy.Dissolve_management(layer, r'in_memory\dissolve')
dissolve_layer = arcpy.MakeFeatureLayer_management(dissolve, r'in_memory\dissolve_layer')
with arcpy.da.SearchCursor(dissolve_layer, "SHAPE@XY") as cur:
for pt, in cur:
mX, mY = pt
utm_e = round(mX, 0)
utm_n = round(mY, 0)
utm_text = '{}N | {} mN | {} mE'.format(max(utm_zone), utm_n, utm_e)
arcpy.AddMessage(utm_text)
# Date
now = datetime.datetime.now()
date_text = r'Map Date: {}/{}/{}'.format(now.month, now.day, now.year)
arcpy.AddMessage(date_text)
# Get and update the layout elements
layout_elements = {le.name: le for le in arcpy.mapping.ListLayoutElements(mxd)}
for field, update in {
'County': county_text,
'Quad': quad_text,
'PLSS': plss_text,
'UTM': utm_text,
'Date': date_text,
'Project ID': project_id.valueAsText,
'Title': title.valueAsText,
'Author': author.valueAsText,
}.items():
if update:
try:
layout_elements[field].text = update
except KeyError:
pass
# Update map and save output
arcpy.mapping.AddLayer(df, arcpy.mapping.Layer(input_fc.valueAsText), "TOP")
df.extent = arcpy.Describe(layer).extent
df.scale = round(df.scale * 1.25, -2)
            arcpy.RefreshActiveView()
            arcpy.RefreshTOC()
output = output_mxd.valueAsText
if not output.endswith('.mxd'):
output += '.mxd'
mxd.saveACopy(output)
# Clean up
for item in (layer, counties_layer, quads_layer, plss_layer, utm_zone_layer, dissolve):
try:
arcpy.Delete_management(item)
except:
pass
except:
arcpy.AddError(str(traceback.format_exc()))
return
|
MichaelTroyer/ArcGIS_NRCS_Production_Mapper
|
Production_Mapper.pyt
|
Production_Mapper.pyt
|
pyt
| 8,168 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2665839986
|
from math import sqrt, sinh, cosh
class HeatSink:
def __init__(self,
baseLength: float = 0.865,
baseWidth: float = 0.5,
baseDepth: float = 0.003,
noFinsLength: float = 45,
noFinsWidth: float = 15,
finWidth: float = 0.03,
finLength: float = 0.005,
finDepth: float = 0.02,
conductivity: float = 205,
contactConductivity: float = 3,
timThickness: float = 0.001,
h_bar: float = 22.7,
):
self.noFinsLength = round(noFinsLength)
self.noFinsWidth = round(noFinsWidth)
self.baseLength = baseLength
self.baseWidth = baseWidth
self.baseDepth = baseDepth
self.finWidth = finWidth
self.finLength = finLength
self.finDepth = finDepth
self.spacingLength = (self.baseLength - (self.noFinsLength * self.finLength)) / (self.noFinsLength + 1)
while self.spacingLength < 0:
self.noFinsLength -= 1
self.spacingLength = (self.baseLength - (self.noFinsLength * self.finLength)) / (self.noFinsLength + 1)
self.spacingWidth = (self.baseWidth - (self.noFinsWidth * self.finWidth)) / (self.noFinsWidth + 1)
while self.spacingWidth < 0:
self.noFinsWidth -= 1
self.spacingWidth = (self.baseWidth - (self.noFinsWidth * self.finWidth)) / (self.noFinsWidth + 1)
self.noFins = self.noFinsLength * self.noFinsWidth
self.conductivity = conductivity
self.baseArea = self.baseLength * self.baseWidth
self.finArea = self.finLength * self.finWidth
self.finPerimeter = 2 * self.finLength + 2 * self.finWidth
self.volume = self.baseArea * self.baseDepth + self.noFins * (self.finArea * self.finDepth)
self.mass = self.volume * 2700 + 1.4135
self.cost = self.mass * 2
self.contactConductivity = contactConductivity
self.timThickness = timThickness
self.h_hs = h_bar
def base_temperature(self, t_2, Q_hs_cond):
return t_2 - Q_hs_cond / self.baseArea * self.baseDepth / self.conductivity
def tim_temperature(self, t_2, Q_solar):
return t_2 + Q_solar * self.timThickness / (self.contactConductivity * self.baseArea)
def fin_convection(self, t_b, fluid_properties):
def calculate_fin_constants(h_bar, p, k, a_c, t_b, t_inf):
m = sqrt((h_bar * p) / (k * a_c))
M = sqrt(h_bar * p * k * a_c) * (t_b - t_inf)
return m, M
m, M = calculate_fin_constants(self.h_hs, self.finPerimeter, self.conductivity,
self.finArea, t_b, fluid_properties.T_inf)
finTipHeatTransfer = self.noFins * M * (sinh(m * self.finDepth) +
(self.h_hs / (m * self.conductivity))
* cosh(m * self.finDepth)) / \
(cosh(m * self.finDepth) +
(self.h_hs / (m * self.conductivity)) *
sinh(m * self.finDepth))
finHeatTransfer = finTipHeatTransfer + self.noFins * self.h_hs * self.finPerimeter * \
self.finDepth * (t_b - fluid_properties.T_inf)
return finHeatTransfer
|
southwelljake/HeatSinkModelling
|
src/heatSink.py
|
heatSink.py
|
py
| 3,505 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74083767549
|
from django.core import checks
from django.core.checks import Warning
from django.test import SimpleTestCase, override_settings
class SystemChecksTestCase(SimpleTestCase):
def test_checks(self):
errors = checks.run_checks()
assert errors == []
with override_settings(
INSTALLED_APPS=[
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'huey_monitor',
]
):
errors = checks.run_checks()
assert errors == [
Warning(
'"bx_django_utils" not in INSTALLED_APPS',
id='huey_monitor.E001',
)
]
|
boxine/django-huey-monitor
|
huey_monitor_project/tests/test_checks.py
|
test_checks.py
|
py
| 790 |
python
|
en
|
code
| 69 |
github-code
|
6
|
27812632483
|
import sys
from osgeo import ogr
fn=r'D:\BackUp\projectsBackup\Qgis\pygis\res\ne_50m_populated_places.shp'
ds=ogr.Open(fn,0)
if ds is None:
sys.exit('could not open {0}.'.format(fn))
lyr=ds.GetLayer(0)
#total number of features in this layer
num_features=lyr.GetFeatureCount()
print(num_features)
#get a specific feature by its FID (here, the last feature)
third_feature=lyr.GetFeature(num_features-1)
print(third_feature.NAME)
del ds
|
xuewenqian/pythonGis
|
ogr/获取特定的要素.py
|
获取特定的要素.py
|
py
| 406 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36294686570
|
# Write a function, solution, that returns the middle character of the word s. If the length of the word is even, return the middle two characters.
# s = "abcde"
s = "qwer"
# s = "avcxvxcv"
def solution(s):
half = len(s)//2
result = s[half] if len(s) % 2 else s[half-1 : half+1]
print(result)
solution(s)
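# Quick check of the samples above (not part of the original snippet):
# "abcde" -> "c", "qwer" -> "we", "avcxvxcv" -> "xv"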
|
minkimhere/algorithm_python
|
03_middle_num.py
|
03_middle_num.py
|
py
| 334 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
22850158142
|
#!/usr/bin/env python
# coding: utf-8
from bs4 import BeautifulSoup as bs
import pandas as pd
from splinter import Browser
import requests
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
def scrape_all():
mars={}
url_mars_news = 'https://redplanetscience.com/'
browser.visit(url_mars_news)
# html object
html = browser.html
# Parse with Beautiful Soup
soup = bs(html, 'html.parser')
mars_news_title = soup.find("div", class_="content_title").text
news_title_scrape = mars_news_title
news_title_scrape
mars["news_title"]=news_title_scrape
mars_news_paragraph = soup.find("div", class_="article_teaser_body").text
news_p_scrape = mars_news_paragraph
news_p_scrape
mars["news_p"]=news_p_scrape
mars_url = "https://spaceimages-mars.com/"
browser.visit(mars_url)
html = browser.html
soup = bs(html, 'html.parser')
# Retrieve featured image link
relative_image_path = soup.find_all('img')[1]["src"]
featured_image_url_scrape = mars_url+relative_image_path
featured_image_url_scrape
mars["featured_image"]=featured_image_url_scrape
mars_profile = 'https://galaxyfacts-mars.com/'
mars_profile_table = pd.read_html(mars_profile)
mars_table_df = mars_profile_table[1]
new_mars_df = mars_table_df.rename(columns={0:'Mars Planet Profile', 1:''})
new_mars_df
mars_facts_scrape = new_mars_df.to_html()
mars["facts"]=mars_facts_scrape
mars_hemi_pics='https://marshemispheres.com/'
browser.visit(mars_hemi_pics)
html = browser.html
soup = bs(html, 'html.parser')
main_urls = soup.find_all('div',class_='item')
# Create list to hold dicts
urls_for_h_images_scrape=[]
for main_url in main_urls:
hemisphere_image_dict = {}
h_title = main_url.find('div', class_='description').find('a', class_='itemLink product-item').h3.text
h_images = mars_hemi_pics + main_url.find('a',class_='itemLink product-item')['href']
browser.visit(h_images)
html = browser.html
soup = bs(html, 'html.parser')
full_image_url = soup.find('div',class_='downloads').a['href']
#print(full_image_url)
# Append title and image urls of hemisphere to dictionary
hemisphere_image_dict['h_title'] = h_title
hemisphere_image_dict['full_img_url'] = 'https://marshemispheres.com/'+full_image_url
urls_for_h_images_scrape.append(hemisphere_image_dict)
mars["hemispheres"]=urls_for_h_images_scrape
return mars
if __name__ == "__main__":
print(scrape_all())
|
iegatlanta/web-scraping-challenge
|
Mission_to_Mars/scrape_mars.py
|
scrape_mars.py
|
py
| 2,817 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30792880675
|
def my_function():
result = 3*2
return result # return is an output keyword
output = my_function()
print(output)
def format_name(f_name, l_name):
formatted_f_name = f_name.title()
formatted_l_name = l_name.title()
return f"{formatted_f_name} {formatted_l_name}"
# when return is reached the function exits immediately, so any code after it inside the function body is never executed
formatted_string = format_name("shrijan", "LAKHEY")
print(formatted_string)
# print vs return
# If we want to use the output of one function as the input to another, it is better to return
# the value from the function rather than just printing it.
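# A small illustration of the point above (not part of the original lesson): because
# format_name() returns a value, its output can feed straight into another function call.
def shout(text):
    return text.upper() + "!"
print(shout(format_name("ada", "lovelace")))  # ADA LOVELACE!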
|
shrijanlakhey/100-days-of-Python
|
010/functions_with_output.py
|
functions_with_output.py
|
py
| 720 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41475591670
|
import httpx
import asyncio
import logging
import discord
from discord.ext import tasks
from redbot.core import Config, commands
IDENTIFIER = 4175987634255572345 # Random to this cog
ishtakar_world_id = "3f1cd819f97e"
default_server = "Ishtakar"
realm_data_url = "https://nwdb.info/server-status/data.json"
default_guild = {
"default_realm": "Ishtakar",
"server_channel": None,
}
logger = logging.getLogger("red.psykzz.cogs")
logger.setLevel(logging.DEBUG)
class ServerStatus(commands.Cog):
"Provider server status"
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(
self, identifier=IDENTIFIER, force_registration=True
)
self.config.register_guild(**default_guild)
        self.queue_data = {}  # filled in by the refresh task below
        self.refresh_queue_data.start()
def cog_unload(self):
self.refresh_queue_data.cancel()
@tasks.loop(minutes=5.0)
async def refresh_queue_data(self):
logger.info("Starting queue task")
try:
self.queue_data = await self.get_queue_data(worldId=None)
await self.update_monitor_channels()
except Exception:
logger.exception("Error in task")
logger.info("Finished queue task")
async def get_queue_data(self, worldId=ishtakar_world_id):
"""Refresh data from remote data"""
try:
extra_qs = f"worldId={worldId}" if worldId else ""
response = await http_get(
f"https://nwdb.info/server-status/servers.json?{extra_qs}"
)
if not response.get("success"):
logger.error("Failed to get server status data")
return
servers = response.get("data", {}).get("servers", [])
return {
self.parse_server(server).get("worldName"): self.parse_server(server)
for server in servers
}
except Exception:
logger.exception("Exception while downloading new data")
def parse_server(self, server):
(
connectionCountMax,
connectionCount,
queueCount,
queueTime,
worldName,
worldSetName,
region,
status,
active,
worldId,
a,
b,
) = server
return {
"connectionCountMax": connectionCountMax,
"connectionCount": connectionCount,
"queueCount": queueCount,
"queueTime": queueTime,
"worldName": worldName,
"worldSetName": worldSetName,
"region": region,
"status": status,
"active": active,
"worldId": worldId,
"a-val": a,
"b-val": b,
}
async def get_guild_monitor_channel(self, guild):
guild_config = self.config.guild(guild)
channel_id = await guild_config.server_channel()
realm_name = await guild_config.default_realm()
# Check if the channel is valid
if not channel_id or channel_id == "0":
            logger.warning(f"Skipping {guild}...")
return
# If the channel doesn't exist, reset configuration and return
channel = self.bot.get_channel(channel_id)
if not channel:
await guild_config.server_channel.set(None)
return
return channel
    async def update_guild_channel(self, guild):
        logger.info(f"Updating guild {guild}...")
        channel = await self.get_guild_monitor_channel(guild)
        if not channel:
            return
        realm_name = await self.config.guild(guild).default_realm()
        server_status = await self.get_server_status(realm_name)
        if not server_status:
            return
        new_channel_name = server_status.split("-")[1]
        # Avoid updates if the name matches
        if channel.name == new_channel_name:
            return
        await channel.edit(name=new_channel_name)
    async def update_monitor_channels(self):
        # iterate through bot discords and get the guild config
        for guild in self.bot.guilds:
            await self.update_guild_channel(guild)
async def get_server_status(self, server_name, data=None):
if not data:
data = self.queue_data
server_data = data.get(server_name)
if not server_data:
return "No server data available - Loading data..."
online = server_data.get("connectionCount", -1)
max_online = server_data.get("connectionCountMax", -1)
in_queue_raw = int(server_data.get("queueCount", -1))
in_queue = in_queue_raw if in_queue_raw > 1 else 0
status = server_data.get("status", -1)
if status == 4:
return f"{server_name}: {online}/{max_online} Offline - Server maintenance"
return f"{server_name}: {online}/{max_online} Online - {in_queue} in queue."
async def get_world_id(self, server_name):
if not self.queue_data:
return
server_data = self.queue_data.get(server_name)
if not server_data:
return
return server_data.get("worldId")
@commands.command()
async def queue(self, ctx, server: str = None):
"Get current queue information"
if ctx.guild and server is None:
guild_config = self.config.guild(ctx.guild)
server = await guild_config.default_realm()
if not server:
await ctx.send("You must provide a server in DMs. `.queue <server>`")
return
worldId = await self.get_world_id(server)
data = await self.get_queue_data(worldId=worldId)
msg = await self.get_server_status(server, data)
await ctx.send(msg)
@commands.command()
@commands.guild_only()
@commands.admin_or_permissions(manage_channels=True)
    async def monitor(self, ctx, voice_channel: discord.VoiceChannel = None):
        "Start updating a channel with the current realm status"
        # Check that the bot has permission to the channel (skipped when disabling with no channel)
        if voice_channel:
            bot_perms = voice_channel.permissions_for(ctx.me)
            if not bot_perms.manage_channels:
                await ctx.send(f'I require the "Manage Channels" permission for {voice_channel.mention} to execute that command.')
                return
guild_config = self.config.guild(ctx.guild)
await guild_config.server_channel.set(voice_channel.id if voice_channel else None)
if voice_channel:
await ctx.send(f"Setup {voice_channel} as the monitor channel.")
else:
await ctx.send(f"Disabled monitor channel.")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_channels=True)
@commands.admin_or_permissions(manage_channels=True)
    async def forcemonitor(self, ctx):
        "Force an update of the monitor voice channel with the current realm status"
        voice_channel = await self.get_guild_monitor_channel(ctx.guild)
        if not voice_channel:
            await ctx.send("No monitor channel is configured for this server.")
            return
        bot_perms = voice_channel.permissions_for(ctx.me)
        if not bot_perms.manage_channels:
            await ctx.send(f'I require the "Manage Channels" permission for {voice_channel.mention} to execute that command.')
            return
await self.update_guild_channel(ctx.guild)
await ctx.send("Forced monitor channel update.")
@commands.command()
@commands.guild_only()
@commands.admin_or_permissions(manage_channels=True)
async def queueset(self, ctx, server: str = None):
"Set the default server for this discord server"
guild_config = self.config.guild(ctx.guild)
if server is None:
realm = await guild_config.default_realm()
await ctx.send(f"Current server: '{realm}'.")
return
server_data = self.queue_data.get(server)
if not server_data:
await ctx.send(f"Can't find '{server}' in the server list.")
return
await guild_config.default_realm.set(server)
await ctx.send(f"Server updated to '{server}'.")
async def http_get(url):
max_attempts = 3
attempt = 0
while (
max_attempts > attempt
): # httpx doesn't support retries, so we'll build our own basic loop for that
try:
async with httpx.AsyncClient() as client:
r = await client.get(url, headers={"user-agent": "psykzz-cogs/1.0.0"})
if r.status_code == 200:
return r.json()
else:
attempt += 1
await asyncio.sleep(5)
        except (httpx.ConnectTimeout, httpx.HTTPError):
attempt += 1
await asyncio.sleep(5)
pass
|
psykzz/cogs
|
nw_server_status/server_status.py
|
server_status.py
|
py
| 8,630 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21512060680
|
out = open('output2.py', 'w')
out.write("""
total = 0
"""
)
def encode(s):
return s.replace("\\", "\\\\").replace("\"", "\\\"")
try:
with open('input.txt', 'r') as f:
for line in f:
s = line.strip()
out.write('total = total + len("\\"%s\\"") - %d\n' % (encode(encode(s)), len(s)))
out.write("""
print total
"""
)
finally:
out.close()
|
snocorp/adventofcode2015
|
day8/part2.py
|
part2.py
|
py
| 399 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72757346749
|
from syscore.objects import arg_not_supplied
from syscore.genutils import flatten_list
from dataclasses import dataclass
import pandas as pd
EMPTY_INSTRUMENT = ""
class futuresInstrument(object):
def __init__(self, instrument_code: str):
self._instrument_code = instrument_code
@property
def instrument_code(self):
return self._instrument_code
def empty(self):
return self.instrument_code == EMPTY_INSTRUMENT
@classmethod
def create_from_dict(futuresInstrument, input_dict):
# Might seem pointless, but (a) is used in original code, (b) gives a nice consistent feel
return futuresInstrument(input_dict["instrument_code"])
def as_dict(self):
# Might seem pointless, but (a) is used in original code, (b) gives a nice consistent feel
return dict(instrument_code=self.instrument_code)
def __eq__(self, other):
return self.instrument_code == other.instrument_code
@property
def key(self):
return self.instrument_code
def __repr__(self):
return str(self.instrument_code)
@dataclass
class instrumentMetaData:
Description: str = ""
Pointsize: float = 0.0
Currency: str = ""
AssetClass: str = ""
Slippage: float = 0.0
PerBlock: float = 0.0
Percentage: float = 0.0
PerTrade: float = 0.0
def as_dict(self) -> dict:
keys = self.__dataclass_fields__.keys()
self_as_dict = dict([(key, getattr(self, key)) for key in keys])
return self_as_dict
@classmethod
def from_dict(instrumentMetaData, input_dict):
keys = instrumentMetaData.__dataclass_fields__.keys()
args_list = [input_dict[key] for key in keys]
return instrumentMetaData(*args_list)
@dataclass
class futuresInstrumentWithMetaData:
instrument: futuresInstrument
meta_data: instrumentMetaData
@property
def instrument_code(self) -> str:
return self.instrument.instrument_code
@property
def key(self) -> str:
return self.instrument_code
def as_dict(self) -> dict:
meta_data_dict = self.meta_data.as_dict()
meta_data_dict["instrument_code"] = self.instrument_code
return meta_data_dict
@classmethod
def from_dict(futuresInstrumentWithMetaData, input_dict):
instrument_code = input_dict.pop("instrument_code")
instrument = futuresInstrument(instrument_code)
meta_data = instrumentMetaData.from_dict(input_dict)
return futuresInstrumentWithMetaData(instrument, meta_data)
@classmethod
def create_empty(futuresInstrumentWithMetaData):
instrument = futuresInstrument(EMPTY_INSTRUMENT)
meta_data = instrumentMetaData()
instrument_with_metadata = futuresInstrumentWithMetaData(instrument, meta_data)
return instrument_with_metadata
def empty(self):
return self.instrument.empty()
class listOfFuturesInstrumentWithMetaData(list):
def as_df(self):
instrument_codes = [
instrument_object.instrument_code for instrument_object in self
]
meta_data_keys = [
instrument_object.meta_data.as_dict().keys() for instrument_object in self
]
meta_data_keys_flattened = flatten_list(meta_data_keys)
meta_data_keys_unique = list(set(meta_data_keys_flattened))
meta_data_as_lists = dict(
[
(
metadata_name,
[
getattr(instrument_object.meta_data, metadata_name)
for instrument_object in self
],
)
for metadata_name in meta_data_keys_unique
]
)
meta_data_as_dataframe = pd.DataFrame(
meta_data_as_lists, index=instrument_codes
)
return meta_data_as_dataframe
class assetClassesAndInstruments(dict):
def __repr__(self):
return str(self.as_pd())
def get_instrument_list(self) -> list:
return list(self.keys())
@classmethod
def from_pd_series(self, pd_series: pd.Series):
instruments = list(pd_series.index)
asset_classes = list(pd_series.values)
as_dict = dict(
[
(instrument_code, asset_class)
for instrument_code, asset_class in zip(instruments, asset_classes)
]
)
return assetClassesAndInstruments(as_dict)
def all_asset_classes(self) -> list:
asset_classes = list(self.values())
unique_asset_classes = list(set(asset_classes))
unique_asset_classes.sort()
return unique_asset_classes
def as_pd(self) -> pd.Series:
instruments = [key for key in self.keys()]
asset_classes = [value for value in self.values()]
return pd.Series(asset_classes, index=instruments)
def all_instruments_in_asset_class(
self, asset_class: str, must_be_in=arg_not_supplied
) -> list:
asset_class_instrument_list = [
instrument
for instrument, item_asset_class in self.items()
if item_asset_class == asset_class
]
if must_be_in is arg_not_supplied:
return asset_class_instrument_list
## we need to filter
filtered_asset_class_instrument_list = [
instrument
for instrument in asset_class_instrument_list
if instrument in must_be_in
]
return filtered_asset_class_instrument_list
class instrumentCosts(object):
def __init__(
self,
price_slippage: float = 0.0,
value_of_block_commission: float = 0.0,
percentage_cost: float = 0.0,
value_of_pertrade_commission: float = 0.0,
):
self._price_slippage = price_slippage
self._value_of_block_commission = value_of_block_commission
self._percentage_cost = percentage_cost
self._value_of_pertrade_commission = value_of_pertrade_commission
@classmethod
def from_meta_data(instrumentCosts, meta_data: instrumentMetaData):
return instrumentCosts(
price_slippage=meta_data.Slippage,
value_of_block_commission=meta_data.PerBlock,
percentage_cost=meta_data.Percentage,
value_of_pertrade_commission=meta_data.PerTrade,
)
def __repr__(self):
return (
"instrumentCosts slippage %f block_commission %f percentage cost %f per trade commission %f "
% (
self.price_slippage,
self.value_of_block_commission,
self.percentage_cost,
self.value_of_pertrade_commission,
)
)
@property
def price_slippage(self):
return self._price_slippage
@property
def value_of_block_commission(self):
return self._value_of_block_commission
@property
def percentage_cost(self):
return self._percentage_cost
@property
def value_of_pertrade_commission(self):
return self._value_of_pertrade_commission
def calculate_cost_percentage_terms(
self, blocks_traded: float, block_price_multiplier: float, price: float
) -> float:
cost_in_currency_terms = self.calculate_cost_instrument_currency(
blocks_traded, block_price_multiplier=block_price_multiplier, price=price
)
value_per_block = price * block_price_multiplier
total_value = blocks_traded * value_per_block
cost_in_percentage_terms = cost_in_currency_terms / total_value
return cost_in_percentage_terms
def calculate_cost_instrument_currency(
self, blocks_traded: float, block_price_multiplier: float, price: float
) -> float:
value_per_block = price * block_price_multiplier
slippage = self.calculate_slippage_instrument_currency(
blocks_traded, block_price_multiplier=block_price_multiplier
)
commission = self.calculate_total_commission(
blocks_traded, value_per_block=value_per_block
)
return slippage + commission
def calculate_total_commission(self, blocks_traded: float, value_per_block: float):
### YOU WILL NEED TO CHANGE THIS IF YOUR BROKER HAS A MORE COMPLEX STRUCTURE
per_trade_commission = self.calculate_per_trade_commission()
per_block_commission = self.calculate_cost_per_block_commission(blocks_traded)
percentage_commission = self.calculate_percentage_commission(
blocks_traded, value_per_block
)
return max([per_block_commission, per_trade_commission, percentage_commission])
def calculate_slippage_instrument_currency(
self, blocks_traded: float, block_price_multiplier: float
) -> float:
return abs(blocks_traded) * self.price_slippage * block_price_multiplier
def calculate_per_trade_commission(self):
return self.value_of_pertrade_commission
def calculate_cost_per_block_commission(self, blocks_traded):
return abs(blocks_traded) * self.value_of_block_commission
def calculate_percentage_commission(self, blocks_traded, price_per_block):
trade_value = self.calculate_trade_value(blocks_traded, price_per_block)
return self._percentage_cost * trade_value
def calculate_trade_value(self, blocks_traded, value_per_block):
return abs(blocks_traded) * value_per_block
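# Rough usage sketch with illustrative numbers (not part of the original module):
# with costs = instrumentCosts(price_slippage=0.005, value_of_block_commission=1.0),
# trading 10 blocks at price 100 and a block price multiplier of 5 gives
#   slippage   = 10 * 0.005 * 5          = 0.25
#   commission = max(10 * 1.0, 0.0, 0.0) = 10.0
# so costs.calculate_cost_instrument_currency(10, 5, 100) returns 10.25 in instrument currency.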
|
ahalsall/pysystrade
|
sysobjects/instruments.py
|
instruments.py
|
py
| 9,474 |
python
|
en
|
code
| 4 |
github-code
|
6
|
71360101948
|
#!/usr/bin/python3
import os
import sys
import time
import jsonpickle
class TODODescription(object):
def __init__(self, todoName, startTime = -1) -> None:
self.todoName = todoName
self.startTime = startTime
self.stopTime = -1
def display(self, id=''):
print('%s %s' % (str(id), str(self.todoName)))
class TaskDescription(object):
def __init__(self, name, path, startTime):
self.taskPath = path
self.taskName: str = name
self.startTime = startTime
self.stopTime = -1
self.status = ''
self.todos: list = []
def addTODO(self, todo: TODODescription):
self.todos.append(todo)
def removeTODO(self, id):
self.todos.pop(int(id))
def display(self):
print('\n')
print('Task name = %s' % str(self.taskName))
print('Task start time = %s' % str(self.startTime))
print('Task stop time = %s' % str(self.stopTime))
print('Task status = %s' % str(self.status))
print('\nTODO:s\n')
todoID = 0
for todo in self.todos:
todo.display(todoID)
todoID += 1
class App:
taskList = [TaskDescription]
jsonName = 'tasks.json'
argCount = 0
def __init__(self, workingDir):
self.workingDir = workingDir
self.jsonPath = self.workingDir + '/%s' % App.jsonName
self.taskCount = 0
def printTaskList(self):
print('Total number of tasks: %d' % len(self.taskList))
for task in self.taskList:
task.display()
def createTasksJSON(self):
with open(App.jsonName, 'w') as f:
tasks = jsonpickle.encode(self.taskList)
f.write(str(tasks))
f.close()
def addTODO(self, taskID, todo):
if self.taskCount - 1 < taskID:
print('ID is too big, cant add TODO')
return
self.taskList[taskID].addTODO(todo)
print('Added a new TODO for task ID %d' % taskID)
print(todo)
def removeTODO(self, taskID, todoID):
if self.taskCount - 1 < taskID:
print('task ID is too big, cant add TODO')
return
if taskID < 0:
print('task ID cant be below 0!')
return
if int(todoID) >= len(self.taskList[taskID].todos):
print('Invalid todo id')
return
self.taskList[taskID].removeTODO(todoID)
print('Removed TODO ID %d from task ID %d' % (int(todoID), int(taskID)))
def addTask(self, taskName: str):
task:TaskDescription = TaskDescription(taskName, os.getcwd(), time.time())
self.taskList.append(task)
self.taskCount += 1
print('Created Task ID = %d' % int(self.taskCount-1))
return self.taskCount - 1
def removeTask(self, taskID):
if taskID >= 0 and taskID < self.taskCount:
del self.taskList[taskID]
self.taskCount -= 1
print('TaskID %d removed!' % taskID)
def getTaskCount(self):
count = 0
for _ in self.taskList:
count += 1
return count
def loadTasks(self):
# Check for existing tasks.json file
if not os.path.exists(self.jsonPath):
print("No tasks.json found! Creating a new one.")
self.createTasksJSON()
tasks = []
with open(App.jsonName, 'r') as f:
file = jsonpickle.decode(f.read())
for task in file:
if isinstance(task, TaskDescription):
tasks.append(task)
self.taskList = tasks
self.taskCount = self.getTaskCount()
def saveTasks(self):
with open(App.jsonName, 'w') as f:
tasks = jsonpickle.encode(self.taskList)
f.write(str(tasks))
f.close()
def getIdFromName(self, name):
count = 0
for task in self.taskList:
count += 1
if task.taskName == name:
return count - 1
return -1
# Valid args are: create/remove/status/add-todo/remove-todo/list <taskname> (<description>)
def parseCmdArgs(self, args):
App.argCount = len(args)
if App.argCount <= 1:
print('ERROR: No args given! Usage: app.py <action> <taskname> (<description>) ')
return
action = args[1]
# Only action without having to specify task name
if action == 'list':
self.printTaskList()
return
if App.argCount <= 2:
print('Only 1 argument given and its not <list>')
return
taskname = args[2]
taskID = self.getIdFromName(taskname) # Will return -1 if not found
if action == 'create':
if taskID >= 0:
print('Task with that name already exists!')
return
taskID = self.addTask(taskname)
elif action == 'remove':
if len(self.taskList) < 1:
print('No task to remove!')
return
self.removeTask(taskID)
elif action == 'status':
if taskID < 0:
print('No task with that name!')
else:
self.taskList[taskID].display()
elif action == 'add-todo':
if taskID >= 0:
description = args[3]
todo = TODODescription(description)
self.addTODO(taskID, todo)
elif action == 'remove-todo':
description = args[3]
self.removeTODO(taskID, description)
elif action == 'pause':
print('TODO: Pause tasks todo for time tracking')
elif action == 'continue':
print('TODO: pause tasks todo for time tracking')
else:
print('Unknown action!')
return
if __name__ == '__main__':
app = App(os.getcwd())
app.loadTasks()
app.parseCmdArgs(sys.argv)
app.saveTasks()
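# Example invocations handled by parseCmdArgs above (task names are illustrative only):
#   ./app.py list
#   ./app.py create website-redesign
#   ./app.py add-todo website-redesign "draft the landing page"
#   ./app.py status website-redesign
#   ./app.py remove-todo website-redesign 0
#   ./app.py remove website-redesign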
|
Dechode/TODO-App
|
app.py
|
app.py
|
py
| 5,984 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30575033790
|
from DataEngine.DataAdapters.MongoAdapter.MongoAdapter import MongoAdapter
from Domain.EquityCorporateData import EquityCorporateData
from Domain.BuildEnumMethods import BuildEnumMethods
from datetime import date
ma : MongoAdapter = MongoAdapter()
testEquity = EquityCorporateData().build(
method = BuildEnumMethods.MANUAL,
ticker="test",
commonCompanyName = "test",
ipoYear = date.today(),
industry = "test",
sector = "test",
country = "test"
)
def testConnect():
assert(1 == 1)
def testSaveEquityCorporateDataDocument():
ma.saveDocument(testEquity)
def testRemoveEquityCorporateDataDocument():
ma.removeDocument(testEquity)
def getDataForDate():
"""
This test assumes you have a fully loaded database
Or at least a database with the following ticker in it with some data for that date
"""
retDf = ma.getDataForDate("2020-01-30", ["AAPL", "A"])
assert(retDf.loc[retDf["ticker"] == "AAPL"]["Close"][0] == 84.379997)
def getDataForDates():
"""
This test assumes you have a fully loaded database
Or at least a database with the following ticker in it with some data for that date
"""
retDf = ma.getDataForDateRange("2020-01-01", "2020-01-30", ["AAPL", "A"])
assert(retDf.loc[retDf["ticker"] == "AAPL"]["Open"][2] == 73.447502)
testConnect()
testSaveEquityCorporateDataDocument()
testRemoveEquityCorporateDataDocument()
getDataForDate()
getDataForDates()
|
jminahan/backtest_framework
|
DataEngine/Tests/DataAdapters/MongoAdapter/MongoAdapterTest.py
|
MongoAdapterTest.py
|
py
| 1,494 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5718228664
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
####################################################
# Summer 2017 COMS 4771 AI Homework 2
# File: Helper.py
# Name: Qipeng Chen
# UNI: qc2201
####################################################
import math
from Grid import Grid, vecIndex
EMPTY_WEIGHT = 3.0
MAX_WEIGHT = 1.0
SMOOTH_WEIGHT = 0.5
MONO_WEIGHT = 0.5
CORNNER_WEIGHT = 3.0
SIZE = 4
MAX_IDX = SIZE - 1
CORNER_IDX = set([(0, 0), (0, MAX_IDX), (MAX_IDX, 0), (MAX_IDX, MAX_IDX)])
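# The score() heuristic below combines these weighted terms (empty cells, log2 of the max tile,
# smoothness between neighbouring tiles, row/column monotonicity, and a bonus when the max tile
# sits in a corner) into a single evaluation value for a 2048 grid.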
def get_children(grid, dirs=vecIndex):
children = []
for x in dirs:
gridCopy = grid.clone()
if gridCopy.move(x):
children.append((x, gridCopy))
return children
def score(grid):
empty_cells, max_cell, smooth = 0, 0, 0.0
    mono_row, mono_col = [0.0] * SIZE, [0.0] * SIZE  # keep row and column accumulators as separate lists
max_pos, corner = [], 0
for i in xrange(SIZE):
# empty_cells & cell_sum
for j in xrange(SIZE):
# update empty_cells
if grid.map[i][j] == 0:
empty_cells += 1
continue
# update cell_sum & max_cell
if grid.map[i][j] > max_cell:
max_cell = grid.map[i][j]
max_pos = [(i, j)]
            elif grid.map[i][j] == max_cell:
                max_pos.append((i, j))
# smoothness & monotonicity
for j in xrange(MAX_IDX):
# update smoothness & monotonicity: left/right direction
diff = grid.map[i][j+1] - grid.map[i][j]
mono_row[i] += 1 if diff > 0 else -1
smooth += 1 if diff == 0 else -math.log(abs(diff), 2)
# update smoothness & monotonicity: up/down direction
diff = grid.map[j+1][i] - grid.map[j][i]
mono_col[i] += 1 if diff > 0 else -1
smooth += 1 if diff == 0 else -math.log(abs(diff), 2)
max_cell = math.log(max_cell, 2)
monotonicity = sum(map(abs, mono_row)) + sum(map(abs, mono_col)) * max_cell
for idx in max_pos:
if idx in CORNER_IDX:
corner = max_cell
break
return EMPTY_WEIGHT * empty_cells \
+ MAX_WEIGHT * max_cell \
+ SMOOTH_WEIGHT * smooth \
+ MONO_WEIGHT * monotonicity \
+ CORNNER_WEIGHT * corner
|
asd123cqp/coms4701-artificial-intelligence
|
hw2_2048/Helper.py
|
Helper.py
|
py
| 2,251 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3580079410
|
import json
from statistics import mean
import numpy as np
import os
from bokeh.plotting import output_file, figure, save
from bokeh.layouts import gridplot
from src.utils.tools import listdir, hash_append
def combined(ids, name, legend=None, y_range=(0, 900)):
summaries = {}
episodes = []
for key_ in settings: # [key_ for key_ in settings if len(settings[key_]) > 1]:
id = [f.split(' := ') for f in key_.split('\n') if f.split(' := ')[0] == 'id'][0][1]
if not any([id == id_ for id_ in ids]):
continue
rewards = {}
for res_folder in settings[key_]:
with open(os.path.join(res_folder, 'metrics.json'), 'r') as f:
metrics = json.load(f)
for episode, score in zip(metrics['episodes'], metrics['t_scores']):
if score is not None and episode % 25 == 0:
hash_append(rewards, episode, score)
episodes = sorted(rewards.keys())
episodes = episodes[:min(39, len(episodes))]
quart1 = [np.percentile(rewards[ep], 25) for ep in episodes]
median = [np.percentile(rewards[ep], 50) for ep in episodes]
quart3 = [np.percentile(rewards[ep], 75) for ep in episodes]
summaries[id] = (quart1, median, quart3)
COLORS = ("royalblue", "orchid", "seagreen", "sienna", "darkkhaki")
output_file(os.path.join('./res/plots', name.lower() + '.html'), title=name)
s = figure(width=720, height=int(360 * (y_range[1] - y_range[0]) / 900), title="Performance",
x_axis_label='episodes', y_axis_label='score', y_range=y_range)
for id in sorted(summaries.keys(), key=lambda x: ids.index(x)):
ii = ids.index(id)
s.line(episodes[:len(summaries[id][1])], summaries[id][1], line_color=COLORS[ii], line_width=2, line_alpha=0.75,
legend_label=legend[ids.index(id)])
s.varea(episodes[:len(summaries[id][1])], summaries[id][0], summaries[id][2], fill_color=COLORS[ii],
fill_alpha=0.25)
# s.legend.location = "top_left"
s.legend.location = "bottom_right"
save(gridplot([[s]]))
settings = {}
folders = []
for fname in ('gradient', 'imitation', 'planet'):
for folder in listdir(f"../{fname}/res/results"):
folders.append((folder, fname))
for results_folder, algorithm in folders:
with open(os.path.join(results_folder, 'hyperparameters.txt'), 'r') as f:
s = ""
for line in f.readlines():
if 'seed' not in line:
if line.split(' ')[0] == 'id':
id = f"id := {algorithm}_{line.split(' ')[-1]}"
print(id.replace('\n', ''))
s += id
else:
s += line
try:
with open(os.path.join(results_folder, 'metrics.json'), 'r') as f:
metrics = json.load(f)
if len(metrics['steps']) >= 1000 or 'data_aggregation' in id:
hash_append(settings, s, results_folder)
except FileNotFoundError:
pass
combined(['planet_lov', 'planet_vanilla'], 'planet_validation',
legend=['With Latent Overshooting', 'Without Latent Overshooting'])
combined(['planet_lov', 'gradient_ar4'], 'planet_gradient',
legend=['CEM Planner', 'Gradient-Based Optimization'])
combined(['planet_lov', 'imitation_data_aggregation', 'imitation_policy_aggregation'], 'planet_imitation',
legend=['CEM Planner', 'Data Aggregation', 'Policy Aggregation'], y_range=(-200, 900))
combined(['planet_lov', 'planet_latency', 'planet_latency2', 'planet_latency4'], 'latency_planet',
legend=['no latency', '1 timestep', '2 timesteps', '4 timesteps'])
combined(['gradient_ar4', 'gradient_ar4_lat1', 'gradient_ar4_lat2', 'gradient_ar4_lat4', 'gradient_ar4_lat8'], 'latency_gradient',
legend=['no latency', '1 timestep', '2 timesteps', '4 timesteps', '8 timesteps'])
combined(['planet_lov', 'planet_ar4', 'planet_ar8', 'planet_ar12'], 'planet_cf',
legend=['2 timesteps', '4 timesteps', '8 timesteps', '12 timesteps'])
combined(['gradient_ar2_4andreas', 'gradient_ar4', 'gradient_ar8', 'gradient_ar12'], 'gradient_cf',
legend=['2 timesteps', '4 timesteps', '8 timesteps', '12 timesteps'])
|
MatthijsBiondina/WorldModels
|
planet/plots.py
|
plots.py
|
py
| 4,228 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8891107927
|
import cv2
import numpy as np
classify_body = cv2.CascadeClassifier('haarcascade_fullbody.xml')
vid_capture = cv2.VideoCapture('people_walking.mp4')
while vid_capture.isOpened():
    ret,frame = vid_capture.read()
    if not ret:
        # stop cleanly when the video ends instead of passing None to resize
        break
    frame = cv2.resize(frame, None,fx=0.5,fy=0.5, interpolation = cv2.INTER_LINEAR)
grayscale_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
bodies_detected = classify_body.detectMultiScale(grayscale_img,1.2,3)
for(x,y,w,h) in bodies_detected:
cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,255), 2)
cv2.imshow('Pedestrian',frame)
if cv2.waitKey(1) == 13:
break
vid_capture.release()
cv2.destroyAllWindows()
|
RudraCS18/Object-Detection-using-openCV-python
|
pedestrian detection.py
|
pedestrian detection.py
|
py
| 707 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32279135386
|
#!/usr/bin/env python
"""Simple script to fetch data from the bslparlour home stream"""
import datetime
import os
import subprocess as sp
import yaml
import tweepy
import myconf
def dictify(results):
return_dict = dict()
for result in results:
return_dict[result.id] = result
return return_dict
def merge_all_yamls(tweet_data_dir="tweet_data"):
yamls = []
for f in os.listdir(tweet_data_dir):
yamls.append(yaml.load(open(os.path.join(tweet_data_dir, f), "r")))
all_yamls_gen = (dictify(x) for x in yamls)
all_yamls = dict()
for x in all_yamls_gen:
all_yamls.update(x)
return all_yamls
def main():
tweepy_auth = tweepy.OAuthHandler(
myconf.consumer_key,
myconf.consumer_secret,
)
tweepy_auth.set_access_token(
myconf.access_key,
myconf.access_secret,
)
tweepy_api = tweepy.API(tweepy_auth)
timestamp = datetime.datetime.now().timestamp()
with open("tweet_data/tweepy_{}.yaml".format(timestamp), "w") as f:
yaml.dump(tweepy_api.home_timeline(count=40), f)
all_yamls = merge_all_yamls()
try:
all_yamls_previous = yaml.load(open("tweet_data/tweepy_all.yaml", "r"))
except FileNotFoundError:
all_yamls_previous = dict()
if len(all_yamls_previous) < len(all_yamls):
with open("tweet_data/tweepy_all.yaml", "w") as f:
yaml.dump(all_yamls, f)
# Commit to repo
sp.check_call("git add tweet_data/* && git commit -m 'Automated data commit.' && git push",
shell=True)
if __name__ == '__main__':
main()
|
natfarleydev/mr-retweet
|
get_tweet_data.py
|
get_tweet_data.py
|
py
| 1,627 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36939207944
|
import json
import paho.mqtt.client as pmqtt
class mqtt():
"""HIAS iotJumpWay MQTT Module
This module connects devices, applications, robots and software to
the HIAS iotJumpWay MQTT Broker.
"""
def __init__(self,
helpers,
client_type,
configs):
""" Initializes the class. """
self.configs = configs
self.client_type = client_type
self.isConnected = False
self.helpers = helpers
self.program = "HIAS iotJumpWay MQTT Module"
self.mqtt_config = {}
self.module_topics = {}
self.agent = [
'host',
'port',
'location',
'zone',
'entity',
'name',
'un',
'up'
]
self.helpers.logger.info(self.program + " initialization complete.")
def configure(self):
""" Connection configuration.
        Configures the HIAS iotJumpWay MQTT connection.
"""
self.client_id = self.configs['name']
for param in self.agent:
if self.configs[param] is None:
raise ConfigurationException(param + " parameter is required!")
# Sets MQTT connection configuration
self.mqtt_config["tls"] = "/etc/ssl/certs/DST_Root_CA_X3.pem"
self.mqtt_config["host"] = self.configs['host']
self.mqtt_config["port"] = self.configs['port']
# Sets MQTT topics
self.module_topics["statusTopic"] = '%s/Agents/%s/%s/Status' % (
self.configs['location'], self.configs['zone'], self.configs['entity'])
# Sets MQTT callbacks
self.integrity_callback = None
self.helpers.logger.info(
"iotJumpWay " + self.client_type + " connection configured.")
def start(self):
""" Connection
Starts the HIAS iotJumpWay MQTT connection.
"""
self.mqtt_client = pmqtt.Client(client_id=self.client_id, clean_session=True)
self.mqtt_client.will_set(self.module_topics["statusTopic"], "OFFLINE", 0, False)
self.mqtt_client.tls_set(self.mqtt_config["tls"], certfile=None, keyfile=None)
self.mqtt_client.on_connect = self.on_connect
self.mqtt_client.on_message = self.on_message
self.mqtt_client.on_publish = self.on_publish
self.mqtt_client.on_subscribe = self.on_subscribe
self.mqtt_client.username_pw_set(str(self.configs['un']), str(self.configs['up']))
self.mqtt_client.connect(self.mqtt_config["host"], self.mqtt_config["port"], 10)
self.mqtt_client.loop_start()
self.helpers.logger.info(
"iotJumpWay " + self.client_type + " connection started.")
def on_connect(self, client, obj, flags, rc):
""" On connection
On connection callback.
"""
if self.isConnected != True:
self.isConnected = True
self.helpers.logger.info("iotJumpWay " + self.client_type + " connection successful.")
self.helpers.logger.info("rc: " + str(rc))
self.status_publish("ONLINE")
self.subscribe()
def status_publish(self, data):
""" Status publish
Publishes a status.
"""
self.mqtt_client.publish(self.module_topics["statusTopic"], data)
self.helpers.logger.info("Published to " + self.client_type + " status.")
def on_subscribe(self, client, obj, mid, granted_qos):
""" On subscribe
On subscription callback.
"""
self.helpers.logger.info("iotJumpWay " + self.client_type + " subscription")
def on_message(self, client, obj, msg):
""" On message
On message callback.
"""
split_topic = msg.topic.split("/")
conn_type = split_topic[1]
if conn_type == "Agents":
topic = split_topic[4]
elif conn_type == "Robotics":
topic = split_topic[3]
elif conn_type == "Applications":
topic = split_topic[3]
elif conn_type == "Staff":
topic = split_topic[3]
elif conn_type == "Devices":
topic = split_topic[4]
elif conn_type == "HIASBCH":
topic = split_topic[4]
elif conn_type == "HIASCDI":
topic = split_topic[4]
elif conn_type == "HIASHDI":
topic = split_topic[4]
self.helpers.logger.info(msg.payload)
self.helpers.logger.info("iotJumpWay " + conn_type + " " \
+ msg.topic + " communication received.")
if topic == 'Integrity':
            if self.integrity_callback is None:
self.helpers.logger.info(
conn_type + " Integrity callback required (integrity_callback) !")
else:
self.integrity_callback(msg.topic, msg.payload)
def publish(self, channel, data, channel_path = ""):
""" Publish
Publishes a iotJumpWay MQTT payload.
"""
if channel == "Custom":
channel = channel_path
else:
channel = '%s/Agents/%s/%s/%s' % (self.configs['location'],
self.configs['zone'], self.configs['entity'], channel)
self.mqtt_client.publish(channel, json.dumps(data))
self.helpers.logger.info("Published to " + channel)
return True
def subscribe(self, application = None, channelID = None, qos=0):
""" Subscribe
Subscribes to an iotJumpWay MQTT channel.
"""
channel = '%s/#' % (self.configs['location'])
self.mqtt_client.subscribe(channel, qos=qos)
self.helpers.logger.info("-- Agent subscribed to all channels")
return True
def on_publish(self, client, obj, mid):
""" On publish
On publish callback.
"""
self.helpers.logger.info("Published: "+str(mid))
def on_log(self, client, obj, level, string):
""" On log
On log callback.
"""
print(string)
def disconnect(self):
""" Disconnect
Disconnects from the HIAS iotJumpWay MQTT Broker.
"""
self.status_publish("OFFLINE")
self.mqtt_client.disconnect()
self.mqtt_client.loop_stop()
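def example_agent_lifecycle(helpers, configs):
    """ Hedged usage sketch.
    An assumption, not part of the original module: illustrates the expected
    call order for an agent. `helpers` must expose a logger and `configs`
    must contain the keys listed in mqtt.agent (host, port, location, zone,
    entity, name, un, up).
    """
    client = mqtt(helpers, "Agent", configs)
    client.configure()
    client.start()
    client.publish("Life", {"status": "ONLINE"})
    return client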
|
leukaemiamedtech/hiasbch-mqtt-blockchain-agent
|
modules/mqtt.py
|
mqtt.py
|
py
| 6,273 |
python
|
en
|
code
| 4 |
github-code
|
6
|
6734779664
|
#Create a house using starter code
#Import turtle
import turtle
#Set background to Navy Blue
turtle.bgcolor('navyblue')
shelly = turtle.Turtle()
#Start to create house
#Make first big yellow square for base structure of house
shelly.begin_fill() #Start the fill of color
shelly.color('yellow')
for i in range(4):
shelly.forward(100)
shelly.left(90)
shelly.end_fill() #End the fill of color
shelly.penup()
shelly.goto(-20,100) #Move the turtle to next area
shelly.pendown()
#Make a red triangle roof
shelly.begin_fill() #Start the fill of the roof
shelly.color('red')
shelly.left(60)
for i in range(2):
shelly.forward(140)
shelly.right(120)
shelly.forward(140)
shelly.end_fill() #End the fill of color for roof
#Create first window
shelly.penup()
shelly.goto(25,80) #Move to window position
shelly.pendown()
shelly.begin_fill() #Start filling window color
shelly.color('gray')
for i in range(4):
shelly.forward(20)
shelly.left(90)
shelly.end_fill() #End filling window color
#Create second window
shelly.penup()
shelly.goto(95,80) #Move to window position
shelly.pendown()
shelly.begin_fill() #Start filling window color
shelly.color('gray')
for i in range(4):
shelly.forward(20)
shelly.left(90)
shelly.end_fill() #End filling window color
#Create door under space between windows
shelly.penup()
shelly.goto(60,46)
shelly.pendown()
shelly.begin_fill() #Start filling door color
shelly.color('white')
for i in range(2):
shelly.forward(20)
shelly.left(90)
shelly.forward(45)
shelly.left(90)
shelly.end_fill() #End filling door color
#Hide the turtle when finished
shelly.hideturtle()
|
AruneemB/Basic-Art-With-Turtle
|
Art_Experiment2.py
|
Art_Experiment2.py
|
py
| 1,705 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22382302471
|
import time
from odoo.addons.web.controllers import main as report
from odoo.http import content_disposition, request, route
from odoo.tools.safe_eval import safe_eval
class ReportController(report.ReportController):
@route()
def report_routes(self, reportname, docids=None, converter=None, **data):
# Trick the main reporter to think we want an HTML report
new_converter = converter if converter != "xml" else "html"
response = super(ReportController, self).report_routes(
reportname, docids, new_converter, **data)
# If it was an XML report, just download the generated response
if converter == "xml":
# XML header must be before any spaces, and it is a common error,
# so let's fix that here and make developers happier
response.data = response.data.strip()
response.headers.set("Content-Type", "text/xml")
response.headers.set('Content-length', len(response.data))
# set filename
action_report_obj = request.env['ir.actions.report']
report = action_report_obj._get_report_from_name(reportname)
filename = report.name
if docids:
ids = [int(x) for x in docids.split(",")]
records = request.env[report.model].browse(ids)
if report.print_report_name and not len(records) > 1:
filename = safe_eval(report.print_report_name,
{'object': records, 'time': time})
response.headers.set(
'Content-Disposition',
content_disposition(filename + ".xml"))
return response
|
detian08/bsp_addons
|
reporting-engine-11.0/report_xml/controllers/main.py
|
main.py
|
py
| 1,704 |
python
|
en
|
code
| 1 |
github-code
|
6
|
361475644
|
import numpy as np
import pandas as pd
def output(time, station_dict):
text = []
text.append("\nTime: {}".format(time))
text.append('------------------------------------------------------')
station_ids = pd.read_csv('input_data/stations_state.csv')['station_id'].tolist()
for station in station_ids:
station_obj = station_dict[station]
text.append('\tStation: {}'.format(station))
text.append('\t\tNumber of Idle Vehicles: {}'.format(len(station_obj.car_list)))
text.append('\t\tAvailable Parking: {}'.format(station_obj.calc_parking()))
text.append(
'\t\tNumber of People En_Route: {}'.format(len(station_obj.get_en_route_list())))
# text.append('Errors: {}'.format(errors))
np.save('output_files/state_data/station_state', station_dict)
return text
def write(file, text):
output_file = open(file, 'w')
for item in text:
for x in item:
output_file.write("%s\n" % x)
print("\n\noutput_files/station_overview.txt created")
output_file.close()
|
Nick-Masri/ASL-HA-MO-Simulator-Project
|
simulator/output_formatting/overview.py
|
overview.py
|
py
| 1,074 |
python
|
en
|
code
| 2 |
github-code
|
6
|
46051636676
|
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.contrib import messages
from django.utils.translation import gettext as _
from djconfig import config
from spirit.core.utils.views import is_post, post_data
from spirit.core.utils.paginator import yt_paginate
from spirit.core.utils.decorators import administrator_required
from .forms import CommentFlagForm
from ..models import CommentFlag, Flag
@administrator_required
def detail(request, pk):
flag = get_object_or_404(CommentFlag, pk=pk)
form = CommentFlagForm(
user=request.user,
data=post_data(request),
instance=flag)
if is_post(request) and form.is_valid():
form.save()
messages.info(request, _("The flag has been moderated!"))
return redirect(reverse("spirit:admin:flag:index"))
flags = yt_paginate(
Flag.objects.filter(comment=flag.comment),
per_page=config.comments_per_page,
page_number=request.GET.get('page', 1)
)
return render(
request=request,
template_name='spirit/comment/flag/admin/detail.html',
context={
'flag': flag,
'flags': flags,
'form': form})
@administrator_required
def _index(request, queryset, template):
flags = yt_paginate(
queryset,
per_page=config.comments_per_page,
page_number=request.GET.get('page', 1)
)
context = {'flags': flags, }
return render(request, template, context)
def opened(request):
return _index(
request,
queryset=CommentFlag.objects.filter(is_closed=False),
template='spirit/comment/flag/admin/open.html'
)
def closed(request):
return _index(
request,
queryset=CommentFlag.objects.filter(is_closed=True),
template='spirit/comment/flag/admin/closed.html'
)
|
nitely/Spirit
|
spirit/comment/flag/admin/views.py
|
views.py
|
py
| 1,916 |
python
|
en
|
code
| 1,153 |
github-code
|
6
|
2768444611
|
import pandas as pd
import math
from sklearn import linear_model
import numpy as np
def predict_using_sklearn():
test_scores = pd.read_csv(r'C:\Users\Polina\Desktop\Python\Pandas\test_scores.csv')
reg = linear_model.LinearRegression()
reg.fit(test_scores[['math']], test_scores.cs)
return reg.coef_, reg.intercept_
def gradient_descent(x,y):
m_curr=b_curr=0
iterations = 1000000
n = len(x)
learning_rate = 0.0002
cost_previous = 0
for i in range(iterations):
y_predicted = m_curr*x + b_curr
cost = (1/n)*sum([val**2 for val in (y-y_predicted)])
md = -(2/n)*sum(x*(y-y_predicted))
bd = -(2/n)*sum(y -y_predicted)
m_curr = m_curr- learning_rate*md
b_curr = b_curr - learning_rate*bd
if math.isclose(cost, cost_previous, rel_tol=1e-20):
break
cost_previous = cost
return m_curr, b_curr
if __name__ == '__main__':
df = pd.read_csv(r"C:\Users\Polina\Desktop\Python\Pandas\test_scores.csv")
x = np.array(df.math)
y = np.array(df.cs)
m, b = gradient_descent(x,y)
print("Using gradient descent function: Coef {} Intercept {}".format(m, b))
m_sklearn, b_sklearn = predict_using_sklearn()
print("Using sklearn: Coef {} Intercept {}".format(m_sklearn,b_sklearn))
|
CarlBrendt/Data-Analysis
|
Gradient_descent_with_no_train_test.py
|
Gradient_descent_with_no_train_test.py
|
py
| 1,356 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25743059465
|
class Solution(object):
def twoSum(self, nums, target):
itr = len(nums)
d = {}
for i in xrange(itr):
            if (target - nums[i]) in d:
return [d[target - nums[i]], i]
else:
d[nums[i]] = i
''' Naive half solution
def twoSum(self, nums, target):
itr = len(nums)
for i in xrange(itr):
for j in xrange(i):
if (target == nums[i] + nums[j]):
return [j,i]
'''
''' Naive solution
def twoSum(self, nums, target):
itr = len(nums)
for i in xrange(itr):
for j in xrange(itr):
if (target == nums[i] + nums[itr-j-1]):
return [i,j-1]
'''
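# Hedged usage sketch (an assumption, not part of the original solution):
if __name__ == '__main__':
    print(Solution().twoSum([2, 7, 11, 15], 9))  # expected: [0, 1]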
|
chaitan64arun/algo-ds
|
leetcode/twosum.py
|
twosum.py
|
py
| 756 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26038332576
|
from __future__ import annotations
import pytest
@pytest.mark.parametrize(
"variables, expected_data",
[
(
{"name": r"pants_explorer\."},
{
"rules": [
{"name": "pants_explorer.server.graphql.rules.get_graphql_uvicorn_setup"},
]
},
),
(
{"name": r"\.graphql\."},
{
"rules": [
{"name": "pants_explorer.server.graphql.rules.get_graphql_uvicorn_setup"},
]
},
),
(
{"limit": 0},
{"rules": []},
),
(
{"limit": 0},
{"rules": []},
),
],
)
def test_rules_query(
schema, queries: str, variables: dict, expected_data: dict, context: dict
) -> None:
actual_result = schema.execute_sync(
queries, variable_values=variables, context_value=context, operation_name="TestRulesQuery"
)
assert actual_result.errors is None
assert actual_result.data == expected_data
|
pantsbuild/pants
|
pants-plugins/pants_explorer/server/graphql/query/rules_test.py
|
rules_test.py
|
py
| 1,078 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
27390159801
|
# 2-Way Partition
# http://rosalind.info/problems/par/
from utilities import get_file, get_answer_file
def quick_sort2(arr):
if len(arr) <= 1:
return arr
pivot = arr[len(arr)//2]
left_arr, equal_arr, right_arr = [], [], []
for num in arr:
if num < pivot:
left_arr.append(num)
elif num > pivot:
right_arr.append(num)
else:
equal_arr.append(num)
return quick_sort2(left_arr) + equal_arr + quick_sort2(right_arr)
def quick_sort(arr):
def sort(low, high):
if high <= low:
return
mid = partition(low, high)
sort(low, mid-1)
sort(mid, high)
def partition(low, high):
pivot = arr[(low + high) // 2]
while low <= high:
while arr[low] < pivot:
low += 1
while arr[high] > pivot:
high -= 1
if low <= high:
arr[low], arr[high] = arr[high], arr[low]
low, high = low + 1, high - 1
return low
return sort(0, len(arr) - 1)
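def _self_test():
    # Hedged sanity check (an assumption, not part of the original submission):
    # both sorts above should agree with Python's built-in sorted().
    sample = [3, -2, 7, 7, 0, 1]
    in_place = list(sample)
    quick_sort(in_place)
    assert in_place == sorted(sample) == quick_sort2(sample)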
with get_file() as file:
len_array = int(file.readline().rstrip())
num_array = list(map(int, file.readline().split()))
with get_answer_file() as file:
quick_sort(num_array)
print(" ".join(map(str, num_array)), file=file)
|
Delta-Life/Bioinformatics
|
Rosalind/Algorithmic Heights/code/PAR.py
|
PAR.py
|
py
| 1,335 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39254517776
|
from django.db import models
from django.db.models import Case, Count, IntegerField, When
class CountryManager(models.Manager):
def aggregate_integration_statuses(self):
from proco.connection_statistics.models import CountryWeeklyStatus
return self.get_queryset().aggregate(
countries_joined=Count(Case(When(
last_weekly_status__integration_status__in=[
CountryWeeklyStatus.SCHOOL_MAPPED,
CountryWeeklyStatus.STATIC_MAPPED,
CountryWeeklyStatus.REALTIME_MAPPED,
], then=1),
output_field=IntegerField())),
countries_connected_to_realtime=Count(Case(When(
last_weekly_status__integration_status=CountryWeeklyStatus.REALTIME_MAPPED, then=1),
output_field=IntegerField())),
countries_with_static_data=Count(Case(When(
last_weekly_status__integration_status=CountryWeeklyStatus.STATIC_MAPPED, then=1),
output_field=IntegerField()),
),
            countries_with_static_and_realtime_data=Count(Case(When(
                # `X or Y` between two constants always evaluates to the first
                # truthy constant, so match either status explicitly instead.
                last_weekly_status__integration_status__in=[
                    CountryWeeklyStatus.STATIC_MAPPED,
                    CountryWeeklyStatus.REALTIME_MAPPED,
                ], then=1),
                output_field=IntegerField()),
            ),
        )
|
unicef/Project-Connect-BE
|
proco/locations/managers.py
|
managers.py
|
py
| 1,353 |
python
|
en
|
code
| 2 |
github-code
|
6
|
15366112732
|
import numpy as np
import os
try:
import welib.fastlib.fastlib as fastlib
except:
import fastlib
def CPLambdaExample():
""" Example to determine the CP-CT Lambda Pitch matrices of a turbine.
    This script uses the function CPCT_LambdaPitch which basically does the same as ParametricExample
above.
"""
ref_dir = 'NREL5MW/' # Folder where the fast input files are located (will be copied)
main_file = 'Main_Onshore_OF2.fst' # Main file in ref_dir, used as a template
FAST_EXE = 'NREL5MW/OpenFAST2_x64s_ebra.exe' # Location of a FAST exe (and dll)
# --- Computing CP and CT matrices for range of lambda and pitches
Lambda = np.linspace(0.1,10,3)
Pitch = np.linspace(-10,10,4)
CP,CT,Lambda,Pitch,MaxVal,result = fastlib.CPCT_LambdaPitch(ref_dir,main_file,Lambda,Pitch,fastExe=FAST_EXE,ShowOutputs=False,nCores=4,TMax=10)
print('CP max',MaxVal)
# --- Plotting matrix of CP values
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca(projection='3d')
LAMBDA, PITCH = np.meshgrid(Lambda, Pitch)
CP[CP<0]=0
surf = ax.plot_surface(LAMBDA, PITCH, np.transpose(CP), cmap=cm.coolwarm, linewidth=0, antialiased=True,alpha=0.8)
ax.scatter(MaxVal['lambda_opt'],MaxVal['pitch_opt'],MaxVal['CP_max'],c='k',marker='o',s=20)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
if __name__=='__main__':
CPLambdaExample()
|
rhaghi/welib
|
welib/fastlib/_examples/Example_CPLambdaPitch.py
|
Example_CPLambdaPitch.py
|
py
| 1,520 |
python
|
en
|
code
| null |
github-code
|
6
|
35060216092
|
from flask import Flask, render_template, request, jsonify
import atexit
import cf_deployment_tracker
import os
import json
import requests
# Emit Bluemix deployment event
cf_deployment_tracker.track()
app = Flask(__name__)
db_name = 'mydb'
client = None
db = None
'''
if 'VCAP_SERVICES' in os.environ:
vcap = json.loads(os.getenv('VCAP_SERVICES'))
print('Found VCAP_SERVICES')
if 'cloudantNoSQLDB' in vcap:
creds = vcap['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif os.path.isfile('vcap-local.json'):
with open('vcap-local.json') as f:
vcap = json.load(f)
print('Found local VCAP_SERVICES')
creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
'''
# On Bluemix, get the port number from the environment variable PORT
# When running this app on the local machine, default the port to 8080
port = int(os.getenv('PORT', 8080))
def loadApiKeys(mltype):
with open('apikeys.json') as data_file:
apikeys = json.load(data_file)
if mltype=="classification":
return apikeys['classification']
elif mltype=="prediction":
return apikeys['prediction']
else:
print("Algorithm doesn't exist")
@app.route('/')
def home():
return render_template('prediction.html')
@app.route('/prediction')
def render_prediction():
return render_template('prediction.html')
@app.route('/classification')
def render_classification():
return render_template('classification.html')
# /* Endpoint to get an interest rate prediction.
# * Send a POST request to localhost:8080/prediction/getPrediction with the
# * loan fields read below and the chosen algorithm type ("algoType") in the
# * JSON body.
# */
@app.route('/prediction/getPrediction', methods=['POST'])
def get_prediction():
try:
apikeys=loadApiKeys('prediction')
if apikeys == None:
print("Api Keys file has some issue")
return_dict = {"predicted_interest_rate":"Some Error occured with api keys file"}
return json.dumps(return_dict)
else:
credit_score=request.json['credit_score']
og_first_time_home_buyer=request.json['og_first_time_home_buyer']
og_upb=request.json['og_upb']
og_loan_term=request.json['og_loan_term']
og_quarter_year=request.json['og_quarter_year']
og_seller_name=request.json['og_seller_name']
og_servicer_name=request.json['og_servicer_name']
algoType = request.json['algoType']
#print(str(algoType)+"\t"+str(credit_score)+"\t"+str(og_first_time_home_buyer)+"\t"+str(og_upb)+"\t"+str(og_loan_term)+"\t"+str(og_quarter_year)+"\t"+str(og_seller_name)+"\t"+str(og_servicer_name))
#make ai call
if algoType=="pred_df":
url=apikeys['boosteddecisiontree']['url']
api_key=apikeys['boosteddecisiontree']['apikey']
elif algoType=="pred_nn":
url=apikeys['neuralnetwork']['url']
api_key=apikeys['neuralnetwork']['apikey']
elif algoType=="pred_lr":
url=apikeys['linearregression']['url']
api_key=apikeys['linearregression']['apikey']
data = {
"Inputs": {
"input1":
{
"ColumnNames": ["CREDIT_SCORE", "FIRST_HOME_BUYER_FLAG", "OG_UPB", "OG_LOANTERM", "SELLER_NAME", "SERVICE_NAME", "OG_QUARTERYEAR"],
"Values": [ [credit_score,og_first_time_home_buyer,og_upb,og_loan_term,og_seller_name,og_servicer_name,og_quarter_year]]
}, },
"GlobalParameters": {
}
}
body = str.encode(json.dumps(data))
#url = 'https://ussouthcentral.services.azureml.net/workspaces/5de0e8bd28f74cf9a40babb3f1799a53/services/300d6267d2f843c9a5975621ff077a09/execute?api-version=2.0&details=true'
#api_key = 'wQWgTpa3GyVACzg7Q6jVDdwt5JEDnfdvqqG21PKDr+UHmZWRQJh1XfrtLVON846vEDEXoDgnruZ1s9zd4Drzyw==' # Replace this with the API key for the web service
headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
response = requests.post(url, data=body,headers=headers)
#print(response.content)
response_json=json.loads(response.content)
predicted_interest_rate=response_json['Results']['output1']['value']['Values'][0][7]
if predicted_interest_rate == "":
predicted_interest_rate = "Some error occured"
return_dict = {"predicted_interest_rate":predicted_interest_rate}
return json.dumps(return_dict)
except:
return_dict = {"predicted_interest_rate":"Some error occured"}
return json.dumps(return_dict)
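def example_prediction_request(base_url="http://localhost:8080"):
    """Hedged client-side sketch (an assumption, not part of the original app):
    posts the JSON fields that get_prediction() reads above. The field values
    and the base_url default are illustrative only."""
    payload = {
        "credit_score": 750,
        "og_first_time_home_buyer": "Y",
        "og_upb": 200000,
        "og_loan_term": 360,
        "og_quarter_year": "Q12005",
        "og_seller_name": "SELLER",
        "og_servicer_name": "SERVICER",
        "algoType": "pred_lr",
    }
    return requests.post(base_url + "/prediction/getPrediction", json=payload)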
@app.route('/classification/getClassification', methods=['POST'])
def get_classification():
try:
apikeys=loadApiKeys('classification')
if apikeys == None:
print("Api Keys file has some issue")
classified_as="Some Error occured with api keys file"
scored_probability = ""
return_dict = {"classified_as":classified_as,"scored_probability":scored_probability}
return json.dumps(return_dict)
else:
curr_act_upb=request.json['curr_act_upb']
loan_age=request.json['loan_age']
months_to_legal_maturity=request.json['months_to_legal_maturity']
curr_interest_rate=request.json['crr_interest_rate']
curr_deferred_upb=request.json['curr_deferred_upb']
algoType = request.json['algoType']
#print(curr_act_upb+"\t"+loan_age+"\t"+months_to_legal_maturity+"\t"+curr_interest_rate+"\t"+curr_deferred_upb)
#make ai call
if algoType=="pred_df":
url=apikeys['decisionjungle']['url']
api_key=apikeys['decisionjungle']['apikey']
elif algoType=="pred_nn":
url=apikeys['bayestwopoint']['url']
api_key=apikeys['bayestwopoint']['apikey']
elif algoType=="pred_lr":
url=apikeys['logisticregression']['url']
api_key = apikeys['logisticregression']['apikey']
data = {
"Inputs": {
"input1":
{
"ColumnNames": ["CUR_ACT_UPB", "LOAN_AGE", "MONTHS_LEGAL_MATURITY", "CURR_INTERESTRATE", "CURR_DEF_UPB"],
"Values": [[curr_act_upb, loan_age, months_to_legal_maturity, curr_interest_rate, curr_deferred_upb]]
}, },
"GlobalParameters": {
}
}
body = str.encode(json.dumps(data))
#url = 'https://ussouthcentral.services.azureml.net/workspaces/5de0e8bd28f74cf9a40babb3f1799a53/services/300d6267d2f843c9a5975621ff077a09/execute?api-version=2.0&details=true'
#api_key = 'wQWgTpa3GyVACzg7Q6jVDdwt5JEDnfdvqqG21PKDr+UHmZWRQJh1XfrtLVON846vEDEXoDgnruZ1s9zd4Drzyw==' # Replace this with the API key for the web service
headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
response = requests.post(url, data=body,headers=headers)
#print(response.content)
response_json=json.loads(response.content)
if response_json['Results']['output1']['value']['Values'][0][5] == "0":
scored_probability=response_json['Results']['output1']['value']['Values'][0][6]
classified_as="Non-Delinquent"
elif response_json['Results']['output1']['value']['Values'][0][5] == "1":
scored_probability=response_json['Results']['output1']['value']['Values'][0][6]
classified_as="Delinquent"
else:
classified_as="Some Error occured in Classification"
scored_probability = ""
return_dict = {"classified_as":classified_as,"scored_probability":scored_probability}
return json.dumps(return_dict)
except:
return_dict = {"classified_as":"Some Error occured."}
return json.dumps(return_dict)
# /**
# * Endpoint to get a JSON array of all the visitors in the database
# * REST API example:
# * <code>
# * GET http://localhost:8080/api/visitors
# * </code>
# *
# * Response:
# * [ "Bob", "Jane" ]
# * @return An array of all the visitor names
# */
@app.route('/api/visitors', methods=['POST'])
def put_visitor():
user = request.json['name']
if client:
data = {'name':user}
db.create_document(data)
return 'Hello %s! I added you to the database.' % user
else:
print('No database')
return 'Hello %s!' % user
@atexit.register
def shutdown():
if client:
client.disconnect()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=True)
|
vishalsatam/DeploymentOfMLAlgoOnCloud
|
Flask Application/webApp.py
|
webApp.py
|
py
| 9,720 |
python
|
en
|
code
| 2 |
github-code
|
6
|
3920778
|
import sys
a = int(input())
w = [0] * (a + 1)
w[1] = int(input())
tmp = [[0 for i in range(502)] for j in range(502)]
tmp[1][0] = w[1]
if a == 1:
print(w[1])
else:
for i in range(2, a + 1):
k = [int(i) for i in sys.stdin.readline().split()]
tmp[i][0] = tmp[i-1][0] + k[0]
for j in range(1, i):
tmp[i][j] = max(tmp[i-1][j-1], tmp[i-1][j]) + k[j]
tmp[i][i] = tmp[i-1][i-1] + k[i-1]
w[i] = max(tmp[i])
print(w[a])
|
Winmini/CodingTest
|
BOJ/1932.py
|
1932.py
|
py
| 476 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10272912392
|
import sys
import user_login
from PyQt4 import QtGui,QtCore
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = user_login.Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
|
naresh-chaudhary/Institute-Checkpost-Management-System
|
Main.py
|
Main.py
|
py
| 258 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5995812394
|
#!/usr/bin/env python
import numpy as np
import healpy as hp
import pylab
import matplotlib.pyplot as plt
import time
import mocklc
import matplotlib
import sepmat
import gpkernel
import scipy
import emcee
import sys
import time
Ns=2000
np.random.seed(17)
#set geometry
inc=45.0/180.0*np.pi
Thetaeq=np.pi
zeta=60.0/180.0*np.pi
Pspin=23.9344699/24.0 #Pspin: a sidereal day
wspin=2*np.pi/Pspin
Porb=365.242190402
worb=2*np.pi/Porb
Ni=1024
obst=np.linspace(0.0,Porb,Ni)
# test map
nside=16
npix=hp.nside2npix(nside)
mmap=hp.read_map("/home/kawahara/exomap/sot/data/mockalbedo16.fits")
mask=(mmap>0.0)
mmap[mask]=1.0
M=len(mmap)
#generating light curve
Thetav=worb*obst
Phiv=np.mod(wspin*obst,2*np.pi)
WI,WV=mocklc.comp_weight(nside,zeta,inc,Thetaeq,Thetav,Phiv)
W=WV[:,:]*WI[:,:]
lc=np.dot(W,mmap)
sigma=np.mean(lc)*0.1
noise=np.random.normal(0.0,sigma,len(lc))
lc=lc+noise
## RBF kernel
nside=16
npix=hp.nside2npix(nside)
sep=sepmat.calc_sepmatrix(nside)
## optimization
tag="RBFobl"
## spin and hyperparameter MCMC sampling using emcee
def log_prior(theta):
p_zeta,p_Thetaeq,p_gamma,p_alpha=theta
if 0.0 < p_zeta < np.pi and 0.0 < p_Thetaeq < 2*np.pi and 1.e-10 < p_gamma < np.pi/3.0 and 1.e-10 < p_alpha:
return np.log(np.sin(p_zeta)/p_alpha/p_gamma)
return -np.inf
def log_likelihood(theta, d, covd):
p_zeta,p_Thetaeq,p_gamma,p_alpha=theta
WI,WV=mocklc.comp_weight(nside,p_zeta,inc,p_Thetaeq,Thetav,Phiv)
Wp=WV[:,:]*WI[:,:]
#KS=p_alpha*gpkernel.Matern32(sep,p_gamma)
KS=p_alpha*gpkernel.RBF(sep,p_gamma)
Cov = covd + Wp@[email protected]
sign,logdet=np.linalg.slogdet(Cov)
Pi_d=scipy.linalg.solve(Cov,d,assume_a="pos")
prop = -0.5*logdet-0.5*d@Pi_d #-0.5*np.shape(cov)[0]*np.log(2.0*np.pi)
return prop
def log_probability(theta, d, covd):
lp = log_prior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + log_likelihood(theta, d, covd)
gam0=0.29298260376811
alpha0=sigma**2*0.774263682681127
pos = np.array([zeta,Thetaeq,gam0,alpha0])+ 1e-4 * np.random.randn(16, 4)
nwalkers, ndim = pos.shape
#Assumming we know the data covariance
covd=sigma**2*np.eye(Ni)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, args=(lc, covd))
sampler.run_mcmc(pos, Ns, progress=True);
flat_samples = sampler.get_chain(discard=100, thin=15, flat=True)
#samples = sampler.get_chain()
#print(samples)
labels=["zeta","Thetaeq","gamma","alpha"]
inputgeo=[inc,Thetaeq,zeta,Pspin,Porb,obst]
np.savez("flat_sample"+tag,flat_samples,W,lc,inputgeo)
import corner
fig = corner.corner(flat_samples, labels=labels, truths=[zeta,Thetaeq,None,None])
plt.savefig("corner_"+tag+".png")
plt.savefig("corner_"+tag+".pdf")
plt.show()
|
HajimeKawahara/sot
|
src/sot/dymap/static_sampling.py
|
static_sampling.py
|
py
| 2,866 |
python
|
en
|
code
| 4 |
github-code
|
6
|
73944785148
|
import pathlib
import numpy as np
import h5py
import cv2
import argparse
def load_episodes(directory, capacity=None):
    # The returned dictionary from filenames to episodes is guaranteed to be in
# temporally sorted order.
filenames = sorted(directory.glob('*.npz'))
if capacity:
num_steps = 0
num_episodes = 0
for filename in reversed(filenames):
length = int(str(filename).split('-')[-1][:-4])
num_steps += length
num_episodes += 1
if num_steps >= capacity:
break
filenames = filenames[-num_episodes:]
episodes = {}
for filename in filenames:
try:
with filename.open('rb') as f:
episode = np.load(f)
episode = {k: episode[k] for k in episode.keys()}
# Conversion for older versions of npz files.
if 'is_terminal' not in episode:
episode['is_terminal'] = episode['discount'] == 0.
except Exception as e:
print(f'Could not load episode {str(filename)}: {e}')
continue
episodes[str(filename)] = episode
return episodes
def main():
# Include argument parser
parser = argparse.ArgumentParser(description='Convert npz files to hdf5.')
parser.add_argument('--input_dir', type=str, required=True,
help='Path to input files')
parser.add_argument('--output_dir', type=str, required=True,
help='Path to output files')
args = parser.parse_args()
step_type = np.ones(501)
step_type[0] = 0
step_type[500] = 2
output = {}
episodes = load_episodes(pathlib.Path(args.input_dir))
episodes = list(episodes.values())
actions = [e['action'] for e in episodes]
discounts = [e['discount'] for e in episodes]
observations = []
for e in episodes:
resized_images = np.empty((501, 84, 84, 3), dtype=e['image'].dtype)
for (k, i) in enumerate(e['image']):
resized_images[k] = cv2.resize(i, dsize=(84, 84), interpolation=cv2.INTER_CUBIC)
observations.append(resized_images.transpose(0, 3, 1, 2))
rewards = [e['reward'] for e in episodes]
step_types = [step_type for _ in episodes]
output['action'] = np.concatenate(actions)
output['discount'] = np.concatenate(discounts)
output['observation'] = np.concatenate(observations)
output['reward'] = np.concatenate(rewards)
output['step_type'] = np.concatenate(step_types)
out_dir = pathlib.Path(args.output_dir)
out_dir.mkdir(parents=True, exist_ok=True)
with h5py.File(out_dir / 'data.hdf5', 'w') as shard_file:
for k, v in output.items():
shard_file.create_dataset(k, data=v, compression='gzip')
if __name__ == '__main__':
main()
|
conglu1997/v-d4rl
|
conversion_scripts/npz_to_hdf5.py
|
npz_to_hdf5.py
|
py
| 2,833 |
python
|
en
|
code
| 64 |
github-code
|
6
|
21025162572
|
#!/usr/bin/env python3
"""
Implementation of R3PTAR
"""
import logging
import signal
import sys
from ev3dev2.motor import OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D, MediumMotor, LargeMotor
from ev3dev2.sensor.lego import InfraredSensor
from ev3dev2.sound import Sound
from threading import Thread, Event
from time import sleep
log = logging.getLogger(__name__)
class MonitorRemoteControl(Thread):
"""
A thread to monitor R3PTAR's InfraredSensor and process signals
from the remote control
"""
def __init__(self, parent):
Thread.__init__(self)
self.parent = parent
self.shutdown_event = Event()
def __str__(self):
return "MonitorRemoteControl"
def run(self):
STRIKE_SPEED_PCT = 40
while True:
if self.shutdown_event.is_set():
log.info('%s: shutdown_event is set' % self)
break
#log.info("proximity: %s" % self.parent.remote.proximity)
if self.parent.remote.proximity < 30:
self.parent.speaker.play('snake-hiss.wav', Sound.PLAY_NO_WAIT_FOR_COMPLETE)
self.parent.strike_motor.on_for_seconds(speed=STRIKE_SPEED_PCT, seconds=0.5)
self.parent.strike_motor.on_for_seconds(speed=(STRIKE_SPEED_PCT * -1), seconds=0.5)
self.parent.remote.process()
sleep(0.01)
class R3PTAR(object):
def __init__(self,
drive_motor_port=OUTPUT_B,
strike_motor_port=OUTPUT_D,
steer_motor_port=OUTPUT_A,
drive_speed_pct=60):
self.drive_motor = LargeMotor(drive_motor_port)
self.strike_motor = LargeMotor(strike_motor_port)
self.steer_motor = MediumMotor(steer_motor_port)
self.speaker = Sound()
STEER_SPEED_PCT = 30
self.remote = InfraredSensor()
self.remote.on_channel1_top_left = self.make_move(self.drive_motor, drive_speed_pct)
self.remote.on_channel1_bottom_left = self.make_move(self.drive_motor, drive_speed_pct * -1)
self.remote.on_channel1_top_right = self.make_move(self.steer_motor, STEER_SPEED_PCT)
self.remote.on_channel1_bottom_right = self.make_move(self.steer_motor, STEER_SPEED_PCT * -1)
self.shutdown_event = Event()
self.mrc = MonitorRemoteControl(self)
# Register our signal_term_handler() to be called if the user sends
# a 'kill' to this process or does a Ctrl-C from the command line
signal.signal(signal.SIGTERM, self.signal_term_handler)
signal.signal(signal.SIGINT, self.signal_int_handler)
def make_move(self, motor, speed):
def move(state):
if state:
motor.on(speed)
else:
motor.stop()
return move
def shutdown_robot(self):
if self.shutdown_event.is_set():
return
self.shutdown_event.set()
log.info('shutting down')
self.mrc.shutdown_event.set()
self.remote.on_channel1_top_left = None
self.remote.on_channel1_bottom_left = None
self.remote.on_channel1_top_right = None
self.remote.on_channel1_bottom_right = None
self.drive_motor.off(brake=False)
self.strike_motor.off(brake=False)
self.steer_motor.off(brake=False)
self.mrc.join()
def signal_term_handler(self, signal, frame):
log.info('Caught SIGTERM')
self.shutdown_robot()
def signal_int_handler(self, signal, frame):
log.info('Caught SIGINT')
self.shutdown_robot()
def main(self):
self.mrc.start()
self.shutdown_event.wait()
if __name__ == '__main__':
# Change level to logging.DEBUG for more details
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)5s %(filename)s: %(message)s")
log = logging.getLogger(__name__)
# Color the errors and warnings in red
logging.addLevelName(logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING))
log.info("Starting R3PTAR")
snake = R3PTAR()
snake.main()
log.info("Exiting R3PTAR")
|
ev3dev/ev3dev-lang-python-demo
|
robots/R3PTAR/r3ptar.py
|
r3ptar.py
|
py
| 4,270 |
python
|
en
|
code
| 59 |
github-code
|
6
|
17371120296
|
import json
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
from django.contrib.auth import get_user_model
from rest_framework_simplejwt.tokens import RefreshToken
User = get_user_model()
AUTHOR = 'author'
EXECUTOR = 'executor'
AUTHOR_EMAIL = '[email protected]'
NEW_AUTHOR_EMAIL = '[email protected]'
EXECUTOR_EMAIL = '[email protected]'
AUTHOR_ROLE = 'author'
EXECUTOR_ROLE = 'executor'
START_BALANCE = 500
NEW_BALANCE = 1
USERS_LIST_URL = reverse('users-list')
RESPOND_NEW_DATA = {'author': 2, 'task': 1}
class TaskModelTest(APITestCase):
def setUp(self):
self.admin = User.objects.create_superuser(
username='admin',
email='[email protected]',
balance=START_BALANCE,
freeze_balance=START_BALANCE,
is_staff=True,
is_superuser=True)
self.author = User.objects.create_user(
username=AUTHOR,
email=AUTHOR_EMAIL,
balance=START_BALANCE,
freeze_balance=START_BALANCE,
role=AUTHOR_ROLE)
self.executor = User.objects.create_user(
username=EXECUTOR,
email=EXECUTOR_EMAIL,
balance=START_BALANCE,
freeze_balance=START_BALANCE,
role=EXECUTOR_ROLE)
self.ADMIN_DETAIL_URL = reverse('users-detail', args=[self.admin.id])
self.AUTHOR_ADD_BALANCE_URL = reverse('users-balance')
self.USER_CHANGE_DATA_URL = reverse('users-me')
self.AUTHOR_DETAIL_URL = reverse('users-detail', args=[self.author.id])
self.EXECUTOR_DETAIL_URL = reverse('users-detail', args=[self.executor.id])
self.executor_client = APIClient()
self.executor_client.force_authenticate(user=self.executor)
self.admin_client = APIClient()
self.admin_client.force_authenticate(user=self.admin)
self.token = RefreshToken.for_user(self.author)
self.author_token = self.token.access_token
self.auth_client = APIClient()
self.auth_client.credentials(HTTP_AUTHORIZATION=f'Bearer {self.author_token}')
def test_admin_get_users_list(self):
response = self.admin_client.get(USERS_LIST_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data['results']), 3)
def test_not_admin_cant_get_users_list(self):
response = self.auth_client.get(USERS_LIST_URL)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_admin_can_update_user_data(self):
balance_before = self.author.balance
response = self.admin_client.patch(
self.AUTHOR_DETAIL_URL,
data=json.dumps({'balance': NEW_BALANCE}),
content_type='application/json')
self.author.refresh_from_db()
balance_after = self.author.balance
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(balance_before, START_BALANCE)
self.assertEqual(balance_after, NEW_BALANCE)
def test_balance_url_add_money_to_balance(self):
balance_before = self.author.balance
response = self.auth_client.patch(
self.AUTHOR_ADD_BALANCE_URL,
data=json.dumps({'balance': NEW_BALANCE}),
content_type='application/json')
self.author.refresh_from_db()
balance_after = self.author.balance
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(balance_before, balance_after-NEW_BALANCE)
def test_not_admin_cant_update_user_data(self):
email_before = self.author.email
response = self.auth_client.patch(
self.AUTHOR_DETAIL_URL,
data=json.dumps({'email': NEW_AUTHOR_EMAIL}),
content_type='application/json')
self.author.refresh_from_db()
email_after = self.author.email
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(email_before, email_after)
def test_user_can_update_own_data(self):
email_before = self.author.email
response = self.auth_client.patch(
self.USER_CHANGE_DATA_URL,
data=json.dumps({'email': NEW_AUTHOR_EMAIL}),
content_type='application/json')
self.author.refresh_from_db()
email_after = self.author.email
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(email_before, AUTHOR_EMAIL)
self.assertEqual(email_after, NEW_AUTHOR_EMAIL)
|
vavsar/freelance_t
|
tests/users/test_views.py
|
test_views.py
|
py
| 4,583 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70106082748
|
'''
1. import opencv
2. load image
3. load model
4. adjust image gray
5. check and mark face
6. create window
7. show image
8. pause window
9. close window
'''
import numpy as np
import cv2
# print(cv2.__version__)
# Load image
img = cv2.imread("./images/ufc.jpeg")
# Load model
face_cascade = cv2.CascadeClassifier(
'./data/haarcascade_frontalface_default.xml')
# Adjust image gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# find faces
faces = face_cascade.detectMultiScale(
gray,
# scaleFactor=1.15,
# minNeighbors=5,
# minSize=(5, 5),
# flags = cv2.HAAR_SCALE_IMAGE
)
# Mark faces
for(x, y, w, h) in faces:
# image, location, size, color, line width
    cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Create window
cv2.namedWindow("faceImage")
# Show image
cv2.imshow('faceImage', img)
#
cv2.waitKey(0)
cv2.destroyAllWindows()
|
benjaminhuanghuang/opencv-study
|
cv-find-face.py
|
cv-find-face.py
|
py
| 900 |
python
|
en
|
code
| 0 |
github-code
|
6
|
575101998
|
#ChickenCrossing.py
#A.Colwell(2016)
'''
Set up a function to represent crossing a lane, taking the number of
chickens attempting to cross as its argument.
Repeat eight times, starting with 1000 chickens.
Store the results in a list.
Print out the list results.
'''
import random
def chickenCrossing(chicks):
died = int(round(random.randint(0,chicks)*.1))
survived = chicks - died
#print(died)
return survived
for x in range(9):
survivors = [1000]
for i in range(8):
chickens = survivors[i]
survived = chickenCrossing(chickens)
survivors.append(survived)
#print(survivors)
print('Starting with 1000 chickens, the number surviving crossing each lane:')
for i in range(1,9):
print('Survived lane ',i,' were ',survivors[i],' representing ',
(survivors[i])//10,'% of original flock.',sep='')
print('With',survivors[-1],'successfully crossing the road.')
print('Meaning that only ',(survivors[i])//10,'% of the chickens survived.\n',sep='')
|
MrColwell/PythonProfessionalLearning
|
PythonForTeachers/StudentCode/Exam Scripts/ChickenCrossing.py
|
ChickenCrossing.py
|
py
| 1,056 |
python
|
en
|
code
| 0 |
github-code
|
6
|