seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
---|---|---|---|---|---|---|---|---|---|---|---|---|
41237359815
|
import os, sys
import multiprocessing as mp
def pipeline(config, genome, protocol, cells, minreads, name, fq1, fq2, dir,
top_million_reads, step, parallel):
"""
Run the Data Processing Pipeline...
#. Count and collect stats on barcodes from the pair-end 1 sequences;
#. Read the barcode counts files;
#. Correct barcodes with a 1 bp mismatch;
#. Collect stats on mismatched barcode reads and sequences;
#. Determine whether the last base is mutated (A/T/C/G appear at similar ratios at the last base);
#. Filter by whitelist;
#. Filter by read counts (>=min_reads);
#. Print the number of barcodes and reads retained after each step.
Usage:
::
from baseqDrops import pipeline
pipeline("", "hg38", "10X", 1000, minreads, name, fq1, fq2, dir, top_million_reads, step, parallel)
#the UMI and barcode lengths are set by choosing the matching protocol (see Protocols below)
#Run in command line
baseqdrops
Protocols:
#. For 10X: 16bp Barcode and 10 bp UMI => 10X (most commonly used)
#. For 10X: 14bp Barcode and 5/10 bp UMIs => 10X_14_10 / 10X_14_5 (For some old version data)
#. For DropSeq ==> dropseq
#. For inDrop ==> indrop
Args:
config: The path of configuration file;
genome: Genome version (hg38, mm10, hg38_mm10);
protocol: Library protocol (see Protocols above);
cells: Max number of cells;
minreads: Minimum number of reads for a cell barcode (10000);
name: Sample name;
fq1, fq2: Path to fastq reads;
dir: The processing folder; a subfolder named after the sample will be created;
top_million_reads: Number of reads to process, in millions;
step: Steps to run;
parallel: Number of threads to use;
Steps:
count
stats
split
star
tagging
table
"""
from . import count_barcode
from . import valid_barcode
from . import split_by_barcode
from .barcode.split_fast import split_by_barcode_faster
from .tagging.prime3 import check_reference_files
#Set Config Files...
if config:
if not os.path.exists(config):
sys.exit("[error] The config file does not exist in: {}!".format(config))
os.environ["BASEQCFG"] = config
#Check Annotation Files
print('[info] Checking Reference Files...')
check_reference_files(genome)
print('[info] Start Processing Your RNA-Seq Dataset ...')
dir = os.path.abspath(os.path.join(dir, name))
bc_counts = os.path.join(dir, "barcode_count_{}.csv".format(name))
bc_stats = os.path.join(dir, "barcode_stats_{}.csv".format(name))
bc_splits_dir = os.path.join(dir, "barcode_splits")
align_dir = os.path.join(dir, "star_align")
tagging_dir = os.path.join(dir, "read_tagging")
tpm_table = os.path.join(dir, "Result.UMIs.{}.txt".format(name))
reads_table = os.path.join(dir, "Result.Reads.{}.txt".format(name))
from itertools import product
barcode_prefix = [x[0] + x[1] for x in list(product('ATCG', repeat=2))]
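#the 16 two-base prefixes AA, AT, ..., GG used to shard barcodes across files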
dirs = [dir, align_dir, tagging_dir, bc_splits_dir]
for d in dirs:
if not os.path.exists(d):
os.mkdir(d)
#Check the existence of the files
if not os.path.exists(fq1):
sys.exit("[error] Fastq-1 does not exist!")
if not os.path.exists(fq2):
sys.exit("[error] Fastq-2 does not exist!")
#count barcode
if step in ["all", "count"]:
print("[info] Counting the barcodes ...")
count_barcode(fq1, bc_counts, protocol, min_reads=50, topreads=int(top_million_reads))
#aggregate
if step in ["all", "stats"]:
print("[info] Aggregating barcode errors and getting valid barcodes ...")
valid_barcode(protocol, bc_counts, max_cell=cells, min_reads=minreads, output=bc_stats)
#barcode split
if step in ["all", "split"]:
print("[info] Splitting reads by barcode prefix ...")
split_by_barcode_faster(name, protocol, bc_stats, fq1, fq2,
bc_splits_dir, int(top_million_reads))
#run alignment
if step in ["all", "star"]:
from .star import star_align
star_align(bc_splits_dir, align_dir, name, genome, parallel=int(parallel))
#run reads tagging
if step in ["all", "tagging"]:
from .tagging.prime3 import tagging_reads
print('[info] Tagging the reads to genes...')
pool = mp.Pool(processes=int(parallel))
result = []
for bc in barcode_prefix:
bamfile = os.path.join(align_dir, "{}/{}.sort.bam".format(bc, bc))
outfile = os.path.join(tagging_dir, "tagging.{}.txt".format(bc))
#tagging_reads(genome, bamfile, outfile)
result.append(pool.apply_async(tagging_reads, (genome, bamfile, outfile,)))
pool.close()
pool.join()
#run table aggregation
if step in ["all", "table"]:
print('[info] Build gene expression table from the tagging files...')
from .aggregate import read_barcode_gene_file, write_to_table
pool = mp.Pool(processes=int(parallel))
result = []
for bc in barcode_prefix:
filepath = os.path.join(tagging_dir, "tagging.{}.txt".format(bc))
result.append(pool.apply_async(read_barcode_gene_file, (filepath, 1)))
pool.close()
pool.join()
from itertools import chain
barcodes_all = [x.get()[0] for x in result]
barcodes_lists = list(chain(*barcodes_all))
exp = {}
UMIs_all = [x.get()[1] for x in result]
for UMI in UMIs_all:
for gene in UMI:
if gene in exp:
exp[gene].update(UMI[gene])
else:
exp[gene] = UMI[gene]
write_to_table(barcodes_lists, exp, tpm_table, "UMI Table")
exp = {}
Reads_all = [x.get()[2] for x in result]
for UMI in Reads_all:
for gene in UMI:
if gene in exp:
exp[gene].update(UMI[gene])
else:
exp[gene] = UMI[gene]
write_to_table(barcodes_lists, exp, reads_table, "Read Count Table")
if __name__ == "__main__":
print("begin running pipeline")
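# A minimal invocation sketch (the argument values below are illustrative
# assumptions, not shipped defaults):
# pipeline(config="", genome="hg38", protocol="10X", cells=1000, minreads=1000,
#          name="sample1", fq1="R1.fq.gz", fq2="R2.fq.gz", dir="./work",
#          top_million_reads=100, step="all", parallel=4)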
|
beiseq/baseqDrops
|
package/baseqDrops/pipeline.py
|
pipeline.py
|
py
| 6,329 |
python
|
en
|
code
| 13 |
github-code
|
6
|
39485917026
|
from typing import Tuple
from app.screen.titled_screen import TitledScreen
from app.globals import State, MovieService, TheatreService, BookingService
from core.viewmodels import Ticket
from core.utils import TheatreUtils
class TicketManagementScreen(TitledScreen):
def __init__(self):
super().__init__("Ticket", "Management")
self.options = [self.change_owner, self.change_time]
self.option_names = ["Change owner", "Select new time"]
def fetch_ticket(self) -> Tuple[Ticket, bool]:
ticket = None
while True:
t_id = input("Enter ticket id (Enter 0 to exit): ")
if not t_id.isdigit():
print("Enter a valid ticket id")
continue
t_id = int(t_id)
if t_id == 0:
return ticket, True
ticket = BookingService.get(t_id)
if ticket.id != t_id:
print("Ticket #", t_id, " does not exist")
continue
break
return ticket, False
def deregister_ticket(self, ticket):
TheatreService.deregister_ticket(ticket)
def register_ticket(self, ticket):
TheatreService.register_ticket(ticket)
BookingService.update(ticket)
def change_owner(self, ticket):
while True:
name = input("Enter your name (Enter \"exit\" to cancel): ")
if name == "exit":
return
confirm = input(f"Are you sure you want to change {ticket.owner} to {name}? (Enter \"yes\" to confirm): ")
if confirm != "yes":
continue
self.deregister_ticket(ticket)
ticket.owner = name
self.register_ticket(ticket)
print("Ticket Successfully registered")
return
def check_if_seats_available(self, ticket: Ticket, date: str, time: str) -> bool:
theatreB = TheatreService.get(ticket.movie, date, time)
seats = theatreB.seats
for seat in ticket.seats:
row, col = TheatreUtils.split_seat(seat)
r_int = TheatreUtils.letter_to_index(row)
c_int = col - 1
if seats[r_int][c_int]:
return False
return True
def change_time(self, ticket: Ticket):
times = ["2PM", "5PM", "8PM"]
print("Available times")
for idx, time in enumerate(times):
print(idx + 1, time)
while True:
user_choice = input("Select a time (Enter \"exit\" to cancel): ")
if user_choice == "exit":
return
if not user_choice.isdigit():
print("Enter a valid number")
continue
user_choice = int(user_choice)
if user_choice > len(times) or user_choice <= 0:
print("Enter a valid option")
continue
time = times[user_choice - 1]
if time == ticket.time:
print("Select a different time than displayed on the ticket")
continue
if not self.check_if_seats_available(ticket, ticket.date, time):
print("Seats displayed on the ticket are unavailable for the time selected")
continue
confirm = input(f"Are you sure you want to change from {ticket.time} to {time} (Enter \"yes\" to confirm): ")
if confirm == "yes":
break
self.deregister_ticket(ticket)
ticket.time = time
self.register_ticket(ticket)
def start(self):
super().start()
ticket, exit = self.fetch_ticket()
if exit:
return self.goBack()
while True:
print("Ticket details:", ticket)
print("Select an option")
for idx, option in enumerate(self.option_names):
print(f"{idx + 1}: {option}")
user_choice = 0
while True:
user_choice = input("Enter a number (Enter 0 or \"exit\" to exit): ")
if user_choice == "exit":
return
if not user_choice.isdigit():
print("Enter a valid number")
continue
user_choice = int(user_choice)
if user_choice > len(self.options) or user_choice < 0:
print("Enter a valid option")
continue
break
if user_choice == 0:
break
self.options[user_choice-1](ticket)
return self.goBack()
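# Hypothetical wiring sketch (the real app drives screens through its own
# navigation stack; constructing the screen directly is an assumption):
# screen = TicketManagementScreen()
# screen.start()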
|
IsaTippens/Groupware
|
app/staff/ticket_management.py
|
ticket_management.py
|
py
| 4,674 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31036723867
|
'''
Source code modified from https://github.com/budzianowski/PyTorch-Beam-Search-Decoding/blob/master/decode_beam.py
implementation of beam search on GPT-2's logits
'''
import operator
import torch
import torch.nn as nn
import torch.nn.functional as F
from queue import PriorityQueue
import sys
class BeamSearchNode(object):
def __init__(self, hiddenstate, previousNode, wordId, logProb, length):
'''
:param hiddenstate:
:param previousNode:
:param wordId:
:param logProb:
:param length:
'''
self.h = hiddenstate
self.prevNode = previousNode
self.wordid = wordId
self.logp = logProb
self.leng = length
def eval(self, alpha=1.0):
reward = 0
# Add here a function for shaping a reward
return self.logp / float(self.leng - 1 + 1e-6) + alpha * reward
def __lt__(self, x):
if(self.eval() < x.eval()):
return True
else:
return False
def beam_decode_sentence(hidden_X, config,num_generate=1, beam_size = 5, batch_size = 1):
'''
generate a sentence based on beam search
:param hidden_X: hidden_X of sentence embedding (1024) with/without projection
:param model: GPT-2 model
:param tokenizer: GPT-2 tokenizer
:return: decoded_batch
'''
#SOS_token = tokenizer.encode("<|endoftext|>")
beam_width = beam_size
topk = num_generate # how many sentences to generate
past = None
model = config['model']
tokenizer = config['tokenizer']
eos = tokenizer.encode(tokenizer.eos_token)
EOS_token = eos # list holding the single EOS token id; EOS_token[0] is the id compared below
hidden_X_unsqueeze = torch.unsqueeze(hidden_X, 0)
hidden_X_unsqueeze = torch.unsqueeze(hidden_X_unsqueeze, 0) #[1,1,embed_dim]
decoded_batch = []
# decoding goes sentence by sentence
for idx in range(batch_size):
# Number of sentence to generate
endnodes = []
number_required = min((topk + 1), topk - len(endnodes))
# starting node - hidden vector, previous node, word id, logp, length hiddenstate, previousNode, wordId, logProb, length
node = BeamSearchNode(past, None, torch.tensor([[220]]).cuda(), 0, 1) # 220 refers to single space ' ' on GPT
nodes = PriorityQueue()
# start the queue
nodes.put((-node.eval(), node))
qsize = 1
# start beam search
for text_len in range(50):
# give up when decoding takes too long
if qsize > 2000: break
# fetch the best node
try:
score, n = nodes.get()
except Exception:
print('Cannot get nodes')
while not nodes.empty():
next_item = nodes.get()
print(next_item)
prev_input = n.wordid
past = n.h
if n.wordid.item() == EOS_token[0] and n.prevNode != None:
endnodes.append((score, n))
# if we reached maximum # of sentences required
if len(endnodes) >= number_required:
break
else:
print('continue')
continue
# decode for one step using decoder
if(text_len == 0):
logits, past = model(inputs_embeds=hidden_X_unsqueeze,past_key_values = past,return_dict=False)
else:
logits, past = model(prev_input,past_key_values = past, attention_mask=None, return_dict=False)
logits = logits[:, -1, :]
probs = torch.softmax(logits, dim=-1)
# PUT HERE REAL BEAM SEARCH OF TOP
log_prob, indexes = torch.topk(probs, beam_width)
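# NOTE: despite its name, `log_prob` holds raw softmax probabilities here,
# since topk is taken over `probs` rather than their logarithms.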
nextnodes = []
for new_k in range(beam_width):
decoded_t = indexes[0][new_k].view(1, -1)
log_p = log_prob[0][new_k].item()
#### hiddenstate, previousNode, wordId, logProb, length
node = BeamSearchNode(past, n, decoded_t, n.logp + log_p, n.leng + 1)
score = -node.eval()
nextnodes.append((score, node))
# put them into queue
for i in range(len(nextnodes)):
score, nn = nextnodes[i]
try:
nodes.put((score, nn))
except Exception:
print('Cannot put nodes')
print(score)
print(nn)
# increase qsize
qsize += len(nextnodes) - 1
# for loop ends here
# choose nbest paths, back trace them
if len(endnodes) == 0:
endnodes = [nodes.get() for _ in range(topk)]
utterances = []
text = []
for score, n in sorted(endnodes, key=operator.itemgetter(0)):
utterance = []
utterance.append(n.wordid.item())
# back trace
while n.prevNode != None:
n = n.prevNode
utterance.append(n.wordid.item())
utterance = utterance[::-1]
utterances.append(utterance)
decode_process = tokenizer.decode(utterance[1:-1])
text.append(decode_process)
decoded_batch.append(utterances)
return text
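# NOTE: greedy_decode below relies on names not defined in this file
# (MAX_LENGTH, SOS_token, device, and a `decoder` callable); the importing
# module must supply them.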
def greedy_decode(decoder_hidden, encoder_outputs, target_tensor):
'''
:param target_tensor: target indexes tensor of shape [B, T] where B is the batch size and T is the maximum length of the output sentence
:param decoder_hidden: input tensor of shape [1, B, H] for start of the decoding
:param encoder_outputs: if you are using attention mechanism you can pass encoder outputs, [T, B, H] where T is the maximum length of input sentence
:return: decoded_batch
'''
batch_size, seq_len = target_tensor.size()
decoded_batch = torch.zeros((batch_size, MAX_LENGTH))
decoder_input = torch.LongTensor([[SOS_token] for _ in range(batch_size)], device=device)
for t in range(MAX_LENGTH):
decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.data.topk(1) # get candidates
topi = topi.view(-1)
decoded_batch[:, t] = topi
decoder_input = topi.detach().view(-1, 1)
return decoded_batch
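# Hypothetical usage sketch for beam_decode_sentence (the GPT-2 names are
# assumptions; any model/tokenizer pair with the same interface should work):
# from transformers import GPT2LMHeadModel, GPT2TokenizerFast
# config = {'model': GPT2LMHeadModel.from_pretrained('gpt2').cuda(),
#           'tokenizer': GPT2TokenizerFast.from_pretrained('gpt2')}
# hidden_X = torch.randn(768).cuda() # one sentence embedding
# texts = beam_decode_sentence(hidden_X, config, num_generate=1, beam_size=5)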
|
HKUST-KnowComp/GEIA
|
decode_beam_search.py
|
decode_beam_search.py
|
py
| 6,377 |
python
|
en
|
code
| 22 |
github-code
|
6
|
34600680585
|
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(Z):
"""sigmoid
Arguments:
Z {[np.array]} -- [Wx + b]
Returns:
A - [np.array] -- [1 / (1 + exp(-Z))]
cache - Z
"""
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def relu(Z):
"""rectified linear unit
Arguments:
Z {[np.array]} -- [Wx + b]
Returns:
A - [np.array] -- [max(0,Z)]
cache - Z
"""
A = np.maximum(0,Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
"""
Implement the backward propagation for a single RELU unit.
Arguments:
dA - post-activation gradient
cache - Z
Returns:
dZ - Gradient of the cost with respect to Z
"""
Z = cache
dZ = np.array(dA, copy=True) # just converting dz to a correct object.
# for z <= 0, set dz to 0
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
def sigmoid_backward(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA - post-activation gradient
cache - Z
Returns:
dZ - Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z))
dZ = dA * s * (1-s)
assert (dZ.shape == Z.shape)
return dZ
def print_mislabeled_images(classes, X, y, p):
"""
Plots images where predictions and truth were different.
X -- dataset
y -- true labels
p -- predictions
"""
a = p + y
mislabeled_indices = np.asarray(np.where(a == 1))
print(mislabeled_indices)
num_images = len(mislabeled_indices[0])
for i in range(num_images):
index = mislabeled_indices[1][i]
plt.subplot(2, num_images, i + 1)
plt.imshow(X[:,index].reshape(64,64,3))
plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
plt.show()
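# Quick sanity check of the activation pair (shapes are illustrative):
# Z = np.array([[1.0, -2.0, 3.0]])
# A, cache = relu(Z) # A == [[1., 0., 3.]]
# dZ = relu_backward(np.ones_like(A), cache) # dZ == [[1., 0., 1.]]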
|
anantsrivastava30/deeplearning
|
dnn_utils.py
|
dnn_utils.py
|
py
| 1,924 |
python
|
en
|
code
| 2 |
github-code
|
6
|
74787884667
|
# This file is part of "Junya's self learning project about Neural Network."
#
# "Junya's self learning project about Neural Network"
# is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "Junya's self learning project about Neural Network"
# is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# (c) Junya Kaneko <[email protected]>
import numpy as np
from matplotlib import pyplot
import logging
__author__ = 'Junya Kaneko <[email protected]>'
def sigmoid(z, alpha=1, theta=0):
return 1 / (1 + np.exp(-alpha * (z - theta)))
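# With gain alpha and threshold theta, sigmoid(z) > 0.5 exactly when z > theta,
# which is how Element.out below binarizes its activation.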
def draw_sigmoid_graphs():
dom = np.array([x / 100 for x in range(-300, 300)])
alphas = [1, 10, 50, 100]
nrows = 2
ncols = int(len(alphas)/2)
for i, alpha in enumerate(alphas):
values = np.array([0.0] * len(dom))
for j, x in enumerate(dom):
values[j] = sigmoid(x, alpha=alpha)
pyplot.subplot(nrows, ncols, i + 1)
pyplot.title('Alpha = %s' % alpha)
pyplot.xlabel('x')
pyplot.ylabel('sigmoid(x)')
pyplot.plot(dom, values)
pyplot.tight_layout()
pyplot.show()
class Element:
def __init__(self, w, alpha, theta):
self._w = w if isinstance(w, np.ndarray) else np.array(w)
self._alpha = alpha
self._theta = theta
def out(self, input, alpha=None, theta=None):
_input = input if isinstance(input, np.ndarray) else np.array(input)
_alpha = alpha if alpha else self._alpha
_theta = theta if theta else self._theta
logging.debug('PARAMETERS (alpha, theta): (%s, %s)' % (_alpha, _theta))
logging.debug('INPUT: (%s), SIGMOID: %s' % (','.join([str(i) for i in _input]), sigmoid(self._w.dot(_input), _alpha, _theta)))
if sigmoid(self._w.dot(_input), _alpha, _theta) > 0.5:
logging.debug('OUTPUT: %s' % 1)
return 1
else:
logging.debug('OUTPUT: %s' % 0)
return 0
def __call__(self, input, alpha=None, theta=None):
return self.out(input, alpha, theta)
class AndElement(Element):
def __init__(self):
super().__init__(w=[1, 1], alpha=1, theta=1.5)
class OrElement(Element):
def __init__(self):
super().__init__(w=[1, 1], alpha=1, theta=0.5)
class XorElement:
def out(self, input):
or_element = OrElement()
i1 = np.array([input[0], -input[1]])
i2 = np.array([-input[0], input[1]])
o1 = or_element(i1)
o2 = or_element(i2)
return or_element([o1, o2])
def __call__(self, input):
return self.out(input)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
draw_sigmoid_graphs()
and_element = AndElement()
or_element = OrElement()
xor_element = XorElement()
assert and_element([1, 1])
assert not and_element([1, 0])
assert not and_element([0, 1])
assert not and_element([0, 0])
assert or_element([1, 1])
assert or_element([1, 0])
assert or_element([0, 1])
assert not or_element([0, 0])
assert not xor_element([1, 1])
assert xor_element([1, 0])
assert xor_element([0, 1])
assert not xor_element([0, 0])
|
junyakaneko/learning-and-neural-network
|
chapter3/problem1.py
|
problem1.py
|
py
| 3,651 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41843852670
|
# Code courtesy: https://towardsdatascience.com/support-vector-machine-python-example-d67d9b63f1c8
# Theory: https://www.youtube.com/watch?v=_PwhiWxHK8o
import numpy as np
import cvxopt
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
class SVM:
"""
"""
def __init__(self):
self.alpha = None
self.w = None
self.b = None
self.support_vectors = None
self.support_vector_y = None
def fit(self, X_train, y_train):
n_samples, n_features = X_train.shape
# P = X_train^T X_train
K = np.zeros((n_samples, n_samples))
for i in range(n_samples):
for j in range(n_samples):
K[i, j] = np.dot(X_train[i], X_train[j])
P = cvxopt.matrix(np.outer(y_train, y_train) * K)
# q = -1 (1xN)
q = cvxopt.matrix(np.ones(n_samples) * -1)
# A = y_train^T
A = cvxopt.matrix(y_train, (1, n_samples))
# b = 0
b = cvxopt.matrix(0.0)
# -1 (NxN)
G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
# 0 (1xN)
h = cvxopt.matrix(np.zeros(n_samples))
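# cvxopt.solvers.qp minimizes (1/2) a^T P a + q^T a subject to G a <= h and
# A a = b; with the matrices above this is the hard-margin SVM dual with
# constraints a_i >= 0 and sum_i a_i y_i = 0.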
solution = cvxopt.solvers.qp(P, q, G, h, A, b)
# Lagrange multipliers
a = np.ravel(solution['x'])
# Support vectors have non-zero Lagrange multipliers
sv = a > 1e-5
ind = np.arange(len(a))[sv]
self.alpha = a[sv]
self.support_vectors = X_train[sv]
self.support_vector_y = y_train[sv]
# Intercept
self.b = 0
for n in range(len(self.alpha)):
self.b += self.support_vector_y[n]
self.b -= np.sum(self.alpha * self.support_vector_y * K[ind[n], sv])
self.b /= len(self.alpha)
# Weights
self.w = np.zeros(n_features)
for n in range(len(self.alpha)):
self.w += self.alpha[n] * self.support_vector_y[n] * self.support_vectors[n]
def predict(self, X_test):
return np.sign(np.dot(X_test, self.w) + self.b)
def f1_score(self, X_test, y_test):
pass
if __name__ == '__main__':
X, y = make_blobs(n_samples=250, centers=2, random_state=100, cluster_std=1)
y[y == 0] = -1
tmp = np.ones(len(X))
y = tmp * y
plt.scatter(X[:, 0], X[:, 1], c=y, cmap='winter')
plt.show()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
svm = SVM()
svm.fit(X_train, y_train)
def f(x, w, b, c=0):
return (-w[0] * x - b + c) / w[1]
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap='winter')
# w.x + b = 0
a0 = -4
a1 = f(a0, svm.w, svm.b)
b0 = 4
b1 = f(b0, svm.w, svm.b)
plt.plot([a0, b0], [a1, b1], 'k')
# w.x + b = 1
a0 = -4
a1 = f(a0, svm.w, svm.b, 1)
b0 = 4
b1 = f(b0, svm.w, svm.b, 1)
plt.plot([a0, b0], [a1, b1], 'k--')
# w.x + b = -1
a0 = -4
a1 = f(a0, svm.w, svm.b, -1)
b0 = 4
b1 = f(b0, svm.w, svm.b, -1)
plt.plot([a0, b0], [a1, b1], 'k--')
plt.show()
|
gmortuza/machine-learning-scratch
|
machine_learning/instance_based/svm/svm.py
|
svm.py
|
py
| 3,218 |
python
|
en
|
code
| 6 |
github-code
|
6
|
74492815226
|
"""Wrapper around the scraping processes
"""
import tempfile
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# pylint: disable=unused-import
from scrapper.procesos.patentes_inpi_novedades import patentes_inpi_novedades
from scrapper.procesos.zonaprop import zonaprop
from scrapper.procesos.dummy import dummy_download_file
from scrapper.procesos.inpi_novedades import inpi_novedades
def get_chrome_driver(download_folder, show=False):
"""Configure and return the Chrome driver
Args:
download_folder (str): Desired download folder
show (bool): Whether to show the browser during the operation
Returns:
driver: the configured driver object
"""
chrome_options = Options()
if not show:
chrome_options.add_argument("--headless=new")
chrome_options.add_argument(f"--download-directory={download_folder}")
chrome_options.add_argument("--start-maximized")
chrome_options.add_experimental_option("prefs", {
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"download.default_directory": download_folder,
"safebrowsing.enabled": True,
'safebrowsing.disable_download_protection': True
})
chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
caps = DesiredCapabilities().CHROME
#caps["pageLoadStrategy"] = "normal" # complete
#caps["pageLoadStrategy"] = "eager" # interactive
caps["pageLoadStrategy"] = "none"
driver = webdriver.Chrome(options=chrome_options, desired_capabilities=caps)
return driver
def scrap(proceso,
config,
log,
inputparam=None,
inputfile=None,
outputpath = None,
show_browser=False):
"""Generic execution of a scraping process
Args:
proceso (str): Name of the process
config (Config): Configuration object
log (Log): Logging object
inputparam (str, optional): Free-form parameter string supplied by the user.
Defaults to None.
inputfile (str, optional): Input file. Defaults to None.
outputpath (str, optional): Output folder for the results. Defaults to None.
show_browser (bool, optional): Whether to show the browser during the
scraping process.
Defaults to False.
Returns:
List: List of captured values
"""
if outputpath is None:
workpath = tempfile.mkdtemp()
else:
workpath = outputpath
temp_download_folder = os.path.join(workpath, "tmp")
os.makedirs(temp_download_folder, exist_ok=True)
log.info(f"Download folder: {temp_download_folder}")
driver = get_chrome_driver(download_folder=temp_download_folder, show=show_browser)
section = "proc:" + proceso
function_name = config[section]["function"]
datos = []
if function_name in globals():
function = globals()[function_name]
log.info(f"Invoking: {function_name}")
try:
datos = function(driver=driver,
log=log,
parametros=config[section],
inputfile=inputfile,
tmpdir=workpath,
inputparam=inputparam,
outputpath=outputpath,
show_browser = show_browser)
# pylint: disable=broad-except
except Exception as err:
log.exception("General error while invoking the scraping function")
log.exception(str(err))
else:
log.error(f"Process {function_name} not implemented")
return datos
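# Hypothetical usage sketch (Config and Log objects come from the surrounding
# project; the process name must match a `proc:<name>` section in the config):
# datos = scrap("zonaprop", config, log, outputpath="/tmp/out", show_browser=False)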
|
pmoracho/scrapper-2
|
scrapper/scrapping.py
|
scrapping.py
|
py
| 3,958 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32163459953
|
# import torch
# import torch.nn as nn
# from torch.autograd import Variable
import tensorflow as tf
class reparameterize(tf.keras.Model):
def __init__(self):
super(reparameterize, self).__init__()
@tf.function
def call(self, mu, logvar, sample_num=1, phase='training'):
if phase == 'training':
std = tf.math.exp(logvar*0.5)
eps = tf.keras.backend.random_normal(shape=[1]+std.get_shape().as_list()[1:], mean=0., stddev=1.)
return (eps*std)+mu
else:
raise ValueError('Wrong phase. Always assume training phase.')
# elif phase == 'test':tf.math.exp(
# return mu
# elif phase == 'generation':
# eps = Variable(logvar.data.new(logvar.size()).normal_())
# return eps
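# The reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I) keeps
# the sampling step differentiable with respect to mu and logvar.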
def run():
x1 = tf.random.normal([16,64])
x2 = tf.random.normal([16,64])
import time
s = time.time()
sample(x1,x2)
print(time.time() - s)
if __name__ =="__main__":
sample = reparameterize()
for i in range(20):
run()
print(len(sample.trainable_variables))
|
mihirp1998/ProbabilisticNeuralProgrammedNetwork_Tensorflow
|
lib/reparameterize.py
|
reparameterize.py
|
py
| 1,077 |
python
|
en
|
code
| 8 |
github-code
|
6
|
50277551
|
from typing import *
# ref https://leetcode.cn/problems/naming-a-company/solution/by-endlesscheng-ruz8/
from collections import defaultdict
class Solution:
def distinctNames(self, ideas: List[str]) -> int:
group = defaultdict(int)
for s in ideas:
group[s[1:]] |= 1 << (ord(s[0]) - ord('a'))
ans = 0
# cnt[i][j] = number of suffix groups whose first-letter mask excludes i but includes j.
# While iterating over the groups, for each first letter i present in a group and each
# letter j absent from it, accumulate cnt[i][j]; every valid pair of ideas is counted
# once this way, so the answer is doubled at the end for the two name orderings.
cnt = [[0] * 26 for _ in range(26)]
for mask in group.values():
for i in range(26):
if mask >> i & 1 == 0:
for j in range(26):
if mask >> j & 1:
cnt[i][j] += 1
else:
for j in range(26):
if mask >> j & 1 == 0:
ans += cnt[i][j]
return ans * 2
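# Example from the problem statement: ideas = ["coffee","donuts","time","toffee"]
# gives 6 distinct valid company names.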
|
code-cp/leetcode
|
solutions/6094/main.py
|
main.py
|
py
| 1,093 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34965299401
|
def two_split(nums,target,lo,high):
while(lo<=high):
middle = (lo + high)//2
if(nums[middle] < target):
lo = middle+1
elif(nums[middle]>target):
high = middle-1
else:
return middle
return lo
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
n = len(nums) -1
lo = 0
high=n
if(n<0):
return -1
while(nums[lo]> nums[high]):
middle = (lo+high)//2
if(nums[middle]>nums[high]):
lo = middle+1
else:
high = middle
print(lo)
if(nums[n]<target<=nums[lo-1]):
m = two_split(nums,target,0,lo-1)
elif(nums[lo]<target<=nums[n]):
m = two_split(nums,target,lo,n)
else:
m = lo
if(nums[m]==target):
return m
else:
return -1
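# Example: Solution().search([4,5,6,7,0,1,2], 0) returns 4, the index of the
# target in the rotated sorted array.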
|
qingyuannk/phoenix
|
binary_search/search_rotate_array.py
|
search_rotate_array.py
|
py
| 1,018 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21669508962
|
import requests
from common.log import GetLogger
log = GetLogger().get_logger()
class BaseRequest():
def __init__(self):
pass
def get(self,url,params=None,**kwargs):
try:
response=requests.get(url,params=params,**kwargs)
log.info("========== API request start ==========")
log.info("Request URL: {}\nRequest params: {}".format(url, params))
log.info("Request succeeded. Response: {}".format(response.text))
log.info("=========== end ===========")
return response
except Exception as e:
log.error("API request failed: {0}".format(e))
def post(self,url,data=None,json=None,headers=None,**kwargs):
try:
# encode only string payloads; dict/None data passes through unchanged
if isinstance(data, str):
data = data.encode("utf-8")
response = requests.post(url=url, data=data, json=json, headers=headers, **kwargs)
log.info("=========== API request start ===========")
log.info("Request URL: {}".format(url))
log.info("Request body: {} json: {}".format(data, json))
log.info("Response: {}".format(response.text))
log.info("Response time: {}ms".format(response.elapsed.total_seconds()*1000))
log.info("=========== end ===========")
return response
except Exception as e:
log.error("API request failed: {0}".format(e))
# if __name__ == '__main__':
# Headers = {
# "content-type": "application/json"
# }
# a=BaseRequest()
# a.get("http://192.168.122.105:9900/basic-user/web/business/system/list")
# data={
# "businessSystemCode": "DAC",
# "businessSystemId": 0,
# "businessSystemKey": "DAC",
# "businessSystemName": "基础配置中心"
# }
# a.post("http://192.168.122.105:9900/basic-user/web/business/system/save",data=data,headers=Headers)
|
menghuai1995/PythonAutoCode
|
API_Autotest/common/baseRequest.py
|
baseRequest.py
|
py
| 1,787 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72729152507
|
#!/usr/bin/env python3
import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import PoseWithCovarianceStamped
#from nav_msgs.msg import Odometry
from math import floor
import matplotlib.pyplot as plt
import numpy as np
#This node receives topic 'scanner' and 'poser' and adds noise to them
#--------------------------------------------------------------------------------
# NOISE
# 68% of the observations lie within 1 standard deviation of the mean;
# 95% lie within two standard deviation of the mean;
# 99.9% lie within 3 standard deviations of the mean
SCAN_NOISE = 0.0 # percent
POSE_NOISE = 0.0 # meters
ANG_NOISE = 0.0 # percent
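# With all three constants at 0.0 the node republishes pass-through copies;
# e.g. SCAN_NOISE = 0.05 scales each beam by a factor drawn from N(1, 0.05).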
#--------------------------------------------------------------------------------
# Import your custom code to implement your algorithm logic here
# for example:
class Noiser_Node:
def __init__(self):
# Initialize some necessary variables here
self.node_frequency = None
self.pub_scan = None
self.pub_pose = None
self.scan_sensor = LaserScan
self.position = PoseWithCovarianceStamped
# Initialize the ROS node
rospy.init_node('noiser_node')
rospy.loginfo_once('Noiser Node has started')
# Load parameters from the parameter server
self.load_parameters()
# Initialize the publishers and subscribers
self.initialize_subscribers()
self.initialize_publishers()
# Initialize the timer with the corresponding interruption to work at a constant rate
#self.initialize_timer()
def initialize_publishers(self):
# Initialize the publisher to the topic '/output_topic'
self.pub_scan = rospy.Publisher('scan', LaserScan, queue_size=10)
self.pub_pose = rospy.Publisher('amcl_pose', PoseWithCovarianceStamped, queue_size=10)
def load_parameters(self):
"""
Load the parameters from the configuration server (ROS)
"""
# Node frequency of operation
self.node_frequency = rospy.get_param('node_frequency', 30)
rospy.loginfo('Node Frequency: %s', self.node_frequency)
def initialize_subscribers(self):
"""
Initialize the subscribers to the topics. You should subscribe to
sensor data and odometry (if applicable) here
"""
# Subscribe to the topic '/fake_sensor_topic'
rospy.Subscriber('/scanner', LaserScan, self.callback_scan)
rospy.Subscriber('/poser', PoseWithCovarianceStamped, self.callback_pose)
def callback_scan(self, msg):
self.scan_sensor = msg
scan_noise = abs(np.random.normal(1,SCAN_NOISE,360))
self.scan_sensor.ranges *= scan_noise
self.pub_scan.publish(self.scan_sensor)
def callback_pose(self,msg):
self.position = msg
pose_noise = np.random.normal(0,POSE_NOISE,2)
self.position.pose.pose.position.x += pose_noise[0]
self.position.pose.pose.position.y += pose_noise[1]
ang = 2*np.arcsin(msg.pose.pose.orientation.z)
self.position.pose.pose.orientation.z = np.sin((ang + np.pi*np.random.normal(0,ANG_NOISE,1))/2)
rospy.loginfo('NOISED at: %s', rospy.get_time())
self.pub_pose.publish(self.position)
def main():
# Create an instance of the Noiser_Node class
node = Noiser_Node()
# Spin to keep the script from exiting
rospy.spin()
if __name__ == '__main__':
main()
|
joaofgois/saut_ogm
|
scripts/noiser.py
|
noiser.py
|
py
| 3,485 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34861021547
|
from statuspage.forms import StatusPageModelForm
from utilities.forms import StaticSelect
from ..models import Component, ComponentGroup
__all__ = (
'ComponentForm',
'ComponentGroupForm',
)
class ComponentForm(StatusPageModelForm):
fieldsets = (
('Component', (
'name', 'link', 'description', 'component_group', 'status', 'show_historic_incidents', 'visibility',
'order',
)),
)
class Meta:
model = Component
fields = (
'name', 'link', 'description', 'component_group', 'status', 'show_historic_incidents', 'visibility', 'order'
)
widgets = {
'component_group': StaticSelect(),
'status': StaticSelect(),
}
class ComponentGroupForm(StatusPageModelForm):
fieldsets = (
('Component Group', (
'name', 'description', 'visibility', 'order', 'collapse'
)),
)
class Meta:
model = ComponentGroup
fields = (
'name', 'description', 'visibility', 'order', 'collapse'
)
widgets = {
'collapse': StaticSelect(),
}
|
Status-Page/Status-Page
|
statuspage/components/forms/models.py
|
models.py
|
py
| 1,145 |
python
|
en
|
code
| 45 |
github-code
|
6
|
3005447062
|
import os
class Event_Controler:
def __init__(self, rede_p1, rede_p2):
self.need_objects = True
self.comandos = []
self.redep1 = rede_p1
self.redep2 = rede_p2
def set_objects(self, bola, player_1, player_2, partida):
self.bola = bola
self.player_1 = player_1
self.player_2 = player_2
self.p1_controler = player_1.controle
self.p2_controler = player_2.controle
self.partida = partida
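# Each frame both networks receive (ball x, ball y, own paddle x) and emit a
# scalar; outputs below 0.5 map to "move left", otherwise "move right".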
def atualiza_lista(self):
self.comandos = []
a = self.bola.get_posicao()
r1 = self.redep1.run([self.bola.get_posicao()[0], self.bola.get_posicao()[1], self.player_1.posicao.x])
r2 = self.redep2.run([self.bola.get_posicao()[0], self.bola.get_posicao()[1], self.player_2.posicao.x])
if r1 < 0.5:
self.comandos.append(self.p1_controler.esquerda)
else:
self.comandos.append(self.p1_controler.direita)
if r2 < 0.5:
self.comandos.append(self.p2_controler.esquerda)
else:
self.comandos.append(self.p2_controler.direita)
def get_eventos_list(self):
self.atualiza_lista()
return self.comandos
def finalizar(self):
self.salvar()
def salvar(self):
placar = self.partida.placar_p1 - self.partida.placar_p2
salvar = self.redep1
sufixo = '-P1'
if placar < 0:
salvar = self.redep2
sufixo = '-P2'
placar = -placar
file = open('Redes' + sufixo + '.txt', 'a')
file.write(str(placar) + ',')
# Save hidden-layer weights
for a in range(len(salvar.hlw)):
for b in range(len(salvar.hlw[a])):
for c in range(len(salvar.hlw[a][b])):
if c != len(salvar.hlw[a][b])-1:
file.write(str(salvar.hlw[a][b][c]) + ';')
else:
file.write(str(salvar.hlw[a][b][c]))
if b != len(salvar.hlw[a])-1:
file.write('_')
if a != len(salvar.hlw) - 1:
file.write(':')
#Save output-layer weights
file.write(',')
for a in range(len(salvar.output_weights)):
file.write('')
for b in range(len(salvar.output_weights[a])):
if b != len(salvar.output_weights[a])-1:
file.write(str(salvar.output_weights[a][b]) + ';')
else:
file.write(str(salvar.output_weights[a][b]))
if a != len(salvar.output_weights) - 1:
file.write('_')
# Save hidden-layer biases
file.write(',')
for a in range(len(salvar.hlb)):
file.write('')
for b in range(len(salvar.hlb[a])):
if b != len(salvar.hlb[a]) - 1:
file.write(str(salvar.hlb[a][b]) + ';')
else:
file.write(str(salvar.hlb[a][b]))
if a != len(salvar.hlb) - 1:
file.write('_')
# Save output-layer biases
file.write(',')
for a in range(len(salvar.output_bias)):
if a != len(salvar.output_bias) - 1:
file.write(str(salvar.output_bias[a]) + ';')
else:
file.write(str(salvar.output_bias[a]))
file.write('\n')
file.close()
|
MateusRosario/PingPongGameSimpleNNAI
|
Neural_Net/Event_Controler.py
|
Event_Controler.py
|
py
| 3,465 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37976582239
|
"""
Revised Monitor for screen addition, removing logging & LED
Author: Howard Webb
Date: 10/09/2018
Controller to collect GPS and Sonar data and integrate into a single record
May also collect non-serial data (ie. turbidity)
Store to a file, incrementing for each new run
"""
from __future__ import print_function
from GPS import GPS
from Sonar import Sonar
# from oneWireTemp import getTempC
# from Turbidity import Turbidity
import serial
# Routine to check serial ports for GPS or Sonar messages
# No guarantee which device is on which port
from PortScan import PortScan
from i2c_lcd_util import lcd
from time import sleep
from datetime import datetime
class Monitor(object):
def __init__(self):
"""
Build monitor with GPS and Sonar
:param logger:
"""
# Object level holders for data
# used for recording
self._time = None
self._lat = None
self._lon = None
self._depth = None
self._temp = None
# used for display
self._time_msg = ""
self._lat_msg = ""
self._lon_msg = ""
self._depth_msg = ""
self._temp_msg = ""
self._lcd = lcd()
self._lcd.lcd_display_string("Waiting GPS", 1)
# Option to use test logger or recording logger
# self._tur = Turbidity()
# Get file for logging data
self._file = self.getFile()
self.writeHeader()
self._GPS = None
self._Sonar = None
# Get list of ports and check who is using
# avoids problems when switch USB location
ps = PortScan()
port_list = ps.getPorts()
print(port_list)
if 'GPS' in port_list.keys():
self._GPS = GPS(self.recorder, port_list['GPS'])
else:
# Fail gracefully
self._GPS = None
self._lcd.lcd_display_string("No GPS port found", 1)
raise SystemExit
self._lcd.lcd_display_string("Waiting Sonar", 1)
if 'Sonar' in port_list.keys():
self._Sonar = Sonar(self.recorder, port_list['Sonar'])
else:
# Fail
self._Sonar = None
self._lcd.lcd_display_string("No Sonar found", 1)
raise SystemExit
self._lcd.lcd_display_string(" "*15)
self._lcd.lcd_display_string("Data", 1, 1)
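# NOTE: this loop never returns; constructing Monitor doubles as running the
# GPS/Sonar polling loop, with recorder() invoked from the device callbacks.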
while True:
self._GPS.get()
self._Sonar.get()
#sleep(0.5)
def getFile(self):
name = self.getFileName()
return self.openFile(name)
def getFileName(self):
"""
Create file name for next log
Assumes format 'Log_000.csv'
Will increment the number for the next file
:return: the filename
"""
import glob
filename = '/home/pi/Data/Log_000.csv'
files = glob.glob("/home/pi/Data/*.csv")
if len(files) > 0:
files.sort()
last_file = files[-1]
next_nbr = int(last_file.split('.')[0].split('_')[1])
next_nbr += 1
filename = "{}{}{}".format('/home/pi/Data/Log_', format(next_nbr, '0>3'), '.csv')
return filename
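# e.g. if /home/pi/Data/Log_007.csv is the newest file, the next run writes Log_008.csv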
def openFile(self, name):
"""
Open a file for logging data
:return: the file descriptor
"""
return open(name, 'a')
def writeHeader(self):
# write header
rec = "{}, {}, {}, {}, {}".format('Time', 'Latitude', 'Longitude', 'Depth', 'Temperature')
self.save(rec)
def save(self, rec):
"""
appends the characters in rec to the file
:param rec: the characters to be saved
:return: None, but also prints the rec
"""
self._file.write(rec)
self._file.flush()
def recorder(self, values):
"""
Callback to get messages, when have all three - save them as one record. A log value is printed
when all measurement values have non None values.
:param values:
:return: None
"""
# print "Values", values
# gets data from GPS and Sonar
#print(values)
#print(values["name"])
if values['name'] is None:
print("No Msg Name")
if values['name'] == 'GPRMC': # Time, position and velocity
#print("GPRMC", values)
self._time = values["data"]["time"]
self._lat = values["data"]["lat"]
self._lon = values["data"]["lon"]
self._lat_msg = '{:.6f}'.format(self._lat)
self._lon_msg = '{:.6f}'.format(self._lon)
if values['name'] == 'GPGLL': # Time and location
print("GPGLL", values)
self._time = values["data"]["time"]
self._lat = values["data"]["lat"]
self._lon = values["data"]["lon"]
self._lat_msg = '{:.6f}'.format(self._lat)
self._lon_msg = '{:.6f}'.format(self._lon)
elif values['name'] == 'SDDBT': # Depth
#print("SDDBT", values)
depth = values["data"]["depth"]
# check for missing data
if depth == '':
depth = 0
self._depth = depth
#print(type(self._depth), self._depth)
self._depth_msg = 'D{:04.1f}'.format(float(self._depth))
elif values['name'] == 'YXMTW': # Temperature
#print("YXMTW", values)
self._temp = values["data"]["temp"]
#print(type(self._temp), self._temp)
self._temp_msg = 'T{}'.format(self._temp)
# Save record when have all parts
#print(datetime.now().strftime("%S"))
#print(values["name"], self._lat, self._depth, self._time)
if (self._lat is not None) and (self._depth is not None):
print(values["name"], self._lat, self._depth, self._time)
self.finishLogging()
# display data
self._lcd.lcd_display_string(self._lat_msg, 1, 1)
self._lcd.lcd_display_string(self._depth_msg, 1, 11)
self._lcd.lcd_display_string(self._lon_msg, 2)
self._lcd.lcd_display_string(self._temp_msg, 2, 11)
# clear data for next round of sentences
self._time = None
self._lat = None
self._lon = None
self._depth = None
self._temp = None
def finishLogging(self):
"""
save the logging data to the file
:return: None
"""
rec = "\n{}, {}, {}, {}, {}".format(self._time, self._lat, self._lon, self._depth, self._temp)
self.save(rec)
def test():
""" Quick test with dummy logger """
print("Test Monitor")
main()
def main():
mon = Monitor()
if __name__ == "__main__":
main()
|
webbhm/Sonar-GPS
|
GPS/Monitor.py
|
Monitor.py
|
py
| 7,011 |
python
|
en
|
code
| 2 |
github-code
|
6
|
12064640805
|
"""
A Module for Encrypt and Decrypt message
"""
import ReadWriteFileManagement
# Import the Fernet module
from cryptography.fernet import Fernet
DATABASE_DIR_PATH = "../../databases/chat_db/"
FILE_NAME = "encryptKey.key"
"""
A function for generating an encryption key.
the function generate the using Fernet class,
and saves the key into file in a specific path
"""
def generate_encryption_key():
# generate a random key for encryption
encrypt_key = Fernet.generate_key()
# Writing the key into a file in order to decode the messages when needed;
# create the chat file first if needed (the original conditional double call
# to create_chat_file looked unintentional)
ReadWriteFileManagement.create_chat_file(FILE_NAME)
# Stores the encryption key into the encryption file
with open(DATABASE_DIR_PATH + FILE_NAME, 'wb') as encryptKey:
encryptKey.write(encrypt_key)
"""
A function for Encrypt a text message
"""
def message_encrypt(message_to_encrypt):
# Opening the file that stores the encrypt key
with open(DATABASE_DIR_PATH + FILE_NAME, 'rb') as en_Key:
key = en_Key.read()
# Creating an instance of the Fernet class with encrypt_key,
# so we can encrypt each message using Fernet methods
fernet = Fernet(key)
# Encrypt message
enc_message = fernet.encrypt(message_to_encrypt.encode())
return enc_message
"""
A function for Decrypt a text message
"""
def message_decrypt(message_to_decrypt):
# Opening the file that stores the encrypt key
with open(DATABASE_DIR_PATH + FILE_NAME, 'rb') as encryptKey:
key = encryptKey.read()
# Creating an instance of the Fernet class with encrypt_key,
# so we can encrypt each message using Fernet methods
fernet = Fernet(key)
decode_message = fernet.decrypt(message_to_decrypt.decode())
return decode_message
if __name__ == "__main__":
generate_encryption_key()
message = "Hello"
enc_m = message_encrypt(message)
print(enc_m.decode())
dec_m = message_decrypt(enc_m)
print(dec_m.decode())
|
RoyYotam/My-Chat
|
src/help classes/EncryptMessage.py
|
EncryptMessage.py
|
py
| 2,030 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10136905988
|
import cv2 as cv
import copy
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread("/home/arkaprabha/CViiing/photos/cameron.jpeg")
cv.imshow("image",img)
def reframe(frame=None,scale=0.75):
#note: `scale` is added to each dimension in pixels, not used as a multiplier
width= int(frame.shape[1] + scale)
height = int(frame.shape[0] + scale)
dimen = (width,height)
return cv.resize(frame,dimen,cv.INTER_AREA)
def drawshapes(frame,n1,m1,n2,m2):
cv.rectangle(frame,(n1,m1),(n2,m2),(0,255,0),thickness=2)
cv.circle(frame,(n2//2,m2 + m2//2),40,(0,0,255),thickness = 3)
cv.putText(frame,"Didn't meant to put the circle there :)",(0,m2+10),cv.FONT_HERSHEY_TRIPLEX,0.25,(255,255,255),1)
cv.imshow("Bound",frame)
def translate(img, x,y):
# -x -> Left
# -y -> Up
# x -> Right
# y -> Down
transMat = np.float32([[1,0,x],[0,1,y]])
dimensions = (img.shape[1],img.shape[0])
return cv.warpAffine(img, transMat, dimensions)
def rotate(img,angle,rotPoint = None,scale=1.0):
(height,width)= img.shape[:2]
if (rotPoint is None):
rotPoint= (width//2,height//2)
rotmat = cv.getRotationMatrix2D(rotPoint, angle,scale) #third factor is scale here we pass 1.0 as we dont need to scale for rotation
dimens = (width,height)
return cv.warpAffine(img,rotmat,dimens)
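#quick check of the helpers above (values are illustrative):
#shifted = translate(img, 100, 0) # move 100 px right
#tilted = rotate(img, 45) # rotate 45 degrees counter-clockwise about the centre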
#shows Dr.Cameron's image enlarged by 500 px in each dimension
im2 = reframe(img,500)
cv.imshow("image2",im2)
#draw shapes on the image
x = copy.copy(img)
drawshapes(x,10,10,img.shape[0]//2,img.shape[1]//2)
#grayscale
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
cv.imshow("Gray",gray)
# blur
# gaussian blurr
x = copy.copy(im2)
blurrr = cv.GaussianBlur(x,(41,41),cv.BORDER_DEFAULT)
cv.imshow("Blur",blurrr)
# median blurr
mblur = cv.medianBlur(x,3)
cv.imshow("Median blur",mblur)
# bilateral
bilateral = cv.bilateralFilter(x, 5 ,15,15)
cv.imshow("Bilateral",bilateral)
#edge cascade
canny = cv.Canny(x,90,100)
cv.imshow("canny",canny)
#dilating the image
dilated = cv.dilate(x,(40,40),iterations=5)
cv.imshow("Dilated",dilated)
#eroding the image
eroded = cv.erode(x,(10,10),iterations = 5)
cv.imshow("eroded",eroded)
#translate
translated = translate(x,-100,10)
cv.imshow("Translated",translated)
#rotate
rotated = rotate(x,45)
cv.imshow("Rotated",rotated)
#resize
resized = cv.resize(x,(1000,1000), interpolation = cv.INTER_CUBIC)
cv.imshow("Resized", resized)
#flipping
flip = cv.flip(x,0)
cv.imshow('Flipped',flip)
#thresholding
gray2 = cv.cvtColor(im2,cv.COLOR_BGR2GRAY)
ret,thresh = cv.threshold(gray2,100,125,cv.THRESH_BINARY)
cv.imshow("Thresholded",thresh) #produces a really hot image
print(ret)
#contours
#RETR_LIST = list of all contours
#RETR_EXTERNAL = list of all external contours
#RETR_TREE = list of all hierarchical contours
#Contour approximations
#CHAIN_APPROX_NONE = No approximations
#CHAIN_APPROX_SIMPLE = Compresses all contours into simpler ones(layman explanation)
contours, hierarchies = cv.findContours(thresh,cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE)
print(f'{len(contours)} contours found')
blank = np.zeros(x.shape[:2],dtype='uint8')
cv.drawContours(blank,contours,-1,(255,0,0),1)
cv.imshow("Contours",blank)
#color spaces
hsv = cv.cvtColor(x, cv.COLOR_BGR2HSV)
cv.imshow("HSV",hsv)
#rgb
rgb = cv.cvtColor(x, cv.COLOR_BGR2RGB)
cv.imshow("RGB",rgb)
#lab
lab = cv.cvtColor(x, cv.COLOR_BGR2LAB)
cv.imshow("LAB",lab)
#color_channels
b,g,r = cv.split(im2)
blank = np.zeros(im2.shape[:2],dtype='uint8')
blue = cv.merge([b,blank,blank])
green = cv.merge([blank,g,blank])
red = cv.merge([blank,blank,r])
cv.imshow("Blue",blue)
cv.imshow("Green",green)
cv.imshow("red",red)
#edge detection
#laplacian
lap = cv.Laplacian(gray, cv.CV_64F)
lap = np.uint8(np.absolute(lap))
cv.imshow("Laplacian edge",lap)
#sobel
sobelx = cv.Sobel(gray, cv.CV_64F,1,0)
sobely = cv.Sobel(gray, cv.CV_64F,0,1)
cv.imshow("Sobelx gray",sobelx)
cv.imshow("Sobely gray",sobely)
#histogram
#grayscale histogram
gray_hist = cv.calcHist([gray],[0],None,[256],[0,256])
plt.plot(gray_hist)
plt.title("HISTOGRAM")
#color histogram
colors = ('b','g','r')
for i,col in enumerate(colors):
hist = cv.calcHist([img],[i],None,[256],[0,256])
plt.plot(hist , color = col)
plt.show()
cv.waitKey(0)
|
ArkaprabhaChakraborty/CViiing
|
python/basics.py
|
basics.py
|
py
| 4,193 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13303958661
|
from fastapi import APIRouter, Depends
from app.dependencies import verify_api_key
from app.libraries.libpoller import Poller
from app.schemas.poller import PollerModel, PollerUpdateModel, PollerCreateModel
router = APIRouter(tags=["poller"])
oPoller = Poller()
# Poller Requests ( API_KEY required )
@router.get("/poller/devices")
async def get_poller_devices(poller = Depends(verify_api_key)):
return await oPoller.get_poller_devices(poller)
# CRUID Poller Requests ( JWT required )
@router.get("/poller/schema")
async def get_poller_schema(joined: bool = False):
return await oPoller.get_poller_schema(joined=joined)
@router.get("/poller")
async def get_poller_list(joined: bool = False, limit: int = 100, offset: int = 0, sortField: str = None, sortOrder: str = "asc", search: str = ""):
return await oPoller.get_poller_list(joined=joined, limit=limit, offset=offset, sortField=sortField, sortOrder=sortOrder, search=search)
@router.get("/poller/{pollerid}")
async def get_poller(pollerid: int, joined: bool = False):
return await oPoller.get_poller(pollerid, joined=joined)
@router.post("/poller")
async def create_poller(poller: PollerCreateModel):
return await oPoller.create_poller(poller)
@router.put("/poller/{pollerid}")
async def update_poller(pollerid: int, poller: PollerUpdateModel):
return await oPoller.update_poller(pollerid, poller)
@router.delete("/poller/{pollerid}")
async def delete_poller(pollerid: int):
return await oPoller.delete_poller(pollerid)
|
treytose/Pyonet-API
|
pyonet-api/app/routers/poller.py
|
poller.py
|
py
| 1,534 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15549783234
|
import random
import csv
import time
from src.world import World
from src.user import User
from src.euclidregion import EuclidRegion
from src.regionprovider import GreedyRegionProvider
from src.userprofile import UserProfile
from src.gridregion import GridRegion
region_provider = GreedyRegionProvider.unmodified_generator(None)
world = World(None, region_provider)
MAX_AREA = 100
MAP_SIZE = 300
default_profile = UserProfile(10, MAX_AREA, 3)
def add_random_user(world):
x = random.uniform(0, MAP_SIZE)
y = random.uniform(0, MAP_SIZE)
world.add_user(x, y, default_profile)
def coords_in_bounds(user:User):
x_in = MAX_AREA <= user.xcoord <= MAP_SIZE - MAX_AREA
y_in = MAX_AREA <= user.ycoord <= MAP_SIZE - MAX_AREA
return x_in and y_in
USER_COUNT = 7500
GENERATIONS = 1
for _ in range(0, USER_COUNT):
add_random_user(world)
with open('bias.csv', 'w', newline='') as results:
writer = csv.writer(results)
writer.writerow(["Generation #",
"Region Anonymity",
"Distance to Boundary",
"Probability of distance to boundary",
"Is corner",
"area",
"user x",
"user y",
"x1",
"x2",
"y1",
"y2"])
for gen in range(0, GENERATIONS):
user:User
start = int(round(time.time() * 1000))
middler_users = [u for u in world.users if coords_in_bounds(u)]
print(len(middler_users))
for user in middler_users:
print(int(round(time.time() * 1000)) - start)
start = int(round(time.time() * 1000))
user.update_region()
region:EuclidRegion
region = user.current_region()
data = [gen,
region.privacy,
region.user_dist_to_boundary,
region.user_location_likelihoods[region.user_dist_to_boundary],
region.is_corner,
region.area(),
user.xcoord,
user.ycoord,
region.x_min,
region.x_max,
region.y_min,
region.y_max]
writer.writerow(data)
#print(f"privacy: {region.privacy} \
#\n Distance: {region.user_dist_to_boundary} Expected: {region.user_location_likelihood} \
#\n size: {region.area()} \
#\n location:({user.xcoord}, {user.ycoord}) with region {user.current_region()}")
|
ems236/EECS456XRegions
|
bias_test.py
|
bias_test.py
|
py
| 2,362 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71176383548
|
import os, boto3, datetime, time
region = 'us-west-2'
accesskey = os.environ['ACCESSKEY']
secretkey = os.environ['SECRET_ACCESSKEY']
def send_firehose(message: str):
client = boto3.client('firehose', aws_access_key_id=accesskey, aws_secret_access_key=secretkey, region_name=region)
# Send message to firehose
response = client.put_record(
DeliveryStreamName=os.environ['FIREHOSE_NAME'],
Record={
'Data': message
}
)
return response
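# Hypothetical usage (ACCESSKEY, SECRET_ACCESSKEY and FIREHOSE_NAME must be set
# in the environment; the payload below is illustrative):
# resp = send_firehose('{"event": "test"}\n')
# print(resp["RecordId"])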
|
kfunamizu/python_commonlib
|
firehose/src/firehose.py
|
firehose.py
|
py
| 479 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2931948188
|
from typing import Dict
from network.api.base import BaseRepository
import re
from http.cookies import SimpleCookie
import dukpy
import requests
import http.client
from network.api.base_mainpage_cookies_loader import BaseMainPageCookiesLoader
IPP_JS_PATH = 'resources/ipp.js'
class PagesRepository(BaseRepository, BaseMainPageCookiesLoader):
def __init__(self, user_agent: str, cookies: Dict[str, str] = None):
super().__init__(user_agent=user_agent, cookies=cookies)
def get_main_page_cookies(self):
# first we get ipp cookies
self.get_ipp_cookies()
# then we load main page with ipp cookies
# and get more cookies
self.load_main_page()
def get_cookies(self): # override BaseMainPageCookiesLoader's method
return self.get_main_page_cookies()
def get_ipp_cookies(self):
# send request to main page
response = self.load_main_page(False)
# get js for tokens generation
js_code_regex_result = \
re.findall(r'<script type=\"text/javascript\">(.*)</script>', response.text, flags=re.S | re.M)
assert len(js_code_regex_result) > 0
js_code = js_code_regex_result[0]
# edit js to extract cookies from script's result
with open(IPP_JS_PATH, encoding='utf8') as f:
part_of_code = f.read()
js_code = part_of_code + '\r\n' + js_code + '\r\n' + 'document.cookie'
# evaluate js
ipp_cookies_string = dukpy.evaljs(js_code, user_agent=self.user_agent)
# update cookies
ipp_cookies = SimpleCookie()
ipp_cookies.load(ipp_cookies_string)
for k, v in ipp_cookies.items():
self.cookies[k] = v.value
def load_main_page(self, update_cookies=True) -> requests.Response:
http.client._MAXHEADERS = 1000
# send request to main page
response = requests.get(
'https://dns-shop.ru/',
headers={
'accept': '*/*',
'user-agent': self.user_agent,
'accept-language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7',
},
cookies=self.cookies,
)
if update_cookies:
self.update_cookies(response.cookies)
return response
|
Emperator122/dns-shop_price_comparator
|
network/api/pages.py
|
pages.py
|
py
| 2,270 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17466244152
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: GAN.py
# Author: Yuxin Wu <[email protected]>
import tensorflow as tf
import numpy as np
from tensorpack import (QueueInputTrainerBase, TowerContext,
get_global_step_var)
from tensorpack.tfutils.summary import summary_moving_average, add_moving_summary
from tensorpack.dataflow import DataFlow
class GANTrainer(QueueInputTrainerBase):
def __init__(self, config, g_vs_d=3):
super(GANTrainer, self).__init__(config)
self._build_enque_thread()
if g_vs_d > 1:
self._opt_g = g_vs_d
self._opt_d = 1
else:
self._opt_g = 1
self._opt_d = int(1.0 / g_vs_d)
def _setup(self):
with TowerContext(''):
actual_inputs = self._get_input_tensors_noreuse()
self.model.build_graph(actual_inputs)
self.gs_incr = tf.assign_add(get_global_step_var(), 1, name='global_step_incr')
self.g_min = self.config.optimizer.minimize(self.model.g_loss,
var_list=self.model.g_vars, name='g_op')
self.d_min = self.config.optimizer.minimize(self.model.d_loss,
var_list=self.model.d_vars)
self.d_min = tf.group(self.d_min, summary_moving_average(), name='d_op')
def run_step(self):
for _ in range(self._opt_g):
self.sess.run(self.g_min)
for _ in range(self._opt_d):
self.sess.run(self.d_min)
self.sess.run(self.gs_incr)
class RandomZData(DataFlow):
def __init__(self, shape):
super(RandomZData, self).__init__()
self.shape = shape
def get_data(self):
while True:
yield [np.random.uniform(-1, 1, size=self.shape)]
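# build_GAN_losses below implements the standard non-saturating GAN objective:
# D minimizes cross-entropy toward 1 on real scores and 0 on fake scores,
# while G minimizes cross-entropy toward 1 on the fake scores.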
def build_GAN_losses(vecpos, vecneg):
sigmpos = tf.sigmoid(vecpos)
sigmneg = tf.sigmoid(vecneg)
tf.histogram_summary('sigmoid-pos', sigmpos)
tf.histogram_summary('sigmoid-neg', sigmneg)
d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
vecpos, tf.ones_like(vecpos)), name='d_loss_pos')
d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
vecneg, tf.zeros_like(vecneg)), name='d_loss_neg')
d_pos_acc = tf.reduce_mean(tf.cast(sigmpos > 0.5, tf.float32), name='pos_acc')
d_neg_acc = tf.reduce_mean(tf.cast(sigmneg < 0.5, tf.float32), name='neg_acc')
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
vecneg, tf.ones_like(vecneg)), name='g_loss')
d_loss = tf.add(d_loss_pos, d_loss_neg, name='d_loss')
add_moving_summary(d_loss_pos, d_loss_neg,
g_loss, d_loss,
d_pos_acc, d_neg_acc)
return g_loss, d_loss
|
jxwufan/NLOR_A3C
|
tensorpack/examples/GAN/GAN.py
|
GAN.py
|
py
| 2,676 |
python
|
en
|
code
| 16 |
github-code
|
6
|
9303272302
|
import os
import sys
import shutil
if not os.path.exists("build"):
os.mkdir("build")
os.chdir("build")
code = os.system("cmake .. -DPK_USE_CJSON=ON -DPK_USE_BOX2D=ON")
assert code == 0
code = os.system("cmake --build . --config Release")
assert code == 0
if sys.platform == "win32":
shutil.copy("Release/main.exe", "../main.exe")
shutil.copy("Release/pocketpy.dll", "../pocketpy.dll")
elif sys.platform == "darwin":
shutil.copy("main", "../main")
shutil.copy("libpocketpy.dylib", "../libpocketpy.dylib")
else:
shutil.copy("main", "../main")
shutil.copy("libpocketpy.so", "../libpocketpy.so")
|
0Armaan025/pocketpy
|
cmake_build.py
|
cmake_build.py
|
py
| 624 |
python
|
en
|
code
| null |
github-code
|
6
|
5200005552
|
"""
The production code for predicting smell events and sending push notifications
(sending push notifications requires the rake script in the smell-pittsburgh-rails repository)
"""
import sys
from util import log, generateLogger, computeMetric, isFileHere
import pandas as pd
from getData import getData
from preprocessData import preprocessData
from computeFeatures import computeFeatures
#from selectFeatures import selectFeatures
from trainModel import trainModel
import joblib
from datetime import timedelta, datetime
import os
import subprocess
# The flag to determine the server type
SERVER = "staging"
#SERVER = "production"
# The flag for enabling the rake call to send push notifications
#ENABLE_RAKE_CALL = False
ENABLE_RAKE_CALL = True
# The path for storing push notification data
DATA_PATH = "data_production/"
def main(argv):
mode = None
if len(argv) >= 2:
mode = argv[1]
if mode == "train":
train()
elif mode == "predict":
predict()
else:
print("Use 'python main.py [mode]'; mode can be 'train' or 'predict'")
def train(f_hr=8, b_hr=3, thr=40, method="HCR"):
"""
Train the machine learning model for predicting smell events
Input:
        f_hr: the number of hours to look ahead and compute responses (Y),
...which is the sum of smell ratings (that are larger than 3) over the future f_hr hours
b_hr: the number of hours to look back and compute features (X),
...which are the sensor readings (on ESDR) over the past b_hr hours
thr: the threshold for binning the smell value into two classes (for classification)
method: the method used in the "trainModel.py" file
"""
p = DATA_PATH
# Set logger
logger = generateLogger(p+"log.log")
log("--------------------------------------------------------------------------", logger)
log("--------------------------------- Train --------------------------------", logger)
# Get and preprocess data
end_dt = datetime.now() - timedelta(hours=24)
start_dt = end_dt - timedelta(hours=8000)
log("Get data from " + str(start_dt) + " to " + str(end_dt), logger)
df_esdr_array_raw, df_smell_raw = getData(start_dt=start_dt, end_dt=end_dt, logger=logger)
df_esdr, df_smell = preprocessData(df_esdr_array_raw=df_esdr_array_raw, df_smell_raw=df_smell_raw, logger=logger)
# Compute features
df_X, df_Y, df_C = computeFeatures(df_esdr=df_esdr, df_smell=df_smell, f_hr=f_hr, b_hr=b_hr, thr=thr, is_regr=False,
add_inter=False, add_roll=False, add_diff=False, logger=logger, out_p_mean=p+"mean.csv", out_p_std=p+"std.csv")
# Select features
# NOTE: currently, the best model uses all the features
#df_X, df_Y = selectFeatures(df_X, df_Y, logger=logger, out_p=p+"feat_selected.csv")
# Train, save, and evaluate model
# NOTE: to know more about the model, see the "HybridCrowdClassifier.py" file
model = trainModel({"X": df_X, "Y": df_Y, "C": df_C}, method=method, out_p=p+"model.pkl", logger=logger)
metric = computeMetric(df_Y, model.predict(df_X, df_C), False)
for m in metric:
log(metric[m], logger)
def predict(f_hr=8, b_hr=3, thr=40):
"""
Predict smell events using the trained machine learning model
For the description of the input arguments, see the docstring in the train() function
"""
p = DATA_PATH
# Set logger
logger = generateLogger(p+"log.log")
log("--------------------------------------------------------------------------", logger)
log("-------------------------------- Predict -------------------------------", logger)
# Get data for previous b_hr hours
end_dt = datetime.now()
start_dt = end_dt - timedelta(hours=b_hr+1)
log("Get data from " + str(start_dt) + " to " + str(end_dt), logger)
df_esdr_array_raw, df_smell_raw = getData(start_dt=start_dt, end_dt=end_dt, logger=logger)
df_esdr, df_smell = preprocessData(df_esdr_array_raw=df_esdr_array_raw, df_smell_raw=df_smell_raw, logger=logger)
if len(df_esdr) < b_hr+1:
log("ERROR: Length of esdr is less than " + str(b_hr+1) + " hours", logger)
log("Length of esdr = " + str(len(df_esdr)), logger)
return
# Compute features
df_X, _, df_C = computeFeatures(df_esdr=df_esdr, df_smell=df_smell, f_hr=f_hr, b_hr=b_hr, thr=thr, is_regr=False,
add_inter=False, add_roll=False, add_diff=False, logger=logger, in_p_mean=p+"mean.csv", in_p_std=p+"std.csv")
if len(df_X) != 1:
log("ERROR: Length of X is not 1", logger)
log("Length of X = " + str(len(df_X)), logger)
return
# Select features
# NOTE: currently, the best model uses all the features
#df_feat_selected = pd.read_csv(p+"feat_selected.csv")
#df_X = df_X[df_feat_selected.columns]
# Load model
log("Load model...", logger)
model = joblib.load(p+"model.pkl")
# Predict result
# For the hybrid crowd classifier
# if pred==0, no event
# if pred==1, event predicted by the base estimator
# if pred==2, event detected by the crowd
# if pred==3, event both predicted by the base estimator and detected by the crowd
y_pred = model.predict(df_X, df_C)[0]
log("Prediction for " + str(end_dt) + " is " + str(y_pred), logger)
# Send the predictive push notification
if y_pred in (1, 3): pushType1(end_dt, logger)
# Send the crowd-based notification
# NOTE: comment out the next line when migrating its function to the rails server
if y_pred in (2, 3): pushType2(end_dt, logger)
def pushType1(end_dt, logger):
"""
Send type 1 push notification (predicted by the classifier)
Input:
end_dt: the ending time for getting the ESDR data that is used for prediction (which is the current time)
logger: the python logger created by the generateLogger() function
"""
p = DATA_PATH
# Read the push notification file
nst_p = p + "notification_sent_times.csv"
if isFileHere(nst_p):
        # If the file that stores the notification sending times exists,
        # ...load the times and check if we already sent a notification on the same date
df_nst = pd.read_csv(nst_p, parse_dates=["DateTime"])
        last_date = df_nst["DateTime"].dt.date.iloc[-1] # the last date on which we sent a notification
current_date = end_dt.date()
if current_date == last_date: # check if the dates (only year, month, and day) match
            # We already sent push notifications to users today; do not send another until the next day
log("Ignore this prediction because we already sent a push notification today", logger)
return
else:
        # If the file does not exist, create a new pandas DataFrame to store the times when we send notifications
df_nst = pd.DataFrame(data=[], columns=["DateTime"])
# Send push notification to users
if ENABLE_RAKE_CALL:
# NOTE: Python by default uses the sh terminal but we want it to use bash,
# ...because "source" and "bundle" only works for bash on the Hal11 machine
# ...(on the sh terminal we will want to use "." instead of "source")
cmd = 'source /etc/profile ; cd /var/www/rails-apps/smellpgh/' + SERVER + '/current/ ; bundle exec rake firebase_push_notification:send_prediction["/topics/SmellReports"] RAILS_ENV=' + SERVER + ' >> /var/www/smell-pittsburgh-prediction/py/prediction/data_production/push.log 2>&1'
subprocess.call(["bash", "-c", cmd])
    # Record in a CSV file the time when the system sent the push notification
log("A prediction push notification was sent to users", logger)
df_nst = df_nst.append({"DateTime": end_dt}, ignore_index=True) # append a row to the pandas Dataframe
df_nst.to_csv(nst_p, index=False) # re-write the Dataframe to a CSV file
def pushType2(end_dt, logger):
"""
Send type 2 push notification (verified by the crowd)
Input:
end_dt: the ending time for getting the ESDR data that is used for prediction (which is the current time)
logger: the python logger created by the generateLogger() function
"""
p = DATA_PATH
# Read the crowd push notification file
cvnst_p = p + "crow_verified_notification_sent_times.csv"
if isFileHere(cvnst_p):
        # If the file that stores the notification sending times exists,
        # ...load the times and check if we already sent a notification on the same date
df_cvnst = pd.read_csv(cvnst_p, parse_dates=["DateTime"])
        last_date = df_cvnst["DateTime"].dt.date.iloc[-1] # the last date on which we sent a notification
current_date = end_dt.date()
if current_date == last_date: # check if the dates (only year, month, and day) match
            # We already sent crowd-verified push notifications to users today; do not send another until the next day
log("Ignore this crowd-verified event because we already sent a push notification today", logger)
return
else:
        # If the file does not exist, create a new pandas DataFrame to store the times when we send notifications
df_cvnst = pd.DataFrame(data=[], columns=["DateTime"])
# Send crowd-verified push notification to users
if ENABLE_RAKE_CALL:
# NOTE: Python by default uses the sh terminal but we want it to use bash,
# ...because "source" and "bundle" only works for bash on the Hal11 machine
# ...(on the sh terminal we will want to use "." instead of "source")
cmd = 'source /etc/profile ; cd /var/www/rails-apps/smellpgh/' + SERVER + '/current/ ; bundle exec rake firebase_push_notification:send_prediction_type2["/topics/SmellReports"] RAILS_ENV=' + SERVER + ' >> /var/www/smell-pittsburgh-prediction/py/prediction/data_production/crow_verified_push.log 2>&1'
subprocess.call(["bash", "-c", cmd])
    # Record in a CSV file the time when the system sent the push notification
log("A crowd-verified push notification was sent to users", logger)
df_cvnst = df_cvnst.append({"DateTime": end_dt}, ignore_index=True) # append a row to the pandas Dataframe
df_cvnst.to_csv(cvnst_p, index=False) # re-write the Dataframe to a CSV file
if __name__ == "__main__":
main(sys.argv)
|
CMU-CREATE-Lab/smell-pittsburgh-prediction
|
py/prediction/production.py
|
production.py
|
py
| 10,561 |
python
|
en
|
code
| 6 |
github-code
|
6
|
26986918036
|
# -*- coding: utf-8 -*-
import base64
import pytest
from nameko_grpc.headers import (
HeaderManager,
check_decoded,
check_encoded,
comma_join,
decode_header,
encode_header,
filter_headers_for_application,
sort_headers_for_wire,
)
class TestEncodeHeader:
def test_binary(self):
assert encode_header(("foo-bin", b"123")) == (
b"foo-bin",
base64.b64encode(b"123"),
)
def test_string_value(self):
assert encode_header(("foo", "123")) == (b"foo", b"123")
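# Note on the convention exercised above: header names ending in "-bin" carry
# binary values and are base64-encoded on the wire (decoding must tolerate
# omitted padding, per the gRPC metadata spec); other values stay as text.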
class TestDecodeHeader:
def test_binary(self):
assert decode_header((b"foo-bin", base64.b64encode(b"123"))) == (
"foo-bin",
b"123",
)
def test_binary_with_truncated_padding(self):
padded_value = base64.b64encode(b"1234")
assert padded_value.endswith(b"=")
trimmed_value = padded_value[:-2]
assert decode_header((b"foo-bin", trimmed_value)) == (
"foo-bin",
b"1234",
)
def test_string_value(self):
assert decode_header((b"foo", b"123")) == ("foo", "123")
class TestFilterHeadersForApplication:
def test_no_application(self):
headers = [(":status", "1"), ("content-type", "2"), ("grpc-foo", "3")]
assert filter_headers_for_application(headers) == []
def test_all_application(self):
headers = [("foo", "1"), ("bar", "2"), ("baz", "3")]
assert filter_headers_for_application(headers) == headers
def test_filter(self):
headers = [
("foo", "1"),
(":status", "1"),
("bar", "2"),
("content-type", "2"),
("baz", "3"),
]
assert filter_headers_for_application(headers) == [
("foo", "1"),
("bar", "2"),
("baz", "3"),
]
class TestSortHeadersForWire:
def test_empty(self):
unsorted = []
for_wire = []
assert sort_headers_for_wire(unsorted) == for_wire
def test_already_sorted(self):
unsorted = [
(":status", "1"),
("content-type", "2"),
("grpc-foo", "3"),
("other", "4"),
]
for_wire = [
(":status", "1"),
("content-type", "2"),
("grpc-foo", "3"),
("other", "4"),
]
assert sort_headers_for_wire(unsorted) == for_wire
def test_sort(self):
unsorted = [
("content-type", "2"),
(":status", "1"),
("other", "4"),
("grpc-foo", "3"),
]
for_wire = [
(":status", "1"),
("content-type", "2"),
("grpc-foo", "3"),
("other", "4"),
]
assert sort_headers_for_wire(unsorted) == for_wire
def test_multi_sort(self):
unsorted = [
("content-type", "1"),
("te", "2"),
(":status", "3"),
(":authority", "4"),
("other", "5"),
("grpc-foo", "6"),
("grpc-bar", "7"),
("more", "8"),
(":method", "9"),
]
for_wire = [
(":status", "3"),
(":authority", "4"),
(":method", "9"),
("content-type", "1"),
("te", "2"),
("grpc-foo", "6"),
("grpc-bar", "7"),
("other", "5"),
("more", "8"),
]
assert sort_headers_for_wire(unsorted) == for_wire
class TestCheckEncoded:
def test_empty(self):
assert check_encoded([]) is None
def test_good(self):
assert check_encoded([(b"foo", b"bar")]) is None
def test_bad_name(self):
with pytest.raises(AssertionError):
check_encoded([("foo", b"bar")])
def test_bad_value(self):
with pytest.raises(AssertionError):
check_encoded([(b"foo", "bar")])
class TestCheckDecoded:
def test_empty(self):
assert check_decoded([]) is None
def test_good(self):
assert check_decoded([("foo", "bar")]) is None
def test_bad_name(self):
with pytest.raises(AssertionError):
check_decoded([(b"foo", "bar")])
def test_bad_value(self):
with pytest.raises(AssertionError):
check_decoded([("foo", b"bar")])
def test_good_binary(self):
assert check_decoded([("foo-bin", b"bar")]) is None
def test_bad_binary(self):
with pytest.raises(AssertionError):
check_decoded([("foo-bin", "bar")])
class TestCommaJoin:
def test_string(self):
assert comma_join(["foo", "bar"]) == "foo,bar"
def test_bytes(self):
assert comma_join([b"foo", b"bar"]) == b"foo,bar"
def test_mixed(self):
with pytest.raises(TypeError):
comma_join([b"foo", "bar"])
class TestHeaderManager:
def test_get(self):
manager = HeaderManager()
manager.set(("foo", "bar"))
assert manager.get("foo") == "bar"
def test_get_with_default(self):
manager = HeaderManager()
assert manager.get("foo", "baz") == "baz"
def test_get_multi(self):
manager = HeaderManager()
manager.set(("foo", "bar"), ("foo", "baz"))
assert manager.get("foo") == "bar,baz"
def test_set(self):
manager = HeaderManager()
manager.set(("x", "y"), ("foo", "bar"))
assert manager.get("x") == "y"
assert manager.get("foo") == "bar"
manager.set(("foo", "baz")) # clears existing foo, only
assert manager.get("foo") == "baz"
assert manager.get("x") == "y"
def test_set_from_wire(self):
manager = HeaderManager()
manager.set((b"foo", b"bar"), from_wire=True)
assert manager.get("foo") == "bar"
def test_append(self):
manager = HeaderManager()
manager.set(("x", "y"), ("foo", "bar"))
assert manager.get("x") == "y"
assert manager.get("foo") == "bar"
manager.append(("foo", "baz")) # appends to foo
assert manager.get("foo") == "bar,baz"
assert manager.get("x") == "y"
def test_append_from_wire(self):
manager = HeaderManager()
manager.set(("foo", "bar"))
manager.append((b"foo", b"baz"), from_wire=True)
assert manager.get("foo") == "bar,baz"
def test_for_wire(self):
manager = HeaderManager()
manager.set(("x", "y"), ("foo", "bar"))
assert manager.for_wire == [(b"x", b"y"), (b"foo", b"bar")]
|
nameko/nameko-grpc
|
test/test_headers.py
|
test_headers.py
|
py
| 6,536 |
python
|
en
|
code
| 57 |
github-code
|
6
|
12701288482
|
from collections import deque
import sys
d = deque()
n = int(sys.stdin.readline().rstrip())
for i in range(n):
order = sys.stdin.readline().rstrip().split()
a = order[0]
if a == "push":
d.append(order[1])
elif a == "pop":
if d:
print(d.popleft())
else:
print(-1)
elif a == "size":
print(len(d))
elif a == "empty":
if d:
print(0)
else:
print(1)
elif a == "front":
if d:
print(d[0])
else:
print(-1)
else:
if d:
print(d[-1])
else:
print(-1)
|
MinChoi0129/Algorithm_Problems
|
BOJ_Problems/18258.py
|
18258.py
|
py
| 647 |
python
|
en
|
code
| 2 |
github-code
|
6
|
20794460102
|
import numpy as np
import matplotlib.pyplot as plt
x = []; y = []
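# Sample the parametric curve x = sin(t), y = 2 - 2*cos(t) for t from 0 to
# just past 2*pi: an ellipse centred at (0, 2) with semi-axes 1 (x) and 2 (y).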
for i in range(100):
x.append(np.sin(np.pi/48*i))
y.append(2-2*np.cos(np.pi/48*i))
plt.plot(x, y)
plt.show()
|
duynamrcv/mpc_ros
|
test.py
|
test.py
|
py
| 185 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73886255546
|
import psycopg2
import os
import http.server
import socketserver
import logging
import sys
class IndexHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
self.path = 'index.html'
return http.server.SimpleHTTPRequestHandler.do_GET(self)
print("start app")
app_port = os.environ.get("APP_PORT")
connection = psycopg2.connect(
user=os.environ.get("DB_USER"),
password=os.environ.get("DB_PASSWORD"),
host=os.environ.get("DB_HOST"),
port=os.environ.get("DB_PORT"),
database=os.environ.get("DB_NAME")
)
cursor = connection.cursor()
with open("index.html", "w") as f:
try:
cursor.execute("SELECT * from test;")
for i in cursor:
f.write(f"{i}\n")
    except (Exception, psycopg2.Error) as error:
print(error)
Handler = IndexHandler
with socketserver.TCPServer(("", int(app_port)), Handler) as httpd:
print("Serving at port: ", int(app_port))
httpd.serve_forever()
|
hed854/tp3-kubernetes
|
frontend/app.py
|
app.py
|
py
| 978 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73510645947
|
from collections import defaultdict
N = int(input())
for i in range(N):
    forward = defaultdict(int)
reverse = defaultdict(int)
m,n= map(int,input().split())
nums = []
ans = 0
for i in range(m):
temp = list(map(int,input().split()))
nums.append(temp)
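    # Cells on the same "\" diagonal share i - j and cells on the same "/"
    # anti-diagonal share i + j, so pre-summing each diagonal gives the
    # X-sum through any cell in O(1): forward[i-j] + reverse[i+j] minus the
    # cell itself, which is counted in both sums.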
for i in range(m):
for j in range(n):
temp = i-j
temp2 = i+j
            forward[temp] += nums[i][j]
            reverse[temp2] += nums[i][j]
for i in range(m):
for j in range(n):
            diag_sum = forward[i-j] + reverse[i+j] - nums[i][j]
            ans = max(ans, diag_sum)
print(ans)
|
yonaSisay/a2sv-competitive-programming
|
xsum.py
|
xsum.py
|
py
| 627 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2862637647
|
#coding:utf-8
from API.APIMode import Bit_ZAPI
import time
import urllib
path='/api_v1/tradeAdd'
secret ='a5ccc6b23756d49229844de248b2839c'
params = {}
params['api_key'] = '425c89d5d669ea4de9e20379604505e6'
params['timestamp'] = str(int(time.time()))
params['nonce'] = str(int(time.time() % 1000000))
params['coin'] = str('atm_btc')
params['type'] = 'out'
params['price'] = '0.00000734'
params['number'] = '10'
params['coin'] = 'pok_btc'
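# NOTE: the line above overwrites the 'atm_btc' value assigned to
# params['coin'] earlier in this script.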
params['tradepwd'] = 'qwerqwer'
params = sorted(params.items(), key=lambda item: item[0])
# print params
params = dict(params)
print(params)
print( urllib.parse.urlencode(params))
print( Bit_ZAPI().signature(Path=path,Secret=secret,Params=params))
|
lyonLeeLPL/Bit-Z
|
Transact/Test.py
|
Test.py
|
py
| 692 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74182190907
|
'''
* Group : 23010016
* 65010195 ชลศักดิ์ อนุวารีพงษ์
* chapter : 1 item : 4 attempt : 0002
* Assigned : Tuesday 4th of July 2023 03:28:37 PM --> Submission : Tuesday 4th of July 2023 03:39:57 PM
* Elapsed time : 11 minutes.
* filename : exercise4.py
'''
def odd_list(al):
income_list = al
odd_num_list = []
for num in income_list:
if num % 2 != 0:
odd_num_list.append(num)
return odd_num_list
print(" ***Function Odd List***")
ls = [int(e) for e in input("Enter list numbers : ").split()]
opls = odd_list(ls)
print("Input list : ", ls, "\nOutput list : ", opls)
|
chollsak/KMITL-Object-Oriented-Data-Structures-2D
|
Python1/exercise4.py
|
exercise4.py
|
py
| 725 |
python
|
en
|
code
| 0 |
github-code
|
6
|
810350566
|
'''Random Pick Index - https://leetcode.com/problems/random-pick-index/
Given an integer array nums with possible duplicates, randomly output the index of a given target number.
You can assume that the given target number must exist in the array.
Implement the Solution class:
Solution(int[] nums) Initializes the object with the array nums.
int pick(int target) Picks a random index i from nums where nums[i] == target. If there are multiple valid i's,
then each index should have an equal probability of returning.
Example 1:
Input
["Solution", "pick", "pick", "pick"]
[[[1, 2, 3, 3, 3]], [3], [1], [3]]
Output
[null, 4, 0, 2]
'''
import random
from typing import List
class Solution:
def __init__(self, nums: List[int]):
self.nums = nums
def pick(self, target: int) -> int:
indexes = []
for i in range(len(self.nums)):
if self.nums[i] == target:
indexes.append(i)
return random.choice(indexes)
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.pick(target)
# O(1) space
class Solution:
def __init__(self, nums: List[int]):
self.nums = nums
def pick(self, target: int) -> int:
count = 0
index = 0
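        # Reservoir sampling with a reservoir of size one: the k-th matching
        # index replaces the kept index with probability 1/k, so after the
        # scan every matching index is retained with equal probability.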
for i in range(len(self.nums)):
if self.nums[i] == target:
count += 1
if random.randint(0, count - 1) == 0:
index = i
return index
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.pick(target)
|
Saima-Chaity/Leetcode
|
Array_String/Random Pick Index.py
|
Random Pick Index.py
|
py
| 1,557 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8454082456
|
from uuid import uuid4
from sqlalchemy import text
from critique_wheel.adapters.sqlalchemy import iam_repository
from critique_wheel.critiques.models.critique import Critique
from critique_wheel.members.models.IAM import MemberStatus
from critique_wheel.members.value_objects import MemberId
from critique_wheel.works.models.work import Work
from critique_wheel.works.value_objects import WorkId
def test_repository_can_save_a_basic_member(
session, active_valid_member, valid_work, valid_critique
):
member = active_valid_member
member.id = MemberId()
repo = iam_repository.SqlAlchemyMemberRepository(session)
assert member.works == []
assert member.critiques == []
repo.add(member)
valid_work.id = WorkId()
valid_work.member_id = member.id
member.add_work(valid_work)
member.add_critique(valid_critique)
session.commit()
rows = list(
session.execute(
text(
"SELECT id, username, email, password, member_type, status FROM members"
)
)
)
assert rows == [
(
member.id.get_uuid(),
member.username,
member.email,
member.password,
member.member_type.value,
member.status.value,
)
]
def test_repository_can_get_a_member_by_id(
session, valid_member, valid_work, valid_critique
):
valid_member.id = MemberId()
valid_member.status = MemberStatus.ACTIVE
member = valid_member
repo = iam_repository.SqlAlchemyMemberRepository(session)
repo.add(member)
valid_work.member_id = member.id
valid_critique.member_id = member.id
member.add_work(valid_work)
member.add_critique(valid_critique)
assert len(member.works) == 1
assert len(member.critiques) == 1
session.commit()
stmt = text('SELECT * FROM "members" WHERE id=:id').bindparams(
id=valid_member.id.get_uuid(),
)
rows = session.execute(stmt).fetchall()
assert len(rows) == 1
retrieved_works = session.query(Work).filter_by(member_id=valid_member.id).all()
assert len(retrieved_works) == 1
assert retrieved_works[0].title == valid_work.title
retrieved_critiques = session.query(Critique).filter_by(member_id=member.id).all()
assert len(retrieved_critiques) == 1
assert retrieved_critiques[0].critique_about == valid_critique.critique_about
assert retrieved_works[0].member_id == member.id
assert retrieved_critiques[0].member_id == member.id
assert repo.get_member_by_id(member.id) == member
assert member in repo.list()
def test_repository_can_get_a_member_by_email(session, valid_member):
valid_member.id = MemberId()
valid_member.status = MemberStatus.ACTIVE
valid_member.email = "[email protected]"
member = valid_member
repo = iam_repository.SqlAlchemyMemberRepository(session)
repo.add(member)
session.commit()
assert repo.get_member_by_email(member.email) == member
def test_repository_can_get_a_member_by_username(session, valid_member):
valid_member.id = MemberId()
valid_member.status = MemberStatus.ACTIVE
valid_member.username = "yet_another_username"
member = valid_member
repo = iam_repository.SqlAlchemyMemberRepository(session)
repo.add(member)
session.commit()
assert repo.get_member_by_username(member.username) == member
def test_repository_can_get_a_list_of_members(
session, valid_member, active_valid_member
):
member = valid_member
member_2 = active_valid_member
valid_member.status = MemberStatus.ACTIVE
repo = iam_repository.SqlAlchemyMemberRepository(session)
repo.add(member)
repo.add(member_2)
session.commit()
    assert member in repo.list() and member_2 in repo.list()
def test_repository_returns_None_for_no_member_found(session):
repo = iam_repository.SqlAlchemyMemberRepository(session)
username, email, id = "not_in_db", "[email protected]", uuid4()
assert repo.get_member_by_username(username) is None
assert repo.get_member_by_email(email) is None
assert repo.get_member_by_id(id) is None
|
davidjnevin/ddd_critiquewheel
|
critique_wheel/tests/integration/test_IAM_repository.py
|
test_IAM_repository.py
|
py
| 4,132 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2046128502
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import time
import threading
from verify import Verify
from scrapy import cmdline
from multiprocessing import Process
from Bearcat_ProxyPool.settings import SPIDER_TIME
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
def start_blspider(spider_name, frequency):
args = ['scrapy', 'crawl', spider_name]
while True:
p = Process(target=cmdline.execute, args=(args,))
p.start()
p.join()
time.sleep(frequency)
if __name__ == '__main__':
confs = [{"spider_name": "xici", "frequency": SPIDER_TIME},
{"spider_name": "xila", "frequency": SPIDER_TIME},
{"spider_name": "ihuan", "frequency": SPIDER_TIME},
{"spider_name": "ip3366", "frequency": SPIDER_TIME},
{"spider_name": "nima", "frequency": SPIDER_TIME},
{"spider_name": "kuai", "frequency": SPIDER_TIME}]
for conf in confs:
process = Process(target=start_blspider, args=(conf.get("spider_name"), conf.get("frequency", 0)))
process.start()
    verify_thread = threading.Thread(target=Verify().main)
    verify_thread.start()
|
yuzhiyizhan/Bearcat_ProxyPool
|
main.py
|
main.py
|
py
| 1,261 |
python
|
en
|
code
| 2 |
github-code
|
6
|
18525593045
|
import argparse
import os
import mmcv
import torch
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from drp.apis import set_random_seed, single_gpu_test
from drp.datasets import build_dataloader, build_dataset
from drp.models import build_model
from drp.datasets.pipelines.utils import get_weight
def parse_args():
parser = argparse.ArgumentParser(description='drp tester')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--out', help='output result pickle file')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results')
parser.add_argument(
'--save-path',
default=None,
type=str,
help='path to store images and if not given, will not save image')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
rank, _ = get_dist_info()
# set random seeds
if args.seed is not None:
if rank == 0:
print('set random seed to', args.seed)
set_random_seed(args.seed, deterministic=args.deterministic)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
loader_cfg = {
**dict((k, cfg.data[k]) for k in ['workers_per_gpu'] if k in cfg.data),
**dict(
samples_per_gpu=1,
drop_last=False,
shuffle=False,
dist=distributed),
**cfg.data.get('test_dataloader', {})
}
data_loader = build_dataloader(dataset, **loader_cfg)
model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
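    # Debug: dump parameter shapes before the checkpoint is loaded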
for k,v in model.state_dict().items():
print(f'{k} : {v.shape}')
model.load_state_dict(torch.load(args.checkpoint))
if cfg.edges is not None:
gsea_path, ppi_path, pearson_path = cfg.edges[0], cfg.edges[1], cfg.edges[2]
cell_edges_index, cell_edges_attr = get_weight(gsea_path, ppi_path, pearson_path)
model.update_encoder_buffer(cfg.test_batch_size, cell_edges_attr, cell_edges_index, 18498)
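    # Debug: dump parameter shapes again after the encoder buffers are updated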
for k,v in model.state_dict().items():
print(f'{k} : {v.shape}')
outputs = single_gpu_test(
model.cuda(),
data_loader)
if rank == 0:
print('')
# print metrics
stats = dataset.evaluate(outputs)
for stat in stats:
print('Eval-{}: {}'.format(stat, stats[stat]))
# save result pickle
if args.out:
print('writing results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
if __name__ == '__main__':
main()
|
yivan-WYYGDSG/AGMI
|
tools/test.py
|
test.py
|
py
| 3,775 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30647763320
|
string = "aaabbccccdaajj"
mylist = list(string)
mylist.append(" ")
z = 0
c = []
for y in range(len(mylist)-1):
z = z+1
if mylist[y] != mylist[y+1]:
c.append(z)
z=0
a = []
for x in range(0, len(string)-1):
if string[x] == string[x+1]:
a.append(int(x))
k = ""
for x in a:
mylist[int(x)] = ""
for v in range(0, len(mylist)-1):
k = k + str(mylist[v])
list2 = list(k)
for h in range(len(c)):
list2.insert(2*h+1, c[h])
k=""
for v in range(0, len(list2)):
k = k + str(list2[v])
print(k)
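# For comparison, itertools.groupby yields the same run-length encoding
# ("a3b2c4d1a2j2" for the string above) in a single expression:
from itertools import groupby
print("".join(ch + str(len(list(run))) for ch, run in groupby(string)))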
|
itshimanshu2602/Python_restart
|
code4.py
|
code4.py
|
py
| 567 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14096602197
|
from .helpers import *
class TestClientTeams(ClientTestCase):
def test_teams_get_teams_for_workspace(self):
res = {
"data": [
{ "id": 5832, "name": "Atlanta Braves" },
{ "id": 15923, "name": "New York Yankees" }
]
}
responses.add(GET, 'http://app/workspaces/13523/teams', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.teams.get_teams_for_workspace('13523'), res['data'])
|
Asana/python-asana
|
tests/test_client_teams.py
|
test_client_teams.py
|
py
| 506 |
python
|
en
|
code
| 281 |
github-code
|
6
|
70777980988
|
import random
import re
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras.preprocessing.text import Tokenizer
import torch
def transform_to_index_tensor(pairs,rus_w2i,en_w2i,device):
rus_tensor = []
en_tensor = []
for word in range(len(pairs[0])):
en_tensor.append(en_w2i[pairs[0][word]])
for word in range(len(pairs[1])):
rus_tensor.append(rus_w2i[pairs[1][word]])
return en_tensor,rus_tensor
def split_dataset(pairs, val_size=20000, test_size=20000):
pairs = random.sample(pairs,len(pairs))
pairs_test = pairs[:test_size]
pairs_val = pairs[test_size:test_size+val_size]
pairs_train = pairs[test_size+val_size:len(pairs)]
return pairs_train,pairs_val,pairs_test
def custom_index_tokenizer(phrase,w2i,i2w):
for word in phrase:
if word in w2i:
continue
else:
            if not w2i:
w2i[word] = 0
i2w[0] = word
else:
new_idx = list(i2w)[-1]+1
w2i[word] = new_idx
i2w[new_idx] = word
return w2i,i2w
def filter_double_spaces(word_list):
updated_word_list = []
for w in range(len(word_list)):
if not word_list[w] == '':
updated_word_list.append(word_list[w])
return updated_word_list
def get_data(filename='fra.txt',max_words=-1,plot_res=False):
russian_word_list = []
english_word_list = []
#dictionaries for converting a word to a unique integer and the opposite
russian_word_to_idx = {'<SOS>':0,'<EOS>':1,'<PAD>':2}
english_word_to_idx = {'<SOS>':0,'<EOS>':1,'<PAD>':2}
russian_idx_to_word = {0:'<SOS>',1:'<EOS>',2:'<PAD>'}
english_idx_to_word = {0:'<SOS>',1:'<EOS>',2:'<PAD>'}
pairs = []
#read the dataset from the file
with open(filename, "r", encoding="utf-8") as f:
lines_list = f.read().split("\n")
print("The file total translated words/phrases are: "+str(len(lines_list)))
#get the phrases for each language to a different list
word_counter = 0
for i in range(len(lines_list)):
if not max_words == -1:
word_counter += 1
if word_counter > max_words:
break
try:
lines_list[i].split('\t')[1]
except:
continue
russian_word_list.append(lines_list[i].split('\t')[1])
english_word_list.append(lines_list[i].split('\t')[0])
print("The total english phrases are: " + str(len(english_word_list)))
print("The total russian phrases are: "+str(len(russian_word_list)))
russian_lengths = []
english_lengths = []
russian_words_final = []
english_words_final = []
for phrase in range(len(russian_word_list)):
#remove punc
russian_words = re.sub(r'[^\w\s]', '', russian_word_list[phrase])
english_words = re.sub(r'[^\w\s]', '', english_word_list[phrase])
#to lower case
russian_words = russian_words.lower()
english_words = english_words.lower()
russian_lengths.append(len(russian_words))
english_lengths.append(len(english_words))
#split to space
russian_words = russian_words.split(' ')
english_words = english_words.split(' ')
#filter double spaces
russian_words = filter_double_spaces(russian_words)
english_words = filter_double_spaces(english_words)
#add SOS and EOS tokens
russian_words.insert(0, "<SOS>")
russian_words.append("<EOS>")
english_words.insert(0, "<SOS>")
english_words.append("<EOS>")
pairs.append([english_words,russian_words])
russian_word_to_idx,russian_idx_to_word = custom_index_tokenizer(russian_words,russian_word_to_idx,russian_idx_to_word)
english_word_to_idx, english_idx_to_word = custom_index_tokenizer(english_words, english_word_to_idx,english_idx_to_word)
russian_words_final.append(russian_words)
english_words_final.append(english_words)
if plot_res:
plt.hist(english_lengths, 15, alpha=0.5, label='English Lengths',edgecolor = "black")
plt.hist(russian_lengths, 30, alpha=0.5, label='Russian Lengths',edgecolor = "black")
plt.legend(loc='upper right')
plt.show()
print("Found "+str(len(list(russian_idx_to_word.keys())))+" unique russian words.")
print("Found " + str(len(list(english_idx_to_word.keys()))) + " unique english words.")
return russian_words_final,english_words_final,russian_word_to_idx,russian_idx_to_word,english_word_to_idx,english_idx_to_word,pairs,russian_words_final,english_words_final
|
stefanos50/Seq2Seq-Machine-Translation
|
DataPreprocessing.py
|
DataPreprocessing.py
|
py
| 4,664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34042438403
|
from django.db.models import Q
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from gcloud.core.apis.drf.exceptions import ValidationException
from gcloud.core.apis.drf.viewsets import ApiMixin, permissions
from gcloud.label.models import Label, TemplateLabelRelation
from gcloud.label.serilaziers import LabelSerializer
from gcloud.iam_auth import IAMMeta, get_iam_client, res_factory
from iam.contrib.drf.shortcuts import allow_or_raise_immediate_response
from iam import Subject, Action
iam = get_iam_client()
class LabelViewSet(ApiMixin, ModelViewSet):
queryset = Label.objects.all()
serializer_class = LabelSerializer
permission_classes = [permissions.IsAuthenticated]
filter_backends = [DjangoFilterBackend]
filterset_fields = "__all__"
def list(self, request, *args, **kwargs):
project_id = request.query_params.get("project_id")
if not project_id:
raise ValidationException("project_id should be provided.")
allow_or_raise_immediate_response(
iam=iam,
system=IAMMeta.SYSTEM_ID,
subject=Subject("user", request.user.username),
action=Action(IAMMeta.PROJECT_VIEW_ACTION),
resources=res_factory.resources_for_project(project_id),
)
return super(LabelViewSet, self).list(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
label = self.get_object()
if label.is_default:
raise ValidationException("default label cannot be updated.")
project_id = label.project_id
allow_or_raise_immediate_response(
iam=iam,
system=IAMMeta.SYSTEM_ID,
subject=Subject("user", request.user.username),
action=Action(IAMMeta.PROJECT_EDIT_ACTION),
resources=res_factory.resources_for_project(project_id),
)
return super(LabelViewSet, self).update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
label = self.get_object()
if label.is_default:
raise ValidationException("default label cannot be deleted.")
project_id = label.project_id
allow_or_raise_immediate_response(
iam=iam,
system=IAMMeta.SYSTEM_ID,
subject=Subject("user", request.user.username),
action=Action(IAMMeta.PROJECT_EDIT_ACTION),
resources=res_factory.resources_for_project(project_id),
)
self.perform_destroy(label)
return Response({"result": True, "message": "success"})
@action(methods=["get"], detail=False)
def list_with_default_labels(self, request, *args, **kwargs):
project_id = request.query_params.get("project_id")
if not project_id:
raise ValidationException("project_id should be provided.")
allow_or_raise_immediate_response(
iam=iam,
system=IAMMeta.SYSTEM_ID,
subject=Subject("user", request.user.username),
action=Action(IAMMeta.PROJECT_VIEW_ACTION),
resources=res_factory.resources_for_project(project_id),
)
queryset = Label.objects.filter(Q(project_id=project_id) | Q(is_default=True))
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
@action(methods=["get"], detail=False)
def get_templates_labels(self, request):
return self._fetch_label_or_template_ids(request, fetch_label=True)
@action(methods=["get"], detail=False)
def get_label_template_ids(self, request):
return self._fetch_label_or_template_ids(request, fetch_label=False)
@staticmethod
def _fetch_label_or_template_ids(request, fetch_label):
base_id_name = "template_ids" if fetch_label else "label_ids"
if fetch_label:
fetch_func = TemplateLabelRelation.objects.fetch_templates_labels
else:
fetch_func = TemplateLabelRelation.objects.fetch_label_template_ids
base_ids = request.query_params.get(base_id_name)
if not base_ids:
raise ValidationException("{} must be provided.".format(base_id_name))
project_id = request.query_params.get("project_id")
allow_or_raise_immediate_response(
iam=iam,
system=IAMMeta.SYSTEM_ID,
subject=Subject("user", request.user.username),
action=Action(IAMMeta.PROJECT_VIEW_ACTION),
resources=res_factory.resources_for_project(project_id),
)
base_ids = [int(base_id) for base_id in base_ids.strip().split(",")]
return Response(fetch_func(base_ids))
|
caiyj/bk-sops
|
gcloud/label/viewsets.py
|
viewsets.py
|
py
| 4,792 |
python
|
en
|
code
| null |
github-code
|
6
|
911733982
|
import pdfplumber
import os,re
file_path = "/home/FDDC_announcements_round2_train_pdf/"
def pdf_tbl2txt(file):
    # Open the given PDF and, for every table detected on a page, crop the
    # page to the table's bounding box and print the text found inside it.
    pdf = pdfplumber.open(os.path.join(file_path, file))
    for page in pdf.pages:
        for table in page.find_tables(table_settings={}):
            cropped = page.crop(table.bbox)
            print(cropped.extract_text())
|
YankeeMarco/aliyun-FDDC-2018-Financial-Challenge-
|
pdf_to_text_with_table_tags.py
|
pdf_to_text_with_table_tags.py
|
py
| 322 |
python
|
en
|
code
| 14 |
github-code
|
6
|
31011028990
|
"""
Name: Timothy James Duffy, Kevin Falconett
File: metrics.py
Class: CSc 483; Spring 2023
Project: TextSummarizer
Provides methods to calculate the ROUGE metric and print the results.
"""
# Filter tensorflow warnings.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from database import *
from rouge import Rouge
from config import DATABASE_NAME
from summarizer import generate_summary
def get_rouge_scores(num_docs, offset):
"""Gets the rogue scores using documents in the database. Documents can be offset by id."""
# Get articles and summaries from the database.
database = Database(DATABASE_NAME)
articles, summaries = zip(*database.get_data(num_docs, offset))
# Holds the generated summaries.
generated_summaries = []
# Generate summaries for the articles.
for i, article in enumerate(articles):
summary = generate_summary(article)
generated_summaries.append(summary)
print('Actual summary:\n{}'.format(summaries[i]))
print('Generated summary:\n{}\n'.format(generated_summaries[i]))
# Calculate ROUGE scores for the sample set.
rouge = Rouge()
scores = rouge.get_scores(generated_summaries, list(summaries), avg=True)
return scores
def print_scores(scores):
"""Prints the given ROUGE scores in a nice format."""
# Get ROUGE dictionaries. Each contains recall, precision, accuracy scores.
r1 = scores['rouge-1']
r2 = scores['rouge-2']
rl = scores['rouge-l']
# Print out the scores for Rouge-1, Rouge-2, and Rouge-l.
print('Rouge-1\trecall:\t{:.2f}\tprecision:\t{:.2f}\tf1_score:\t{:.2f}'.format(r1['r'], r1['p'], r1['f']))
print('Rouge-2\trecall:\t{:.2f}\tprecision:\t{:.2f}\tf1_score:\t{:.2f}'.format(r2['r'], r2['p'], r2['f']))
print('Rouge-l\trecall:\t{:.2f}\tprecision:\t{:.2f}\tf1_score:\t{:.2f}\n'.format(rl['r'], rl['p'], rl['f']))
def main():
# Prints the ROUGE results for data the model has been trained on.
print('Trained Data ROUGE Scores:')
trained_data = get_rouge_scores(1, 0)
print_scores(trained_data)
# Prints the ROUGE results for data the model has NOT been trained on.
print('Untrained Data ROUGE Scores:')
untrained_data = get_rouge_scores(1, 1200)
print_scores(untrained_data)
if __name__ == '__main__':
main()
|
tjdaz/TextSummarizer
|
metrics.py
|
metrics.py
|
py
| 2,310 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21965971169
|
#### Author: Ernesto González 52857
#### Date: 12/10/2019
import pylab
class DerivativeApproxManager():
def __init__(self, function, function_derivative, x_point, h_values):
self.function = function
self.function_derivative = function_derivative
self.x = x_point
self.h_values = h_values
def approx_function_derivative(self, h):
return (self.function(self.x+h)-self.function(self.x))/h
def approx_abs_error(self, h):
return abs(self.function_derivative(self.x)-self.approx_function_derivative(h))
def absolute_error_per_h(self, plot_title, x_label, y_label, plot_filename):
absolute_error_per_h = []
for h in self.h_values:
absolute_error_per_h.append(self.approx_abs_error(h))
# Plot absolute_error_per_h per h in log scale axis
pylab.figure(figsize=(10,10))
pylab.loglog(self.h_values, absolute_error_per_h, 'ro')
pylab.grid(color='k', linestyle='--', linewidth=0.5)
# pylab.title(plot_title, fontsize=20, fontweight='bold')
pylab.xticks(fontsize=22)
pylab.yticks(fontsize=22)
pylab.xlabel(x_label, fontsize=23)
pylab.ylabel(y_label, fontsize=23)
pylab.savefig(plot_filename)
pylab.show()
#####################APPLICATION######################
# Define the function used in this example
def f(x):
return x**2
def f_linha(x):
return 2*x
# First case: h values in powers of ten
valores_h10 = [10**(-i) for i in range(21)]
erros_base10 = DerivativeApproxManager(x_point=1,
function=f,
function_derivative=f_linha,
h_values=valores_h10)
erros_base10.absolute_error_per_h(
plot_title="Erros Absolutos de Aproximações à primeira derivada\n da função quadrática usando h de base decimal",
x_label="h",
y_label="Erro Absoluto",
plot_filename="erroh10.png")
# Second case: h values in powers of two
valores_h2 = [2**(-i) for i in range(61)]
erros_base2 = DerivativeApproxManager(x_point=1,
function=f,
function_derivative=f_linha,
h_values=valores_h2)
erros_base2.absolute_error_per_h(
plot_title="Erros Absolutos de Aproximações à primeira derivada\n da função quadrática usando h de base binária",
x_label="h",
y_label="Erro Absoluto",
plot_filename="erroh2.png")
|
ernestofgonzalez/PhysicsBsc
|
TerceiroSemestre/MétodosNuméricos/Exercício01/derivative_approx.py
|
derivative_approx.py
|
py
| 2,551 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
24528285056
|
import concurrent.futures
import logging
import time
def thread_function(name):
logging.info("Thread %s: starting", name)
time.sleep(2)
logging.info("Thread %s: finishing", name)
class FakeDatabase:
def __init__(self):
self.value = 0
def update(self, name):
logging.info("Thread %s: starting update", name)
local_copy = self.value
local_copy += 1
time.sleep(0.1)
self.value = local_copy
logging.info("Thread %s: finishing update", name)
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
# threads = list()
# for index in range(3):
# logging.info("Main : create and start thread %d.", index)
# x = threading.Thread(target=thread_function, args=(index,))
# threads.append(x)
# x.start()
#
# for index, thread in enumerate(threads):
# logging.info("Main : before joining thread %d.", index)
# thread.join()
# logging.info("Main : thread %d done", index)
#
# with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor: # replace Max_Workers with number of threads and execute it using .map
# executor.map(thread_function, range(3))
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
database = FakeDatabase()
logging.info("Testing update. Starting value is %d.", database.value)
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
for index in range(2):
executor.submit(database.update, index)
logging.info("Testing update. Ending value is %d.", database.value)
|
hiddenxx/Scripts
|
Learning/LearningThreads.py
|
LearningThreads.py
|
py
| 1,819 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6575851777
|
from digitemp.master import UART_Adapter
from digitemp.device import DS18B20
import asyncio
import logging
class TemperatureMonitor:
def __init__(self):
self.bus = UART_Adapter('/dev/ttyUSB0')
self.sensor = DS18B20(self.bus)
self.listeners = []
def register_listener(self, listener):
# A listener has a on_notify(new_temp) async function
self.listeners.append(listener)
def unregister_listener(self, listener):
self.listeners.remove(listener)
async def notify_listeners(self):
cur_temp = self.sensor.get_temperature()
# f = open('afile', 'r')
# cur_temp = int(f.readline().strip())
# f.close()
self.cur_temp = cur_temp
logging.info(f'Temperature is: {cur_temp}')
await asyncio.gather(
*(l.on_notify(cur_temp) for l in self.listeners)
)
async def monitor(self):
while True:
await asyncio.gather(
self.notify_listeners(),
asyncio.sleep(10)
)
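# Minimal usage sketch: any object with an async on_notify(new_temp) method
# can be registered as a listener. PrintListener here is illustrative, and
# running this requires a DS18B20 on /dev/ttyUSB0 as configured above.
class PrintListener:
    async def on_notify(self, new_temp):
        print(f"temperature is now {new_temp}")
if __name__ == '__main__':
    monitor = TemperatureMonitor()
    monitor.register_listener(PrintListener())
    asyncio.run(monitor.monitor())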
|
SchrodingersCat00/vuurwachter
|
src/temp_monitor.py
|
temp_monitor.py
|
py
| 1,091 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40128251295
|
"""
http://www.sphinx-doc.org/en/stable/ext/doctest.html
https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/doctest.py
* TODO
** CLEANUP: use the sphinx directive parser from the sphinx project
"""
import doctest
import enum
import re
import sys
import textwrap
import traceback
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import _pytest.doctest
import pytest
from _pytest.config import Config
from _pytest.doctest import DoctestItem
from _pytest.doctest import _is_mocked
from _pytest.doctest import _patch_unwrap_mock_aware
from _pytest.main import Session
from _pytest.pathlib import import_path
from _pytest.python import Package
if TYPE_CHECKING:
import io
import pdb
from doctest import _Out
_SpoofOut = io.StringIO
class SphinxDoctestDirectives(enum.Enum):
TESTCODE = 1
TESTOUTPUT = 2
TESTSETUP = 3
TESTCLEANUP = 4
DOCTEST = 5
_DIRECTIVES_W_OPTIONS = (
SphinxDoctestDirectives.TESTOUTPUT,
SphinxDoctestDirectives.DOCTEST,
)
_DIRECTIVES_W_SKIPIF = (
SphinxDoctestDirectives.TESTCODE,
SphinxDoctestDirectives.TESTOUTPUT,
SphinxDoctestDirectives.TESTSETUP,
SphinxDoctestDirectives.TESTCLEANUP,
SphinxDoctestDirectives.DOCTEST,
)
def pytest_collect_file(
file_path: Path, parent: Union[Session, Package]
) -> Optional[Union["SphinxDoctestModule", "SphinxDoctestTextfile"]]:
config = parent.config
if file_path.suffix == ".py":
if config.option.doctestmodules:
mod: Union[
"SphinxDoctestModule", "SphinxDoctestTextfile"
] = SphinxDoctestModule.from_parent(parent, path=file_path)
return mod
elif _is_doctest(config, file_path, parent):
return SphinxDoctestTextfile.from_parent(parent, path=file_path) # type: ignore
return None
GlobDict = Dict[str, Any]
def _is_doctest(config: Config, path: Path, parent: Union[Session, Package]) -> bool:
if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path):
return True
globs = config.getoption("doctestglob") or ["test*.txt"]
assert isinstance(globs, list)
for glob in globs:
if path.match(path_pattern=glob):
return True
return False
# This regular expression looks for option directives in the expected output
# (testoutput) code of an example. Option directives are comments starting
# with ":options:".
_OPTION_DIRECTIVE_RE = re.compile(r':options:\s*([^\n\'"]*)$')
_OPTION_SKIPIF_RE = re.compile(r':skipif:\s*([^\n\'"]*)$')
_DIRECTIVE_RE = re.compile(
r"""
\s*\.\.\s
(?P<directive>(testcode|testoutput|testsetup|testcleanup|doctest))
::\s*
(?P<argument>([^\n'"]*))
$
""",
re.VERBOSE,
)
def _split_into_body_and_options(
section_content: str,
) -> Tuple[str, Optional[str], Dict[int, bool]]:
"""Parse the the full content of a directive and split it.
It is split into a string, where the options (:options:, :hide: and
:skipif:) are removed, and into options.
If there are options in `section_content`, they have to appear at the
very beginning. The first line that is not an option (:options:, :hide:
and :skipif:) and not a newline is the first line of the string that is
returned (`remaining`).
Parameters
----------
section_content : str
String consisting of optional options (:skipif:, :hide:
or :options:), and of a body.
Returns
-------
body : str
skipif_expr : str or None
flag_settings : dict
Raises
------
ValueError
* If options and the body of the section are not
separated by a newline.
* If the body of the section is empty.
"""
lines = section_content.strip().splitlines()
skipif_expr = None
flag_settings = {}
i = 0
for line in lines:
stripped = line.strip()
if _OPTION_SKIPIF_RE.match(stripped):
skipif_match = _OPTION_SKIPIF_RE.match(stripped)
assert skipif_match is not None
skipif_expr = skipif_match.group(1)
i += 1
elif _OPTION_DIRECTIVE_RE.match(stripped):
directive_match = _OPTION_DIRECTIVE_RE.match(stripped)
assert directive_match is not None
option_strings = directive_match.group(1).replace(",", " ").split()
for option in option_strings:
if (
option[0] not in "+-"
or option[1:] not in doctest.OPTIONFLAGS_BY_NAME
):
raise ValueError(f"doctest has an invalid option {option}")
flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]]
flag_settings[flag] = option[0] == "+"
i += 1
elif stripped == ":hide:":
i += 1
else:
break
if i == len(lines):
raise ValueError("no code/output")
body = "\n".join(lines[i:]).lstrip()
if not body:
raise ValueError("no code/output")
if i and lines[i].strip():
# no newline between option block and body
raise ValueError(f"invalid option block: {section_content!r}")
return body, skipif_expr, flag_settings
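# Illustrative example: a section whose content is
#     :skipif: sys.version_info < (3, 8)
#     :options: +NORMALIZE_WHITESPACE
#
#     print("hello")
# yields body 'print("hello")', skipif_expr 'sys.version_info < (3, 8)' and
# flag_settings {doctest.NORMALIZE_WHITESPACE: True}.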
def _get_next_textoutputsections(
sections: List["Section"], index: int
) -> Iterator["Section"]:
"""Yield successive TESTOUTPUT sections."""
for j in range(index, len(sections)):
section = sections[j]
if section.directive == SphinxDoctestDirectives.TESTOUTPUT:
yield section
else:
break
SectionGroups = Optional[List[str]]
class Section:
def __init__(
self,
directive: SphinxDoctestDirectives,
content: str,
lineno: int,
groups: SectionGroups = None,
) -> None:
super().__init__()
self.directive = directive
self.groups = groups
self.lineno = lineno
body, skipif_expr, options = _split_into_body_and_options(content)
if skipif_expr and self.directive not in _DIRECTIVES_W_SKIPIF:
raise ValueError(f":skipif: not allowed in {self.directive}")
if options and self.directive not in _DIRECTIVES_W_OPTIONS:
raise ValueError(f":options: not allowed in {self.directive}")
self.body = body
self.skipif_expr = skipif_expr
self.options = options
def get_sections(docstring: str) -> List[Union[Any, Section]]:
lines = textwrap.dedent(docstring).splitlines()
sections = []
def _get_indentation(line: str) -> int:
return len(line) - len(line.lstrip())
def add_match(
directive: SphinxDoctestDirectives, i: int, j: int, groups: SectionGroups
) -> None:
sections.append(
Section(
directive,
textwrap.dedent("\n".join(lines[i + 1 : j])),
lineno=j - 1,
groups=groups,
)
)
i = 0
while True:
try:
line = lines[i]
except IndexError:
break
match = _DIRECTIVE_RE.match(line)
if match:
group = match.groupdict()
directive = getattr(SphinxDoctestDirectives, group["directive"].upper())
groups = [x.strip() for x in (group["argument"] or "default").split(",")]
indentation = _get_indentation(line)
# find the end of the block
j = i
while True:
j += 1
try:
block_line = lines[j]
except IndexError:
add_match(directive, i, j, groups)
break
if block_line.lstrip() and _get_indentation(block_line) <= indentation:
add_match(directive, i, j, groups)
i = j - 1
break
i += 1
return sections
def docstring2examples(
docstring: str, globs: Optional[GlobDict] = None
) -> List[Union[Any, doctest.Example]]:
"""
Parse all sphinx test directives in the docstring and create a
list of examples.
"""
# TODO subclass doctest.DocTestParser instead?
if globs is None:
globs = {}
sections = get_sections(docstring)
def get_testoutput_section_data(
section: "Section",
) -> Tuple[str, Dict[int, bool], int, Optional[Any]]:
want = section.body
exc_msg = None
options: Dict[int, bool] = {}
if section.skipif_expr and eval(section.skipif_expr, globs):
want = ""
else:
options = section.options
match = doctest.DocTestParser._EXCEPTION_RE.match(want) # type: ignore
if match:
exc_msg = match.group("msg")
return want, options, section.lineno, exc_msg
examples = []
for i, current_section in enumerate(sections):
# TODO support SphinxDoctestDirectives.TESTSETUP, ...
if current_section.directive == SphinxDoctestDirectives.TESTCODE:
next_testoutput_sections = _get_next_textoutputsections(sections, i + 1)
section_data_seq = [
get_testoutput_section_data(s) for s in next_testoutput_sections
]
num_unskipped_sections = len([d for d in section_data_seq if d[0]])
if num_unskipped_sections > 1:
raise ValueError("There are multiple unskipped TESTOUTPUT sections")
if num_unskipped_sections:
want, options, _, exc_msg = next(d for d in section_data_seq if d[0])
else:
# no unskipped testoutput section
# do we really need doctest.Example to test
# independent TESTCODE sections?
want, options, exc_msg = "", {}, None
if current_section.skipif_expr and eval(current_section.skipif_expr, globs):
# TODO add the doctest.Example to `examples` but mark it as
# skipped.
continue
examples.append(
doctest.Example(
source=current_section.body,
want=want,
exc_msg=exc_msg,
# we want to see the ..testcode lines in the
# console output but not the ..testoutput
# lines
# TODO why do we want to hide testoutput??
lineno=current_section.lineno,
options=options,
)
)
return examples
class SphinxDocTestRunner(doctest.DebugRunner):
"""
    Overwrite doctest.DocTestRunner.__run, which compiles examples in
    'single' mode; testcode blocks must be compiled in 'exec' mode instead.
"""
_checker: "doctest.OutputChecker"
_fakeout: "_SpoofOut"
debugger: "pdb.Pdb"
def _DocTestRunner__run(
self, test: doctest.DocTest, compileflags: int, out: "_Out"
) -> doctest.TestResults:
"""
Run the examples in `test`.
Write the outcome of each example with one of the
`DocTestRunner.report_*` methods, using the writer function
`out`. `compileflags` is the set of compiler flags that should
be used to execute examples. Return a tuple `(f, t)`, where `t`
is the number of examples tried, and `f` is the number of
examples that failed. The examples are run in the namespace
`test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (
self.optionflags & doctest.REPORT_ONLY_FIRST_FAILURE and failures > 0
)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & doctest.SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = "<doctest %s[%d]>" % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec(
compile(example.source, filename, "exec", compileflags, 1),
test.globs,
)
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except Exception:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_msg = traceback.format_exception_only(*exception[:2])[-1]
if not quiet:
got += doctest._exception_traceback(exception) # type:ignore
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & doctest.IGNORE_EXCEPTION_DETAIL:
if check(
doctest._strip_exception_details( # type:ignore
example.exc_msg,
),
doctest._strip_exception_details(exc_msg), # type:ignore
self.optionflags,
):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
assert exception is not None
assert out is not None
self.report_unexpected_exception(
out,
test,
example,
exception, # type:ignore
)
failures += 1
else:
assert False, ("unknown outcome", outcome)
if failures and self.optionflags & doctest.FAIL_FAST:
break
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self._DocTestRunner__record_outcome(test, failures, tries) # type:ignore
return doctest.TestResults(failures, tries)
class SphinxDocTestParser:
def get_doctest(
self,
docstring: str,
globs: Dict[str, Any],
name: str,
filename: str,
lineno: int,
) -> doctest.DocTest:
        # TODO: document why we need to override get_doctest
return doctest.DocTest(
examples=docstring2examples(docstring, globs=globs),
globs=globs,
name=name,
filename=filename,
lineno=lineno,
docstring=docstring,
)
class SphinxDoctestTextfile(pytest.Module):
obj = None
def collect(self) -> Iterator[_pytest.doctest.DoctestItem]:
# inspired by doctest.testfile; ideally we would use it directly,
# but it doesn't support passing a custom checker
encoding = self.config.getini("doctest_encoding")
text = self.fspath.read_text(encoding)
name = self.fspath.basename
optionflags = _pytest.doctest.get_optionflags(self) # type:ignore
runner = SphinxDocTestRunner(
verbose=False,
optionflags=optionflags,
checker=_pytest.doctest._get_checker(),
)
test = doctest.DocTest(
examples=docstring2examples(text),
globs={},
name=name,
filename=name,
lineno=0,
docstring=text,
)
if test.examples:
yield DoctestItem.from_parent(
parent=self, # type:ignore
name=test.name,
runner=runner,
dtest=test,
)
class SphinxDoctestModule(pytest.Module):
def collect(self) -> Iterator[_pytest.doctest.DoctestItem]:
if self.fspath.basename == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(self.path, root=self.config.rootpath)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
pytest.skip("unable to import module %r" % self.path)
else:
raise
optionflags = _pytest.doctest.get_optionflags(self) # type:ignore
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""
a hackish doctest finder that overrides stdlib internals to fix
a stdlib bug
https://github.com/pytest-dev/pytest/issues/3456
https://bugs.python.org/issue25532
fix taken from https://github.com/pytest-dev/pytest/pull/4212/
"""
def _find(
self,
tests: List[doctest.DocTest],
obj: str,
name: str,
module: Any,
source_lines: Optional[List[str]],
globs: GlobDict,
seen: Dict[int, int],
) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
doctest.DocTestFinder._find( # type:ignore
self,
tests,
obj,
name,
module,
source_lines,
globs,
seen,
)
if sys.version_info < (3, 10):
finder = MockAwareDocTestFinder(
parser=SphinxDocTestParser() # type:ignore
)
else:
finder = doctest.DocTestFinder(parser=SphinxDocTestParser()) # type:ignore
runner = SphinxDocTestRunner(
verbose=False,
optionflags=optionflags,
checker=_pytest.doctest._get_checker(),
)
for test in finder.find(module, module.__name__):
if test.examples:
yield DoctestItem.from_parent(
parent=self, # type: ignore
name=test.name,
runner=runner,
dtest=test,
)
|
thisch/pytest-sphinx
|
src/pytest_sphinx.py
|
pytest_sphinx.py
|
py
| 20,904 |
python
|
en
|
code
| 27 |
github-code
|
6
|
13301085850
|
import requests
from prettytable import PrettyTable
# Install the dependencies first (terminal on Linux, cmd on Windows):
# pip3 install prettytable requests
city = input('Enter City => ')  # city name here
api = "your api here"  # API key for https://openweathermap.org - get yours from the website
response = requests.get(f"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={api}")  # request a response
# A truthy response (status code 200 = OK) means everything went fine
if response:
    data = response.json()
    weather = data["weather"]  # weather info
    temp = data["main"]  # temperature info
    wind = data["wind"]  # wind info
    clouds = data["clouds"]  # clouds info
    coord = data["coord"]  # coordinates
weather_cloud = weather[0]["description"]
temp_feels = temp["temp"]
temp_feels_like = temp["feels_like"]
temp_feels_min = temp["temp_min"]
temp_feels_max = temp["temp_max"]
wind_speed = wind["speed"]
clouds_deg = clouds["all"]
coordination_lon = coord["lon"]
    coordination_lat = coord["lat"]
#write everything into a table
table = PrettyTable()
table.field_names = ["City",
"weather Description",
"Temperature",
"Feels Like",
"Temperature Min",
"Temperature Max",
"Win Speed",
"Cloud",
"Coordinates Long",
"Coordinates Lat"]
table.add_row([city.capitalize(),
weather_cloud,
temp_feels,
temp_feels_like,
temp_feels_min,
temp_feels_max,
wind_speed,
clouds_deg,
coordination_lon,
coordination_lat])
print(table)
else:
print("Invalid Input")
|
xavian1996/weatherpy
|
main.py
|
main.py
|
py
| 1,968 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36180496136
|
"""
Zimri Leisher and Luca Araujo
Codeforces database, API and web app
"""
import sys
import traceback
import psycopg2
import json
import config
import flask
from collections import defaultdict
api = flask.Blueprint('api', __name__)
def get_connection():
return psycopg2.connect(database=config.database,
user=config.user,
password=config.password)
@api.route('/help')
def get_help():
return flask.send_file('.' + flask.url_for('static', filename='api-design.txt'), mimetype='text')
@api.route('/users/<institution_type>')
def get_users(institution_type):
"""REQUEST: /users/<institution_type>
institution_type (Required) -- defines whether we should search users
by their university or country
GET parameters
    institution_name (Optional, default: '') -- gives the name of the university/country
      to search for
    lowest_rating (Optional, default: -infinity) -- return only users with
      rating greater than or equal to the one given
    highest_rating (Optional, default: infinity) -- return only users with
      rating less than or equal to the one given
    max_users (Optional, default: 50) -- the maximum number of users to return;
      if the value given is higher than 500 it is changed to 500
RESPONSE: a JSON list of dictionaries, each of which represents one
user, sorted decreasingly by rating. Each dictionary in this list
will have the following fields.
handle -- (TEXT) the user's handle
name -- (TEXT) the user's name
rating -- (INTEGER) the user's current rating
max_rating -- (INTEGER) the user's maximum rating
rank -- (TEXT) the user's current rank
max_rank -- (TEXT) the user's maximum rank"""
lowest_rating = flask.request.args.get("lowest_rating")
highest_rating = flask.request.args.get("highest_rating")
max_users = flask.request.args.get("max_users")
institution_name = flask.request.args.get("institution_name")
query = """SELECT handle, first_name, last_name, rating, max_rating, user_rank, max_user_rank FROM users"""
predicates = []
args = {}
if lowest_rating:
predicates.append("""users.rating >= %(lowest_rating)s""")
args["lowest_rating"] = int(lowest_rating)
if highest_rating:
predicates.append("""users.rating <= %(highest_rating)s""")
args["highest_rating"] = int(highest_rating)
if institution_type and institution_name:
if institution_type == 'country':
predicates.append("""users.country ILIKE CONCAT('%%', %(institution_name)s, '%%')""")
else:
predicates.append("""users.organization ILIKE CONCAT('%%', %(institution_name)s, '%%')""")
args["institution_name"] = institution_name
if len(predicates) > 0:
query += " WHERE " + " AND ".join(predicates)
query += " ORDER BY (-users.rating, users.handle) LIMIT %(max_users)s"
args["max_users"] = int(max_users) if max_users else 50
users = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, args)
for row in list(cursor):
users.append({"handle": row[0], "name": (row[1] if row[1] else "") + " " + (row[2] if row[2] else ""),
"rating": row[3], "max_rating": row[4], "user_rank": row[5], "max_user_rank": row[6]})
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
return json.dumps(users)
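# Usage sketch (added; hypothetical host and URL prefix, which depend on how
# the blueprint is registered):
#
# import requests
# top_users = requests.get(
#     "http://localhost:5000/users/country",
#     params={"institution_name": "Brazil", "lowest_rating": 2000, "max_users": 5},
# ).json()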
@api.route('/problems')
def get_problems():
"""REQUEST: /problems
GET parameters
tag (Optional, default: '') -- returns only problems that contain
the defined tag. If left blank returns problems of any tag
    lowest_rating (Optional, default: -infinity) -- return only problems with
      rating greater than or equal to the one given
    highest_rating (Optional, default: infinity) -- return only problems with
      rating less than or equal to the one given
    max_problems (Optional, default: 50) -- the maximum number of problems to return;
      if the value given is higher than 500 it is changed to 500
RESPONSE: a JSON list of dictionaries, each of which represents one
problem, sorted decreasingly by number of users who solved the problem.
Each dictionary in this list will have the following fields.
id -- (INTEGER) the codeforces id of the problem
name -- (TEXT) the problem's name
rating -- (INTEGER) the problem's rating
tags -- (TEXT) the list of tags of a problem separated by commas
solved_count -- (INTEGER) the number of users that solved that problem"""
# a user can ask for a query without any tag
tag = flask.request.args.get("tag")
lowest_rating = flask.request.args.get("lowest_rating")
highest_rating = flask.request.args.get("highest_rating")
max_problems = flask.request.args.get("max_problems")
query = """SELECT problems.problem_id, problems.name, rating, solved_count FROM problems"""
predicates = []
args = {}
if tag:
query += ", problem_tags, tags" # we only search through the tags if we need
predicates.append("""tags.name = %(tag)s
AND tags.id = problem_tags.tag_id
AND problem_tags.problem_id = problems.problem_id""")
args["tag"] = tag
if lowest_rating:
predicates.append("""problems.rating >= %(lowest_rating)s""")
args["lowest_rating"] = int(lowest_rating)
if highest_rating:
predicates.append("""problems.rating <= %(highest_rating)s""")
args["highest_rating"] = int(highest_rating)
if len(predicates) > 0:
query += " WHERE " + " AND ".join(predicates)
query += " ORDER BY problems.solved_count DESC LIMIT %(max_problems)s"
args["max_problems"] = int(max_problems) if max_problems else 50
problems = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, args)
for row in list(cursor):
problems.append({"id": row[0], "name": row[1], "rating": row[2], "solved_count": row[3]})
for problem in problems:
local_query= """SELECT tags.name FROM tags, problem_tags
WHERE problem_tags.problem_id = %(id)s
AND problem_tags.tag_id = tags.id"""
local_args = {"id" : problem['id']}
cursor.execute(local_query, local_args)
problem["tags"] = []
for tag in list(cursor):
problem["tags"].append(tag[0])
problem["tags"] = ", ".join(problem["tags"])
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
return json.dumps(problems)
@api.route('/contests/<data_requested>')
def get_contest_graph(data_requested):
"""REQUEST: /contests/<data_requested>
data_requested (Required) -- defines whether to display the graph by difficulty
of the contest (calculated by the average difficulty of each contest)
or by the number of users that solved any problem of that contest
GET parameters
lowest_id (Optional, default: 0) -- return only the contests with id
bigger than or equal to the given value
highest_id (Optional, default: infinity) -- return only the contests with id
less than or equal to the given value
RESPONSE: a JSON list of tuples of two elements, each of which represents one
contest, sorted increasingly by index. Each tuple contains a pair of (id, difficulty)
if the requested information was difficulty or a pair of (index, solved_count) if the
requested information was solved count"""
    # values of the data requested must be either total_solves or difficulty;
    # reject anything else, since the value is interpolated into the SQL as a
    # column name via psycopg2 AsIs
    if data_requested not in ("total_solves", "difficulty"):
        return json.dumps([])
    lowest_id = flask.request.args.get("lowest_id")
    highest_id = flask.request.args.get("highest_id")
    print("received args:", flask.request.args)
predicates = []
args = {}
query = """SELECT contests.id, contests.%(data_requested)s FROM contests"""
args["data_requested"] = psycopg2.extensions.AsIs(data_requested)
if lowest_id:
predicates.append("""contests.id >= %(lowest_id)s""")
args["lowest_id"] = int(lowest_id)
if highest_id:
predicates.append("""contests.id <= %(highest_id)s""")
args["highest_id"] = int(highest_id)
if len(predicates) > 0:
query += " WHERE " + " AND ".join(predicates)
query += " ORDER BY contests.id"
contests = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, args)
for row in list(cursor):
contests.append((row[0], row[1]))
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
return json.dumps(contests)
@api.route('/tags_graph/<received_tags>')
def get_tags_graph(received_tags):
"""REQUEST: /tags_graph/<tags>
tags (Required) -- returns a plot graph for each of the required
tags. The input is a list of tags separated by commas
RESPONSE: a JSON dictionary, each of which represents one
tag, sorted alphabetically by the name of the tag.
There is a field of the dictionary for every tag, the field
contains a list of tuples with the following parameters:
rating -- (INTEGER) the rating range being counted
count -- (INTEGER) the number of problems with that tag in that rating range"""
received_tags = received_tags.split(',')
print("received args:", received_tags)
tags = {}
for tag in received_tags:
args = {}
query = """SELECT problems.rating, COUNT(problems.rating) FROM tags, problem_tags, problems
WHERE tags.name = %(tag)s
AND tags.id = problem_tags.tag_id
AND problem_tags.problem_id = problems.problem_id
GROUP BY problems.rating
ORDER BY problems.rating"""
args["tag"] = tag
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, args)
tags[tag] = []
for element in list(cursor):
                if element[0]:  # skip problems with no rating (represented as 0)
tags[tag].append((element[0], element[1]))
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
return json.dumps(tags)
@api.route('/tag_names')
def get_tag_names():
"""REQUEST: /tag_names
RESPONSE: a JSON list of TEXT with all the tag names"""
query = """SELECT name FROM tags"""
tags = []
try:
connection = get_connection()
cursor = connection.cursor()
        cursor.execute(query)
tags = []
for tag in list(cursor):
tags.append(tag[0])
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
return json.dumps(tags)
@api.route('/tags_intersection/<received_tags>')
def get_tags_intersection(received_tags):
"""REQUEST: /tags_intersection/<tags>
tags (Required) -- returns the information for the problems that
contain all tags. The input is a list of tags separated by commas
RESPONSE: a JSON list of tuples, each of which represents a rating range,
sorted decreasingly by rating. Each tuple will have the following fields:
rating -- (INTEGER) the beginning of the rating range (all problems ratings' are multiples of 100)
problem_count -- (INTEGER) the count of problems in that range
with those tags
solved_count -- (INTEGER) the count of solutions of problems
in that range with those tags"""
received_tags = received_tags.split(',')
query = """SELECT problems.rating, problems.solved_count
FROM problems, problem_tags, tags
WHERE problem_tags.tag_id = tags.id AND problems.problem_id = problem_tags.problem_id
AND tags.name IN (
"""
# match only problems with all of the given tags
query += ','.join(['%s' for i in range(len(received_tags))])
query += """) GROUP BY (problems.problem_id, problems.rating, problems.solved_count) HAVING COUNT(problems.problem_id) = %s"""
ratingCount = defaultdict(int)
solvedCount = defaultdict(int)
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, received_tags + [len(received_tags)])
for element in list(cursor):
rating = element[0]
solves = element[1]
if not rating:
continue
ratingCount[rating] += 1
solvedCount[rating] += solves
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
# return a list of (rating, count at that rating, solutions at that rating) of problems with these tags
return json.dumps(sorted([(rating, ratingCount[rating], solvedCount[rating]) for rating in ratingCount]))
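# The HAVING COUNT(...) = number-of-tags clause above implements the tag
# intersection: a problem row survives only if it matched every requested tag.
# A minimal sketch (added) of the same idea in plain Python:
def has_all_tags(problem_tags, wanted_tags):
    """Return True when every wanted tag appears among the problem's tags."""
    return set(wanted_tags).issubset(problem_tags)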
|
LucaDantas/cs257
|
webapp/api.py
|
api.py
|
py
| 13,186 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13538825366
|
from screen_objects.boy import Boy
from screen_objects.wall import Wall
def test_boy_is_cooked_from_recipe():
boy = Boy((100, 100), (20, 0), 'tootling boy')
assert boy.body.substance.radius == 7.5
assert boy.body.sprite.colour == (0, 0, 220)
assert boy.body.movement.max_accelleration == 2
def test_boy_has_unique_name():
boy = Boy((100, 100), (20, 0), 'tootling boy')
boy2 = Boy((100, 100), (20, 0), 'tootling boy')
assert boy.name != boy2.name
|
SimonCarryer/video_game_ai
|
tests/test_boy.py
|
test_boy.py
|
py
| 478 |
python
|
en
|
code
| 2 |
github-code
|
6
|
30578251026
|
import logging
import itertools
import math
import sys
import collections
import datetime
import shutil
import click
from .status import JobStatus, JOB_EVENT_STATUS_TRANSITIONS
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
SYMBOLS = [" ", "I", "R", "X", "C", "H", "S"]
STATUS_TO_SYMBOL = dict(zip(JobStatus, SYMBOLS))
COLORS = ["black", "yellow", "blue", "magenta", "green", "red", "magenta"]
SYMBOL_TO_COLOR = dict(zip(SYMBOLS, COLORS))
def make_state_graph(events):
job_states = {}
job_state_counts = collections.Counter()
counts_over_time = []
for event in sorted(events, key=lambda e: e.timestamp):
event_key = (event.cluster, event.proc)
new_status = JOB_EVENT_STATUS_TRANSITIONS.get(event.type, None)
if new_status is not None:
old_status = job_states.get(event_key, None)
job_states[event_key] = new_status
job_state_counts[new_status] += 1
if old_status is not None:
job_state_counts[old_status] -= 1
counts_over_time.append((event.timestamp, job_state_counts.copy()))
term = shutil.get_terminal_size((80, 20))
width = term.columns - 10
height = term.lines - 10
graph = make_bars(counts_over_time, width, height)
rows = ["│" + row for row in graph.splitlines()]
rows.append("└" + ("─" * (width)))
first_time, _ = counts_over_time[0]
last_time, _ = counts_over_time[-1]
left_date_str = (
datetime.datetime.fromtimestamp(first_time)
.strftime("%y-%m-%d %H:%M:%S")
.ljust(width + 1)
)
right_date_str = (
datetime.datetime.fromtimestamp(last_time)
.strftime("%y-%m-%d %H:%M:%S")
.rjust(width + 1)
)
time_str = "Time".center(width + 1)
rows.append(merge_strings(left_date_str, right_date_str, time_str))
max_jobs = max(total_counts(c) for _, c in counts_over_time)
extra_len = max(len(str(max_jobs)), len("# Jobs"))
new_rows = []
for idx, row in enumerate(rows):
if idx == 0:
new_rows.append(str(max_jobs).rjust(extra_len) + row)
elif idx == len(rows) - 2:
new_rows.append("0".rjust(extra_len) + row)
elif idx == len(rows) // 2:
new_rows.append("# Jobs".rjust(extra_len) + row)
else:
new_rows.append((" " * extra_len) + row)
rows = new_rows
graph = "\n".join(rows)
return graph
def merge_strings(*strings):
max_len = max(len(s) for s in strings)
out = [" "] * max_len
for string in strings:
for idx, char in enumerate(string):
if out[idx] == " " and char != " ":
out[idx] = char
return "".join(out)
def make_bars(counts_over_time, width, height):
first_time, _ = counts_over_time[0]
last_time, last_counts = counts_over_time[-1]
groups = list(group_counts_by_time(counts_over_time, width))
counts = [avg_counts(group) for group in groups]
counts[0] = groups[0][-1][1]
counts[-1] = last_counts
max_jobs = max(total_counts(c) for c in counts if c is not None)
columns = []
for count in counts:
if count is None:
columns.append(columns[-1])
continue
bar_lens = calculate_column_partition(count, max_jobs, height)
columns.append(
"".join(
symbol * bar_lens[status] for status, symbol in STATUS_TO_SYMBOL.items()
)
)
rows = list(
reversed(list(map(list, itertools.zip_longest(*columns, fillvalue=" "))))
)
rows = [
"".join(
click.style("█" * len(list(group)), fg=SYMBOL_TO_COLOR[symbol])
for symbol, group in itertools.groupby(row)
)
for row in rows
]
return "\n".join(rows)
def calculate_column_partition(counts, max_jobs, height):
raw_split = [(counts.get(status, 0) / max_jobs) * height for status in JobStatus]
int_split = [0 for _ in range(len(raw_split))]
carry = 0
for idx, entry in enumerate(raw_split):
dec = entry - math.floor(entry)
if entry == 0:
int_split[idx] = 0
elif dec >= 0.5:
int_split[idx] = math.ceil(entry)
elif math.floor(entry) == 0:
int_split[idx] = 1
carry += 1
elif dec < 0.5:
int_split[idx] = math.floor(entry)
else:
raise Exception("Unreachable")
int_split[int_split.index(max(int_split))] -= carry
return {k: v for k, v in zip(JobStatus, int_split)}
def _calculate_bar_component_len(count, total, bar_width):
if count == 0:
return 0
return max(int((count / total) * bar_width), 1)
def total_counts(counter):
return sum(counter.values())
def group_counts_by_time(counts_over_time, n_divisions):
first_time, _ = counts_over_time[0]
last_time, _ = counts_over_time[-1]
dt = (last_time - first_time) / n_divisions
left_idx = 0
right_idx = 0
for left_time in (first_time + (n * dt) for n in range(n_divisions)):
right_time = left_time + dt
for right_idx, (timestamp, _) in enumerate(
counts_over_time[left_idx:], start=left_idx
):
if timestamp > right_time:
break
yield counts_over_time[left_idx:right_idx]
left_idx = right_idx
def avg_counts(counts_over_time):
lc = len(counts_over_time)
if lc == 0:
return None
counts = [counts for _, counts in counts_over_time]
return collections.Counter(
{k: v / lc for k, v in sum(counts, collections.Counter()).items()}
)
if __name__ == "__main__":
make_state_graph(sys.argv[1])
|
JoshKarpel/condor_necropsy
|
condor_necropsy/state_graph.py
|
state_graph.py
|
py
| 5,724 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44426795976
|
from time import sleep
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from test_framework.script import CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG
class P2PInvMsgTimeOrder(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
    # Takes an unspent transaction and returns a new transaction paying to a
    # random address; the optional second parameter is the fee for this transaction.
def make_tx(self, unspent_transaction, fee=10000):
unspent_amount = int(unspent_transaction['amount']) * 100000000 # BTC to Satoshis
ftx = CTransaction()
ftx.vout.append(CTxOut(unspent_amount - fee, CScript([OP_DUP, OP_HASH160,
hex_str_to_bytes(
"ab812dc588ca9d5787dde7eb29569da63c3a238c"),
OP_EQUALVERIFY,
OP_CHECKSIG]))) # Pay to random address
ftx.vin.append(CTxIn(COutPoint(uint256_from_str(hex_str_to_bytes(unspent_transaction["txid"])[::-1]),
unspent_transaction["vout"])))
ftx.rehash()
ftx_hex = self.nodes[0].signrawtransaction(ToHex(ftx))['hex']
ftx = FromHex(CTransaction(), ftx_hex)
ftx.rehash()
return ftx
def run_test_parametrized(self):
self.stop_node(0)
with self.run_node_with_connections("", 0, ['-broadcastdelay=500', '-txnpropagationfreq=500'], 2) as p2pc:
connection = p2pc[0]
connection2 = p2pc[1]
# protected by mininode_lock
txinvs = []
# Append txinv
def on_inv(conn, message):
for im in message.inv:
if im.type == 1:
txinvs.append(hashToHex(im.hash))
connection2.cb.on_inv = on_inv
# initialize
self.nodes[0].generate(1)
# List of transactions to be send (used for temporary storing created transactions)
transactions_to_send = []
# List of transaction hashes in order in which they are sent
transaction_list_by_time = []
# List of all unspent transactions available in self.node[0]
unspent_txns = self.nodes[0].listunspent()
# List of fees (in increasing order)
fees = range(100000, 500000, 1000)
# Make transactions with fees defined above
for i in range(len(unspent_txns)):
tx = self.make_tx(unspent_txns[i], fees[i])
transactions_to_send.append(tx)
# Send all transactions that have been previously assembled and signed
for txts in transactions_to_send:
transaction_list_by_time.append(txts.hash)
connection.send_message(msg_tx(txts))
            # Due to asynchronous validation we cannot expect transactions to be received in the same order they were sent.
for txid in transaction_list_by_time:
wait_until(lambda: txid in txinvs, lock=mininode_lock, timeout=20)
with mininode_lock:
# Assert the number of received transactions is the same as the number of sent transactions.
assert_equal(len(transaction_list_by_time), len(txinvs))
def run_test(self):
self.run_test_parametrized()
if __name__ == '__main__':
P2PInvMsgTimeOrder().main()
|
bitcoin-sv/bitcoin-sv
|
test/functional/bsv-p2p_inv_msg_time_order.py
|
bsv-p2p_inv_msg_time_order.py
|
py
| 3,698 |
python
|
en
|
code
| 597 |
github-code
|
6
|
16292938825
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 14:27:38 2021
@author: maximelucas
"""
import numpy as np
import matplotlib.pyplot as plt
rho_max=250
v_max=130
long=1000
A=150
B=40000
ga=[0 for i in range(B)]
gb=[0 for i in range(B)]
rho0=[200 for i in range(A//5)]+[0 for i in range(4*A//5)]
deltaX = long/A
deltaT=deltaX * (1/(v_max+1))
rho_critique = 125
def gG(s1, s2):
    if (s1 <= rho_critique and s2 <= rho_critique):
        res = f(s1)
    elif (s1 >= rho_critique and s2 >= rho_critique):
        res = f(s2)
    elif (s1 < rho_critique and s2 > rho_critique):
        res = min(f(s1), f(s2))
    else:
        res = f(rho_critique)
    return res
#return f(s1)
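# (Added note) gG is the Godunov numerical flux for the concave flux f with its
# maximum at rho_critique: it takes the upwind value on the increasing branch,
# the downwind value on the decreasing branch, min(f(s1), f(s2)) when
# s1 < rho_critique < s2, and the maximum f(rho_critique) when s1 > rho_critique > s2.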
def f(rho):
res = rho*v(rho)
return res
#def v(rho):
#res = v_max
#res = (v_max/rho_max)*(rho_max-rho)
# return res
def v(rho):
res = v_max
if rho != 0:
res = np.sqrt(100*((1000/rho)-4))
if res > v_max :
res = v_max
return res
def rho_c(X):
l = (deltaT)/(deltaX)
Y = [0 for i in range(len(X))]
for i in range(1, len(X)-1):
Y[i] = X[i] -( l * (gG(X[i], X[i+1]) - gG(X[i-1], X[i])))
return Y
# avoid shadowing the function v(); rho_c() still needs to call it via f()
v_values = [v(i) for i in range(250)]
plt.plot(v_values)
#line, = plt.plot(rho0)
#X=rho0
#for i in range(1000):
# X = rho_c(X)
# line.set_ydata(X)
# plt.pause(1e-4)
# plt.draw()
|
Maksime0/Mod-lisation-et-mesures-pour-le-trafic-routier
|
Documents annexes/Godonov/godonov_anime.py
|
godonov_anime.py
|
py
| 1,309 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29716875304
|
from basePlayer import BasePlayer
import itertools
from math import inf
from random import choice
import numpy as np
from generals import State
import pickle
from minimaxPlayer import MinMaxPlayer
from generals import PlayerEnum
from game import Board
from game import Controller
from generals import g
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
class SVMPlayer(BasePlayer):
def __init__(self, sign, board):
BasePlayer.__init__(self, board)
self.states_value = {} # state -> value
# get unique hash of current board state
def getHash(self, boardState):
boardHash = ""
for k in range(len(boardState)):
for l in range(len(boardState)):
boardHash += str(boardState[k][l].state.value)
return boardHash
def training(self, X, Y, rounds=10000):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=42)
"""scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)
print(X_scaled) """
""" for i in range(len(X)):
print(str(X[i]) + " " + str(Y[i])) """
""" reg = svm.SVR(C=1.0, cache_size=1000, coef0=0.0, degree=3, epsilon=0.1,
gamma='auto_deprecated', kernel='rbf', max_iter=-1, shrinking=True,
tol=0.001, verbose=False) """
#SVRmodel.fit(X_scaled, Y)
#reg = RandomForestRegressor(max_depth=3, random_state=0, n_estimators=200)
reg = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial')
#reg = LinearRegression()
#reg = linear_model.BayesianRidge()
#reg = linear_model.Lasso(alpha=0.1)
#reg = linear_model.Ridge(alpha=1.0)
#reg = RandomForestClassifier(n_estimators=150, max_depth=4, random_state=0)
#reg = MultinomialNB()
#reg = KNeighborsClassifier(n_neighbors=3)
reg.fit(X, Y)
#print(reg.feature_importances_)
        # NOTE: the model was fit on all of X, so this is training-set accuracy;
        # fit on X_train and score on (X_test, Y_test) for an unbiased estimate
        result = reg.score(X_train, Y_train)
        print("Training accuracy: %.2f%%" % (result * 100.0))
print()
#print(X[3])
""" print(X_scaled[1]) """
#print(Y[3])
""" for i in range(len(X_test)):
print(X_test[i])
print(reg.predict([X_test[i]]))
print(Y_test[i]) """
return reg
def generateStates(self, boardSize, gameNumber):
board = Board(boardSize)
cont = Controller(PlayerEnum.Random, PlayerEnum.Random, board)
boardStates = []
winStates = []
winner = -1
for _ in itertools.repeat(None, gameNumber):
end = False
number_of_appends = 0
while board.freeCellCheck():
win = False
move = cont.player1.move()
boardState = self.transfromBoardState4(self.getHash(board.table))
if boardState not in boardStates:
boardStates.append(boardState)
number_of_appends = number_of_appends + 1
if cont.checkWin(cont.player1.sign, move):
win = True
end = True
winner = 1
board.reset()
break
if not board.freeCellCheck():
break
move = cont.player2.move()
boardState = self.transfromBoardState4(self.getHash(board.table))
if boardState not in boardStates:
boardStates.append(boardState)
number_of_appends = number_of_appends + 1
if cont.checkWin(cont.player2.sign, move):
win = True
end = True
winner = 2
board.reset()
break
if not win:
board.reset()
end = True
winner = 0
if end:
for _ in itertools.repeat(None, number_of_appends):
winStates.append(winner)
return [boardStates, winStates]
def transfromBoardState4(self, boardStateHash):
result = []
for i in range(len(boardStateHash)):
if boardStateHash[i] == '1':
result.append(1)
result.append(0)
result.append(0)
elif boardStateHash[i] == '2':
result.append(0)
result.append(1)
result.append(0)
else:
result.append(0)
result.append(0)
result.append(1)
return result
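    # Illustration (added): board hash "120" maps to
    # [1, 0, 0,  0, 1, 0,  0, 0, 1] - one (player1, player2, empty) triple per cell.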
def transfromBoardState5(self, boardStateHash):
result = []
for i in range(len(boardStateHash)):
if boardStateHash[i] == '1':
result.append(1)
result.append(0)
elif boardStateHash[i] == '2':
result.append(0)
result.append(1)
else:
result.append(0)
result.append(0)
return result
|
Gbor97/TicTacToe
|
SVMPlayer.py
|
SVMPlayer.py
|
py
| 5,736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74637015868
|
import pandas as pd
from src.distribution import compute_distribution_1_week
def test_compute_distribution_1_week():
"""Test compute 1 week distribution
In this test case:
- We have 1 brand that is sold by 1 bar -> distribution is 100% every week
when this bar sells
- We have 1 brand sold by 2 bars. bar_A makes 75% of market share.
Distribution is 100% when both bars sell. It's 75% when only bar 1 sells
Tests are composed of 4 steps:
- "Setup": when we create variables & conditions to run the code
- "Act": when we run the code
- "Assert": when we check that the code had the right result
- "Tear down": (optional) doing the cleaning
    The code below is commented to show the various steps
"""
# --- Set up ---
# Creating variables "input sales" and "output expected"
df_sales = pd.read_csv("tests/assets/sales.csv")
df_distribution_expected = pd.read_csv("tests/assets/result_compute_distribution.csv")
# --- Act ---
# Compute distribution with the input sales
df_res = compute_distribution_1_week(df_sales)
# --- Assert ---
# Checking that our result is what we wanted
pd.testing.assert_frame_equal(df_distribution_expected, df_res)
# --- Tear down ---
# No teardown is needed here
|
afouchet/training_practical_testing
|
tests/test_distribution.py
|
test_distribution.py
|
py
| 1,293 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21368239786
|
import numpy as np
import hardware.sr_lockin as lockin
lo = lockin.SR830("GPIB0::08::INSTR")
idn = lo.identification()
print(idn)
freq = lo.get_frequency()
print(freq)
lo.set_reference_mode(1)
getrefmode = lo.get_reference_mode()
print(getrefmode)
lo.set_frequency(10000)
freq2 = lo.get_frequency()
print(freq2)
lo.set_harmonic(1)
detectionmode = lo.get_harmonic()
print(detectionmode)
sensi = lo.get_sensitivity()
print(sensi)
lo.set_sensitivity(20)
sensi = lo.get_sensitivity()
print(sensi)
time = lo.get_time_constant()
print(time)
lo.set_time_constant(7)
time = lo.get_time_constant()
print(time)
output = lo.get_output()
print(output)
x = lo.get_grounding()
print(x)
lo.set_grounding(1)
x = lo.get_grounding()
print(x)
x = lo.get_coupling()
print(x)
lo.set_coupling(1)
x = lo.get_coupling()
print(x)
x = lo.get_filter_status()
print(x)
lo.set_filter_status(3)
x = lo.get_filter_status()
print(x)
x = lo.get_reserve_mode()
print(x)
lo.set_reserve_mode(1)
x = lo.get_reserve_mode()
print(x)
|
physikier/magnetometer
|
lockin_test.py
|
lockin_test.py
|
py
| 1,054 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8927199604
|
import contextlib
import os
import shutil
import tempfile
import mandrel
import unittest
class TestCase(unittest.TestCase):
def assertIs(self, a, b):
# python 2.6/2.7 compatibility
self.assertTrue(a is b)
@contextlib.contextmanager
def tempdir(dir=None):
"""Context manager that yields a temporary directory. Cleans up afterwards."""
if dir is not None:
dir = os.path.realpath(os.path.expanduser(dir))
path = os.path.realpath(tempfile.mkdtemp(dir=dir))
try:
yield path
finally:
shutil.rmtree(path, ignore_errors=True)
@contextlib.contextmanager
def chdir(path):
"""Context manager that moves to path in context; returns to original dir afterwards."""
start_path = os.path.realpath('.')
try:
os.chdir(os.path.realpath(os.path.expanduser(path)))
yield
finally:
os.chdir(start_path)
@contextlib.contextmanager
def workdir(dir=None):
"""Context manager that creates a temp dir, moves to it, and yields the path.
Moves back to original dir and cleans up afterwards."""
with tempdir(dir) as path:
with chdir(path):
yield path
def refresh_bootstrapper():
if hasattr(mandrel, 'bootstrap'):
        reload(mandrel.bootstrap)  # builtin reload is Python 2 only; Python 3 needs importlib.reload
else:
__import__('mandrel.bootstrap')
BOOTSTRAP_FILE = 'Mandrel.py'
@contextlib.contextmanager
def bootstrap_scenario(text="", dir=None):
with workdir(dir=dir) as path:
bootstrapper = os.path.join(path, BOOTSTRAP_FILE)
with open(bootstrapper, 'w') as f:
f.write(text)
yield path, bootstrapper
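# Usage sketch (added; hypothetical bootstrap file content):
#
# with bootstrap_scenario(text="# minimal bootstrap") as (path, bootstrapper):
#     refresh_bootstrapper()  # cwd is `path`; `bootstrapper` is path/Mandrel.py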
|
ethanrowe/python-mandrel
|
mandrel/test/utils.py
|
utils.py
|
py
| 1,612 |
python
|
en
|
code
| 4 |
github-code
|
6
|
18023127994
|
import os
import sqlite3
import shutil
import sys
import psutil
def is_browser_running(browser_name):
for process in psutil.process_iter():
try:
if browser_name in process.name().lower():
return True
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return False
if is_browser_running('firefox'):
print("Firefox가 실행 중입니다. 브라우저를 닫고 다시 시도하세요.")
# 환경에 따른 경로 설정
if os.name == 'nt': # Windows 환경
base_path = "C:/Users"
else: # WSL 환경
base_path = "/mnt/c/Users"
# Windows 사용자 이름
windows_username = "user" # 여기에 실제 Windows 사용자 이름을 입력하세요
# Firefox 프로필 경로
firefox_path = os.path.join(base_path, windows_username, "AppData", "Roaming", "Mozilla", "Firefox")
# Chrome 프로필 경로
chrome_path = os.path.join(base_path, windows_username, "AppData", "Local", "Google", "Chrome", "User Data", "Default", "History")
# Function that extracts data from an SQLite database
def extract_sqlite_data(db_path, query):
try:
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
cursor.execute(query)
rows = cursor.fetchall()
conn.close()
return rows
except Exception as e:
print(f"Error reading {db_path}: {str(e)}")
return []
if not os.access(firefox_path, os.R_OK):
print("Firefox 경로에 접근할 수 없습니다. 권한을 확인하세요.")
# 출력을 파일에 저장할 경로
output_file_path = "linuxweboutput.txt"
# 기존 출력 스트림을 백업하고 파일로 변경
original_stdout = sys.stdout
with open(output_file_path, "w") as f:
sys.stdout = f
# Firefox 인터넷 기록을 추출합니다.
for profile_directory in os.listdir(firefox_path):
if profile_directory.endswith('.default-release'):
places_db = os.path.join(firefox_path, profile_directory, 'places.sqlite')
if os.path.exists(places_db):
temp_copy = os.path.join("/tmp", "temp_places.sqlite")
shutil.copy(places_db, temp_copy)
query = "SELECT url, title, last_visit_date FROM moz_places"
records = extract_sqlite_data(temp_copy, query)
os.remove(temp_copy)
for record in records:
print(record)
    # Extract the Chrome browsing history.
if os.path.exists(chrome_path):
temp_copy = os.path.join("/tmp", "temp_history.sqlite")
shutil.copy(chrome_path, temp_copy)
query = "SELECT url, title, last_visit_time FROM urls"
records = extract_sqlite_data(temp_copy, query)
os.remove(temp_copy)
for record in records:
print(record)
# Restore the original output stream
sys.stdout = original_stdout
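# A minimal sketch (added; assumption: Chrome stores last_visit_time as
# microseconds since the WebKit epoch, 1601-01-01) for making such a
# timestamp human-readable:
import datetime

def chrome_time_to_datetime(webkit_us):
    """Convert a Chrome/WebKit timestamp to a datetime object."""
    return datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=webkit_us)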
|
KIMJOONSIG/Reboot3
|
Linux/p3_BrowserHistory.py
|
p3_BrowserHistory.py
|
py
| 2,892 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
73200075389
|
def calcPrimes(n):
""" Calculate Prime Factors of an Integer """
if type(n) is not int:
        raise TypeError("Requires argument of type int")
factors = []
d = 2
while d * d <= n:
while (n % d) == 0:
factors.append(d)
            n //= d  # integer division keeps n an int
d += 1
if n > 1:
factors.append(int(n))
return factors
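# Example (added): calcPrimes(13195) returns [5, 7, 13, 29],
# since 13195 = 5 * 7 * 13 * 29.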
def solveProblem003():
""" Solves Project Euler Problem 003 """
primes = calcPrimes(600851475143)
print(max(primes))
if __name__ == "__main__":
solveProblem003()
|
jgroff/projecteuler
|
src/problem003.py
|
problem003.py
|
py
| 547 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73676828986
|
import serial,os,sys
ser = serial.Serial('/dev/ttyAMA1', 115200)
drawPath = "/home/ubuntu/my_ws/src/test_odom_rap/drawLcd/drawLcd.py"
byte_list = [0x55, 0x0E, 0x01, 0x02,
int(0 / 256), int(0 % 256),
0,0,
0,0,
0, 0, 1]
k = 0
for i in range(len(byte_list)):
k += byte_list[i]
k = k % 256
byte_list.append(k)
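# (Added note) The byte appended above is a simple additive checksum:
# the sum of all preceding packet bytes modulo 256.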
contr_law = b"%c%c%c%c%c%c%c%c%c%c%c%c%c%c" % (byte_list[0], byte_list[1], byte_list[2], byte_list[3],
byte_list[4], byte_list[5], byte_list[6], byte_list[7],
byte_list[8], byte_list[9], byte_list[10], byte_list[11],
byte_list[12], byte_list[13])
ser.write(contr_law)
try:
if ord(ser.read())==85:
flag = False
receive = [ord(ser.read()) for i in range(13)]
        battery = int(27.322 * ((receive[9] * 256 + receive[10]) / 100.0) - 245.9)
        print(battery)
        if battery < 0:
            battery = 0
        elif battery > 100:
            battery = 100
        try:
            print(battery)
            os.popen("python " + " " + drawPath + " " + str(battery) + " 0 0")
        except IOError:
            res = os.system("sudo chmod 777 /dev/i2c-1&sudo chown root.gpio /dev/gpiomem&sudo chmod g+rw /dev/gpiomem&sudo chmod 777 /dev/spidev0.*")
            os.popen("python " + " " + drawPath + " " + str(battery) + " 0 0")
except TypeError:
print('error')
|
caiyilian/INTELLIGENT-FOOD-DELIVERY-ROBOT
|
树莓派代码/drawLcd/auto_startup.py.py
|
auto_startup.py.py
|
py
| 1,524 |
python
|
en
|
code
| 2 |
github-code
|
6
|
1798363402
|
import tkinter as tk
import windows.Window as Window
import time
import random
import math
class GameWindow(Window.Window):
def __init__(self, master, controller):
super().__init__(master=master)
self.master = master
self.controller = controller
# Extra variables
self._job = None
self.dirs = [(0, -1), (-1, 0), (0, 1), (1, 0), (1, -1), (-1, 1), (1, 1), (-1, -1)]
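        # (Added note) These eight offsets enumerate the Moore neighborhood
        # (orthogonal plus diagonal neighbours) that step() uses.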
self.cells = {}
# windows / Frames
self.canvas = None
self.button_frame = None
# Buttons
self.step_button = None
self.auto_step_button = None
self.random_fill_button = None
self.clear_button = None
self.settings_button = None
self.setup()
def setup(self):
self.config(bg=self.controller.BACKGROUND_COLOR)
self.canvas = tk.Canvas(master=self, bg="#333", width=self.controller.SCREEN_WIDTH,
height=self.controller.SCREEN_HEIGHT,
bd=0, highlightthickness=0, relief='ridge')
self.canvas.pack()
self.canvas.bind("<Button-1>", self.callback)
# Button Menu
self.button_frame = tk.Frame(master=self, width=self.controller.SCREEN_WIDTH,
bg=self.controller.BACKGROUND_COLOR)
self.step_button = tk.Button(self.button_frame, text="Step", padx=10, pady=5, fg="black", relief="flat",
bg=self.controller.PRIME_COLOR, command=self.step)
self.step_button.pack(side=tk.LEFT)
self.auto_step_button = tk.Button(self.button_frame, text="Start", padx=10, pady=5, fg="black", relief="flat",
bg=self.controller.PRIME_COLOR, command=self.start)
self.auto_step_button.pack(side=tk.LEFT)
self.random_fill_button = tk.Button(self.button_frame, text="Random Fill", padx=10, pady=5, fg="black",
relief="flat",
bg=self.controller.PRIME_COLOR, command=self.random)
self.random_fill_button.pack(side=tk.LEFT)
self.clear_button = tk.Button(self.button_frame, text="Clear", padx=10, pady=5, fg="black", relief="flat",
bg=self.controller.PRIME_COLOR, command=self.clear)
self.clear_button.pack(side=tk.LEFT)
self.settings_button = tk.Button(self.button_frame, text="Settings", padx=10, pady=5, fg="black", relief="flat",
bg=self.controller.PRIME_COLOR,
command=self.settings)
self.settings_button.pack(side=tk.RIGHT)
self.button_frame.pack(fill=tk.BOTH)
self.update()
def create_grid(self):
start_width = self.controller.tile_width
for idxC in range(self.controller.grid_width-1):
self.canvas.create_line(start_width, 0, start_width, self.controller.SCREEN_HEIGHT,
fill="#000", width=1)
start_width += self.controller.tile_width
start_height = self.controller.tile_height
for idxR in range(self.controller.grid_height-1):
self.canvas.create_line(0, start_height, self.controller.SCREEN_WIDTH, start_height,
fill="#000", width=1)
start_height += self.controller.tile_height
def random(self):
for r in range(self.controller.grid_width - 1):
for c in range(self.controller.grid_height - 1):
ri = random.randint(0, self.controller.random_level)
if ri == (self.controller.random_level / 2) and self.cells.get((c, r), None) is None:
self.cells.update({(c, r): 1})
self.update()
def clear(self):
self.cells.clear()
self.update()
def settings(self):
self.stop()
self.controller.show_frame("SettingsWindow")
def update(self):
self.canvas.delete('all')
self.create_grid()
for cell in self.cells.keys():
tile_start_width = cell[1] * self.controller.tile_width
tile_start_height = cell[0] * self.controller.tile_height
self.canvas.create_rectangle(tile_start_width, tile_start_height, tile_start_width + self.controller.tile_width,
tile_start_height + self.controller.tile_height, outline="#000",
fill=self.controller.PRIME_COLOR, width=1)
def step(self):
if len(self.cells) <= 0:
return
start = time.perf_counter()
neighbours = {}
deaths = []
for cell in self.cells.keys():
non = 0
for d in self.dirs:
nc = cell[0] + d[0]
nr = cell[1] + d[1]
if self.cells.get((nc, nr), None) is not None:
non += 1
if neighbours.get((nc, nr), None) is not None:
neighbours[(nc, nr)] += 1
else:
neighbours.update({(nc, nr): 1})
            if (non < 2 or non > 3) or \
                    not (0 <= cell[0] < self.controller.grid_height and 0 <= cell[1] < self.controller.grid_width):
deaths.append(cell)
for neighbour in neighbours.keys():
if neighbours[neighbour] == 3 and (
0 <= neighbour[0] < self.controller.grid_height and 0 <= neighbour[1] < self.controller.grid_width):
self.cells[neighbour] = 1
for death in deaths:
if self.cells.get(death, None) is not None:
self.cells.pop(death)
bfu = time.perf_counter()
self.update()
end = time.perf_counter()
        print(f'Finished in {round(end - start, 9)} second(s); '
              f'before update: {round(bfu - start, 9)} second(s).', end="\n\n")
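    # (Added note) step() applies Conway's Life rules (B3/S23): a dead cell
    # with exactly three live neighbours becomes alive, a live cell survives
    # with two or three live neighbours, and any other live cell dies.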
def auto_step(self):
self.step()
self._job = self.after(self.controller.delay, self.auto_step)
def start(self):
self.auto_step()
self.auto_step_button.configure(text="Stop", command=self.stop)
self.step_button.configure(state="disabled")
def cancel(self):
if self._job is not None:
self.after_cancel(self._job)
self._job = None
def stop(self):
self.cancel()
self.auto_step_button.configure(text="Start", command=self.start)
self.step_button.configure(state="normal")
def callback(self, event):
print(f"Clicked at: x{event.x}, y{event.y}")
ylo = math.floor(event.x / self.controller.tile_width)
xlo = math.floor(event.y / self.controller.tile_height)
cell = self.cells.get((xlo, ylo), None)
if cell is not None:
self.cells.pop((xlo, ylo))
elif cell is None:
self.cells.update({(xlo, ylo): 1})
self.update()
@staticmethod
def name():
return "GameWindow"
|
Akoens/PyGOL
|
windows/GameWindow.py
|
GameWindow.py
|
py
| 7,051 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22115422189
|
from ibapi.client import *
from ibapi.wrapper import *
import threading
import time
# Change as necessary
port = 7496
class TestApp(EClient, EWrapper):
def __init__(self):
EClient.__init__(self, self)
# Only the necessary news-related callbacks are implemented below
# Headlines delivered to this callback after reqMktData for broadtape news
def tickNews(
self,
tickerId: int,
timeStamp: int,
providerCode: str,
articleId: str,
headline: str,
extraData: str,
):
print(
"tickNews.",
f"tickerId:{tickerId}",
f"timeStamp:{timeStamp}",
f"providerCode:{providerCode}",
f"articleId:{articleId}",
f"headline:{headline}",
f"extraData:{extraData}",
)
def tickString(self, reqId: TickerId, tickType: TickType, value: str):
print(reqId, tickType, value)
# Subscribed news sources are delivered here after reqNewsProviders
def newsProviders(self, newsProviders: ListOfNewsProviders):
print(
"newsProviders.",
f"newsProviders:{newsProviders}",
)
def tickReqParams(self, tickerId: int, minTick: float, bboExchange: str, snapshotPermissions: int):
print(tickerId, minTick, bboExchange, snapshotPermissions)
app = TestApp()
app.connect("127.0.0.1", port, 1001)
time.sleep(3)
threading.Thread(target=app.run).start()
# Creates a contract specific news source and denotes the news feed through 'genericTickList="mdoff,292:BRFG"'.
# mycontract = Contract()
# mycontract.symbol="SPY"
# mycontract.secType="STK"
# mycontract.exchange="SMART"
# mycontract.currency="USD"
# # Places the request for news data. Note the generic tick list string.
# app.reqMktData(
# reqId=123,
# contract=mycontract,
# genericTickList="mdoff,292:BRFG+DJNL",
# snapshot=False,
# regulatorySnapshot=False,
# mktDataOptions=[],
# )
# Creates a generic feed that provides all news articles sent by the resource
# Must set generic tick list to 'genericTickList="mdoff,292"' when using this request.
contract2 = Contract()
contract2.symbol = "DJNL:DJNL_ALL"
contract2.secType = "NEWS"
contract2.exchange = "DJNL"
# Places the request for news data. Note the generic tick list string.
app.reqMktData(
reqId=456,
contract=contract2,
genericTickList="mdoff,292",
snapshot=False,
regulatorySnapshot=False,
mktDataOptions=[],
)
# app.reqNewsProviders()
|
hardhittad22/Python-testers
|
News/News.py
|
News.py
|
py
| 2,518 |
python
|
en
|
code
| null |
github-code
|
6
|
10062234348
|
import dataclasses
import logging
import typing
import httpx
from sequoia.exceptions import DiscoveryResourcesError, DiscoveryServicesError, ResourceNotFound, ServiceNotFound
logger = logging.getLogger(__name__)
__all__ = ["Resource", "ResourcesRegistry", "Service", "ServicesRegistry"]
@dataclasses.dataclass
class Resource:
"""
Representation of a resource part of a Sequoia service.
"""
name: str
path: str
class ResourcesRegistry(dict):
"""
Mapping of available resources by name.
"""
def __getitem__(self, key: str) -> Resource:
try:
return super().__getitem__(key)
except KeyError:
raise ResourceNotFound(key)
@dataclasses.dataclass
class Service:
"""
Representation of a Sequoia service.
"""
name: str
url: str
title: typing.Optional[str] = dataclasses.field(default=None, hash=False, compare=False)
description: typing.Optional[str] = dataclasses.field(default=None, hash=False, compare=False)
async def discover(self):
"""
Request a service description endpoint to discover its resources and metadata.
"""
async with httpx.AsyncClient(timeout=60) as client:
try:
response = await client.get(f"{self.url}/descriptor/raw/")
response.raise_for_status()
response = response.json()
self.title = response["title"]
self.description = response["description"]
self._resources = ResourcesRegistry(
{
i["hyphenatedPluralName"].replace("-", "_"): Resource(
name=i["pluralName"], path=f"{i['path']}/{i['hyphenatedPluralName']}"
)
for i in response["resourcefuls"].values()
}
)
except KeyError:
logger.exception("Wrong response retrieving description of service '%s': %s", self.name, str(response))
raise DiscoveryResourcesError(service=self.name)
except (httpx.exceptions.HTTPError, OSError):
raise DiscoveryResourcesError(service=self.name)
@property
async def resources(self) -> ResourcesRegistry:
"""
Return the registry containing all the resources that are part of this service. This registry will be loaded
when requested, following lazy pattern.
:return: Resources registry.
"""
if not hasattr(self, "_resources"):
await self.discover()
return self._resources
class ServicesRegistry(dict):
"""
Mapping of available services by name.
"""
def __getitem__(self, item):
try:
value = super().__getitem__(item)
except KeyError:
raise ServiceNotFound(item)
return value
async def discover(self, registry_url: str, owner: typing.Optional[str] = None):
"""
Request Registry service to update the list of all available services.
:return: Services registry.
"""
async with httpx.AsyncClient(timeout=60) as client:
try:
response = await client.get(f"{registry_url}/services/{owner or 'root'}/")
response.raise_for_status()
response = response.json()
self.clear()
self.update(
sorted(
{i["name"]: Service(name=i["name"], url=i["location"]) for i in response["services"]}.items()
)
)
except KeyError:
logger.exception("Wrong response retrieving list of services from 'registry': %s", str(response))
raise DiscoveryServicesError()
except (httpx.exceptions.HTTPError, OSError):
raise DiscoveryServicesError()
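# Usage sketch (added; hypothetical registry URL and service name):
#
# import asyncio
#
# async def main():
#     registry = ServicesRegistry()
#     await registry.discover("https://registry.example.com")
#     service = registry["metadata"]        # raises ServiceNotFound if absent
#     resources = await service.resources   # lazily triggers service.discover()
#
# asyncio.run(main())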
|
pikselpalette/sequoia-python-client-sdk-async
|
sequoia/types.py
|
types.py
|
py
| 3,925 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7804713762
|
from app import get_app_db
import chalicelib.db as dynamoDB
import csv
db = get_app_db()
with open('Deckle.csv') as f:
taskReader = csv.reader(f, delimiter=',')
for task in taskReader:
name = task[0]
duration = int(task[1])
deadlineDateTime = task[2]
db.add_item(description=name, duration=duration, deadline=deadlineDateTime)
|
lexonli/Deckle-API
|
taskParser.py
|
taskParser.py
|
py
| 346 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37513630544
|
from model.group import Group
import random
def test_delete_some_group(app, db):
# if app.group.count() == 0:
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
group = random.choice(old_groups)
# index = randrange(len(old_groups))
app.group.delete_group_by_id(group.id)
# app.group.delete_group_by_index(index)
assert len(old_groups) - 1 == app.group.count()
new_groups = db.get_group_list()
old_groups.remove(group)
# old_groups[index:index+1] = []
assert old_groups == new_groups
if db.check_ui == "true":
def clean(group):
return Group(id=group.id, name=group.name.strip())
new_groups = map(clean, db.get_group_list())
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
|
Iren337/pyton_training
|
test/test_del_group.py
|
test_del_group.py
|
py
| 877 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5821317586
|
# Given a natural number, find the maximum odd digit of the number.
n = int(input('Enter a natural number: '))
max_odd = -1  # stays -1 if the number has no odd digits
while n > 0:
    digit = n % 10
    if digit % 2 == 1 and digit > max_odd:
        max_odd = digit
    n = n // 10
print('The maximum odd digit of the number = ', max_odd)
|
GarryG6/PyProject
|
11.py
|
11.py
|
py
| 407 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
13171858253
|
word1=input()
for i in range(0,len(word1)):
if word1[i]=='h':
index1=i
break
for i in range(len(word1)-1,index1, -1):
if word1[i]=='h':
index2=i
break
word2=word1[index2:index1:-1]
print(word1[:index1]+word2+word1[index2:])
|
dagasheva01/py
|
lab28.py
|
lab28.py
|
py
| 243 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37698137583
|
coms = open('input.txt', 'r').readlines()
coms = [i.replace('\n', '') for i in coms]
coms = [i.split(' ') for i in coms]
commands = []
for _, x in coms:
if x[0] == '+':
x = int(x.replace('+', ''))
elif x[0] == '-':
x = -int(x.replace('-', ''))
commands.append([_, x])
#print(coms)
#print(commands)
def part1():
adapter = 0
cnt = 0
appeared = [0 for i in range(0, len(coms))]
while appeared[cnt] == 0:
appeared[cnt] = 1
if commands[cnt][0] == "acc":
adapter += commands[cnt][1]
cnt += 1
elif commands[cnt][0] == "jmp":
cnt += commands[cnt][1]
else:
cnt += 1
if cnt == len(commands):
print("Part 2:", adapter)
break
return adapter
def part2():
for x in range(0, len(commands)):
if commands[x][0] == "acc":
continue
if commands[x][0] == "nop":
commands[x][0] = "jmp"
c = part1()
commands[x][0] = "nop"
else:
commands[x][0] = "nop"
c = part1()
commands[x][0] = "jmp"
print("Part 1:", part1())
part2()
|
DLNinja/Advent-Of-Code-2020
|
day8.py
|
day8.py
|
py
| 1,234 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3928902472
|
#Grids 1-4 are 2x2
grid1 = [
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]
grid2 = [
[1, 0, 4, 2],
[4, 2, 1, 3],
[0, 1, 0, 4],
[3, 4, 2, 1]]
grid3 = [
[1, 2, 3, 4],
[2, 1, 4, 3],
[3, 4, 2, 1],
[4, 3, 1, 2]]
grid4 = [
[1, 3, 4, 2],
[4, 2, 1, 3],
[2, 1, 3, 4],
[3, 4, 2, 1]]
#Grids 4-7 are 3x3
grid5 = [
[1, 2, 3, 4, 5, 6, 7, 8, 9,],
[2, 3, 4, 5, 6, 7, 8, 9, 1,],
[3, 4, 5, 6, 7, 8, 9, 1, 2,],
[4, 5, 6, 7, 8, 9, 1, 2, 3,],
[5, 6, 7, 8, 9, 1, 2, 3, 4,],
[6, 7, 8, 9, 1, 2, 3, 4, 5,],
[7, 8, 9, 1, 2, 3, 4, 5, 6,],
[8, 9, 1, 2, 3, 4, 5, 6, 7,],
[9, 1, 2, 3, 4, 5, 6, 7, 8,]]
grid6 = [
[6, 1, 7, 8, 4, 2, 5, 3, 9,],
[7, 4, 5, 3, 6, 9, 1, 8, 2,],
[8, 3, 2, 1, 7, 5, 4, 6, 9,],
[1, 5, 8, 6, 9, 7, 3, 2, 4,],
[9, 6, 4, 2, 3, 1, 8, 7, 5,],
[2, 7, 3, 5, 8, 4, 6, 9, 1,],
[4, 8, 7, 9, 5, 6, 2, 1, 3,],
[3, 9, 1, 4, 2, 8, 7, 5, 6,],
[5, 2, 6, 7, 1, 3, 9, 4, 8,]]
grid7 = [
[6, 1, 9, 8, 4, 2, 5, 3, 7,],
[7, 4, 5, 3, 6, 9, 1, 8, 2,],
[8, 3, 2, 1, 7, 5, 4, 6, 9,],
[1, 5, 8, 6, 9, 7, 3, 2, 4,],
[9, 6, 4, 2, 3, 1, 8, 7, 5,],
[2, 7, 3, 5, 8, 4, 6, 9, 1,],
[4, 8, 7, 9, 5, 6, 2, 1, 3,],
[3, 9, 1, 4, 2, 8, 7, 5, 6,],
[5, 2, 6, 7, 1, 3, 9, 4, 8,]]
#grids 8-10 are 2x3
grid8 = [
[0, 0, 6, 0, 0, 3],
[5, 0, 0, 0, 0, 0],
[0, 1, 3, 4, 0, 0],
[0, 0, 0, 0, 0, 6],
[0, 0, 1, 0, 0, 0],
[0, 5, 0, 0, 6, 4]]
grid9 = [
[1, 2, 6, 5, 4, 3],
[5, 3, 4, 6, 2, 1],
[6, 1, 3, 4, 5, 2],
[2, 5, 5, 3, 1, 6],
[4, 6, 1, 2, 3, 5],
[3, 5, 2, 1, 6, 4]]
grid10 = [
[1, 2, 6, 5, 4, 3],
[5, 3, 4, 6, 2, 1],
[6, 1, 3, 4, 5, 2],
[2, 4, 5, 3, 1, 6],
[4, 6, 1, 2, 3, 5],
[3, 5, 2, 1, 6, 4]]
grids = [(grid1, 2, 2), (grid2, 2, 2), (grid3, 2, 2), (grid4, 2, 2),
(grid5, 3, 3), (grid6, 3, 3), (grid7, 3, 3),
(grid8, 2, 3), (grid9, 2, 3), (grid10, 2, 3)]
expected_outputs = [False, False, False, True, False, False, True, False, False, True]
'''
===================================
DO NOT CHANGE CODE ABOVE THIS LINE
===================================
'''
#To complete the first assignment, please write the code for the following function
def check_solution(grid_input):
'''
This function is used to check whether a sudoku board has been correctly solved
    args: grid_input - a tuple (grid, cell_width, cell_length), where grid is a
          representation of a sudoku board as a nested list.
    returns: True (correct solution) or False (incorrect solution)
'''
inside = False
check = []
    width = grid_input[1]  # width of a cell in the sudoku
    length = grid_input[2]  # length of a cell in the sudoku
    area = width * length  # number of elements in a cell of the sudoku
for row in range(len(grid_input[0])): # cycles through the rows of the grid
for check_list in grid_input[0][row]:
for i in range(len(check)):
if check_list == check[i]:
inside = True
if inside == False:
check.append(check_list)
check = []
    for column in range(len(grid_input[0])): # checks all the columns
        for row in range(area):
            for check_list in check:
                if grid_input[0][row][column] == check_list:
                    inside = True
            if inside == False:
                check.append(grid_input[0][row][column])
check = []
elements = []
for loops in range(area): # to make sure you get all the elements
before = length*loops
for row in range(len(grid_input[0])):
if len(grid_input[0][row][(before):(length)+before]) != 0: #So only important information is appended
elements.append(grid_input[0][row][(before):(length)+before])
cells = [] # a list of all the cells
sum_e = 0 # expected sum of the cell
sum_s = 0 # sum of the cell in the sudoku
for loop in range(area):
cells.append(elements[grid_input[1]*loop:grid_input[1]*(loop+1)])
sum_e += loop+1
sum_s = 0
for i in range(area):
for element in range(len(cells[i])):
sum_s += sum(cells[i][element])
if sum_s != sum_e: #checks if the sum of the elements in the cell is what it should be
inside = True
sum_s = 0
if inside == True: # To check the sudoku
return False
else:
return True
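# Quick sanity check against the provided grids: check_solution((grid4, 2, 2))
# should return True, while check_solution((grid2, 2, 2)) should return False.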
'''
===================================
DO NOT CHANGE CODE BELOW THIS LINE
===================================
'''
def main():
    '''
    This function will call the check_solution function on each of the provided grids,
    comparing the answer to the expected output. Each correct output is given 5 points.
    '''
points = 0
print("Running test script for coursework 1")
print("====================================")
#Loop through the grids and expected outputs together
for (i, (grid, output)) in enumerate(zip(grids, expected_outputs)):
#Compare output to expected output
print("Checking grid: %d" % (i+1))
checker_output = check_solution(grid)
if checker_output == expected_outputs[i]:
#Output is correct - yay!
print("grid %d correct" % (i+1))
points = points + 5
else:
#Output is incorrect - print both output and expected output.
print("grid %d incorrect" % (i+1))
print("Output was: %s, but expected: %s" % (checker_output, expected_outputs[i]))
print("====================================")
print("Test script complete, Total points: %d" % points)
if __name__ == "__main__":
main()
|
Louie-B/Course_work_1
|
CW1.py
|
CW1.py
|
py
| 5,614 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11322722554
|
from keras.models import *
from keras.layers import *
from model.model_basic import BasicDeepModel
from keras.utils.vis_utils import plot_model
from keras import regularizers
dp = 7
filter_nr = 64
filter_size = 3
max_pool_size = 3
max_pool_strides = 2
dense_nr = 256
spatial_dropout = 0.2
dense_dropout = 0.5
conv_kern_reg = regularizers.l2(0.00001)
conv_bias_reg = regularizers.l2(0.00001)
class DpcnnModel(BasicDeepModel):
    def __init__(self, name='basicModel', num_folds=5, config=None):
        name = 'dpcnn' + config.main_feature
        BasicDeepModel.__init__(self, name=name, n_folds=num_folds, config=config)
def create_model(self):
char_embedding = Embedding(self.max_c_features, self.char_embed_size, weights=[self.char_embedding], trainable=True, name='char_embedding')
word_embedding = Embedding(self.max_w_features, self.word_embed_size, weights=[self.word_embedding], trainable=True, name='word_embedding')
char_input = Input(shape=(self.char_max_len,), name='char')
word_input = Input(shape=(self.word_max_len,), name='word')
if not self.config.main_feature == 'char':
char_input, word_input = word_input, char_input
char_embedding, word_embedding = word_embedding, char_embedding
self.char_max_len, self.word_max_len = self.word_max_len, self.char_max_len
x = char_embedding(char_input)
x = BatchNormalization()(x)
block1 = Conv1D(filter_nr, kernel_size=filter_size, padding='same', activation='linear', kernel_regularizer=conv_kern_reg, bias_regularizer=conv_bias_reg)(x)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
block1 = Conv1D(filter_nr, kernel_size=filter_size, padding='same', activation='linear', kernel_regularizer=conv_kern_reg, bias_regularizer=conv_bias_reg)(block1)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
# we pass embedded comment through conv1d with filter size 1 because it needs to have the same shape as block output
# if you choose filter_nr = embed_size (300 in this case) you don't have to do this part and can add emb_comment directly to block1_output
resize_emb = Conv1D(filter_nr, kernel_size=1, padding='same', activation='linear', kernel_regularizer=conv_kern_reg, bias_regularizer=conv_bias_reg)(x)
resize_emb = PReLU()(resize_emb)
block1_output = add([block1, resize_emb])
x = MaxPooling1D(pool_size=max_pool_size, strides=max_pool_strides)(block1_output)
for i in range(dp):
block1 = Conv1D(filter_nr, kernel_size=filter_size, padding='same', activation='linear', kernel_regularizer=conv_kern_reg, bias_regularizer=conv_bias_reg)(x)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
block1 = Conv1D(filter_nr, kernel_size=filter_size, padding='same', activation='linear', kernel_regularizer=conv_kern_reg, bias_regularizer=conv_bias_reg)(block1)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
block_output = add([block1, x])
print(i)
if i + 1 != dp:
x = MaxPooling1D(pool_size=max_pool_size, strides=max_pool_strides)(block_output)
x = GlobalMaxPooling1D()(block_output)
output = Dense(dense_nr, activation='linear')(x)
output = BatchNormalization()(output)
output = PReLU()(output)
if self.config.main_feature == 'all':
recurrent_units = 60
word_embedding = Embedding(self.max_w_features, self.word_embed_size, weights=[self.word_embedding], trainable=False, name='word_embedding')
word_input = Input(shape=(self.word_max_len,), name='word')
word_embedding_layer = word_embedding(word_input)
word_embedding_layer = SpatialDropout1D(0.5)(word_embedding_layer)
word_rnn_1 = Bidirectional(CuDNNGRU(recurrent_units // 2, return_sequences=True))(word_embedding_layer)
word_rnn_1 = SpatialDropout1D(0.5)(word_rnn_1)
word_rnn_2 = Bidirectional(CuDNNGRU(recurrent_units // 2, return_sequences=True))(word_rnn_1)
word_maxpool = GlobalMaxPooling1D()(word_rnn_2)
word_average = GlobalAveragePooling1D()(word_rnn_2)
output = concatenate([output, word_maxpool, word_average], axis=-1)
output = Dropout(dense_dropout)(output)
dense2 = Dense(self.n_class, activation="softmax")(output)
res_model = Model(inputs=[char_input, word_input], outputs=dense2)
else:
output = Dropout(dense_dropout)(output)
# dense2 = Dense(self.n_class, activation="softmax", kernel_regularizer=regularizers.l2(self.wd))(output)
dense2 = Dense(self.n_class, activation="softmax")(output)
res_model = Model(inputs=[char_input], outputs=dense2)
plot_model(res_model, to_file="{}.png".format(self.name), show_shapes=True)
return res_model
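# Illustrative usage (sketch; `config` and the embedding matrices/lengths are
# expected to be supplied via BasicDeepModel, so the names here are assumptions):
#   model = DpcnnModel(num_folds=5, config=config).create_model()
#   model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])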
|
nlpjoe/daguan-classify-2018
|
src/model/dpcnn_model.py
|
dpcnn_model.py
|
py
| 5,042 |
python
|
en
|
code
| 154 |
github-code
|
6
|
14637407916
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'minimumBribes' function below.
#
# The function accepts INTEGER_ARRAY q as parameter.
#
def minimumBribes(q):
    i = 1
    tshift = 0
    for p in q :
        shift = p - i
        if (shift > 2) :
            # no one may bribe more than twice, i.e. finish more than 2 ahead
            print("Too chaotic")
            return
        # everyone now ahead of position i with a larger original sticker than p
        # must have bribed their way past p
        for pr in q[max(p-2,0):i] :
            if pr > p :
                tshift += 1
        i += 1
    print(tshift)
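# Examples (HackerRank "New Year Chaos"):
#   minimumBribes([2, 1, 5, 3, 4])  -> prints 3
#   minimumBribes([2, 5, 1, 3, 4])  -> prints "Too chaotic"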
if __name__ == '__main__':
t = int(input().strip())
for t_itr in range(t):
n = int(input().strip())
q = list(map(int, input().rstrip().split()))
minimumBribes(q)
|
94aharris/BrainExt
|
Python/HakerRanker/NewYears.py
|
NewYears.py
|
py
| 663 |
python
|
en
|
code
| 3 |
github-code
|
6
|
41183939223
|
# 1) Input search term (German)
# 2) Determine the total number of elements ( = maximum index )
# 3) Loop: compare the input with each list element
# 4) If the element is found -> store its index
# 5) Use that index to access the element in the English dictionary list
'''
woerterbuch_deutsch = ["Apfel", "Birne", "Kirsche", "Melone", "Marille", "Pfirsich"]
print(woerterbuch_deutsch)
woerterbuch_english = ["apple", "pear", "cherry", "melon", "apricot", "peach"]
Wort = input("Bitte geben Sie Ihr Wort ein das Sie ins Englische übersetzen wollen:")
Index_Wort = 0
k = 0
Iterationen = len(woerterbuch_deutsch)
while k < Iterationen:
if woerterbuch_deutsch[k] == Wort:
Index_Wort = k
k += 1
print("Ihr Wort lautet Übersetzt:", woerterbuch_english[Index_Wort] )
'''
# Correct / cleaner version
woerterbuch_deutsch = ["Apfel", "Birne", "Kirsche", "Melone", "Marille", "Pfirsich"]
print(woerterbuch_deutsch)
woerterbuch_english = ["apple", "pear", "cherry", "melon", "apricot", "peach"]
Auswahl = input("Was möchten Sie tun? \n Einfügen [E] \n Löschen [L] \n Abfragen [A]:" )
Auswahl = Auswahl.upper()
if Auswahl == "E":
Wort = input("Bitte geben Sie ihr deutsches Wort ein: ")
woerterbuch_deutsch.append(Wort) #Wort in Liste hinzufügen
Word = input("Bitte geben Sie ihr englisches Wort ein: ")
woerterbuch_english.append(Word)
print(woerterbuch_deutsch)
print(woerterbuch_english)
elif Auswahl == "L" or Auswahl == "l":
Wort = input("Welches Wort Möchten sie löschen? ")
Index_Wort = 0
k = 0
Iterationen = len(woerterbuch_deutsch)
while k < Iterationen:
if woerterbuch_deutsch[k].lower() == Wort.lower():
Index_Wort = k
woerterbuch_deutsch.remove(woerterbuch_deutsch[Index_Wort])
woerterbuch_english.remove(woerterbuch_english[Index_Wort])
break
elif woerterbuch_english[k].lower() == Wort.lower():
Index_Wort = k
woerterbuch_english.remove(woerterbuch_english[Index_Wort])
woerterbuch_deutsch.remove(woerterbuch_deutsch[Index_Wort])
break
k += 1
print(woerterbuch_deutsch)
print(woerterbuch_english)
else:
Wort = input("Bitte geben Sie Ihr Wort ein das Sie ins Englische übersetzen wollen:")
Index_Wort = 0
k = 0
Iterationen = len(woerterbuch_deutsch)
while k < Iterationen:
if woerterbuch_deutsch[k].lower() == Wort.lower():
Index_Wort = k
print("Ihr Wort lautet Übersetzt:", woerterbuch_english[Index_Wort] )
break
k += 1
    if k == Iterationen:  # the loop finished without a break -> word not found
        print("Your word was not found")
'''
piviertel = 0
k = 0
Iterationen = int(input("Bitte geben Sie die gewünschte Anzahl an Iterationen ein:"))
while k < Iterationen:
print("k=" , k )
piviertel = piviertel + ( ( -1 ) ** k ) / ( 2*k + 1 )
k = k + 1
print("pi viertel =" , piviertel )
pi = piviertel * 4
print("pi=" , pi )
'''
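# The commented-out block above approximates pi with the Leibniz series:
# pi/4 = 1 - 1/3 + 1/5 - 1/7 + ... (it converges slowly, error ~ 1/(2*Iterationen)).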
|
karina1702/Karinasrepository
|
Wörterbuch_Algorithmus.py
|
Wörterbuch_Algorithmus.py
|
py
| 3,175 |
python
|
de
|
code
| 0 |
github-code
|
6
|
32059806236
|
#!/usr/bin/env python3
"""
led.py
Notes
-----
- Docstrings follow the numpydoc style:
https://numpydoc.readthedocs.io/en/latest/format.html
- Code follows the PEP 8 style guide:
https://www.python.org/dev/peps/pep-0008/
"""
import RPi.GPIO as GPIO
from time import sleep
import logging
import constants as c
LED_OUTPUT_PIN = 21
LED_TEST_TIME_SECS = 0.5
GPIO_WARNINGS_OFF = False
ON_STRING = 'ON'
OFF_STRING = 'OFF'
class Led:
"""
    Class representing an LED actuator
Attributes
----------
__pin : int
BCM GPIO pin number
__led_on : bool
True if LED on
Methods
-------
get_led_status()
Returns the status of LED
set_led_status(status)
Sets the LED status
invert_status()
Inverts the status of the LED
"""
def __init__(self, pin=LED_OUTPUT_PIN):
"""
Initializes the Led
Parameters
----------
pin : int
BCM GPIO pin number
"""
self.__pin = pin
self.__led_on = c.LED_OFF
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(GPIO_WARNINGS_OFF)
GPIO.setup(self.__pin, GPIO.OUT)
def get_status(self):
"""
Returns
-------
self.__led_on : bool
True if LED on
"""
return self.__led_on
def set_status(self, status):
"""
Sets the LED status
Parameters
----------
status : bool
True if LED on, False if LED off
"""
output_gpio = None
output_string = ''
if status == c.LED_ON:
output_gpio = GPIO.HIGH
output_string = ON_STRING
else:
output_gpio = GPIO.LOW
output_string = OFF_STRING
GPIO.output(self.__pin, output_gpio)
self.__led_on = status
logging.debug('LED status updated to {}'.format(output_string))
def invert_status(self):
"""
Inverts the LED status
- If LED status on, turn off LED
- If LED status off, turn on LED
Returns
-------
self.__led_on : bool
True if LED on
"""
self.set_status(not self.__led_on)
return self.__led_on
def led_test():
"""
Creates an Led object for manual LED verification
"""
led = Led()
led.set_status(c.LED_ON)
sleep(LED_TEST_TIME_SECS)
led.set_status(c.LED_OFF)
GPIO.cleanup()
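# Hardware assumption: an LED (with a series resistor) wired between BCM pin 21
# and ground; running this module directly lights it for LED_TEST_TIME_SECS.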
if __name__ == '__main__':
logging.basicConfig(format=c.LOGGING_FORMAT, level=c.LOGGING_DEFAULT_LEVEL)
led_test()
|
Hasan-Baig/SYSC3010_Home_Pixel
|
lightclapper/led.py
|
led.py
|
py
| 2,584 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40428589911
|
"""
Name: virtual_ip_address.py
Description: create, update, and delete operations on netbox ip_addresses for virtual machines
"""
from inspect import stack
import sys
from netbox_tools.common import get_vm, vm_id, tag_id
from netbox_tools.virtual_machine import make_vm_primary_ip, map_vm_primary_ip
OUR_VERSION = 105
class VirtualIpAddress:
"""
    create, update, and delete operations on netbox ip_addresses for virtual machines
netbox_obj = netbox instance
info = dictionary with the following keys:
mandatory
virtual_machine: vm to which the interface belongs e.g. netbox_vm
interface: interface on which ip addresses will be assigned e.g. vmnet0, etc
ip4: ipv4 address for the interface e.g. 1.1.1.0/24
optional
description: free-form description of the ip address
role: role of the ip address. Example values: loopback, vip
status: status of the ip address. Example values: active, reserved, deprecated
"""
def __init__(self, netbox_obj, info):
self.lib_version = OUR_VERSION
self._classname = __class__.__name__
self._netbox_obj = netbox_obj
self._info = info
self._args = {}
self._mandatory_keys_create_update = set()
self._mandatory_keys_create_update.add("interface")
self._mandatory_keys_create_update.add("ip4")
self._mandatory_keys_create_update.add("virtual_machine")
self._mandatory_keys_delete = set()
self._mandatory_keys_delete.add("ip4")
self._optional_keys = set()
self._optional_keys.add("description")
self._optional_keys.add("role")
self._optional_keys.add("status")
self._populate_valid_choices()
def log(self, *args):
"""
simple logger
"""
print(
f"{self._classname}(v{self.lib_version}).{stack()[1].function}: {' '.join(args)}"
)
def _populate_valid_choices(self):
"""
retrieve valid ip address choices from the users netbox instance
"""
self._valid_choices = {}
choices_dict = self._netbox_obj.ipam.ip_addresses.choices()
for item in choices_dict:
valid_values = choices_dict[item]
self._valid_choices[item] = [item["value"] for item in valid_values]
def _validate_keys_create_update(self):
"""
Verify that all mandatory create/update operation keys are set.
If all keys are not set, log an error and exit.
"""
for key in self._mandatory_keys_create_update:
if key not in self._info:
self.log(f"exiting. mandatory key {key} not found in info {self._info}")
sys.exit(1)
def _validate_keys_delete(self):
"""
Verify that all mandatory delete operation keys are set.
If all keys are not set, log an error and exit.
"""
for key in self._mandatory_keys_delete:
if key not in self._info:
self.log(f"exiting. mandatory key {key} not found in info {self._info}")
sys.exit(1)
def _set_address(self):
"""
Add address to args
"""
self._args["address"] = self.ip4
def _set_assigned_object_id(self):
"""
Add assigned_object_id to args
"""
self._args["assigned_object_id"] = vm_id(self._netbox_obj, self.virtual_machine)
def _set_description(self):
"""
Add description to args.
If user has not set this, set a generic description for them.
"""
if self.description is None:
self._args["description"] = f"{self.virtual_machine} : {self.ip4}"
else:
self._args["description"] = self.description
def _set_interface(self):
"""
Add interface to args; converting it to a netbox id
"""
self._args["interface"] = vm_id(self._netbox_obj, self.virtual_machine)
def _set_role(self):
"""
Update args with the ip address role, if the user has set this.
Exit with error if the user set an invalid role.
"""
if self.role is None:
return
if self.role in self._valid_choices["role"] or self.role == "":
self._args["role"] = self.role
else:
_valid_choices = ",".join(sorted(self._valid_choices["role"]))
self.log(
f"exiting. Invalid role. Got {self.role}",
f"Expected one of {_valid_choices}.",
)
sys.exit(1)
def _set_status(self):
"""
Update args with the ip address status, if the user has set this.
Exit with error if the user set an invalid status.
"""
if self.status is None:
return
if self.status in self._valid_choices["status"]:
self._args["status"] = self.status
else:
_valid_choices = ",".join(sorted(self._valid_choices["status"]))
self.log(
f"exiting. Invalid status. Got {self.status}",
f"Expected one of {_valid_choices}.",
)
sys.exit(1)
def _set_tags(self):
"""
Add tags, if any, to args; converting them to netbox IDs
"""
if self.tags is None:
return
self._args["tags"] = []
for tag in self.tags:
tid = tag_id(self._netbox_obj, tag)
if tid is None:
self.log(f"tag {tag} not found in Netbox. Skipping.")
continue
self._args["tags"].append(tid)
def _generate_args(self):
"""
Generate all supported arguments for create and update methods
"""
self._set_address()
self._set_assigned_object_id()
self._set_description()
self._set_interface()
self._set_role()
self._set_status()
self._set_tags()
def _initialize_vm_primary_ip(self):
"""
Initialize primary_ip4 and primary_ip to avoid errors in map_vm_primary_ip()address.save()
"""
vm_obj = get_vm(self._netbox_obj, self.virtual_machine)
vm_obj.primary_ip4 = None
vm_obj.primary_ip = None
vm_obj.save()
def create(self):
"""
create a virtual ip address
"""
self.log(f"virtual_machine {self.virtual_machine}, address {self.ip4}")
try:
self._netbox_obj.ipam.ip_addresses.create(self._args)
except Exception as _general_exception:
self.log(
"exiting.",
f"Unable to create virtual_machine {self.virtual_machine} ip_address {self.ip4}."
f"Exception detail {_general_exception}",
)
sys.exit(1)
def update(self):
"""
update a virtual ip address
"""
self.log(f"virtual_machine {self.virtual_machine}, address {self.ip4}")
self._args["id"] = self.ip_address_id
try:
self.ip_address_obj.update(self._args)
except Exception as _general_exception:
self.log(
"exiting.",
f"Unable to update virtual_machine {self.virtual_machine} ip_address {self.ip4}."
f"Exception detail {_general_exception}",
)
sys.exit(1)
def delete(self):
"""
delete a virtual ip address
"""
self._validate_keys_delete()
if self.ip_address_obj is None:
self.log("Nothing to do.", f"ip_address {self.ip4} not found in Netbox.")
return
self.log(f"address {self.ip4}")
try:
self.ip_address_obj.delete()
except Exception as _general_exception:
self.log(
"exiting.",
f"Unable to delete ip_address {self.ip4}."
f"Exception detail {_general_exception}",
)
sys.exit(1)
def create_or_update(self):
"""
entry point into create and update methods
"""
self._validate_keys_create_update()
self._generate_args()
self._initialize_vm_primary_ip()
if self.ip_address_obj is None:
self.create()
else:
self.update()
map_vm_primary_ip(
self._netbox_obj, self.virtual_machine, self.interface, self.ip4
)
make_vm_primary_ip(self._netbox_obj, self.virtual_machine, self.ip4)
@property
def description(self):
"""
Return the ip description set by the caller.
Return None if the caller did not set this.
"""
if "description" in self._info:
return self._info["description"]
return None
@property
def status(self):
"""
Return the ip status set by the caller.
Return None if the caller did not set this.
"""
if "status" in self._info:
return self._info["status"]
return None
@property
def ip_address_obj(self):
"""
Return the Netbox ip address object associated with ip4 address set by the caller.
"""
try:
address, mask = self.ip4.split("/")
except Exception as _general_exception:
self.log(
"exiting. Unexpected IP address format. Expected A.B.C.D/E.",
f"Got {self.ip4}." f"Exception detail: {_general_exception}",
)
sys.exit(1)
return self._netbox_obj.ipam.ip_addresses.get(address=address, mask=mask)
@property
def ip_address_enabled(self):
"""
Return the enabled status set by the caller.
Return None if the caller did not set this.
"""
if "ip_address_enabled" in self._info:
return self._info["ip_address_enabled"]
return None
@property
def ip_address_id(self):
"""
Return the Netbox ID for the ip4 address set by the caller.
"""
return self.ip_address_obj.id
@property
def ip_address_type(self):
"""
Return the ip address type set by the caller.
Return None if the caller did not set this.
"""
if "ip_address_type" in self._info:
return self._info["ip_address_type"]
return None
@property
def interface(self):
"""
Return the interface set by the caller.
"""
return self._info["interface"]
@property
def ip4(self):
"""
Return the ipv4 address set by the caller.
"""
return self._info["ip4"]
@property
def role(self):
"""
Return the ip address role set by the caller.
If the caller didn't set this, return None
"""
if "role" in self._info:
return self._info["role"]
return None
@property
def tags(self):
"""
Return the list of tag names set by the caller.
If the caller didn't set this, return None.
"""
if "tags" in self._info:
return self._info["tags"]
return None
@property
def virtual_machine(self):
"""
Return the virtual machine set by the caller.
"""
return self._info["virtual_machine"]
|
allenrobel/netbox-tools
|
lib/netbox_tools/virtual_ip_address.py
|
virtual_ip_address.py
|
py
| 11,371 |
python
|
en
|
code
| 6 |
github-code
|
6
|
31356519066
|
import pygame
import sys
# Set the game window size
screen_width = 800
screen_height = 600
# Button class definition
class Button:
def __init__(self, x, y, width, height, idle_image, hover_image):
self.rect = pygame.Rect(x, y, width, height)
self.idle_image = idle_image
self.hover_image = hover_image
self.image = idle_image
self.clicked = False
def draw(self, surface):
surface.blit(self.image, self.rect)
def handle_event(self, event):
if event.type == pygame.MOUSEMOTION:
if self.rect.collidepoint(event.pos):
self.image = self.hover_image
else:
self.image = self.idle_image
elif event.type == pygame.MOUSEBUTTONDOWN:
if self.rect.collidepoint(event.pos):
self.clicked = True
elif event.type == pygame.MOUSEBUTTONUP:
self.clicked = False
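# Minimal reuse sketch for Button (hypothetical plain surfaces instead of the
# image files loaded below):
#   img = pygame.Surface((150, 50)); btn = Button(10, 10, 150, 50, img, img)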
# Game class definition
class Game:
def __init__(self):
pygame.init()
self.screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("Empire Builder")
self.clock = pygame.time.Clock()
        # Load the button images
self.button_idle_img = pygame.image.load("resources/images/slimes.png").convert_alpha()
self.button_hover_img = pygame.image.load("resources/images/slime_top.png").convert_alpha()
        # Create the button
button_width = 150
button_height = 50
button_x = (screen_width - button_width) // 2
button_y = (screen_height - button_height) // 2
self.button = Button(button_x, button_y, button_width, button_height, self.button_idle_img, self.button_hover_img)
def run(self):
running = True
while running:
self.clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
                # Handle button events
self.button.handle_event(event)
self.update()
self.render()
pygame.quit()
sys.exit()
def update(self):
        # Update the game state
        if self.button.clicked:
            print("Button was clicked!")
def render(self):
        # Draw the game screen
        self.screen.fill((0, 0, 0))  # black background
        self.button.draw(self.screen)  # draw the button
pygame.display.flip()
# Start the game
game = Game()
game.run()
|
frogress/prac_pygame
|
opentutorials/chatgpt.py
|
chatgpt.py
|
py
| 2,576 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10808451041
|
from typing import Dict, Tuple
from AnalysisOfUnstructuredData.helpers.hsc.publisher import Publisher
class PublisherRelation:
titles_types: Dict[str, str]
_id: Tuple[int, int]
def __init__(self, publisher_1: Publisher, publisher_2: Publisher):
self.publisher_1 = publisher_1
self.publisher_2 = publisher_2
self.titles_types = {}
self.id = (publisher_1.id, publisher_2.id)
@property
def id(self):
return self._id
@id.setter
def id(self, set_val: Tuple[int, int]):
if set_val[0] > set_val[1]:
self._id = (set_val[1], set_val[0])
else:
self._id = set_val
def add_publication(self, publication_type: str, title: str):
self.titles_types[title] = publication_type
@property
def times_linked(self):
return len(self.titles_types)
def __eq__(self, other: 'PublisherRelation'):
return self.id == other.id
    def __hash__(self):
        return hash(self.id)  # __hash__ must return an int, not the id tuple itself
def html_label(self) -> str:
lab = """{} publications of {}. {} and {}. {}.
<table border="1" class="dataframe">\n\t<thead>\n\t\t<tr style="text-align: left;">\n\t\t\t<th>Type</th>
\t\t\t<th>Title</th>\n\t\t</tr>\n\t</thead>\n\t<tbody>\n""".format(
self.times_linked,
self.publisher_1.name, '-'.join(self.publisher_1.surname),
self.publisher_2.name, '-'.join(self.publisher_2.surname)
)
lab += '\n'.join(["\t\t<tr>\n\t\t\t<td>{}</td>\n\t\t\t<td>{}</td>\n\t\t</tr>".format(p_type, title)
for title, p_type in self.titles_types.items()])
lab += '\n\t</tbody>\n</table>'
return lab
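# Illustrative usage (Publisher objects assumed per helpers.hsc.publisher):
#   rel = PublisherRelation(p1, p2)
#   rel.add_publication("article", "Some Title")
#   rel.times_linked  # -> 1; rel.html_label() renders an HTML table of titles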
|
TheDecks/Studies
|
AnalysisOfUnstructuredData/helpers/hsc/relation.py
|
relation.py
|
py
| 1,688 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38479953864
|
# -*- coding:utf-8 -*-
class Ball:
    '''Ball built from one row of ball movement data'''
def __init__(self,ball):
self.x = ball[2]
self.y = ball[3]
self.z = ball[4]/5
self.radius = ball[4]
self.color = '#ff8c00'
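# Assumed tracking-data row layout: ball[2]=x, ball[3]=y, ball[4]=radius, with
# height approximated as radius/5 for display purposes.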
|
RobinROAR/ViewNBATrackingData
|
Ball.py
|
Ball.py
|
py
| 251 |
python
|
en
|
code
| 2 |
github-code
|
6
|
40319717427
|
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.handler import \
module_dependency_error, MODULE_EXCEPTIONS
try:
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.api import single_post
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.defaults.main import OPN_MOD_ARGS
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.helper.system import wait_for_response
except MODULE_EXCEPTIONS:
module_dependency_error()
DOCUMENTATION = 'https://opnsense.ansibleguy.net/en/latest/modules/package.html'
EXAMPLES = 'https://opnsense.ansibleguy.net/en/latest/modules/package.html'
def run_module():
module_args = dict(
action=dict(
type='str', required=True,
choices=['poweroff', 'reboot', 'update', 'upgrade', 'audit']
),
wait=dict(type='bool', required=False, default=True),
wait_timeout=dict(type='int', required=False, default=90),
poll_interval=dict(type='int', required=False, default=2),
**OPN_MOD_ARGS
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True,
)
result = {
'changed': True,
'failed': False,
'timeout_exceeded': False,
}
if not module.check_mode:
single_post(
module=module,
cnf={
'command': module.params['action'],
'module': 'core',
'controller': 'firmware',
}
)
if module.params['action'] in ['reboot', 'upgrade'] and module.params['wait']:
if module.params['debug']:
module.warn(f"Waiting for firewall to complete '{module.params['action']}'!")
result['failed'] = not wait_for_response(module=module)
if result['failed']:
result['timeout_exceeded'] = True
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
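# Example playbook task (illustrative; connection options from OPN_MOD_ARGS omitted):
#   - name: Reboot the firewall and wait until it responds again
#     ansibleguy.opnsense.system:
#       action: 'reboot'
#       wait: true
#       wait_timeout: 90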
|
ansibleguy/collection_opnsense
|
plugins/modules/system.py
|
system.py
|
py
| 2,071 |
python
|
en
|
code
| 158 |
github-code
|
6
|
74637089147
|
# TODO: (only sent audio, still need sync) receive audio packets and sync with video
# DONE: try to connect to host AFTER clicking on 'start' button
# TODO: fix crash when video is ended or trying to reconnect
import base64
import os
import socket
import sys
import numpy as np
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.uic import loadUi
from PyQt5.QtCore import pyqtSlot, QTimer, QObject, pyqtSignal, QThread
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QFileDialog, QLabel, QGraphicsScene, QGraphicsView
import cv2
from datetime import timedelta
import queue
import time
import logging, random, imutils
import os
import pyaudio, wave, subprocess
import errno
import pickle
import threading
logging.basicConfig(format="%(message)s", level=logging.INFO)
class PlayVideo(QThread):
def __init__(self, frame, fpsLabel, threadChat, playButton, stopButton, chat_socket,
progressBar, progresslabel):
super().__init__()
self.frame = frame
self.fpsLabel = fpsLabel
self.playButton = playButton
self.stopButton = stopButton
self.progressBar = progressBar
self.progresslabel = progresslabel
self.timer = QTimer()
self.timer.timeout.connect(self.playVideo)
self.timer.start(0.5)
self.threadChat = threadChat
self.playButton.clicked.connect(self.playTimer)
self.stopButton.clicked.connect(self.stopTimer)
self.fps, self.st, self.frames_to_count, self.cnt = (0, 0, 20, 0)
self.BUFF_SIZE = 65536
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.BUFF_SIZE)
self.socket_address = ('192.168.0.106', 9685) # client ip
print('Reading from:', self.socket_address)
self.client_socket.bind(self.socket_address)
self.client_socket.setblocking(False)
self.progressBar.sliderPressed.connect(self.when_slider_pressed)
self.progressBar.sliderReleased.connect(self.moveProgressBar)
self.chat_socket = chat_socket
self.slider_pressed = False
self.set_total_frames = False
def frame_to_timestamp(self, frame, fps):
return str(timedelta(seconds=(frame / fps)))
def send_message(self, message):
message = '{}: {}'.format(self.threadChat.nickname, message)
self.chat_socket.send(message.encode('ascii'))
def playTimer(self):
# start timer
self.send_message('/play')
def stopTimer(self):
# stop timer
self.send_message('/pause')
def when_slider_pressed(self):
self.slider_pressed = True
def moveProgressBar(self):
value = self.progressBar.value()
self.send_message('/skipto ' + str(value))
self.slider_pressed = False
def playVideo(self):
try:
packet_ser, _ = self.client_socket.recvfrom(self.BUFF_SIZE)
packet = pickle.loads(packet_ser)
# TODO: receive total_frames and real_fps from the chat TCP socket only once
# can't since server can open different video file and client metadata doesn't update
            # consider sending total_frames and real_fps to the client over TCP chat every time the file changes
current_frame_no = packet["frame_nb"]
total_frames = packet["total_frames"]
real_fps = packet["fps"]
if not self.set_total_frames:
self.progressBar.setMinimum(0)
self.progressBar.setMaximum(total_frames)
self.set_total_frames = True
if self.slider_pressed is False:
self.progressBar.setValue(current_frame_no)
progress = self.frame_to_timestamp(current_frame_no, real_fps) + ' / ' \
+ self.frame_to_timestamp(total_frames, real_fps)
self.progresslabel.setText(progress)
data = base64.b64decode(packet["frame"], ' /')
            npdata = np.frombuffer(data, dtype=np.uint8)  # np.fromstring is deprecated for binary data
frame = cv2.imdecode(npdata, 1)
# convert image to RGB format
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# get image infos
height, width, channel = frame.shape
# print(height, width, channel)
step = channel * width
# create QImage from image
qImg = QImage(frame.data, width, height, step, QImage.Format_RGB888)
self.frame.setPixmap(QPixmap.fromImage(qImg))
self.fpsLabel.setText(str(round(self.fps, 1)))
if self.cnt == self.frames_to_count:
try:
self.fps = round(self.frames_to_count / (time.time() - self.st))
self.st = time.time()
self.cnt = 0
except:
pass
self.cnt += 1
# because of socket being non-blocking
# we must pass the error when not receiving frames (video is paused)
except BlockingIOError:
pass
except Exception as e:
logging.error(e)
# print('received')
def quit(self):
print('closed thread')
class TcpChat(QThread):
def __init__(self, chat_socket):
super().__init__()
self.nickname = 'test_user' # input("Choose your nickname: ")
self.client = chat_socket
self.client.connect(('192.168.0.106', 7976)) # connecting client to server
# self.client.setblocking(False)
def receive(self):
while True: # making valid connection
try:
message = self.client.recv(1024).decode('ascii')
if message == 'NICKNAME':
self.client.send(self.nickname.encode('ascii'))
else:
print(message) # received in bytes
except Exception as e: # case on wrong ip/port details
print("An error occured on the server side!")
logging.error(e)
self.client.close()
break
def write(self):
while True: # message layout
message = '{}: {}'.format(self.nickname, input(''))
self.client.send(message.encode('ascii'))
def run(self):
receive_thread = threading.Thread(target=self.receive) # receiving multiple messages
receive_thread.start()
write_thread = threading.Thread(target=self.write) # sending messages
write_thread.start()
class AudioRec(QThread):
def __init__(self):
super().__init__()
self.host_name = socket.gethostname()
self.host_ip = '192.168.0.106' # client ip
print(self.host_ip)
self.port = 9631
# For details visit: www.pyshine.com
self.q = queue.Queue(maxsize=100)
self.BUFF_SIZE = 65536
self.audio_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.audio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.BUFF_SIZE)
self.socket_address = (self.host_ip, self.port)
self.audio_socket.bind(self.socket_address)
self.p = pyaudio.PyAudio()
self.CHUNK = 1024
self.stream = self.p.open(format=self.p.get_format_from_width(2),
channels=2,
rate=44100,
output=True,
frames_per_buffer=self.CHUNK)
self.timer = QTimer()
self.timer.timeout.connect(self.playAudio)
self.timer.start(1000 * 0.8 * self.CHUNK / 44100)
t1 = threading.Thread(target=self.getAudioData, args=())
t1.start()
print('Now Playing...')
def getAudioData(self):
while True:
try:
self.frame, _ = self.audio_socket.recvfrom(self.BUFF_SIZE)
self.q.put(self.frame)
except BlockingIOError:
pass
except Exception as e:
logging.error(e)
def playAudio(self):
if not self.q.empty():
frame = self.q.get()
self.stream.write(frame)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
loadUi('open_client.ui', self)
self.frame.setScaledContents(True)
self.setWindowTitle('OpenParty Client')
self.totalFrames = 0
self.fps = 0
self.threadVideoGen = QThread()
self.threadVideoPlay = QThread()
self.threadAudio = QThread()
self.threadChat = QThread()
self.readHost.clicked.connect(self.startAllThreads)
self.HEADER_LENGTH = 10
self.chat_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.chat_started = False
def startAllThreads(self):
if not self.chat_started:
self.startTcpChat()
self.chat_started = True
if not self.threadAudio.isRunning():
self.startAudio()
if not self.threadVideoPlay.isRunning():
self.startVideoPlay()
def closeEvent(self, event):
print('closed manually')
self.chat_socket.close()
self.threadVideoPlay.terminate()
self.threadAudio.terminate()
self.threadChat.terminate()
os._exit(1)
def startVideoPlay(self):
self.threadVideoPlay = PlayVideo(self.frame, self.fpsLabel, self.threadChat,
self.playButton, self.stopButton,
self.chat_socket,
self.progressBar, self.progresslabel)
self.threadVideoPlay.start()
def startAudio(self):
self.threadAudio = AudioRec()
self.threadAudio.start()
def startTcpChat(self):
self.threadChat = TcpChat(self.chat_socket)
self.threadChat.start()
app = QApplication(sys.argv)
widget = MainWindow()
widget.show()
sys.exit(app.exec_())
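# Network assumptions baked into this client: a companion server at 192.168.0.106
# streaming video over UDP 9685 and audio over UDP 9631, a TCP chat on port 7976,
# and an 'open_client.ui' layout file next to this script.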
|
shully899509/OpenParty
|
pyqt player client.py
|
pyqt player client.py
|
py
| 10,093 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74028440509
|
from setup import *
from lesson import full_data
'''
ex 7.1
Taking the elements data frame, which PySpark code is equivalent to the following
SQL statement?
select count(*) from elements where Radioactive is not null;
a element.groupby("Radioactive").count().show()
b elements.where(F.col("Radioactive").isNotNull()).groupby().count().show()
c elements.groupby("Radioactive").where(F.col("Radioactive").isNotNull()).show()
d elements.where(F.col("Radioactive").isNotNull()).count()
e None of the queries above
->
b
----------------------------------
----------------------------------
ex 7.2:
If we look at the code that follows, we can simplify it even further and avoid creating
two tables outright. Can you write a summarized_data without having to use a table
other than full_data and no join? (Bonus: Try using pure PySpark, then pure Spark
SQL, and then a combo of both.)
full_data = full_data.selectExpr(
"model", "capacity_bytes / pow(1024, 3) capacity_GB", "date", "failure"
)
drive_days = full_data.groupby("model", "capacity_GB").agg(
F.count("*").alias("drive_days")
)
failures = (
full_data.where("failure = 1")
.groupby("model", "capacity_GB")
.agg(F.count("*").alias("failures"))
)
summarized_data = (
drive_days.join(failures, on=["model", "capacity_GB"], how="left")
.fillna(0.0, ["failures"])
.selectExpr("model", "capacity_GB", "failures / drive_days failure_rate")
.cache()
)
'''
def ex_seven_two_only_pyspark():
return (
full_data
.selectExpr("model", "capacity_bytes / pow(1024, 3) capacity_GB", "date", "failure")
.groupby("model", "capacity_GB")
.agg(
F.sum(F.when(F.col("failure") == 1, 1).otherwise(0)).alias("failures"),
# F.sum("failure").alias("failures"),
F.count("*").alias("drive_days"),
)
.selectExpr("model", "capacity_GB", "failures / drive_days failure_rate")
.show()
)
def ex_seven_two_only_sql():
    full_data.createOrReplaceTempView("drive_stats")
return spark.sql(
"""
with drive_days as (
select model, count(*) as drive_days
from drive_stats
group by model
),
failures as (
select model, count(*) as failures
from drive_stats
where failure == 1
group by model
)
        select d.model, f.failures / d.drive_days as failure_rate
from drive_days d inner join failures f on d.model = f.model
"""
).show(20)
'''
The analysis in the chapter is flawed in that the age of a drive is not taken into consideration.
Instead of ordering the model by failure rate, order by average age at failure
(assume that every drive fails on the maximum date reported if they are still alive).
(Hint: Remember that you need to count the age of each drive first.)
'''
def ex_seven_three():
    full_data.createOrReplaceTempView("drive_stats")
    return spark.sql(
        """
        with failure_date as (
            select model, min(date) as dateFailure
            from drive_stats
            where failure == 1
            group by model
        ),
        last_date as (
            select model, max(date) as dateFailure
            from drive_stats
            group by model
        )
        select model, dateFailure from
        (
            select l.model, case when f.dateFailure is not null then f.dateFailure else l.dateFailure end as dateFailure
            from last_date l left join failure_date f on l.model = f.model
        )
        order by dateFailure
        """
    ).show(20)
'''
What is the total capacity (in TB) that Backblaze records at the beginning of each month?
'''
def ex_seven_four_only_sql():
    full_data.createOrReplaceTempView("drive_stats")
return spark.sql(
"""
select date, sum(capacity_bytes) / pow(1024, 4) as total_capacity
from drive_stats
where substring(date, 9, 2) = "01"
group by date
order by date
"""
).show()
def ex_seven_four_only_pyspark():
return (
full_data
.selectExpr("date", "capacity_bytes")
.where(F.substring(F.col("date"), 9, 2) == '01')
.groupby("date")
.agg(
(F.sum(F.col("capacity_bytes")) / pow(1024, 4)).alias("total_capacity")
)
.orderBy(F.col("date").asc())
.selectExpr("date", "total_capacity")
).show()
'''
NOTE: There is a much more elegant way to solve this problem that we see in chapter 10 using window functions.
In the meantime, this exercise can be solved with the judicious usage of group bys and joins.
If you look at the data, you’ll see that some drive models can report an erroneous capacity. In the data preparation
stage, restage the full_data data frame so that the most common capacity for each drive is used.
'''
def ex_seven_five():
capacity_count = (
full_data
.groupby("model", "capacity_bytes")
.agg(F.count("*").alias("count_occurences"))
)
most_common_occurence = (
capacity_count
.groupby("model")
.agg(F.max("count_occurences").alias("max_occurences"))
)
return(
most_common_occurence
.join(
capacity_count,
(capacity_count["model"] == most_common_occurence["model"]) &
(capacity_count["count_occurences"] == most_common_occurence["max_occurences"])
)
).select(most_common_occurence["model"], "capacity_bytes").show()
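# `spark` and `F` (pyspark.sql.functions) are assumed to be provided by
# `from setup import *`; each exercise can then be invoked directly, e.g.:
#   ex_seven_two_only_pyspark()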
|
eldoria/learning-pyspark
|
chapter_7/functions_exercices.py
|
functions_exercices.py
|
py
| 5,500 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16407449430
|
import mysql.connector
import csv
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import colors as mcolors
import datetime
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
# Sort colors by hue, saturation, value and name.
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
for name, color in colors.items())
colors = [name for hsv, name in by_hsv]
hostname = 'localhost'
username = '' # no privileges set so this works
password = ''
database = 'msr14'
conn = mysql.connector.connect(host=hostname, user=username, passwd=password, db=database)
# Simple routine to run a query on a database and print the results:
def queryDB(conn, query) :
cur = conn.cursor()
cur.execute(query)
return cur.fetchall()
## PR COMMENTS
project_ids = queryDB(conn, "select projects.id as id, pull_requests.pullreq_id as pullred_id from (select id from projects where forked_from is null) as projects join pull_requests on projects.id = pull_requests.base_repo_id;")[:-1]
projects_to_pr_comments = {}
sid = SentimentIntensityAnalyzer()
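# VADER's 'compound' score is a normalized sentiment in [-1, 1]; values above
# ~0.05 are conventionally read as positive and below ~-0.05 as negative.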
for ID, PR_ID in project_ids:
comments = "select created_at, body from pull_request_comments where pull_request_id = {} ORDER BY created_at;".format(PR_ID)
rows = queryDB(conn, comments)
dates = []
scores = []
for date, comment in rows:
ss = sid.polarity_scores(comment)
dates.append(date)
scores.append(ss['compound'])
""" if ss['compound'] < -0.75 or ss['compound'] > 0.75:
print comment
print ss"""
if len(dates) > 0:
if ID in projects_to_pr_comments:
current = projects_to_pr_comments[ID]
current[0].extend(dates)
current[1].extend(scores)
else:
projects_to_pr_comments[ID] = [dates,scores]
## COMMITS
project_ids = queryDB(conn, "select id from projects where forked_from is null;")[:-1]
projects_to_commits = {}
for ID in [x[0] for x in project_ids]:
commits = "select project_id, created_at from commits where project_id = {} ORDER BY created_at;".format(ID)
rows = queryDB(conn, commits)
rows = [r[1] for r in rows] #.strftime('%m/%d/%Y')
projects_to_commits[ID] = rows
conn.close()
## PLOT
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary chartjunk.
j = 0
for ID,date_scores in projects_to_pr_comments.items():
fig = plt.figure(figsize=(9, 9))
ax = plt.subplot(111)
plt.title("Total Commits and Sentiment Analysis for Pull Requests Over Time For Repository {} on Github".format(ID), fontsize=10)
#plt.ylabel("Average Sentiment", fontsize=12)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
indices = [i[0] for i in sorted(enumerate(date_scores[0]), key=lambda x:x[1])]
sorted_dates = []
sorted_scores = []
current_sum = 0.0
count = 0
for i in indices:
sorted_dates.append(date_scores[0][i])
current_sum += date_scores[1][i]
count += 1.0
sorted_scores.append(current_sum)
sorted_scores = [i / current_sum for i in sorted_scores]
plt.plot_date(sorted_dates,sorted_scores, '-b', label="Total Sentiment")
    commit_dates_all = projects_to_commits[ID]
    commit_dates = []  # renamed to avoid shadowing the imported datetime module
    for d in commit_dates_all:
        if d >= date_scores[0][0]:
            commit_dates.append(d)  # was appending the whole list instead of the date
    normalized = [i / float(len(commit_dates)) for i in range(1, len(commit_dates) + 1)]
    plt.plot_date(commit_dates, normalized, '-g', label="Total Commits (Normalized)")
ax.legend(loc=0)
fig.savefig('plots/total_commits_and_total_sentiment_date_normalized/{}_total_commits_and_total_sentiment.png'.format(ID))
plt.close(fig)
#plt.show()
|
natashaarmbrust/github-sentiment
|
pr_sentiments.py
|
pr_sentiments.py
|
py
| 3,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22780895982
|
from project.band import Band
from project.band_members.drummer import Drummer
from project.band_members.guitarist import Guitarist
from project.band_members.singer import Singer
from project.concert import Concert
class ConcertTrackerApp:
def __init__(self):
self.bands = []
self.musicians = []
self.concerts = []
@property
def available_musicians(self):
return {
"Guitarist": Guitarist,
"Drummer": Drummer,
"Singer": Singer
}
@staticmethod
def __get_object_from_attribute(attribute, value, iterable):
for obj in iterable:
if getattr(obj, attribute) == value:
return obj
@staticmethod
def __check_for_needed_types_of_musicians(band):
all_musician_types_in_band = [x.__class__.__name__ for x in band.members]
if len(set(all_musician_types_in_band)) < 3:
raise Exception(f"{band.name} can't start the concert because it doesn't have enough members!")
@staticmethod
def __check_collection_for_skill(skill: str, collection: list):
for i in collection:
if skill not in i.skills:
return False
return True
def __check_band_for_needed_skills(self, genre, band: Band):
valid = False
drummers = [x for x in band.members if x.__class__.__name__ == "Drummer"]
singers = [x for x in band.members if x.__class__.__name__ == "Singer"]
guitarists = [x for x in band.members if x.__class__.__name__ == "Guitarist"]
if genre == "Rock":
valid = all((self.__check_collection_for_skill("play the drums with drumsticks", drummers),
self.__check_collection_for_skill("sing high pitch notes", singers),
self.__check_collection_for_skill("play rock", guitarists)))
elif genre == "Metal":
valid = all((self.__check_collection_for_skill("play the drums with drumsticks", drummers),
self.__check_collection_for_skill("sing low pitch notes", singers),
self.__check_collection_for_skill("play metal", guitarists)))
elif genre == 'Jazz':
valid = all((self.__check_collection_for_skill("play the drums with drum brushes", drummers),
self.__check_collection_for_skill("sing high pitch notes", singers),
self.__check_collection_for_skill("sing low pitch notes", singers),
self.__check_collection_for_skill("play jazz", guitarists)))
if not valid:
raise Exception(f"The {band.name} band is not ready to play at the concert!")
def create_musician(self, musician_type: str, name: str, age: int):
if musician_type not in self.available_musicians:
raise ValueError("Invalid musician type!")
if self.__get_object_from_attribute("name", name, self.musicians):
raise Exception(f"{name} is already a musician!")
musician = self.available_musicians[musician_type](name, age)
self.musicians.append(musician)
return f"{name} is now a {musician_type}."
def create_band(self, name: str):
if self.__get_object_from_attribute("name", name, self.bands):
raise Exception(f"{name} band is already created!")
self.bands.append(Band(name))
return f"{name} was created."
def create_concert(self, genre: str, audience: int, ticket_price: float, expenses: float, place: str):
concert = self.__get_object_from_attribute("place", place, self.concerts)
if concert:
raise Exception(f"{place} is already registered for {concert.genre} concert!")
self.concerts.append(Concert(genre, audience, ticket_price, expenses, place))
return f"{genre} concert in {place} was added."
def add_musician_to_band(self, musician_name: str, band_name: str):
musician = self.__get_object_from_attribute("name", musician_name, self.musicians)
band: Band = self.__get_object_from_attribute("name", band_name, self.bands)
if not musician:
raise Exception(f"{musician_name} isn't a musician!")
if not band:
raise Exception(f"{band_name} isn't a band!")
band.members.append(musician)
return f"{musician_name} was added to {band_name}."
def remove_musician_from_band(self, musician_name: str, band_name: str):
band: Band = self.__get_object_from_attribute("name", band_name, self.bands)
if not band:
raise Exception(f"{band_name} isn't a band!")
musician = self.__get_object_from_attribute("name", musician_name, band.members)
if not musician:
raise Exception(f"{musician_name} isn't a member of {band_name}!")
band.members.remove(musician)
return f"{musician_name} was removed from {band_name}."
def start_concert(self, concert_place: str, band_name: str):
band = self.__get_object_from_attribute("name", band_name, self.bands)
concert = self.__get_object_from_attribute("place", concert_place, self.concerts)
genre = concert.genre
self.__check_for_needed_types_of_musicians(band)
self.__check_band_for_needed_skills(genre, band)
profit = (concert.audience * concert.ticket_price) - concert.expenses
return f"{band_name} gained {profit:.2f}$ from the {genre} concert in {concert_place}."
|
DanieII/SoftUni-Advanced-2023-01
|
oop/exam_practice/19_December_2022/project/concert_tracker_app.py
|
concert_tracker_app.py
|
py
| 5,504 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31963141041
|
from sys import stdin
input = stdin.readline
n = int(input())
dp = {1: 0}
def rec(n: int):
    if n in dp:
return dp[n]
if n % 3 == 0 and n % 2 == 0:
dp[n] = min(rec(n//3)+1, rec(n//2)+1)
elif n % 3 == 0:
dp[n] = min(rec(n//3)+1, rec(n-1)+1)
elif n % 2 == 0:
dp[n] = min(rec(n//2)+1, rec(n-1)+1)
else:
dp[n] = rec(n-1)+1
return dp[n]
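# Example: rec(10) == 3 (10 -> 9 -> 3 -> 1). Note the n-1 branch can recurse
# deeply, so sys.setrecursionlimit may need raising for large n.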
print(rec(n))
|
yongwoo-jeong/Algorithm
|
백준/Silver/1463. 1로 만들기/1로 만들기.py
|
1로 만들기.py
|
py
| 440 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30980414030
|
from matplotlib.pyplot import draw
import pygame
from pygame.locals import *
pygame.init()
pygame.mixer.init()
# set screen resolution
resolution = (725,725)
# open a screen of above resolution
screen = pygame.display.set_mode(resolution)
# storing screen variable values
width = screen.get_width()
height = screen.get_height()
# text_on_screen() not affected by catPath variable
# main_menu() not affected by catPath variable
# tableGen() not affected by catPath variable
# get appropriate cat path to display in outcome screen
def getCatPath(state):
if state == 0:
catPath = "assets/wincat.png"
elif state == 1:
catPath = "assets/losecat.png"
elif state == 2:
catPath = "assets/draw.png"
return catPath
# updateScore() not affected by catPath variable
# displayScore() not affected by catPath variable
# gameTime() not affected by catPath variable
# checkWinner() not affected by catPath variable
# getDice() not affected by catPath variable
# showDice() not affected by catPath variable
# gameLogic() not affected by catPath variable
# user won
def winScreen(die1, die2, num, score):
status = num
# get path to cat image based on win/lose/draw state passed
catPath = getCatPath(status)
cat = pygame.transform.scale(pygame.image.load(catPath).convert_alpha(), (400, 450))
#display cat
screen.blit(cat, ((width/4),(height/2)-325))
# screen for when user loses
def loseScreen(die1, die2, num, score):
status = num
# get path to cat image based on win/lose/draw state passed
catPath = getCatPath(status)
cat = pygame.transform.scale(pygame.image.load(catPath).convert_alpha(), (400, 450))
#display cat
screen.blit(cat, ((width/4),(height/2)-325))
# screen for when computer and user dice are equal
def drawScreen(die1, num, score):
status = num
# get appropriate cat based on game status (win/loss/draw)
catPath = getCatPath(status)
cat = pygame.transform.scale(pygame.image.load(catPath).convert_alpha(), (400, 450))
    # Display cat on screen
screen.blit(cat, ((width/4),(height/2)-325))
|
jessica-leishman/high-rollers
|
analysis_static/manual slices/hrStatic4.py
|
hrStatic4.py
|
py
| 2,135 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27959320759
|
import os
from os import getenv
from dotenv import load_dotenv
load_dotenv()
# FOR CODES
API_ID = int(getenv("API_ID","2302493"))
API_HASH = getenv("API_HASH","1bf8344851a88633343fde339f2eee20")
SUDO_USERS = list(map(int, getenv("SUDO_USERS", "5366284852").split()))
LOGGER = int(getenv("LOGGER","-1001804302628"))
OWNER = int(getenv("OWNER_ID","5366284852"))
NAME = getenv("ALIVE_NAME","ANIRUDH OP")
OWN_USERNAME = getenv("OWN_USERNAME","@KATTAR_HINDU_OP")
ALIVE_PIC = getenv("ALIVE_PIC","https://te.legra.ph/file/a66beb72764269e744911.jpg")
# FOR SPAMBOT
TOKEN1 = getenv("TOKEN1", None)
TOKEN2 = getenv("TOKEN2", None)
TOKEN3 = getenv("TOKEN3", None)
TOKEN4 = getenv("TOKEN4", None)
TOKEN5 = getenv("TOKEN5", None)
TOKEN6 = getenv("TOKEN6", None)
TOKEN7 = getenv("TOKEN7", None)
TOKEN8 = getenv("TOKEN8", None)
TOKEN9 = getenv("TOKEN9", None)
TOKEN10 = getenv("TOKEN10", None)
|
Anirudh1212121/DcSpamBot
|
config.py
|
config.py
|
py
| 899 |
python
|
en
|
code
| null |
github-code
|
6
|
32010842805
|
class Spell:
def __init__(self, name="Fireball", damage=30, mana_cost=50, cast_range=2):
self.name = name
self.damage = damage
self.mana_cost = mana_cost
self.cast_range = cast_range
@property
def get_spell(self):
        spell_prop = {}
        spell_prop['name'] = self.name
        spell_prop['damage'] = self.damage
        spell_prop['mana_cost'] = self.mana_cost
        spell_prop['cast_range'] = self.cast_range
        return spell_prop
def __str__(self):
return str(self.name) + ' ' + str(self.damage) + ' mana cost ' + str(self.mana_cost) + ' cast range ' + str(self.cast_range)
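# Example:
#   Spell("Frostbolt", damage=20, mana_cost=30).get_spell
#   -> {'name': 'Frostbolt', 'damage': 20, 'mana_cost': 30, 'cast_range': 2}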
|
1oss1ess/HackBulgaria-Programming101-Python-2018
|
week-7/engin/spell.py
|
spell.py
|
py
| 654 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23205117046
|
alist = []
for i in range(1,101):
alist.append(i)
hangshu = 3
#
lieshu = int(len(alist)/hangshu)
# lastlieshu = lieshu + len(alist) % hangshu
# print(lieshu, lastlieshu)
for i in range(0, len(alist)):
    if (i+1)%lieshu == 0 and (i+1)/lieshu != hangshu:
        print(alist[i])
    else:
        print(alist[i], end=" ")  # separate the numbers so the rows stay readable
|
jaredchin/Core-Python-Programming
|
第六章/练习/6-19.py
|
6-19.py
|
py
| 328 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1544268645
|
import itertools
def test(kenken_grid):
n = kenken_grid[0][0]
dom = []
for i in range(n):
dom.append(i + 1)
vars = []
for i in dom:
for j in dom:
vars.append('V{}{}'.format(i, j))
cons = []
for i in range(n):
vars_row = vars[(i * n): ((i + 1) * n)]
vars_col = []
for j in range(n):
vars_col.append(vars[i + (j * n)])
        for var_pair in itertools.combinations(vars_row, 2):
            cons.append("C({},{})".format(var_pair[0], var_pair[1]))
        for var_pair in itertools.combinations(vars_col, 2):
            cons.append("C({},{})".format(var_pair[0], var_pair[1]))
return cons
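# Illustrative call (n is read from kenken_grid[0][0]):
#   test([[2]]) -> ['C(V11,V12)', 'C(V11,V21)', 'C(V21,V22)', 'C(V12,V22)']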
|
monkeykingg/projects
|
3rd_year/csc384/A2/sol/test.py
|
test.py
|
py
| 691 |
python
|
en
|
code
| 2 |
github-code
|
6
|
27322987878
|
from __future__ import print_function, division, absolute_import, unicode_literals
import logging
import os
from inspect import isclass
from tempfile import NamedTemporaryFile
from collections import OrderedDict
from fontTools.misc.py23 import tobytes, tounicode, UnicodeIO
from fontTools.feaLib.parser import Parser
from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
from fontTools.feaLib.error import IncludedFeaNotFound, FeatureLibError
from fontTools import mtiLib
from ufo2ft.constants import MTI_FEATURES_PREFIX
from ufo2ft.featureWriters import (
KernFeatureWriter,
MarkFeatureWriter,
loadFeatureWriters,
ast,
)
logger = logging.getLogger(__name__)
def parseLayoutFeatures(font):
""" Parse OpenType layout features in the UFO and return a
feaLib.ast.FeatureFile instance.
"""
featxt = tounicode(font.features.text or "", "utf-8")
if not featxt:
return ast.FeatureFile()
buf = UnicodeIO(featxt)
ufoPath = font.path
includeDir = None
if ufoPath is not None:
# The UFO v3 specification says "Any include() statements must be relative to
# the UFO path, not to the features.fea file itself". We set the `name`
# attribute on the buffer to the actual feature file path, which feaLib will
# pick up and use to attribute errors to the correct file, and explicitly set
# the include directory to the parent of the UFO.
ufoPath = os.path.normpath(ufoPath)
buf.name = os.path.join(ufoPath, "features.fea")
includeDir = os.path.dirname(ufoPath)
glyphNames = set(font.keys())
try:
parser = Parser(buf, glyphNames, includeDir=includeDir)
doc = parser.parse()
except IncludedFeaNotFound as e:
if ufoPath and os.path.exists(os.path.join(ufoPath, e.args[0])):
logger.warning(
"Please change the file name in the include(...); "
"statement to be relative to the UFO itself, "
"instead of relative to the 'features.fea' file "
"contained in it."
)
raise
return doc
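# Example (sketch): given a defcon.Font `ufo`, parseLayoutFeatures(ufo) returns a
# feaLib FeatureFile AST whose `statements` list can be inspected or extended
# before the OpenType tables are compiled.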
class BaseFeatureCompiler(object):
"""Base class for generating OpenType features and compiling OpenType
layout tables from these.
"""
def __init__(self, ufo, ttFont=None, glyphSet=None, **kwargs):
"""
Args:
ufo: an object representing a UFO (defcon.Font or equivalent)
containing the features source data.
ttFont: a fontTools TTFont object where the generated OpenType
tables are added. If None, an empty TTFont is used, with
the same glyph order as the ufo object.
glyphSet: a (optional) dict containing pre-processed copies of
the UFO glyphs.
"""
self.ufo = ufo
if ttFont is None:
from fontTools.ttLib import TTFont
from ufo2ft.util import makeOfficialGlyphOrder
ttFont = TTFont()
ttFont.setGlyphOrder(makeOfficialGlyphOrder(ufo))
self.ttFont = ttFont
glyphOrder = ttFont.getGlyphOrder()
if glyphSet is not None:
assert set(glyphOrder) == set(glyphSet.keys())
else:
glyphSet = ufo
self.glyphSet = OrderedDict((gn, glyphSet[gn]) for gn in glyphOrder)
def setupFeatures(self):
""" Make the features source.
**This should not be called externally.** Subclasses
must override this method.
"""
raise NotImplementedError
def buildTables(self):
""" Compile OpenType feature tables from the source.
**This should not be called externally.** Subclasses
must override this method.
"""
raise NotImplementedError
def setupFile_features(self):
""" DEPRECATED. Use 'setupFeatures' instead. """
_deprecateMethod("setupFile_features", "setupFeatures")
self.setupFeatures()
def setupFile_featureTables(self):
""" DEPRECATED. Use 'setupFeatures' instead. """
_deprecateMethod("setupFile_featureTables", "buildTables")
self.buildTables()
def compile(self):
if "setupFile_features" in self.__class__.__dict__:
_deprecateMethod("setupFile_features", "setupFeatures")
self.setupFile_features()
else:
self.setupFeatures()
if "setupFile_featureTables" in self.__class__.__dict__:
_deprecateMethod("setupFile_featureTables", "buildTables")
self.setupFile_featureTables()
else:
self.buildTables()
return self.ttFont
def _deprecateMethod(arg, repl):
import warnings
warnings.warn(
"%r method is deprecated; use %r instead" % (arg, repl),
category=UserWarning,
stacklevel=3,
)
class FeatureCompiler(BaseFeatureCompiler):
"""Generate automatic features and compile OpenType tables from Adobe
Feature File stored in the UFO, using fontTools.feaLib as compiler.
"""
defaultFeatureWriters = [KernFeatureWriter, MarkFeatureWriter]
def __init__(self, ufo, ttFont=None, glyphSet=None, featureWriters=None, **kwargs):
"""
Args:
featureWriters: a list of BaseFeatureWriter subclasses or
pre-initialized instances. The default value (None) means that:
- first, the UFO lib will be searched for a list of featureWriters
under the key "com.github.googlei18n.ufo2ft.featureWriters"
(see loadFeatureWriters).
- if that is not found, the default list of writers will be used:
[KernFeatureWriter, MarkFeatureWriter]. This generates "kern"
(or "dist" for Indic scripts), "mark" and "mkmk" features.
If the featureWriters list is empty, no automatic feature is
generated and only pre-existing features are compiled.
"""
BaseFeatureCompiler.__init__(self, ufo, ttFont, glyphSet)
self.initFeatureWriters(featureWriters)
if kwargs.get("mtiFeatures") is not None:
import warnings
warnings.warn(
"mtiFeatures argument is ignored; "
"you should use MtiLibFeatureCompiler",
category=UserWarning,
stacklevel=2,
)
def initFeatureWriters(self, featureWriters=None):
""" Initialize feature writer classes as specified in the UFO lib.
If none are defined in the UFO, the default feature writers are used:
currently, KernFeatureWriter and MarkFeatureWriter.
The 'featureWriters' argument can be used to override these.
The method sets the `self.featureWriters` attribute with the list of
writers.
Note that the writers that generate GSUB features are placed first in
this list, before all others. This is because the GSUB table may be
used in the subsequent feature writers to resolve substitutions from
glyphs with unicodes to their alternates.
"""
if featureWriters is None:
featureWriters = loadFeatureWriters(self.ufo)
if featureWriters is None:
featureWriters = self.defaultFeatureWriters
gsubWriters = []
others = []
for writer in featureWriters:
if isclass(writer):
writer = writer()
if writer.tableTag == "GSUB":
gsubWriters.append(writer)
else:
others.append(writer)
self.featureWriters = gsubWriters + others
def setupFeatures(self):
"""
Make the features source.
**This should not be called externally.** Subclasses
may override this method to handle the file creation
in a different way if desired.
"""
if self.featureWriters:
featureFile = parseLayoutFeatures(self.ufo)
for writer in self.featureWriters:
writer.write(self.ufo, featureFile, compiler=self)
# stringify AST to get correct line numbers in error messages
self.features = featureFile.asFea()
else:
# no featureWriters, simply read existing features' text
self.features = tounicode(self.ufo.features.text or "", "utf-8")
def writeFeatures(self, outfile):
if hasattr(self, "features"):
outfile.write(self.features)
def buildTables(self):
"""
Compile OpenType feature tables from the source.
Raises a FeaLibError if the feature compilation was unsuccessful.
**This should not be called externally.** Subclasses
may override this method to handle the table compilation
in a different way if desired.
"""
if not self.features:
return
# the path is used by the lexer to follow 'include' statements;
# if we generated some automatic features, includes have already been
        # resolved, and we work from a string which doesn't exist on disk
path = self.ufo.path if not self.featureWriters else None
try:
addOpenTypeFeaturesFromString(self.ttFont, self.features, filename=path)
except FeatureLibError:
if path is None:
# if compilation fails, create temporary file for inspection
data = tobytes(self.features, encoding="utf-8")
with NamedTemporaryFile(delete=False) as tmp:
tmp.write(data)
logger.error("Compilation failed! Inspect temporary file: %r", tmp.name)
raise
class MtiFeatureCompiler(BaseFeatureCompiler):
""" Compile OpenType layout tables from MTI feature files using
fontTools.mtiLib.
"""
def setupFeatures(self):
ufo = self.ufo
features = {}
# includes the length of the "/" separator
prefixLength = len(MTI_FEATURES_PREFIX) + 1
for fn in ufo.data.fileNames:
if fn.startswith(MTI_FEATURES_PREFIX) and fn.endswith(".mti"):
content = tounicode(ufo.data[fn], encoding="utf-8")
features[fn[prefixLength:-4]] = content
self.mtiFeatures = features
def buildTables(self):
for tag, features in self.mtiFeatures.items():
table = mtiLib.build(features.splitlines(), self.ttFont)
assert table.tableTag == tag
self.ttFont[tag] = table
|
Torneo-Tipografico-Comunidad/Torneo-2020
|
Calmadita /05_SOURCES/sources/venv/lib/python3.7/site-packages/ufo2ft/featureCompiler.py
|
featureCompiler.py
|
py
| 10,497 |
python
|
en
|
code
| 7 |
github-code
|
6
|
31350011958
|
import unittest
from seleniumwire import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
# This code uses the Google Chrome browser to run a single test against the python.org website.
# It uses the site's search bar to search for "pycon".
# Selenium installation: 3 marks. "pip install selenium"
# Conducting API testing: 5 marks. "Created unittest for API Testing"
# Sharing the right code and configurations used for API testing: 5 marks. "Test passed so code is right"
class PythonOrgSearch(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(ChromeDriverManager().install())
def test_search_in_python_org(self):
driver = self.driver
driver.get("http://www.python.org")
self.assertIn("Python", driver.title)
elem = driver.find_element(By.NAME, "q")
elem.send_keys("pycon")
elem.submit()
self.assertNotIn("No results found.", driver.page_source)
        for i in driver.requests:
            print("\n%s" % i)
            print(i.response)
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main()
|
Kitchlew/Summer
|
SQA/sel.py
|
sel.py
|
py
| 1,254 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3311087483
|
from math import *
from Robot import *
from Path import *
class Follower:
def __init__(self, file):
print('Sending commands to MRDS server', MRDS_URL)
self.robot = Robot()
self.path = Path(file)
self.path = self.path.getPath()
self.startTime = None
self.speed = 0.7
self.aSpeed = 1.3
self.lookAhead = 1
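    # follow() runs a simple pure-pursuit loop: read the robot pose, pick a
    # "carrot" point on the path one look-ahead distance away, steer towards
    # it, and repeat until the path is exhausted.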
def follow(self):
self.startTime = time.time()
print('GO!')
while self.path:
position = self.robot.getPosition()
heading = self.robot.getHeading()
newPosition = self.robot.carrotPoint(self.path, position, self.lookAhead)
if newPosition:
distance = self.robot.getDistance((newPosition['X'] - position['X']),
(newPosition['Y'] - position['Y']))
bearing = self.robot.getBearing((position['X'], position['Y']),
(newPosition['X'], newPosition['Y']))
turnAmount = self.robot.turnAngle(bearing, heading)
response = self.robot.setMotion(self.speed, turnAmount)
time.sleep(0.15)
response = self.robot.setMotion(0,0)
self.goalTime = time.time()
print('Goal reached in %.2f seconds.' % (self.goalTime - self.startTime))
if __name__ == '__main__':
pathFollower = Follower('Path-to-bed.json')
pathFollower.follow()
|
aliciastrommer/done
|
Follower.py
|
Follower.py
|
py
| 1,575 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39297894031
|
# Menu-driven matrix addition and multiplication
# python3.4
# Author: 代*
#_____________ Sum function _________________
def Sum(a,b):
    # Build the result as a 2-D list; a comprehension avoids the aliased
    # rows that [[0]*L1]*H1 would create
    c=[[0 for i in range(L1)] for i in range(H1)]
    if H1==H2 and L1==L2:
        for i in range(0,H1):
            for j in range(0,L1):
                c[i][j]=int(a[i][j])+int(b[i][j])
        print('Sum of the two matrices:',c)
    else:
        print('\nThese two matrices cannot be added!\n')
#_____________ Product function __________________
def Product(a,b):
    # Build the result as a 2-D list; a comprehension avoids aliased rows
    c=[[0 for i in range(L2)] for i in range(H1)]
    if L1==H2:
        for i in range(H1):
            for j in range(L2):
                # c[i][j] is the dot product of row i of a and column j of b
                k=0
                for f in range(L1):
                    k+=int(a[i][f])*int(b[f][j])
                c[i][j]=k
        print(c)
    else:
        print('\nThese two matrices cannot be multiplied!\n')
#_____________ Input helper __________________
def Dealwith(rows):
    a=[]
    f=1
    while f-1<rows:
        print('Enter row',f,': a',f,'=',end='')
        x=input()
        x=x.split(',')
        a=a+[x]
        f+=1
    return a
#Main program______________________________________________________________
while True:
    order='new'
    # First matrix
    H1=int(input('Enter the number of rows of the first matrix H1='))
    L1=int(input('Enter the number of columns of the first matrix L1='))
    a=Dealwith(H1)
    # Second matrix
    H2=int(input('Enter the number of rows of the second matrix H2='))
    L2=int(input('Enter the number of columns of the second matrix L2='))
    b=Dealwith(H2)
    if order=='new':
        while True:
            print('0. Quit this run\n1. Sum\n2. Product')
            flag=int(input('Enter the number of a command f='))
            if flag==1:
                Sum(a,b)
            if flag==2:
                Product(a,b)
            if flag==0:
                print('\nThe current matrix operations finished normally!\n')
                break
    print('close. Exit the program\nnew. Start a new matrix calculation')
    order=input('Exit the program or start a new matrix calculation? order=')
    if order=='close':
        print('\nThe program has ended normally!')
        break
|
FreedomSkyMelody/some-small-program
|
matrix_operations.py
|
matrix_operations.py
|
py
| 2,082 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
36615991000
|
import asyncio
import logging
import random
import sys
from time import sleep
def run_async():
async def mytask(tid: str):
n = random.randint(1, 3)
for i in range(n):
logging.info(f"task {tid} {i} of {n}")
await asyncio.sleep(1)
logging.info(f"finished {tid} {n}")
async def async_mytasks():
ids = [f"t-{i}" for i in range(100)]
ts = [asyncio.create_task(mytask(tid)) for tid in ids]
await asyncio.wait(ts)
logging.info("all finished")
asyncio.run(async_mytasks())
def call_worker():
import requests
logging.info("call worker")
    rs = "http://172.17.0.2:8080/1"
logging.info(f"sending {rs}")
r = requests.get(rs)
r1 = r.json()["result"]
logging.info(f"result for {rs}: {r.reason} {r1}")
def run_concurrent():
from concurrent.futures import ThreadPoolExecutor
def square(n):
print(f"Started square({n})")
st = 0.5 + random.random() * 2
sleep(st)
print(f"Almost finished square({n})")
return n * n
values = range(20)
with ThreadPoolExecutor(max_workers=15) as executor:
results = executor.map(square, values)
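        # executor.map yields results in input order even though workers
        # finish out of order; list() blocks until every task is done.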
print(list(results))
def main():
# run_async()
# call_worker()
run_concurrent()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level='INFO')
main()
|
wwagner4/pymultiworker
|
tryout.py
|
tryout.py
|
py
| 1,397 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41675652390
|
# Nth largest number
import sys
import heapq
# sys.setrecursionlimit(10000000)
input = sys.stdin.readline
N = int(input())
# lst = [list(map(int, input().split())) for _ in range(N)]
res = []
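# Keep a min-heap of the N largest values seen so far; once all input is
# consumed, the heap root (res[0], which equals res[-N] here) is the Nth largest.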
for _ in range(N):
for num in map(int, input().split()):
if len(res) < N:
heapq.heappush(res, num)
else:
if num > res[0]:
heapq.heappushpop(res, num)
print(res[-N])
# def merge(lst1, lst2):
# new_lst = []
# i1, i2 = 0, 0
# while True:
# if i1 == len(lst1):
# new_lst.extend(lst2[i2:])
# break
# if i2 == len(lst2):
# new_lst.extend(lst1[i1:])
# break
# if lst1[i1] < lst2[i2]:
# new_lst.append(lst1[i1])
# i1 += 1
# else:
# new_lst.append(lst2[i2])
# i2 += 1
# return new_lst
# for i in range(N):
# res = merge(res, [lst[j][i] for j in range(N)])
# print(res[-N])
|
jisupark123/Python-Coding-Test
|
알쓰/week1/2075.py
|
2075.py
|
py
| 973 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18857851018
|
import numpy as np
from Beta_estimate import Beta_est
from C_estimation import C_est
from I_spline import I_S
from g_linear import g_L
def Est_linear(train_data,X_test,Beta0,nodevec,m,c0):
Z_train = train_data['Z']
U_train = train_data['U']
De_train = train_data['De']
Beta0 = np.array([Beta0])
# Given an initial value c0, calculate an initial value of Lambda(U)
Lambda_U = I_S(m, c0, U_train, nodevec)
C_index = 0
for loop in range(100):
print('linear_iteration time=', loop)
        # Given the initial values of Beta and Lambda(U), estimate g(X)
g_X = g_L(train_data,X_test,Lambda_U,Beta0)
g_train = g_X['g_train']
# Estimate Lambda(U)
c1 = C_est(m,U_train,De_train,Z_train,Beta0,g_train,nodevec)
Lambda_U = I_S(m,c1,U_train,nodevec)
# Estimate Beta
Beta1 = Beta_est(De_train,Z_train,Lambda_U,g_train)
print('Beta=', Beta1)
print('c=', c1)
# Convergence condition
if (abs(Beta0-Beta1) <= 0.001):
C_index = 1
break
c0 = c1
Beta0 = Beta1
return {
'g_train': g_train,
'g_test': g_X['g_test'],
'c': c1,
'Beta': Beta1,
'C_index': C_index,
}
|
qiangwu2023/dnn_current_status
|
Model_Linear/iteration_linear.py
|
iteration_linear.py
|
py
| 1,323 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20546789103
|
import discord
from discord.ext import commands
from discord.ext.commands import Command
from chime.main import prefix
from chime.misc.CustomCommand import CustomCommand
from chime.misc.StyledEmbed import StyledEmbed
class EmbedHelpCommand(commands.HelpCommand):
"""This is an example of a HelpCommand that utilizes embeds.
It's pretty basic but it lacks some nuances that people might expect.
1. It breaks if you have more than 25 cogs or more than 25 subcommands. (Most people don't reach this)
2. It doesn't DM users. To do this, you have to override `get_destination`. It's simple.
Other than those two things this is a basic skeleton to get you started. It should
be simple to modify if you desire some other behaviour.
To use this, pass it to the bot constructor e.g.:
bot = commands.Bot(help_command=EmbedHelpCommand())
"""
# Set the embed colour here
COLOUR = discord.colour.Color.from_rgb(r=255, g=197, b=84)
def get_command_signature(self, command):
return '{0.qualified_name} {0.signature}'.format(command)
async def send_bot_help(self, mapping):
embed = StyledEmbed(title='chime help')
embed.set_thumbnail(url="https://raw.githubusercontent.com/realmayus/chime/master/assets/chime_banner.png?token=AJC6B5VTHEZ5UHNY7QNDCU263LCCK")
embed.description = "chime is a versatile, yet intuitive music bot for discord. It aims to be as user-friendly as possible while still boasting many features. \n\n" \
"**More info and invite link [here](https://chime.realmayus.xyz)** \n\n" \
"Chime has a **web app** where you can manage and set up personal playlists and manage settings of your servers! https://chime.realmayus.xyz \n\n" \
"**Use** `" + self.clean_prefix + "help [command]` **for more info on a command.**"
for cog, commands in mapping.items():
if cog is not None: # We don't want commands without categories! >:c
name = cog.qualified_name
filtered = await self.filter_commands(commands, sort=True)
if filtered:
builder = []
for command in commands: # filtering out hidden commands
command: Command
builder.append(f"`{prefix + command.name}`" if not command.hidden else "")
value = ' '.join(builder)
if cog and cog.description:
value = '{0}\n{1}'.format(cog.description, value)
embed.add_field(name=name, value=value)
await self.get_destination().send(embed=embed)
async def send_cog_help(self, cog):
pass
async def send_group_help(self, group: CustomCommand):
embed = StyledEmbed(title='`' + group.qualified_name + '`')
desc = ""
if group.help:
desc += group.help
if group.usage:
embed.add_field(name="**Usage**", value=f"`{prefix + group.usage}`", inline=False)
if group.aliases and len(group.aliases) > 0:
embed.add_field(name="**Aliases**", value=' '.join([f"`{prefix + alias}`" for alias in group.aliases]), inline=False)
if hasattr(group, "available_args") and group.available_args:
arg_builder = ""
for typ in group.available_args:
arg_builder += f"\n**{typ['type']}**"
for arg in typ['args']:
arg_builder += f"\n`{arg['name']}`\n***{arg['desc']}***"
embed.add_field(name="**Arguments**", value=arg_builder)
if hasattr(group, "examples") and group.examples:
example_builder = ""
for ex in group.examples:
example_builder += f"\n`{ex['ex']}`\n{ex['desc']}"
embed.add_field(name="**Examples**", value=example_builder)
embed.description = desc
await self.get_destination().send(embed=embed)
# This makes it so it uses the function above
# Less work for us to do since they're both similar.
# If you want to make regular command help look different then override it
send_command_help = send_group_help
|
realmayus/chime
|
chime/cogs/HelpCommandCog.py
|
HelpCommandCog.py
|
py
| 4,233 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9887556442
|
import cv2, sys, time
def start_split(filename):
start = time.time()
video = cv2.VideoCapture(filename)
    if not video.isOpened():
        print("Cannot open the video file")
        sys.exit(1)
count = 0
while video.isOpened():
print("\r正在处理第{0}帧图像".format(count), end="")
ret, frame = video.read()
if not ret:
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
resize = cv2.resize(gray, (8, 8))
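        # zero-padded names keep the frame files lexicographically ordered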
cv2.imwrite("./pic/" + str(count).zfill(5) + ".jpg", resize)
count += 1
video.release()
end = time.time()
print("\n处理完成,处理了{0}帧图像,用时{1}秒".format(count, round(end - start, 3)))
return
if __name__ == "__main__":
file = "ba10s.mp4"
start_split(file)
|
Temperature6/BadAppleVideoProcess
|
VideoSplit.py
|
VideoSplit.py
|
py
| 825 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31997355144
|
# 1. Implement a function that takes two numbers (positional arguments)
# and divides them. Ask the user for the numbers and
# handle the division-by-zero case.
caption = 'Python language basics. Lesson 3. Homework 1.\n'
print(caption)
def dividing(num_1, num_2):
"""Выполняет деление двух чисел. Обрабатывается исключение ZeroDivisionError."""
try:
result = num_1/num_2
except ZeroDivisionError as e:
print(f"Exception - {e} (деление на 0).")
except TypeError as e:
print(f"Exception - {e}.")
else:
print(f'{num_1}/{num_2} = {result}')
while True:
try:
        num_1 = float(input("\nEnter the numerator: a = ").replace(',','.'))
        num_2 = float(input("Enter the denominator: b = ").replace(',','.'))
except ValueError as e:
print(f"Exception - {e}.\nНеобходимо ввести число!")
else:
break
dividing(num_1, num_2)
# dividing() - calling it without arguments raises TypeError
|
SokIL69/python
|
Lesson3/hw3_1.py
|
hw3_1.py
|
py
| 1,329 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
17693067570
|
import numpy as np
def data_to_matrix(path):
return (
np.loadtxt(open(path, "rb"), delimiter=",", usecols=[0,1,2,3]),
np.loadtxt(open(path, "rb"), delimiter=",", usecols=4),
)
def seidel_method(A, B, eps):
""" Метод Зейделя """
print(seidel_method.__doc__)
D = np.diag(A)
a = [-(A - np.diagflat(D))[i] / D[i] for i in range(np.shape(A)[0])]
b = B / D
    X = b.copy()  # Initial approximation
for i in range(10):
print(f"\tІтерація {i}: {X}")
X_new = np.zeros_like(X)
for k in range(A.shape[0]):
s1 = np.dot(A[k], X_new)
s2 = np.dot(A[k, k+1:], X[k+1:])
X_new[k] = (B[k] - s1 - s2) / D[k]
if np.allclose(X_new, X, atol=eps):
break
X = X_new.copy()
print(f"Відповідь: {X}")
path = input("Введіть шлях до файлу з даними: ") or "data.csv"
A, B = data_to_matrix(path)  # A - coefficient matrix of the system
seidel_method(A, B, 0.001)   # B - vector of free terms
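# Assumed data.csv layout (inferred from the usecols above): each row holds the
# 4 coefficients of one equation followed by its free term, e.g.
#   10,1,1,1,13
#   1,10,1,1,13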
|
geekylthyosaur/lpnu
|
NM/seidel_method.py
|
seidel_method.py
|
py
| 1,284 |
python
|
uk
|
code
| 2 |
github-code
|
6
|
3670374638
|
import math
from collections import defaultdict
from typing import Union, Callable, Tuple, List
import gevent
from gevent.queue import Queue
from requests import PreparedRequest, Response
from lib.utils.logger import Logger
from lib.utils.request_helper import RequestHelper, RequestInfo
class BaseFinder(RequestHelper):
    # Format:
# {'example.com:8443': {'some_bucket': {'size': Union[int, None], 'in_progress': Union[bool, None]}, ...}, ...}
bucket_size_cache = defaultdict(lambda: defaultdict(dict))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def determine_bucket_size(self, info: RequestInfo):
raise NotImplementedError
def find_secrets(self, info: RequestInfo, words: List[str]):
""" Проверяет изменения в ответе для заданного списка параметров `words` в теле запроса
:param info:
:param words: Названия параметров
:return: dict([(`param`, `reasons`)]) - если найдено конкретное слово
int - если со словами требуется провести манипуляции
"""
raise NotImplementedError
def get_bucket_size(self, info: RequestInfo):
""" Возвращает общие число хидеров в запросе """
raise NotImplementedError
def get_word_chunks(self, info: RequestInfo):
raise NotImplementedError
def is_info_searchable(self, info: RequestInfo):
raise NotImplementedError
def set_bucket_size(self, info: RequestInfo):
""" Устанавивает для запроса в `info` общее число хидеров """
raise NotImplementedError
def setup_requests_info(self, info_list: List[RequestInfo]):
raise NotImplementedError
def do_request(self, prepared_request: PreparedRequest, **kwargs) -> Union[Response, None]:
""" Выполняет подготовленных запрос с отчисткой промежуточного кэша
:param prepared_request:
:return: `None` - если по истечении `self.retry` попыток не удалось получить ответ от сервера
`requests.Response` - если удалось получить ответ от сервера
"""
return super().do_request(prepared_request, self.retry, self.timeout, self.delay, self.proxies,
self.arguments.allow_redirects, self.logger)
def filter_requests(self, *args, **kwargs):
kwargs.update({'logger': self.logger})
return super().filter_requests(*args, **kwargs)
def get_optimal_bucket(self, info: RequestInfo, min_chunk: int, add_random: Callable,
additional_size: Callable, logger: Logger) -> Union[int, None]:
""" Ищет оптимальный размер порции параметров соотношение (Длина порции) / (время ответа)
:param info:
:return:
"""
left, cur, right = 1024, 2048, 4096
left_border = 0
right_border = math.inf
counter = 5
optimal_size = None
optimal_rate = 0
        # Cap on the number of iterations
while counter:
counter -= 1
            # If the left bound has dropped to zero
if left == 0:
break
            # If the range can no longer be split, stop the loop
if right - cur < 2 or cur - left < 2:
break
            # Prepare the requests
_requests = [info.copy_request() for _ in range(3)]
for request, length in zip(_requests, [left, cur, right]):
add_random(request, length)
            # Send them
jobs = [gevent.spawn(self.do_request, request) for request in _requests]
gevent.joinall(jobs)
responses = [job.value for job in jobs]
            # Collect the results
results = []
# results = [response.status_code == info.response.status_code if response is not None else response
# for response in responses]
for response in responses:
if not response:
results.append(None)
                # If the status codes match
elif response.status_code == info.response.status_code:
results.append(True)
                # If Payload Too Large/URI Too Long/Request Header Fields Too Large
elif response.status_code in {413, 414, 431}:
results.append(False)
                # If the status code is in [500, 599] while the original code is not
elif 500 <= response.status_code < 600 and not 500 <= info.response.status_code < 600:
results.append(False)
                # If the status code is in [400, 499] while the original code is not
elif 400 <= response.status_code < 500 and not 400 <= info.response.status_code < 500:
results.append(False)
else:
                    logger.debug(f'Unhandled case: act_status_code={response.status_code}, orig_status_code={info.response.status_code}')
results.append(True)
            # If none of the requests got a response from the server, shift left
if not any(results):
right_border = left
right = right_border
cur = right >> 1
left = cur >> 1
continue
            # Otherwise pick the optimal response among the results
rates = []
for response, size, result in zip([response for response in responses], [left, cur, right], results):
                # Only consider the cases where we stayed within the limits
elapsed = response.elapsed.total_seconds() if (response is not None and result == True) else math.inf
rate = round(size / elapsed, 1)
rates.append(rate)
if rate > optimal_rate and result:
optimal_rate = rate
optimal_size = size
            # See in which direction the trend is moving
max_rate = max(rates)
            # If none of the requests exceeded the limit, move in the direction of the trend
if all(results):
                # If the trend grows on the left
if rates[0] == max_rate:
right_border = right
                    # then shift left
right = left - 1
cur = right >> 1
left = cur >> 1
                    # If the left pointer is below the left bound
if left < left_border:
                        # then recompute the pointers within the bounds
left, cur, right = self.shift_bounds(left_border, right_border)
                # If the trend grows on the right
elif rates[2] == max_rate:
left_border = left
                    # then shift right
left = right + 1
cur = left << 1
right = cur << 1
                    # If the right pointer went past the right bound
if right > right_border:
                        # then recompute the pointers within the bounds
left, cur, right = self.shift_bounds(left_border, right_border)
                # Otherwise examine the neighborhood of the center
else:
left_border = left if left > left_border else left_border
right_border = right if right < right_border else right_border
left = (left + cur) // 2
right = (cur + right) // 2
            # If the results are [True, False|None, False|None]
elif results[0] == True and all([not r for r in results[1:]]):
right_border = cur if cur < right_border else right_border
                # then shift left
right = left - 1
cur = right >> 1
left = cur >> 1
            # If the results are [True, True, False|None]
elif results[2] in {None, False} and all([r for r in results[:2]]):
right_border = right if right < right_border else right_border
                # then compare the trend on the left and in the middle
                # If the trend grows on the left
if rates[0] == max_rate:
                    # then shift left
                    right = left - 1  # move the examined right bound to 1 below the previously examined left bound
cur = right >> 1
left = cur >> 1
                    # If the left pointer is below the left bound
if left < left_border:
                        # then recompute the pointers within the bounds
left, cur, right = self.shift_bounds(left_border, right_border)
                # Otherwise dig deeper around cur
else:
right = round((cur + right) / 2)
left = (left + cur) // 2
else:
                # Shift left
                right = left - 1  # move the examined right bound to 1 below the previously examined left bound
cur = right >> 1
left = cur >> 1
        # If the resulting optimal size is below the required minimum, return the required minimum
if optimal_size is not None:
if optimal_size < min_chunk < right_border:
return min_chunk + additional_size(info)
return optimal_size + additional_size(info)
return optimal_size
@staticmethod
def parse_results_queue(results_queue: Queue):
results = defaultdict(lambda: defaultdict(list))
while results_queue.qsize():
result = results_queue.get()
for param_name, value in result.items():
url, reasons, type, response = value['url'], value['reasons'], value['type'], value['response']
results[url][type].append({'param': param_name, 'reasons': reasons, 'response': response})
return results
@staticmethod
def parse_results(results: list):
_results = defaultdict(lambda: defaultdict(list))
for result in results:
for param_name, value in result.items():
url, reasons, type, response = value['url'], value['reasons'], value['type'], value['response']
_results[url][type].append({'param': param_name, 'reasons': reasons, 'response': response})
return _results
@staticmethod
def shift_bounds(left_bound: int, right_bound: int) -> Tuple[int, int, int]:
""" Сдвигает тройку `left`, `cur`, `right` согласно новым границам `left_bound` и `right_bound` """
cur = (left_bound + right_bound) // 2
left = (left_bound + cur) // 2
right = round((cur + right_bound) / 2)
return left, cur, right
@staticmethod
def update_results(results: dict, new_results: dict) -> dict:
""" Обновляет словарь `results` данными из `new_results`
:param results:
:param new_results:
:return:
"""
if not len(results.keys()):
results = defaultdict(lambda: defaultdict(list))
for url in new_results:
for type in new_results[url]:
for new_info in new_results[url][type]:
new_param, new_reasons, new_response = new_info['param'], new_info['reasons'], new_info['response']
results[url][type].append({'param': new_param, 'reasons': new_reasons, 'response': new_response})
return results
|
medalahonor/suseeker
|
lib/finders/base_finder.py
|
base_finder.py
|
py
| 13,362 |
python
|
ru
|
code
| 3 |
github-code
|
6
|
26260800957
|
import pygame
class Item:
def __init__(self, image, x, y, id, player):
self.image = pygame.image.load(image)
self.x, self.y = x, y
self.player = player
self.id = id
def draw(self, screen):
screen.blit(self.image, (self.x, self.y))
def onPlayerCollision(self):
pygame.mixer.music.load("other\\itempickup.wav")
pygame.mixer.music.play(1)
if self.id == 1:
self.player.hasSnorkel = True
if self.id == 2:
self.player.hasMachete = True
self.player.attack_damage = 5
if self.id == 3:
self.player.hasBombs = True
self.player.bombs = 15
|
SeaPickle754/zeldaish
|
item.py
|
item.py
|
py
| 588 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19202891943
|
#!/usr/bin/python3
"""
A module that includes the parent class BaseModel
"""
import models
from datetime import datetime
from uuid import uuid4
class BaseModel:
"""
class BaseModel that defines all common attributes/methods
for other classes.
"""
def __init__(self, *args, **kwargs):
"""
__init__ method to initise the an object during its creation
Args:
*args: tuple of arguments
**kwags: key value variables
"""
self.id = str(uuid4())
self.created_at = datetime.now()
self.updated_at = datetime.now()
if kwargs:
for key, value in kwargs.items():
if key != '__class__':
if key in ['created_at', 'updated_at']:
fformat = '%Y-%m-%dT%H:%M:%S.%f'
"""convert the string values to datetime objects"""
value = datetime.strptime(value, fformat)
                    setattr(self, key, value)
                    # setattr is equivalent to `self.<key> = value`: each
                    # value in the kwargs dictionary becomes the value of
                    # the attribute named by its key
else:
self.id = str(uuid4())
self.created_at = datetime.now()
self.updated_at = datetime.now()
models.storage.new(self)
def __str__(self):
"""
__str__: should print: [<class name>] (<self.id>) <self.__dict__>
"""
class_name = self.__class__.__name__
return "[{}] ({}) {}".format(class_name, self.id, self.__dict__)
def save(self):
"""
save(self): updates the public instance attribute
updated_at with the current datetime
"""
self.updated_at = datetime.now()
models.storage.save()
def to_dict(self):
"""
returns a dictionary containing all keys/values
of __dict__ of the instance
"""
c_dict = self.__dict__.copy()
c_dict['__class__'] = self.__class__.__name__
c_dict['created_at'] = self.created_at.isoformat()
c_dict['updated_at'] = self.updated_at.isoformat()
return c_dict
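# Minimal usage sketch (assumes models.storage is wired up as in the project):
#   b = BaseModel()
#   b.save()            # refreshes updated_at and persists via storage
#   print(b)            # [BaseModel] (<id>) {...}
#   print(b.to_dict())  # JSON-serializable dict with ISO timestamps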
|
sanotogii/AirBnB_clone
|
models/base_model.py
|
base_model.py
|
py
| 2,261 |
python
|
en
|
code
| 0 |
github-code
|
6
|