hexsha (stringlengths 40-40) | size (int64 6-782k) | ext (stringclasses 7 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4-237) | max_stars_repo_name (stringlengths 6-72) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (list) | max_stars_count (int64 1-53k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 4-184) | max_issues_repo_name (stringlengths 6-72) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (list) | max_issues_count (int64 1-27.1k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 4-184) | max_forks_repo_name (stringlengths 6-72) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (list) | max_forks_count (int64 1-12.2k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 6-782k) | avg_line_length (float64 2.75-664k) | max_line_length (int64 5-782k) | alphanum_fraction (float64 0-1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9ac9b5a3d46177edc6cd72b3bfdd2b89d505e52d | 13,241 | py | Python | tests/transformers/bert/test_modeling.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null | tests/transformers/bert/test_modeling.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null | tests/transformers/bert/test_modeling.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import unittest
import paddle
import copy
from paddlenlp.transformers import BertModel, BertForPretraining, BertPretrainingCriterion, BertForMaskedLM
from paddlenlp.transformers import BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertForMultipleChoice
from common_test import CommonTest
from util import softmax_with_cross_entropy, slow
def create_input_data(config, seed=None):
'''
    the generated input data will be the same if a specified seed is set
'''
if seed is not None:
np.random.seed(seed)
input_ids = np.random.randint(low=0,
high=config['vocab_size'],
size=(config["batch_size"],
config["seq_len"]))
num_to_predict = int(config["seq_len"] * 0.15)
masked_lm_positions = np.random.choice(
config["seq_len"], (config["batch_size"], num_to_predict),
replace=False)
masked_lm_positions = np.sort(masked_lm_positions)
pred_padding_len = config["seq_len"] - num_to_predict
temp_masked_lm_positions = np.full(masked_lm_positions.size,
0,
dtype=np.int32)
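    # Flatten (example index, masked position) pairs into indices over the
    # batch's concatenated tokens: i * seq_len + pos.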
mask_token_num = 0
for i, x in enumerate(masked_lm_positions):
for j, pos in enumerate(x):
temp_masked_lm_positions[
mask_token_num] = i * config["seq_len"] + pos
mask_token_num += 1
masked_lm_positions = temp_masked_lm_positions
return input_ids, masked_lm_positions
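# Plain NumPy reference for the pretraining loss (masked LM cross-entropy plus
# next-sentence cross-entropy), used below to check BertPretrainingCriterion.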
class NpBertPretrainingCriterion(object):
def __init__(self, vocab_size):
self.vocab_size = vocab_size
def __call__(self, prediction_scores, seq_relationship_score,
masked_lm_labels, next_sentence_labels, masked_lm_scale):
masked_lm_loss = softmax_with_cross_entropy(prediction_scores,
masked_lm_labels,
ignore_index=-1)
masked_lm_loss = masked_lm_loss / masked_lm_scale
next_sentence_loss = softmax_with_cross_entropy(seq_relationship_score,
next_sentence_labels)
return np.sum(masked_lm_loss) + np.mean(next_sentence_loss)
class TestBertForSequenceClassification(CommonTest):
def set_input(self):
self.config = copy.deepcopy(
BertModel.pretrained_init_configuration['bert-base-uncased'])
self.config['num_hidden_layers'] = 2
self.config['vocab_size'] = 512
self.config['attention_probs_dropout_prob'] = 0.0
self.config['hidden_dropout_prob'] = 0.0
self.config['intermediate_size'] = 1024
self.config['seq_len'] = 64
self.config['batch_size'] = 3
self.config['max_position_embeddings'] = 512
self.input_ids, self.masked_lm_positions = create_input_data(
self.config)
def set_output(self):
self.expected_shape = (self.config['batch_size'], 2)
def set_model_class(self):
self.TEST_MODEL_CLASS = BertForSequenceClassification
def setUp(self):
self.set_model_class()
self.set_input()
self.set_output()
def check_testcase(self):
self.check_output_equal(self.output.numpy().shape, self.expected_shape)
def test_forward(self):
config = copy.deepcopy(self.config)
del config['batch_size']
del config['seq_len']
bert = BertModel(**config)
model = self.TEST_MODEL_CLASS(bert)
input_ids = paddle.to_tensor(self.input_ids, dtype="int64")
self.output = model(input_ids)
self.check_testcase()
class TestBertForTokenClassification(TestBertForSequenceClassification):
def set_model_class(self):
self.TEST_MODEL_CLASS = BertForTokenClassification
def set_output(self):
self.expected_shape = (self.config['batch_size'],
self.config['seq_len'], 2)
class TestBertForPretraining(TestBertForSequenceClassification):
def set_model_class(self):
self.TEST_MODEL_CLASS = BertForPretraining
def set_output(self):
self.expected_seq_shape = (self.masked_lm_positions.shape[0],
self.config['vocab_size'])
self.expected_pooled_shape = (self.config['batch_size'], 2)
def test_forward(self):
config = copy.deepcopy(self.config)
del config['batch_size']
del config['seq_len']
bert = BertModel(**config)
model = self.TEST_MODEL_CLASS(bert)
input_ids = paddle.to_tensor(self.input_ids, dtype="int64")
masked_lm_positions = paddle.to_tensor(self.masked_lm_positions,
dtype="int64")
self.output = model(input_ids, masked_positions=masked_lm_positions)
self.check_testcase()
def check_testcase(self):
self.check_output_equal(self.output[0].numpy().shape,
self.expected_seq_shape)
self.check_output_equal(self.output[1].numpy().shape,
self.expected_pooled_shape)
class TestBertForMaskedLM(TestBertForSequenceClassification):
def set_model_class(self):
self.TEST_MODEL_CLASS = BertForMaskedLM
def set_output(self):
self.expected_seq_shape = (self.config['batch_size'],
self.config['seq_len'],
self.config['vocab_size'])
def test_forward(self):
config = copy.deepcopy(self.config)
del config['batch_size']
del config['seq_len']
bert = BertModel(**config)
model = self.TEST_MODEL_CLASS(bert)
input_ids = paddle.to_tensor(self.input_ids, dtype="int64")
self.output = model(input_ids)
self.check_testcase()
def check_testcase(self):
self.check_output_equal(self.output.numpy().shape,
self.expected_seq_shape)
class TestBertForQuestionAnswering(TestBertForSequenceClassification):
def set_model_class(self):
self.TEST_MODEL_CLASS = BertForQuestionAnswering
def set_output(self):
self.expected_start_logit_shape = (self.config['batch_size'],
self.config['seq_len'])
self.expected_end_logit_shape = (self.config['batch_size'],
self.config['seq_len'])
def check_testcase(self):
self.check_output_equal(self.output[0].numpy().shape,
self.expected_start_logit_shape)
self.check_output_equal(self.output[1].numpy().shape,
self.expected_end_logit_shape)
class TestBertForMultipleChoice(TestBertForSequenceClassification):
def set_input(self):
self.config = copy.deepcopy(
BertModel.pretrained_init_configuration['bert-base-uncased'])
self.config['num_hidden_layers'] = 2
self.config['vocab_size'] = 512
self.config['attention_probs_dropout_prob'] = 0.0
self.config['hidden_dropout_prob'] = 0.0
self.config['intermediate_size'] = 1024
self.config['seq_len'] = 64
self.config['batch_size'] = 4
self.config['num_choices'] = 2
self.config['max_position_embeddings'] = 512
self.input_ids, _ = create_input_data(self.config)
# [bs*num_choice,seq_l] -> [bs,num_choice,seq_l]
self.input_ids = np.reshape(self.input_ids, [
self.config['batch_size'] // self.config['num_choices'],
self.config['num_choices'], -1
])
def set_model_class(self):
self.TEST_MODEL_CLASS = BertForMultipleChoice
def set_output(self):
self.expected_logit_shape = (self.config['batch_size'] //
self.config['num_choices'],
self.config['num_choices'])
def check_testcase(self):
self.check_output_equal(self.output.numpy().shape,
self.expected_logit_shape)
def test_forward(self):
config = copy.deepcopy(self.config)
del config["num_choices"]
del config['batch_size']
del config['seq_len']
bert = BertModel(**config)
model = self.TEST_MODEL_CLASS(bert)
input_ids = paddle.to_tensor(self.input_ids, dtype="int64")
self.output = model(input_ids)
self.check_testcase()
class TestBertPretrainingCriterion(CommonTest):
def setUp(self):
self.config['vocab_size'] = 1024
self.criterion = BertPretrainingCriterion(**self.config)
self.np_criterion = NpBertPretrainingCriterion(**self.config)
def _construct_input_data(self, mask_num, vocab_size, batch_size):
prediction_scores = np.random.rand(mask_num, vocab_size).astype(
paddle.get_default_dtype())
seq_relationship_score = np.random.rand(batch_size, 2).astype(
paddle.get_default_dtype())
masked_lm_labels = np.random.randint(0, vocab_size, (mask_num, 1))
next_sentence_labels = np.random.randint(0, 2, (batch_size, 1))
masked_lm_scale = 1.0
masked_lm_weights = np.random.randint(0, 2, (mask_num)).astype(
paddle.get_default_dtype())
return prediction_scores, seq_relationship_score, masked_lm_labels, \
next_sentence_labels, masked_lm_scale, masked_lm_weights
def test_forward(self):
np_prediction_score, np_seq_relationship_score, np_masked_lm_labels, \
np_next_sentence_labels, masked_lm_scale, np_masked_lm_weights \
= self._construct_input_data(20, self.config['vocab_size'], 4)
prediction_score = paddle.to_tensor(np_prediction_score)
seq_relationship_score = paddle.to_tensor(np_seq_relationship_score)
masked_lm_labels = paddle.to_tensor(np_masked_lm_labels, dtype="int64")
next_sentence_labels = paddle.to_tensor(np_next_sentence_labels,
dtype="int64")
masked_lm_weights = paddle.to_tensor(np_masked_lm_weights)
np_loss = self.np_criterion(np_prediction_score,
np_seq_relationship_score,
np_masked_lm_labels,
np_next_sentence_labels, masked_lm_scale)
loss = self.criterion(prediction_score, seq_relationship_score,
masked_lm_labels, next_sentence_labels,
masked_lm_scale)
self.check_output_equal(np_loss, loss.numpy()[0])
class TestBertFromPretrain(CommonTest):
@slow
def test_bert_base_uncased(self):
model = BertModel.from_pretrained('bert-base-uncased',
attention_probs_dropout_prob=0.0,
hidden_dropout_prob=0.0)
self.config = copy.deepcopy(model.config)
self.config['seq_len'] = 32
self.config['batch_size'] = 3
input_ids, _ = create_input_data(self.config, 102)
input_ids = paddle.to_tensor(input_ids)
output = model(input_ids)
expected_seq_shape = (self.config['batch_size'], self.config['seq_len'],
self.config['hidden_size'])
expected_pooled_shape = (self.config['batch_size'],
self.config['hidden_size'])
self.check_output_equal(output[0].numpy().shape, expected_seq_shape)
self.check_output_equal(output[1].numpy().shape, expected_pooled_shape)
expected_seq_slice = np.array([[0.17383946, 0.09206937, 0.45788339],
[-0.28287640, 0.06244858, 0.54864359],
[-0.54589444, 0.04811822, 0.50559914]])
# There's output diff about 1e-6 between cpu and gpu
self.check_output_equal(output[0].numpy()[0, 0:3, 0:3],
expected_seq_slice,
atol=1e-6)
expected_pooled_slice = np.array(
[[-0.67418981, -0.07148759, 0.85799801],
[-0.62072051, -0.08452632, 0.96691507],
[-0.74019802, -0.10187808, 0.95353240]])
self.check_output_equal(output[1].numpy()[0:3, 0:3],
expected_pooled_slice,
atol=1e-6)
if __name__ == "__main__":
unittest.main()
| 40.124242 | 141 | 0.626916 |
b135233345bb8716ffc17f40609fffc4a9ba9191 | 707 | py | Python | Licence 1/I23/TP 4/tp_4.py | axelcoezard/licence | 1ed409c4572dea080169171beb7e8571159ba071 | ["MIT"] | 8 | 2020-11-26T20:45:12.000Z | 2021-11-29T15:46:22.000Z | Licence 1/I23/TP 4/tp_4.py | axelcoezard/licence | 1ed409c4572dea080169171beb7e8571159ba071 | ["MIT"] | null | null | null | Licence 1/I23/TP 4/tp_4.py | axelcoezard/licence | 1ed409c4572dea080169171beb7e8571159ba071 | ["MIT"] | 6 | 2020-10-23T15:29:24.000Z | 2021-05-05T19:10:45.000Z |
def Lire():
chaine = input("Permutation = ")
return tuple([int(x)-1 for x in chaine.split()])
def Ecrire(s):
print(tuple(range(1, len(s) + 1)))
print(tuple(i + 1 for i in s))
"""
QUESTION 1
"""
def EstPermutation(s):
    cool = sum(1 if c in range(len(s)) else 0 for c in s) == len(s)
    fun = len(set(s)) == len(s)
    return cool and fun
"""
QUESTION 2
"""
def Inverser(s):
pass
"""
QUESTION 3
"""
def Composer(s, t):
pass
"""
QUESTION 4
"""
def Orbite(k, s):
pass
"""
QUESTION 5
"""
def Signature(s):
pass
"""
QUESTION 6
"""
perm = Lire()
Ecrire(perm)
is_perm = EstPermutation(perm)
print(is_perm)
| 12.403509 | 67 | 0.553041 |
b8cc9a590ab13694b2c1ab31c6f843930bb3dd62 | 633 | py | Python | python/unittest/mock/medium/01_patch_on_import/test_work.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null | python/unittest/mock/medium/01_patch_on_import/test_work.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null | python/unittest/mock/medium/01_patch_on_import/test_work.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null |
from unittest import TestCase, mock
from work import work_on
class TestWorkMockingModule(TestCase):
def test_using_context_manager(self):
with mock.patch("work.os") as mocked_os:
work_on()
mocked_os.getcwd.assert_called_once()
@mock.patch("work.os")
def test_using_decorator(self, mocked_os):
work_on()
mocked_os.getcwd.assert_called_once()
def test_using_return_value(self):
"""Note 'as' in the context manager is optional"""
with mock.patch("work.os.getcwd", return_value="testing"):
assert work_on() == "testing"
| 30.142857 | 67 | 0.64455 |
b8e2497e1ccf2abcb513639c7ed64cd16e3ba4e4 | 2,365 | py | Python | scripts/bibtex2json.py | stschiff/homepage-gatsby | e901472bcc5adba6611646067b8f49932ae86d22 | ["MIT"] | null | null | null | scripts/bibtex2json.py | stschiff/homepage-gatsby | e901472bcc5adba6611646067b8f49932ae86d22 | ["MIT"] | 2 | 2021-03-24T08:42:46.000Z | 2021-10-01T20:44:11.000Z | scripts/bibtex2json.py | stschiff/homepage-gatsby | e901472bcc5adba6611646067b8f49932ae86d22 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import bibtexparser
import sys
import json
month_names = "jan feb mar apr may jun jul aug sep oct nov dec".split()
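# month_replace wraps bare month abbreviations (" jan" -> " {jan}") so that the
# BibTeX month field can be parsed as a literal string.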
def month_replace(s):
ret = s
for m in month_names:
search = " " + m
rep = " {" + m + "}"
ret = ret.replace(search, rep)
return ret
def month_to_num(m):
d = {month_names[i] : i for i in range(len(month_names))}
return d[m] + 1
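# Turn one parsed BibTeX entry into the JSON record used downstream: journal or
# book info, author list, ISO-style date, title, URL, abstract, image/PDF paths,
# citekey and an author-role flag derived from the entry's keywords.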
def curate_entry(entry):
[d, m] = entry['month'].split('~')
j = entry['journal'] if 'journal' in entry else f"in {entry['booktitle']}, {entry['publisher']}"
# print(entry)
role = 'minor'
if 'keywords' in entry:
if 'role\_lead' in entry['keywords']:
role = 'lead'
elif 'role\_major' in entry['keywords']:
role = 'major'
cleaned_author_string = entry['author'].replace('\n', ' ').replace('{', '').replace('}', '')
author_list = list(map(
lambda author_string: " ".join(reversed(author_string.split(', '))),
cleaned_author_string.split(' and ')
))
author_abbrv = cleaned_author_string.split(' and ')[0].split(',')[0] + " et al."
if len(author_list) == 2:
[a1, a2] = cleaned_author_string.split(' and ')
author_abbrv = a1.split(',')[0] + " and " + a2.split(',')[0]
elif len(author_list) == 1:
author_abbrv = cleaned_author_string.split(',')[0]
title = entry['title'].replace('{', '').replace('}','').replace('\n', ' ')
ret = {
'journal' : j,
'authors' : author_list,
'date' : f"{entry['year']}-{month_to_num(m):02}-{int(d):02}",
'title' : title,
'url' : entry['url'],
'abstract' : entry['abstract'].replace('{','').replace('}','').replace('\n', ' ') if 'abstract' in entry else None,
'image' : f"images/publications/{entry['ID']}.jpg",
'citekey' : entry['ID'],
'role' : role,
'pdf' : f"pdfs/{author_abbrv} {entry['year']} - {title.replace(':', ' -')}.pdf"
}
return ret
class Bibtex2JSONError(Exception):
pass
with open('publications.bib') as bibtex_file:
bibtex_string = month_replace(bibtex_file.read())
bib_database = bibtexparser.loads(bibtex_string)
with open('../src/data/publications.json', 'w') as outf:
print(json.dumps([curate_entry(e) for e in bib_database.entries], indent=4), file=outf)
| 33.309859 | 123 | 0.573362 |
b8f1e644ec897bd93c6be6da5e6a35b66c2b194c | 386 | py | Python | exercises/de/exc_01_09.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | 2,085 | 2019-04-17T13:10:40.000Z | 2022-03-30T21:51:46.000Z | exercises/de/exc_01_09.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | 79 | 2019-04-18T14:42:55.000Z | 2022-03-07T08:15:43.000Z | exercises/de/exc_01_09.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | 361 | 2019-04-17T13:34:32.000Z | 2022-03-28T04:42:45.000Z |
import spacy
nlp = spacy.load("de_core_news_sm")
text = "Apple: Modell IPhone SE kommt im Sommer"
# Process the text
doc = ____
# Iterate over the entities
for ____ in ____.____:
    # Print the entity text and label
    print(____.____, ____.____)
# Create a Span for "IPhone SE"
iphone_se = ____
# Print the text of the span
print("Fehlende Entität:", iphone_se.text)
| 19.3 | 48 | 0.727979 |
6200eca86f26cf5b48542ca5df629a6c6185e4fb | 747 | py | Python | Versuch5/versuch5/task5.py | Tobias-Schoch/SSS | f8b078ca7f6482fc7c89d5f9e784a549459eefb7 | ["MIT"] | null | null | null | Versuch5/versuch5/task5.py | Tobias-Schoch/SSS | f8b078ca7f6482fc7c89d5f9e784a549459eefb7 | ["MIT"] | null | null | null | Versuch5/versuch5/task5.py | Tobias-Schoch/SSS | f8b078ca7f6482fc7c89d5f9e784a549459eefb7 | ["MIT"] | 1 | 2022-01-06T12:47:53.000Z | 2022-01-06T12:47:53.000Z |
import redlab as rl
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
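# Read single samples and a full scan from a RedLab data-acquisition device via
# the redlab bindings, print the reported sample rate and Nyquist frequency,
# then plot the scanned voltage values.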
print("-------einzelneWerte-------------------------")
print("16BitValue:" + str(rl.cbAIn(0, 0, 1)))
print("VoltageValue:" + str(rl.cbVIn(0, 0, 1)))
print("-------Messreihe-------------------------")
print("Messreihe:" + str(rl.cbAInScan(0, 0, 0, 300, 8000, 1)))
print("Messreihe:" + str(rl.cbVInScan(0, 0, 0, 300, 8000, 1)))
print("Samplerate:" + str(rl.cbInScanRate(0, 0, 0, 8000)))
print("Nyquist:" + str(rl.cbInScanRate(0, 0, 0, 8000) / 2))
print("-------Ausgabe-------------------------")
sin = [np.sin(2 * np.pi * 2 * (i / 30)) for i in range(0, 30)]
vec = np.zeros((8000))
data = rl.cbVInScan(0, 0, 0, 300, 8000, 1)
plt.plot(data)
plt.show()
| 32.478261 | 62 | 0.564926 |
627891f355c1b83bb754a39db7a48571c3d9c430 | 189 | py | Python | superstructure/utils.py | fossabot/superstructure | f4ab5cac269fb3dedfbd3a54c441af23edf3840b | ["MIT"] | null | null | null | superstructure/utils.py | fossabot/superstructure | f4ab5cac269fb3dedfbd3a54c441af23edf3840b | ["MIT"] | null | null | null | superstructure/utils.py | fossabot/superstructure | f4ab5cac269fb3dedfbd3a54c441af23edf3840b | ["MIT"] | null | null | null |
from .logik import Relation
def is_compatible(bewusstseins_inhalt, x):
if isinstance(x, Relation):
for begriff in bewusstseins_inhalt:
            return isinstance(begriff, Relation)
| 23.625 | 43 | 0.708995 |
65b16ed5d4ad08a561c3e78a54a6e6220670344e | 2,350 | py | Python | 2021/DEFCON 2021 quals/game.py | LeKSuS-04/Capture-The-Flag | 7cfe5b716566d4d2d58c53d4eedd647bc15a3328 | ["WTFPL"] | 1 | 2021-11-02T20:53:58.000Z | 2021-11-02T20:53:58.000Z | DEFCON/2021/Quals/crypto/qoo-or-ooo/game.py | ruhan-islam/ctf-archives | 8c2bf6a608c821314d1a1cfaa05a6cccef8e3103 | ["MIT"] | null | null | null | DEFCON/2021/Quals/crypto/qoo-or-ooo/game.py | ruhan-islam/ctf-archives | 8c2bf6a608c821314d1a1cfaa05a6cccef8e3103 | ["MIT"] | null | null | null |
"""
This is a public file
"""
import random
from coin import Coin
GAME_DESCRIPTION = \
"zardus: Hey hacker! Shall we play a game against QOO?\n" \
" There are two competitors here and they each will bet on 0 or 1.\n" \
" Let's put our numbers there so that the sum of ours is same as " \
"the multiplication of theirs"
OPTIONS = "0. Bet for 0\n" \
"1. Bet for 1\n" \
"2. Use your magic qoin"
ZERO = "0"
ONE = "1"
COIN = "2"
COIN_ROTATE = "Do you want to rotate your qoin before flipping?\n" \
"0. No, do not rotate my qoin\n" \
"1. Yes, rotate left\n" \
"2. Yes, rotate right"
NOT_CHANGE = "0"
LEFT = "1"
RIGHT = "2"
WIN = 1
LOSE = 0
WIN_MSG = "Win!"
LOSE_MSG = "Lose!"
class Game(object):
def __init__(self, hacker, zardus, id):
self.player1 = hacker
self.player2 = zardus
self.competitor_bet1 = random.randint(0, 1)
self.competitor_bet2 = random.randint(0, 1)
self.player2_bet = self.player2.bet(id, self.competitor_bet2)
self.coin = Coin(id)
self.id = id
def error(self):
print(f"Selection does not exist. {LOSE_MSG}")
return LOSE
def run(self):
print(f"[Round {self.id}]: Your competitor bets on {self.competitor_bet1}")
print(OPTIONS)
selection = input().strip()
if selection == COIN:
print(COIN_ROTATE)
selection = input().strip()
if selection == LEFT:
self.coin.rotate_left()
elif selection == RIGHT:
self.coin.rotate_right()
elif selection != NOT_CHANGE:
return self.error()
player1_bet = self.coin.flip(self.competitor_bet1)
elif selection == ZERO:
player1_bet = 0
elif selection == ONE:
player1_bet = 1
else:
return self.error()
print(f"[Round {self.id}]: zardus's competitor bets on {self.competitor_bet2}, " +
f"you bet on {player1_bet}")
return self.play(player1_bet, self.player2_bet)
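    # A round is won when the XOR of the two players' bets equals the product
    # (logical AND) of the two competitors' random bets.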
def play(self, p1_bet, p2_bet):
if p1_bet ^ p2_bet == self.competitor_bet1 * self.competitor_bet2:
print(WIN_MSG)
return WIN
else:
print(LOSE_MSG)
return LOSE
| 29.375 | 90 | 0.566809 |
65ee1cf179f448df647e0b890c8cf7c97f2d3391 | 1,362 | py | Python | Python/Regex_and_Parsing/Validating_And_Parsing_Email_Addresses.py | vinayvinu500/Hackerrank | e185ae9d3c7dc5cd661761142e436f5df6a3f0f1 | ["MIT"] | null | null | null | Python/Regex_and_Parsing/Validating_And_Parsing_Email_Addresses.py | vinayvinu500/Hackerrank | e185ae9d3c7dc5cd661761142e436f5df6a3f0f1 | ["MIT"] | null | null | null | Python/Regex_and_Parsing/Validating_And_Parsing_Email_Addresses.py | vinayvinu500/Hackerrank | e185ae9d3c7dc5cd661761142e436f5df6a3f0f1 | ["MIT"] | null | null | null |
# https://www.hackerrank.com/challenges/validating-named-email-addresses/problem?h_r=profile
#input
n = int(input())
e =[input() for _ in range(n)]
#name
name = [i.split()[0] for i in e]
#user
user = [i.split()[1].split('@')[0].replace('<','') for i in e]
#email
email = [i.split()[1].split('@')[1].split('.')[0] for i in e]
#extension
exts = [i.split()[1].split('@')[1].split('.')[1].replace('>','') for i in e]
#email list
r = list(zip(name,user,email,exts))
##name validation
nm = (lambda x,y: x.upper() == y.upper() )
##user validation
usr =(lambda x: ''.join([i for i in x if i in map(chr,range(65,91)) or i in map(chr,range(97,123)) or i in map(str,range(0,10)) or i in chr(95) or i in chr(45)]) == x)
##website validation
web = (lambda x: "".join([i for i in x if i in map(chr,range(65,91)) or i in map(chr,range(97,123)) or i in map(str,range(0,10))]) == x)
##extension validation
ext = lambda x: ''.join([i for i in x if (len(x) == 3) and i in map(chr,range(65,91)) or i in map(chr,range(97,123))]) == x
#validation
for i in r:
if all([
nm(i[0],i[1]),
usr(i[1]),
web(i[2]),
ext(i[3])
]):
print(f"{i[0]} <{i[1]}@{i[2]}.{i[3]}>")
'''
#email validation
validate = sorted([i[0]+'@'+i[1]+'.'+i[2]
for i in email if all([
usr(i[0]),web(i[1]),ext(i[2])])])
print(validate)
'''
"""
2
DEXTER <[email protected]>
VIRUS <virus!@variable.:p>
"""
| 28.375 | 167 | 0.590308 |
a238c28f012998b8629160bdcf6d6c1f1495a5e6 | 515 | py | Python | BITs/2014/Shmireychik_S_V/task_5_17.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | ["Apache-2.0"] | null | null | null | BITs/2014/Shmireychik_S_V/task_5_17.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | ["Apache-2.0"] | null | null | null | BITs/2014/Shmireychik_S_V/task_5_17.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | ["Apache-2.0"] | null | null | null |
# Task No. 5, Variant 17
# A program that, when run, randomly prints the name of one of the three countries of the military-political bloc known as the "Triple Alliance".
# Шмирейчик С.В.
# 14.03.2016
import random
a="Германия"
b="Австро-Венгрия"
c="Италия"
country=random.randint(1,3)
print("программа случайным образом отображает одну из трех стран Тройственного союза")
if country==1:
print(a)
elif country==2:
print(b)
elif country==3:
print(c)
input("Нажмите Enter для выхода.")
| 27.105263 | 158 | 0.741748 |
a25e97a2a9ab483712a453cdb96d60015f4c2c4f | 2,022 | py | Python | applications/doc_vqa/Rerank/src/index_search.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null | applications/doc_vqa/Rerank/src/index_search.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null | applications/doc_vqa/Rerank/src/index_search.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null |
import sys
import time
import faiss
import math
import numpy as np
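# Yield embedding batches of size bs, either from a .npy matrix or from a text
# file with one whitespace-separated dim-dimensional vector per line.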
def read_embed(file_name, dim=768, bs=3000):
if file_name.endswith('npy'):
i = 0
emb_np = np.load(file_name)
while (i < len(emb_np)):
vec_list = emb_np[i:i + bs]
i += bs
yield vec_list
else:
vec_list = []
with open(file_name) as inp:
for line in inp:
data = line.strip()
vector = [float(item) for item in data.split(' ')]
assert len(vector) == dim
vec_list.append(vector)
if len(vec_list) == bs:
yield vec_list
vec_list = []
if vec_list:
yield vec_list
def load_qid(file_name):
qid_list = []
with open(file_name) as inp:
for line in inp:
line = line.strip()
qid = line.split('\t')[0]
qid_list.append(qid)
return qid_list
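# For every batch of query embeddings, retrieve the top_k nearest passages from
# the FAISS index and write "qid \t pid \t rank \t score" rows to outfile.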
def search(index, emb_file, qid_list, outfile, top_k):
q_idx = 0
with open(outfile, 'w') as out:
for batch_vec in read_embed(emb_file):
q_emb_matrix = np.array(batch_vec)
res_dist, res_p_id = index.search(q_emb_matrix.astype('float32'),
top_k)
for i in range(len(q_emb_matrix)):
qid = qid_list[q_idx]
for j in range(top_k):
pid = res_p_id[i][j]
score = res_dist[i][j]
out.write('%s\t%s\t%s\t%s\n' % (qid, pid, j + 1, score))
q_idx += 1
def main():
part = sys.argv[1]
topk = int(sys.argv[2])
q_text_file = sys.argv[3]
outfile = 'output/res.top%s-part%s' % (topk, part)
qid_list = load_qid(q_text_file)
engine = faiss.read_index("output/para.index.part%s" % part)
emb_file = 'output/query.emb.npy'
search(engine, emb_file, qid_list, outfile, topk)
if __name__ == "__main__":
main()
| 28.083333 | 77 | 0.515826 |
a7b5cf83ec9be9727318993dc8b2e6e9c96ac9b5 | 34,517 | py | Python | Fastir_Collector/health/statemachine.py | Unam3dd/Train-2018-2020 | afb6ae70fe338cbe55a21b74648d91996b818fa2 | ["MIT"] | 4 | 2021-04-23T15:39:17.000Z | 2021-12-27T22:53:24.000Z | Fastir_Collector/health/statemachine.py | Unam3dd/Train-2018-2020 | afb6ae70fe338cbe55a21b74648d91996b818fa2 | ["MIT"] | null | null | null | Fastir_Collector/health/statemachine.py | Unam3dd/Train-2018-2020 | afb6ae70fe338cbe55a21b74648d91996b818fa2 | ["MIT"] | 2 | 2021-04-19T08:28:54.000Z | 2022-01-19T13:23:29.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import subprocess
import traceback
import psutil
from settings import NETWORK_ADAPTATER
from utils.utils import write_to_output, get_csv_writer, write_to_json, close_json_writer, get_json_writer,\
write_list_to_json, write_to_csv, get_terminal_decoded_string, record_sha256_logs, process_md5, process_sha1
import win32process
import re
import wmi
import datetime
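# Gathers Windows host health data (drives, shares, processes, sessions,
# scheduled tasks, network configuration, sockets, services, installed KBs)
# through WMI and helper commands, and writes it out as CSV or JSON together
# with SHA-256 logs of the generated files.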
class _Statemachine(object):
def __init__(self, params):
self.params = params
self.wmi = wmi.WMI()
self.computer_name = params['computer_name']
self.output_dir = params['output_dir']
self.systemroot = params['system_root']
self.logger = params['logger']
self.rand_ext = params['rand_ext']
if 'destination' in params:
self.destination = params['destination']
def _list_network_drives(self):
for disk in self.wmi.Win32_LogicalDisk(DriveType=4):
yield disk.Caption, disk.FileSystem, disk.ProviderName
def _list_drives(self):
for physical_disk in self.wmi.Win32_DiskDrive():
for partition in physical_disk.associators("Win32_DiskDriveToDiskPartition"):
for logical_disk in partition.associators("Win32_LogicalDiskToPartition"):
yield physical_disk.Caption, partition.Caption, logical_disk.Caption, logical_disk.FileSystem
def _list_share(self):
for share in self.wmi.Win32_Share():
yield share.Name, share.Path
def _list_running(self):
for process in self.wmi.Win32_Process():
yield [process.ProcessId, process.Name, process.CommandLine, process.ExecutablePath]
def _list_sessions(self):
for session in self.wmi.Win32_Session():
yield session.LogonId, session.AuthenticationPackage, session.StartTime, session.LogonType
def _list_scheduled_jobs(self):
proc = subprocess.Popen(["schtasks.exe", '/query', '/fo', 'CSV'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
res = proc.communicate()
res = get_terminal_decoded_string(res[0])
column_names = None
for line in res.splitlines():
if line == "":
continue
if line[0] != '"':
continue
if column_names is None:
column_names = line
continue
elif column_names == line:
continue
yield line
def _list_at_scheduled_jobs(self):
proc = subprocess.Popen('at', stdout=subprocess.PIPE)
res = proc.communicate()
res = get_terminal_decoded_string(res[0])
for line in res.splitlines()[1:]:
line = re.compile(' {2,}').split(line, 4)
            if len(line) == 5:
yield line
def _list_network_adapters(self):
net = self.wmi.Win32_NetworkAdapter()
for n in net:
netcard = n.Caption
IPv4 = ''
IPv6 = ''
DHCP_server = ''
DNS_server = ''
adapter_type = ''
nbtstat_value = ''
if n.AdapterTypeID:
adapter_type = NETWORK_ADAPTATER[int(n.AdapterTypeID)]
net_enabled = n.NetEnabled
mac_address = n.MACAddress
description = n.Description
physical_adapter = unicode(n.PhysicalAdapter)
product_name = n.ProductName
speed = n.Speed
database_path = ''
if net_enabled:
nic = self.wmi.Win32_NetworkAdapterConfiguration(MACAddress=mac_address)
for nc in nic:
database_path = nc.DatabasePath
if nc.IPAddress:
try:
IPv4 = nc.IPAddress[0]
IPv6 = nc.IPAddress[1]
except IndexError:
self.logger.error('Error to catch IP Address %s ' % str(nc.IPAddress))
if IPv4:
nbtstat = 'nbtstat -A ' + IPv4
p = subprocess.Popen(nbtstat, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = p.communicate()
# output=utils.decode_output_cmd(output)
output = get_terminal_decoded_string(output)
nbtstat_value = output.split('\r\n')
nbtstat_value = ' '.join([n.replace('\n', '') for n in nbtstat_value])
if nc.DNSServerSearchOrder:
DNS_server = nc.DNSServerSearchOrder[0]
if nc.DHCPEnabled:
if nc.DHCPServer:
DHCP_server = nc.DHCPServer
yield netcard, adapter_type, description, mac_address, product_name, physical_adapter, product_name, speed,\
IPv4, IPv6, DHCP_server, DNS_server, database_path, nbtstat_value
def _list_arp_table(self):
cmd = "arp -a"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, errors = p.communicate()
output = get_terminal_decoded_string(output)
item = output.split("\n")
for i in item:
yield i
def _list_route_table(self):
route_table = self.wmi.Win32_IP4RouteTable()
for r in route_table:
yield r.Name, r.Mask
def _list_sockets_network(self):
for pid in win32process.EnumProcesses():
try:
p = psutil.Process(pid)
local_addr = ''
local_port = ''
remote_addr = ''
remote_port = ''
for connection in p.connections():
if len(connection.laddr) > 0:
local_addr = connection.laddr[0]
local_port = connection.laddr[1]
if len(connection.raddr) > 0:
remote_addr = connection.raddr[0]
remote_port = connection.raddr[1]
yield pid, p.name(), local_addr, local_port, remote_addr, remote_port, connection.status
except psutil.AccessDenied:
self.logger.warning(traceback.format_exc())
def _list_services(self):
services = self.wmi.Win32_Service()
for s in services:
yield s.Name, s.Caption, s.ProcessId, s.PathName, s.ServiceType, s.Status, s.State, s.StartMode
def _list_kb(self):
for kb in self.wmi.Win32_QuickFixEngineering():
yield kb.Caption, kb.CSName, kb.FixComments, kb.HotFixID, kb.InstallDate, kb.InstalledOn, kb.Name, \
kb.ServicePackInEffect, kb.Status
def _csv_list_running_process(self, list_running):
self.logger.info("Health : Listing running processes")
with open(self.output_dir + '%s_processes' % self.computer_name + self.rand_ext, 'ab') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "COMMAND", "EXEC_PATH"], csv_writer)
for p in list_running:
pid = p[0]
name = p[1]
cmd = p[2]
exe_path = p[3]
write_to_csv(
[self.computer_name, 'processes', unicode(pid), name, unicode(cmd), unicode(exe_path)],
csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_processes' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_running_process(self, list_running):
self.logger.info("Health : Listing running processes")
if self.destination == 'local':
with open(self.output_dir + '%s_processes' % self.computer_name + self.rand_ext, 'ab') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "COMMAND", "EXEC_PATH"]]
to_write += [[self.computer_name, 'processes', unicode(p[0]), p[1], unicode(p[2]), unicode(p[3])]
for p in list_running]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_processes' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_hash_running_process(self, list_running):
self.logger.info("Health : Hashing running processes")
with open(self.output_dir + '%s_hash_processes' % self.computer_name + self.rand_ext, 'ab') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "EXEC_PATH", "MD5", "SHA1", "CTIME", "MTIME",
"ATIME"], csv_writer)
for p in list_running:
pid = p[0]
name = p[1]
# cmd = p[2]
exe_path = p[3]
if exe_path and os.path.isfile(exe_path):
ctime = datetime.datetime.fromtimestamp(os.path.getctime(exe_path))
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(exe_path))
atime = datetime.datetime.fromtimestamp(os.path.getatime(exe_path))
md5 = process_md5(unicode(exe_path))
sha1 = process_sha1(unicode(exe_path))
write_to_csv(
[self.computer_name, 'hash processes', unicode(pid), name, unicode(exe_path), md5, sha1, ctime,
mtime, atime], csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_hash_processes' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_hash_running_process(self, list_running):
self.logger.info("Health : Hashing running processes")
if self.destination == 'local':
with open(self.output_dir + '%s_hash_processes' % self.computer_name + self.rand_ext, 'ab') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "EXEC_PATH", "MD5", "SHA1", "CTIME",
"MTIME", "ATIME"]]
for p in list_running:
pid = p[0]
name = p[1]
# cmd = p[2]
exe_path = p[3]
if exe_path and os.path.isfile(exe_path):
ctime = datetime.datetime.fromtimestamp(os.path.getctime(exe_path))
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(exe_path))
atime = datetime.datetime.fromtimestamp(os.path.getatime(exe_path))
md5 = process_md5(unicode(exe_path))
sha1 = process_sha1(unicode(exe_path))
to_write += [[self.computer_name, 'hash processes', unicode(pid), name, unicode(exe_path), md5,
sha1, ctime, mtime, atime]]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_hash_processes' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_share(self, share):
self.logger.info("Health : Listing shares")
with open(self.output_dir + '%s_shares' % self.computer_name + self.rand_ext, 'wb') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "SHARE_NAME", "SHARE_PATH"], csv_writer)
for name, path in share:
write_to_csv([self.computer_name, 'shares', name, path], csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_shares' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_share(self, share):
self.logger.info("Health : Listing shares")
if self.destination == 'local':
with open(self.output_dir + '%s_shares' % self.computer_name + self.rand_ext, 'wb') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "SHARE_NAME", "SHARE_PATH"]]
to_write += [[self.computer_name, 'shares', name, path] for name, path in share]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_shares' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_drives(self, drives):
self.logger.info("Health : Listing drives")
with open(self.output_dir + '%s_list_drives' % self.computer_name + self.rand_ext, 'wb') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "FAB", "PARTITIONS", "DISK", "FILESYSTEM"], csv_writer)
for phCapt, partCapt, logicalCapt, fs in drives:
write_to_csv([self.computer_name, 'list_drives', phCapt, partCapt, logicalCapt, fs], csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_list_drives' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_drives(self, drives):
self.logger.info("Health : Listing drives")
if self.destination == 'local':
with open(self.output_dir + '%s_list_drives' % self.computer_name + self.rand_ext, 'wb') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "FAB", "PARTITIONS", "DISK", "FILESYSTEM"]]
to_write += [[self.computer_name, 'list_drives', phCapt, partCapt, logicalCapt, fs]
for phCapt, partCapt, logicalCapt, fs in drives]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_list_drives' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_network_drives(self, drives):
self.logger.info("Health : Listing network drives")
with open(self.output_dir + '%s_list_networks_drives' % self.computer_name + self.rand_ext, 'wb') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "DISK", "FILESYSTEM", "PARTITION_NAME"], csv_writer)
for diskCapt, diskFs, diskPName in drives:
write_to_csv([self.computer_name, 'list_networks_drives', diskCapt, diskFs, diskPName], csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_list_networks_drives' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_network_drives(self, drives):
self.logger.info("Health : Listing network drives")
if self.destination == 'local':
with open(self.output_dir + '%s_list_networks_drives' % self.computer_name + self.rand_ext, 'wb') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "DISK", "FILESYSTEM", "PARTITION_NAME"]]
to_write += [[self.computer_name, 'list_networks_drives', diskCapt, diskFs, diskPName]
for diskCapt, diskFs, diskPName in drives]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_list_networks_drives' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_sessions(self, sessions):
self.logger.info('Health : Listing sessions')
with open(self.output_dir + '%s_sessions' % self.computer_name + self.rand_ext, 'ab') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "LOGON_ID", "AUTH_PACKAGE", "START_TIME", "LOGON_TYPE"], csv_writer)
for logonID, authenticationPackage, startime, logontype in sessions:
write_to_csv([self.computer_name, 'sessions', unicode(logonID),
authenticationPackage, unicode(startime.split('.')[0]), unicode(logontype)], csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_sessions' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_sessions(self, sessions):
self.logger.info('Health : Listing sessions')
if self.destination == 'local':
with open(self.output_dir + '%s_sessions' % self.computer_name + self.rand_ext, 'ab') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "LOGON_ID", "AUTH_PACKAGE", "START_TIME", "LOGON_TYPE"]]
to_write += [[self.computer_name, 'sessions', unicode(logonID), authenticationPackage,
unicode(startime.split('.')[0]), unicode(logontype)]
for logonID, authenticationPackage, startime, logontype in sessions]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_sessions' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_scheduled_jobs(self, is_at_available=False):
self.logger.info('Health : Listing scheduled jobs')
file_tasks = self.output_dir + '%s_scheduled_jobs' % self.computer_name + self.rand_ext
with open(file_tasks, 'wb') as tasks_logs:
write_to_output('"COMPUTER_NAME","TYPE","TASK_NAME","NEXT_SCHEDULE","STATUS"\r\n', tasks_logs, self.logger)
csv_writer = get_csv_writer(tasks_logs)
for line in self._list_scheduled_jobs():
write_to_csv([self.computer_name, 'scheduled_jobs'] + line.replace('"', '').split(','), csv_writer)
if is_at_available:
for line in self._list_at_scheduled_jobs():
write_to_csv([self.computer_name, 'scheduled_jobs', line[4], line[2] + ' ' + line[3], line[0]],
csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_scheduled_jobs' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_scheduled_jobs(self, is_at_available=False):
self.logger.info('Health : Listing scheduled jobs')
if self.destination == 'local':
file_tasks = self.output_dir + '%s_scheduled_jobs' % self.computer_name + self.rand_ext
with open(file_tasks, 'wb') as tasks_logs:
json_writer = get_json_writer(tasks_logs)
header = ["COMPUTER_NAME", "TYPE", 'TASK_NAME', 'NEXT_SCHEDULE', "STATUS"]
for line in self._list_scheduled_jobs():
write_to_json(header, [self.computer_name, 'Scheduled Jobs'] + line.replace('"', '').split(','),
json_writer)
if is_at_available:
for line in self._list_at_scheduled_jobs():
write_to_json(header, [self.computer_name, 'scheduled_jobs', line[4], line[2] + ' ' + line[3], line[0]],
json_writer)
close_json_writer(json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_scheduled_jobs' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_network_adapters(self, ncs):
self.logger.info('Health : Listing network adapters')
with open(self.output_dir + '%s_networks_cards' % self.computer_name + self.rand_ext, 'wb') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "NETWORK_CARD", "ADAPTER_TYPE", "DESCRIPTION", "MAC_ADDR",
"PRODUCT_NAME", "PHYSICAL_ADAPTER", "SPEED", "IPv4", "IPv6", "DHCP_SERVER", "DNS_SERVER",
"DATABASE_PATH", "NBTSTAT_VALUE"], csv_writer)
for netcard, adapter_type, description, mac_address, product_name, physical_adapter, product_name, speed, \
IPv4, IPv6, DHCP_server, DNS_server, database_path, nbtstat_value in ncs:
if netcard is None:
netcard = ' '
if adapter_type is None:
adapter_type = ' '
if description is None:
description = ' '
if mac_address is None:
mac_address = ' '
if physical_adapter is None:
physical_adapter = ' '
if product_name is None:
product_name = ' '
if speed is None:
speed = ' '
if IPv4 is None:
IPv4 = ' '
if IPv6 is None:
IPv6 = ' '
if DHCP_server is None:
DHCP_server = ' '
if DNS_server is None:
DNS_server = ' '
if database_path is None:
database_path = ' '
if nbtstat_value is None:
nbtstat_value = ' '
try:
write_to_csv([self.computer_name,
'networks_cards', netcard, adapter_type,
description, mac_address, product_name,
physical_adapter, speed, IPv4,
IPv6, DHCP_server, DNS_server,
database_path, nbtstat_value], csv_writer)
except IOError:
self.logger.error(traceback.format_exc())
record_sha256_logs(self.output_dir + self.computer_name + '_networks_cards' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_network_adapters(self, ncs):
self.logger.info('Health : Listing network adapters')
if self.destination == 'local':
with open(self.output_dir + '%s_networks_cards' % self.computer_name + self.rand_ext, 'wb') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "NETWORK_CARD", "ADAPTER_TYPE", "DESCRIPTION", "MAC_ADDR",
"PRODUCT_NAME", "PHYSICAL_ADAPTER", "SPEED", "IPv4", "IPv6", "DHCP_SERVER", "DNS_SERVER",
"DATABASE_PATH", "NBTSTAT_VALUE"]]
for netcard, adapter_type, description, mac_address, product_name, physical_adapter, product_name, \
speed, IPv4, IPv6, DHCP_server, DNS_server, database_path, nbtstat_value in ncs:
if netcard is None:
netcard = ' '
if adapter_type is None:
adapter_type = ' '
if description is None:
description = ' '
if mac_address is None:
mac_address = ' '
if physical_adapter is None:
physical_adapter = ' '
if product_name is None:
product_name = ' '
if speed is None:
speed = ' '
if IPv4 is None:
IPv4 = ' '
if IPv6 is None:
IPv6 = ' '
if DHCP_server is None:
DHCP_server = ' '
if DNS_server is None:
DNS_server = ' '
if database_path is None:
database_path = ' '
if nbtstat_value is None:
nbtstat_value = ' '
try:
to_write += [[self.computer_name, 'networks_cards', netcard, adapter_type, description,
mac_address, product_name, physical_adapter, speed, IPv4, IPv6, DHCP_server,
DNS_server, database_path, nbtstat_value]]
except IOError:
self.logger.error(traceback.format_exc())
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_networks_cards' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_arp_table(self, arp):
self.logger.info('Health : Listing ARP tables')
with open(self.output_dir + '%s_arp_table' % self.computer_name + self.rand_ext, 'wb') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "IP", "MAC_ADDR", "STATUS"], csv_writer)
for entry in arp:
entry.replace('\xff', '')
tokens = entry.split()
entry_to_write = ''
if len(tokens) == 3:
entry_to_write = '"' + self.computer_name + '"|"arp_table"|"' + '"|"'.join(tokens) + '"\n'
if entry_to_write.find('\.') != 1 and len(entry_to_write) > 0:
arr_to_write = [self.computer_name, 'arp_table'] + tokens
write_to_csv(arr_to_write, csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_arp_table' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_arp_table(self, arp):
self.logger.info('Health : Listing ARP tables')
if self.destination == 'local':
with open(self.output_dir + '%s_arp_table' % self.computer_name + self.rand_ext, 'wb') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "IP", "MAC_ADDR", "STATUS"]]
for entry in arp:
entry.replace('\xff', '')
tokens = entry.split()
entry_to_write = ''
if len(tokens) == 3:
entry_to_write = '"' + self.computer_name + '"|"arp_table"|"' + '"|"'.join(tokens) + '"\n'
if entry_to_write.find('\.') != 1 and len(entry_to_write) > 0:
to_write += [[self.computer_name, 'arp_table'] + tokens]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_arp_table' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_route_table(self, routes):
self.logger.info('Health : Listing routes tables')
with open(self.output_dir + '%s_routes_tables' % self.computer_name + self.rand_ext, 'ab') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "NAME", "MASK"], csv_writer)
for ip, mask in routes:
write_to_csv([self.computer_name, 'routes_tables', unicode(ip), unicode(mask)], csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_routes_tables' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_route_table(self, routes):
self.logger.info('Health : Listing routes tables')
if self.destination == 'local':
with open(self.output_dir + '%s_routes_tables' % self.computer_name + self.rand_ext, 'ab') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "NAME", "MASK"]]
to_write += [[self.computer_name, 'routes_tables', unicode(ip), unicode(mask)] for ip, mask in routes]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_routes_tables' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_sockets_network(self, connections):
self.logger.info('Health : Listing sockets networks')
with open(self.output_dir + '%s_sockets' % self.computer_name + self.rand_ext, 'ab') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "LOCAL_ADDR", "SOURCE_PORT", "REMOTE_ADDR",
"REMOTE_PORT", "STATUS"], csv_writer)
for pid, name, local_address, source_port, remote_addr, remote_port, status in connections:
write_to_csv([self.computer_name, 'sockets', unicode(pid),
unicode(name), unicode(local_address), unicode(source_port),
unicode(remote_addr), unicode(remote_port), unicode(status)], csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_sockets' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_sockets_network(self, connections):
self.logger.info('Health : Listing sockets networks')
if self.destination == 'local':
with open(self.output_dir + '%s_sockets' % self.computer_name + self.rand_ext, 'ab') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "PID", "PROCESS_NAME", "LOCAL_ADDR", "SOURCE_PORT", "REMOTE_ADDR",
"REMOTE_PORT", "STATUS"]]
for pid, name, local_address, source_port, remote_addr, remote_port, status in connections:
to_write += [[self.computer_name, 'sockets', unicode(pid), unicode(name), unicode(local_address),
unicode(source_port), unicode(remote_addr), unicode(remote_port), unicode(status)]]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_sockets' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_services(self, services):
self.logger.info('Health : Listing services')
with open(self.output_dir + '%s_services' % self.computer_name + self.rand_ext, 'ab') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "CAPTION", "PID", "SERVICE_TYPE", "PATH_NAME", "STATUS", "STATE",
"START_MODE"], csv_writer)
for name, caption, processId, pathName, serviceType, status, state, startMode in services:
write_to_csv([self.computer_name, 'services', caption,
unicode(processId), serviceType, pathName,
unicode(status), state, startMode], csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_services' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_services(self, services):
self.logger.info('Health : Listing services')
if self.destination == 'local':
with open(self.output_dir + '%s_services' % self.computer_name + self.rand_ext, 'ab') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "CAPTION", "PID", "SERVICE_TYPE", "PATH_NAME", "STATUS", "STATE",
"START_MODE"]]
for name, caption, processId, pathName, serviceType, status, state, startMode in services:
to_write += [[self.computer_name, 'services', caption, unicode(processId), serviceType, pathName,
unicode(status), state, startMode]]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_services' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _csv_list_kb(self, kbs):
self.logger.info('Health : Listing KB installed on computer')
with open(self.output_dir + '%s_kb' % self.computer_name + self.rand_ext, 'ab') as fw:
csv_writer = get_csv_writer(fw)
write_to_csv(["COMPUTER_NAME", "TYPE", "CAPTION", "CS_NAME", "FIX_COMMENTS", "HOTFIX_ID", "INSTALL_DATE",
"INSTALLED_ON", "NAME", "SERVICE_PACK", "STATUS"], csv_writer)
for Caption, CSName, FixComments, HotFixID, InstallDate, InstalledOn, Name, ServicePackInEffect, Status in kbs:
write_to_csv(
[self.computer_name, 'kb', Caption, CSName, FixComments, HotFixID, InstallDate, InstalledOn, Name,
ServicePackInEffect, Status], csv_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_kb' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
def _json_list_kb(self, kbs):
self.logger.info('Health : Listing KB installed on computer')
if self.destination == 'local':
with open(self.output_dir + '%s_kb' % self.computer_name + self.rand_ext, 'ab') as fw:
json_writer = get_json_writer(fw)
to_write = [["COMPUTER_NAME", "TYPE", "CAPTION", "CS_NAME", "FIX_COMMENTS", "HOTFIX_ID", "INSTALL_DATE",
"INSTALLED_ON", "NAME", "SERVICE_PACK", "STATUS"]]
for Caption, CSName, FixComments, HotFixID, InstallDate, InstalledOn, Name, ServicePackInEffect, Status in kbs:
to_write += [[self.computer_name, 'kb', Caption, CSName, FixComments, HotFixID, InstallDate,
InstalledOn, Name, ServicePackInEffect, Status]]
write_list_to_json(to_write, json_writer)
record_sha256_logs(self.output_dir + self.computer_name + '_kb' + self.rand_ext,
self.output_dir + self.computer_name + '_sha256.log')
| 58.109428 | 128 | 0.576093 |
a7ca7ddee6470aa236b8a59bf302fb56488e82a1 | 349 | py | Python | leetcode/089-Gray-Code/GrayCode_001.py | cc13ny/all-in | bc0b01e44e121ea68724da16f25f7e24386c53de | ["MIT"] | 1 | 2015-12-16T04:01:03.000Z | 2015-12-16T04:01:03.000Z | leetcode/089-Gray-Code/GrayCode_001.py | cc13ny/all-in | bc0b01e44e121ea68724da16f25f7e24386c53de | ["MIT"] | 1 | 2016-02-09T06:00:07.000Z | 2016-02-09T07:20:13.000Z | leetcode/089-Gray-Code/GrayCode_001.py | cc13ny/all-in | bc0b01e44e121ea68724da16f25f7e24386c53de | ["MIT"] | 2 | 2019-06-27T09:07:26.000Z | 2019-07-01T04:40:13.000Z |
# Although it can't AC the LeetCode test, it's definitely right. However, its pattern may not be as obvious as the one that can AC.
def grayCode(n):
res = [0]
seed = 1
for i in range(n - 1):
res += [seed, 3 * seed]
seed = seed << 1
for j in range((seed << 1) - len(res)):
res.append(res[j] + seed)
return res
| 29.083333 | 126 | 0.573066 |
ac5f0a01e73935083b77f3c8aecc94f726368e12 | 129 | py | Python | web/adminsite/__init__.py | ralfkret/hvz | 842c25d58fb3c30060080efcfa8b3d183b78e2ab | ["MIT"] | 1 | 2019-07-30T14:39:43.000Z | 2019-07-30T14:39:43.000Z | web/adminsite/__init__.py | ralfkret/hvz | 842c25d58fb3c30060080efcfa8b3d183b78e2ab | ["MIT"] | 11 | 2019-07-31T13:40:30.000Z | 2019-08-07T21:54:52.000Z | web/adminsite/__init__.py | ralfkret/hvz | 842c25d58fb3c30060080efcfa8b3d183b78e2ab | ["MIT"] | null | null | null |
from flask import Blueprint
admin = Blueprint('adminsite', __name__, template_folder='adminsite_templates')
from . import views
| 25.8 | 79 | 0.806202 |
3bd379e38ce5f491f72ee387b211228bc2d639d9 | 2,053 | py | Python | Python/zzz_training_challenge/Python_Challenge/solutions/ch04_strings/solutions/ex04_palindrome.py | Kreijeck/learning | eaffee08e61f2a34e01eb8f9f04519aac633f48c | ["MIT"] | null | null | null | Python/zzz_training_challenge/Python_Challenge/solutions/ch04_strings/solutions/ex04_palindrome.py | Kreijeck/learning | eaffee08e61f2a34e01eb8f9f04519aac633f48c | ["MIT"] | null | null | null | Python/zzz_training_challenge/Python_Challenge/solutions/ch04_strings/solutions/ex04_palindrome.py | Kreijeck/learning | eaffee08e61f2a34e01eb8f9f04519aac633f48c | ["MIT"] | null | null | null |
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
from ch04_strings.solutions.ex03_reverse_string import reverse
def is_palindrome(input):
left = 0
right = len(input) - 1
lower_input = input.lower()
is_same_char = True
while left < right and is_same_char:
is_same_char = (lower_input[left] == lower_input[right])
left += 1
right -= 1
return is_same_char
def is_palindrome_rec(input):
return is_palindrome_rec_in_range(input.lower(), 0, len(input) - 1)
def is_palindrome_rec_in_range(input, left, right):
if left >= right:
return True
if input[left] == input[right]:
return is_palindrome_rec_in_range(input, left + 1, right - 1)
return False
def is_palindrome_special(input, ignore_spaces_and_punctuation):
adjusted_input = input.lower()
if ignore_spaces_and_punctuation:
adjusted_input = adjusted_input.replace(" ", "")
adjusted_input = adjusted_input.replace("!", "")
adjusted_input = adjusted_input.replace(".", "")
return is_palindrome_rec(adjusted_input)
import re
def is_palindrome_special_with_reg_ex(input, ignore_spaces_and_punctuation):
adjusted_input = input.lower()
if ignore_spaces_and_punctuation:
adjusted_input = re.sub(r"[ !\.\?]", "", adjusted_input)
return is_palindrome_rec(adjusted_input)
def is_palindrome_with_reverse(input):
adjusted_input = input.lower()
return adjusted_input == reverse(adjusted_input)
def is_palindrome_short(input):
adjusted_input = input.lower()
return adjusted_input == adjusted_input[::-1]
def main():
print(is_palindrome("ABBA"))
print(is_palindrome("MICHA"))
print(is_palindrome_short("ABBA"))
print(is_palindrome_short("MICHA"))
print(is_palindrome_rec("ABBA"))
print(is_palindrome_rec("MICHA"))
print(is_palindrome_special("Dreh mal am Herd.", True))
print(is_palindrome_special("Dreh mal am Herd.", False))
if __name__ == "__main__":
main()
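# Expected console output of main() (added note): True, False, True, False,
# True, False, True, False - "Dreh mal am Herd." counts as a palindrome only
# when spaces and punctuation are ignored.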
| 23.329545 | 76 | 0.700925 |
0cee86124c433e02b1d51732c87e63917e0028f4
| 3,855 |
py
|
Python
|
bind/pyevt/pyevt/evt_exception.py
|
harrywong/evt
|
95985384619e0f5ff4021e8838d421ac4b4b946d
|
[
"BSD-3-Clause"
] | 1,411 |
2018-04-23T03:57:30.000Z
|
2022-02-13T10:34:22.000Z
|
bind/pyevt/pyevt/evt_exception.py
|
Zhang-Zexi/evt
|
e90fe4dbab4b9512d120c79f33ecc62791e088bd
|
[
"Apache-2.0"
] | 27 |
2018-06-11T10:34:42.000Z
|
2019-07-27T08:50:02.000Z
|
bind/pyevt/pyevt/evt_exception.py
|
Zhang-Zexi/evt
|
e90fe4dbab4b9512d120c79f33ecc62791e088bd
|
[
"Apache-2.0"
] | 364 |
2018-06-09T12:11:53.000Z
|
2020-12-15T03:26:48.000Z
|
from . import libevt
class EVTErrCode:
EVT_OK = 0
EVT_INTERNAL_ERROR = -1
EVT_INVALID_ARGUMENT = -2
EVT_INVALID_PRIVATE_KEY = -3
EVT_INVALID_PUBLIC_KEY = -4
EVT_INVALID_SIGNATURE = -5
EVT_INVALID_HASH = -6
EVT_INVALID_ACTION = -7
EVT_INVALID_BINARY = -8
EVT_INVALID_JSON = -9
EVT_INVALID_ADDRESS = -10
EVT_SIZE_NOT_EQUALS = -11
EVT_DATA_NOT_EQUALS = -12
EVT_INVALID_LINK = -13
EVT_NOT_INIT = -15
class EVTException(Exception):
def __init__(self, err):
if err == 'EVT_INTERNAL_ERROR':
evt = libevt.check_lib_init()
code = evt.evt_last_error()
errmsg = '{}: {}'.format(err, code)
super().__init__(self, errmsg)
else:
super().__init__(self, err)
class EVTInternalErrorException(Exception):
def __init__(self):
err = 'EVT_INTERNAL_ERROR'
super().__init__(self, err)
class EVTInvalidArgumentException(Exception):
def __init__(self):
err = 'EVT_INVALID_ARGUMENT'
super().__init__(self, err)
class EVTInvalidPrivateKeyException(Exception):
def __init__(self):
err = 'EVT_INVALID_PRIVATE_KEY'
super().__init__(self, err)
class EVTInvalidPublicKeyException(Exception):
def __init__(self):
err = 'EVT_INVALID_PUBLIC_KEY'
super().__init__(self, err)
class EVTInvalidSignatureException(Exception):
def __init__(self):
err = 'EVT_INVALID_SIGNATURE'
super().__init__(self, err)
class EVTInvalidHashException(Exception):
def __init__(self):
err = 'EVT_INVALID_HASH'
super().__init__(self, err)
class EVTInvalidActionException(Exception):
def __init__(self):
err = 'EVT_INVALID_ACTION'
super().__init__(self, err)
class EVTInvalidBinaryException(Exception):
def __init__(self):
err = 'EVT_INVALID_BINARY'
super().__init__(self, err)
class EVTInvalidJsonException(Exception):
def __init__(self):
err = 'EVT_INVALID_JSON'
super().__init__(self, err)
class EVTInvalidAddressException(Exception):
def __init__(self):
err = 'EVT_INVALID_ADDRESS'
super().__init__(self, err)
class EVTSizeNotEqualsException(Exception):
def __init__(self):
err = 'EVT_SIZE_NOT_EQUALS'
super().__init__(self, err)
class EVTDataNotEqualsException(Exception):
def __init__(self):
err = 'EVT_DATA_NOT_EQUALS'
super().__init__(self, err)
class EVTInvalidLinkException(Exception):
def __init__(self):
err = 'EVT_INVALID_LINK'
super().__init__(self, err)
class EVTNotInitException(Exception):
def __init__(self):
err = 'EVT_NOT_INIT'
super().__init__(self, err)
ex_map = {
EVTErrCode.EVT_INTERNAL_ERROR: EVTInternalErrorException,
EVTErrCode.EVT_INVALID_ARGUMENT: EVTInvalidArgumentException,
EVTErrCode.EVT_INVALID_PRIVATE_KEY: EVTInvalidPrivateKeyException,
EVTErrCode.EVT_INVALID_PUBLIC_KEY: EVTInvalidPublicKeyException,
EVTErrCode.EVT_INVALID_SIGNATURE: EVTInvalidSignatureException,
EVTErrCode.EVT_INVALID_HASH: EVTInvalidHashException,
EVTErrCode.EVT_INVALID_ACTION: EVTInvalidActionException,
EVTErrCode.EVT_INVALID_BINARY: EVTInvalidBinaryException,
EVTErrCode.EVT_INVALID_JSON: EVTInvalidJsonException,
EVTErrCode.EVT_INVALID_ADDRESS: EVTInvalidAddressException,
EVTErrCode.EVT_INVALID_LINK: EVTInvalidLinkException,
EVTErrCode.EVT_SIZE_NOT_EQUALS: EVTSizeNotEqualsException,
EVTErrCode.EVT_DATA_NOT_EQUALS: EVTDataNotEqualsException,
EVTErrCode.EVT_NOT_INIT: EVTNotInitException
}
def evt_exception_raiser(error_code):
if error_code == EVTErrCode.EVT_OK:
return
if error_code in ex_map:
raise ex_map[error_code]
raise Exception('Unknown error code')
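# Minimal usage sketch (added note): evt_exception_raiser(EVTErrCode.EVT_OK)
# returns silently, while e.g. evt_exception_raiser(EVTErrCode.EVT_INVALID_JSON)
# raises EVTInvalidJsonException.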
| 27.147887 | 70 | 0.705318 |
0b46a3a030e1fbade37d159ae85d845b8f18e9b6
| 1,777 |
py
|
Python
|
20-fs-ias-lec/groups/01-dev2dev/Code/BTonly/lib/logMerge/logStore/appconn/connection.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 8 |
2020-03-17T21:12:18.000Z
|
2021-12-12T15:55:54.000Z
|
20-fs-ias-lec/groups/01-dev2dev/Code/BTonly/lib/logMerge/logStore/appconn/connection.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 2 |
2021-07-19T06:18:43.000Z
|
2022-02-10T12:17:58.000Z
|
20-fs-ias-lec/groups/01-dev2dev/Code/BTonly/lib/logMerge/logStore/appconn/connection.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 25 |
2020-03-20T09:32:45.000Z
|
2021-07-18T18:12:59.000Z
|
from ..database.database_handler import DatabaseHandler
class Function:
"""To be used when there has not been a specific table implemented for a group:"""
def __init__(self):
self._handler = DatabaseHandler()
def insert_event(self, cbor):
""""Add a cbor event to the two databases.
Calls each the byte array handler as well as the event handler to insert the event in both databases
accordingly. Gets called both by database connector as well as the function connector. Returns 1 if successful,
otherwise -1 if any error occurred.
"""
self._handler.add_to_db(event_as_cbor=cbor, app=True)
def get_current_seq_no(self, feed_id):
""""Return the current sequence number of a given feed_id, returns an integer with the currently largest
sequence number for the given feed. Returns -1 if there is no such feed_id in the database."""
return self._handler.get_current_seq_no(feed_id)
def get_event(self, feed_id, seq_no):
""""Return a specific cbor event to the callee with the input feed_id and sequence number. Returns None if
there is no such entry."""
return self._handler.get_event(feed_id, seq_no)
def get_current_event(self, feed_id):
""""Return the newest (the one with the highest sequence number) cbor event for a feed_id. Returns None if
there is no such feed_id in the database."""
return self._handler.get_current_event_as_cbor(feed_id)
def get_all_feed_ids(self):
""""Return all current feed ids in the database."""
return self._handler.get_all_feed_ids()
def get_host_master_id(self):
return self._handler.get_host_master_id()
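# Minimal usage sketch (added note; assumes a reachable database, a
# cbor-encoded event `event_as_cbor` and a known `feed_id`):
#   conn = Function()
#   conn.insert_event(event_as_cbor)
#   latest = conn.get_current_event(feed_id)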
| 44.425 | 119 | 0.684862 |
0b6dd91d89263ff6b5d493421da73bb4c4763481
| 645 |
py
|
Python
|
Python/EstruturaSequencial/Exercicios/Exrtcicio08.py
|
ekballo/Back-End
|
b252e3b2a16ce36486344823f14afa6691fde9bc
|
[
"MIT"
] | null | null | null |
Python/EstruturaSequencial/Exercicios/Exrtcicio08.py
|
ekballo/Back-End
|
b252e3b2a16ce36486344823f14afa6691fde9bc
|
[
"MIT"
] | null | null | null |
Python/EstruturaSequencial/Exercicios/Exrtcicio08.py
|
ekballo/Back-End
|
b252e3b2a16ce36486344823f14afa6691fde9bc
|
[
"MIT"
] | null | null | null |
# Write a program that asks how much you earn per hour and the number of hours worked in the month.
# Calculate and display your total salary for that month.
valor = float(input('Digite o valor que ganha por hora: '))
horas = float(input('Digite quantas horas trabalhadas fez hoje: '))
dias = int(input('Quantos dias você trabalhou: '))
salário = valor * dias
salário = salário * horas
print('O valor ganho por horas é {:.2f}h'.format(valor))
print('A horas trabalhadas são {:.2f}h'.format(horas))
print('Quantos dias você trabalha no mês é {:.2f}'.format(dias))
print('A base salarial trabalhada no mês é {:.2f}'.format(salário))
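# Worked example (added note, hypothetical inputs): valor=10.0, horas=8.0,
# dias=20 -> salário = 10.0 * 20 * 8.0 = 1600.00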
| 43 | 98 | 0.728682 |
0ba1228e06eb3152b38fed31d83c38ea79d926b1
| 9,829 |
py
|
Python
|
Topic3-TradingBot/RLbot.py
|
SimonScapan/AusarbeitungenAI
|
ef97bd76ae724e2ec76132bc5f4b81d0b751d882
|
[
"MIT"
] | null | null | null |
Topic3-TradingBot/RLbot.py
|
SimonScapan/AusarbeitungenAI
|
ef97bd76ae724e2ec76132bc5f4b81d0b751d882
|
[
"MIT"
] | null | null | null |
Topic3-TradingBot/RLbot.py
|
SimonScapan/AusarbeitungenAI
|
ef97bd76ae724e2ec76132bc5f4b81d0b751d882
|
[
"MIT"
] | null | null | null |
'''
Reinforcement Learning Trading bot for educational purposes.
The project is based on: https://www.analyticsvidhya.com/blog/2021/01/bear-run-or-bull-run-can-reinforcement-learning-help-in-automated-trading/
The aim is to find a trading model with a higher level of intelligence, to be compared later with a "buy low, sell high" heuristic.
More detailed information can be found in the scientific paper linked to this repository (in the LaTex folder).
'''
############################
# import required packages #
############################
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt # required for plotting results at the end
import seaborn as sns
sns.set()
from pandas_datareader import data as pdr # required to read stock data
import fix_yahoo_finance as yf # stock information source
from collections import deque
import random
import tensorflow.compat.v1 as tf # Q-Learn model
tf.compat.v1.disable_eager_execution()
##################
# fetch the data #
##################
# fetch data from yahoo finance - stock: INFY, duration: since 2018
yf.pdr_override()
df_full = pdr.get_data_yahoo("INFY", start="2018-01-01", end="2021-01-01").reset_index()
df_full.to_csv('INFY.csv',index=False)
df_full.head()
###########################
# Define Q-Learning Agent #
###########################
df= df_full.copy() # make copy of stock dataframe
class Agent:
def __init__(self, state_size, window_size, trend, skip, batch_size): # initialize model information
self.state_size = state_size # define state size
self.window_size = window_size
self.half_window = window_size // 2
self.trend = trend
self.skip = skip
self.action_size = 3 # define action size (3 for: buy, sell, hold)
self.batch_size = batch_size # define batch size
self.memory = deque(maxlen = 1000) # deque which is the memory used
self.inventory = [] # inventory of stocks as list
self.gamma = 0.95
self.epsilon = 0.5
self.epsilon_min = 0.01
self.epsilon_decay = 0.999
tf.reset_default_graph()
self.sess = tf.InteractiveSession()
self.X = tf.placeholder(tf.float32, [None, self.state_size]) # two neural networks for the buy, sell and hold call
self.Y = tf.placeholder(tf.float32, [None, self.action_size]) # second neural net
feed = tf.layers.dense(self.X, 256, activation = tf.nn.relu) # set ReLu as activation function
self.logits = tf.layers.dense(feed, self.action_size)
self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))
self.optimizer = tf.train.GradientDescentOptimizer(1e-5).minimize( # set Gradient Descent as optimizer
self.cost
)
self.sess.run(tf.global_variables_initializer())
# act and get_state function makes use of neural Network for generating next state of the NN
def act(self, state):
if random.random() <= self.epsilon:
return random.randrange(self.action_size)
return np.argmax(
self.sess.run(self.logits, feed_dict = {self.X: state})[0]
)
def get_state(self, t):
window_size = self.window_size + 1
d = t - window_size + 1
block = self.trend[d : t + 1] if d >= 0 else -d * [self.trend[0]] + self.trend[0 : t + 1]
res = []
for i in range(window_size - 1):
res.append(block[i + 1] - block[i])
return np.array([res])
def replay(self, batch_size):
mini_batch = []
l = len(self.memory)
for i in range(l - batch_size, l):
mini_batch.append(self.memory[i])
replay_size = len(mini_batch)
X = np.empty((replay_size, self.state_size))
Y = np.empty((replay_size, self.action_size))
states = np.array([a[0][0] for a in mini_batch])
new_states = np.array([a[3][0] for a in mini_batch])
Q = self.sess.run(self.logits, feed_dict = {self.X: states})
Q_new = self.sess.run(self.logits, feed_dict = {self.X: new_states})
for i in range(len(mini_batch)):
state, action, reward, next_state, done = mini_batch[i] # rewards are subsequently calculated by adding or subtracting value generated by executing call option
target = Q[i]
target[action] = reward
if not done:
target[action] += self.gamma * np.amax(Q_new[i])
X[i] = state
Y[i] = target
cost, _ = self.sess.run(
[self.cost, self.optimizer], feed_dict = {self.X: X, self.Y: Y}
)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
return cost
def buy(self, initial_money):
starting_money = initial_money
states_sell = []
states_buy = []
inventory = []
state = self.get_state(0)
for t in range(0, len(self.trend) - 1, self.skip): # In every iteration, state is determined on basis of which an action is taken which will either buy or sell some stocks
action = self.act(state)
next_state = self.get_state(t + 1) # action taken at the next state is influenced by the action taken on the previous state
if action == 1 and initial_money >= self.trend[t] and t < (len(self.trend) - self.half_window):
inventory.append(self.trend[t]) # 1 refers to a Buy call
initial_money -= self.trend[t]
states_buy.append(t)
print('day %d: buy 1 unit at price %f, total balance %f'% (t, self.trend[t], initial_money))
elif action == 2 and len(inventory): # 2 refers to a Sell call
bought_price = inventory.pop(0)
initial_money += self.trend[t]
states_sell.append(t)
try:
invest = ((close[t] - bought_price) / bought_price) * 100
except:
invest = 0
print(
'day %d, sell 1 unit at price %f, investment %f %%, total balance %f,'
% (t, close[t], invest, initial_money)
)
state = next_state
invest = ((initial_money - starting_money) / starting_money) * 100
total_gains = initial_money - starting_money
return states_buy, states_sell, total_gains, invest
def train(self, iterations, checkpoint, initial_money):
for i in range(iterations):
total_profit = 0 # overall rewards are stored in the total profit variable
inventory = []
state = self.get_state(0)
starting_money = initial_money
for t in range(0, len(self.trend) - 1, self.skip):
action = self.act(state)
next_state = self.get_state(t + 1)
if action == 1 and starting_money >= self.trend[t] and t < (len(self.trend) - self.half_window):
inventory.append(self.trend[t])
starting_money -= self.trend[t]
elif action == 2 and len(inventory) > 0:
bought_price = inventory.pop(0)
total_profit += self.trend[t] - bought_price
starting_money += self.trend[t]
invest = ((starting_money - initial_money) / initial_money)
self.memory.append((state, action, invest,
next_state, starting_money < initial_money))
state = next_state
batch_size = min(self.batch_size, len(self.memory))
cost = self.replay(batch_size)
if (i+1) % checkpoint == 0:
print('epoch: %d, total rewards: %f.3, cost: %f, total money: %f'%(i + 1, total_profit, cost,
starting_money))
###################
# Train the Agent #
###################
close = df.Close.values.tolist()
initial_money = 10000
window_size = 30
skip = 1
batch_size = 32
agent = Agent(state_size = window_size, # initialize the previously defined agent
window_size = window_size,
trend = close,
skip = skip,
batch_size = batch_size)
agent.train(iterations = 1000, checkpoint = 10, initial_money = initial_money) # specify number of iterations
##################
# Test the Agent #
##################
states_buy, states_sell, total_gains, invest = agent.buy(initial_money = initial_money) # buy function will return the buy, sell, profit, and investment figures
####################
# Plot the results #
####################
fig = plt.figure(figsize = (15,5)) # plot the total gains vs the invested figures
plt.plot(close, color='r', lw=2.)
plt.plot(close, '^', markersize=10, color='m', label = 'buying signal', markevery = states_buy)
plt.plot(close, 'v', markersize=10, color='k', label = 'selling signal', markevery = states_sell)
plt.title('RL-Bot: total gains %f, total investment %f%%'%(total_gains, invest))
plt.legend()
plt.savefig('plots/1000iter-RL-bot.png')
| 47.254808 | 200 | 0.551836 |
0bd06b1e6acf8dfc1899810ecf27dada7181f20c
| 2,465 |
py
|
Python
|
Packs/IPQualityScore/Integrations/IPQualityScore/IPQualityScore_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/IPQualityScore/Integrations/IPQualityScore/IPQualityScore_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/IPQualityScore/Integrations/IPQualityScore/IPQualityScore_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import json
import io
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_ip_command(requests_mock):
from IPQualityScore import Client, ip_command
mock_response = util_load_json('test_data/ip_response.json')
requests_mock.get('https://ipqualityscore.com/api/json/ip/api_key_here/15.99.160.255', json=mock_response)
client = Client(
base_url='https://ipqualityscore.com/api/json/ip/api_key_here',
verify=False)
ip_suspicious_score_threshold = 75
ip_malicious_score_threshold = 85
reliability = "A - Completely reliable"
args = {
"ip": "15.99.160.255"
}
response = ip_command(client, args, ip_suspicious_score_threshold, ip_malicious_score_threshold, reliability)
assert response[0].outputs_prefix == 'IPQualityScore.IP'
def test_email_command(requests_mock):
from IPQualityScore import Client, email_command
mock_response = util_load_json('test_data/email_response.json')
requests_mock.get('https://ipqualityscore.com/api/json/email/api_key_here/[email protected]', json=mock_response)
client = Client(
base_url='https://ipqualityscore.com/api/json/email/api_key_here',
verify=False)
email_suspicious_score_threshold = 75
email_malicious_score_threshold = 85
reliability = "A - Completely reliable"
args = {
"email": "[email protected]"
}
response = email_command(client, args, email_suspicious_score_threshold, email_malicious_score_threshold,
reliability)
assert response[0].outputs_prefix == 'IPQualityScore.Email'
def test_url_command(requests_mock):
from IPQualityScore import Client, url_command
mock_response = util_load_json('test_data/url_response.json')
requests_mock.get('https://ipqualityscore.com/api/json/url/api_key_here/https%3A%2F%2Fgoogle.com',
json=mock_response)
client = Client(
base_url='https://ipqualityscore.com/api/json/url/api_key_here',
verify=False)
url_suspicious_score_threshold = 75
url_malicious_score_threshold = 85
reliability = "A - Completely reliable"
args = {
"url": "https://google.com"
}
response = url_command(client, args, url_suspicious_score_threshold, url_malicious_score_threshold,
reliability)
assert response[0].outputs_prefix == 'IPQualityScore.Url'
| 39.758065 | 117 | 0.711968 |
f088bee9cfb101dae4e879cd928c5a2a01a0413c
| 763 |
py
|
Python
|
SoSe-21/Uebung-2/A2-MwStBerechnung.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | 1 |
2022-03-23T09:40:39.000Z
|
2022-03-23T09:40:39.000Z
|
SoSe-21/Uebung-2/A2-MwStBerechnung.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
SoSe-21/Uebung-2/A2-MwStBerechnung.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
# Task 2 - Exercise sheet 2
#
# Create a Python program with the following requirements:
# From an entered net amount, the 19% VAT and the gross amount are to be calculated and displayed.
# 1. Define static variables
mwst = 0.19
# 2. Capture the user input and convert it to float
nettobetrag = float(input("Bitte geben Sie den Nettobetrag ein:"))
print ("Der von Ihnen eingegebene Nettobetrag ist:", nettobetrag, "€.")
# 3. Calculate and output the VAT amount
mehrwertsteuerbetrag = nettobetrag * mwst
print("Der Mehrwertsteuerbetrag beträgt:", mehrwertsteuerbetrag, "€.")
# 4. Calculate and output the gross amount
bruttobetrag = nettobetrag + mehrwertsteuerbetrag
print("Der Bruttobetrag beträgt:", bruttobetrag, "€.")
| 40.157895 | 120 | 0.782438 |
f0bea1ed57b8117cff99107664aa6e287d235e9f
| 1,795 |
py
|
Python
|
test/py/test_grpc.py
|
wybosys/nnt.logic.jvm
|
249aedd7f8dd03d94dcb0780b91dadacd4b25270
|
[
"BSD-3-Clause"
] | 2 |
2020-09-22T11:01:31.000Z
|
2020-09-22T11:12:29.000Z
|
test/py/test_grpc.py
|
wybosys/nnt.logic.jvm
|
249aedd7f8dd03d94dcb0780b91dadacd4b25270
|
[
"BSD-3-Clause"
] | null | null | null |
test/py/test_grpc.py
|
wybosys/nnt.logic.jvm
|
249aedd7f8dd03d94dcb0780b91dadacd4b25270
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import asyncio
import grpc
from google.protobuf.empty_pb2 import Empty
from google.protobuf.wrappers_pb2 import StringValue
from dubbo.test_pb2 import ReqTestEcho
from dubbo.test_pb2_grpc import TestStub, Test1Stub
# Test service currently running
host = "localhost:8093"
# Connect to the server
channel = grpc.insecure_channel(host)
stub = TestStub(channel)
stub1 = Test1Stub(channel)
async def test(idx):
print("test %d" % idx)
    # Test hello
response = stub.hello(Empty())
print("收到数据 %s" % response.message)
    # Test echo
req = ReqTestEcho()
req.input = "hello"
response = stub.echo(req)
print("收到数据 %s" % (response.output))
req = ReqTestEcho()
req.input = "test1:hello"
response = stub1.echo(req)
print("收到数据 %s" % (response.output))
    # Test echoo
response = stub.echoo(StringValue(value="test echoo"))
print("收到数据 %d %s" % (response.id, response.output))
    # Modify
# response.id = 5555
response.output = "modified"
response = stub.echooupdate(response)
print("修改成功" if response.value else "修改失败")
    # Query
response = stub.echoos(Empty())
print("收到 %d 条数据" % len(response.item))
for e in response.item:
print("%d %s" % (e.id, e.output))
    # Clear echoo records
response = stub.echooclear(Empty())
print("清空 %d 条数据" % response.value)
    # Test error return
try:
stub.error(Empty())
except grpc.RpcError as e:
print(e.code(), e.details())
if __name__ == '__main__':
args = argparse.ArgumentParser()
    args.add_argument('-n', '--ncnt', type=int, default=1, help='count')
args = args.parse_args()
async def test_co():
await asyncio.wait([test(i) for i in range(args.ncnt)])
loop = asyncio.get_event_loop()
loop.run_until_complete(test_co())
loop.close()
| 22.721519 | 63 | 0.645682 |
f0c345edda5e8d442134ca1a7a75fc99fe73a8f3
| 7,685 |
py
|
Python
|
projects/controllers/operators.py
|
Matheus158257/projects
|
26a6148046533476e625a872a2950c383aa975a8
|
[
"Apache-2.0"
] | null | null | null |
projects/controllers/operators.py
|
Matheus158257/projects
|
26a6148046533476e625a872a2950c383aa975a8
|
[
"Apache-2.0"
] | null | null | null |
projects/controllers/operators.py
|
Matheus158257/projects
|
26a6148046533476e625a872a2950c383aa975a8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Operator controller."""
from datetime import datetime
from sqlalchemy.exc import InvalidRequestError, ProgrammingError
from werkzeug.exceptions import BadRequest, NotFound
from ..database import db_session
from ..models import Operator
from .parameters import list_parameters
from .dependencies import list_dependencies, list_next_operators, \
create_dependency, delete_dependency
from .utils import raise_if_component_does_not_exist, \
raise_if_project_does_not_exist, raise_if_experiment_does_not_exist, \
raise_if_operator_does_not_exist, uuid_alpha
PARAMETERS_EXCEPTION_MSG = "The specified parameters are not valid"
DEPENDENCIES_EXCEPTION_MSG = "The specified dependencies are not valid."
def list_operators(project_id, experiment_id):
"""Lists all operators under an experiment.
Args:
project_id (str): the project uuid.
experiment_id (str): the experiment uuid.
Returns:
A list of all operator.
"""
raise_if_project_does_not_exist(project_id)
raise_if_experiment_does_not_exist(experiment_id)
operators = db_session.query(Operator) \
.filter_by(experiment_id=experiment_id) \
.all()
response = []
for operator in operators:
check_status(operator)
response.append(operator.as_dict())
return response
def create_operator(project_id, experiment_id, component_id=None,
parameters=None, dependencies=None, **kwargs):
"""Creates a new operator in our database.
The new operator is added to the end of the operator list.
Args:
project_id (str): the project uuid.
experiment_id (str): the experiment uuid.
component_id (str): the component uuid.
parameters (dict): the parameters dict.
dependencies (list): the dependencies array.
Returns:
The operator info.
"""
raise_if_project_does_not_exist(project_id)
raise_if_experiment_does_not_exist(experiment_id)
if not isinstance(component_id, str):
raise BadRequest("componentId is required")
try:
raise_if_component_does_not_exist(component_id)
except NotFound as e:
raise BadRequest(e.description)
if parameters is None:
parameters = {}
raise_if_parameters_are_invalid(parameters)
if dependencies is None:
dependencies = []
raise_if_dependencies_are_invalid(dependencies)
operator = Operator(uuid=uuid_alpha(),
experiment_id=experiment_id,
component_id=component_id,
parameters=parameters)
db_session.add(operator)
db_session.commit()
check_status(operator)
operator_as_dict = operator.as_dict()
update_dependencies(operator_as_dict['uuid'], dependencies)
operator_as_dict["dependencies"] = dependencies
return operator_as_dict
def update_operator(uuid, project_id, experiment_id, **kwargs):
"""Updates an operator in our database and adjusts the position of others.
Args:
uuid (str): the operator uuid to look for in our database.
project_id (str): the project uuid.
experiment_id (str): the experiment uuid.
**kwargs: arbitrary keyword arguments.
Returns:
The operator info.
"""
raise_if_project_does_not_exist(project_id)
raise_if_experiment_does_not_exist(experiment_id)
operator = Operator.query.get(uuid)
if operator is None:
raise NotFound("The specified operator does not exist")
raise_if_parameters_are_invalid(kwargs.get("parameters", {}))
dependencies = kwargs.pop("dependencies", None)
if dependencies is not None:
raise_if_dependencies_are_invalid(dependencies, operator_id=uuid)
update_dependencies(uuid, dependencies)
data = {"updated_at": datetime.utcnow()}
data.update(kwargs)
try:
db_session.query(Operator).filter_by(uuid=uuid).update(data)
db_session.commit()
except (InvalidRequestError, ProgrammingError) as e:
raise BadRequest(str(e))
check_status(operator)
return operator.as_dict()
def delete_operator(uuid, project_id, experiment_id):
"""Delete an operator in our database.
Args:
uuid (str): the operator uuid to look for in our database.
project_id (str): the project uuid.
experiment_id (str): the experiment uuid.
Returns:
The deletion result.
"""
raise_if_project_does_not_exist(project_id)
raise_if_experiment_does_not_exist(experiment_id)
operator = Operator.query.get(uuid)
if operator is None:
raise NotFound("The specified operator does not exist")
operator_as_dict = operator.as_dict()
delete_dependencies(operator_as_dict["uuid"], operator_as_dict["dependencies"])
db_session.delete(operator)
db_session.commit()
return {"message": "Operator deleted"}
def update_dependencies(operator_id, new_dependencies):
dependencies_raw = list_dependencies(operator_id)
dependencies = [d['dependency'] for d in dependencies_raw]
dependencies_to_add = [d for d in new_dependencies if d not in dependencies]
dependencies_to_delete = [d for d in dependencies if d not in new_dependencies]
for dependency in dependencies_to_add:
create_dependency(operator_id, dependency)
for dependency in dependencies_to_delete:
for dependency_object in dependencies_raw:
if dependency == dependency_object["dependency"]:
delete_dependency(dependency_object["uuid"])
break
def delete_dependencies(operator_id, dependencies):
next_operators = list_next_operators(operator_id)
for op in next_operators:
op_dependencies_raw = list_dependencies(op)
op_dependencies = [d["dependency"] for d in op_dependencies_raw]
new_dependencies = dependencies + list(set(op_dependencies) - set(dependencies))
new_dependencies.remove(operator_id)
update_dependencies(op, new_dependencies)
update_dependencies(operator_id, [])
def raise_if_parameters_are_invalid(parameters):
"""Raises an exception if the specified parameters are not valid.
Args:
parameters (dict): the parameters dict.
"""
if not isinstance(parameters, dict):
raise BadRequest(PARAMETERS_EXCEPTION_MSG)
for key, value in parameters.items():
if not isinstance(value, (str, int, float, bool, list, dict)):
raise BadRequest(PARAMETERS_EXCEPTION_MSG)
def raise_if_dependencies_are_invalid(dependencies, operator_id=None):
"""Raises an exception if the specified dependencies are not valid.
Args:
dependencies (list): the dependencies list.
operator_id (str): the operator uuid.
"""
if not isinstance(dependencies, list):
raise BadRequest(DEPENDENCIES_EXCEPTION_MSG)
for d in dependencies:
try:
raise_if_operator_does_not_exist(d)
if d == operator_id:
raise BadRequest(DEPENDENCIES_EXCEPTION_MSG)
except NotFound:
raise BadRequest(DEPENDENCIES_EXCEPTION_MSG)
def check_status(operator):
# get total operator parameters with value
op_params_keys = [key for key in operator.parameters.keys() if operator.parameters[key] != '']
total_op_params = len(op_params_keys)
# get component parameters and remove dataset parameter
comp_params = list_parameters(operator.component_id)
total_comp_params = len(comp_params)
if total_op_params == total_comp_params:
operator.status = 'Setted up'
else:
operator.status = 'Unset'
| 30.74 | 98 | 0.708133 |
c5832c0bb540cabda8c7e8d4ca1400ad5ee8d81e
| 1,196 |
py
|
Python
|
src/visitpy/visit_utils/src/builtin/__init__.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/visitpy/visit_utils/src/builtin/__init__.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/visitpy/visit_utils/src/builtin/__init__.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: __init__.py
author: Cyrus Harrison <[email protected]>
created: 7/6/2020
description:
Init file for 'visit_utils.builtin' module.
"""
###############################################################################
# Modifications:
# Cyrus Harrison, Wed Feb 24 10:12:20 PST 2021
# Move PySide logic into visit_utils.builtin
#
# Mark C. Miller, Mon Jun 28 17:00:28 PDT 2021
# Add apropos and help override
#
###############################################################################
from .evalfuncs import *
from .writescript import WriteScript
from .convert2to3 import ConvertPy2to3
from .convert2to3 import GetAutoPy2to3
from .convert2to3 import SetAutoPy2to3
from .apropos import *
#
# Import PySide2 if it exists.
#
try:
from . import pyside_hook
from . import pyside_gui
from . import pyside_support
def IsPySideViewerEnabled(): return True
except ImportError:
def IsPySideViewerEnabled(): return False
pass
| 27.181818 | 79 | 0.644649 |
7687548d795131d0cf62035310d06957afc97d8b
| 207 |
py
|
Python
|
Pythonskripte/ladeBildKeras.py
|
NoahEmbedded/EmbeddedKWD
|
2380d56b0b75bae4fedeb60885358332766f7319
|
[
"MIT"
] | null | null | null |
Pythonskripte/ladeBildKeras.py
|
NoahEmbedded/EmbeddedKWD
|
2380d56b0b75bae4fedeb60885358332766f7319
|
[
"MIT"
] | null | null | null |
Pythonskripte/ladeBildKeras.py
|
NoahEmbedded/EmbeddedKWD
|
2380d56b0b75bae4fedeb60885358332766f7319
|
[
"MIT"
] | null | null | null |
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
def ladeBild(pfad):
bild = load_img(pfad,grayscale=True)
array = img_to_array(bild)
return array
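# Minimal usage sketch (added note, hypothetical path):
#   pixels = ladeBild('bild.png')   # grayscale image as a NumPy array
# Note: newer Keras versions replace grayscale=True with color_mode='grayscale'.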
| 29.571429 | 50 | 0.782609 |
4f4e6ddd533e6bc19dfecc2da31d1ff1aaff5e22
| 1,423 |
py
|
Python
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pybedtools-0.7.6-py2.7-linux-x86_64.egg/pybedtools/test/test_len_leak.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pybedtools-0.7.6-py2.7-linux-x86_64.egg/pybedtools/test/test_len_leak.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pybedtools-0.7.6-py2.7-linux-x86_64.egg/pybedtools/test/test_len_leak.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2 |
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
import pybedtools
fn = pybedtools.example_filename('a.bed')
def show_open_fds(func):
doc = func.__doc__
print()
print(doc)
print("." * len(doc))
orig_fds = pybedtools.helpers.n_open_fds()
obs = max(func(fn)) - orig_fds
assert obs == 0, obs
def func1(src):
"create bedtool in loop"
for i in range(10):
x = pybedtools.BedTool(src)
yield pybedtools.helpers.n_open_fds()
def func2(src):
'create bedtool in loop and check length'
for i in range(10):
x = pybedtools.BedTool(src)
len(x)
yield pybedtools.helpers.n_open_fds()
def func3(src):
'create bedtool outside of loop; check length inside'
x = pybedtools.BedTool(src)
for i in range(10):
len(x)
yield pybedtools.helpers.n_open_fds()
def func4(src):
'create and len in loop; don\'t assign to var'
for i in range(10):
len(pybedtools.BedTool(src))
yield pybedtools.helpers.n_open_fds()
def func0(src):
'check field count'
x = pybedtools.BedTool(src)
for i in range(10):
# since the test file is only 4 lines, set `n` to make sure we're
# not exhausting the iterator.
fc = x.field_count(n=2)
assert fc == 6
yield pybedtools.helpers.n_open_fds()
if __name__ == "__main__":
for k, v in sorted(locals().items()):
if k.startswith('func'):
show_open_fds(v)
| 23.327869 | 73 | 0.618412 |
4f98e0b9a19256dd90796a89491152b54c250289
| 2,206 |
py
|
Python
|
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline_schema/pipeline_context.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline_schema/pipeline_context.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline_schema/pipeline_context.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from typing import Any, Callable, Dict, List, Optional
from watchmen_auth import PrincipalService
from watchmen_data_kernel.topic_schema import TopicSchema
from watchmen_model.admin import Pipeline
from watchmen_model.pipeline_kernel import PipelineMonitorLog, PipelineTriggerTraceId
from watchmen_pipeline_kernel.cache import CacheService
from watchmen_pipeline_kernel.pipeline_schema_interface import CompiledPipeline, PipelineContext, TopicStorages
from .compiled_pipeline import RuntimeCompiledPipeline
class RuntimePipelineContext(PipelineContext):
def __init__(
self,
pipeline: Pipeline,
trigger_topic_schema: TopicSchema,
previous_data: Optional[Dict[str, Any]], current_data: Optional[Dict[str, Any]],
principal_service: PrincipalService, trace_id: PipelineTriggerTraceId, data_id: int
):
self.pipeline = pipeline
self.triggerTopicSchema = trigger_topic_schema
self.previousData = previous_data
self.currentData = current_data
self.principalService = principal_service
self.traceId = trace_id
self.data_id = data_id
def start(
self, storages: TopicStorages,
handle_monitor_log: Callable[[PipelineMonitorLog, bool], None]
) -> List[PipelineContext]:
compiled_pipeline = self.build_compiled_pipeline()
return compiled_pipeline.run(
previous_data=self.previousData,
current_data=self.currentData,
principal_service=self.principalService,
trace_id=self.traceId,
data_id=self.data_id,
storages=storages,
handle_monitor_log=handle_monitor_log
)
def build_compiled_pipeline(self) -> CompiledPipeline:
compiled = CacheService.compiled_pipeline().get(self.pipeline.pipelineId)
if compiled is None:
compiled = RuntimeCompiledPipeline(self.pipeline, self.principalService)
CacheService.compiled_pipeline().put(self.pipeline.pipelineId, compiled)
return compiled
if id(compiled.get_pipeline()) != id(self.pipeline):
# not same pipeline, abandon compiled cache
CacheService.compiled_pipeline().remove(self.pipeline.pipelineId)
compiled = RuntimeCompiledPipeline(self.pipeline, self.principalService)
CacheService.compiled_pipeline().put(self.pipeline.pipelineId, compiled)
return compiled
return compiled
| 38.034483 | 111 | 0.810063 |
96e4982fe079019360095d9e72b3758c2574834b
| 1,058 |
py
|
Python
|
doc/for_dev/scikit-image/setup.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88 |
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
doc/for_dev/scikit-image/setup.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13 |
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
doc/for_dev/scikit-image/setup.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1 |
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
import os
import sys
from distutils.core import setup
from pathlib import Path
import numpy as np
from transonic.dist import make_backend_files, init_transonic_extensions
path_here = Path(__file__).parent.absolute()
include_dirs = [np.get_include()]
pack_name = "future"
paths = tuple((path_here / pack_name).glob("*.py"))
for backend in ("pythran", "cython", "numba"):
make_backend_files(paths, backend=backend)
extensions = []
if "egg_info" not in sys.argv:
extensions = init_transonic_extensions(
pack_name,
backend="pythran",
include_dirs=[np.get_include()],
compile_args=("-O3", "-DUSE_XSIMD"),
inplace=True,
)
extensions.extend(
init_transonic_extensions(
pack_name, backend="cython", inplace=True, annotate=True
)
)
init_transonic_extensions(pack_name, backend="numba")
setup(
name=pack_name,
ext_modules=extensions,
# script_name="setup.py",
script_args=["build_ext", "--inplace"],
# cmdclass=dict(build_ext=ParallelBuildExt),
)
| 24.045455 | 72 | 0.6862 |
fa004ae1897c15b09582bf92bb23ba8c65a0e28d
| 3,033 |
py
|
Python
|
research/cv/AVA_hpa/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/AVA_hpa/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/AVA_hpa/export.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""export"""
import argparse
import numpy as np
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
from src.resnet import resnet18, resnet50, resnet101
from src.network_define_eval import EvalCell310
parser = argparse.ArgumentParser(description="export")
parser.add_argument("--device_id", type=int, default=0,
help="Device id, default is 0.")
parser.add_argument("--device_num", type=int, default=1,
help="Use device nums, default is 1.")
parser.add_argument('--device_target', type=str,
default="Ascend", help='Device target')
parser.add_argument('--ckpt_path', type=str, default="",
help='model checkpoint path')
parser.add_argument("--model_arch", type=str, default="resnet18",
choices=['resnet18', 'resnet50', 'resnet101'], help='model architecture')
parser.add_argument("--classes", type=int, default=10, help='class number')
parser.add_argument("--file_name", type=str, default="ava_hpa", help='model name')
parser.add_argument("--file_format", type=str, default="MINDIR",
choices=['AIR', 'MINDIR'], help='model format')
args_opt = parser.parse_args()
if __name__ == "__main__":
context.set_context(mode=context.GRAPH_MODE,
device_target=args_opt.device_target)
if args_opt.device_target == "Ascend":
context.set_context(device_id=args_opt.device_id)
ckpt_path = args_opt.ckpt_path
if args_opt.model_arch == 'resnet18':
resnet = resnet18(pretrain=False, classes=args_opt.classes)
elif args_opt.model_arch == 'resnet50':
resnet = resnet50(pretrain=False, classes=args_opt.classes)
elif args_opt.model_arch == 'resnet101':
resnet = resnet101(pretrain=False, classes=args_opt.classes)
else:
raise "Unsupported net work!"
param_dict = load_checkpoint(args_opt.ckpt_path)
load_param_into_net(resnet, param_dict)
bag_size_for_eval = 20
image_shape = (224, 224)
input_shape = (bag_size_for_eval, 3) + image_shape
test_network = EvalCell310(resnet)
input_data0 = Tensor(np.random.uniform(low=0, high=1.0, size=input_shape).astype(np.float32))
export(test_network, input_data0, file_name=args_opt.file_name, file_format=args_opt.file_format)
| 44.602941 | 101 | 0.695681 |
fa71190cf936a62d68d87a6d8cb4b216f3618a86
| 191 |
py
|
Python
|
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/46_47_ 48_49_50_51_funcoes_4/funcoes.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/46_47_ 48_49_50_51_funcoes_4/funcoes.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao3-Python-Intermediario-Programacao-Procedural/46_47_ 48_49_50_51_funcoes_4/funcoes.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
variavel = 'Valor'
def func():
print(variavel)
def func2():
    # global variavel  -> commented out; using global here is not good programming practice
variavel = 'Outro valor'
print(variavel)
func()
func2()
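# Expected output (added note): func() prints 'Valor' (the module-level
# variable), func2() prints 'Outro valor' (a local that shadows it); the
# module-level 'variavel' itself is never changed.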
| 11.9375 | 56 | 0.633508 |
ad8c43db18dd9d5563a7f2b24948f794b62ceaa7
| 7,883 |
py
|
Python
|
mongodb/mongodb_consistent_backup/official/mongodb_consistent_backup/Common/Config.py
|
smthkissinger/docker-images
|
35e868295d04fa780325ada4168381f1e80e8fe4
|
[
"BSD-3-Clause"
] | 63 |
2018-02-04T03:31:22.000Z
|
2022-03-07T08:27:39.000Z
|
mongodb/mongodb_consistent_backup/official/mongodb_consistent_backup/Common/Config.py
|
smthkissinger/docker-images
|
35e868295d04fa780325ada4168381f1e80e8fe4
|
[
"BSD-3-Clause"
] | 3 |
2020-06-15T03:41:03.000Z
|
2020-06-15T03:41:04.000Z
|
mongodb/mongodb_consistent_backup/official/mongodb_consistent_backup/Common/Config.py
|
smthkissinger/docker-images
|
35e868295d04fa780325ada4168381f1e80e8fe4
|
[
"BSD-3-Clause"
] | 40 |
2018-01-22T16:31:16.000Z
|
2022-03-08T04:40:42.000Z
|
import json
import mongodb_consistent_backup
import sys
from datetime import datetime
from argparse import Action
from pkgutil import walk_packages
from yconf import BaseConfiguration
from yconf.util import NestedDict
def parse_config_bool(item):
try:
if isinstance(item, bool):
return item
elif isinstance(item, str):
            if item.rstrip().lower() == "true":
return True
return False
except Exception:
return False
class PrintVersions(Action):
def __init__(self, option_strings, dest, nargs=0, **kwargs):
super(PrintVersions, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
print("%s version: %s, git commit hash: %s" % (
mongodb_consistent_backup.prog_name,
mongodb_consistent_backup.__version__,
mongodb_consistent_backup.git_commit
))
import platform
print("Python version: %s" % platform.python_version())
print("Python modules:")
import fabric.version
print("\t%s: %s" % ('Fabric', fabric.version.get_version()))
modules = ['pymongo', 'multiprocessing', 'yaml', 'boto', 'filechunkio']
for module_name in modules:
module = __import__(module_name)
if hasattr(module, '__version__'):
print("\t%s: %s" % (module_name, module.__version__))
sys.exit(0)
class ConfigParser(BaseConfiguration):
def makeParserLoadSubmodules(self, parser):
for _, modname, ispkg in walk_packages(path=mongodb_consistent_backup.__path__, prefix=mongodb_consistent_backup.__name__ + '.'):
if not ispkg:
continue
try:
components = modname.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
parser = mod.config(parser)
except AttributeError:
continue
return parser
def makeParser(self):
parser = super(ConfigParser, self).makeParser()
parser.add_argument("-V", "--version", dest="version", help="Print mongodb_consistent_backup version info and exit", action=PrintVersions)
parser.add_argument("-v", "--verbose", dest="verbose", help="Verbose output", default=False, action="store_true")
parser.add_argument("-H", "--host", dest="host", default="localhost", type=str,
help="MongoDB Hostname, IP address or '<replset>/<host:port>,<host:port>,..' URI (default: localhost)")
parser.add_argument("-P", "--port", dest="port", help="MongoDB Port (default: 27017)", default=27017, type=int)
parser.add_argument("-u", "--user", "--username", dest="username", help="MongoDB Authentication Username (for optional auth)", type=str)
parser.add_argument("-p", "--password", dest="password", help="MongoDB Authentication Password (for optional auth)", type=str)
parser.add_argument("-a", "--authdb", dest="authdb", help="MongoDB Auth Database (for optional auth - default: admin)", default='admin', type=str)
parser.add_argument("--ssl.enabled", dest="ssl.enabled", default=False, action="store_true",
help="Use SSL secured database connections to MongoDB hosts (default: false)")
parser.add_argument("--ssl.insecure", dest="ssl.insecure", default=False, action="store_true",
help="Do not validate the SSL certificate and hostname of the server (default: false)")
parser.add_argument("--ssl.ca_file", dest="ssl.ca_file", default=None, type=str,
help="Path to SSL Certificate Authority file in PEM format (default: use OS default CA)")
parser.add_argument("--ssl.crl_file", dest="ssl.crl_file", default=None, type=str,
help="Path to SSL Certificate Revocation List file in PEM or DER format (for optional cert revocation)")
parser.add_argument("--ssl.client_cert_file", dest="ssl.client_cert_file", default=None, type=str,
help="Path to Client SSL Certificate file in PEM format (for optional client ssl auth)")
parser.add_argument("-L", "--log-dir", dest="log_dir", help="Path to write log files to (default: disabled)", default='', type=str)
parser.add_argument("-T", "--backup-time", dest="backup_time",
default=datetime.now().strftime("%Y%m%d_%H%M"), type=str,
help="Backup timestamp as yyyymmdd_HHMM. (default: current time)")
parser.add_argument("--lock-file", dest="lock_file", default='/tmp/mongodb-consistent-backup.lock', type=str,
help="Location of lock file (default: /tmp/mongodb-consistent-backup.lock)")
parser.add_argument("--rotate.max_backups", dest="rotate.max_backups", default=0, type=int,
help="Maximum number of backups to keep in backup directory (default: unlimited)")
parser.add_argument("--rotate.max_days", dest="rotate.max_days", default=0, type=float,
help="Maximum age in days for backups in backup directory (default: unlimited)")
parser.add_argument("--sharding.balancer.wait_secs", dest="sharding.balancer.wait_secs", default=300, type=int,
help="Maximum time to wait for balancer to stop, in seconds (default: 300)")
parser.add_argument("--sharding.balancer.ping_secs", dest="sharding.balancer.ping_secs", default=3, type=int,
help="Interval to check balancer state, in seconds (default: 3)")
return self.makeParserLoadSubmodules(parser)
class Config(object):
# noinspection PyUnusedLocal
def __init__(self):
self._config = ConfigParser()
self.parse()
self.version = mongodb_consistent_backup.__version__
self.git_commit = mongodb_consistent_backup.git_commit
def _get(self, keys, data=None):
if not data:
data = self._config
if "." in keys:
key, rest = keys.split(".", 1)
return self._get(rest, data[key])
else:
return data[keys]
def check_required(self):
required = [
'backup.name',
'backup.location'
]
for key in required:
try:
self._get(key)
except Exception:
raise mongodb_consistent_backup.Errors.OperationError(
'Field "%s" (config file field: "%s.%s") must be set via command-line or config file!' % (
key,
self._config.environment,
key
)
)
def parse(self):
self._config.parse(self.cmdline)
self.check_required()
def to_dict(self, data):
if isinstance(data, dict) or isinstance(data, NestedDict):
ret = {}
for key in data:
value = self.to_dict(data[key])
                if value and key != 'merge':
if key == "password" or key == "secret_key":
value = "******"
ret[key] = value
return ret
elif isinstance(data, (str, int, bool)):
return data
def dump(self):
return self.to_dict(self._config)
def to_json(self):
return json.dumps(self.dump(), sort_keys=True)
def __repr__(self):
return self.to_json()
def __getattr__(self, key):
try:
return self._config.get(key)
# TODO-timv What can we do to make this better?
except Exception:
return None
| 46.64497 | 154 | 0.601674 |
0f369928c9ab0cab133f4a68378ba6e1b7117452
| 110 |
py
|
Python
|
Shivani/ques 7.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
Shivani/ques 7.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
Shivani/ques 7.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
n=int(input("enter a number\n"))
if n%7==0:
print("Divisible by 7")
else:
print("Not Divisible by 7")
| 18.333333 | 32 | 0.618182 |
0f75ce3a9210a5be10331c3616a71310f8c3fa55
| 2,240 |
py
|
Python
|
research/cv/StackedHourglass/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/StackedHourglass/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/StackedHourglass/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
run model eval
"""
import os
from mindspore import context, load_checkpoint, load_param_into_net
from src.config import parse_args
from src.models.StackedHourglassNet import StackedHourglassNet
from src.utils.inference import MPIIEval, get_img, inference
args = parse_args()
if __name__ == "__main__":
if not os.path.exists(args.ckpt_file):
print("ckpt file not valid")
exit()
if not os.path.exists(args.img_dir) or not os.path.exists(args.annot_dir):
print("Dataset not found.")
exit()
# Set context mode
if args.context_mode == "GRAPH":
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, save_graphs=False)
else:
context.set_context(mode=context.PYNATIVE_MODE, device_target=args.device_target)
# Import net
net = StackedHourglassNet(args.nstack, args.inp_dim, args.oup_dim)
param_dict = load_checkpoint(args.ckpt_file)
load_param_into_net(net, param_dict)
gts = []
preds = []
normalizing = []
num_eval = args.num_eval
num_train = args.train_num_eval
for anns, img, c, s, n in get_img(num_eval, num_train):
gts.append(anns)
ans = inference(img, net, c, s)
if ans.size > 0:
ans = ans[:, :, :3]
# (num preds, joints, x/y/visible)
pred = []
for i in range(ans.shape[0]):
pred.append({"keypoints": ans[i, :, :]})
preds.append(pred)
normalizing.append(n)
mpii_eval = MPIIEval()
mpii_eval.eval(preds, gts, normalizing, num_train)
| 32.463768 | 105 | 0.662946 |
0e1ddc9e50051fdda82d76af3bfd3508c5f98cc5
| 364 |
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 86/86.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101 |
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 86/86.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 4 |
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 86/86.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 38 |
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
#Create a script that checks a list against countries_clean.txt
#and creates a list with items that were in that file
checklist = ["Portugal", "Germany", "Munster", "Spain"]
with open("countries_clean.txt", "r") as file:
content = file.readlines()
content = [i.rstrip('\n') for i in content]
checked = [i for i in content if i in checklist]
print(checked)
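# Example (added note, hypothetical file contents): if countries_clean.txt lists
# Portugal, Germany and Spain but not Munster, `checked` ends up holding those
# three names in the order they appear in the file.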
| 28 | 63 | 0.711538 |
adac4f2c0f736271502537b4a0d2e317f0f6f4b2
| 1,299 |
py
|
Python
|
python/tf_gpu.py
|
YA-androidapp/Python-cheatsheet-ML
|
eafa57d933a2bafd1e169c234acf987670ba790d
|
[
"Apache-2.0"
] | null | null | null |
python/tf_gpu.py
|
YA-androidapp/Python-cheatsheet-ML
|
eafa57d933a2bafd1e169c234acf987670ba790d
|
[
"Apache-2.0"
] | null | null | null |
python/tf_gpu.py
|
YA-androidapp/Python-cheatsheet-ML
|
eafa57d933a2bafd1e169c234acf987670ba790d
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
# Run the GPU build of TensorFlow on a specific GPU
GPU_INDEX = 2
tf.config.set_soft_device_placement(True)
tf.debugging.set_log_device_placement(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
print(gpus)
print(logical_gpus)
except RuntimeError as e:
print(e)
try:
    with tf.device('/device:GPU:{}'.format(GPU_INDEX)):  # specify which GPU index to use
# MNIST
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test)
except RuntimeError as e:
print(e)
| 28.866667 | 77 | 0.632794 |
2a2654d0ce2de2bd0820c91b45f5148096851e30
| 3,155 |
py
|
Python
|
src/bo4e/bo/netznutzungsrechnung.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/bo/netznutzungsrechnung.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/bo/netznutzungsrechnung.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
"""
Contains Netznutzungsrechnung class and corresponding marshmallow schema for de-/serialization
"""
from typing import Optional
import attr
from marshmallow import fields
from marshmallow_enum import EnumField # type:ignore[import]
from bo4e.bo.rechnung import Rechnung, RechnungSchema
from bo4e.enum.botyp import BoTyp
from bo4e.enum.nnrechnungsart import NNRechnungsart
from bo4e.enum.nnrechnungstyp import NNRechnungstyp
from bo4e.enum.sparte import Sparte
# pylint: disable=too-few-public-methods, too-many-instance-attributes
@attr.s(auto_attribs=True, kw_only=True)
class Netznutzungsrechnung(Rechnung):
"""
    Model for representing grid usage invoices (Netznutzungsrechnungen)
.. HINT::
`Netznutzungsrechnung JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/bo/NetznutzungsrechnungSchema.json>`_
"""
# required attributes
bo_typ: BoTyp = attr.ib(default=BoTyp.NETZNUTZUNGSRECHNUNG)
    #: Division (electricity, gas, ...) for which the invoice is issued
sparte: Sparte = attr.ib(validator=attr.validators.instance_of(Sparte))
absendercodenummer: str = attr.ib(validator=attr.validators.matches_re(r"^\d{13}$"))
"""
    The role code number of the sender (see :class:`Marktteilnehmer`).
    Further details about the market participant can be looked up via this number.
"""
empfaengercodenummer: str = attr.ib(validator=attr.validators.matches_re(r"^\d{13}$"))
"""
    The role code number of the recipient (see :class:`Marktteilnehmer`).
    Further details about the market participant can be looked up via this number.
"""
    #: Taken from the INVOIC
nnrechnungsart: NNRechnungsart = attr.ib(validator=attr.validators.instance_of(NNRechnungsart))
    #: Taken from the INVOIC
nnrechnungstyp: NNRechnungstyp = attr.ib(validator=attr.validators.instance_of(NNRechnungstyp))
    #: Flag indicating whether this is an original (true) or a copy (false)
original: bool = attr.ib(validator=attr.validators.instance_of(bool))
    #: Flag indicating whether this is a simulated invoice, e.g. for invoice verification purposes
simuliert: bool = attr.ib(validator=attr.validators.instance_of(bool))
# optional attributes
lokations_id: Optional[str] = attr.ib(
default=None, validator=attr.validators.optional(attr.validators.instance_of(str))
)
"""
    The market or metering location identification (as MaLo/MeLo id) of the location the invoice refers to
"""
class NetznutzungsrechnungSchema(RechnungSchema):
"""
Schema for de-/serialization of Netznutzungsrechnung
"""
class_name = Netznutzungsrechnung # type:ignore[assignment]
# required attributes (additional to those of Rechnung)
sparte = EnumField(Sparte)
absendercodenummer = fields.Str()
empfaengercodenummer = fields.Str()
nnrechnungsart = EnumField(NNRechnungsart)
nnrechnungstyp = EnumField(NNRechnungstyp)
original = fields.Boolean()
simuliert = fields.Boolean()
# optional attributes
lokations_id = fields.Str(allow_none=True)
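# Illustration only (not part of BO4E): the schema above follows the standard
# marshmallow pattern, in which a schema instance serializes objects to JSON
# text and loads them back. A self-contained toy sketch of that pattern; the
# _Toy* names are hypothetical and exist only for this example (assumes
# marshmallow 3).
from marshmallow import Schema, post_load
@attr.s(auto_attribs=True, kw_only=True)
class _ToyRechnung:
    original: bool
    simuliert: bool
class _ToyRechnungSchema(Schema):
    original = fields.Boolean()
    simuliert = fields.Boolean()
    @post_load
    def make_obj(self, data, **kwargs):
        return _ToyRechnung(**data)
def _toy_roundtrip():
    json_text = _ToyRechnungSchema().dumps(_ToyRechnung(original=True, simuliert=False))
    return _ToyRechnungSchema().loads(json_text)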
| 40.448718 | 194 | 0.750872 |
aacc0d1d28f9c7a582c0a0dfc0dbd43b3058d58a
| 1,422 |
py
|
Python
|
src/setup.py
|
daimon99/santicms
|
9b113b806cb43e94084ea474ba0aab51b32f9b3c
|
[
"MIT"
] | null | null | null |
src/setup.py
|
daimon99/santicms
|
9b113b806cb43e94084ea474ba0aab51b32f9b3c
|
[
"MIT"
] | null | null | null |
src/setup.py
|
daimon99/santicms
|
9b113b806cb43e94084ea474ba0aab51b32f9b3c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
def fread(filename):
with open(filename) as f:
return f.read()
setup(
name='santicms',
version='0.1',
author='Daimon',
author_email='[email protected]',
packages=find_packages(exclude=['tests', 'tests.*']),
description="Santi Cms app.",
zip_safe=False,
include_package_data=True,
# entry_points={'console_scripts': [
# 'JQR = manage',
# ]},
platforms='any',
long_description=fread('../README.md'),
license='MIT',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
], install_requires=[
'django',
'pymysql',
'djangorestframework',
'markdown',
'django-filter',
'jsonpickle',
'wechat',
'django-cors-headers',
'pycrypto',
'requests',
'docutils',
'simplejson',
'django-debug-toolbar',
'networkx'
]
)
| 25.854545 | 71 | 0.563994 |
879c9d4d6448276aae2c134299887e7259cb38d1
| 1,096 |
py
|
Python
|
data_log_sheet/migrations/0001_initial.py
|
antonnifo/DIT
|
7c496f37bab70229cd84c4b33332708ea8cf278b
|
[
"MIT"
] | null | null | null |
data_log_sheet/migrations/0001_initial.py
|
antonnifo/DIT
|
7c496f37bab70229cd84c4b33332708ea8cf278b
|
[
"MIT"
] | null | null | null |
data_log_sheet/migrations/0001_initial.py
|
antonnifo/DIT
|
7c496f37bab70229cd84c4b33332708ea8cf278b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.3 on 2021-05-18 10:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DataLogSheet',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reel', models.CharField(max_length=200)),
('epi', models.IntegerField()),
('scn', models.CharField(max_length=200)),
('location', models.CharField(max_length=200)),
('clips', models.CharField(max_length=200)),
('shot', models.IntegerField()),
('take', models.IntegerField()),
('note', models.CharField(max_length=200)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ('epi',),
},
),
]
| 32.235294 | 117 | 0.531934 |
ea944c82bb12f999c40cd26c90c2a596a58bd720
| 1,020 |
py
|
Python
|
src/crypt1.py
|
xiaonanln/python-usaco
|
8f0fef19cb5f89232d985f79d955f0de5ef4e10d
|
[
"MIT"
] | null | null | null |
src/crypt1.py
|
xiaonanln/python-usaco
|
8f0fef19cb5f89232d985f79d955f0de5ef4e10d
|
[
"MIT"
] | null | null | null |
src/crypt1.py
|
xiaonanln/python-usaco
|
8f0fef19cb5f89232d985f79d955f0de5ef4e10d
|
[
"MIT"
] | null | null | null |
"""
ID: isaiahl1
LANG: PYTHON2
TASK: crypt1
"""
TASK = 'crypt1'
def readints(fin):
return tuple(int(x) for x in fin.readline().split())
def readint(fin):
return int(fin.readline())
def main(fin, fout):
N = readint(fin)
digits = readints(fin)
count = 0
print 'digits', digits
for a in digits:
if a == 0: continue
for b in digits:
for c in digits:
for d in digits:
if d == 0: continue
for e in digits:
# abc * de
x = (a*100+b*10+c) * e
y = (a*100+b*10+c) * d
if x >= 1000 or y >= 1000: continue
if not fit(x, digits) or not fit(y, digits): continue
s = y*10 + x
if fit(s, digits):
print a,b,c,d,e, x, y, s
count += 1
print 'result', count
print >>fout, count
def fit(n, digits):
d = n % 10
n //= 10
if d not in digits:
return False
while n:
d = n % 10
n //= 10
if d not in digits:
return False
return True
fin = open (TASK + '.in', 'r')
fout = open (TASK + '.out', 'w')
with fin:
with fout:
main(fin, fout)
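# Equivalent compact sketch of the digit check above (illustration only; the
# name fit_compact is made up): str() exposes each decimal digit, and a set
# makes the membership test cheap.
def fit_compact(n, digits):
    allowed = set(digits)
    return all(int(ch) in allowed for ch in str(n))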
| 17 | 59 | 0.562745 |
17c3cc1f6ea3270b77093854c512d7f8473b2e7d
| 5,949 |
py
|
Python
|
Modell/CSV/diagramm_tf_genauigkeit.py
|
NoahEmbedded/EmbeddedKWD
|
2380d56b0b75bae4fedeb60885358332766f7319
|
[
"MIT"
] | null | null | null |
Modell/CSV/diagramm_tf_genauigkeit.py
|
NoahEmbedded/EmbeddedKWD
|
2380d56b0b75bae4fedeb60885358332766f7319
|
[
"MIT"
] | null | null | null |
Modell/CSV/diagramm_tf_genauigkeit.py
|
NoahEmbedded/EmbeddedKWD
|
2380d56b0b75bae4fedeb60885358332766f7319
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
# input = speech
# output: speech
spIn_spOut = np.array([100,100,0.61,99.84,99.17,100,98.2,100,100,100,92.67,99.98,100,99.84,100,96.43,99.99,100,99.97,97.62,100,100,92.77,100,99.93,96.97,100,100,100,100,94.32,97.25,88.54,100,78.25,100,100,82.26,100,100,99.99,11.78,77.43,100,100,99.69,100,99.66,99.99,100,100,98.32,2.45,100,4.2,26.09,98.98,96.09,94.92,82.16,100,99.99,100,100,4.92,1.98,100,86.59,97.08,100,99.94,100,100,100,100,100,99.97,11.96,100,85.7,93.36,100,99.95,0.76,100,100,100,100,99.99,100,93.68,100,79.97,100,100,100,99.98,100,99.93,100,50.76,100,100,76.21,99.89,100,100,76.73,17.59,100,66.92,95.29,100,97.4,99.69,99.85,99.99,100,100,1.11,98.35,100,100,99.75,99.98,100,100,100,100,100,100,98.49,100,99.99,12.14,100,100,71.05,100,99.96,100,99.67,99.98,100,99.19,97.06,97.42,86.73,55.24,35.23,100,100,100,100,14.57,100,100,99.89,100,99.87,100,100,100,100,99.99,100,100,100,99.92,70.29,100,100,100,99.98,99.11,100,100,100,99.98,56.94,98.76,92.32,1.88,99.69,99.96,99.74,99.01,96.03,59.72,100,100,0.77,100,100,100,15.04,100,47.34,100,96.9,100,99.68,100,65.52,100,99.03,94.87,100,98.33,99.24,99.9,100,4.81,100,100,95.19,99.51,99.92,1.18,100,100,100,96.91,99.99,4.31,0,92.39,100,98.56,100,100,100,0,100,56.89,100,100,100,100,97.98,4.64,34.55,99.71,99.22,72.04,99.92,97.49,100,100,100,96.51,7.58,99.96,100,99.96,100,0.69,100,99.74,0.66,99.99,100,99.81,100,0.04,100,100,100,99.99,99.54,57.23,99.71,100,99.97,100,99.94,7.01,100,100,100,99.63,100,100,100,98.88,98.31,99.89,100,98.73,99.96,100,99.99,99.82,100,99.99,99.86,99.98,99.54,100,92.38])
#output marvin
spIn_maOut = np.array([0,0,99.39,0.16,0.83,0,1.8,0,0,0,7.33,0.02,0,0.16,0,3.57,0.01,0,0.03,2.38,0,0,7.23,0,0.07,3.03,0,0,0,0,5.68,2.75,11.46,0,21.75,0,0,17.74,0,0,0.01,88.22,22.57,0,0,0.31,0,0.34,0.01,0,0,1.68,97.55,0,95.8,73.91,1.02,3.91,5.08,17.84,0,0.01,0,0,95.08,98.02,0,13.41,2.92,0,0.06,0,0,0,0,0,0.03,88.04,0,14.3,6.64,0,0.05,99.24,0,0,0,0,0.01,0,6.32,0,20.03,0,0,0,0.02,0,0.07,0,49.24,0,0,23.79,0.11,0,0,23.27,82.41,0,33.08,4.71,0,2.6,0.31,0.15,0.01,0,0,98.89,1.65,0,0,0.25,0.02,0,0,0,0,0,0,1.51,0,0.01,87.86,0,0,28.95,0,0.04,0,0.33,0.02,0,0.81,2.94,2.58,13.27,44.76,64.77,0,0,0,0,85.43,0,0,0.11,0,0.13,0,0,0,0,0.01,0,0,0,0.08,29.71,0,0,0,0.02,0.89,0,0,0,0.02,42.68,1.24,7.68,98.12,0.31,0.04,0.26,0.99,3.97,40.28,0,0,99.23,0,0,0,84.96,0,52.66,0,3.1,0,0.32,0,34.48,0,0.97,5.13,0,1.67,0.76,0.1,0,95.19,0,0,4.81,0.49,0.08,0,0,0,0,3.09,0.01,95.69,100,7.61,0,1.44,0,0,0,100,0,43.11,0,0,0,0,2.02,95.36,65.45,0.29,0.78,27.96,0.08,2.51,0,0,0,3.49,92.42,0.04,0,0.04,0,99.31,0,0.26,99.34,0.01,0,0.19,0,99.96,0,0,0,0.01,0.46,42.77,0.29,0,0.03,0,0.06,92.99,0,0,0,0.29,0,0,0,1.12,1.69,0.11,0,1.27,0.04,0,0.01,0.18,0,0.01,0.14,0.02,0.46,0,7.62])
#output
spIn_stOut = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.38,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,98.82,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.08,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,])
verteilungSP = [0]*10
verteilungMA = [0]*10
verteilungST = [0]*10
# sp_out distribution
for i in spIn_spOut:
if i <= 10:
verteilungSP[0]+=1
elif i <= 20 and i >10:
verteilungSP[1]+=1
elif i <= 30 and i >20:
verteilungSP[2]+=1
elif i <= 40 and i >30:
verteilungSP[3]+=1
elif i <= 50 and i >40:
verteilungSP[4]+=1
elif i <= 60 and i >50:
verteilungSP[5]+=1
elif i <= 70 and i >60:
verteilungSP[6]+=1
elif i <= 80 and i >70:
verteilungSP[7]+=1
elif i <= 90 and i >80:
verteilungSP[8]+=1
elif i <= 100 and i >90:
verteilungSP[9]+=1
# ma_out distribution
for i in spIn_maOut:
if i <= 10:
verteilungMA[0]+=1
elif i <= 20 and i >10:
verteilungMA[1]+=1
elif i <= 30 and i >20:
verteilungMA[2]+=1
elif i <= 40 and i >30:
verteilungMA[3]+=1
elif i <= 50 and i >40:
verteilungMA[4]+=1
elif i <= 60 and i >50:
verteilungMA[5]+=1
elif i <= 70 and i >60:
verteilungMA[6]+=1
elif i <= 80 and i >70:
verteilungMA[7]+=1
elif i <= 90 and i >80:
verteilungMA[8]+=1
elif i <= 100 and i >90:
verteilungMA[9]+=1
# st_out distribution
for i in spIn_stOut:
if i <= 10:
verteilungST[0]+=1
elif i <= 20 and i >10:
verteilungST[1]+=1
elif i <= 30 and i >20:
verteilungST[2]+=1
elif i <= 40 and i >30:
verteilungST[3]+=1
elif i <= 50 and i >40:
verteilungST[4]+=1
elif i <= 60 and i >50:
verteilungST[5]+=1
elif i <= 70 and i >60:
verteilungST[6]+=1
elif i <= 80 and i >70:
verteilungST[7]+=1
elif i <= 90 and i >80:
verteilungST[8]+=1
elif i <= 100 and i >90:
verteilungST[9]+=1
labels = ["0-10","10-20","20-30","30-40","40-50","50-60","60-70","70-80","80-90","90-100"]
X = np.arange(10)
fig,axs = plt.subplots(1,3)
fig.suptitle("Ergebnisverteilung Tensorflow",fontsize="xx-large")
axs[0].set_title("Input = Sprache")
axs[0].set_xlabel("Ergebnis in %",fontsize="large")
axs[0].set_ylabel("Sampleanzahl",fontsize="large")
axs[0].bar(x = X+0,height = verteilungSP,width=0.25,color = "b",label = "Output = Sprache")
axs[0].bar(x = X+0.25,height = verteilungMA,width=0.25,color = "g",label = "Output = Marvin Go")
axs[0].bar(x = X+0.5,height = verteilungST,width=0.25,color = "r",label = "Output = Stille")
axs[0].legend()
axs[0].yaxis.grid(True,linestyle = "--")
axs[0].set_xticks(X+0.25)
axs[0].set_xticklabels(labels)
plt.show()
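# Sketch (not part of the original script): numpy can produce the same ten
# buckets as the if/elif ladders above in one call. Note that np.histogram
# uses half-open bins [0, 10), [10, 20), ..., so a value of exactly 10.0 falls
# into the next bucket compared to the manual version.
verteilungSP_np, _ = np.histogram(spIn_spOut, bins=10, range=(0, 100))
verteilungMA_np, _ = np.histogram(spIn_maOut, bins=10, range=(0, 100))
verteilungST_np, _ = np.histogram(spIn_stOut, bins=10, range=(0, 100))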
| 63.287234 | 1,510 | 0.598588 |
aa456aac60eda7b908eddcff08d55a6c001fb201
| 361 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/hr/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/hr/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/hr/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def set_employee_name(doc):
if doc.employee and not doc.employee_name:
doc.employee_name = frappe.db.get_value("Employee", doc.employee, "employee_name")
| 32.818182 | 84 | 0.792244 |
52c780264fc83fa227df87ffefd9844136e5e12a
| 56 |
py
|
Python
|
solutions/Textsys/Milvus-bert-server/src/const.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | 1 |
2021-04-06T06:13:20.000Z
|
2021-04-06T06:13:20.000Z
|
solutions/Textsys/Milvus-bert-server/src/const.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | null | null | null |
solutions/Textsys/Milvus-bert-server/src/const.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | 2 |
2021-06-14T23:50:25.000Z
|
2021-06-21T10:30:34.000Z
|
UPLOAD_PATH="/tmp/result-mols"
default_cache_dir="./tmp"
| 28 | 30 | 0.785714 |
dc98c0bc44c9e963e9caf14150d57ef48555a84a
| 9,987 |
py
|
Python
|
easyp2p/ui/main_window.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 4 |
2019-07-18T10:58:28.000Z
|
2021-11-18T16:57:45.000Z
|
easyp2p/ui/main_window.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 1 |
2019-07-05T09:21:47.000Z
|
2019-07-05T09:21:47.000Z
|
easyp2p/ui/main_window.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 2 |
2019-07-05T08:56:34.000Z
|
2020-06-09T10:03:42.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Niko Sandschneider
"""
Application for downloading and presenting investment results for
several peer-to-peer (P2P) lending platforms.
"""
from datetime import date, timedelta
import gc
import logging
import os
import sys
from typing import Set
from PyQt5.QtCore import (
pyqtSlot, QCoreApplication, QLocale, QTranslator, QLibraryInfo)
from PyQt5.QtWidgets import (
QApplication, QMainWindow, QFileDialog, QLineEdit, QCheckBox, QMessageBox)
import easyp2p
from easyp2p.p2p_settings import Settings
from easyp2p.p2p_signals import Signals
from easyp2p.ui.progress_window import ProgressWindow
from easyp2p.ui.settings_window import SettingsWindow
from easyp2p.ui.Ui_main_window import Ui_MainWindow
_translate = QCoreApplication.translate
name = 'easyp2p' # pylint: disable=invalid-name
logger = logging.getLogger(__name__)
class MainWindow(QMainWindow, Ui_MainWindow):
"""This class defines the main window of easyp2p."""
def __init__(self, app: QApplication) -> None:
"""Constructor of MainWindow."""
super().__init__()
self.setupUi(self)
self._app = app
self._translator = QTranslator()
self._qttranslator = QTranslator()
end_last_month = date.today().replace(day=1) - timedelta(days=1)
self.date_range = (end_last_month.replace(day=1), end_last_month)
self.set_language()
self.settings = Settings(
self.date_range, self.line_edit_output_file.text())
if not os.path.isdir(self.settings.directory):
os.makedirs(self.settings.directory, exist_ok=True)
self.output_file_changed = False
self.set_output_file()
def init_date_combo_boxes(self) -> None:
"""Set the items for all date combo boxes."""
month_list = [
QLocale(QLocale().name()).monthName(i, 1) for i in range(1, 13)]
year_list = [str(year) for year in range(2010, date.today().year + 1)]
for i, combo_box in zip(
range(2),
[self.combo_box_start_month, self.combo_box_end_month]):
combo_box.clear()
combo_box.addItems(month_list)
combo_box.setCurrentIndex(self.date_range[i].month - 1)
for i, combo_box in zip(
range(2), [self.combo_box_start_year, self.combo_box_end_year]):
combo_box.clear()
combo_box.addItems(year_list)
combo_box.setCurrentIndex(self.date_range[i].year - 2010)
def set_language(self, locale: str = None) -> None:
"""
Translate GUI into language of locale.
Args:
locale: Locale into which the GUI must be translated. If None the
system locale will be used.
"""
if not locale:
locale = QLocale().name()
QLocale.setDefault(QLocale(locale))
if locale.startswith('de'):
self.action_english.setChecked(False)
self.action_german.setChecked(True)
else:
self.action_english.setChecked(True)
self.action_german.setChecked(False)
self._translator.load('easyp2p_' + locale, os.path.join(
easyp2p.__path__[0], 'i18n'))
self._app.installTranslator(self._translator)
self._qttranslator.load(
'qtbase_' + locale, QLibraryInfo.location(
QLibraryInfo.TranslationsPath))
self._app.installTranslator(self._qttranslator)
self.retranslateUi(self)
self.init_date_combo_boxes()
def set_date_range(self) -> None:
"""Set currently in combo boxes selected date range."""
start_month = self.combo_box_start_month.currentText()
start_year = self.combo_box_start_year.currentText()
start_date = QLocale().toDate(
'1' + start_month + start_year, 'dMMMyyyy')
end_month = self.combo_box_end_month.currentText()
end_year = self.combo_box_end_year.currentText()
end_date = QLocale().toDate(
'1' + end_month + end_year, 'dMMMyyyy')
end_date.setDate(
end_date.year(), end_date.month(), end_date.daysInMonth())
self.date_range = (start_date.toPyDate(), end_date.toPyDate())
self.set_output_file()
def get_platforms(self, checked: bool = True) -> Set[str]:
"""
Get list of all platforms selected by the user.
Keyword Args:
checked: If True only the platforms selected by the user will
be returned, if False all platforms will be returned
Returns:
Set of P2P platform names
"""
platforms = set()
for check_box in self.group_box_platforms.findChildren(QCheckBox):
if not checked:
platforms.add(check_box.text().replace('&', ''))
elif check_box.isChecked():
platforms.add(check_box.text().replace('&', ''))
return platforms
def set_output_file(self) -> None:
"""Helper method to set the name of the output file."""
start_date = self.date_range[0].strftime('%d%m%Y')
end_date = self.date_range[1].strftime('%d%m%Y')
if not self.output_file_changed:
output_file = os.path.join(
self.settings.directory,
_translate(
'MainWindow',
f'P2P_Results_{start_date}-{end_date}.xlsx'))
QLineEdit.setText(self.line_edit_output_file, output_file)
@pyqtSlot(bool)
def on_action_german_triggered(self):
"""Translate GUI to German."""
self.set_language('de_de')
@pyqtSlot(bool)
def on_action_english_triggered(self):
"""Translate GUI to English."""
self.set_language('en_US')
@pyqtSlot(str)
def on_combo_box_start_month_activated(self) -> None:
"""Update output file if user changed start month in the combo box."""
self.set_date_range()
@pyqtSlot(str)
def on_combo_box_start_year_activated(self) -> None:
"""Update output file if user changed start year in the combo box."""
self.set_date_range()
@pyqtSlot(str)
def on_combo_box_end_month_activated(self) -> None:
"""Update output file if user changed end month in the combo box."""
self.set_date_range()
@pyqtSlot(str)
def on_combo_box_end_year_activated(self) -> None:
"""Update output file if user changed end year in the combo box."""
self.set_date_range()
@pyqtSlot()
def on_push_button_file_chooser_clicked(self) -> None:
"""
Open dialog window for changing the save location of the results file.
"""
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
output_file, _ = QFileDialog.getSaveFileName(
self, _translate('MainWindow', 'Choose output file'),
self.line_edit_output_file.text(),
'MS Excel ' + _translate('MainWindow', 'files') + ' (*.xlsx)',
options=options)
if output_file:
# The file name must include xlsx file format. Otherwise the Excel
# writer will crash later.
if not output_file.endswith('.xlsx'):
output_file += '.xlsx'
QLineEdit.setText(self.line_edit_output_file, output_file)
self.output_file_changed = True
@pyqtSlot(bool)
def on_check_box_select_all_toggled(self, checked: bool) -> None:
"""
Toggle/untoggle all P2P platforms.
Args:
checked: if True toggle all check boxes, if False untoggle
all check boxes
"""
for check_box in self.group_box_platforms.findChildren(QCheckBox):
check_box.setChecked(checked)
@pyqtSlot()
def on_push_button_start_clicked(self) -> None:
"""
Start evaluation for selected P2P platforms and the given date range.
The evaluation will be done by a worker thread in class WorkerThread.
Progress is tracked in ProgressWindow.
"""
# Make sure all abort flags are False in case the user aborted the
# previous run
for obj in gc.get_objects():
if isinstance(obj, Signals):
obj.abort = False
platforms = self.get_platforms()
# Check that start date is before end date
if self.date_range[0] > self.date_range[1]:
QMessageBox.warning(
self,
_translate('MainWindow', 'Start date is after end date!'),
_translate(
'MainWindow',
'Start date must be before end date!'))
return
# Check that at least one platform is selected
if not platforms:
QMessageBox.warning(
self, _translate(
'MainWindow', 'No P2P platform selected!'),
_translate(
'MainWindow',
'Please choose at least one P2P platform!'))
return
self.settings.date_range = self.date_range
self.settings.platforms = platforms
self.settings.output_file = self.line_edit_output_file.text()
# Open progress window
progress_window = ProgressWindow(self.settings)
progress_window.exec_()
@pyqtSlot()
def on_tool_button_settings_clicked(self) -> None:
"""Open the settings window."""
settings_window = SettingsWindow(
self.get_platforms(False), self.settings)
settings_window.exec_()
def main():
"""Open the main window of easyp2p."""
app = QApplication(sys.argv)
ui = MainWindow(app) # pylint: disable=invalid-name
logging.basicConfig(
filename=ui.settings.directory + '/easyp2p.log',
filemode='w',
level=logging.DEBUG)
ui.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 36.054152 | 80 | 0.625713 |
bffc5e73043f9f440e068040f81bdf9b3a1d6010
| 4,299 |
py
|
Python
|
web/controllers/user/User.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | 1 |
2020-03-24T04:26:34.000Z
|
2020-03-24T04:26:34.000Z
|
web/controllers/user/User.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | null | null | null |
web/controllers/user/User.py
|
yao6891/FlaskOrdering
|
cbd24bd8d95afaba91ce4d6b1b3548c4e82e3807
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Blueprint, request, jsonify, make_response, g, redirect
from common.models.User import (User)
from common.libs.user.UserService import (UserService)
from common.libs.Helper import (ops_render)
from common.libs.UrlManager import (UrlManager)
from application import app, db
import json
route_user = Blueprint('user_page', __name__)
@route_user.route("/login", methods=["GET", "POST"])
def login():
"""
    Handle the login request
    :return: the login page view
"""
if request.method == "GET":
if g.current_user:
return redirect(UrlManager.build_url("/"))
return ops_render("user/login.html")
resp = {'code': 200, 'msg': '登录成功~~', 'data': {}}
req = request.values
login_name = req['login_name'] if 'login_name' in req else ''
login_pwd = req['login_pwd'] if 'login_pwd' in req else ''
if login_name is None or len(login_name) < 1:
resp['code'] = -1
resp['msg'] = "请输入正确的登录用户名~~"
return jsonify(resp)
if login_pwd is None or len(login_pwd) < 1:
resp['code'] = -1
resp['msg'] = "请输入正确的邮箱密码~~"
return jsonify(resp)
user_info = User.query.filter_by(login_name=login_name).first()
if not user_info:
resp['code'] = -1
resp['msg'] = "请输入正确的登录用户名和密码-1~~"
return jsonify(resp)
if user_info.login_pwd != UserService.generate_password(login_pwd, user_info.login_salt):
resp['code'] = -1
resp['msg'] = "请输入正确的登录用户名和密码-2~~"
return jsonify(resp)
if user_info.status != 1:
resp['code'] = -1
resp['msg'] = "账号已被禁用,请联系管理员处理~~"
return jsonify(resp)
response = make_response(json.dumps({'code': 200, 'msg': '登录成功~~'}))
response.set_cookie(app.config['AUTH_COOKIE_NAME'], '%s#%s' % (
        UserService.generate_auth_code(user_info), user_info.uid), 60 * 60 * 24 * 120)  # keep the cookie for 120 days
return response
@route_user.route("/edit", methods=["GET", "POST"])
def edit():
if request.method == "GET":
return ops_render("user/edit.html", {'current': 'edit'})
resp = {'code': 200, 'msg': '操作成功~', 'data': {}}
req = request.values
nickname = req['nickname'] if 'nickname' in req else ''
email = req['email'] if 'email' in req else ''
if nickname is None or len(nickname) < 1:
resp['code'] = -1
resp['msg'] = "请输入符合规范的姓名~~"
return jsonify(resp)
if email is None or len(email) < 1:
resp['code'] = -1
resp['msg'] = "请输入符合规范的邮箱~~"
return jsonify(resp)
user_info = g.current_user
user_info.nickname = nickname
user_info.email = email
db.session.add(user_info)
db.session.commit()
return jsonify(resp)
@route_user.route("/reset-pwd", methods=["GET", "POST"])
def reset_pwd():
if request.method == "GET":
return ops_render("user/reset_pwd.html", {'current': 'reset-pwd'})
resp = {'code': 200, 'msg': '操作成功~', 'data': {}}
req = request.values
old_password = req['old_password'] if 'old_password' in req else ''
new_password = req['new_password'] if 'new_password' in req else ''
if old_password is None or len(old_password) < 6:
resp['code'] = -1
resp['msg'] = "请输入符合规范的原密码~~"
return jsonify(resp)
if new_password is None or len(new_password) < 6:
resp['code'] = -1
resp['msg'] = "请输入符合规范的新密码~~"
return jsonify(resp)
if old_password == new_password:
resp['code'] = -1
resp['msg'] = "请重新输入一个吧,新密码和原密码不能相同哦~~"
return jsonify(resp)
user_info = g.current_user
if user_info.uid == 1:
resp['code'] = -1
resp['msg'] = "该用户是演示账号,不准修改密码和登录用户名~~"
return jsonify(resp)
user_info.login_pwd = UserService.generate_password(new_password, user_info.login_salt)
db.session.add(user_info)
db.session.commit()
response = make_response(json.dumps(resp))
response.set_cookie(app.config['AUTH_COOKIE_NAME'], '%s#%s' % (
        UserService.generate_auth_code(user_info), user_info.uid), 60 * 60 * 24 * 120)  # keep the cookie for 120 days
return response
@route_user.route("/logout")
def logout():
response = make_response(redirect(UrlManager.build_url("/user/login")))
response.delete_cookie(app.config['AUTH_COOKIE_NAME'])
return response
| 30.928058 | 96 | 0.619214 |
0dda58fa041b17a77b08b94ca9faf18663dfa17f
| 125 |
py
|
Python
|
historie/models.py
|
mribrgr/StuRa-Mitgliederdatenbank
|
87a261d66c279ff86056e315b05e6966b79df9fa
|
[
"MIT"
] | 8 |
2019-11-26T13:34:46.000Z
|
2021-06-21T13:41:57.000Z
|
historie/models.py
|
mribrgr/StuRa-Mitgliederdatenbank
|
87a261d66c279ff86056e315b05e6966b79df9fa
|
[
"MIT"
] | 93 |
2019-12-16T09:29:10.000Z
|
2021-04-24T12:03:33.000Z
|
historie/models.py
|
mribrgr/StuRa-Mitgliederdatenbank
|
87a261d66c279ff86056e315b05e6966b79df9fa
|
[
"MIT"
] | 2 |
2020-12-03T12:43:19.000Z
|
2020-12-22T21:48:47.000Z
|
from django.db import models
from django.contrib.auth.models import User
from simple_history import register
register(User)
| 20.833333 | 43 | 0.84 |
21a11cec17006998db2cd69cfb41b493e49eb088
| 203 |
py
|
Python
|
savageml/models/__init__.py
|
savagewil/SavageML
|
d5aa9a5305b5de088e3bf32778252c877faec41d
|
[
"MIT"
] | null | null | null |
savageml/models/__init__.py
|
savagewil/SavageML
|
d5aa9a5305b5de088e3bf32778252c877faec41d
|
[
"MIT"
] | null | null | null |
savageml/models/__init__.py
|
savagewil/SavageML
|
d5aa9a5305b5de088e3bf32778252c877faec41d
|
[
"MIT"
] | null | null | null |
from .base_model import BaseModel
from .matix_net_model import MatrixNetModel
from .layerless_dense_net_model import LayerlessDenseNetModel
from .layerless_sparse_net_model import LayerlessSparseNetModel
| 50.75 | 63 | 0.906404 |
1d3104238a43acd1fab07c40823cb1f58af1e4a6
| 17,518 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/manufacturing/doctype/production_plan/production_plan.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/manufacturing/doctype/production_plan/production_plan.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/manufacturing/doctype/production_plan/production_plan.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import msgprint, _
from frappe.model.document import Document
from erpnext.manufacturing.doctype.bom.bom import validate_bom_no
from frappe.utils import cstr, flt, cint, nowdate, add_days, comma_and, now_datetime
from erpnext.manufacturing.doctype.work_order.work_order import get_item_details
from six import string_types
class ProductionPlan(Document):
def validate(self):
self.calculate_total_planned_qty()
self.set_status()
def validate_data(self):
for d in self.get('po_items'):
if not d.bom_no:
frappe.throw(_("Please select BOM for Item in Row {0}".format(d.idx)))
else:
validate_bom_no(d.item_code, d.bom_no)
if not flt(d.planned_qty):
frappe.throw(_("Please enter Planned Qty for Item {0} at row {1}").format(d.item_code, d.idx))
def get_open_sales_orders(self):
""" Pull sales orders which are pending to deliver based on criteria selected"""
so_filter = item_filter = ""
if self.from_date:
so_filter += " and so.transaction_date >= %(from_date)s"
if self.to_date:
so_filter += " and so.transaction_date <= %(to_date)s"
if self.customer:
so_filter += " and so.customer = %(customer)s"
if self.project:
so_filter += " and so.project = %(project)s"
if self.item_code:
item_filter += " and so_item.item_code = %(item)s"
open_so = frappe.db.sql("""
select distinct so.name, so.transaction_date, so.customer, so.base_grand_total
from `tabSales Order` so, `tabSales Order Item` so_item
where so_item.parent = so.name
and so.docstatus = 1 and so.status not in ("Stopped", "Closed")
and so.company = %(company)s
and so_item.qty > so_item.delivered_qty {0} {1}
and (exists (select name from `tabBOM` bom where bom.item=so_item.item_code
and bom.is_active = 1)
or exists (select name from `tabPacked Item` pi
where pi.parent = so.name and pi.parent_item = so_item.item_code
and exists (select name from `tabBOM` bom where bom.item=pi.item_code
and bom.is_active = 1)))
""".format(so_filter, item_filter), {
"from_date": self.from_date,
"to_date": self.to_date,
"customer": self.customer,
"project": self.project,
"item": self.item_code,
"company": self.company
}, as_dict=1)
self.add_so_in_table(open_so)
def add_so_in_table(self, open_so):
""" Add sales orders in the table"""
self.set('sales_orders', [])
for data in open_so:
self.append('sales_orders', {
'sales_order': data.name,
'sales_order_date': data.transaction_date,
'customer': data.customer,
'grand_total': data.grand_total
})
def get_pending_material_requests(self):
""" Pull Material Requests that are pending based on criteria selected"""
mr_filter = item_filter = ""
if self.from_date:
mr_filter += " and mr.transaction_date >= %(from_date)s"
if self.to_date:
mr_filter += " and mr.transaction_date <= %(to_date)s"
if self.warehouse:
mr_filter += " and mr_item.warehouse = %(warehouse)s"
if self.item_code:
item_filter += " and mr_item.item_code = %(item)s"
pending_mr = frappe.db.sql("""
select distinct mr.name, mr.transaction_date
from `tabMaterial Request` mr, `tabMaterial Request Item` mr_item
where mr_item.parent = mr.name
and mr.material_request_type = "Manufacture"
and mr.docstatus = 1 and mr.company = %(company)s
and mr_item.qty > ifnull(mr_item.ordered_qty,0) {0} {1}
and (exists (select name from `tabBOM` bom where bom.item=mr_item.item_code
and bom.is_active = 1))
""".format(mr_filter, item_filter), {
"from_date": self.from_date,
"to_date": self.to_date,
"warehouse": self.warehouse,
"item": self.item_code,
"company": self.company
}, as_dict=1)
self.add_mr_in_table(pending_mr)
def add_mr_in_table(self, pending_mr):
""" Add Material Requests in the table"""
self.set('material_requests', [])
for data in pending_mr:
self.append('material_requests', {
'material_request': data.name,
'material_request_date': data.transaction_date
})
def get_items(self):
if self.get_items_from == "Sales Order":
self.get_so_items()
elif self.get_items_from == "Material Request":
self.get_mr_items()
def get_so_items(self):
so_list = [d.sales_order for d in self.sales_orders if d.sales_order]
if not so_list:
msgprint(_("Please enter Sales Orders in the above table"))
return []
item_condition = ""
if self.item_code:
item_condition = ' and so_item.item_code = "{0}"'.format(frappe.db.escape(self.item_code))
items = frappe.db.sql("""select distinct parent, item_code, warehouse,
(qty - delivered_qty)*conversion_factor as pending_qty
from `tabSales Order Item` so_item
where parent in (%s) and docstatus = 1 and qty > delivered_qty
and exists (select name from `tabBOM` bom where bom.item=so_item.item_code
and bom.is_active = 1) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
if self.item_code:
item_condition = ' and pi.item_code = "{0}"'.format(frappe.db.escape(self.item_code))
packed_items = frappe.db.sql("""select distinct pi.parent, pi.item_code, pi.warehouse as warehouse,
(((so_item.qty - so_item.delivered_qty) * pi.qty) / so_item.qty)
as pending_qty
from `tabSales Order Item` so_item, `tabPacked Item` pi
where so_item.parent = pi.parent and so_item.docstatus = 1
and pi.parent_item = so_item.item_code
and so_item.parent in (%s) and so_item.qty > so_item.delivered_qty
and exists (select name from `tabBOM` bom where bom.item=pi.item_code
and bom.is_active = 1) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
self.add_items(items + packed_items)
self.calculate_total_planned_qty()
def get_mr_items(self):
mr_list = [d.material_request for d in self.material_requests if d.material_request]
if not mr_list:
msgprint(_("Please enter Material Requests in the above table"))
return []
item_condition = ""
if self.item_code:
item_condition = " and mr_item.item_code ='{0}'".format(frappe.db.escape(self.item_code))
items = frappe.db.sql("""select distinct parent, name, item_code, warehouse,
(qty - ordered_qty) as pending_qty
from `tabMaterial Request Item` mr_item
where parent in (%s) and docstatus = 1 and qty > ordered_qty
and exists (select name from `tabBOM` bom where bom.item=mr_item.item_code
and bom.is_active = 1) %s""" % \
(", ".join(["%s"] * len(mr_list)), item_condition), tuple(mr_list), as_dict=1)
self.add_items(items)
self.calculate_total_planned_qty()
def add_items(self, items):
self.set('po_items', [])
for data in items:
item_details = get_item_details(data.item_code)
pi = self.append('po_items', {
'include_exploded_items': 1,
'warehouse': data.warehouse,
'item_code': data.item_code,
'description': item_details and item_details.description or '',
'stock_uom': item_details and item_details.stock_uom or '',
'bom_no': item_details and item_details.bom_no or '',
'planned_qty': data.pending_qty,
'pending_qty': data.pending_qty,
'planned_start_date': now_datetime()
})
if self.get_items_from == "Sales Order":
pi.sales_order = data.parent
pi.sales_order_item = data.name
elif self.get_items_from == "Material Request":
pi.material_request = data.parent
pi.material_request_item = data.name
def calculate_total_planned_qty(self):
self.total_planned_qty = 0
for d in self.po_items:
self.total_planned_qty += flt(d.planned_qty)
def calculate_total_produced_qty(self):
self.total_produced_qty = 0
for d in self.po_items:
self.total_produced_qty += flt(d.produced_qty)
self.db_set("total_produced_qty", self.total_produced_qty, update_modified=False)
def update_produced_qty(self, produced_qty, production_plan_item):
for data in self.po_items:
if data.name == production_plan_item:
data.produced_qty = produced_qty
data.db_update()
self.calculate_total_produced_qty()
self.set_status()
self.db_set('status', self.status)
def on_cancel(self):
self.db_set('status', 'Cancelled')
self.delete_draft_work_order()
def delete_draft_work_order(self):
for d in frappe.get_all('Work Order', fields = ["name"],
filters = {'docstatus': 0, 'production_plan': ("=", self.name)}):
frappe.delete_doc('Work Order', d.name)
def set_status(self):
self.status = {
'0': 'Draft',
'1': 'Submitted'
}[cstr(self.docstatus or 0)]
if self.total_produced_qty > 0:
self.status = "In Process"
if self.total_produced_qty == self.total_planned_qty:
self.status = "Completed"
if self.status != 'Completed':
self.update_ordered_status()
self.update_requested_status()
def update_ordered_status(self):
update_status = False
for d in self.po_items:
if d.planned_qty == d.ordered_qty:
update_status = True
if update_status and self.status != 'Completed':
self.status = 'In Process'
def update_requested_status(self):
update_status = True
for d in self.mr_items:
if d.quantity != d.requested_qty:
update_status = False
if update_status:
self.status = 'Material Requested'
def get_production_items(self):
item_dict = {}
for d in self.po_items:
item_details= {
"production_item" : d.item_code,
"use_multi_level_bom" : d.include_exploded_items,
"sales_order" : d.sales_order,
"material_request" : d.material_request,
"material_request_item" : d.material_request_item,
"bom_no" : d.bom_no,
"description" : d.description,
"stock_uom" : d.stock_uom,
"company" : self.company,
"fg_warehouse" : d.warehouse,
"production_plan" : self.name,
"production_plan_item" : d.name
}
item_details.update({
"project": self.project or frappe.db.get_value("Sales Order", d.sales_order, "project")
})
if self.get_items_from == "Material Request":
item_details.update({
"qty": d.planned_qty
})
item_dict[(d.item_code, d.material_request_item, d.warehouse)] = item_details
else:
item_details.update({
"qty":flt(item_dict.get((d.item_code, d.sales_order, d.warehouse),{})
.get("qty")) + flt(d.planned_qty)
})
item_dict[(d.item_code, d.sales_order, d.warehouse)] = item_details
return item_dict
def get_items_for_material_requests(self):
self.mr_items = []
for data in self.po_items:
bom_wise_item_details = {}
if not data.planned_qty:
frappe.throw(_("For row {0}: Enter planned qty").format(data.idx))
if data.include_exploded_items and data.bom_no and self.include_subcontracted_items:
for d in frappe.db.sql("""select bei.item_code, item.default_bom as bom,
ifnull(sum(bei.stock_qty/ifnull(bom.quantity, 1)), 0) as qty, item.item_name,
bei.description, bei.stock_uom, item.min_order_qty, bei.source_warehouse,
item.default_material_request_type, item.min_order_qty, item.default_warehouse
from
`tabBOM Explosion Item` bei, `tabBOM` bom, `tabItem` item
where
bom.name = bei.parent and item.name = bei.item_code
and bei.docstatus < 2 and bom.name=%s and item.is_stock_item in (1, {0})
group by bei.item_code, bei.stock_uom""".format(self.include_non_stock_items),
data.bom_no, as_dict=1):
bom_wise_item_details.setdefault(d.item_code, d)
else:
bom_wise_item_details = self.get_subitems(data, bom_wise_item_details, data.bom_no, 1)
for item, item_details in bom_wise_item_details.items():
if item_details.qty > 0:
self.add_item_in_material_request_items(item, item_details, data)
def get_subitems(self, data, bom_wise_item_details, bom_no, parent_qty):
items = frappe.db.sql("""
SELECT
bom_item.item_code, default_material_request_type, item.item_name,
ifnull(%(parent_qty)s * sum(bom_item.stock_qty/ifnull(bom.quantity, 1)), 0) as qty,
item.is_sub_contracted_item as is_sub_contracted, bom_item.source_warehouse,
item.default_bom as default_bom, bom_item.description as description,
bom_item.stock_uom as stock_uom, item.min_order_qty as min_order_qty,
item.default_warehouse
FROM
`tabBOM Item` bom_item, `tabBOM` bom, tabItem item
where
bom.name = bom_item.parent and bom.name = %(bom)s
and bom_item.docstatus < 2 and bom_item.item_code = item.name
and item.is_stock_item in (1, {0})
group by bom_item.item_code""".format(self.include_non_stock_items),{
'bom': bom_no,
'parent_qty': parent_qty
}, as_dict=1)
for d in items:
if not data.include_exploded_items or not d.default_bom:
if d.item_code in bom_wise_item_details:
bom_wise_item_details[d.item_code].qty = bom_wise_item_details[d.item_code].qty + d.qty
else:
bom_wise_item_details[d.item_code] = d
if data.include_exploded_items and d.default_bom:
if ((d.default_material_request_type in ["Manufacture", "Purchase"] and
not d.is_sub_contracted) or (d.is_sub_contracted and self.include_subcontracted_items)):
if d.qty > 0:
self.get_subitems(data, bom_wise_item_details, d.default_bom, d.qty)
return bom_wise_item_details
def add_item_in_material_request_items(self, item, row, data):
total_qty = row.qty * data.planned_qty
projected_qty, actual_qty = get_bin_details(row)
requested_qty = 0
if self.ignore_existing_ordered_qty:
requested_qty = total_qty
elif total_qty > projected_qty:
requested_qty = total_qty - projected_qty
if requested_qty and requested_qty < row.min_order_qty:
requested_qty = row.min_order_qty
if requested_qty > 0:
self.append('mr_items', {
'item_code': item,
'item_name': row.item_name,
'quantity': requested_qty,
'warehouse': row.source_warehouse or row.default_warehouse,
'actual_qty': actual_qty,
'min_order_qty': row.min_order_qty,
'sales_order': data.sales_order
})
def make_work_order(self):
wo_list = []
self.validate_data()
items_data = self.get_production_items()
for key, item in items_data.items():
work_order = self.create_work_order(item)
if work_order:
wo_list.append(work_order)
frappe.flags.mute_messages = False
if wo_list:
wo_list = ["""<a href="#Form/Work Order/%s" target="_blank">%s</a>""" % \
(p, p) for p in wo_list]
msgprint(_("{0} created").format(comma_and(wo_list)))
else :
msgprint(_("No Work Orders created"))
def create_work_order(self, item):
from erpnext.manufacturing.doctype.work_order.work_order import OverProductionError, get_default_warehouse
warehouse = get_default_warehouse()
wo = frappe.new_doc("Work Order")
wo.update(item)
wo.set_work_order_operations()
if not wo.fg_warehouse:
wo.fg_warehouse = warehouse.get('fg_warehouse')
try:
wo.insert()
return wo.name
except OverProductionError:
pass
def make_material_request(self):
material_request_list = []
item_details = self.get_itemwise_qty()
for item_code, rows in item_details.items():
item_doc = frappe.get_doc("Item", item_code)
schedule_date = add_days(nowdate(), cint(item_doc.lead_time_days))
material_request = frappe.new_doc("Material Request")
material_request.update({
"transaction_date": nowdate(),
"status": "Draft",
"company": self.company,
"requested_by": frappe.session.user,
"schedule_date": schedule_date,
'material_request_type': item_doc.default_material_request_type
})
for idx in rows:
child = self.mr_items[cint(idx)-1]
material_request.append("items", {
"item_code": item_code,
"qty": child.quantity,
"schedule_date": schedule_date,
"warehouse": child.warehouse,
"sales_order": child.sales_order,
'production_plan': self.name,
'material_request_plan_item': child.name,
"project": frappe.db.get_value("Sales Order", child.sales_order, "project") \
if child.sales_order else None
})
material_request.flags.ignore_permissions = 1
material_request.run_method("set_missing_values")
material_request.submit()
material_request_list.append(material_request.name)
frappe.flags.mute_messages = False
if material_request_list:
material_request_list = ["""<a href="#Form/Material Request/%s" target="_blank">%s</a>""" % \
(p, p) for p in material_request_list]
msgprint(_("{0} created").format(comma_and(material_request_list)))
else :
msgprint(_("No material request created"))
def get_itemwise_qty(self):
item_details = {}
for data in self.get('mr_items'):
if data.item_code in item_details:
item_details[data.item_code].append(data.idx)
else:
item_details.setdefault(data.item_code, [data.idx])
return item_details
@frappe.whitelist()
def get_bin_details(row):
if isinstance(row, string_types):
row = frappe._dict(json.loads(row))
conditions = ""
warehouse = row.source_warehouse or row.default_warehouse or row.warehouse
if warehouse:
conditions = " and warehouse='{0}'".format(frappe.db.escape(warehouse))
item_projected_qty = frappe.db.sql(""" select ifnull(sum(projected_qty),0) as projected_qty,
ifnull(sum(actual_qty),0) as actual_qty from `tabBin`
where item_code = %(item_code)s {conditions}
""".format(conditions=conditions), { "item_code": row.item_code }, as_list=1)
return item_projected_qty and item_projected_qty[0] or (0,0)
| 34.966068 | 108 | 0.712296 |
1da64cb440ff66f51e1dd60e36c978a349c0eb78
| 4,406 |
py
|
Python
|
app/problem/admin.py
|
pushyzheng/docker-oj-web
|
119abae3763cd2e53c686a320af7f4f5af1f16ca
|
[
"MIT"
] | 2 |
2019-06-24T08:34:39.000Z
|
2019-06-27T12:23:47.000Z
|
app/problem/admin.py
|
pushyzheng/docker-oj-web
|
119abae3763cd2e53c686a320af7f4f5af1f16ca
|
[
"MIT"
] | null | null | null |
app/problem/admin.py
|
pushyzheng/docker-oj-web
|
119abae3763cd2e53c686a320af7f4f5af1f16ca
|
[
"MIT"
] | null | null | null |
# encoding:utf-8
from app import app, db
from app.problem.models import Problem
from utils import logger, success
from app.auth.main import auth
from app.common.models import RoleName
from schemas.problem import *
import os
from flask import jsonify, g, abort
from flask_expects_json import expects_json
from sqlalchemy import desc
import requests
@app.route('/admin/problems', methods=['POST'])
@expects_json(save_problem_schema)
# @auth(role=RoleName.ADMIN)
def save_problem():
case_list, answer_list = g.data['case_list'], g.data['answer_list']
if len(case_list) != len(answer_list):
abort(400, 'case list length must equals answer list length')
problem = Problem()
problem.from_dict(g.data)
problem.id = get_problem_id()
problem.author = 123
    # Call the GitHub API to render the problem's markdown content
problem.content_html = render_markdown_text(problem.content)
resp = problem.to_dict()
db.session.add(problem)
db.session.commit()
write_case_and_answer_to_file(problem.id, case_list, answer_list)
return jsonify(data=resp)
@app.route('/admin/problems/<id>', methods=['GET'])
# @auth(role=RoleName.ADMIN)
def admin_get_problem(id):
problem = Problem.query.filter_by(id=id).first()
input_case_list = get_problem_cases(id)
output_case_list = get_problem_answer(id)
setattr(problem, 'input_case_list', input_case_list)
setattr(problem, 'output_case_list', output_case_list)
return success(problem.to_dict())
@app.route('/admin/problems/<id>', methods=['PUT'])
# @auth(role=RoleName.ADMIN)
@expects_json(update_problem_schema)
def update_problem(id):
problem = valid_exists_problem(id)
for key in g.data.keys():
if g.data[key] and g.data[key] != '':
setattr(problem, key, g.data[key])
problem.content_html = render_markdown_text(g.data['content'])
resp = problem.to_dict()
db.session.add(problem)
db.session.commit()
return jsonify(data=resp)
@app.route('/admin/problems/<id>/cases-and-answers', methods=['PUT'])
# @auth(role=RoleName.ADMIN)
@expects_json(update_cases_answers_schema)
def update_problem_answers(id):
valid_exists_problem(id)
case_list, answer_list = g.data['case_list'], g.data['answer_list']
# if len(case_list) != len(answer_list):
# abort(400, 'case list length must equals answer list length')
    # Write the cases and answers to the test case file and the answer file
write_case_and_answer_to_file(id, case_list, answer_list)
return jsonify(data=None)
def get_problem_cases(id):
valid_exists_problem(id)
problem_case_path = '{}/case_{}.txt'.format(app.config['CASE_PATH'], id)
if not os.path.exists(problem_case_path):
return jsonify(data=None)
case_list = []
with open(problem_case_path) as f:
lines = f.readlines()
for line in lines:
case_list.append(line.replace("\n", ""))
return case_list
def get_problem_answer(id):
valid_exists_problem(id)
problem_answer_path = '{}/answer_{}.txt'.format(app.config['ANSWER_PATH'], id)
if not os.path.exists(problem_answer_path):
return jsonify(data=None)
answer_list = []
with open(problem_answer_path) as f:
lines = f.readlines()
for line in lines:
answer_list.append(line.replace("\n", ""))
return answer_list
def valid_exists_problem(id):
problem = Problem.query.filter_by(id=id).first()
if not problem:
abort(404, 'The problem not found')
return problem
def get_problem_id():
problem = Problem.query.order_by(desc(Problem.id)).first()
if not problem:
return 1
return problem.id + 1
def write_case_and_answer_to_file(problem_id, case_list, answer_list):
problem_case_path = '{}/case_{}.txt'.format(app.config['CASE_PATH'], problem_id)
problem_answer_path = '{}/answer_{}.txt'.format(app.config['ANSWER_PATH'], problem_id)
with open(problem_case_path, 'w', encoding='utf-8') as f:
for case in case_list:
f.write(case + '\n')
with open(problem_answer_path, 'w', encoding='utf-8') as f:
for answer in answer_list:
f.write(answer + '\n')
def render_markdown_text(text):
text = text.encode('utf-8')
url = "https://api.github.com/markdown/raw"
headers = {
"Content-Type": "text/plain"
}
response = requests.request("POST", url, data=text, headers=headers)
return response.text
| 27.5375 | 90 | 0.688833 |
d53b0fcc700a8ea4071c41b5598165cb774021a1
| 191 |
py
|
Python
|
weibo/test/testData.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
weibo/test/testData.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
weibo/test/testData.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
#coding:utf8
'''
Created on April 19, 2016
@author: wb-zhaohaibo
'''
datas = []
weibo_data = {}
if weibo_data is None:
print "sss"
else:
print "---"
print len(weibo_data)
| 14.692308 | 25 | 0.596859 |
63b753d6e11ef67e52bb5ca5c1df2f9489269cfa
| 778 |
py
|
Python
|
etc code/bodysize.py
|
gusghrlrl101/Dance-Helper
|
e9f5f6168f1dc6944c25cc8ff7eb829791fe484c
|
[
"MIT"
] | 11 |
2019-06-17T02:59:01.000Z
|
2021-05-24T14:10:04.000Z
|
etc code/bodysize.py
|
RabbitG29/Dance-Helper
|
e9f5f6168f1dc6944c25cc8ff7eb829791fe484c
|
[
"MIT"
] | null | null | null |
etc code/bodysize.py
|
RabbitG29/Dance-Helper
|
e9f5f6168f1dc6944c25cc8ff7eb829791fe484c
|
[
"MIT"
] | 3 |
2019-06-17T02:59:32.000Z
|
2019-07-03T04:31:30.000Z
|
# -*- Encoding:UTF-8 -*- #
import pickle
import sys
import json
import os
# use -> python bodysize.py 0000
#file isn't exist
if(os.path.isfile('./'+str(sys.argv[1])+'body.txt')==False):
with open('./'+str(sys.argv[1])+'.pkl','rb') as f:
data = pickle.load(f)
print(1)
print(str(data))
# file 1 write
f2 = open('./'+str(sys.argv[1])+'body.txt', 'w')
line=str(data)
print(line)
body3='{"betas": '
body=line.partition("'betas':")[2]
body2=body.partition("])")[0]
body3+=body2.partition("array(")[2]
body3+="]}"
print(str(body3))
f2.write(str(body3))
f2.close()
# parsing
with open('./'+str(sys.argv[1])+'body.txt') as json_file:
json_data = json.load(json_file)
json_string = json_data["betas"]
print(str(json_string))
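# Hypothetical shortcut (assumes data['betas'] in the pickle is a numpy array;
# the helper name export_betas is made up): the betas can be written out
# directly instead of rebuilding JSON from str(data) with partition().
def export_betas(data, path):
    import numpy as np
    betas = np.asarray(data['betas'])
    with open(path, 'w') as out:
        json.dump({"betas": betas.tolist()}, out)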
| 21.611111 | 60 | 0.604113 |
988b19bbdf15a5a8ae0c73a3429977fc96020f0b
| 400 |
py
|
Python
|
source/data.py
|
casulemarc/Job-Search-Engine
|
a86acd23a5e6f367706c978c25e39a4b65647da9
|
[
"MIT"
] | null | null | null |
source/data.py
|
casulemarc/Job-Search-Engine
|
a86acd23a5e6f367706c978c25e39a4b65647da9
|
[
"MIT"
] | null | null | null |
source/data.py
|
casulemarc/Job-Search-Engine
|
a86acd23a5e6f367706c978c25e39a4b65647da9
|
[
"MIT"
] | null | null | null |
import json
import requests
url = "https://aerodatabox.p.rapidapi.com/flights/%7BsearchBy%7D/KL1395/2020-06-10"
headers = {
'x-rapidapi-host': "aerodatabox.p.rapidapi.com",
'x-rapidapi-key': "eba13d5c36mshf3468974d77a55ep141171jsn483f22443c2d"
}
response = requests.get(url, headers=headers)
print(response.status_code)
data = response.text
# dataJsonFormat = json.loads(data)
print(data)
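# Sketch of a slightly more defensive variant (same endpoint and headers; the
# helper name fetch_flights is made up): fail fast on HTTP errors and let
# requests decode the JSON body.
def fetch_flights(url, headers):
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    return response.json()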
| 20 | 83 | 0.745 |
f695aefae34a2b259725eecfcb307a29a3c1ed7b
| 367 |
py
|
Python
|
pacman-arch/test/pacman/tests/fileconflict021.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/fileconflict021.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/fileconflict021.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "upgrade package overwriting existing unowned file with directory"
lp1 = pmpkg("pkg1")
self.addpkg2db("local", lp1)
self.filesystem = ["file"]
p = pmpkg("pkg1", "1.0-2")
p.files = ["file/"]
self.addpkg2db("sync", p)
self.args = "-S pkg1"
self.addrule("PACMAN_RETCODE=1")
self.addrule("PKG_VERSION=pkg1|1.0-1")
self.addrule("!DIR_EXIST=file/")
| 21.588235 | 85 | 0.697548 |
63de898b21d09598e67ceb8893ad2125a4918d24
| 8,412 |
py
|
Python
|
ImageClassify/advanced CNN/Inseption_Network_xrh.py
|
Xinrihui/DeepLearningApp
|
8d86b88251ee8d37358c642b1ec4a341767bfd17
|
[
"Apache-2.0"
] | 2 |
2021-08-25T01:13:29.000Z
|
2021-10-10T14:49:59.000Z
|
ImageClassify/advanced CNN/Inseption_Network_xrh.py
|
Xinrihui/DeepLearningApp
|
8d86b88251ee8d37358c642b1ec4a341767bfd17
|
[
"Apache-2.0"
] | null | null | null |
ImageClassify/advanced CNN/Inseption_Network_xrh.py
|
Xinrihui/DeepLearningApp
|
8d86b88251ee8d37358c642b1ec4a341767bfd17
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Works with tensorflow >= 2.0, where keras is integrated directly into tensorflow
# ref: https://keras.io/about/
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, \
Flatten, Conv2D, AveragePooling2D, MaxPooling2D, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import glorot_uniform
from utils.dataset_xrh import *
from utils.utils_xrh import *
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
class Inception:
"""
    Implements part of the Inception network
    Author: xrh
    Date: 2019-10-16
    ref:
    1. Paper: Going Deeper with Convolutions
"""
def __init__(self, input_shape=(64, 64, 3), class_num=6, model_path='models/Inception.h5'):
"""
        :param input_shape: size of the input images, e.g. input_shape=(64, 64, 3)
        :param class_num: number of target classes
        :param model_path: path of the pretrained model
"""
self.input_shape = input_shape
self.class_num = class_num
self.model_path = model_path
def conv2d_bn(self, x,
n_c,
n_h,
n_w,
padding='same',
strides=(1, 1),
name=None):
"""
        Implements a convolution layer followed by a BatchNormalization layer
        :param x: input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
        :param n_c: number of output channels
        :param n_h: height of the convolution kernel
        :param n_w: width of the convolution kernel
        :param padding: padding mode
        :param strides: strides
        :param name: name of the current layer
:return:
"""
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
x = Conv2D(
n_c, (n_h, n_w),
strides=strides,
padding=padding,
use_bias=False,
name=conv_name)(x)
        x = BatchNormalization(axis=3, scale=False, name=bn_name)(x)  # BN acts on the output channels (feature maps); each channel learns its own BN parameters
x = Activation('relu', name=name)(x)
return x
def inception_v1_model(self, input_shape=(64, 64, 3)):
"""
        A truncated part of the inception_v1 network
        :param input_shape: size of the input images, e.g. input_shape=(64, 64, 3); a sample dimension (m) is prepended at run time, so the actual shape is (m, 64, 64, 3)
:return:
"""
# Define the input as a tensor with shape input_shape
X_input = Input(input_shape) #
# conv->maxpool->conv->maxpool
X = self.conv2d_bn(X_input, 64, 7, 7, strides=(2, 2)) # 29x29
X = MaxPooling2D((3, 3), strides=(2, 2))(X) # 14x14
X = self.conv2d_bn(X, 64, 1, 1)
X = self.conv2d_bn(X, 192, 3, 3)
X = MaxPooling2D((3, 3), strides=(2, 2))(X) # 6x6
# inception(3a)
# mixed 0 :28 x 28 x 192 -> 28 x 28 x 256 (64+128+32+32=256)
        branch_0 = self.conv2d_bn(X, 64, 1, 1)  # the channel sequence [64,96,128,16,32,32] follows the same order as table 1 in the paper
branch_1 = self.conv2d_bn(X, 96, 1, 1)
branch_1 = self.conv2d_bn(branch_1, 128, 3, 3)
branch_2 = self.conv2d_bn(X, 16, 1, 1)
branch_2 = self.conv2d_bn(branch_2, 32, 5, 5)
branch_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(X)
branch_3 = self.conv2d_bn(branch_3, 32, 1, 1)
        channel_axis = 3  # index of the channel dimension
X = Concatenate(
axis=channel_axis,
name='mixed0')([branch_0, branch_1, branch_2, branch_3])
# inception(3b)
# mixed 1 :28 x 28 x 256 -> 28 x 28 x 480 (128+192+96+64=480)
branch_0 = self.conv2d_bn(X, 128, 1, 1)
branch_1 = self.conv2d_bn(X, 128, 1, 1)
branch_1 = self.conv2d_bn(branch_1, 192, 3, 3)
branch_2 = self.conv2d_bn(X, 32, 1, 1)
branch_2 = self.conv2d_bn(branch_2, 96, 5, 5)
branch_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(X)
branch_3 = self.conv2d_bn(branch_3, 64, 1, 1)
X = Concatenate(
axis=channel_axis,
name='mixed1')([branch_0, branch_1, branch_2, branch_3])
# AVGPOOL
X = AveragePooling2D(pool_size=(2, 2), padding='valid')(X)
# output layer
X = Flatten()(X)
X = Dense(self.class_num, activation='softmax', name='fc' + str(self.class_num), kernel_initializer=glorot_uniform(seed=0))(X)
# 6 classes
# Create model
model = Model(inputs=X_input, outputs=X, name='inception_v1')
return model
def fit(self, X_train, Y_train, epoch_num=20, batch_size=32):
"""
        Train the model
        :param X_train: input images
        :param Y_train: output labels
        :param epoch_num: number of training epochs; one epoch generally means the model has seen every training sample once
        :param batch_size: number of samples fed to the model at a time when using mini-batch gradient descent (default = 32)
:return:
"""
        m = np.shape(X_train)[0]  # total number of training samples
input_shape = np.shape(X_train)[1:]
        # one-hot encode the labels
Y_train_oh = ArrayUtils.one_hot_array(Y_train, self.class_num)
        assert self.class_num == np.shape(Y_train_oh)[-1]  # the configured class count must match the label classes
        assert self.input_shape == input_shape  # the configured input shape must match the training data
model = self.inception_v1_model(self.input_shape)
        # print all layers of the model (computation graph)
# print(model.summary())
        # plot the computation graph
# plot_model(model, to_file='models/resnet_model.png')
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, Y_train_oh, epochs=epoch_num, batch_size=batch_size)
# save the model
model.save(self.model_path)
print('save model dir:{} complete'.format(self.model_path))
def evaluate(self, X_test, Y_test):
"""
        Model evaluation
:param X_test:
:param Y_test:
:return:
"""
model = self.inception_v1_model(self.input_shape)
        # load the trained model
model.load_weights(self.model_path)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        # one-hot encode the labels
Y_test_oh = ArrayUtils.one_hot_array(Y_test, self.class_num)
result = model.evaluate(X_test, Y_test_oh)
accuracy = result[1]
return accuracy
def predict(self, X_test):
"""
        Model prediction
        :param X_test:
        :return: labels - predicted labels, shape: (N,)
"""
model = self.inception_v1_model(self.input_shape)
        # load the trained model
model.load_weights(self.model_path)
# model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
prob = model.predict(X_test) # shape(120,6)
        labels = np.argmax(prob, axis=1)  # axis=1 collapses dimension 1, shape: (N,)
return labels
class Test:
def test_signs_dataset(self):
signs_dataset_dir = 'datasets/signs'
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_signs_dataset(signs_dataset_dir)
        # normalize the image features
        X_train = X_train_orig / 255.
        X_test = X_test_orig / 255.
        Y_train = Y_train_orig
        Y_test = Y_test_orig
print("number of training examples = " + str(X_train.shape[0]))
print("number of test examples = " + str(X_test.shape[0]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))
inception_net = Inception(input_shape=(64, 64, 3), class_num=6)
# inception_net.fit(X_train=X_train, Y_train=Y_train)
y_predict = inception_net.predict(X_test)
print('test accuracy :', accuracy_score(y_predict, Y_test))
def test_cafir_dataset(self):
dataset_dir = 'datasets/cafir-10/cifar-10-batches-py'
        data = get_CIFAR10_data(dataset_dir, subtract_mean=False)  # subtract_mean: whether to normalize the sample features
for k, v in data.items():
print('%s: ' % k, v.shape)
inception_net = Inception(input_shape=(32, 32, 3), class_num=10)
# inception_net.fit(X_train=data['X_train'], Y_train=data['y_train'], epoch_num=30, batch_size=512)
y_predict = inception_net.predict(data['X_test'])
print('test accuracy :', accuracy_score(y_predict, data['y_test']))
if __name__ == '__main__':
test = Test()
# test.test_signs_dataset()
test.test_cafir_dataset()
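# Minimal, self-contained sketch of the branch-and-concatenate idea that the inception (3a)/(3b)
# blocks above hand-wire. Illustration only, not the author's full model; it assumes TensorFlow 2.x
# is available and uses the toy 28 x 28 x 192 input mentioned in the comments.
def inception_block_sketch():
    from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Concatenate
    from tensorflow.keras.models import Model
    inp = Input((28, 28, 192))
    b0 = Conv2D(64, (1, 1), padding='same', activation='relu')(inp)
    b1 = Conv2D(96, (1, 1), padding='same', activation='relu')(inp)
    b1 = Conv2D(128, (3, 3), padding='same', activation='relu')(b1)
    b2 = Conv2D(16, (1, 1), padding='same', activation='relu')(inp)
    b2 = Conv2D(32, (5, 5), padding='same', activation='relu')(b2)
    b3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(inp)
    b3 = Conv2D(32, (1, 1), padding='same', activation='relu')(b3)
    block = Model(inputs=inp, outputs=Concatenate(axis=3)([b0, b1, b2, b3]))
    return block.output_shape  # expected: (None, 28, 28, 256), i.e. 64 + 128 + 32 + 32 channels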
| 29.006897 | 134 | 0.598431 |
d65b1c484963db8f2b62413fbf79cb491de541a5
| 4,444 |
py
|
Python
|
tikz/histogram-symbols/process.py
|
RalfGuder/LaTeX-examples
|
a1bf9fe422969be1ca4674394ebd2170c07f7693
|
[
"MIT"
] | 1,231 |
2015-01-07T04:04:25.000Z
|
2022-03-31T17:43:29.000Z
|
tikz/histogram-symbols/process.py
|
DoubleL61/LaTeX-examples
|
cd0d97f85fadb59b7c6e9062b37a8bf7d725ba0c
|
[
"MIT"
] | 5 |
2015-05-10T13:10:47.000Z
|
2021-05-02T21:28:49.000Z
|
tikz/histogram-symbols/process.py
|
DoubleL61/LaTeX-examples
|
cd0d97f85fadb59b7c6e9062b37a8bf7d725ba0c
|
[
"MIT"
] | 400 |
2015-01-05T06:22:18.000Z
|
2022-03-19T04:07:59.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import shutil
import fileinput
import math
def main(filename, bins, maximum, yticks_number):
with open(filename) as f:
content = f.read().split("\n")
numbers = []
for line in content:
line = line.strip()
if line != "":
numbers.append(float(line))
numbers = sorted(numbers)
minimum = min(numbers)
bin_counter = [0 for i in range(bins+1)]
xticklabels = []
for i, number in enumerate(numbers):
if number >= minimum + (maximum - minimum)/bins*(bins+1):
bin_counter[bins] += 1
elif number < minimum:
bin_counter[0] += 1
else:
for b in range(bins):
lower = minimum + (maximum - minimum)/bins*b
upper = minimum + (maximum - minimum)/bins*(b+1)
if lower <= number < upper:
bin_counter[b] += 1
break
minimum = 0
for b in range(bins):
lower = minimum + (maximum - minimum)/bins*b
xticklabels.append(get_xticklabel(lower))
# Get labels for y-axis
yticks = []
ytickslabels = []
maxy = max(bin_counter)
maxylabel = int(10**math.floor(math.log(maxy, 10)))*int(str(maxy)[0])
ylabelsteps = int(math.ceil(maxylabel / yticks_number))
if ylabelsteps == 0:
ylabelsteps = 1
for i in range(0, maxylabel+1, ylabelsteps):
print("i: %i, %i" % (i, maxylabel))
print("label: %i%s" % get_si_suffix(i))
yticks.append(str(i))
ytickslabels.append(get_yticklabel(i, True))
    xticklabels.append(r"\infty")
return bin_counter, xticklabels, ytickslabels, yticks
def get_xticklabel(value):
return str(int(value))
def get_yticklabel(value, si_suffix):
value = float(value)
if si_suffix:
divide_by, suffix = get_si_suffix(value)
new_value = (value / divide_by)
if int(new_value) == new_value:
return ("%i" % int(new_value)) + suffix
else:
return ("%0.2f" % new_value) + suffix
else:
return str(value)
def get_si_suffix(value):
    # check the larger threshold first, otherwise every value >= 10**3 would be labelled "K"
    if value >= 10**6:
        return (10**6, "M")
    elif value >= 10**3:
        return (10**3, "K")
else:
return (1, "")
def modify_template(bin_counter, xticklabels, yticklabels, yticks):
shutil.copyfile("histogram-large-1d-dataset.template.tex",
"histogram-large-1d-dataset.tex")
xticklabels = ", ".join(map(lambda n: "$%s$" % n, xticklabels))
yticklabels = ", ".join(yticklabels)
yticks = ",".join(yticks)
coordinates = ""
for i, value in enumerate(bin_counter):
coordinates += "(%i, %i) " % (i, value)
for line in fileinput.input("histogram-large-1d-dataset.tex",
inplace=True):
line = line.replace("{{xticklabels}}", xticklabels)
line = line.replace("{{yticklabels}}", yticklabels)
line = line.replace("{{yticks}}", yticks)
line = line.replace("{{coordinates}}", coordinates)
print(line, end='')
if __name__ == '__main__':
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--file", dest="filename",
default="1ddata.txt",
help="use FILE as input data", metavar="FILE")
parser.add_argument("-b", "--bins", dest="bins", type=int,
default=15,
help="how many bins should be used")
parser.add_argument("-m", "--max", dest="max", type=float,
default=15000,
help=("what is the maximum number "
"that should get binned?"))
parser.add_argument("--yticks", dest="yticks", type=int,
default=5,
help=("How many y-ticks should be used?"))
args = parser.parse_args()
bin_counter, xticklabels, yticklabels, yticks = main(args.filename,
args.bins,
args.max,
args.yticks)
modify_template(bin_counter, xticklabels, yticklabels, yticks)
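# Quick sketch of the SI-suffix labelling provided by get_yticklabel() above (illustration only;
# the function name below is invented for this example, and the expected outputs rely on
# get_si_suffix() checking the 10**6 threshold before the 10**3 one).
def si_label_examples():
    return [get_yticklabel(v, True) for v in (7, 1500, 2500000)]  # expected: ['7', '1.50K', '2.50M']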
| 35.83871 | 74 | 0.550855 |
f67375b2c7330b24149920df54a5e5f1d0eacdd3
| 1,380 |
py
|
Python
|
Import/sourceconfig.py
|
simonbredemeier/ds100bot
|
1318b32b818891f4bc6d24f12fcf0ceae898f8bd
|
[
"Apache-2.0"
] | 15 |
2019-12-20T08:24:31.000Z
|
2022-03-18T09:24:25.000Z
|
Import/sourceconfig.py
|
simonbredemeier/ds100bot
|
1318b32b818891f4bc6d24f12fcf0ceae898f8bd
|
[
"Apache-2.0"
] | 124 |
2020-04-20T04:36:49.000Z
|
2022-01-29T11:08:09.000Z
|
Import/sourceconfig.py
|
simonbredemeier/ds100bot
|
1318b32b818891f4bc6d24f12fcf0ceae898f8bd
|
[
"Apache-2.0"
] | 12 |
2020-07-08T22:19:39.000Z
|
2022-03-19T09:13:11.000Z
|
#!/usr/bin/python3
"""Read source configuation"""
import json
import Persistence.log as log
from .access import Access
from .datasource import DataSource
from .error import JsonError
log_ = log.getLogger(__name__, fmt='{name}:{levelname} {message}')
class SourceConfig:
# pylint: disable=R0903
# pylint: disable=R0902
_mandatory_fields = (
'access',
'data',
'id',
'magic_hashtags'
)
def __init__(self, filepath):
self.file = filepath
with self.file.open() as jsonfile:
try:
self.json = json.load(jsonfile)
except json.JSONDecodeError as jde:
msg = "{}::{}::{}: JSON object could not be decoded: {}".format(
self.file, jde.lineno, jde.colno, jde.msg)
raise JsonError(msg)
for mf in SourceConfig._mandatory_fields:
if mf not in self.json:
msg = "Key {} missing".format(mf)
raise JsonError(msg)
self.access = [Access(a) for a in self.json['access']]
self.magic_hashtags = self.json['magic_hashtags']
self.data_list = [DataSource(d, self.json) for d in self.json['data']]
self.id = self.json['id']
self.head = self.json.get("headline", self.json.get("description", self.id))
self.desc = self.json.get("description", "")
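# Hedged illustration of the minimal JSON shape SourceConfig expects: only the four keys from
# _mandatory_fields come from the class above; the concrete values (and the emptiness of
# 'access'/'data') are made up for this sketch and are not a real bot configuration.
EXAMPLE_SOURCE_CONFIG = {
    "id": "example-source",
    "access": [],
    "data": [],
    "magic_hashtags": ["#DS100"],
    "description": "illustrative configuration only",
}
assert all(key in EXAMPLE_SOURCE_CONFIG for key in SourceConfig._mandatory_fields)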
| 33.658537 | 84 | 0.589855 |
148b206c3c6ca638a80a7e50cc4cd86151aedeec
| 1,386 |
py
|
Python
|
Openharmony v1.0/vendor/hisi/hi35xx/third_party/uboot/tools/binman/etype/intel_fit_ptr.py
|
clkbit123/TheOpenHarmony
|
0e6bcd9dee9f1a2481d762966b8bbd24baad6159
|
[
"MIT"
] | 1 |
2022-02-15T08:51:55.000Z
|
2022-02-15T08:51:55.000Z
|
hihope_neptune-oh_hid/00_src/v0.1/device/hisilicon/third_party/uboot/u-boot-2020.01/tools/binman/etype/intel_fit_ptr.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
hihope_neptune-oh_hid/00_src/v0.1/device/hisilicon/third_party/uboot/u-boot-2020.01/tools/binman/etype/intel_fit_ptr.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2016 Google, Inc
# Written by Simon Glass <[email protected]>
#
# Entry-type module for a pointer to an Intel Firmware Image Table
#
import struct
from blob import Entry_blob
class Entry_intel_fit_ptr(Entry_blob):
"""Intel Firmware Image Table (FIT) pointer
This entry contains a pointer to the FIT. It is required to be at address
0xffffffc0 in the image.
"""
def __init__(self, section, etype, node):
Entry_blob.__init__(self, section, etype, node)
if self.HasSibling('intel-fit') is False:
self.Raise("'intel-fit-ptr' section must have an 'intel-fit' sibling")
def _GetContents(self):
fit_pos = self.GetSiblingImagePos('intel-fit')
return struct.pack('<II', fit_pos or 0, 0)
def ObtainContents(self):
self.SetContents(self._GetContents())
return True
def ProcessContents(self):
"""Write an updated version of the FIT pointer to this entry
This is necessary since image_pos is not available when ObtainContents()
is called, since by then the entries have not been packed in the image.
"""
return self.ProcessContentsUpdate(self._GetContents())
def Pack(self, offset):
"""Special pack method to set the offset to the right place"""
return Entry_blob.Pack(self, 0xffffffc0)
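# Illustrative sketch of the 8-byte payload _GetContents() builds above: two little-endian
# 32-bit words, with the FIT position first (0 while it is still unknown). Example values only.
def _example_fit_ptr_payload(fit_pos=0xffee0000):
    return struct.pack('<II', fit_pos, 0)  # e.g. b'\x00\x00\xee\xff\x00\x00\x00\x00'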
| 33 | 82 | 0.681818 |
1adae5a15574475e8bd20b0d1402f83b31784d12
| 5,639 |
py
|
Python
|
deploy/deployctl/subcommands/reads_deployments.py
|
broadinstitute/gnomadjs
|
00da72cdc2cb0753f822c51456ec15147c024a1d
|
[
"MIT"
] | 38 |
2018-02-24T02:33:52.000Z
|
2020-03-03T23:17:04.000Z
|
deploy/deployctl/subcommands/reads_deployments.py
|
broadinstitute/gnomadjs
|
00da72cdc2cb0753f822c51456ec15147c024a1d
|
[
"MIT"
] | 385 |
2018-02-21T16:53:13.000Z
|
2020-03-04T00:52:40.000Z
|
deploy/deployctl/subcommands/reads_deployments.py
|
broadinstitute/gnomadjs
|
00da72cdc2cb0753f822c51456ec15147c024a1d
|
[
"MIT"
] | 13 |
2020-05-01T13:03:54.000Z
|
2022-02-28T13:12:57.000Z
|
import argparse
import datetime
import glob
import os
import string
import sys
import typing
from deployctl.config import config
from deployctl.shell import kubectl, get_most_recent_tag, image_exists, get_k8s_deployments
KUSTOMIZATION_TEMPLATE = """---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../../base
commonLabels:
deployment: '{deployment_name}'
nameSuffix: '-{deployment_name}'
images:
- name: gnomad-reads-server
newName: {reads_server_image_repository}
newTag: '{reads_server_tag}'
- name: gnomad-reads-api
newName: {reads_api_image_repository}
newTag: '{reads_api_tag}'
"""
def deployments_directory() -> str:
path = os.path.realpath(os.path.join(os.path.dirname(__file__), "../../manifests/reads/deployments"))
if not os.path.exists(path):
os.makedirs(path)
return path
def list_deployments() -> None:
print("Local configurations")
print("====================")
paths = reversed(sorted(glob.iglob(f"{deployments_directory()}/*/kustomization.yaml"), key=os.path.getmtime))
for path in paths:
print(os.path.basename(os.path.dirname(path)))
print()
print("Cluster deployments")
print("===================")
for deployment in get_k8s_deployments("component=gnomad-reads"):
print(deployment[len("gnomad-reads-") :])
def create_deployment(name: str, reads_server_tag: str = None, reads_api_tag: str = None) -> None:
if not name:
name = datetime.datetime.now().strftime("%Y%m%d-%H%M")
else:
allowed_characters = set(string.ascii_lowercase) | set(string.digits) | {"-"}
if set(name).difference(allowed_characters):
raise ValueError(f"invalid deployment name '{name}'")
if name == "latest":
raise ValueError("'latest' cannot be used for a deployment name")
deployment_directory = os.path.join(deployments_directory(), name)
if os.path.exists(deployment_directory):
raise RuntimeError(f"deployment '{name}' already exists")
if reads_server_tag:
if not image_exists(config.reads_server_image_repository, reads_server_tag):
raise RuntimeError(f"could not find image {config.reads_server_image_repository}:{reads_server_tag}")
else:
reads_server_tag = get_most_recent_tag(config.reads_server_image_repository)
print(f"No server tag provided, using most recent ({reads_server_tag})")
if reads_api_tag:
if not image_exists(config.reads_api_image_repository, reads_api_tag):
raise RuntimeError(f"could not find image {config.reads_api_image_repository}:{reads_api_tag}")
else:
reads_api_tag = get_most_recent_tag(config.reads_api_image_repository)
print(f"No API tag provided, using most recent ({reads_api_tag})")
os.makedirs(deployment_directory)
with open(os.path.join(deployment_directory, "kustomization.yaml"), "w") as kustomization_file:
kustomization = KUSTOMIZATION_TEMPLATE.format(
deployment_name=name,
reads_server_image_repository=config.reads_server_image_repository,
reads_server_tag=reads_server_tag,
reads_api_image_repository=config.reads_api_image_repository,
reads_api_tag=reads_api_tag,
)
kustomization_file.write(kustomization)
print(f"configured deployment '{name}'")
def apply_deployment(name: str) -> None:
deployment_directory = os.path.join(deployments_directory(), name)
if not os.path.exists(deployment_directory):
raise RuntimeError(f"no configuration for deployment '{name}'")
kubectl(["apply", "-k", deployment_directory])
def delete_deployment(name: str, clean: bool = False) -> None:
deployment_directory = os.path.join(deployments_directory(), name)
if os.path.exists(deployment_directory):
kubectl(["delete", "-k", deployment_directory])
if clean:
clean_deployment(name)
else:
create_deployment(name)
delete_deployment(name, clean=True)
def clean_deployment(name: str) -> None:
deployment_directory = os.path.join(deployments_directory(), name)
os.remove(os.path.join(deployment_directory, "kustomization.yaml"))
os.rmdir(deployment_directory)
def main(argv: typing.List[str]) -> None:
parser = argparse.ArgumentParser(prog="deployctl")
subparsers = parser.add_subparsers()
list_parser = subparsers.add_parser("list")
list_parser.set_defaults(action=list_deployments)
create_parser = subparsers.add_parser("create")
create_parser.set_defaults(action=create_deployment)
create_parser.add_argument("--name")
create_parser.add_argument("--server-tag", dest="reads_server_tag")
create_parser.add_argument("--api-tag", dest="reads_api_tag")
apply_parser = subparsers.add_parser("apply")
apply_parser.set_defaults(action=apply_deployment)
apply_parser.add_argument("name")
delete_parser = subparsers.add_parser("delete")
delete_parser.set_defaults(action=delete_deployment)
delete_parser.add_argument("name")
delete_parser.add_argument("--clean", action="store_true")
clean_parser = subparsers.add_parser("clean")
clean_parser.set_defaults(action=clean_deployment)
clean_parser.add_argument("name")
args = parser.parse_args(argv)
if "action" not in args:
parser.print_usage()
sys.exit(1)
action = args.action
del args.action
try:
action(**vars(args))
except Exception as err: # pylint: disable=broad-except
print(f"Error: {err}", file=sys.stderr)
sys.exit(1)
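# Hedged illustration of what KUSTOMIZATION_TEMPLATE renders to; the deployment name, image
# repositories and tags below are invented for the example and are not real configuration.
def render_example_kustomization() -> str:
    return KUSTOMIZATION_TEMPLATE.format(
        deployment_name="demo-20200101",
        reads_server_image_repository="gcr.io/example/gnomad-reads-server",
        reads_server_tag="abc1234",
        reads_api_image_repository="gcr.io/example/gnomad-reads-api",
        reads_api_tag="def5678",
    )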
| 34.175758 | 113 | 0.705622 |
25bdfa008f2a95104a12531524f6faf12c54dfaa
| 1,156 |
py
|
Python
|
dme2sms-readserial.py
|
JanKl/dme2sms-readserial
|
11f9185217a3172b2a3da042a9d4ab6208e66ca5
|
[
"MIT"
] | 2 |
2020-10-05T21:09:51.000Z
|
2021-05-02T20:37:55.000Z
|
dme2sms-readserial.py
|
JanKl/dme2sms-readserial
|
11f9185217a3172b2a3da042a9d4ab6208e66ca5
|
[
"MIT"
] | 1 |
2020-10-05T21:09:31.000Z
|
2020-10-24T14:16:10.000Z
|
dme2sms-readserial.py
|
JanKl/dme2sms-readserial
|
11f9185217a3172b2a3da042a9d4ab6208e66ca5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import serial
import requests
import logging
import logging.handlers
from datetime import datetime
# Configuration
class Config:
def __init__(self):
self.port = '/dev/ttyUSB0'
self.baudRate = 9600
self.httpAddress = 'https://some.url/endpoint'
self.pagerEncoding = 'iso-8859-1'
# End of configuration
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter('%(module)s.%(funcName)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
log.info("dme2sms readserial service started")
config = Config()
def readFromSerial(port, baudRate):
with serial.Serial(port, baudRate) as ser:
while True:
yield ser.readline()
def sendMessageToServer(message):
requests.post(url = config.httpAddress, data = message)
messageString = str(message, config.pagerEncoding, 'ignore')
log.info("Sent '" + messageString + "' to server")
for line in readFromSerial(config.port, config.baudRate):
if not line or line == b'\r\n' or line == b'\x00\r\n': # Ignore empty lines
continue
sendMessageToServer(line)
| 23.591837 | 76 | 0.742215 |
d3632e2704f2dc68dfa8e0c11f00eb6bf47d5229
| 854 |
py
|
Python
|
migrations/versions/3ebd774ccac8_.py
|
nkxavis2907/aws-bank-backend
|
dc6b754b25d6ffe6f445cfac0f6c9bc745455c69
|
[
"MIT"
] | 1 |
2021-09-13T21:05:44.000Z
|
2021-09-13T21:05:44.000Z
|
migrations/versions/3ebd774ccac8_.py
|
nkxavis2907/aws-bank-backend
|
dc6b754b25d6ffe6f445cfac0f6c9bc745455c69
|
[
"MIT"
] | null | null | null |
migrations/versions/3ebd774ccac8_.py
|
nkxavis2907/aws-bank-backend
|
dc6b754b25d6ffe6f445cfac0f6c9bc745455c69
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 3ebd774ccac8
Revises:
Create Date: 2021-09-08 10:58:39.971503
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3ebd774ccac8'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('user_name', sa.String(length=200), nullable=False),
sa.Column('user_birth', sa.DateTime(), nullable=True),
sa.Column('user_balance', sa.Float(), nullable=True),
sa.PrimaryKeyConstraint('user_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('users')
# ### end Alembic commands ###
| 24.4 | 66 | 0.679157 |
9fa16601331a4d15f932fd86f30221f5b9a102c0
| 531 |
py
|
Python
|
Python/Buch_ATBS/Teil_1/Kapitel_06_Stringbearbeitung/08_unsicherer_passwortsafe_cmd_argument.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | null | null | null |
Python/Buch_ATBS/Teil_1/Kapitel_06_Stringbearbeitung/08_unsicherer_passwortsafe_cmd_argument.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | 6 |
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
Python/Buch_ATBS/Teil_1/Kapitel_06_Stringbearbeitung/08_unsicherer_passwortsafe_cmd_argument.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Copies the password of the user passed via the command-line argument
from pyperclip import copy, paste
import sys
if len(sys.argv) < 2 or len(sys.argv[1]) < 3:
print('Eingabe Ungültig')
print('Ausführen mit: "file.py username"')
sys.exit()
PASS={'login' : 'User',
'passw' : 'helloworld123',
'beila' : '12345'}
user=sys.argv[1]
if user == PASS['login']:
copy(PASS['passw'])
print('Passwort für den User', PASS['login'], 'wurde kopiert')
else:
print('Dieser Account ist nicht vorhanden')
| 25.285714 | 76 | 0.6629 |
4cc7c7dbf47998843ac4f588bf176345b7853efd
| 135 |
py
|
Python
|
python/python_new/Python 3/untitled2xx.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_new/Python 3/untitled2xx.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_new/Python 3/untitled2xx.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import numpy as np
a=np.array([[3,6],[7,5]])
b=np.array([[2,5],[3,9]])
c=a+b
d=a*b
e=a/b
print(c)
print(d)
print(e)
f=a.dot(b)
print(f)
| 12.272727 | 25 | 0.577778 |
e23c199873b5109cb354456ae7445e719b05b8eb
| 4,592 |
py
|
Python
|
listings/chapter05/maze.py
|
SaschaKersken/Daten-Prozessanalyse
|
370f07a75b9465329deb3671adbfbef8483f76f6
|
[
"Apache-2.0"
] | 2 |
2021-09-20T06:16:41.000Z
|
2022-01-17T14:24:43.000Z
|
listings/chapter05/maze.py
|
SaschaKersken/Daten-Prozessanalyse
|
370f07a75b9465329deb3671adbfbef8483f76f6
|
[
"Apache-2.0"
] | null | null | null |
listings/chapter05/maze.py
|
SaschaKersken/Daten-Prozessanalyse
|
370f07a75b9465329deb3671adbfbef8483f76f6
|
[
"Apache-2.0"
] | null | null | null |
import math
from node_search import get_path, dfs, bfs, a_star
# The example maze
maze_source = """+++++++++
+S +
+ +++++ +
+ +
+ ++ ++ +
+ +
+ +++++ +
+ G+
+++++++++"""
# Parse the string into a nested array, with error handling
def parse_maze(maze_str):
maze = []
lines = maze_str.splitlines()
length = -1
start_location = ()
goal_location = ()
for line_index, line in enumerate(lines):
if length == -1:
length = len(line)
else:
if length != len(line):
raise ValueError(f"Alle Zeilen benötigen die Länge {length}.")
cells = list(line)
for char_index, char in enumerate(cells):
if char not in ['+', ' ', 'S', 'G']:
raise ValueError(f"Ungültiges Zeichen {char}.")
if char == 'S':
start_location = (line_index, char_index)
elif char == 'G':
goal_location = (line_index, char_index)
maze.append(cells)
if ((not start_location) or (not goal_location)):
raise ValueError("Start und/oder Ziel fehlt.")
return (maze, start_location, goal_location)
# Find successors
def find_successors(maze, line, column):
successors = []
    # successor below?
    if line < len(maze) - 1 and maze[line + 1][column] != '+':
        successors.append((line + 1, column))
    # successor above?
    if line > 0 and maze[line - 1][column] != '+':
        successors.append((line - 1, column))
    # successor to the right?
    if column < len(maze[0]) - 1 and maze[line][column + 1] != '+':
        successors.append((line, column + 1))
    # successor to the left?
if column > 0 and maze[line][column - 1] != '+':
successors.append((line, column - 1))
return successors
# Print the maze, optionally with a path
def print_maze(maze, start_location, goal_location, path = None):
    # if a path is given, mark all of its positions
    if path:
        for location in path:
            if location != start_location and location != goal_location:
                maze[location[0]][location[1]] = "X"
    # do the actual printing
    for line in maze:
        print("".join(line))
    # if a path is given, undo the markers again
if path:
for location in path:
if location != start_location and location != goal_location:
maze[location[0]][location[1]] = " "
# Parse the maze string to get the maze, start and goal positions
maze, start_location, goal_location = parse_maze(maze_source)
# Run depth-first search
goal = dfs(
start_location,
lambda location: location == goal_location,
lambda location: find_successors(maze, location[0], location[1])
)
print()
print("Tiefensuche:")
# Print the solution if one exists
if (goal):
print_maze(maze, start_location, goal_location, get_path(goal))
else:
print("Keine Lösung gefunden.")
# Run breadth-first search
goal = bfs(
start_location,
lambda location: location == goal_location,
lambda location: find_successors(maze, location[0], location[1])
)
print()
print("Breitensuche:")
# Print the solution if one exists
if (goal):
print_maze(maze, start_location, goal_location, get_path(goal))
else:
print("Keine Lösung gefunden.")
# Euclidean distance as heuristic
def euclidian_distance(location1, location2):
rows = location1[0] - location2[0]
cols = location1[1] - location2[1]
return math.sqrt(rows ** 2 + cols ** 2)
# Run A*
goal = a_star(
start_location,
lambda location: location == goal_location,
lambda location: find_successors(maze, location[0], location[1]),
lambda location: euclidian_distance(location, goal_location)
)
print()
print("A*-Suche:")
# Print the solution if one exists
if (goal):
print_maze(maze, start_location, goal_location, get_path(goal))
else:
print("Keine Lösung gefunden.")
# Manhattan distance as heuristic
def manhattan_distance(location1, location2):
rows = abs(location1[0] - location2[0])
cols = abs(location1[1] - location2[1])
return rows + cols
# Run A*
goal = a_star(
start_location,
lambda location: location == goal_location,
lambda location: find_successors(maze, location[0], location[1]),
lambda location: manhattan_distance(location, goal_location)
)
print()
print("A*-Suche mit Manhattan-Abstand:")
# Print the solution if one exists
if (goal):
print_maze(maze, start_location, goal_location, get_path(goal))
else:
print("Keine Lösung gefunden.")
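# Small worked example of the two heuristics defined above (illustration only): for the cell
# pair (1, 1) -> (7, 7) the Euclidean estimate is sqrt(36 + 36) ≈ 8.49, the Manhattan one 6 + 6 = 12.
print()
print("Heuristic comparison for (1, 1) -> (7, 7):")
print("Euclidean:", round(euclidian_distance((1, 1), (7, 7)), 2))
print("Manhattan:", manhattan_distance((1, 1), (7, 7)))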
| 31.452055 | 78 | 0.649826 |
3950284be59a6034e25ef5f74d6f7dda95755788
| 2,885 |
py
|
Python
|
quant/example/ex_bithumb.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 7 |
2017-10-22T15:00:09.000Z
|
2019-09-19T11:45:43.000Z
|
quant/example/ex_bithumb.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 1 |
2018-01-19T16:19:40.000Z
|
2018-01-19T16:19:40.000Z
|
quant/example/ex_bithumb.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 5 |
2017-12-11T15:10:29.000Z
|
2018-12-21T17:40:58.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from quant import config
from quant.api.bithumb import PrivateClient
from quant.brokers._bithumb import Bithumb
"""test api client"""
# client = PrivateClient(config.Bithumb_API_KEY, config.Bithumb_SECRET_TOKEN)
# print(client.account())
# res = client.balances('bch')
# if res:
# print(str(res))
# else:
# print('balance failed')
'''test sell order'''
# res = client.sell(currency='btc', price=1500000, amount=0.0001)
# if res:
# print('place order success======>' + str(res))
# else:
# print('place order failed')
'''test get orders'''
# 1510642305378, 1510642337397, 1510642076674
# order_id = '1510646372110'
# currency = 'btc'
# order_type = 'ask'
# res = client.get_order(order_id=order_id, currency=currency, order_type=order_type)
# if res:
# print('get orders success======>' + str(res))
# else:
# print('get orders failed')
# get information about a filled (completed) order
# order_id = '1510646372110'
# currency = 'btc'
# order_type = 'ask'
# res = client.order_detail(currency=currency, order_id=order_id, order_type=order_type)
# if res:
# print('order detail success======>' + str(res))
# else:
# print('order detail failed')
"""test broker"""
broker = Bithumb(pair_code='btc', api_key=config.Bithumb_API_KEY, api_secret=config.Bithumb_SECRET_TOKEN)
'''test buy order'''
# price = 8200000.000000
# amount = 0.0029
# order_id, order = broker.buy_limit(amount=amount, price=price)
# if order_id:
# print('broker buy order id: %s' % order_id)
# if order:
# print('broker buy order info: ' + str(order))
# else:
# print('broker buy order failed')
'''test sell order'''
# price = 8500000
# amount = 0.001
# order_id, order = broker.sell_limit(amount=amount, price=price)
# if order_id:
# print('broker sell order id: %s' % order_id)
# if order:
# print('broker sell order info: ' + str(order))
# else:
# print('broker sell order failed')
'''test cancel order'''
# order_id = '1510819655981'
# order_type = 'ask'
# res = broker.cancel_order(order_id=order_id, order_type=order_type)
# print('broker cancel res: ' + str(res))
'''test get order'''
# order_id = '1510819655981'
# order_type = 'ask'
# res = broker.get_order(order_id=order_id, order_type=order_type)
# if res:
# print('broker get order: ' + str(res))
# else:
# print('broker get order failed')
'''test balance, note: switch pair_code accordingly'''
# broker.get_balances()
'''test order detail'''
# order_id = '1510819655981'
# order_type = 'ask'
# res = broker.order_detail(order_id=order_id, order_type=order_type)
# if res:
# print('broker order detail success: ' + str(res))
# else:
# print('broker order detail failed')
'''test order detail'''
# order_id = '1510819181758'
# order_type = 'ask'
# print(broker.get_deal_amount(order_id=order_id, order_type=order_type))
'''test ticker'''
print(broker.get_ticker())
| 26.227273 | 105 | 0.67591 |
ff066a26a64ad2687d5033abef542176438209dd
| 678 |
py
|
Python
|
user/migrations/0015_auto_20201214_1544.py
|
hhdMrLion/Product-System
|
e870225ab10c32688a87426d5943d922c47c4404
|
[
"MIT"
] | 1 |
2021-06-18T03:03:42.000Z
|
2021-06-18T03:03:42.000Z
|
user/migrations/0015_auto_20201214_1544.py
|
hhdMrLion/Product-System
|
e870225ab10c32688a87426d5943d922c47c4404
|
[
"MIT"
] | null | null | null |
user/migrations/0015_auto_20201214_1544.py
|
hhdMrLion/Product-System
|
e870225ab10c32688a87426d5943d922c47c4404
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.16 on 2020-12-14 07:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0014_auto_20201208_1402'),
]
operations = [
migrations.AddField(
model_name='user',
name='professional',
field=models.CharField(blank=True, max_length=32, null=True, verbose_name='职称'),
),
migrations.AlterField(
model_name='user',
name='power',
field=models.SmallIntegerField(choices=[(1, '业务'), (4, '经理'), (3, '商务'), (2, '生产')], default=1, verbose_name='权限'),
),
]
| 28.25 | 128 | 0.551622 |
20d6255d4e75c908e9d39f8a6c64ba8b7d1b9907
| 1,091 |
py
|
Python
|
Chapter10_Cython/2_CythonCode/tests/test_clipping.py
|
tomex74/UdemyPythonPro
|
b4b83483fa2d3337a2860d53ff38e68eb38b3ac4
|
[
"MIT"
] | null | null | null |
Chapter10_Cython/2_CythonCode/tests/test_clipping.py
|
tomex74/UdemyPythonPro
|
b4b83483fa2d3337a2860d53ff38e68eb38b3ac4
|
[
"MIT"
] | null | null | null |
Chapter10_Cython/2_CythonCode/tests/test_clipping.py
|
tomex74/UdemyPythonPro
|
b4b83483fa2d3337a2860d53ff38e68eb38b3ac4
|
[
"MIT"
] | null | null | null |
import random
from timeit import Timer
import numpy as np
import fastvector
v = fastvector.VectorND([random.random() for _ in range(100_000)])
a = np.array([random.random() for _ in range(100_000)])
# Timing test
import_string = \
'''from __main__ import v, a
import fastvector
import numpy as np'''
python_timer = Timer(
'fastvector.python_clip_vector(v, -1, 1, v)',
setup=import_string)
naive_cython_timer = Timer(
'fastvector.naive_cython_clip_vector(v, -1, 1, v)',
setup=import_string)
cython_timer = Timer(
'fastvector.cython_clip_vector(v, -1, 1, v)',
setup=import_string)
numpy_timer = Timer(
'np.clip(a, -1, 1, a)',
setup=import_string)
num_runs = 100
print('fastvector.python_clip_vector')
print(sum(python_timer.repeat(number=num_runs)) / num_runs)
print('fastvector.naive_cython_clip_vector')
print(sum(naive_cython_timer.repeat(number=num_runs)) / num_runs)
print('fastvector.cython_clip_vector')
print(sum(cython_timer.repeat(number=num_runs)) / num_runs)
print('np.clip')
print(sum(numpy_timer.repeat(number=num_runs)) / num_runs)
| 25.372093 | 66 | 0.738772 |
45a4ddc78880b3450d4187d6ddca3f4938ef2639
| 305 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v8_6/rename_bom_update_tool.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v8_6/rename_bom_update_tool.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v8_6/rename_bom_update_tool.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
import frappe
def execute():
frappe.delete_doc_if_exists("DocType", "BOM Replace Tool")
frappe.reload_doctype("BOM")
frappe.db.sql("update tabBOM set conversion_rate=1 where conversion_rate is null or conversion_rate=0")
frappe.db.sql("update tabBOM set set_rate_of_sub_assembly_item_based_on_bom=1")
| 43.571429 | 104 | 0.809836 |
b3035710e338b1926443a86f99d63286fcb58c3a
| 1,801 |
py
|
Python
|
Packs/ShiftManagement/Scripts/TimeToNextShift/TimeToNextShift.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/ShiftManagement/Scripts/TimeToNextShift/TimeToNextShift.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/ShiftManagement/Scripts/TimeToNextShift/TimeToNextShift.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from CommonServerPython import *
def get_time_to_next_shift(roles):
today_week_day = datetime.today().weekday()
# transform python weekday to demisto shift weekday(monday in python is 0 and in demisto is 1)
today_week_day = 0 if today_week_day == 6 else today_week_day + 1
for role in roles:
shifts = role.get('shifts') or []
for shift in shifts:
shift_from_day = shift.get('fromDay')
shift_to_day = shift.get('toDay')
if shift_from_day <= today_week_day <= shift_to_day:
# get the time when the shift starts
delta = shift_from_day - today_week_day
shift_from = datetime.today() + timedelta(days=delta)
shift_from = shift_from.replace(minute=shift.get('fromMinute'), hour=shift.get('fromHour'), second=0)
# get the time when the shift ends
delta = shift_to_day - today_week_day
shift_to = datetime.today() + timedelta(days=delta)
shift_to = shift_to.replace(minute=shift.get('toMinute'), hour=shift.get('toHour'), second=0)
if shift_from < datetime.today() < shift_to:
# found the current shift
diff = shift_to - datetime.today()
return round(diff.total_seconds())
return 0
def main():
get_roles_response = demisto.executeCommand('getRoles', {})
if is_error(get_roles_response):
demisto.error(f'Failed to get roles: {str(get_error(get_roles_response))}')
else:
roles = get_roles_response[0]['Contents']
widget = [{'name': '', 'data': [get_time_to_next_shift(roles)]}]
return_results(json.dumps(widget))
if __name__ in ('__builtin__', 'builtins', '__main__'):
main()
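# Illustration only: the weekday remapping used in get_time_to_next_shift() above turns
# Python's Monday=0 ... Sunday=6 into the shift convention Sunday=0, Monday=1, ... Saturday=6.
def _shift_weekday_example(python_weekday):
    return 0 if python_weekday == 6 else python_weekday + 1  # maps [0..6] to [1, 2, 3, 4, 5, 6, 0]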
| 39.152174 | 117 | 0.618545 |
b35ea33bb31f5c99845d6bab422837444312204d
| 1,368 |
py
|
Python
|
readwriteexample/com/aaron/writeexample.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
readwriteexample/com/aaron/writeexample.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | 2 |
2021-03-25T22:00:07.000Z
|
2022-01-20T15:51:48.000Z
|
readwriteexample/com/aaron/writeexample.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""write showcase"""
__author__ = "aaron.qiu"
import sys
import pprint
def write_file():
"""
r Open file for reading
w Open file for writing (will truncate file)
    b binary mode
r+ open file for reading and writing
a+ open file for reading and writing (appends to end)
w+ open file for reading and writing (truncates files)
"""
# Filename to write
filename = "test.txt"
# Open the file with writing permission
myfile = open(filename, 'w')
# Write a line to the file
myfile.write('Written with Python\n')
# Close the file
myfile.close()
def append_file():
# Filename to append
filename = "test.txt"
# The 'a' flag tells Python to keep the file contents
# and append (add line) at the end of the file.
myfile = open(filename, 'a')
# Add the line
myfile.write('Written with Python\n')
# Close the file
myfile.close()
# Filename to append
filename = "test.txt"
# The 'a' flag tells Python to keep the file contents
# and append (add line) at the end of the file.
myfile = open(filename, 'a')
# Add the line
myfile.write('Written with Python\n')
# Close the file
myfile.close()
if __name__ == "__main__":
write_file()
append_file()
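# Quick self-contained check of the 'w' (truncate) vs 'a' (append) modes listed in the docstring
# above, using a throwaway file in the temp directory instead of test.txt (illustration only).
import os
import tempfile
demo_path = os.path.join(tempfile.gettempdir(), "writeexample_demo.txt")
with open(demo_path, 'w') as demo:   # 'w' truncates the file
    demo.write("first line\n")
with open(demo_path, 'a') as demo:   # 'a' appends to the end
    demo.write("second line\n")
with open(demo_path, 'r') as demo:
    print(demo.read().splitlines())  # ['first line', 'second line']
os.remove(demo_path)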
| 22.42623 | 59 | 0.605263 |
2fc403114f40114464ae167a39771c44bbaf3760
| 628 |
py
|
Python
|
gestionNeuroLab_Panel_de_Control/migrations/0001_initial.py
|
AnuTor/UniNeuroLab
|
5825f440d4663650f038083f3da05229cc5ada4f
|
[
"Apache-2.0"
] | 1 |
2021-01-09T01:20:45.000Z
|
2021-01-09T01:20:45.000Z
|
gestionNeuroLab_Panel_de_Control/migrations/0001_initial.py
|
AnuTor/UniNeuroLab
|
5825f440d4663650f038083f3da05229cc5ada4f
|
[
"Apache-2.0"
] | 1 |
2021-01-09T00:53:55.000Z
|
2021-01-09T00:53:55.000Z
|
gestionNeuroLab_Panel_de_Control/migrations/0001_initial.py
|
AnuTor/UniNeuroLab
|
5825f440d4663650f038083f3da05229cc5ada4f
|
[
"Apache-2.0"
] | 1 |
2021-01-07T23:57:28.000Z
|
2021-01-07T23:57:28.000Z
|
# Generated by Django 2.2.17 on 2021-01-21 20:06
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='APP_TableroControl',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('appsNombre', models.CharField(max_length=300, verbose_name='Nombre del APPs')),
('appsNumero', models.IntegerField(null=True, verbose_name='ID de APPs')),
],
),
]
| 27.304348 | 114 | 0.603503 |
2baf7459854f9574c18850fca7760e6475756b46
| 8,893 |
py
|
Python
|
test/test_npu/test_network_ops/super_copy_contiguous_test.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/super_copy_contiguous_test.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/super_copy_contiguous_test.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
def create_common_tensor_new(item, minValue, maxValue):
dtype = item[0]
format = item[1]
shape = item[2]
input1 = np.random.uniform(minValue, maxValue, shape[0]).astype(dtype)
cpu_input = torch.from_numpy(input1)
npu_input = torch.from_numpy(input1).to("npu")
if format != -1:
npu_input = npu_input.npu_format_cast(format)
return cpu_input, npu_input
# Optimized view ops contain transpose, permute, narrow, indexing, select, and unfold
class SuperContiguous(TestCase):
def test_BroadcastToContiguous(self, device):
        dtype_list = [np.float16, np.float32, np.int32, np.int8, np.uint8]
format_list = [0]
shape_list = [
[[1], [5]],
[[ 1, 2], [3, 2]],
[[1, 2, 1], [1, 2, 3]],
]
shape_format = [
[i, j, k] for i in dtype_list for j in format_list for k in shape_list
]
broadcast_time = 0
broadcast_time_exper = 10
for item in shape_format:
a1_cpu, a1_npu = create_common_tensor_new(item, 0, 100)
broadcast_start = time.time()
npu_out1 = a1_npu.expand(item[2][1]).contiguous()
broadcast_end = time.time()
broadcast_time += (broadcast_end - broadcast_start)
cpu_out1 = a1_cpu.expand(item[2][1]).contiguous()
self.assertRtolEqual(npu_out1.to("cpu").numpy(), cpu_out1.numpy())
print("------------------------Broadcast---------------------------")
print("Broadcast to contiguous uses: %.2f s " %(broadcast_time))
print("Typical time required: 7-10s, Ops: broadcastToD")
self.assertTrue(broadcast_time < broadcast_time_exper)
def test_PermuteToContiguous(self, device):
dtype_list = [np.bool, np.int32, np.float16, np.float32, np.int8, np.uint8, np.int64]
format_list = [0]
shape_list = [[2, 6, 9, 4]]
shape_format = [
[i, j, k] for i in dtype_list for j in format_list for k in shape_list
]
permute_time = 0
permute_time_exper = 5
for item in shape_format:
a1_cpu, a1_npu = create_common_tensor(item, 0, 100)
permute_start = time.time()
npu_out1 = a1_npu.permute(1,0,2,3).contiguous()
npu_out2 = a1_npu.permute(2,3,0,1).contiguous()
permute_end = time.time()
permute_time += (permute_end - permute_start)
cpu_out1 = a1_cpu.permute(1,0,2,3).contiguous()
cpu_out2 = a1_cpu.permute(2,3,0,1).contiguous()
self.assertRtolEqual(npu_out1.to("cpu").numpy(), cpu_out1.numpy())
self.assertRtolEqual(npu_out2.to("cpu").numpy(), cpu_out2.numpy())
print("------------------------Permute---------------------------")
print("Permute to contiguous uses: %.2f s " %(permute_time))
print("Typical time required: 2-5s, Ops: TransposeD")
self.assertTrue(permute_time < permute_time_exper)
def test_NarrowToContiguous(self, device):
# AssertionError: required dtype in [np.bool, np.int32, np.float16, np.float32, np.int8, np.uint8, np.int64]
# However, considering the dtypes that Transdata supports, only np.float16, np.float32 are tested.
dtype_list = [np.float16, np.float32]
format_list = [0, 3, 29, 4]
shape_list = [[2, 32, 16, 9]]
shape_format = [
[i, j, k] for i in dtype_list for j in format_list for k in shape_list
]
narrow_time = 0
narrow_time_exper = 3
for item in shape_format:
a1_cpu, a1_npu = create_common_tensor(item, 0, 100)
# for narrow with step=1 -- SliceD
narrow_start = time.time()
npu_out1 = a1_npu[:,:16,:,:].contiguous()
npu_out2 = a1_npu[:,:,:16,:].contiguous()
narrow_end = time.time()
narrow_time += (narrow_end - narrow_start)
cpu_out1 = a1_cpu[:,:16,:,:].contiguous()
cpu_out2 = a1_cpu[:,:,:16,:].contiguous()
self.assertRtolEqual(npu_out1.to("cpu").numpy(), cpu_out1.numpy())
self.assertRtolEqual(npu_out2.to("cpu").numpy(), cpu_out2.numpy())
print("------------------------Narrow---------------------------")
print("Narrow to contiguous uses: %.2f s"%(narrow_time))
print("Typical time required: 1-3s, Ops: SliceD")
self.assertTrue(narrow_time < narrow_time_exper)
def test_IndexingToContiguous(self, device):
dtype_list = [np.float16, np.float32, np.int8, np.int32, np.uint8, np.bool]
format_list = [0]
shape_list = [[10,32,16,9]]
shape_format = [
[i, j, k] for i in dtype_list for j in format_list for k in shape_list
]
indexing_time = 0
indexing_time_exper = 4
for item in shape_format:
a1_cpu, a1_npu = create_common_tensor(item, 0, 100)
# for indexing with step>1 -- StridedSliceD
indexing_start = time.time()
npu_out1 = a1_npu[::2,::4,1:16:5,:].contiguous()
indexing_end = time.time()
indexing_time += (indexing_end - indexing_start)
cpu_out1 = a1_cpu[::2,::4,1:16:5,:].contiguous()
self.assertRtolEqual(npu_out1.to("cpu").numpy(), cpu_out1.numpy())
print("------------------------Indexing---------------------------")
print("Indexing to contiguous uses: %.2f s"%(indexing_time))
print("Typical time required: 1-4s, Ops: StridedSliceD")
self.assertTrue(indexing_time < indexing_time_exper)
def test_SelectToContiguous(self, device):
dtype_list = [np.float16, np.float32]
format_list = [0, 3, 29, 4]
shape_list = [[2,32,16,9]]
shape_format = [
[i, j, k] for i in dtype_list for j in format_list for k in shape_list
]
select_time = 0
select_time_exper = 22
for item in shape_format:
a1_cpu, a1_npu = create_common_tensor(item, 0, 100)
for dim in range(1,len(item[2])):
select_start = time.time()
npu_out = a1_npu.select(dim,1).contiguous()
select_end = time.time()
select_time += (select_end - select_start)
cpu_out = a1_cpu.select(dim,1).contiguous()
self.assertRtolEqual(npu_out.to("cpu").numpy(), cpu_out.numpy())
print("------------------------Select---------------------------")
print("Select to contiguous uses: %.2f s "%(select_time))
print("Typical time required: 18-22s, Ops: SliceD")
self.assertTrue(select_time < select_time_exper)
def test_UnfoldToContiguous(self, device):
dtype_list = [np.float16, np.float32, np.int8, np.int32, np.uint8, np.bool]
format_list = [0]
shape_list = [[6, 9, 4]]
shape_format = [
[i, j, k] for i in dtype_list for j in format_list for k in shape_list
]
unfold_time = 0
unfold_time_exper = 5
for item in shape_format:
a1_cpu, a1_npu = create_common_tensor(item, 0, 100)
for dim in range(1, len(item[2]) - 1):
unfold_start = time.time()
npu_out = a1_npu.unfold(dim,3,3).contiguous()
unfold_end = time.time()
unfold_time += (unfold_end - unfold_start)
cpu_out = a1_cpu.unfold(dim,3,3).contiguous()
self.assertRtolEqual(npu_out.to("cpu").numpy(), cpu_out.numpy())
print("------------------------Unfold---------------------------")
print("Unfold to contiguous uses: %.2f s " %(unfold_time))
print("Typical time required: 2-5s, Ops: TransposeD [optional:SliceD]")
self.assertTrue(unfold_time < unfold_time_exper)
instantiate_device_type_tests(SuperContiguous, globals(), except_for='cpu')
if __name__ == "__main__":
run_tests()
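# Illustrative numpy analogy (not part of the NPU tests above): a transpose-style view is
# non-contiguous until it is materialised, which is what every .contiguous() call checks.
def numpy_contiguity_analogy():
    view = np.arange(24).reshape(2, 3, 4).transpose(1, 0, 2)
    return view.flags['C_CONTIGUOUS'], np.ascontiguousarray(view).flags['C_CONTIGUOUS']  # (False, True)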
| 43.169903 | 116 | 0.583155 |
92019799eda54d4e67a9299deb5eb6c66639432b
| 2,687 |
py
|
Python
|
official/nlp/cpm/src/lr_schedule.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/nlp/cpm/src/lr_schedule.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/nlp/cpm/src/lr_schedule.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Learning rate schedule."""
import numpy as np
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn.learning_rate_schedule import LearningRateSchedule, WarmUpLR
class DecayLR(LearningRateSchedule):
"""
    Implementation of a decay learning rate scheduler.
Args:
learning_rate (float): Initial learning rate.
warmup_steps (int): Warmup steps.
        end_iter (int): A value used to calculate the decayed learning rate.
Returns:
np.ndarray, learning rate of each step.
"""
def __init__(self, learning_rate, warmup_steps, end_iter):
super(DecayLR, self).__init__()
self.learning_rate = learning_rate
self.warmup_steps = warmup_steps
self.end_iter = end_iter
self.cast = P.Cast()
def construct(self, global_step):
warmup_percent = self.cast((self.end_iter - (global_step - self.warmup_steps)), mstype.float32) / self.end_iter
return self.learning_rate * warmup_percent
class CPMLearningRate(LearningRateSchedule):
"""
    Implementation of a warmup-polynomial decay learning rate scheduler.
Args:
learning_rate (float): The initial value of learning rate.
warmup_steps (int): The warm up steps of learning rate.
end_steps (int): A value used to calculate decayed learning rate.
Returns:
Tensor. The learning rate value for the current step.
"""
def __init__(self, learning_rate, warmup_steps, end_steps):
super(CPMLearningRate, self).__init__()
self.warmup_lr = WarmUpLR(learning_rate, warmup_steps)
self.decay_lr = DecayLR(learning_rate, warmup_steps, end_steps)
self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32))
def construct(self, global_step):
if global_step < self.warmup_steps:
lr = self.warmup_lr(global_step)
else:
lr = self.decay_lr(global_step)
return lr
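# Plain-Python sketch of the schedule shape the two classes above implement (illustration only;
# the warmup leg assumes WarmUpLR's linear ramp): ramp up to `lr` over `warmup_steps`, then
# decay as lr * (end_steps - (step - warmup_steps)) / end_steps.
def reference_schedule(step, lr=1e-3, warmup_steps=100, end_steps=1000):
    if step < warmup_steps:
        return lr * step / warmup_steps
    return lr * (end_steps - (step - warmup_steps)) / end_steps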
| 36.310811 | 119 | 0.689617 |
5b3932f6acbb687ac535f9990ac5f392d8a35f5c
| 1,572 |
py
|
Python
|
05 Hardwarenutzung/Fallbeispiele/src/hindernisdetektor.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | null | null | null |
05 Hardwarenutzung/Fallbeispiele/src/hindernisdetektor.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | null | null | null |
05 Hardwarenutzung/Fallbeispiele/src/hindernisdetektor.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | 1 |
2020-10-10T20:24:05.000Z
|
2020-10-10T20:24:05.000Z
|
#! ./env/bin/python3
#encoding=utf-8
# Copyright (C) 2020 Dennis Schulmeister-Zimolong
#
# E-Mail: [email protected]
# Webseite: https://www.wpvs.de
#
# This source code is licensed under a
# Creative Commons Attribution 4.0 International License
"""
Example of driving the KY-032 obstacle detector and the KY-011 two-colour LED from the
X40 sensor kit. If the detector does not detect an obstacle, the LED lights up green,
otherwise red.
"""
import time
import RPi.GPIO as GPIO
DETECTOR_GPIO = 11
LED_GREEN_GPIO = 24
LED_RED_GPIO = 23
def on_detector_change(*args, **kwargs):
"""
    Callback function that is called on every change of the obstacle detector.
    Depending on the detected event, the two-colour LED is switched on or
    off here.
"""
obstacle_detected = GPIO.input(DETECTOR_GPIO)
print ("Hindernis erkannt: %s" % (not obstacle_detected))
GPIO.output(LED_GREEN_GPIO, not obstacle_detected)
GPIO.output(LED_RED_GPIO, obstacle_detected)
if __name__ == "__main__":
try:
        # initialize the GPIO pins
GPIO.setmode(GPIO.BCM)
GPIO.setup(DETECTOR_GPIO, GPIO.IN)
GPIO.setup(LED_RED_GPIO, GPIO.OUT)
GPIO.setup(LED_GREEN_GPIO, GPIO.OUT)
GPIO.add_event_detect(DETECTOR_GPIO, GPIO.BOTH, bouncetime=15)
GPIO.add_event_callback(DETECTOR_GPIO, on_detector_change)
on_detector_change()
        # endless loop so that the program keeps running
while True:
time.sleep(10)
except KeyboardInterrupt:
pass
GPIO.cleanup()
| 27.578947 | 82 | 0.715013 |
45e7ff6920127c4e819acf0a4b9559fb39d7f357
| 23 |
py
|
Python
|
scripts.py
|
Jerobeam/Mobile-Apps-Project
|
1bae1b9ce9fd57442053e93695abcd2b85983673
|
[
"Apache-2.0"
] | null | null | null |
scripts.py
|
Jerobeam/Mobile-Apps-Project
|
1bae1b9ce9fd57442053e93695abcd2b85983673
|
[
"Apache-2.0"
] | null | null | null |
scripts.py
|
Jerobeam/Mobile-Apps-Project
|
1bae1b9ce9fd57442053e93695abcd2b85983673
|
[
"Apache-2.0"
] | null | null | null |
def get_int():
    return 1
| 7.666667 | 11 | 0.695652 |
942bc93df4d24e12395912b80081ecf7fec24cc8
| 1,715 |
py
|
Python
|
devices/relay.py
|
MiaranaDIY/Salamigal
|
44ac98fa6463d46694e1f9343a0ebc788e7a88f8
|
[
"MIT"
] | 3 |
2017-08-02T12:26:34.000Z
|
2021-01-13T01:06:26.000Z
|
devices/relay.py
|
MiaranaDIY/Salamigal
|
44ac98fa6463d46694e1f9343a0ebc788e7a88f8
|
[
"MIT"
] | null | null | null |
devices/relay.py
|
MiaranaDIY/Salamigal
|
44ac98fa6463d46694e1f9343a0ebc788e7a88f8
|
[
"MIT"
] | 3 |
2017-02-14T22:10:00.000Z
|
2021-01-02T14:26:43.000Z
|
#Setup logging
import logging
import logging.config
logging.config.fileConfig('logging.conf')
# create logger
logger = logging.getLogger('root')
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM) # set board mode to Broadcom
GPIO.setwarnings(False)
import devices.device as device
import time
import traceback
class Relay(device.Device):
#Global variable
instant_count = 0
def __init__(self, PIN):
#Increment instant counter
Relay.instant_count += 1
#Relay state, 0 = off, 1 = on
self.pin = PIN
#Setup relay pin to output mode
GPIO.setup(self.pin, GPIO.OUT)
self.state = GPIO.input(self.pin)
try:
if(self.state == 1):
self.started_time = int(time.time())
else:
self.started_time = 0
except Exception as err:
self.started_time = 0
pass
#load watt for power usage calculation and relay property
self.uid = 'R'+str(Relay.instant_count)
self.load_watt = 0
self.name = 'Relay'
self.location = 'Location'
self.group = 'Relay'
self.streaming = 0
#Turn relay on or off
def turn(self, s):
try:
s = int(s)
if(s == 0 or s == 1):
GPIO.output(self.pin, s)
self.state = GPIO.input(self.pin)
#set ontime for statistic
if(s == 1):
self.started_time = time.time()
else:
self.started_time = 0
return s
return None
except Exception as err:
logging.error("%s", traceback.format_exc())
return None
| 30.087719 | 65 | 0.544606 |
04d943ca662fceb67e78226d43855550c4ea8eb7
| 768 |
py
|
Python
|
backend/apps/iamstudent/migrations/0015_auto_20200409_1620.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | 2 |
2020-03-28T13:56:39.000Z
|
2020-03-29T10:16:12.000Z
|
backend/apps/iamstudent/migrations/0015_auto_20200409_1620.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | 76 |
2020-03-27T21:53:04.000Z
|
2020-03-30T20:27:43.000Z
|
backend/apps/iamstudent/migrations/0015_auto_20200409_1620.py
|
n-hackert/match4healthcare
|
761248c27b49e568c545c643a72eac9a040649d7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-04-09 16:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('iamstudent', '0014_emailtohospital_send_date'),
]
operations = [
migrations.AlterField(
model_name='emailgroup',
name='message',
field=models.TextField(default='', max_length=10000),
),
migrations.AlterField(
model_name='emailtohospital',
name='message',
field=models.TextField(default='', max_length=10000),
),
migrations.AlterField(
model_name='emailtosend',
name='message',
field=models.TextField(default='', max_length=10000),
),
]
| 26.482759 | 65 | 0.583333 |
b6ce6d1d45e5895ee0b225fe6fcd9313721c5609
| 4,147 |
py
|
Python
|
registry/migrations/0001_initial.py
|
KSIUJ/erc-backend
|
a78a6ee85c2865c8d25c15f40dc72fe32ba4bfd3
|
[
"MIT"
] | null | null | null |
registry/migrations/0001_initial.py
|
KSIUJ/erc-backend
|
a78a6ee85c2865c8d25c15f40dc72fe32ba4bfd3
|
[
"MIT"
] | 5 |
2020-10-10T00:21:37.000Z
|
2021-09-22T18:01:46.000Z
|
registry/migrations/0001_initial.py
|
KSIUJ/erc-backend
|
a78a6ee85c2865c8d25c15f40dc72fe32ba4bfd3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-10-19 21:22
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import registry.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('given_name', models.CharField(max_length=128)),
('surname', models.CharField(max_length=128)),
('email', models.CharField(max_length=128, unique=True, validators=[django.core.validators.EmailValidator()])),
('card_id', registry.models.NullCharField(blank=True, default=None, max_length=20, null=True, unique=True)),
('discord_id', registry.models.NullCharField(blank=True, default=None, max_length=128, null=True, unique=True)),
('ldap_uid', registry.models.NullCharField(blank=True, default=None, max_length=64, null=True, unique=True)),
('do_not_contact', models.BooleanField(default=False)),
('tshirt', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Period',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_date', models.DateField()),
('end_date', models.DateField()),
],
),
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, unique=True)),
],
),
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('client_id', models.CharField(max_length=32, unique=True)),
('secret', models.CharField(max_length=64)),
('name', models.CharField(max_length=64, unique=True)),
('enabled', models.BooleanField(default=True)),
('permissions', models.ManyToManyField(blank=True, to='registry.Role')),
],
),
migrations.CreateModel(
name='AuthEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event_type', models.CharField(choices=[('CARD', 'Card'), ('LDAP', 'LDAP'), ('DISCORD', 'Discord'), ('EMAIL', 'Email')], default='CARD', max_length=10)),
('value', models.CharField(max_length=64)),
('success', models.BooleanField(default=False)),
('date', models.DateTimeField(auto_now_add=True)),
('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='auth_events', to='registry.Client')),
('member', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='auth_events', to='registry.Member')),
],
),
migrations.CreateModel(
name='Membership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fee_paid', models.BooleanField(default=False)),
('member', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to='registry.Member')),
('period', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to='registry.Period')),
('roles', models.ManyToManyField(related_name='memberships', to='registry.Role')),
],
options={
'unique_together': {('member', 'period')},
},
),
]
| 49.963855 | 170 | 0.588136 |
b6dcc4c9f5bc10c3024ca4c1131591fcb24ca28e
| 3,394 |
py
|
Python
|
src/onegov/activity/models/occasion_date.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/activity/models/occasion_date.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/activity/models/occasion_date.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import sedate
from datetime import time
from enum import IntEnum
from onegov.core.orm import Base
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.types import UUID, UTCDateTime
from sqlalchemy import event
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import Session
class DAYS(IntEnum):
half = 2**1
full = 2**2
many = 2**3
@staticmethod
def has(value, mask):
return value & mask > 0 if value else False
@staticmethod
def compute(localized_start, localized_end, total_seconds):
hours = total_seconds / 3600
if hours <= 6:
return DAYS.half
elif hours <= 24:
start, end = localized_start, localized_end
# if a less than 24 hours long activity ends on another day, the
# end time is relevant. An end before 06:00 indicates that this
# is an activity that lasts very long. An end after 06:00 is an
# multi-day activity.
if start.date() != end.date() and end.time() >= time(6, 0):
return DAYS.many
return DAYS.full
else:
return DAYS.many
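# Worked examples for DAYS.compute (added for illustration):
# * 5 hours within one day                    -> DAYS.half  (hours <= 6)
# * 20 hours ending the next day before 06:00 -> DAYS.full  (it merely runs late into the night)
# * 20 hours ending the next day after 06:00  -> DAYS.many  (it genuinely spans days)
# * anything longer than 24 hours             -> DAYS.many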
class OccasionDate(Base, TimestampMixin):
""" A single start/end date of an occurrence (which may have multiple
date ranges).
"""
__tablename__ = 'occasion_dates'
def __hash__(self):
return hash(self.id)
#: the internal id of this occasion date
id = Column(Integer, primary_key=True)
#: Timezone of the occasion date
timezone = Column(Text, nullable=False)
#: The start of the range
start = Column(UTCDateTime, nullable=False)
#: The end of the range
end = Column(UTCDateTime, nullable=False)
#: The associated occasion
occasion_id = Column(UUID, ForeignKey('occasions.id'), nullable=False)
__table_args__ = (
CheckConstraint('"start" <= "end"', name='start_before_end'),
)
@property
def localized_start(self):
return sedate.to_timezone(self.start, self.timezone)
@property
def localized_end(self):
return sedate.to_timezone(self.end, self.timezone)
@property
def active_days(self):
for dt in sedate.dtrange(self.localized_start, self.localized_end):
yield dt.date().toordinal()
@property
def weekdays(self):
return list({
dt.weekday() for dt in sedate.dtrange(
self.localized_start, self.localized_end
)
})
@hybrid_property
def duration_in_seconds(self):
return (self.end - self.start).total_seconds()
@hybrid_property
def duration(self):
return DAYS.compute(
self.localized_start,
self.localized_end,
self.duration_in_seconds)
def overlaps(self, other):
return sedate.overlaps(self.start, self.end, other.start, other.end)
# Changes to the dates need to be propagated to the parent occasion
# so it can update its aggregated values.
@event.listens_for(Session, 'before_flush')
def before_flush(session, context, instances):
for obj in session.dirty:
if isinstance(obj, OccasionDate):
obj.occasion.on_date_change()
| 28.049587 | 76 | 0.659104 |
ed948d44bff9f65e30e8412b56d90950380640e4
| 103 |
py
|
Python
|
gemtown/copyrights/apps.py
|
doramong0926/gemtown
|
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
|
[
"MIT"
] | null | null | null |
gemtown/copyrights/apps.py
|
doramong0926/gemtown
|
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
|
[
"MIT"
] | 5 |
2020-09-04T20:13:39.000Z
|
2022-02-17T22:03:33.000Z
|
gemtown/copyrights/apps.py
|
doramong0926/gemtown
|
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class CopyrightsConfig(AppConfig):
name = 'gemtown.copyrights'
| 17.166667 | 34 | 0.776699 |
1e59890ebfcb2bc48c45d44f9e63ec7c95d78873
| 1,007 |
py
|
Python
|
Data Analytics/Collecting Data on AWS/Querying Sales Data stream using Kinesis Analytics/data.py
|
djanshuman/Amazon-Web-Services-Learning-AWS-
|
d0c716f95e0d101705f29bca7b474e513fae8852
|
[
"MIT"
] | null | null | null |
Data Analytics/Collecting Data on AWS/Querying Sales Data stream using Kinesis Analytics/data.py
|
djanshuman/Amazon-Web-Services-Learning-AWS-
|
d0c716f95e0d101705f29bca7b474e513fae8852
|
[
"MIT"
] | null | null | null |
Data Analytics/Collecting Data on AWS/Querying Sales Data stream using Kinesis Analytics/data.py
|
djanshuman/Amazon-Web-Services-Learning-AWS-
|
d0c716f95e0d101705f29bca7b474e513fae8852
|
[
"MIT"
] | null | null | null |
import boto3
import json
import random
import time
import uuid
# Set a stream name for later
stream_name = 'sales'
# Create the Kinesis client
kinesis = boto3.client('kinesis', region_name='us-east-1')
# Create the Stream
kinesis.create_stream(
StreamName=stream_name,
ShardCount=1
)
def put_to_stream():
record = {
'sale_id': str(uuid.uuid4()),
'timestamp': str(int(time.time())),
'product_sold': random.choice(
[
'socks',
'jacket',
'sweatpants',
'scarf',
'shirt',
'pajamas',
'jeans',
'raincoat',
]
),
'num_items': random.choice([1, 1, 2, 2, 3, 4, 5]),
}
kinesis.put_record(
StreamName=stream_name,
Data=json.dumps(record),
PartitionKey='a-partition'
)
# Check that the stream is created before running this
while True:
put_to_stream()
time.sleep(.3)
| 21.891304 | 58 | 0.541212 |
1ee5b06c2f2649b8eb66f40fd0ca05f77bc99045
| 12,522 |
py
|
Python
|
skil/jobs.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 23 |
2018-09-19T13:34:27.000Z
|
2022-02-14T09:49:35.000Z
|
skil/jobs.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 33 |
2018-10-18T07:58:05.000Z
|
2019-05-16T08:24:12.000Z
|
skil/jobs.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 11 |
2018-10-21T18:58:57.000Z
|
2022-02-14T09:49:36.000Z
|
import skil_client
class JobConfiguration(object):
"""JobConfiguration
A SKIL job configuration collects all data needed to set up and run a SKIL Job.
SKIL currently has inference and training jobs, each come with their respective
configuration.
# Arguments:
skil_model: a `skil.Model` instance
compute_resource: `skil.resources.compute.ComputeResource` instance, created before running a job.
storage_resource: `skil.resources.storage.StorageResource` instance, created before running a job.
output_path: string with path to folder in which job output should be stored.
data_set_provider_class: name of the class to be used as `DataSetProvider` in SKIL
is_multi_data_set: boolean, whether data set uses `MultiDataSet` interface.
verbose: boolean, log level. Set True for detailed logging.
"""
# TODO: provide a smart default for output_path relative to input data or model path.
def __init__(self, skil_model, compute_resource, storage_resource,
output_path, data_set_provider_class,
is_multi_data_set, verbose):
self.model = skil_model
self.compute_id = compute_resource.resource_id
self.storage_id = storage_resource.resource_id
self.output_path = output_path
self.dsp = data_set_provider_class
self.mds = is_multi_data_set
self.verbose = verbose
class InferenceJobConfiguration(JobConfiguration):
"""InferenceJobConfiguration
Configuration for a SKIL inference job. On top of what you need to specify for a base JobConfiguration,
you need to set the batch size for the model as well.
# Arguments:
skil_model: a `skil.Model` instance
batch_size: int, data batch size to run inference with on the model.
        compute_resource: `skil.resources.compute.ComputeResource` instance, created before running a job.
        storage_resource: `skil.resources.storage.StorageResource` instance, created before running a job.
output_path: string with path to folder in which job output should be stored.
data_set_provider_class: name of the class to be used as `DataSetProvider` in SKIL
is_multi_data_set: boolean, whether data set uses `MultiDataSet` interface.
verbose: boolean, log level. Set True for detailed logging.
"""
# TODO signature to aim for: (model, data_path (what), data_format (how), storage_id (where), compute_id)
# TODO: data format may eventually even be inferred automatically. no reason not to. we can do so for models.
# TODO: we could even consider *setting* compute and storage resources for workspaces
# or experiments. No need to specify this every time.
# TODO batch_size should either be known to the model or the data, I don't believe this.
# TODO KILL DSP!!!1ONE
# TODO There must be a way to hide is_multi_data_set
def __init__(self, skil_model, batch_size, compute_resource, storage_resource, output_path,
data_set_provider_class,
is_multi_data_set=False,
verbose=False):
super(InferenceJobConfiguration, self).__init__(
skil_model, compute_resource,
storage_resource, output_path, data_set_provider_class,
is_multi_data_set, verbose)
self.batch_size = batch_size
class TrainingJobConfiguration(JobConfiguration):
# TODO update doc string
"""TrainingJobConfiguration
Configuration for a SKIL training job. On top of what you need to specify for a base JobConfiguration,
you need to set the number of epochs to train for, a (distributed) training configuration and provide
information about how to evaluate your model.
# Arguments:
skil_model: a `skil.Model` instance
num_epochs: number of epochs to train
eval_type: evaluation type
eval_data_set_provider_class: name of the `DataSetProvider` class
        compute_resource: `skil.resources.compute.ComputeResource` instance, created before running a job.
        storage_resource: `skil.resources.storage.StorageResource` instance, created before running a job.
output_path: string with path to folder in which job output should be stored.
data_set_provider_class: name of the class to be used as `DataSetProvider` in SKIL
is_multi_data_set: boolean, whether data set uses `MultiDataSet` interface.
ui_url: url of a previously started DL4J training UI
verbose: boolean, log level. Set True for detailed logging.
"""
# TODO signature to aim for: (model, num_epochs, data_path, eval_data_path, eval_types,
# data_format, storage_id, compute_id)
# TODO what if we want to split data on the go. what about validation data? cross validation?
# current concept seems insufficient to cover this properly.
# TODO model_path, config_path, model config path... why both? should be able to guess this
# TODO model_history_url=None, can we infer this from experiment?
# TODO model_history_id=None, this is the workspace id
# TODO model_instance_id=None, is this alternatively to model/config path? don't get it.
# TODO: eval_type make this a proper class instead of (or additionally to) strings.
# TODO: allow multiple eval metrics?!
# TODO: the training master config should be deconstructed. maybe provide this to the job.run(...) as argument.
# TODO: user should just be *handed* a ui, not take care of a $%$%! URL.
def __init__(self, skil_model,
num_epochs,
eval_type,
eval_data_set_provider_class, # good lord
compute_resource, storage_resource,
output_path,
data_set_provider_class,
is_multi_data_set=False,
ui_url=None,
verbose=False):
super(TrainingJobConfiguration, self).__init__(
skil_model, compute_resource,
storage_resource, output_path, data_set_provider_class,
is_multi_data_set, verbose)
self.num_epochs = num_epochs
self.eval_dsp = eval_data_set_provider_class
self.eval_type = eval_type
self.ui_url = ui_url
class Job(object):
"""Job
Basic SKIL job abstraction. You can run a job, refresh its status,
download its output file once completed, and delete a Job.
"""
def __init__(self):
self.job_id = None
self.run_id = None
self.skil = None
self.status = None
def run(self):
if self.job_id and self.skil:
response = self.skil.api.run_a_job(self.job_id)
self.run_id = response.run_id
else:
raise Exception(
'Can not run job, either skil server or job_id non-existent.')
def refresh_status(self):
if self.job_id and self.skil:
response = self.skil.api.refresh_job_status(self.job_id)
self.status = response.status
print(self.status)
else:
raise Exception(
'Can not refresh job status, either skil server or job_id non-existent.')
def delete(self):
self.skil.api.delete_job_by_id(self.job_id)
def download_output_file(self, local_path):
download_path = skil_client.DownloadOutputFileRequest(
local_download_path=local_path)
self.skil.api.download_job_output_file(
job_id=self.job_id,
download_output_file_request=download_path
)
class TrainingJob(Job):
"""TrainingJob
Initialize and run a SKIL training job.
# Arguments:
training_config: `TrainingJobConfiguration` instance
distributed_config: `DistributedConfiguration` instance
job_id: None by default, provide this ID for existing jobs.
create: boolean, whether to create a new job or retrieve an existing one.
"""
# TODO make it so that if a distributed config is provided,
# SKIL will run your model on Spark. Otherwise it will carry out regular training
# on provided resources.
def __init__(self, skil, training_config, distributed_config, job_id=None, create=True):
super(TrainingJob, self).__init__()
self.skil = skil
self.training_config = training_config
self.tm = distributed_config.to_json()
if create:
training_create_job_request = skil_client.CreateJobRequest(
compute_resource_id=self.training_config.compute_id,
storage_resource_id=self.training_config.storage_id,
job_args=self._training_job_args(),
output_file_name=self.training_config.output_path
)
response = self.skil.api.create_job(
"TRAINING", training_create_job_request)
else:
response = self.skil.api.get_job_by_id(job_id)
assert response.job_id == job_id
self.job_id = response.job_id
self.run_id = response.run_id
self.status = response.status
def _training_job_args(self):
tc = self.training_config
tm = self.tm
inference = "-i false "
output = "-o {} ".format(tc.output_path)
num_epochs = "--numEpochs {} ".format(tc.num_epochs)
model_path = "-mo {} ".format(tc.model.model_path)
dsp = "-dsp {} ".format(tc.dsp)
eval_dsp = "--evalDataSetProviderClass {} ".format(tc.eval_dsp)
eval_type = "--evalType {} ".format(tc.eval_type)
tm = "-tm {} ".format(tm)
mds = "--multiDataSet {} ".format(_bool_to_string(tc.mds))
verbose = "--verbose {} ".format(_bool_to_string(tc.verbose))
args = inference + output + num_epochs + model_path + dsp + \
eval_dsp + eval_type + tm + mds + verbose
print(args)
return args
class InferenceJob(Job):
"""InferenceJob
Initialize and run a SKIL inference job.
# Arguments:
inference_config: `InferenceJobConfiguration` instance
job_id: None by default, provide this ID for existing jobs.
create: boolean, whether to create a new job or retrieve an existing one.
"""
def __init__(self, skil, inference_config, job_id=None, create=True):
super(InferenceJob, self).__init__()
self.skil = skil
self.inference_config = inference_config
if create:
inference_create_job_request = skil_client.CreateJobRequest(
compute_resource_id=self.inference_config.compute_id,
storage_resource_id=self.inference_config.storage_id,
job_args=self._inference_job_args(),
output_file_name=self.inference_config.output_path
)
response = self.skil.api.create_job(
"INFERENCE", inference_create_job_request)
else:
response = self.skil.api.get_job_by_id(job_id)
assert response.job_id == job_id
self.job_id = response.job_id
self.run_id = response.run_id
self.status = response.status
def _inference_job_args(self):
ic = self.inference_config
inference = "-i true "
output = "-o {} ".format(ic.output_path)
batch_size = "--batchSize {} ".format(ic.batch_size)
model_path = "-mo {} ".format(ic.model.model_path)
dsp = "-dsp {} ".format(ic.dsp)
mds = "--multiDataSet {} ".format(_bool_to_string(ic.mds))
verbose = "--verbose {} ".format(_bool_to_string(ic.verbose))
return inference + output + batch_size + model_path + dsp + \
mds + verbose
def get_all_jobs(skil):
jobs = skil.api.get_all_jobs()
return [get_job_by_id(skil, j.job_id) for j in jobs]
def get_job_by_id(skil, job_id):
job = skil.api.get_job_by_id(job_id)
job_type = job.job_type
if job_type.lower() == 'training':
return TrainingJob(
skil=skil, training_config=None, distributed_config=None, job_id=job_id, create=False
)
elif job_type.lower() == 'inference':
return InferenceJob(
skil=skil, inference_config=None, job_id=job_id, create=False
)
else:
raise ValueError(
'job_id does not correspond to training or inference job')
def delete_job_by_id(skil, job_id):
skil.api.delete_job_by_id(job_id)
def _bool_to_string(bool):
return "true" if bool else "false"
| 39.752381 | 115 | 0.667785 |
7834500c6231c267611ee9eca3901b3236379dea
| 1,842 |
py
|
Python
|
db/ops/trews-model/deploy_model.py
|
cloud-cds/cds-stack
|
d68a1654d4f604369a071f784cdb5c42fc855d6e
|
[
"Apache-2.0"
] | 6 |
2018-06-27T00:09:55.000Z
|
2019-03-07T14:06:53.000Z
|
db/ops/trews-model/deploy_model.py
|
cloud-cds/cds-stack
|
d68a1654d4f604369a071f784cdb5c42fc855d6e
|
[
"Apache-2.0"
] | 3 |
2021-03-31T18:37:46.000Z
|
2021-06-01T21:49:41.000Z
|
db/ops/trews-model/deploy_model.py
|
cloud-cds/cds-stack
|
d68a1654d4f604369a071f784cdb5c42fc855d6e
|
[
"Apache-2.0"
] | 3 |
2020-01-24T16:40:49.000Z
|
2021-09-30T02:28:55.000Z
|
# deploy model parameters to opsdx
# please make sure the file names are correct before deployment
# please run this script on either dev or prod controller
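# Hedged invocation sketch (illustration only; host, port, credentials and database name are
# placeholders, not values from this repository):
#   export db_user=opsdx db_host=localhost db_port=5432 db_name=opsdx_dev db_password=secret
#   python deploy_model.py [db_name]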
import os, sys
from sqlalchemy import create_engine, types, text
import pandas as pd
DB_CONN_STR = 'postgresql://{}:{}@{}:{}/{}'
user = os.environ['db_user']
host = os.environ['db_host']
db = os.environ['db_name']
if len(sys.argv) == 2:
db = sys.argv[1]
print("db_name: " + db)
port = os.environ['db_port']
password = os.environ['db_password']
DB_CONN_STR = DB_CONN_STR.format(user, password, host, port, db)
feature_weights_file = "lactateConstrFeatureWeights.csv"
feature_weights_dbtable = "trews_feature_weights"
scaler_file = "lactateConstrStdScale.csv"
scaler_dbtable = "trews_scaler"
parameters_file = "trews_parameters.csv"
parameters_dbtable = "trews_parameters"
# load parameters
feature_weights = pd.read_csv(feature_weights_file)
print(feature_weights.head())
scaler = pd.read_csv(scaler_file)
print(scaler.head())
parameters = pd.read_csv(parameters_file)
print(parameters.head())
# write to opsdx database
engine = create_engine(DB_CONN_STR)
conn = engine.connect()
conn.execute(text("delete from %s;" % feature_weights_dbtable).execution_options(autocommit=True))
conn.close()
feature_weights.to_sql(feature_weights_dbtable, engine, if_exists='append', index=False)
print("feature weights updated")
conn = engine.connect()
conn.execute(text("delete from %s;" % scaler_dbtable).execution_options(autocommit=True))
conn.close()
scaler.to_sql(scaler_dbtable, engine, if_exists='append', index=False)
print("scaler updated")
conn = engine.connect()
conn.execute(text("delete from %s;" % parameters_dbtable).execution_options(autocommit=True))
conn.close()
parameters.to_sql(parameters_dbtable, engine, if_exists='append', index=False)
print("parameters updated")
| 32.315789 | 98 | 0.775244 |
153f8bf6af9aedf1816bf59f9c152b6b9c4eb60e
| 2,199 |
py
|
Python
|
LifeMeter.py
|
VizStars7/LifeMeter
|
f67abf0a8f897788b4e1072685a0a74cd54d797d
|
[
"Apache-2.0"
] | null | null | null |
LifeMeter.py
|
VizStars7/LifeMeter
|
f67abf0a8f897788b4e1072685a0a74cd54d797d
|
[
"Apache-2.0"
] | null | null | null |
LifeMeter.py
|
VizStars7/LifeMeter
|
f67abf0a8f897788b4e1072685a0a74cd54d797d
|
[
"Apache-2.0"
] | null | null | null |
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00@\x00\x00\x00s\xd0\x00\x00\x00d\x00d\x01l\x00m\x01Z\x01\x01\x00d\x02Z\x02e\x01d\x03\x83\x01\x01\x00e\x03e\x02\x83\x01\x01\x00e\x04d\x04\x83\x01Z\x05e\x01d\x03\x83\x01\x01\x00e\x03d\x05e\x05\x83\x02\x01\x00e\x01d\x03\x83\x01\x01\x00e\x03d\x06\x83\x01\x01\x00e\x06e\x04d\x07\x83\x01\x83\x01Z\x07e\x08e\x07d\x08\x14\x00\x83\x01Z\te\x08e\x07d\t\x14\x00\x83\x01Z\ne\x08e\x07d\n\x14\x00\x83\x01Z\x0be\x08e\x07d\x0b\x14\x00\x83\x01Z\x0ce\x01d\x03\x83\x01\x01\x00e\x03e\x05d\x0cd\re\td\x0ed\x0fe\nd\x10d\x0fe\x0bd\x11d\x0fe\x0cd\x12\x83\x0e\x01\x00d\x13Z\re\x01d\x03\x83\x01\x01\x00e\x03e\r\x83\x01\x01\x00e\x03e\x05\x83\x01\x01\x00d\x14S\x00)\x15\xe9\x00\x00\x00\x00)\x01\xda\x05sleepa\xc0\x01\x00\x00\n\x1b[33;1m\n _ _ __ __ _ _\n | | (_)/ _|___ | \\/ |___| |_ ___ _ _\n | |__| | _/ -_) | |\\/| / -_) _/ -_) \'_|\n |____|_|_| \\___| |_| |_\\___|\\__\\___|_|\n\n \x1b[35;1m\n=====================================================\n| |\n| C0DED BY : Heartz Brotherhood | Mr.Vizstars |\n| |\n=====================================================\n\n\xe9\x01\x00\x00\x00z\x1c\x1b[34;1m Masukan Nama Anda: z%Cek Seberapa Lama Kamu Hidup Di Duniaz&\x1b[33;1m Sekarang Masukan Umur Kamu!!! z\x02->id\x01\x00\x00i\xec\r\x00\x00i\x10\x87\x00\x00i\xa5J\x05\x00z\x11kamu hidup selama\xfa\x01:Z\x04hari\xfa\x01,Z\x03JamZ\x05menitZ\x05detikz\xd4\n\n\x1b[35;1m\n ___ _ __ __ _ _\n / __|___ _ __ __ _| |_ | \\/ |__ _| |_(_)\n | (__/ -_) \'_ \\/ _` | _| | |\\/| / _` | _| |\n \\___\\___| .__/\\__,_|\\__| |_| |_\\__,_|\\__|_|\n |_|\n\nN)\x0e\xda\x04timer\x02\x00\x00\x00\xda\x06banner\xda\x05print\xda\x05input\xda\x01i\xda\x03intZ\x03age\xda\x03strZ\x03DayZ\x04hour\xda\x03minZ\x03sec\xda\x01b\xa9\x00r\x0f\x00\x00\x00r\x0f\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s&\x00\x00\x00\x0c\x10\x04\x01\x08\x01\x08\x02\x08\x01\x08\x01\n\x01\x08\x01\x08\x01\x0c\x01\x0c\x01\x0c\x01\x0c\x01\x0c\x01\x08\x01"\x0b\x04\x01\x08\x01\x08\x01'))
| 1,099.5 | 2,184 | 0.606185 |
bf1842aa4e5062df138a77a5ea239b50705eed97
| 1,656 |
py
|
Python
|
research/recommend/Fat-DeepFFM/src/lr_generator.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/recommend/Fat-DeepFFM/src/lr_generator.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/recommend/Fat-DeepFFM/src/lr_generator.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""lr"""
import numpy as np
def _generate_linear_lr(lr_init, lr_end, total_steps, warmup_steps, useWarmup=False):
""" warmup lr"""
lr_each_step = []
if useWarmup:
for i in range(0, total_steps):
lrate = lr_init + (lr_end - lr_init) * i / warmup_steps
if i >= warmup_steps:
lrate = lr_end - (lr_end - lr_init) * (i - warmup_steps) / (total_steps - warmup_steps)
lr_each_step.append(lrate)
else:
for i in range(total_steps):
lrate = lr_end - (lr_end - lr_init) * i / total_steps
lr_each_step.append(lrate)
return lr_each_step
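# Worked example (illustration only): with lr_init=0.0, lr_end=1e-4, total_steps=100,
# warmup_steps=10 and useWarmup=True, step 5 yields 0.0 + (1e-4 - 0.0) * 5 / 10 = 5e-5;
# once the warm-up ends, the rate decays linearly from 1e-4 back towards lr_init over
# the remaining 90 steps.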
def get_warmup_linear_lr(lr_init, lr_end, total_steps, warmup_steps=10):
lr_each_step = _generate_linear_lr(lr_init, lr_end, total_steps, warmup_steps)
lr_each_step = np.array(lr_each_step).astype(np.float32)
return lr_each_step
if __name__ == '__main__':
lr = get_warmup_linear_lr(0, 1e-4, 1000)
print(lr.size)
print(lr)
| 36 | 103 | 0.65942 |
171f8c597f1eff7b890f85f7a9bf2e1f0e6d5e2d
| 244 |
py
|
Python
|
7-assets/past-student-repos/data_struct_and_algo-master/rotateImage.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/data_struct_and_algo-master/rotateImage.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/data_struct_and_algo-master/rotateImage.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
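# Rotates the square matrix 90 degrees clockwise in place: swap arr[i][j] with arr[j][i]
# (transpose), then reverse each row before printing it.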
def Rotate(arr):
temp=[]
for i in range(len(arr)):
for j in range(0,len(arr)):
if i != j and i < j :
arr[i][j],arr[j][i]=arr[j][i],arr[i][j]
for l in arr:
l.reverse()
print(l)
arr=[[1,2,3],
[4,5,6],
[7,8,9]]
Rotate(arr)
| 13.555556 | 43 | 0.508197 |
e55666b8fa21fb8ab12151e3051f46b0136aa931
| 352 |
py
|
Python
|
___Python/Marco/PythonProj/p06_persistence/m02_json.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Marco/PythonProj/p06_persistence/m02_json.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Marco/PythonProj/p06_persistence/m02_json.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
import json
from p01.m_01 import teilnehmerliste
# "Pickling" / Serialization / Marshalling / Flattening
with open("teilnehmer.json", "w") as datei:
json.dump(teilnehmerliste, datei, default=str)
# "Unpickling" / Deserialization / Unmarshalling
with open("teilnehmer.json", "r") as datei:
liste = json.load(datei)
print(liste)
| 25.142857 | 56 | 0.704545 |
970549b5605ba7a5244d32d28d6a8baf0adecb24
| 5,192 |
py
|
Python
|
src/onegov/town6/theme/town_theme.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/town6/theme/town_theme.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/town6/theme/town_theme.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import os
from collections import OrderedDict
from onegov.foundation6 import BaseTheme
from onegov.core.utils import module_path
HELVETICA = '"Helvetica Neue", Helvetica, Roboto, Arial, sans-serif !default'
ARIAL = 'Arial, sans-serif !default'
VERDANA = 'Verdana, Geneva, sans-serif !default'
COURIER_NEW = '"Courier New", Courier, monospace !default' # monospace
ROBOTO_CONDENSED = '"Roboto Condensed", sans-serif !default'
MERRIWEATHER = 'Merriweather, sans-serif !default'
# "Merriweather","Helvetica Neue",Helvetica,Roboto,Arial,sans-serif
# options editable by the user
user_options = {
'primary-color-ui': '#006fba',
'body-font-family-ui': MERRIWEATHER,
'header-font-family-ui': ROBOTO_CONDENSED
}
default_font_families = {
'Roboto Condensed': ROBOTO_CONDENSED,
'Helvetica': HELVETICA,
'Arial': ARIAL,
'Verdana': VERDANA,
'Courier New': COURIER_NEW,
}
class TownTheme(BaseTheme):
name = 'onegov.town6.foundation'
_force_compile = False
use_flex = True
include_motion_ui = True
@property
def default_options(self):
options = OrderedDict((
# tile images
('tile-image-1', '"../static/homepage-images/tile-1-small.jpg"'),
('tile-image-2', '"../static/homepage-images/tile-2-small.jpg"'),
('tile-image-3', '"../static/homepage-images/tile-3-small.jpg"'),
('tile-image-4', '"../static/homepage-images/tile-4-small.jpg"'),
('tile-image-5', '"../static/homepage-images/tile-5-small.jpg"'),
('tile-image-6', '"../static/homepage-images/tile-6-small.jpg"'),
))
options.update(user_options)
return options
@property
def foundation_styles(self):
return 'global-styles', 'forms', 'typography'
@property
def foundation_components(self):
return (
'button',
'button-group',
'close-button',
'label',
# 'progress-bar',
# 'slider',
# 'switch',
'table',
# 'badge',
'breadcrumbs',
'callout',
'card',
'dropdown',
'pagination',
'tooltip',
'accordion',
'media-object',
'orbit',
'responsive-embed',
'tabs',
'thumbnail',
'menu',
'menu-icon',
'accordion-menu',
'drilldown-menu',
'dropdown-menu',
'off-canvas',
'reveal',
'sticky',
'title-bar',
'top-bar',
)
@property
def pre_imports(self):
imports = [
'foundation-mods',
]
for font_family in self.additional_font_families:
imports.append(font_family)
return imports
@property
def post_imports(self):
"""Our scss code split into various files"""
return [
'custom_mixins',
'typography',
'header',
'org',
'sortable',
'sidebars',
'forms',
'formcode',
'panels',
'sliders',
'org-settings',
'helpers',
'footer',
'chosen',
'news',
'events',
'homepage-tiles',
'tickets',
'user',
'timeline',
'upload',
'files',
'publication_signature',
'image-grid',
'widgets',
'popup',
'fullcalendar',
'alert',
'redactor',
'directories',
'daypicker',
'payment',
'person',
'newsletter',
'search',
'hints',
'allocations',
'homepage',
'progress_indicator',
'healthcheck',
'qrcode',
'leaflet'
]
@property
def extra_search_paths(self):
return super().extra_search_paths + [
module_path('onegov.town6.theme', 'styles'),
self.font_search_path
]
@property
def font_search_path(self):
""" Load fonts of the current theme folder and ignore fonts from
parent applications if OrgTheme is inherited. """
module = self.name.replace('foundation', 'theme')
return module_path(module, 'fonts')
@property
def font_families(self):
families = default_font_families.copy()
families.update(self.additional_font_families)
return families
@property
def additional_font_families(self):
""" Returns the filenames as they are to use as label in the settings
as well as to construct the font-family string.
Only sans-serif fonts are supported by now.
"""
if not os.path.exists(self.font_search_path):
return {}
def fn(n):
return n.split('.')
return {
fn(n)[0]: f'"{fn(n)[0]}", {HELVETICA}' for n in os.listdir(
self.font_search_path) if fn(n)[1] in ('css', 'scss')
}
| 27.913978 | 77 | 0.52369 |
97510af780e47e47e693a532f22bce6b44b9d481
| 3,397 |
py
|
Python
|
nomespoo.py
|
PedroPegado/POO
|
3fbf36d77770b7c03d829b1eb0bd59eb4750a1cf
|
[
"MIT"
] | null | null | null |
nomespoo.py
|
PedroPegado/POO
|
3fbf36d77770b7c03d829b1eb0bd59eb4750a1cf
|
[
"MIT"
] | null | null | null |
nomespoo.py
|
PedroPegado/POO
|
3fbf36d77770b7c03d829b1eb0bd59eb4750a1cf
|
[
"MIT"
] | null | null | null |
import turtle
import time
import random
class Name:
def __init__(self, nome):
self.nome = nome
self.t = turtle.Turtle()
self.shapes = ['arrow', 'turtle', 'circle',
'square', 'triangle', 'classic']
self.colors = ['red', 'blue', 'green',
'yellow', 'black', 'brown', 'pink']
self.speed = [5, 6, 7, 8, 9, 10]
def start(self):
self.restart_position()
self.move_to_start()
self.write_j()
self.write_h()
self.write_o()
self.write_n()
self.write_a(0)
self.write_t()
def restart_position(self):
self.t.setheading(360)
def RGB_me(self):
self.t.speed(random.choice(self.speed))
self.t.shape(random.choice(self.shapes))
self.t.color(random.choice(self.colors))
def move_to_start(self):
self.RGB_me()
self.t.penup()
self.t.setpos(-400, 150)
self.t.pendown()
def write_j(self):
self.RGB_me()
self.t.forward(50)
self.t.backward(100)
self.t.forward(50)
self.t.right(90)
self.t.forward(150)
self.t.right(90)
self.t.forward(25)
self.t.right(90)
self.t.forward(25)
self.t.penup()
self.t.right(45)
self.t.forward(175)
def write_h(self):
self.RGB_me()
self.t.pendown()
self.t.right(135)
self.t.forward(150)
self.t.backward(75)
self.t.left(90)
self.t.forward(50)
self.t.left(90)
self.t.forward(75)
self.t.backward(150)
self.t.right(85)
self.t.penup()
self.t.forward(100)
def write_o(self):
self.RGB_me()
self.t.pendown()
self.t.circle(70)
self.t.right(5)
self.t.penup()
self.t.forward(80)
def write_n(self):
self.RGB_me()
self.t.pendown()
self.t.left(90)
self.t.forward(150)
self.t.right(165)
self.t.forward(160)
self.t.left(165)
self.t.forward(150)
self.t.right(175)
self.t.penup()
self.t.forward(150)
def write_a(self, angulo):
self.RGB_me()
if angulo != 160:
angulo = 170
self.t.pendown()
self.t.left(angulo)
self.t.forward(150)
self.t.right(165)
self.t.forward(150)
self.t.left(180)
self.t.forward(75)
self.t.left(90)
self.t.forward(20)
self.t.backward(20)
self.t.penup()
def write_t(self):
self.RGB_me()
self.t.right(240)
self.t.forward(90)
self.t.pendown()
self.t.left(140)
self.t.forward(145)
self.t.left(90)
self.t.forward(50)
self.t.backward(100)
self.t.penup()
self.t.right(240+20)
self.t.forward(145)
self.write_a(160)
self.t.pendown()
self.t.penup()
self.t.right(240)
self.t.forward(90)
self.t.left(145)
self.t.pendown()
self.t.forward(150)
self.t.right(165)
self.t.forward(160)
self.t.left(165)
self.t.forward(150)
self.t.right(175)
self.t.penup()
self.t.forward(150)
self.start()
nome = input('Digite seu nome: ')
pessoa = Name(nome)
pessoa.start()
| 24.092199 | 58 | 0.516338 |
979b91d17c8eecbc0eb22eacbc64f25f5a05b9e0
| 6,809 |
py
|
Python
|
official_examples/Reinforcement_Learning_Custom_Env_Example_Code/custom_env/snake_env.py
|
RuichunWang/ModelArts-Lab
|
cfa9a853e3a76a21eac2818f055b36978ac2bb69
|
[
"Apache-2.0"
] | 1,045 |
2019-05-09T02:50:43.000Z
|
2022-03-31T06:22:11.000Z
|
official_examples/Reinforcement_Learning_Custom_Env_Example_Code/custom_env/snake_env.py
|
RuichunWang/ModelArts-Lab
|
cfa9a853e3a76a21eac2818f055b36978ac2bb69
|
[
"Apache-2.0"
] | 1,468 |
2019-05-16T00:48:18.000Z
|
2022-03-08T04:12:44.000Z
|
official_examples/Reinforcement_Learning_Custom_Env_Example_Code/custom_env/snake_env.py
|
RuichunWang/ModelArts-Lab
|
cfa9a853e3a76a21eac2818f055b36978ac2bb69
|
[
"Apache-2.0"
] | 1,077 |
2019-05-09T02:50:53.000Z
|
2022-03-27T11:05:32.000Z
|
from collections import deque
import gym
import numpy as np
from gym import spaces, logger
from gym.utils import seeding
class SnakeAction(object):
LEFT = 0
RIGHT = 1
UP = 2
DOWN = 3
class BoardColor(object):
BODY_COLOR = np.array([0, 0, 0], dtype=np.uint8)
FOOD_COLOR = np.array([0, 255, 0], dtype=np.uint8)
SPACE_COLOR = np.array([255, 255, 255], dtype=np.uint8)
class SnakeEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
}
def __init__(self, observation_mode='rgb', energy_consum=False):
self.observation_mode = observation_mode
self.energy_consum = energy_consum
self.width = 42
self.height = 42
self.action_space = spaces.Discrete(4)
if observation_mode == 'rgb':
self.observation_space = spaces.Box(low=0, high=256, shape=(self.width * 2, self.height * 2, 3),
dtype=np.float32)
else:
self.observation_space = spaces.Box(low=0, high=255, shape=(self.width, self.height, 1), dtype=np.uint8)
self.snake = Snake()
self.foods = []
self.n_foods = 5
self.viewer = None
self.np_random = np.random
def set_foods(self, n):
self.n_foods = n
def reset(self):
self.snake.body.clear()
self.foods.clear()
empty_cells = self.get_empty_cells()
empty_cells = self.snake.init(empty_cells, self.np_random)
self.foods = [empty_cells[i] for i in self.np_random.choice(len(empty_cells), self.n_foods, replace=False)]
return self.get_observation()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
snake_tail = self.snake.step(action)
self.snake.reward = 0.
if self.energy_consum:
self.snake.reward -= 0.01
if self.snake.head in self.foods:
self.snake.reward += 1.
self.snake.body.append(snake_tail)
self.foods.remove(self.snake.head)
empty_cells = self.get_empty_cells()
food = empty_cells[self.np_random.choice(len(empty_cells))]
self.foods.append(food)
# snake collided wall
if self.is_collided_wall(self.snake.head):
self.snake.reward -= 1.
self.snake.done = True
# snake bite itself
if self.snake.head in list(self.snake.body)[1:]:
self.snake.reward -= 1.
self.snake.done = True
self.snake.reward = np.clip(self.snake.reward, -1., 1.)
return self.get_observation(), self.snake.reward, self.snake.done, {}
def get_observation(self):
if self.observation_mode == 'rgb':
return self.get_image()
else:
observation = np.zeros((self.width, self.height), dtype=np.uint8)
for x, y in self.snake.body:
try:
observation[x][y] = 100
except:
pass
for food in self.foods:
x, y = food
observation[x][y] = 200
return observation[:, :, None]
def get_image(self):
board_width = 2 * self.width
board_height = 2 * self.height
cell_size = 2
board = Board(board_height, board_width)
for x, y in self.snake.body:
board.fill_cell((x * cell_size, y * cell_size), cell_size, BoardColor.BODY_COLOR)
for food in self.foods:
x, y = food
board.fill_cell((x * cell_size, y * cell_size), cell_size, BoardColor.FOOD_COLOR)
return board.board
def get_empty_cells(self):
empty_cells = [(x, y) for x in range(self.width) for y in range(self.height)]
for cell in self.snake.body:
if cell in empty_cells:
empty_cells.remove(cell)
for food in self.foods:
if food in empty_cells:
empty_cells.remove(food)
return empty_cells
def is_collided_wall(self, head):
x, y = head
if x < 0 or x > (self.width - 1) or y < 0 or y > (self.height - 1):
return True
return False
def render(self, mode='human'):
img = self.get_image()
if mode == 'rgb_array':
return img
elif mode == 'human':
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
return self.viewer.isopen
class Snake(object):
def __init__(self):
self.body = deque()
self.prev_act = None
self.done = False
self.reward = 0.
def step(self, action):
if not self.done:
if not self.is_valid_action(action):
action = self.prev_act
self.prev_act = action
x, y = self.head
if action == SnakeAction.LEFT:
self.body.appendleft((x, y - 1))
if action == SnakeAction.RIGHT:
self.body.appendleft((x, y + 1))
if action == SnakeAction.UP:
self.body.appendleft((x - 1, y))
if action == SnakeAction.DOWN:
self.body.appendleft((x + 1, y))
return self.body.pop()
@property
def head(self):
return self.body[0]
def is_valid_action(self, action):
if len(self.body) == 1:
return True
horizontal_actions = [SnakeAction.LEFT, SnakeAction.RIGHT]
vertical_actions = [SnakeAction.UP, SnakeAction.DOWN]
if self.prev_act in horizontal_actions:
return action in vertical_actions
return action in horizontal_actions
def init(self, empty_cells, np_random):
self.body.clear()
self.done = False
self.reward = 0.
self.prev_act = None
start_head = empty_cells[np_random.choice(len(empty_cells))]
self.body.appendleft(start_head)
empty_cells.remove(start_head)
return empty_cells
class Board(object):
def __init__(self, height, weight):
self.board = np.empty((height, weight, 3), dtype=np.uint8)
self.board[:, :, :] = BoardColor.SPACE_COLOR
def fill_cell(self, vertex, cell_size, color):
x, y = vertex
self.board[x:x + cell_size, y:y + cell_size, :] = color
class SnakeEnvMC(SnakeEnv):
def __init__(self):
super().__init__(observation_mode='rgb')
snake_env = SnakeEnvMC()
if __name__ == '__main__':
ss = SnakeEnv()
ss.reset()
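    # Hedged demo extension (illustration only): step the environment with random actions;
    # step() returns (observation, reward, done, info) exactly as defined above.
    for _ in range(10):
        obs, reward, done, info = ss.step(ss.action_space.sample())
        if done:
            ss.reset()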
| 29.995595 | 116 | 0.575562 |
979e9407221c95d45a3c9bd89de097c649dd4b1c
| 76 |
py
|
Python
|
Sketche/nature_of_code/chapter_1/random2d_test.py
|
kantel/p5
|
2ef14191c35fdb056b44624c6ff0ff764c88cc30
|
[
"MIT"
] | null | null | null |
Sketche/nature_of_code/chapter_1/random2d_test.py
|
kantel/p5
|
2ef14191c35fdb056b44624c6ff0ff764c88cc30
|
[
"MIT"
] | null | null | null |
Sketche/nature_of_code/chapter_1/random2d_test.py
|
kantel/p5
|
2ef14191c35fdb056b44624c6ff0ff764c88cc30
|
[
"MIT"
] | null | null | null |
from pvector import PVector
v = PVector.random2D()
print(v)
print(v.mag())
| 12.666667 | 27 | 0.723684 |
97abcbd448c8ebbaa7a913e2457fd4ba972704c3
| 1,724 |
py
|
Python
|
IJCTF/2021/crypto/ECSign/problem.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
IJCTF/2021/crypto/ECSign/problem.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
IJCTF/2021/crypto/ECSign/problem.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
import hashlib
import random
from Crypto.Util.number import inverse, bytes_to_long, long_to_bytes
from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
from ecdsa import NIST256p
from flag import FLAG
message = b"ECDSA prevents forging messages"
curve = NIST256p
G = curve.generator
n = curve.order
class Signature:
def __init__(self):
self.privkey = random.randrange(n)
self.pubkey = self.privkey * G
self.r = random.randrange(n)
self.s = random.randrange(n)
def sign(self, msg, k):
hsh = int(hashlib.sha256(msg).hexdigest(), 16)
p = k * G
self.r = p.x() % n
self.s = (inverse(k, n) * (hsh + (self.privkey * self.r) % n)) % n
return (self.r, self.s)
def f_sign(self, msg, k):
hsh = int(hashlib.sha256(msg).hexdigest(), 16)
self.s = (inverse(k, n) * (hsh + (self.privkey * self.r) % n)) % n
return (self.r, self.s)
sig = Signature()
k = random.randrange(n)
sig1 = sig.sign(message, k)
sig2 = sig.f_sign(message, k^0xffffffff)
sig3 = sig.f_sign(message, k^0xffffffff00000000)
r = random.randrange(n)
pubkey2 = r*G
sharedkey = r*sig.pubkey
iv = get_random_bytes(16)
key = long_to_bytes(int(sharedkey.x()))
cipher = AES.new(key, AES.MODE_CBC, iv)
ct = cipher.encrypt(pad(FLAG, 16))
print("pubkey="+ str((int(sig.pubkey.x()), int(sig.pubkey.y()))))
print("sig1=" + str(tuple(map(lambda x:int(x), sig1))))
print("sig2=" + str(tuple(map(lambda x:int(x), sig2))))
print("sig3=" + str(tuple(map(lambda x:int(x), sig3))))
print("pubkey2="+ str((int(pubkey2.x()), int(pubkey2.y()))))
print("iv=" + "0x"+str(iv.hex()))
print("ct=" + "0x"+str(ct.hex()))
| 28.733333 | 74 | 0.643271 |
c151dea181eca56694482f171a716c1e6512656b
| 2,022 |
py
|
Python
|
research/cv/vnet/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/vnet/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/vnet/postprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""post process for 310 inference"""
import os
import argparse
import numpy as np
from src.config import vnet_cfg as cfg
from src.dataset import InferImagelist
from src.utils import evaluation
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="postprocess")
parser.add_argument("--result_path", type=str, required=True, help="result files path.")
parser.add_argument("--data_path", type=str, default="./promise", help="Path of dataset, default is ./promise")
parser.add_argument("--split_file_path", type=str, default="./split/eval.csv",
help="Path of dataset, default is ./split/eval.csv")
args = parser.parse_args()
dataInferlist = InferImagelist(cfg, args.data_path, args.split_file_path)
dataManagerInfer = dataInferlist.dataManagerInfer
for i in range(dataInferlist.__len__()):
_, img_id = dataInferlist.__getitem__(i)
result_file = os.path.join(args.result_path, img_id + "_0.bin")
output = np.fromfile(result_file, dtype=np.float32)
output = output.reshape(cfg.VolSize[0], cfg.VolSize[1], cfg.VolSize[2])
print("save predicted label for test '{}'".format(img_id))
dataManagerInfer.writeResultsFromNumpyLabel(output, img_id, '_test', '.mhd')
evaluation(os.path.join(args.data_path, 'gt'), cfg['dirPredictionImage'])
| 48.142857 | 115 | 0.698318 |
a9eb97ef7cc74832e2e53fba6df8ac730e91144b
| 143 |
py
|
Python
|
append_version_to_progname.py
|
slankes/HB-RF-ETH
|
e003aaa189f1fad8965da06b9e73734319d99093
|
[
"Apache-2.0"
] | 21 |
2020-07-16T13:18:13.000Z
|
2022-03-27T16:56:51.000Z
|
append_version_to_progname.py
|
slankes/HB-RF-ETH
|
e003aaa189f1fad8965da06b9e73734319d99093
|
[
"Apache-2.0"
] | 29 |
2020-09-17T14:53:37.000Z
|
2022-03-23T04:35:29.000Z
|
append_version_to_progname.py
|
slankes/HB-RF-ETH
|
e003aaa189f1fad8965da06b9e73734319d99093
|
[
"Apache-2.0"
] | 6 |
2020-12-20T10:33:20.000Z
|
2022-01-16T12:02:21.000Z
|
Import("env")
with open("version.txt") as fp:
version = fp.readline()
env.Replace(PROGNAME="firmware_%s" % version.replace(".", "_"))
| 23.833333 | 67 | 0.636364 |
e71086fd817817e3c98c5fea6f8b2efbf218f7d5
| 2,946 |
py
|
Python
|
marsyas-vamp/marsyas/src/swig/python/modules/marsyas_util.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/swig/python/modules/marsyas_util.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/swig/python/modules/marsyas_util.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from pylab import *
import marsyas
import string
msm = marsyas.MarSystemManager()
# create a MarSystem from a recursive list specification
def create(net):
composite = msm.create("Gain/id") # will be overwritten
if net.__class__ == marsyas.MarSystem:
composite = net
elif len(net) == 2:
composite = msm.create(net[0])
msyslist = map(create,net[1])
msyslist = map(composite.addMarSystem,msyslist)
else:
composite = msm.create(net)
return composite
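# Example specification (illustration only; the "Type/name" strings below follow the usual
# Marsyas convention and are assumptions, not taken from this module):
#   spec = ["Series/playbacknet", ["SoundFileSource/src", "Gain/gain", "AudioSink/dest"]]
#   playbacknet = create(spec)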
# return a dictionary containing the string references to all elements of a list specification
def mar_refs(net, level_predicate="", level="top"):
if level_predicate == None:
level_predicate = ""
out = {}; # This is the output dictionary
next_level = ""
if (len(net) == 2):
if level!="top": # In the top level, I don't add the group specification
next_level = level_predicate+net[0]+"/"
# the problem is that a string is a list and ["Something/name","Something/else"] is a legitimate network
if ([].__class__ == net[1].__class__):
for subnet in net[1]:
out.update(mar_refs(subnet, next_level, "nontop"))
else:
n = net.split("/")
out[n[1]] = level_predicate+net
return out
# convert a realvec to a numpy array
def realvec2array(inrealvec):
outarray = zeros((inrealvec.getCols(), inrealvec.getRows()))
k = 0;
for i in range(0,inrealvec.getCols()):
for j in range(0, inrealvec.getRows()):
outarray[i,j] = inrealvec[k]
k = k + 1
return outarray
# convert a realvec control to a numpy array. By specifying so, eo, st, et
# it is possible to return a subset of the samples and observations.
def control2array(net,cname,so=0,eo=0,st=0,et=0):
net_control = net.getControl(cname)
net_realvec = net_control.to_realvec()
net_array = realvec2array(net_realvec)
if et==0:
et = net_array.shape[0]
if eo==0:
eo = net_array.shape[1]
res_array = net_array.transpose()
res_array = res_array[so:eo,st:et]
if (et != 1):
res_array = flipud(res_array)
return res_array
# convenience function for plotting an array that corresponds
# to a Marsyas realvec
def marplot(data, cmap = 'jet', aspect='None', x_label='Samples',
y_label='Observations',sy=0,ey=0,sx=0,ex=0,
interpolation='bicubic', plot_title = "Marsyas plot"):
if ex==0:
ex = data.shape[0]
if ey==0:
ey = data.shape[1]
if (data.shape[0] > 1) and (data.shape[1] > 1):
imshow(data, cmap=cmap, aspect=aspect, extent=[sy,ey,sx,ex],
interpolation=interpolation)
elif (data.shape[0] == 1):
plot(linspace(0,ex,data.shape[1]), data[0,:])
elif (data.shape[1] == 1):
plot(linspace(0,ey,data.shape[0]), data[:,0])
xlabel(x_label)
ylabel(y_label)
title(plot_title)
| 33.101124 | 108 | 0.631365 |
e778cca89246c70fd08a4c9b98e8d471a1c6c386
| 181 |
py
|
Python
|
src/autograph/views.py
|
RobetSlovev39/AutoGraph
|
c8bdb358b95143ab0d8c6f7c475a6c21f7a76b95
|
[
"MIT"
] | null | null | null |
src/autograph/views.py
|
RobetSlovev39/AutoGraph
|
c8bdb358b95143ab0d8c6f7c475a6c21f7a76b95
|
[
"MIT"
] | null | null | null |
src/autograph/views.py
|
RobetSlovev39/AutoGraph
|
c8bdb358b95143ab0d8c6f7c475a6c21f7a76b95
|
[
"MIT"
] | null | null | null |
from .services.core import update_devices
from django.http import HttpResponse, HttpRequest
def index_view(request: HttpRequest) -> HttpResponse:
return HttpResponse('works')
| 25.857143 | 53 | 0.801105 |
82413fcb1d7e92b34a949ae2861531b9eac50322
| 1,045 |
py
|
Python
|
tools/generate-csv.py
|
ndoell/owasp-masvs
|
38caa79050b8669d0ed198158f8b767ff12d3c3b
|
[
"CC0-1.0"
] | 1,368 |
2016-09-14T02:34:05.000Z
|
2022-03-31T10:10:36.000Z
|
tools/generate-csv.py
|
ndoell/owasp-masvs
|
38caa79050b8669d0ed198158f8b767ff12d3c3b
|
[
"CC0-1.0"
] | 371 |
2016-09-14T12:07:14.000Z
|
2022-03-08T23:43:04.000Z
|
tools/generate-csv.py
|
ndoell/owasp-masvs
|
38caa79050b8669d0ed198158f8b767ff12d3c3b
|
[
"CC0-1.0"
] | 369 |
2016-09-15T20:05:12.000Z
|
2022-03-28T11:25:31.000Z
|
#!/usr/bin/python
""" Quick and Dirty Roberto Martelloni's script to generate a CSV """
import os
def fromFilenameToArea(filename):
splittedFilename = filename.split("-")
id = splittedFilename[1]
name = splittedFilename[2].split(".")
name = name[0].replace("_", " ")
return id, name
def parsemd(filename):
for line in open(filename):
if line.startswith("|"):
if line.find("| --- |") == 0: continue
if line.find("| # |") == 0: continue
if line.find("| #") == 0: continue
start = fromFilenameToArea(filename)
line = line.replace("*", "")
line = line.split("|")
print start[0] + "|",
print start[1] + "|",
print line[1] + "|",
print line[2] + "|",
print line[3] + "|",
print line[4] + "|"
def main():
for file in os.listdir("./Document"):
if file.find("-V") != -1:
parsemd("./Document/" + file)
if __name__ == '__main__':
main()
| 23.222222 | 69 | 0.499522 |
4166c469b02e4268ab4202a5ca776d48e180f028
| 2,532 |
py
|
Python
|
partner_ngos/programs_management/doctype/project_indicator_log/project_indicator_log.py
|
AkramMutaher/partner_ngos
|
4a345fb6989ff5a21db7fca07aa4e5174dca8f59
|
[
"MIT"
] | null | null | null |
partner_ngos/programs_management/doctype/project_indicator_log/project_indicator_log.py
|
AkramMutaher/partner_ngos
|
4a345fb6989ff5a21db7fca07aa4e5174dca8f59
|
[
"MIT"
] | null | null | null |
partner_ngos/programs_management/doctype/project_indicator_log/project_indicator_log.py
|
AkramMutaher/partner_ngos
|
4a345fb6989ff5a21db7fca07aa4e5174dca8f59
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Akram Mutaher and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw
from frappe.model.document import Document
from frappe.utils import (flt, getdate, get_last_day)
class ProjectIndicatorLog(Document):
def validate(self):
self.total_=self.men_+self.women_+self.boys_+self.girls_+self.unclassified_
self.validate_duplicate()
self.validate_dates()
def validate_duplicate(self):
conditions = " where docstatus<2 and project_indicator = '%s'" % self.project_indicator
conditions += " and month = '%s'" % self.month
if frappe.db.exists(self.doctype, self.name):
conditions += " and name <> '%s'" % self.name
sum_name = frappe.db.sql("""select count(name) from `tabProject Indicator Log` %s"""% conditions)[0][0]
if sum_name > 0:
frappe.throw(_("Already exists with same Project Indicator, and Month"))
def validate_dates(self):
months = {
"Jan": '01',
"Feb": '02',
"Mar": '03',
"Apr": '04',
"May": '05',
"Jun": '06',
"Jul": '07',
"Aug": '08',
"Sep": '09',
"Oct": '10',
"Nov": '11',
"Dec": '12'
}
if self.month and self.fiscal_year and (not self.start_date or not self.last_date):
self.start_date=getdate(self.fiscal_year+'-'+months[self.month]+'-01')
self.last_date=get_last_day(self.start_date)
def on_submit(self):
self.update_master(True)
# create the BSC Ledger Entry #
#
def on_cancel(self):
self.update_master(False)
def update_master(self, increase = True):
master = frappe.get_doc("Project Indicator", self.project_indicator)
master.db_set("men_", (master.men_+ self.men_) if increase == True else (master.men_- self.men_) )
master.db_set("women_", (master.women_+ self.women_) if increase == True else (master.women_- self.women_) )
master.db_set("boys_", (master.boys_+ self.boys_) if increase == True else (master.boys_- self.boys_) )
master.db_set("girls_", (master.girls_+ self.girls_) if increase == True else (master.girls_- self.girls_) )
master.db_set("unclassified_", (master.unclassified_+ self.unclassified_) if increase == True else (master.unclassified_- self.unclassified_) )
master.db_set("total_", (master.total_+ self.total_) if increase == True else (master.total_- self.total_) )
master.db_set("percent", ( flt(master.men_+ self.total_) / flt(master.total) * 100.0 ) if increase == True else ( flt(master.men_- self.total_) / flt(master.total) * 100.0 ))
| 37.791045 | 177 | 0.697077 |
d8dd2557068872ec19187051013dce2dc953ee23 | 381 | py | Python
RDS/circle3_central_services/research_manager/src/tests/test_enum_status.py | Sciebo-RDS/Sciebo-RDS | d71cf449ed045a2a7a049e2cb77c99fd5a9195bd | ["MIT"] | 10 | 2020-06-24T08:22:24.000Z | 2022-01-13T16:17:36.000Z
RDS/circle3_central_services/research_manager/src/tests/test_enum_status.py | Sciebo-RDS/Sciebo-RDS | d71cf449ed045a2a7a049e2cb77c99fd5a9195bd | ["MIT"] | 78 | 2020-01-23T14:32:06.000Z | 2022-03-07T14:11:16.000Z
RDS/circle3_central_services/research_manager/src/tests/test_enum_status.py | Sciebo-RDS/Sciebo-RDS | d71cf449ed045a2a7a049e2cb77c99fd5a9195bd | ["MIT"] | 1 | 2020-06-24T08:33:48.000Z | 2020-06-24T08:33:48.000Z
import unittest

from lib.EnumStatus import Status


class Test_Enum_Status(unittest.TestCase):
    def test_successor(self):
        self.assertEqual(1, Status.CREATED.value)
        self.assertEqual(Status.WORK, Status.CREATED.succ())
        self.assertEqual(Status.DONE, Status.CREATED.succ().succ())
        with self.assertRaises(IndexError):
            Status.DELETED.succ()
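
The Status enum imported from lib.EnumStatus is not part of this file. A minimal sketch that satisfies the assertions above — the member names, their order and the IndexError on the last member come from the test, everything else is an assumption — could look like:

# Illustrative sketch only; the real lib.EnumStatus may define additional members.
from enum import Enum

class Status(Enum):
    CREATED = 1
    WORK = 2
    DONE = 3
    DELETED = 4

    def succ(self):
        # Return the next member in definition order; raises IndexError on the last one.
        members = list(type(self))
        return members[members.index(self) + 1]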
| 34.636364 | 67 | 0.708661 |
996cc5b136e26ff7439f20c1e4227fa072359291 | 1,802 | py | Python
pvs_suban/migrations/serializers.py | hackit90/django-invoices | 770d5cffe7831539b885c517b0a435cc764e0a8c | ["MIT"] | null | null | null
pvs_suban/migrations/serializers.py | hackit90/django-invoices | 770d5cffe7831539b885c517b0a435cc764e0a8c | ["MIT"] | null | null | null
pvs_suban/migrations/serializers.py | hackit90/django-invoices | 770d5cffe7831539b885c517b0a435cc764e0a8c | ["MIT"] | null | null | null
from rest_framework import serializers
from rest_framework.serializers import ModelSerializer

from .models import Contact, InvoicePosition, Address, Invoice, Country


# Embed addresses inside the contact representation
class AddressNestedSerializer(ModelSerializer):
    class Meta:
        model = Address
        fields = ['street', 'zip', 'city', 'country', 'id']


class ContactSerializer(ModelSerializer):
    addresses = AddressNestedSerializer(many=True)

    class Meta:
        model = Contact
        fields = ['type', 'salutation', 'name', 'email', 'addresses', 'id']
        read_only_fields = ['addresses']


# Embed invoice positions inside the invoice representation
class InvoicePositionNestedSerializer(ModelSerializer):
    class Meta:
        model = InvoicePosition
        fields = '__all__'


class InvoiceSerializer(ModelSerializer):
    InvoicePositions = InvoicePositionNestedSerializer(many=True)
    total_amount = serializers.FloatField(source='total')

    class Meta:
        model = Invoice
        fields = ['title', 'body', 'date', 'due', 'condition', 'InvoicePositions', 'total_amount', 'id']
        read_only_fields = ['InvoicePositions']


class InvoicePositionSerializer(ModelSerializer):
    class Meta:
        model = InvoicePosition
        fields = '__all__'


class AddressSerializer(ModelSerializer):
    # SerializerMethodField resolves to get_<field_name> on the serializer by default
    country_name = serializers.SerializerMethodField(source='get_country_name')
    contact = serializers.SerializerMethodField(source='get_contact')

    class Meta:
        model = Address
        fields = ['street', 'zip', 'city', 'invoices', 'contact', 'country_name', 'id']

    def get_country_name(self, obj):
        return obj.country.value

    def get_contact(self, obj):
        return obj.contact.name


class CountrySerializer(ModelSerializer):
    class Meta:
        model = Country
        fields = '__all__'
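
A short usage sketch, assuming the related name 'addresses' on the Address model that the nested field above implies; the queryset call and the resulting values are illustrative, not taken from the project:

# Illustrative only: serialize one contact together with its nested addresses.
contact = Contact.objects.prefetch_related('addresses').first()
data = ContactSerializer(contact).data
# 'data' is a dict with the contact fields plus a list of address dicts under 'addresses'.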
| 36.04 | 105 | 0.699223 |
9986b0dfc370a29617c0d095335ce679cb492b4d | 284 | py | Python
Scripts/Leibniz'sFormulaForCalculatingPi.py | kenanchristian/hacktoberfest | b55750bf4facb77abd532b66ed37101e2895c4d7 | ["MIT"] | 8 | 2020-10-26T06:51:06.000Z | 2021-04-02T13:01:27.000Z
Scripts/Leibniz'sFormulaForCalculatingPi.py | kenanchristian/hacktoberfest | b55750bf4facb77abd532b66ed37101e2895c4d7 | ["MIT"] | 71 | 2020-10-25T22:46:02.000Z | 2021-10-14T06:47:39.000Z
Scripts/Leibniz'sFormulaForCalculatingPi.py | kenanchristian/hacktoberfest | b55750bf4facb77abd532b66ed37101e2895c4d7 | ["MIT"] | 77 | 2020-10-24T01:53:46.000Z | 2021-10-01T06:25:27.000Z
def myPi(n):
    # Leibniz series: pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...
    # Each loop iteration adds the next negative and the next positive term.
    denominator = 1
    addto = 1
    for i in range(n):
        denominator = denominator + 2
        addto = addto - (1/denominator)
        denominator = denominator + 2
        addto = addto + (1/denominator)
    pi = addto * 4
    return pi

print(myPi(1000000))
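
A small convergence check, not part of the original script; it only assumes the standard library's math module and illustrates how slowly the Leibniz series approaches pi:

# Added for illustration: each loop pass sums two terms, so myPi(1000000)
# uses about two million terms yet is only accurate to roughly six decimals.
import math
print(abs(myPi(1000000) - math.pi))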
| 18.933333 | 39 | 0.56338 |
5120133cae90addfcec60d790951128203656f2d | 1,077 | py | Python
loesungen/chapter06/import-csv-semicolon.py | SaschaKersken/Daten-Prozessanalyse | 370f07a75b9465329deb3671adbfbef8483f76f6 | ["Apache-2.0"] | 2 | 2021-09-20T06:16:41.000Z | 2022-01-17T14:24:43.000Z
loesungen/chapter06/import-csv-semicolon.py | SaschaKersken/Daten-Prozessanalyse | 370f07a75b9465329deb3671adbfbef8483f76f6 | ["Apache-2.0"] | null | null | null
loesungen/chapter06/import-csv-semicolon.py | SaschaKersken/Daten-Prozessanalyse | 370f07a75b9465329deb3671adbfbef8483f76f6 | ["Apache-2.0"] | null | null | null
import csv

# Create a Sniffer instance
sniffer = csv.Sniffer()

# CSV file to examine
csv_filename = 'iris-semicolon.csv'
print(csv_filename)

with open(csv_filename, 'r', newline='') as csv_file:
    # Read the first 1024 bytes of the file
    chunk = csv_file.read(1024)
    # Check whether the file has a header row
    header = sniffer.has_header(chunk)
    print(f"- Header: {header}")
    # Detect the CSV configuration (dialect)
    dialect = sniffer.sniff(chunk)
    # As an example, print the detected delimiter
    print(f"- Trennzeichen: {repr(dialect.delimiter)}")
    # Important: rewind the file before reading it again
    csv_file.seek(0)
    # Initialise the reader with the detected dialect
    reader = csv.reader(csv_file, dialect)
    # If a header is present, skip it (or keep it for later use)
    if header:
        header_data = next(reader, None)
        print("- Spaltentitel:", header_data)
    iris_data = list(reader)

print(f"{len(iris_data)} Datensätze importiert.")
print(iris_data[0])
print(iris_data[50])
print(iris_data[100])
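
A hedged variant, not in the original solution: the sniffed dialect can also be reused with csv.DictReader, which takes the header line as its keys — only meaningful here because the sniffer reported a header:

# Illustrative sketch: re-read the same file with DictReader and the sniffed dialect.
with open(csv_filename, 'r', newline='') as csv_file:
    dict_reader = csv.DictReader(csv_file, dialect=dialect)
    rows = list(dict_reader)
# Each entry in 'rows' is a dict keyed by the column titles from the header row.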
| 34.741935 | 62 | 0.701021 |