hexsha (stringlengths 40-40) | size (int64 6-782k) | ext (stringclasses 7 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4-237) | max_stars_repo_name (stringlengths 6-72) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (list) | max_stars_count (int64 1-53k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 4-184) | max_issues_repo_name (stringlengths 6-72) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (list) | max_issues_count (int64 1-27.1k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 4-184) | max_forks_repo_name (stringlengths 6-72) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (list) | max_forks_count (int64 1-12.2k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 6-782k) | avg_line_length (float64 2.75-664k) | max_line_length (int64 5-782k) | alphanum_fraction (float64 0-1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
4c08d48b19f075dae23585000733938c8ac68391 | 698 | py | Python | python/coding_dojang/decorator_class_param_return.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null | python/coding_dojang/decorator_class_param_return.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null | python/coding_dojang/decorator_class_param_return.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null |
class Trace:
    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        r = self.func(*args, **kwargs)
        print('{0}(args={1}, kwargs={2}) -> {3}'.format(self.func.__name__, args, kwargs, r))
        return r


class Trace2:
    def __call__(self, func):
        def wrapper(*args, **kwargs):
            r = func(*args, **kwargs)
            print('{0}(args={1}, kwargs={2}) -> {3}'.format(func.__name__, args, kwargs, r))
            return r
        return wrapper


@Trace
def add(a, b):
    return a + b


@Trace2()
def add2(a, b):
    return a + b


print(add(10, 20))
print(add(a=10, b=20))
print(add2(10, 20))
print(add2(a=10, b=20))
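# Each call prints the trace line first, e.g. add(args=(10, 20), kwargs={}) -> 30, then print() shows the returned value.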
| 21.151515 | 94 | 0.537249 |
d5d52baa97d1d0595989dc9119a6764cb3021caf | 498 | py | Python | apps/multivers/migrations/0008_product_margin.py | LvanArkel/sbzwebsite | a26efbb050585312c53010f14f86c23616a8071f | ["BSD-3-Clause"] | 1 | 2017-01-08T13:21:43.000Z | 2017-01-08T13:21:43.000Z | apps/multivers/migrations/0008_product_margin.py | LvanArkel/sbzwebsite | a26efbb050585312c53010f14f86c23616a8071f | ["BSD-3-Clause"] | 17 | 2018-12-03T14:22:14.000Z | 2021-07-14T15:15:12.000Z | apps/multivers/migrations/0008_product_margin.py | LvanArkel/sbzwebsite | a26efbb050585312c53010f14f86c23616a8071f | ["BSD-3-Clause"] | 2 | 2018-12-03T14:58:49.000Z | 2019-12-01T13:24:42.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-03 13:41
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('multivers', '0007_auto_20170203_1408'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='margin',
            field=models.IntegerField(choices=[(0, 'No margin'), (1, 'Has margin')], default=1),
        ),
    ]
| 23.714286 | 96 | 0.610442 |
9112e971dae6cf9bd9a1aa319883d48f351d8c4e | 1,281 | py | Python | ParsingAndScraping/ivi/parser.py | KazuruK/FilmGetter | fd84bcaddf17d4b89ad6e5d27095535346c5f4a9 | ["BSD-3-Clause"] | 1 | 2021-06-23T13:06:11.000Z | 2021-06-23T13:06:11.000Z | ParsingAndScraping/ivi/parser.py | KazuruK/FilmGetter | fd84bcaddf17d4b89ad6e5d27095535346c5f4a9 | ["BSD-3-Clause"] | 1 | 2021-06-23T21:21:52.000Z | 2021-06-23T21:21:52.000Z | ParsingAndScraping/ivi/parser.py | KazuruK/FilmGetter | fd84bcaddf17d4b89ad6e5d27095535346c5f4a9 | ["BSD-3-Clause"] | 1 | 2021-06-28T19:14:19.000Z | 2021-06-28T19:14:19.000Z |
import requests
from ParsingAndScraping.ivi.scrapper import ivi_search
from ParsingAndScraping.assistants import big_num
from ParsingAndScraping.assistants import empty_string_cleaner
from bs4 import BeautifulSoup
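# Looks up a film on ivi.ru and returns ['Not available'], ['Free'], or a cleaned list with the subscription note and HD/SD prices.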
def parse_this(film_name):
url = ivi_search(film_name)
if len(url) == 0:
return ['Not available']
is_subscription_available = ''
ivi_price = '399'
hd_price = ''
sd_price = ''
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
free = str(soup.find('title'))
if free.find('бесплатно') != -1:
return ['Free']
else:
sub = soup.find('span', class_='nbl-button__secondaryText')
if sub is not None:
is_subscription_available = 'Available by Subscription. ' + ivi_price + ' rub'
page = str(soup.find('video-info'))
quote = big_num(page)
if len(quote) > 1:
del quote[len(quote) - 1]
hd_price = str(quote[0])
sd_price = str(quote[1])
else:
hd_price = str(quote[0])
'''print(is_free) #for tests
print(is_subscription_available)
print(hd_price.strip())
print(sd_price)'''
output_list = [is_subscription_available, hd_price, sd_price]
output = empty_string_cleaner(output_list)
return output
| 32.025 | 90 | 0.662763 |
5dc8644b81f7188968ee0814223ec9552db416ee | 426 | py | Python | nz_crawl_demo/day9/Agent/useragent_demo/useragent_demo/spiders/httpbin.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | ["Apache-2.0"] | null | null | null | nz_crawl_demo/day9/Agent/useragent_demo/useragent_demo/spiders/httpbin.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | ["Apache-2.0"] | 27 | 2020-02-12T07:55:58.000Z | 2022-03-12T00:19:09.000Z | nz_crawl_demo/day9/Agent/useragent_demo/useragent_demo/spiders/httpbin.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | ["Apache-2.0"] | 2 | 2020-02-18T01:54:55.000Z | 2020-02-21T11:36:28.000Z |
# -*- coding: utf-8 -*-
import scrapy
import json


class HttpbinSpider(scrapy.Spider):
    name = 'httpbin'
    allowed_domains = ['httpbin.org']
    start_urls = ['http://httpbin.org/user-agent']

    def parse(self, response):
        # Print the User-Agent that httpbin saw, then request the same URL again
        # (dont_filter=True bypasses Scrapy's duplicate filter).
        user_agent = json.loads(response.text)['user-agent']
        print("=" * 30)
        print(user_agent)
        print("=" * 30)
        yield scrapy.Request(self.start_urls[0], dont_filter=True)
| 26.625 | 65 | 0.629108 |
5dd5ed93ae3e4c2eb0399723f48425243f45c730 | 20,896 | py | Python | tests/transformers/bigbird/test_modeling.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null | tests/transformers/bigbird/test_modeling.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null | tests/transformers/bigbird/test_modeling.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import numpy as np
import paddle
from common_test import CommonTest
from util import softmax_with_cross_entropy, slow
from paddlenlp.transformers import BigBirdForSequenceClassification, \
BigBirdPretrainingCriterion, BigBirdForPretraining, BigBirdModel, \
BigBirdForQuestionAnswering, BigBirdForTokenClassification, BigBirdForMultipleChoice, \
BigBirdForMaskedLM, BigBirdForCausalLM
from paddlenlp.transformers import create_bigbird_rand_mask_idx_list
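# Shared helper: builds the BigBird random-attention mask index list, random input ids, and masked-LM positions used by the tests below.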
def create_input_data(config, seed=None):
if seed is not None:
np.random.seed(seed)
rand_mask_idx_list = create_bigbird_rand_mask_idx_list(
config["num_layers"], config["seq_len"], config["seq_len"],
config["nhead"], config["block_size"], config["window_size"],
config["num_global_blocks"], config["num_rand_blocks"], config["seed"])
input_ids = np.random.randint(low=0,
high=config['vocab_size'],
size=(config["batch_size"],
config["seq_len"]))
num_to_predict = int(config["seq_len"] * 0.15)
masked_lm_positions = np.random.choice(
config["seq_len"], (config["batch_size"], num_to_predict),
replace=False)
masked_lm_positions = np.sort(masked_lm_positions)
pred_padding_len = config["seq_len"] - num_to_predict
temp_masked_lm_positions = np.full(masked_lm_positions.size,
0,
dtype=np.int32)
mask_token_num = 0
for i, x in enumerate(masked_lm_positions):
for j, pos in enumerate(x):
temp_masked_lm_positions[
mask_token_num] = i * config["seq_len"] + pos
mask_token_num += 1
masked_lm_positions = temp_masked_lm_positions
return rand_mask_idx_list, input_ids, masked_lm_positions
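# NumPy reference implementation of the pretraining criterion; the criterion tests compare the Paddle version against it.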
class NpBigBirdPretrainingCriterion(object):
def __init__(self, vocab_size, use_nsp=False, ignore_index=0):
self.vocab_size = vocab_size
self.use_nsp = use_nsp
self.ignore_index = ignore_index
def __call__(self, prediction_scores, seq_relationship_score,
masked_lm_labels, next_sentence_labels, masked_lm_scale,
masked_lm_weights):
masked_lm_loss = softmax_with_cross_entropy(
prediction_scores, masked_lm_labels, ignore_index=self.ignore_index)
masked_lm_loss = np.transpose(masked_lm_loss, [1, 0])
masked_lm_loss = np.sum(masked_lm_loss * masked_lm_weights) / (
np.sum(masked_lm_weights) + 1e-5)
scale = 1.0
if not self.use_nsp:
scale = 0.0
next_sentence_loss = softmax_with_cross_entropy(seq_relationship_score,
next_sentence_labels)
return masked_lm_loss + np.mean(next_sentence_loss) * scale
class TestBigBirdForSequenceClassification(CommonTest):
def set_input(self):
self.config = copy.deepcopy(
BigBirdModel.pretrained_init_configuration['bigbird-base-uncased'])
self.config['num_layers'] = 2
self.config['vocab_size'] = 1024
self.config['attn_dropout'] = 0.0
self.config['hidden_dropout_prob'] = 0.0
self.config['dim_feedforward'] = 1024
self.config['seq_len'] = 1024
self.config['batch_size'] = 2
self.config['max_position_embeddings'] = 2048
self.rand_mask_idx_list, self.input_ids, self.masked_lm_positions = create_input_data(
self.config)
def set_output(self):
self.expected_shape = (self.config['batch_size'], 2)
def setUp(self):
self.set_model_class()
self.set_input()
self.set_output()
def set_model_class(self):
self.TEST_MODEL_CLASS = BigBirdForSequenceClassification
def test_forward(self):
bigbird = BigBirdModel(**self.config)
model = self.TEST_MODEL_CLASS(bigbird)
input_ids = paddle.to_tensor(self.input_ids)
rand_mask_idx_list = paddle.to_tensor(self.rand_mask_idx_list)
output = model(input_ids, rand_mask_idx_list=rand_mask_idx_list)
self.check_output_equal(self.expected_shape, output.numpy().shape)
class TestBigBirdForQuestionAnswering(CommonTest):
def set_input(self):
self.config = copy.deepcopy(
BigBirdModel.pretrained_init_configuration['bigbird-base-uncased'])
self.config['num_layers'] = 2
self.config['vocab_size'] = 1024
self.config['attn_dropout'] = 0.0
self.config['hidden_dropout_prob'] = 0.0
self.config['dim_feedforward'] = 1024
self.config['seq_len'] = 1024
self.config['batch_size'] = 2
self.config['max_position_embeddings'] = 2048
self.rand_mask_idx_list, self.input_ids, self.masked_lm_positions = create_input_data(
self.config)
def set_output(self):
self.expected_shape1 = (self.config['batch_size'],
self.config['seq_len'])
self.expected_shape2 = (self.config['batch_size'],
self.config['seq_len'])
def setUp(self):
self.set_model_class()
self.set_input()
self.set_output()
def set_model_class(self):
self.TEST_MODEL_CLASS = BigBirdForQuestionAnswering
def test_forward(self):
bigbird = BigBirdModel(**self.config)
model = self.TEST_MODEL_CLASS(bigbird)
input_ids = paddle.to_tensor(self.input_ids)
rand_mask_idx_list = paddle.to_tensor(self.rand_mask_idx_list)
start_logits, end_logits = model(input_ids,
rand_mask_idx_list=rand_mask_idx_list)
self.check_output_equal(self.expected_shape1,
start_logits.numpy().shape)
self.check_output_equal(self.expected_shape2, end_logits.numpy().shape)
class TestBigBirdForTokenClassification(CommonTest):
def set_input(self):
self.config = copy.deepcopy(
BigBirdModel.pretrained_init_configuration['bigbird-base-uncased'])
self.config['num_layers'] = 2
self.config['vocab_size'] = 1024
self.config['attn_dropout'] = 0.0
self.config['hidden_dropout_prob'] = 0.0
self.config['dim_feedforward'] = 1024
self.config['seq_len'] = 1024
self.config['batch_size'] = 2
self.config['max_position_embeddings'] = 2048
self.rand_mask_idx_list, self.input_ids, self.masked_lm_positions = create_input_data(
self.config)
self.num_classes = 2
def set_output(self):
self.expected_shape = (self.config['batch_size'],
self.config['seq_len'], self.num_classes)
def setUp(self):
self.set_model_class()
self.set_input()
self.set_output()
def set_model_class(self):
self.TEST_MODEL_CLASS = BigBirdForTokenClassification
def test_forward(self):
bigbird = BigBirdModel(**self.config)
model = self.TEST_MODEL_CLASS(bigbird, num_classes=self.num_classes)
input_ids = paddle.to_tensor(self.input_ids)
rand_mask_idx_list = paddle.to_tensor(self.rand_mask_idx_list)
output = model(input_ids, rand_mask_idx_list=rand_mask_idx_list)
self.check_output_equal(self.expected_shape, output.numpy().shape)
class TestBigBirdForMultipleChoice(CommonTest):
def set_input(self):
self.config = copy.deepcopy(
BigBirdModel.pretrained_init_configuration['bigbird-base-uncased'])
self.config['num_layers'] = 2
self.config['vocab_size'] = 1024
self.config['attn_dropout'] = 0.0
self.config['hidden_dropout_prob'] = 0.0
self.config['dim_feedforward'] = 1024
self.config['seq_len'] = 1024
self.config['batch_size'] = 2
self.config['max_position_embeddings'] = 2048
self.rand_mask_idx_list, self.input_ids, self.masked_lm_positions = [], [], []
self.num_choices = 2
for i in range(self.num_choices):
rand_mask_idx_list, input_ids, masked_lm_positions = create_input_data(
self.config)
self.rand_mask_idx_list.append(rand_mask_idx_list)
self.input_ids.append(input_ids)
self.masked_lm_positions.append(masked_lm_positions)
self.rand_mask_idx_list = np.array(self.rand_mask_idx_list).swapaxes(
0, 1)
self.input_ids = np.array(self.input_ids).swapaxes(0, 1)
self.masked_lm_positions = np.array(self.masked_lm_positions).swapaxes(
0, 1)
def set_output(self):
self.expected_shape = (self.config['batch_size'], self.num_choices)
def setUp(self):
self.set_model_class()
self.set_input()
self.set_output()
def set_model_class(self):
self.TEST_MODEL_CLASS = BigBirdForMultipleChoice
def test_forward(self):
bigbird = BigBirdModel(**self.config)
model = self.TEST_MODEL_CLASS(bigbird, num_choices=self.num_choices)
input_ids = paddle.to_tensor(self.input_ids)
rand_mask_idx_list = paddle.to_tensor(self.rand_mask_idx_list)
output = model(input_ids, rand_mask_idx_list=rand_mask_idx_list)
self.check_output_equal(self.expected_shape, output.numpy().shape)
class TestBigBirdForMaskedLM(CommonTest):
def set_input(self):
self.config = copy.deepcopy(
BigBirdModel.pretrained_init_configuration['bigbird-base-uncased'])
self.config['num_layers'] = 2
self.config['vocab_size'] = 1024
self.config['attn_dropout'] = 0.0
self.config['hidden_dropout_prob'] = 0.0
self.config['dim_feedforward'] = 1024
self.config['seq_len'] = 1024
self.config['batch_size'] = 2
self.config['max_position_embeddings'] = 2048
self.rand_mask_idx_list, self.input_ids, self.masked_lm_positions = create_input_data(
self.config)
self.labels = np.random.randint(low=0,
high=self.config['vocab_size'],
size=(self.config["batch_size"],
self.config["seq_len"]))
def set_output(self):
self.expected_shape1 = (1, )
self.expected_shape2 = (self.config['batch_size'],
self.config['seq_len'],
self.config['vocab_size'])
self.expected_shape3 = (self.config['batch_size'],
self.config['seq_len'],
self.config['hidden_size'])
def setUp(self):
self.set_model_class()
self.set_input()
self.set_output()
def set_model_class(self):
self.TEST_MODEL_CLASS = BigBirdForMaskedLM
def test_forward(self):
bigbird = BigBirdModel(**self.config)
model = self.TEST_MODEL_CLASS(bigbird)
input_ids = paddle.to_tensor(self.input_ids)
rand_mask_idx_list = paddle.to_tensor(self.rand_mask_idx_list)
labels = paddle.to_tensor(self.labels)
masked_lm_loss, prediction_scores, sequence_output = model(
input_ids, rand_mask_idx_list=rand_mask_idx_list, labels=labels)
self.check_output_equal(self.expected_shape1,
masked_lm_loss.numpy().shape)
self.check_output_equal(self.expected_shape2,
prediction_scores.numpy().shape)
self.check_output_equal(self.expected_shape3,
sequence_output.numpy().shape)
class TestBigBirdForCausalLM(CommonTest):
def set_input(self):
self.config = copy.deepcopy(
BigBirdModel.pretrained_init_configuration['bigbird-base-uncased'])
self.config['num_layers'] = 2
self.config['vocab_size'] = 1024
self.config['attn_dropout'] = 0.0
self.config['hidden_dropout_prob'] = 0.0
self.config['dim_feedforward'] = 1024
self.config['seq_len'] = 1024
self.config['batch_size'] = 2
self.config['max_position_embeddings'] = 2048
self.rand_mask_idx_list, self.input_ids, self.masked_lm_positions = create_input_data(
self.config)
self.labels = np.random.randint(low=0,
high=self.config['vocab_size'],
size=(self.config["batch_size"],
self.config["seq_len"]))
def set_output(self):
self.expected_shape1 = (1, )
self.expected_shape2 = (self.config['batch_size'],
self.config['seq_len'],
self.config['vocab_size'])
self.expected_shape3 = (self.config['batch_size'],
self.config['seq_len'],
self.config['hidden_size'])
def setUp(self):
self.set_model_class()
self.set_input()
self.set_output()
def set_model_class(self):
self.TEST_MODEL_CLASS = BigBirdForCausalLM
def test_forward(self):
bigbird = BigBirdModel(**self.config)
model = self.TEST_MODEL_CLASS(bigbird)
input_ids = paddle.to_tensor(self.input_ids)
rand_mask_idx_list = paddle.to_tensor(self.rand_mask_idx_list)
labels = paddle.to_tensor(self.labels)
masked_lm_loss, prediction_scores, sequence_output = model(
input_ids, rand_mask_idx_list=rand_mask_idx_list, labels=labels)
self.check_output_equal(self.expected_shape1,
masked_lm_loss.numpy().shape)
self.check_output_equal(self.expected_shape2,
prediction_scores.numpy().shape)
self.check_output_equal(self.expected_shape3,
sequence_output.numpy().shape)
class TestBigBirdForPretraining(TestBigBirdForSequenceClassification):
def set_input(self):
self.config = copy.deepcopy(
BigBirdModel.pretrained_init_configuration['bigbird-base-uncased'])
self.config['num_layers'] = 2
self.config['vocab_size'] = 512
self.config['attn_dropout'] = 0.0
self.config['hidden_dropout_prob'] = 0.0
self.config['dim_feedforward'] = 1024
self.config['seq_len'] = 1024
self.config['batch_size'] = 2
self.config['max_position_embeddings'] = 2048
self.rand_mask_idx_list, self.input_ids, self.masked_lm_positions = create_input_data(
self.config)
def set_model_class(self):
self.TEST_MODEL_CLASS = BigBirdForPretraining
def set_output(self):
self.expected_pred_shape = (self.masked_lm_positions.shape[0],
self.config['vocab_size'])
self.expected_seq_shape = (self.config['batch_size'], 2)
def test_forward(self):
bigbird = BigBirdModel(**self.config)
model = self.TEST_MODEL_CLASS(bigbird)
input_ids = paddle.to_tensor(self.input_ids)
rand_mask_idx_list = paddle.to_tensor(self.rand_mask_idx_list)
masked_positions = paddle.to_tensor(self.masked_lm_positions)
output = model(input_ids,
rand_mask_idx_list=rand_mask_idx_list,
masked_positions=masked_positions)
self.check_output_equal(output[0].numpy().shape,
self.expected_pred_shape)
self.check_output_equal(output[1].numpy().shape,
self.expected_seq_shape)
class TestBigBirdPretrainingCriterionUseNSP(CommonTest):
def setUp(self):
self.config['vocab_size'] = 1024
self.criterion = BigBirdPretrainingCriterion(**self.config)
self.np_criterion = NpBigBirdPretrainingCriterion(**self.config)
def _construct_input_data(self, mask_num, vocab_size, batch_size):
prediction_scores = np.random.rand(mask_num, vocab_size).astype(
paddle.get_default_dtype())
seq_relationship_score = np.random.rand(batch_size, 2).astype(
paddle.get_default_dtype())
masked_lm_labels = np.random.randint(0, vocab_size, (mask_num, 1))
next_sentence_labels = np.random.randint(0, 2, (batch_size, 1))
masked_lm_scale = 1.0
masked_lm_weights = np.random.randint(0, 2, (mask_num)).astype(
paddle.get_default_dtype())
return prediction_scores, seq_relationship_score, masked_lm_labels, \
next_sentence_labels, masked_lm_scale, masked_lm_weights
def test_forward(self):
np_prediction_score, np_seq_relationship_score, np_masked_lm_labels, \
np_next_sentence_labels, masked_lm_scale, np_masked_lm_weights \
= self._construct_input_data(20, self.config['vocab_size'], 4)
prediction_score = paddle.to_tensor(np_prediction_score)
seq_relationship_score = paddle.to_tensor(np_seq_relationship_score)
masked_lm_labels = paddle.to_tensor(np_masked_lm_labels)
next_sentence_labels = paddle.to_tensor(np_next_sentence_labels)
masked_lm_weights = paddle.to_tensor(np_masked_lm_weights)
np_loss = self.np_criterion(np_prediction_score,
np_seq_relationship_score,
np_masked_lm_labels,
np_next_sentence_labels, masked_lm_scale,
np_masked_lm_weights)
loss = self.criterion(prediction_score, seq_relationship_score,
masked_lm_labels, next_sentence_labels,
masked_lm_scale, masked_lm_weights)
self.check_output_equal(np_loss, loss.numpy()[0])
class TestBigBirdPretrainingCriterionNotUseNSP(
TestBigBirdPretrainingCriterionUseNSP):
def setUp(self):
self.config['vocab_size'] = 1024
self.config['use_nsp'] = False
self.criterion = BigBirdPretrainingCriterion(**self.config)
self.np_criterion = NpBigBirdPretrainingCriterion(**self.config)
class TestBigBirdFromPretrain(CommonTest):
@slow
def test_bigbird_base_uncased(self):
model = BigBirdModel.from_pretrained('bigbird-base-uncased',
attn_dropout=0.0,
hidden_dropout_prob=0.0)
self.config = copy.deepcopy(model.config)
self.config['seq_len'] = 512
self.config['batch_size'] = 3
rand_mask_idx_list, input_ids, _ = create_input_data(self.config, 102)
input_ids = paddle.to_tensor(input_ids)
rand_mask_idx_list = paddle.to_tensor(rand_mask_idx_list)
output = model(input_ids, rand_mask_idx_list=rand_mask_idx_list)
expected_seq_shape = (self.config['batch_size'], self.config['seq_len'],
self.config['hidden_size'])
expected_pooled_shape = (self.config['batch_size'],
self.config['hidden_size'])
self.check_output_equal(output[0].numpy().shape, expected_seq_shape)
self.check_output_equal(output[1].numpy().shape, expected_pooled_shape)
expected_seq_slice = np.array([[0.06685783, 0.01576832, -0.14448889],
[0.16531630, 0.00974050, -0.15113291],
[0.08514148, -0.01252885, -0.12458798]])
# There's output diff about 1e-4 between cpu and gpu
self.check_output_equal(output[0].numpy()[0, 0:3, 0:3],
expected_seq_slice,
atol=1e-4)
expected_pooled_slice = np.array([[0.78695089, 0.87273526, -0.88046724],
[0.66016346, 0.74889791, -0.76608104],
[0.15944470, 0.25242448,
-0.34336662]])
self.check_output_equal(output[1].numpy()[0:3, 0:3],
expected_pooled_slice,
atol=1e-4)
if __name__ == "__main__":
unittest.main()
| 42.995885 | 94 | 0.633949 |
f8e57bcbc7b33d06b69243aff9192b57cec80693 | 1,051 | py | Python | examples/myqueryset/delete_/models.py | zhengtong0898/django-decode | 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | ["MIT"] | 5 | 2020-07-14T07:48:10.000Z | 2021-12-20T21:20:10.000Z | examples/myqueryset/delete_/models.py | zhengtong0898/django-decode | 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | ["MIT"] | 7 | 2021-03-26T03:13:38.000Z | 2022-03-12T00:42:03.000Z | examples/myqueryset/delete_/models.py | zhengtong0898/django-decode | 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | ["MIT"] | 1 | 2021-02-16T07:04:25.000Z | 2021-02-16T07:04:25.000Z |
from django.db import models
from django.utils import timezone
from django.template.defaultfilters import truncatechars


# Create your models here.
class brand(models.Model):
    name = models.CharField(verbose_name="品牌名称", max_length=50)
    description = models.TextField(verbose_name="品牌描述")


class product(models.Model):
    name = models.CharField(verbose_name="商品名称", max_length=50, unique=True)
    price = models.DecimalField(verbose_name="商品价格", max_digits=5, decimal_places=2)
    description = models.TextField(verbose_name="商品描述")
    production_date = models.DateField(verbose_name="生产日期")
    expiration_date = models.IntegerField(verbose_name="有效期", help_text="按天")
    date_joined = models.DateTimeField(verbose_name="商品录入时间", auto_now=True)
    date_changed = models.DateTimeField(verbose_name="商品修改时间", auto_now=True, null=True)
    # Django will not ...
    brand_id = models.ForeignKey('brand', on_delete=models.CASCADE)

    @property
    def short_description(self):
        return truncatechars(self.description, 100)
| 38.925926 | 96 | 0.759277 |
5d4478fcbd1601df16142faba97152e21a159835 | 1,256 | py | Python | Packs/RiskSense/Scripts/RiskSenseDisplayCVEChartScript/RiskSenseDisplayCVEChartScript.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/RiskSense/Scripts/RiskSenseDisplayCVEChartScript/RiskSenseDisplayCVEChartScript.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/RiskSense/Scripts/RiskSenseDisplayCVEChartScript/RiskSenseDisplayCVEChartScript.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z |
from CommonServerPython import *
from typing import Dict, Any
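# Builds a Demisto bar-chart entry (Type 17) comparing CVEs with a ransomware threat against CVEs trending for ransomware.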
def display_cve_chart() -> Dict[str, Any]:
cves_count = demisto.args().get('CvesCount', 0)
trending_cves_count = demisto.args().get('TrendingCvesCount', 0)
entry_result = {
"Type": 17,
"ContentsFormat": "bar",
"Contents": {
"stats": [
{
"data": [
cves_count,
],
"name": "CVEs that have ransomware threat",
"label": "CVEs that have ransomware threat",
"color": "rgb(0, 0, 255)"
},
{
"data": [
trending_cves_count
],
"name": "CVEs that are ransomware trending",
"label": "CVEs that are ransomware trending",
"color": "rgb(255, 0, 0)"
},
],
"params": {
"layout": "horizontal"
}
}
}
return entry_result
def main() -> None:
entry_result = display_cve_chart()
demisto.results(entry_result)
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| 27.304348 | 68 | 0.444268 |
5390439ba6c39c31ba4c90c3427c225d8387ddd4 | 3,316 | py | Python | tests/unit/scalar_fields/test_xyz_scalar_fields.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | ["MIT"] | 1,142 | 2016-10-10T08:55:30.000Z | 2022-03-30T04:46:16.000Z | tests/unit/scalar_fields/test_xyz_scalar_fields.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | ["MIT"] | 195 | 2016-10-10T08:30:37.000Z | 2022-02-17T12:51:17.000Z | tests/unit/scalar_fields/test_xyz_scalar_fields.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | ["MIT"] | 215 | 2017-02-28T00:50:29.000Z | 2022-03-22T17:01:31.000Z |
import pytest
import numpy as np
from pyntcloud.scalar_fields.xyz import (
PlaneFit,
SphereFit,
SphericalCoordinates,
CylindricalCoordinates
)
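# Each test builds the scalar field twice (default parameters, then one non-default parameter) and checks the values it adds via to_be_added.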
@pytest.mark.usefixtures("plane_pyntcloud")
def test_PlaneFit_max_dist(plane_pyntcloud):
scalar_field = PlaneFit(
pyntcloud=plane_pyntcloud)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert sum(scalar_field.to_be_added["is_plane"]) == 4
scalar_field = PlaneFit(
pyntcloud=plane_pyntcloud,
max_dist=0.4)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert sum(scalar_field.to_be_added["is_plane"]) == 5
@pytest.mark.usefixtures("sphere_pyntcloud")
def test_SphereFit_max_dist(sphere_pyntcloud):
scalar_field = SphereFit(
pyntcloud=sphere_pyntcloud)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert sum(scalar_field.to_be_added["is_sphere"]) == 4
scalar_field = SphereFit(
pyntcloud=sphere_pyntcloud,
max_dist=0.25)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert sum(scalar_field.to_be_added["is_sphere"]) == 5
@pytest.mark.usefixtures("pyntcloud_with_rgb_and_normals")
def test_SphericalCoordinates_bounds(pyntcloud_with_rgb_and_normals):
scalar_field = SphericalCoordinates(
pyntcloud=pyntcloud_with_rgb_and_normals)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert all(scalar_field.to_be_added["polar"] >= 0)
assert all(scalar_field.to_be_added["polar"] <= 180)
assert all(scalar_field.to_be_added["azimuthal"] >= -180)
assert all(scalar_field.to_be_added["azimuthal"] <= 180)
scalar_field = SphericalCoordinates(
pyntcloud=pyntcloud_with_rgb_and_normals,
degrees=False)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert all(scalar_field.to_be_added["polar"] >= 0)
assert all(scalar_field.to_be_added["polar"] <= np.pi)
assert all(scalar_field.to_be_added["azimuthal"] >= -np.pi)
assert all(scalar_field.to_be_added["azimuthal"] <= np.pi)
@pytest.mark.usefixtures("pyntcloud_with_rgb_and_normals")
def test_CylindricalCoordinates_bounds(pyntcloud_with_rgb_and_normals):
scalar_field = CylindricalCoordinates(
pyntcloud=pyntcloud_with_rgb_and_normals)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert all(scalar_field.to_be_added["angular_cylindrical"] >= -90)
assert all(scalar_field.to_be_added["angular_cylindrical"] <= 270)
scalar_field = CylindricalCoordinates(
pyntcloud=pyntcloud_with_rgb_and_normals,
degrees=False)
scalar_field.extract_info()
with np.errstate(divide='ignore', invalid='ignore'):
scalar_field.compute()
assert all(scalar_field.to_be_added["angular_cylindrical"] >= - (np.pi / 2))
assert all(scalar_field.to_be_added["angular_cylindrical"] <= (np.pi * 1.5))
| 33.16 | 80 | 0.723764 |
54e69e22cf9e1c4bba203d3f9b559bd796901852 | 442 | py | Python | exercises/de/exc_01_08_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | 2,085 | 2019-04-17T13:10:40.000Z | 2022-03-30T21:51:46.000Z | exercises/de/exc_01_08_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | 79 | 2019-04-18T14:42:55.000Z | 2022-03-07T08:15:43.000Z | exercises/de/exc_01_08_01.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | 361 | 2019-04-17T13:34:32.000Z | 2022-03-28T04:42:45.000Z |
import spacy

nlp = spacy.load("de_core_news_sm")

text = "Apple wurde 1976 von Steve Wozniak, Steve Jobs und Ron Wayne gegründet."

# Process the text
doc = ____

for token in doc:
    # Access the token's text, part-of-speech tag and dependency label
    token_text = ____.____
    token_pos = ____.____
    token_dep = ____.____
    # This is just for formatting
    print(f"{token_text:<12}{token_pos:<10}{token_dep:<10}")
| 26 | 80 | 0.714932 |
5310e4e7d6e5b0b13113a263604d8b9627a5af03 | 11,671 | py | Python | _Dist/NeuralNetworks/_Tests/_UnitTests/d_Dist.py | leoatchina/MachineLearning | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | ["MIT"] | 1,107 | 2016-09-21T02:18:36.000Z | 2022-03-29T02:52:12.000Z | _Dist/NeuralNetworks/_Tests/_UnitTests/d_Dist.py | leoatchina/MachineLearning | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | ["MIT"] | 18 | 2016-12-22T10:24:47.000Z | 2022-03-11T23:18:43.000Z | _Dist/NeuralNetworks/_Tests/_UnitTests/d_Dist.py | leoatchina/MachineLearning | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | ["MIT"] | 776 | 2016-12-21T12:08:08.000Z | 2022-03-21T06:12:08.000Z |
import os
import sys
root_path = os.path.abspath("../../../../")
if root_path not in sys.path:
sys.path.append(root_path)
import copy
import unittest
import numpy as np
from Util.Util import DataUtil
from _Dist.NeuralNetworks.b_TraditionalML.SVM import DistLinearSVM
from _Dist.NeuralNetworks.g_DistNN.NN import DistBasic, DistAdvanced
from _Dist.NeuralNetworks._Tests._UnitTests.UnitTestUtil import clear_cache
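# Shared minimal settings: a single epoch (max two) keeps these distributed-model unit tests fast.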
base_params = {
"name": "UnitTest", "data_info": {},
"model_param_settings": {"n_epoch": 1, "max_epoch": 2}
}
nn = DistAdvanced(**copy.deepcopy(base_params))
basic_nn = DistBasic(**copy.deepcopy(base_params))
linear_svm = DistLinearSVM(**copy.deepcopy(base_params))
train_set, cv_set, test_set = DataUtil.gen_special_linear(1000, 2, 2, 2, one_hot=False)
(x, y), (x_cv, y_cv), (x_test, y_test) = train_set, cv_set, test_set
train_data = np.hstack([x, y.reshape([-1, 1])])
cv_data = np.hstack([x_cv, y_cv.reshape([-1, 1])])
test_data = np.hstack([x_test, y_test.reshape([-1, 1])])
train_and_cv_data = np.vstack([train_data, cv_data])
class TestDistNN(unittest.TestCase):
def test_00_k_series_from_numpy(self):
self.assertIsInstance(
nn.k_random(3, (train_and_cv_data, test_data), verbose=0), DistAdvanced,
msg="k-random failed"
)
self.assertIsInstance(
nn.k_fold(3, (train_and_cv_data, test_data), verbose=0), DistAdvanced,
msg="k-fold failed"
)
self.assertIsInstance(
basic_nn.k_random(3, (train_and_cv_data, test_data), verbose=0), DistBasic,
msg="k-random failed"
)
self.assertIsInstance(
basic_nn.k_fold(3, (train_and_cv_data, test_data), verbose=0), DistBasic,
msg="k-fold failed"
)
def test_01_predict(self):
self.assertIs(nn.predict(train_set[0]).dtype, np.dtype("float32"), "Predict failed")
self.assertIs(nn.predict_classes(cv_set[0]).dtype, np.dtype("int32"), "Predict classes failed")
self.assertIs(nn.predict_classes(test_set[0]).dtype, np.dtype("int32"), "Predict classes failed")
self.assertIs(basic_nn.predict(train_set[0]).dtype, np.dtype("float32"), "Predict failed")
self.assertIs(basic_nn.predict_classes(cv_set[0]).dtype, np.dtype("int32"), "Predict classes failed")
self.assertIs(basic_nn.predict_classes(test_set[0]).dtype, np.dtype("int32"), "Predict classes failed")
def test_02_evaluate(self):
self.assertEqual(len(nn.evaluate(*train_set, *cv_set, *test_set)), 3, "Evaluation failed")
self.assertEqual(len(basic_nn.evaluate(*train_set, *cv_set, *test_set)), 3, "Evaluation failed")
def test_03_save(self):
self.assertIsInstance(nn.save(), DistAdvanced, msg="Save failed")
self.assertIsInstance(basic_nn.save(), DistBasic, msg="Save failed")
def test_04_load(self):
global nn, basic_nn
nn = DistAdvanced(**base_params).load()
basic_nn = DistBasic(**base_params).load()
self.assertIsInstance(nn, DistAdvanced, "Load failed")
self.assertIsInstance(basic_nn, DistBasic, "Load failed")
def test_05_re_predict(self):
self.assertIs(nn.predict(train_set[0]).dtype, np.dtype("float32"), "Re-Predict failed")
self.assertIs(nn.predict_classes(cv_set[0]).dtype, np.dtype("int32"), "Re-Predict classes failed")
self.assertIs(nn.predict_classes(test_set[0]).dtype, np.dtype("int32"), "Re-Predict classes failed")
self.assertIs(basic_nn.predict(train_set[0]).dtype, np.dtype("float32"), "Re-Predict failed")
self.assertIs(basic_nn.predict_classes(cv_set[0]).dtype, np.dtype("int32"), "Re-Predict classes failed")
self.assertIs(basic_nn.predict_classes(test_set[0]).dtype, np.dtype("int32"), "Re-Predict classes failed")
def test_06_re_evaluate(self):
self.assertEqual(len(nn.evaluate(*train_set, *cv_set, *test_set)), 3, "Re-Evaluation failed")
self.assertEqual(len(basic_nn.evaluate(*train_set, *cv_set, *test_set)), 3, "Re-Evaluation failed")
def test_07_param_search(self):
params = [
{"model_param_settings": {"lr": 1e-2}},
{"model_param_settings": {"lr": 1e-3}, "model_structure_settings": {"use_pruner": False}}
]
self.assertIsInstance(
nn.param_search(params, data=(train_and_cv_data, test_data), verbose=0), DistAdvanced,
msg="param_search failed"
)
self.assertIsInstance(
basic_nn.param_search(params, data=(train_and_cv_data, test_data), verbose=0), DistBasic,
msg="param_search failed"
)
def test_08_random_search(self):
list_first_grid_params = {
"model_param_settings": [
{"lr": 1e-2},
{"lr": 1e-2, "loss": "mse"},
{"lr": 1e-3, "loss": "mse"}
],
"model_structure_settings": [
{"hidden_units": [256, 256]},
{"hidden_units": [128, 128], "use_pruner": False},
{"hidden_units": [128, 128], "use_pruner": False, "use_wide_network": False}
]
}
dict_first_grid_params = {
"model_param_settings": {
"lr": [1e-2, 1e-3],
"loss": ["mse", "cross_entropy"]
},
"model_structure_settings": {
"use_pruner": [False, True],
"use_wide_network": [False, True],
"hidden_units": [[128, 128], [256, 256]]
},
}
self.assertIsInstance(
nn.random_search(
4, list_first_grid_params, grid_order="list_first",
data=(train_and_cv_data, test_data), verbose=0
), DistAdvanced, msg="list_first_grid_search failed"
)
self.assertIsInstance(
nn.random_search(
8, dict_first_grid_params, grid_order="dict_first",
data=(train_and_cv_data, test_data), verbose=0
), DistAdvanced, msg="dict_first_grid_search failed"
)
self.assertIsInstance(
basic_nn.random_search(
4, list_first_grid_params, grid_order="list_first",
data=(train_and_cv_data, test_data), verbose=0
), DistBasic, msg="list_first_grid_search failed"
)
self.assertIsInstance(
basic_nn.random_search(
8, dict_first_grid_params, grid_order="dict_first",
data=(train_and_cv_data, test_data), verbose=0
), DistBasic, msg="dict_first_grid_search failed"
)
def test_09_grid_search(self):
list_first_grid_params = {
"model_param_settings": [
{"lr": 1e-2},
{"lr": 1e-3, "loss": "mse"}
],
"model_structure_settings": [
{"hidden_units": [256, 256]},
{"hidden_units": [128, 128], "use_pruner": False}
]
}
dict_first_grid_params = {
"model_param_settings": {
"lr": [1e-3, 1e-2],
"loss": ["mse", "cross_entropy"]
},
"model_structure_settings": {
"hidden_units": [[128, 256], [128, 256]]
}
}
self.assertIsInstance(
nn.grid_search(
list_first_grid_params, grid_order="list_first",
data=(train_and_cv_data, test_data), verbose=0
), DistAdvanced, msg="list_first_grid_search failed"
)
self.assertIsInstance(
nn.grid_search(
dict_first_grid_params, grid_order="dict_first",
data=(train_and_cv_data, test_data), verbose=0
), DistAdvanced, msg="dict_first_grid_search failed"
)
self.assertIsInstance(
basic_nn.grid_search(
list_first_grid_params, grid_order="list_first",
data=(train_and_cv_data, test_data), verbose=0
), DistBasic, msg="list_first_grid_search failed"
)
self.assertIsInstance(
basic_nn.grid_search(
dict_first_grid_params, grid_order="dict_first",
data=(train_and_cv_data, test_data), verbose=0
), DistBasic, msg="dict_first_grid_search failed"
)
def test_10_range_search(self):
range_grid_params = {
"model_param_settings": {
"lr": ["float", 1e-3, 1e-1, "log"],
"loss": ["choice", ["mse", "cross_entropy"]]
},
"model_structure_settings": {
"hidden_units": [
["int", "int"],
[128, 256], [128, 256]
],
"pruner_params": {
"alpha": ["float", 1e-4, 1e-2, "log"],
"beta": ["float", 0.3, 3, "log"],
"gamma": ["float", 0.5, 2, "log"]
}
},
"pre_process_settings": {
"pre_process_method": ["choice", ["normalize", None]],
"reuse_mean_and_std": ["choice", [True, False]]
},
"nan_handler_settings": {
"nan_handler_method": ["choice", ["median", "mean"]],
"reuse_nan_handler_values": ["choice", [True, False]]
}
}
self.assertIsInstance(
nn.range_search(
8, range_grid_params,
data=(train_and_cv_data, test_data), verbose=0
), DistAdvanced, msg="range_search failed"
)
self.assertIsInstance(
basic_nn.range_search(
8, range_grid_params,
data=(train_and_cv_data, test_data), verbose=0
), DistBasic, msg="range_search failed"
)
def test_99_clear_cache(self):
clear_cache()
class TestDistLinearSVM(unittest.TestCase):
def test_00_k_series_from_numpy(self):
self.assertIsInstance(
linear_svm.k_random(3, (train_and_cv_data, test_data), verbose=0), DistLinearSVM,
msg="k-random failed"
)
self.assertIsInstance(
linear_svm.k_fold(3, (train_and_cv_data, test_data), verbose=0), DistLinearSVM,
msg="k-fold failed"
)
def test_01_predict(self):
self.assertIs(linear_svm.predict(train_set[0]).dtype, np.dtype("float32"), "Predict failed")
self.assertIs(linear_svm.predict(cv_set[0]).dtype, np.dtype("float32"), "Predict failed")
self.assertIs(linear_svm.predict(test_set[0]).dtype, np.dtype("float32"), "Predict failed")
def test_02_evaluate(self):
self.assertEqual(len(linear_svm.evaluate(*train_set, *cv_set, *test_set)), 3, "Evaluation failed")
def test_03_save(self):
self.assertIsInstance(linear_svm.save(), DistLinearSVM, msg="Save failed")
def test_04_load(self):
global linear_svm
model = DistLinearSVM(**base_params).load()
self.assertIsInstance(model, DistLinearSVM, "Load failed")
def test_05_re_predict(self):
self.assertIs(linear_svm.predict(train_set[0]).dtype, np.dtype("float32"), "Re-Predict failed")
self.assertIs(linear_svm.predict(cv_set[0]).dtype, np.dtype("float32"), "Re-Predict failed")
self.assertIs(linear_svm.predict(test_set[0]).dtype, np.dtype("float32"), "Re-Predict failed")
def test_06_re_evaluate(self):
self.assertEqual(len(linear_svm.evaluate(*train_set, *cv_set, *test_set)), 3, "Re-Evaluation failed")
def test_99_clear_cache(self):
clear_cache()
if __name__ == '__main__':
unittest.main()
| 41.982014 | 114 | 0.599177 |
f4220eb4da2293b0fe13bff2a676cb524c93d605 | 1,759 | py | Python | tests/test_serializers/test_serializers.py | shipt/py-volley | 0114651478c8df7304d3fe3cb9f72998901bb3fe | ["MIT"] | 8 | 2022-02-24T14:59:24.000Z | 2022-03-31T04:37:55.000Z | tests/test_serializers/test_serializers.py | shipt/py-volley | 0114651478c8df7304d3fe3cb9f72998901bb3fe | ["MIT"] | 3 | 2022-02-27T17:08:52.000Z | 2022-03-18T13:11:01.000Z | tests/test_serializers/test_serializers.py | shipt/py-volley | 0114651478c8df7304d3fe3cb9f72998901bb3fe | ["MIT"] | 2 | 2022-02-24T15:03:07.000Z | 2022-03-15T03:12:00.000Z |
"""handles tests for both base json and orjson serializers"""
from json import JSONDecodeError
from typing import List
from uuid import uuid4
import pytest
from msgpack.exceptions import ExtraData
from volley.serializers.base import BaseSerialization
from volley.serializers.json_serializer import JsonSerialization
from volley.serializers.msgpack_serializer import MsgPackSerialization
from volley.serializers.orjson_serializer import OrJsonSerialization
class CannotBeString:
"""an object that cannot be cast to string"""
def __str__(self) -> None: # type: ignore
pass
@pytest.fixture
def serializers() -> List[BaseSerialization]:
return [JsonSerialization(), OrJsonSerialization(), MsgPackSerialization()]
def test_interface(serializers: List[BaseSerialization]) -> None:
for serializer in serializers:
assert isinstance(serializer, BaseSerialization)
def test_success(serializers: List[BaseSerialization]) -> None:
msg = {"hello": f"world-{uuid4()}"}
for serializer in serializers:
serialized = serializer.serialize(msg)
assert isinstance(serialized, bytes)
deserialized = serializer.deserialize(serialized)
assert deserialized == msg
def test_fail(serializers: List[BaseSerialization]) -> None:
msg = {"time": CannotBeString(), "number": 42}
for serializer in serializers:
with pytest.raises(TypeError):
serializer.serialize(msg)
bad_json = b"abc : 123}"
with pytest.raises((JSONDecodeError, ExtraData)):
serializer.deserialize(bad_json)
non_str = CannotBeString()
with pytest.raises((JSONDecodeError, TypeError)): # type: ignore
serializer.deserialize(non_str) # type: ignore
| 30.859649 | 79 | 0.727118 |
f440f8694ff344576e8577e1e8b7347b4a77f290 | 557 | py | Python | larq_compute_engine/tf/python/ops/compute_engine_ops.py | timdebruin/compute-engine | bdd3c080ea330ad911cfb6b02dd41b6c574a7cf4 | ["Apache-2.0"] | null | null | null | larq_compute_engine/tf/python/ops/compute_engine_ops.py | timdebruin/compute-engine | bdd3c080ea330ad911cfb6b02dd41b6c574a7cf4 | ["Apache-2.0"] | null | null | null | larq_compute_engine/tf/python/ops/compute_engine_ops.py | timdebruin/compute-engine | bdd3c080ea330ad911cfb6b02dd41b6c574a7cf4 | ["Apache-2.0"] | null | null | null |
"""Use larq compute engine ops in python."""
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
_ops_lib = load_library.load_op_library(
resource_loader.get_path_to_datafile("_larq_compute_engine_ops.so")
)
bsign = _ops_lib.lqce_bsign
# binary convolution ops with the naming format bconv2d{bitpacking-bitwidth}
# the default bitpacking bitwidth is 64
bconv2d8 = _ops_lib.lqce_bconv2d8
bconv2d32 = _ops_lib.lqce_bconv2d32
bconv2d64 = _ops_lib.lqce_bconv2d64
bconv2d = _ops_lib.lqce_bconv2d64
| 30.944444 | 76 | 0.829443 |
be3a27e435a0c3555f093f226c3fe7d5335d5da0 | 545 | py | Python | scripts/fabsp/screen.py | swoiow/dsc | 5860e6bfaa70b700e025533c406a6bc52d4ab74b | ["MIT"] | null | null | null | scripts/fabsp/screen.py | swoiow/dsc | 5860e6bfaa70b700e025533c406a6bc52d4ab74b | ["MIT"] | null | null | null | scripts/fabsp/screen.py | swoiow/dsc | 5860e6bfaa70b700e025533c406a6bc52d4ab74b | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fabric.api import cd, settings, sudo


def download():
    """
    curl -o screen.tar.gz http://ftp.gnu.org/gnu/screen/screen-4.6.0.tar.gz
    tar xzf screen.tar.gz
    """
    with cd("/usr/src"), settings(warn_only=True):
        for line in download.__doc__.split("\n"):
            sudo(line)


def install():
    """
    ./configure
    make
    """
    with cd("/usr/src/screen-4.6.0"), settings(warn_only=True):
        for line in install.__doc__.split("\n"):
            sudo(line)
| 20.185185 | 75 | 0.570642 |
fe00935e83684dbf848a3e9b06fd9ba859dad86f | 1,561 | py | Python | test/test_tag18.py | kopp/pyventskalender | 6f6455f3c1db07f65a772b2716e4be95fbcd1804 | ["MIT"] | null | null | null | test/test_tag18.py | kopp/pyventskalender | 6f6455f3c1db07f65a772b2716e4be95fbcd1804 | ["MIT"] | null | null | null | test/test_tag18.py | kopp/pyventskalender | 6f6455f3c1db07f65a772b2716e4be95fbcd1804 | ["MIT"] | null | null | null |
from unittest import TestCase
try:
from pyventskalender import tag18_loesung as heute
except ImportError:
from pyventskalender import tag18 as heute
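# Day 18 tests: Satz must expose four Satzglied members, each backed by the matching MOEGLICHE_* word list.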
class Tag18Tests(TestCase):
def test_10_elemente_in_satz(self):
frischer_satz = heute.Satz()
for satzteil in ["adverbiale", "verb", "subjekt", "objekt"]:
self.assertIn(satzteil, dir(frischer_satz),
msg=f"Satz hat noch keinen Member {satzteil}")
self.assertEqual(type(frischer_satz.adverbiale), heute.Satzglied,
msg="Die adverbiale sollte ein Satzglied sein, kein {}".format(
type(frischer_satz.adverbiale)))
self.assertEqual(type(frischer_satz.verb), heute.Satzglied)
self.assertEqual(type(frischer_satz.subjekt), heute.Satzglied)
self.assertEqual(type(frischer_satz.objekt), heute.Satzglied)
def test_20_moeglichkeiten_in_satzglied(self):
moeglichkeiten = ["a", "b"]
satzglied = heute.Satzglied(moeglichkeiten)
self.assertEqual(satzglied.moeglichkeiten, moeglichkeiten)
self.assertEqual(satzglied.text, moeglichkeiten[0])
def test_30_listen_in_satz_uebergeben(self):
satz = heute.Satz()
self.assertEqual(satz.adverbiale.moeglichkeiten, heute.MOEGLICHE_ADVERBIALE)
self.assertEqual(satz.verb.moeglichkeiten, heute.MOEGLICHE_VERBEN)
self.assertEqual(satz.subjekt.moeglichkeiten, heute.MOEGLICHE_SUBJEKTE)
self.assertEqual(satz.objekt.moeglichkeiten, heute.MOEGLICHE_OBJEKTE)
| 44.6 | 88 | 0.700833 |
a3c02d7aae64130057a57dcc5a85383c7056ccfc | 1,461 | py | Python | src/test/tests/hybrid/mir_cache.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | ["BSD-3-Clause"] | 226 | 2018-12-29T01:13:49.000Z | 2022-03-30T19:16:31.000Z | src/test/tests/hybrid/mir_cache.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | ["BSD-3-Clause"] | 5,100 | 2019-01-14T18:19:25.000Z | 2022-03-31T23:08:36.000Z | src/test/tests/hybrid/mir_cache.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | ["BSD-3-Clause"] | 84 | 2019-01-24T17:41:50.000Z | 2022-03-10T10:01:46.000Z |
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: mir_cache.py
#
# Tests: mesh - 3D curvilinear,multi-domain,ghost zones replicated.
# plots - volume, PC
#
# Defect ID: '3542
#
# Programmer: Hank Childs
# Date: July 22, 2003
#
# Modifications:
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
#    Added ability to switch between Silo's HDF5 and PDB data.
# ----------------------------------------------------------------------------
view = View3DAttributes()
view.viewNormal = (0.557976, 0.651128, 0.514485)
view.focus = (0.5, 0.5, 0.5)
view.viewUp = (-0.0955897, 0.666272, -0.739557)
view.viewAngle = 30
view.parallelScale = 0.866025
view.nearPlane = -1.73205
view.farPlane = 1.73205
view.perspective = 1
SetView3D(view)
#
# The volume plot does not need ghost zones, so this will create one
# set of MIRs.
#
OpenDatabase(silo_data_path("bigsil.silo"))
AddPlot("Volume", "dist")
sil = SILRestriction()
sets = sil.SetsInCategory("mat")
sil.TurnOffAll()
sil.TurnOnSet(sets[0])
SetPlotSILRestriction(sil)
DrawPlots()
DeleteAllPlots()
#
# The PC plot will need ghost zones. If it does not create a new set of MIRs,
# we will get garbled data.
#
AddPlot("Pseudocolor", "dist")
DrawPlots()
Test("mir_cache1")
#
# Make extra sure that there aren't garbled zones in the interior.
#
view.nearPlane = -0.3
SetView3D(view)
Test("mir_cache2")
Exit()
| 21.80597 | 78 | 0.613279 |
430a657b957501807ccf91597f8bfedc08ceead7 | 99 | py | Python | Projects/Naver_map_halte_navigator.py | wooooooogi/SAlgorithm | bf76bb721785a52b6abf158077b554b0626ee1f7 | ["MIT"] | null | null | null | Projects/Naver_map_halte_navigator.py | wooooooogi/SAlgorithm | bf76bb721785a52b6abf158077b554b0626ee1f7 | ["MIT"] | null | null | null | Projects/Naver_map_halte_navigator.py | wooooooogi/SAlgorithm | bf76bb721785a52b6abf158077b554b0626ee1f7 | ["MIT"] | null | null | null |
# Traveling salesperson problem using the Naver Map application (the Map app can only use fewer than 5 stops).
| 99 | 99 | 0.777778 |
4a4cd94229030b2fb72f219d6c73d096757e5f31 | 215 | py | Python | exercises/fr/exc_01_02_02.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | ["MIT"] | null | null | null | exercises/fr/exc_01_02_02.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | ["MIT"] | null | null | null | exercises/fr/exc_01_02_02.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | ["MIT"] | null | null | null |
# Import spaCy
import ____

# Create the English nlp object
nlp = ____

# Process a text (it means "This is a sentence" in English)
doc = nlp("This is a sentence.")

# Print the document text
print(____.text)
| 17.916667 | 64 | 0.72093 |
4a81802e06cf756a1f503bf3bead3e73eab8e0ab | 8,027 | py | Python | helper/segmentation/report/utils.py | fishial/Object-Detection-Model | 4792f65ea785156a8e240d9cdbbc0c9d013ea0bb | ["CC0-1.0"] | 1 | 2022-01-03T14:00:17.000Z | 2022-01-03T14:00:17.000Z | helper/segmentation/report/utils.py | fishial/Object-Detection-Model | 4792f65ea785156a8e240d9cdbbc0c9d013ea0bb | ["CC0-1.0"] | null | null | null | helper/segmentation/report/utils.py | fishial/Object-Detection-Model | 4792f65ea785156a8e240d9cdbbc0c9d013ea0bb | ["CC0-1.0"] | 1 | 2021-12-21T09:50:53.000Z | 2021-12-21T09:50:53.000Z |
import os
import sys
import cv2
import json
import shutil
import numpy as np
import pandas as pd
from tqdm import tqdm
from datetime import datetime
from os.path import isfile, join
from os import listdir
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.structures import BoxMode
from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.engine import DefaultTrainer
def read_json(path):
if os.path.isfile(path):
with open(path) as f:
data = json.load(f)
return data
else:
return None
def get_current_date_in_format():
now = datetime.now()
return now.strftime("%d_%m_%Y_%H_%M_%S")
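# Builds detectron2-style dataset dicts from the fishial COCO-like JSON, keeping only 'General body shape' annotations.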
def get_dataset_dicts(img_dir, state, json_file="fishial_collection_correct.json"):
json_file = os.path.join(img_dir, json_file)
with open(json_file) as f:
data = json.load(f)
bodyes_shapes_ids = []
for i in data['categories']:
if i['name'] == 'General body shape':
bodyes_shapes_ids.append(int(i['id']))
dataset_dicts = []
img_dir = os.path.join(img_dir, state)
for i in tqdm(range(len(data['images']))):
if 'train_data' in data['images'][i]:
state_json = 'Train' if data['images'][i]['train_data'] else 'Test'
if state != state_json:
continue
record = {}
filename = os.path.join(img_dir, data['images'][i]['file_name'])
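        # cv2.imread().shape[:2] is (rows, cols), so the local names below are swapped; the record fields still end up correct.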
width, height = cv2.imread(filename).shape[:2]
record["file_name"] = filename
record["height"] = width
record["width"] = height
record["image_id"] = i
objs = []
for ann in data['annotations']:
if 'segmentation' in ann and ann['image_id'] == data['images'][i]['id'] and ann[
'category_id'] in bodyes_shapes_ids:
px = []
py = []
for z in range(int(len(ann['segmentation'][0]) / 2)):
px.append(ann['segmentation'][0][z * 2])
py.append(ann['segmentation'][0][z * 2 + 1])
obj = {
"bbox": [np.min(px).tolist(), np.min(py).tolist(), np.max(px).tolist(), np.max(py).tolist()],
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": ann['segmentation'],
"category_id": 0,
"iscrowd": 0
}
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
def beautifier_results(results):
line = 70 * "="
header = line + """\n| AP | AP50 | AP75 | APs | APm | APl | Name \n|:------:|:------:|:------:|:------:|:------:|:------:|:------:"""
for result in results:
header += """\n| {:.3f} | {:.3f} | {:.3f} | {:.3f} | {:.3f} | {:.3f} |{}""".format(
result[1]['segm']['AP'],
result[1]['segm']['AP50'],
result[1]['segm']['AP75'],
result[1]['segm']['APs'],
result[1]['segm']['APm'],
result[1]['segm']['APl'],
result[0])
return header + "\n" + line
def save_to_json(results, mypath):
json_file = {}
for result in results:
single_rec = {
result[0]: {
'AP': result[1]['segm']['AP'],
'AP50': result[1]['segm']['AP50'],
'AP75': result[1]['segm']['AP75'],
'APs': result[1]['segm']['APs'],
'APm': result[1]['segm']['APm'],
'APl': result[1]['segm']['APl']}}
json_file.update(single_rec)
total_path = os.path.join(mypath, "eval_scores.json")
save_json(json_file, total_path)
return json_file
def save_json(object, path):
with open(path, 'w') as f:
json.dump(object, f)
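# Assembles a detectron2 Mask R-CNN (R50-FPN 3x) config with the solver, dataset and output settings used for training.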
def custom_config(num_classes=1, train_name="fishial_Train", test_name="fishial_Test", output_dir="output"):
output_dir = os.path.join(output_dir, get_current_date_in_format())
cfg = get_cfg()
# get configuration from model_zoo
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
# Model
# cfg.MODEL.MASK_ON = True
cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
# cfg.MODEL.BACKBONE.NAME = "build_resnet_backbone"
# cfg.MODEL.RESNETS.DEPTH = 34
# Solver
cfg.SOLVER.BASE_LR = 0.00025
cfg.SOLVER.MAX_ITER = 35000
cfg.SOLVER.STEPS = []
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.CHECKPOINT_PERIOD = 2500
# Test
# cfg.TEST.DETECTIONS_PER_IMAGE = 40
cfg.TEST.EVAL_PERIOD = 35000
# # INPUT
# cfg.INPUT.MIN_SIZE_TRAIN = (800,)
# DATASETS
cfg.DATASETS.TEST = (test_name,)
cfg.DATASETS.TRAIN = (train_name,)
cfg.DATALOADER.NUM_WORKERS = 2
cfg.OUTPUT_DIR = output_dir
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
return cfg
def get_eval_on_selected_set(cfgs, dataset):
result = {'model_name': []}
for i in cfgs:
output_folder = i[1].OUTPUT_DIR
os.makedirs(output_folder, exist_ok=True)
trainer = DefaultTrainer(i[1])
trainer.resume_or_load(resume=True)
evaluator = COCOEvaluator(dataset, i[1], False, output_dir="./{}/".format(output_folder))
val_loader = build_detection_test_loader(i[1], dataset)
value_sd = inference_on_dataset(trainer.model, val_loader, evaluator)
for key in value_sd['segm']:
if key in result:
result[key].append(value_sd['segm'][key])
else:
result.update({key: [value_sd['segm'][key]]})
result['model_name'].append(i[0])
df = pd.DataFrame.from_dict(result)
df['SUM'] = df['AP'] + df['AP50'] + df['AP75'] + \
df['APs'] + df['APm'] + df['APl']
sum_c = df['SUM']
df.drop(labels=['SUM'], axis=1, inplace=True)
df.insert(0, 'SUM', sum_c)
return df
def run_eval_checkpoints(cfg, input_folder, test_dataset):
tmp_folder = os.path.join(os.path.join(input_folder, ".."), "tmp_folder")
os.makedirs(tmp_folder, exist_ok=True)
list_of_files_in_directory = [f for f in listdir(input_folder) if isfile(join(input_folder, f))]
array_of_eval_results = []
for file_name in list_of_files_in_directory:
splited = os.path.splitext(file_name)
if splited[1] == '.pth':
cfg.MODEL.WEIGHTS = os.path.join(input_folder, file_name)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=True)
evaluator = COCOEvaluator(test_dataset, cfg, False, output_dir=tmp_folder)
val_loader = build_detection_test_loader(cfg, "fishial_Test")
value_sd = inference_on_dataset(trainer.model, val_loader, evaluator)
array_of_eval_results.append([file_name, value_sd])
save_to_json(array_of_eval_results, input_folder)
def remove_folder(folder_path):
try:
shutil.rmtree(folder_path)
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
def remove_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
else:
print("[CLEANER] File not found in the directory")
def remove_tmp_files(path):
files_to_remove = ["last_checkpoint", 'metrics.json', "model_final.pth"]
list_of_files_in_directory = [f for f in listdir(path) if isfile(join(path, f))]
for file_name in list_of_files_in_directory:
splited = file_name.split(".")
if splited[0] == 'events':
files_to_remove.append(file_name)
for i in files_to_remove:
remove_file(os.path.join(path, i))
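# --- Usage sketch (illustrative, not part of the original script) ---
# A minimal example of wiring the helpers above together: build a config for a
# single-class model and score every saved checkpoint on the test split. The
# dataset names are the defaults used elsewhere in this file and are assumed
# to be registered with detectron2's DatasetCatalog beforehand.
if __name__ == "__main__":
    cfg = custom_config(num_classes=1,
                        train_name="fishial_Train",
                        test_name="fishial_Test",
                        output_dir="output")
    # Writes per-checkpoint segmentation AP metrics to eval_scores.json
    run_eval_checkpoints(cfg, cfg.OUTPUT_DIR, "fishial_Test")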
| 33.726891 | 155 | 0.592251 |
6017d42bab57671f20a5df829f82afb4a8055a39
| 2,261 |
pyde
|
Python
|
sketches/fire/fire.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4 |
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/fire/fire.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/fire/fire.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3 |
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
from random import randint
empty = 0
tree = 1
burning = 20
a = 40
g = 1
nRows = 40
nCols = 40
w = h = 16
grid = []
newgrid = []
def setup():
global trees, fire
size(640, 640)
background(210, 180, 140)
trees = loadImage("tree.png")
fire = loadImage("fire.png")
for x in range(nRows):
grid.append([])
newgrid.append([])
for y in range(nCols):
            # Boundary conditions
if (x > 0) and (y > 0) and (x < nRows-1) and (y < nCols-1) and randint(0, 10000) <= 2000:
grid[x].append(tree)
else:
grid[x].append(empty)
newgrid[:] = grid[:]
frameRate(2)
# noLoop()
def draw():
global grid, newgrid
global trees, fire
noStroke()
background(210, 180, 140)
for i in range(nRows):
for j in range(nCols):
if grid[i][j] == empty:
fill(210, 180, 140)
rect(i*w, j*h, w, h)
elif grid[i][j] == tree:
image(trees, i*w, j*h, w, h)
# fill(255, 0, 0)
# rect(i*w, j*h, w, h)
elif grid[i][j] == burning:
image(fire, i*w, j*h, w, h)
# fill(0, 255, 0)
# rect(i*w, j*h, w, h)
# newgrid[:] = grid[:]
calcNext()
# grid[:] = newgrid[:]
def calcNext():
global grid, newgrid
    # copy rows (not row references) so the update stays synchronous
    newgrid[:] = [row[:] for row in grid]
# Next Generation
for i in range(1, nRows-1):
for j in range(1, nCols-1):
if grid[i][j] == burning:
newgrid[i][j] = empty
                # Is a neighbour burning?
if grid[i-1][j] == tree:
newgrid[i-1][j] = burning
if grid[i][j-1] == tree:
newgrid[i][j-1] = burning
if grid[i][j+1] == tree:
newgrid[i][j+1] = burning
if grid[i+1][j] == tree:
newgrid[i+1][j] = burning
elif grid[i][j] == empty:
if randint(0, 10000) < a:
newgrid[i][j] = tree
if grid[i][j] == tree:
                # Does lightning strike?
if (random(10000) < 1):
newgrid[i][j] = burning
grid[:] = newgrid[:]
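# --- Parameter note (illustrative) ---
# With a = 40, an empty cell regrows a tree with probability ~40/10001 (about
# 0.4% per frame, since randint(0, 10000) is inclusive), and a tree is struck
# by lightning with probability 1/10000 (0.01% per frame). Fires therefore
# start rarely but spread to all four orthogonal neighbours each step.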
| 27.240964 | 101 | 0.435206 |
6025ef7f9bf178fcd286a805eee2c78eee9a92dd
| 1,524 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/stock/doctype/item_price/item_price.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/stock/doctype/item_price/item_price.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/stock/doctype/item_price/item_price.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import throw, _
class ItemPriceDuplicateItem(frappe.ValidationError): pass
from frappe.model.document import Document
class ItemPrice(Document):
def validate(self):
self.validate_item()
self.validate_price_list()
self.check_duplicate_item()
self.update_price_list_details()
self.update_item_details()
def validate_item(self):
if not frappe.db.exists("Item", self.item_code):
throw(_("Item {0} not found").format(self.item_code))
def validate_price_list(self):
enabled = frappe.db.get_value("Price List", self.price_list, "enabled")
if not enabled:
throw(_("Price List {0} is disabled").format(self.price_list))
def check_duplicate_item(self):
if frappe.db.sql("""select name from `tabItem Price`
where item_code=%s and price_list=%s and name!=%s""", (self.item_code, self.price_list, self.name)):
frappe.throw(_("Item {0} appears multiple times in Price List {1}").format(self.item_code, self.price_list),
ItemPriceDuplicateItem)
def update_price_list_details(self):
self.buying, self.selling, self.currency = \
frappe.db.get_value("Price List", {"name": self.price_list, "enabled": 1},
["buying", "selling", "currency"])
def update_item_details(self):
self.item_name, self.item_description = frappe.db.get_value("Item",
self.item_code, ["item_name", "description"])
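# --- Usage sketch (illustrative, not part of the original controller) ---
# How an Item Price record might be created through the Frappe ORM; the
# validate() hooks above run automatically on insert. The field name
# "price_list_rate" is an assumption based on the standard Item Price doctype
# and does not appear in this file.
#
# doc = frappe.get_doc({
#     "doctype": "Item Price",
#     "item_code": "WIDGET-001",         # must exist in tabItem
#     "price_list": "Standard Selling",  # must be an enabled Price List
#     "price_list_rate": 42.0,
# })
# doc.insert()  # raises ItemPriceDuplicateItem on a duplicate item/price list pair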
| 34.636364 | 111 | 0.744751 |
607343d1bdf1b9e2ee33a1c5f5fe3ddc4b6936fd
| 3,748 |
py
|
Python
|
language/mutator.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | 1 |
2021-03-05T07:44:05.000Z
|
2021-03-05T07:44:05.000Z
|
language/mutator.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | 3 |
2017-06-04T03:01:31.000Z
|
2017-08-04T04:04:37.000Z
|
language/mutator.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | null | null | null |
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
import random
from collections import OrderedDict
import numpy as np
from utils.lib import O
from technix.tech_utils import Point, three_others, choice, StarPoint
def default():
"""
Default settings.
:return:
"""
return O(
f=0.75,
cr=0.3,
)
class Mutator(O):
def __init__(self, model, **settings):
self.model = model
self.settings = default().update(**settings)
O.__init__(self)
def mutate_random(self, point, population):
"""
Just another random point
:param point:
:param population:
:return:
"""
other = Point(self.model.generate())
other.evaluate(self.model)
while other in population or other == point:
other = Point(self.model.generate())
other.evaluate(self.model)
return other
def mutate_binary(self, point, population):
two, three, four = three_others(point, population)
random_key = choice(self.model.decisions.keys())
mutant_decisions = OrderedDict()
for key in self.model.decisions.keys():
r = random.random()
if r < self.settings.cr or key == random_key:
mutant_decisions[key] = random.choice([two.decisions[key], three.decisions[key], four.decisions[key]])
else:
mutant_decisions[key] = point.decisions[key]
return Point(mutant_decisions)
def generate(self, presets=None, size=10):
pop = []
presets = {} if presets is None else presets
while len(pop) < size:
solutions = OrderedDict()
model = self.model
if model.decision_map:
ref = {key: np.random.choice(vals) for key, vals in model.decision_map.items()}
for key, decision in model.decisions.items():
if decision.key in presets:
solutions[key] = decision.options[presets[decision.key]].id
else:
solutions[key] = decision.options[ref[decision.key]].id
else:
for key, decision in model.decisions.items():
if key in presets:
solutions[key] = decision.options[presets[key]].id
else:
solutions[key] = np.random.choice(decision.options.values()).id
pop.append(Point(solutions))
return pop
def decision_ranker(self, best, rest):
best_size = len(best)
rest_size = len(rest)
p_best = best_size / (best_size + rest_size)
p_rest = rest_size / (best_size + rest_size)
decisions = []
best_sols = [self.model.get_solution(sol.decisions) for sol in best]
rest_sols = [self.model.get_solution(sol.decisions) for sol in rest]
for d_id, values in self.model.get_decisions().items():
# Implement Ranks
best_scores = {v: 0 for v in values}
for point in best_sols:
# best_scores[self.model.nodes[point.decisions[d_id]].label] += 1
best_scores[point[d_id]] += 1
rest_scores = {v: 0 for v in values}
for point in rest_sols:
rest_scores[point[d_id]] += 1
for key in best_scores.keys():
l_best = best_scores[key] * p_best / len(best_sols)
l_rest = rest_scores[key] * p_rest / len(rest_sols)
sup = 0 if l_best == l_rest == 0 else l_best ** 2 / (l_best + l_rest)
decisions.append(StarPoint(support=sup,
value=key,
name=d_id))
decisions.sort(key=lambda x: x.support, reverse=True)
ranked, aux = [], set()
for dec in decisions:
if dec.name not in aux:
ranked.append(dec)
aux.add(dec.name)
assert len(ranked) == len(self.model.get_decisions()), "Mismatch after sorting support"
return ranked
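# --- Worked example of the support score in decision_ranker (illustrative) ---
# Suppose a decision value appears in 6 of 10 "best" solutions and in 2 of 30
# "rest" solutions, so p_best = 10/40 = 0.25 and p_rest = 30/40 = 0.75:
#   l_best = 6 * 0.25 / 10 = 0.15
#   l_rest = 2 * 0.75 / 30 = 0.05
#   sup    = l_best ** 2 / (l_best + l_rest) = 0.0225 / 0.20 = 0.1125
# Values concentrated in the best population therefore outrank values that are
# spread evenly across both populations.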
| 33.168142 | 110 | 0.638741 |
60a6a1f50f00532b93420defeddef7429dd97f57
| 40,903 |
py
|
Python
|
.vscode/extensions/ms-vscode.cpptools-1.9.0/debugAdapters/lldb/lib/python2.7/site-packages/lldb/macosx/crashlog.py
|
Kvahn-ui/dotfiles
|
3f1364410f5bebcaacca6ae38a8e5fbb9bb51285
|
[
"MIT"
] | 3 |
2016-02-10T14:18:40.000Z
|
2018-02-05T03:15:56.000Z
|
lldb/crashlog.py
|
ddeville/scripts
|
672ac037952e0945a0575712ef2c2821e288a6ca
|
[
"MIT"
] | 4 |
2019-06-16T09:52:03.000Z
|
2019-08-18T02:11:35.000Z
|
lldb/crashlog.py
|
ddeville/scripts
|
672ac037952e0945a0575712ef2c2821e288a6ca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#----------------------------------------------------------------------
# Be sure to add the python path that points to the LLDB shared library.
#
# To use this in the embedded python interpreter using "lldb":
#
# cd /path/containing/crashlog.py
# lldb
# (lldb) script import crashlog
# "crashlog" command installed, type "crashlog --help" for detailed help
# (lldb) crashlog ~/Library/Logs/DiagnosticReports/a.crash
#
# The benefit of running the crashlog command inside lldb in the
# embedded python interpreter is when the command completes, there
# will be a target with all of the files loaded at the locations
# described in the crash log. Only the files that have stack frames
# in the backtrace will be loaded unless the "--load-all" option
# has been specified. This allows users to explore the program in the
# state it was in right at crash time.
#
# On MacOSX csh, tcsh:
# ( setenv PYTHONPATH /path/to/LLDB.framework/Resources/Python ; ./crashlog.py ~/Library/Logs/DiagnosticReports/a.crash )
#
# On MacOSX sh, bash:
# PYTHONPATH=/path/to/LLDB.framework/Resources/Python ./crashlog.py ~/Library/Logs/DiagnosticReports/a.crash
#----------------------------------------------------------------------
import commands
import cmd
import datetime
import glob
import optparse
import os
import platform
import plistlib
import pprint # pp = pprint.PrettyPrinter(indent=4); pp.pprint(command_args)
import re
import shlex
import string
import sys
import time
import uuid
try:
# Just try for LLDB in case PYTHONPATH is already correctly setup
import lldb
except ImportError:
lldb_python_dirs = list()
# lldb is not in the PYTHONPATH, try some defaults for the current platform
platform_system = platform.system()
if platform_system == 'Darwin':
# On Darwin, try the currently selected Xcode directory
xcode_dir = commands.getoutput("xcode-select --print-path")
if xcode_dir:
lldb_python_dirs.append(os.path.realpath(xcode_dir + '/../SharedFrameworks/LLDB.framework/Resources/Python'))
lldb_python_dirs.append(xcode_dir + '/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
lldb_python_dirs.append('/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
success = False
for lldb_python_dir in lldb_python_dirs:
if os.path.exists(lldb_python_dir):
if not (sys.path.__contains__(lldb_python_dir)):
sys.path.append(lldb_python_dir)
try:
import lldb
except ImportError:
pass
else:
print 'imported lldb from: "%s"' % (lldb_python_dir)
success = True
break
if not success:
print "error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly"
sys.exit(1)
from lldb.utils import symbolication
PARSE_MODE_NORMAL = 0
PARSE_MODE_THREAD = 1
PARSE_MODE_IMAGES = 2
PARSE_MODE_THREGS = 3
PARSE_MODE_SYSTEM = 4
class CrashLog(symbolication.Symbolicator):
"""Class that does parses darwin crash logs"""
parent_process_regex = re.compile('^Parent Process:\s*(.*)\[(\d+)\]');
thread_state_regex = re.compile('^Thread ([0-9]+) crashed with')
thread_regex = re.compile('^Thread ([0-9]+)([^:]*):(.*)')
app_backtrace_regex = re.compile('^Application Specific Backtrace ([0-9]+)([^:]*):(.*)')
frame_regex = re.compile('^([0-9]+)\s+([^ ]+)\s+(0x[0-9a-fA-F]+) +(.*)')
image_regex_uuid = re.compile('(0x[0-9a-fA-F]+)[- ]+(0x[0-9a-fA-F]+) +[+]?([^ ]+) +([^<]+)<([-0-9a-fA-F]+)> (.*)');
image_regex_no_uuid = re.compile('(0x[0-9a-fA-F]+)[- ]+(0x[0-9a-fA-F]+) +[+]?([^ ]+) +([^/]+)/(.*)');
empty_line_regex = re.compile('^$')
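    # Illustrative example of a "Binary Images:" line that image_regex_uuid is
    # meant to match (layout assumed from typical Darwin crash reports):
    #   0x100000000 - 0x100fff7ff +MyApp (1.0 - 1) <0F0F0F0F-ABCD-1234-ABCD-0F0F0F0F0F0F> /Applications/MyApp.app/Contents/MacOS/MyApp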
class Thread:
"""Class that represents a thread in a darwin crash log"""
def __init__(self, index, app_specific_backtrace):
self.index = index
self.frames = list()
self.idents = list()
self.registers = dict()
self.reason = None
self.queue = None
self.app_specific_backtrace = app_specific_backtrace
def dump(self, prefix):
if self.app_specific_backtrace:
print "%Application Specific Backtrace[%u] %s" % (prefix, self.index, self.reason)
else:
print "%sThread[%u] %s" % (prefix, self.index, self.reason)
if self.frames:
print "%s Frames:" % (prefix)
for frame in self.frames:
frame.dump(prefix + ' ')
if self.registers:
print "%s Registers:" % (prefix)
for reg in self.registers.keys():
print "%s %-5s = %#16.16x" % (prefix, reg, self.registers[reg])
def dump_symbolicated (self, crash_log, options):
this_thread_crashed = self.app_specific_backtrace
if not this_thread_crashed:
this_thread_crashed = self.did_crash()
if options.crashed_only and this_thread_crashed == False:
return
print "%s" % self
#prev_frame_index = -1
display_frame_idx = -1
for frame_idx, frame in enumerate(self.frames):
disassemble = (this_thread_crashed or options.disassemble_all_threads) and frame_idx < options.disassemble_depth;
if frame_idx == 0:
symbolicated_frame_addresses = crash_log.symbolicate (frame.pc & crash_log.addr_mask, options.verbose)
else:
# Any frame above frame zero and we have to subtract one to get the previous line entry
symbolicated_frame_addresses = crash_log.symbolicate ((frame.pc & crash_log.addr_mask) - 1, options.verbose)
if symbolicated_frame_addresses:
symbolicated_frame_address_idx = 0
for symbolicated_frame_address in symbolicated_frame_addresses:
display_frame_idx += 1
print '[%3u] %s' % (frame_idx, symbolicated_frame_address)
if (options.source_all or self.did_crash()) and display_frame_idx < options.source_frames and options.source_context:
source_context = options.source_context
line_entry = symbolicated_frame_address.get_symbol_context().line_entry
if line_entry.IsValid():
strm = lldb.SBStream()
if line_entry:
lldb.debugger.GetSourceManager().DisplaySourceLinesWithLineNumbers(line_entry.file, line_entry.line, source_context, source_context, "->", strm)
source_text = strm.GetData()
if source_text:
# Indent the source a bit
indent_str = ' '
join_str = '\n' + indent_str
print '%s%s' % (indent_str, join_str.join(source_text.split('\n')))
if symbolicated_frame_address_idx == 0:
if disassemble:
instructions = symbolicated_frame_address.get_instructions()
if instructions:
print
symbolication.disassemble_instructions (crash_log.get_target(),
instructions,
frame.pc,
options.disassemble_before,
options.disassemble_after, frame.index > 0)
print
symbolicated_frame_address_idx += 1
else:
print frame
def add_ident(self, ident):
if not ident in self.idents:
self.idents.append(ident)
def did_crash(self):
return self.reason != None
def __str__(self):
if self.app_specific_backtrace:
s = "Application Specific Backtrace[%u]" % self.index
else:
s = "Thread[%u]" % self.index
if self.reason:
s += ' %s' % self.reason
return s
class Frame:
"""Class that represents a stack frame in a thread in a darwin crash log"""
def __init__(self, index, pc, description):
self.pc = pc
self.description = description
self.index = index
def __str__(self):
if self.description:
return "[%3u] 0x%16.16x %s" % (self.index, self.pc, self.description)
else:
return "[%3u] 0x%16.16x" % (self.index, self.pc)
def dump(self, prefix):
print "%s%s" % (prefix, str(self))
class DarwinImage(symbolication.Image):
"""Class that represents a binary images in a darwin crash log"""
dsymForUUIDBinary = os.path.expanduser('~rc/bin/dsymForUUID')
if not os.path.exists(dsymForUUIDBinary):
dsymForUUIDBinary = commands.getoutput('which dsymForUUID')
dwarfdump_uuid_regex = re.compile('UUID: ([-0-9a-fA-F]+) \(([^\(]+)\) .*')
def __init__(self, text_addr_lo, text_addr_hi, identifier, version, uuid, path):
symbolication.Image.__init__(self, path, uuid);
self.add_section (symbolication.Section(text_addr_lo, text_addr_hi, "__TEXT"))
self.identifier = identifier
self.version = version
def locate_module_and_debug_symbols(self):
# Don't load a module twice...
if self.resolved:
return True
# Mark this as resolved so we don't keep trying
self.resolved = True
uuid_str = self.get_normalized_uuid_string()
print 'Getting symbols for %s %s...' % (uuid_str, self.path),
if os.path.exists(self.dsymForUUIDBinary):
dsym_for_uuid_command = '%s %s' % (self.dsymForUUIDBinary, uuid_str)
s = commands.getoutput(dsym_for_uuid_command)
if s:
plist_root = plistlib.readPlistFromString (s)
if plist_root:
plist = plist_root[uuid_str]
if plist:
if 'DBGArchitecture' in plist:
self.arch = plist['DBGArchitecture']
if 'DBGDSYMPath' in plist:
self.symfile = os.path.realpath(plist['DBGDSYMPath'])
if 'DBGSymbolRichExecutable' in plist:
self.path = os.path.expanduser (plist['DBGSymbolRichExecutable'])
self.resolved_path = self.path
if not self.resolved_path and os.path.exists(self.path):
dwarfdump_cmd_output = commands.getoutput('dwarfdump --uuid "%s"' % self.path)
self_uuid = self.get_uuid()
for line in dwarfdump_cmd_output.splitlines():
match = self.dwarfdump_uuid_regex.search (line)
if match:
dwarf_uuid_str = match.group(1)
dwarf_uuid = uuid.UUID(dwarf_uuid_str)
if self_uuid == dwarf_uuid:
self.resolved_path = self.path
self.arch = match.group(2)
break;
if not self.resolved_path:
self.unavailable = True
print "error\n error: unable to locate '%s' with UUID %s" % (self.path, uuid_str)
return False
if (self.resolved_path and os.path.exists(self.resolved_path)) or (self.path and os.path.exists(self.path)):
print 'ok'
# if self.resolved_path:
# print ' exe = "%s"' % self.resolved_path
# if self.symfile:
# print ' dsym = "%s"' % self.symfile
return True
else:
self.unavailable = True
return False
def __init__(self, path):
"""CrashLog constructor that take a path to a darwin crash log file"""
symbolication.Symbolicator.__init__(self);
self.path = os.path.expanduser(path);
self.info_lines = list()
self.system_profile = list()
self.threads = list()
self.backtraces = list() # For application specific backtraces
self.idents = list() # A list of the required identifiers for doing all stack backtraces
self.crashed_thread_idx = -1
self.version = -1
self.error = None
self.target = None
# With possible initial component of ~ or ~user replaced by that user's home directory.
try:
f = open(self.path)
except IOError:
self.error = 'error: cannot open "%s"' % self.path
return
self.file_lines = f.read().splitlines()
parse_mode = PARSE_MODE_NORMAL
thread = None
app_specific_backtrace = False
for line in self.file_lines:
# print line
line_len = len(line)
if line_len == 0:
if thread:
if parse_mode == PARSE_MODE_THREAD:
if thread.index == self.crashed_thread_idx:
thread.reason = ''
if self.thread_exception:
thread.reason += self.thread_exception
if self.thread_exception_data:
thread.reason += " (%s)" % self.thread_exception_data
if app_specific_backtrace:
self.backtraces.append(thread)
else:
self.threads.append(thread)
thread = None
else:
# only append an extra empty line if the previous line
# in the info_lines wasn't empty
if len(self.info_lines) > 0 and len(self.info_lines[-1]):
self.info_lines.append(line)
parse_mode = PARSE_MODE_NORMAL
# print 'PARSE_MODE_NORMAL'
elif parse_mode == PARSE_MODE_NORMAL:
if line.startswith ('Process:'):
(self.process_name, pid_with_brackets) = line[8:].strip().split(' [')
self.process_id = pid_with_brackets.strip('[]')
elif line.startswith ('Path:'):
self.process_path = line[5:].strip()
elif line.startswith ('Identifier:'):
self.process_identifier = line[11:].strip()
elif line.startswith ('Version:'):
version_string = line[8:].strip()
matched_pair = re.search("(.+)\((.+)\)", version_string)
if matched_pair:
self.process_version = matched_pair.group(1)
self.process_compatability_version = matched_pair.group(2)
else:
                        self.process_version = version_string
self.process_compatability_version = version_string
elif self.parent_process_regex.search(line):
parent_process_match = self.parent_process_regex.search(line)
self.parent_process_name = parent_process_match.group(1)
self.parent_process_id = parent_process_match.group(2)
elif line.startswith ('Exception Type:'):
self.thread_exception = line[15:].strip()
continue
elif line.startswith ('Exception Codes:'):
self.thread_exception_data = line[16:].strip()
continue
elif line.startswith ('Crashed Thread:'):
self.crashed_thread_idx = int(line[15:].strip().split()[0])
continue
elif line.startswith ('Report Version:'):
self.version = int(line[15:].strip())
continue
elif line.startswith ('System Profile:'):
parse_mode = PARSE_MODE_SYSTEM
continue
elif (line.startswith ('Interval Since Last Report:') or
line.startswith ('Crashes Since Last Report:') or
line.startswith ('Per-App Interval Since Last Report:') or
line.startswith ('Per-App Crashes Since Last Report:') or
line.startswith ('Sleep/Wake UUID:') or
line.startswith ('Anonymous UUID:')):
# ignore these
continue
elif line.startswith ('Thread'):
thread_state_match = self.thread_state_regex.search (line)
if thread_state_match:
app_specific_backtrace = False
thread_state_match = self.thread_regex.search (line)
thread_idx = int(thread_state_match.group(1))
parse_mode = PARSE_MODE_THREGS
thread = self.threads[thread_idx]
else:
thread_match = self.thread_regex.search (line)
if thread_match:
app_specific_backtrace = False
parse_mode = PARSE_MODE_THREAD
thread_idx = int(thread_match.group(1))
thread = CrashLog.Thread(thread_idx, False)
continue
elif line.startswith ('Binary Images:'):
parse_mode = PARSE_MODE_IMAGES
continue
elif line.startswith ('Application Specific Backtrace'):
app_backtrace_match = self.app_backtrace_regex.search (line)
if app_backtrace_match:
parse_mode = PARSE_MODE_THREAD
app_specific_backtrace = True
idx = int(app_backtrace_match.group(1))
thread = CrashLog.Thread(idx, True)
self.info_lines.append(line.strip())
elif parse_mode == PARSE_MODE_THREAD:
if line.startswith ('Thread'):
continue
frame_match = self.frame_regex.search(line)
if frame_match:
ident = frame_match.group(2)
thread.add_ident(ident)
if not ident in self.idents:
self.idents.append(ident)
thread.frames.append (CrashLog.Frame(int(frame_match.group(1)), int(frame_match.group(3), 0), frame_match.group(4)))
else:
print 'error: frame regex failed for line: "%s"' % line
elif parse_mode == PARSE_MODE_IMAGES:
image_match = self.image_regex_uuid.search (line)
if image_match:
image = CrashLog.DarwinImage (int(image_match.group(1),0),
int(image_match.group(2),0),
image_match.group(3).strip(),
image_match.group(4).strip(),
uuid.UUID(image_match.group(5)),
image_match.group(6))
self.images.append (image)
else:
image_match = self.image_regex_no_uuid.search (line)
if image_match:
image = CrashLog.DarwinImage (int(image_match.group(1),0),
int(image_match.group(2),0),
image_match.group(3).strip(),
image_match.group(4).strip(),
None,
image_match.group(5))
self.images.append (image)
else:
print "error: image regex failed for: %s" % line
elif parse_mode == PARSE_MODE_THREGS:
stripped_line = line.strip()
# "r12: 0x00007fff6b5939c8 r13: 0x0000000007000006 r14: 0x0000000000002a03 r15: 0x0000000000000c00"
reg_values = re.findall ('([a-zA-Z0-9]+: 0[Xx][0-9a-fA-F]+) *', stripped_line);
for reg_value in reg_values:
#print 'reg_value = "%s"' % reg_value
(reg, value) = reg_value.split(': ')
#print 'reg = "%s"' % reg
#print 'value = "%s"' % value
thread.registers[reg.strip()] = int(value, 0)
elif parse_mode == PARSE_MODE_SYSTEM:
self.system_profile.append(line)
f.close()
def dump(self):
print "Crash Log File: %s" % (self.path)
if self.backtraces:
print "\nApplication Specific Backtraces:"
for thread in self.backtraces:
thread.dump(' ')
print "\nThreads:"
for thread in self.threads:
thread.dump(' ')
print "\nImages:"
for image in self.images:
image.dump(' ')
def find_image_with_identifier(self, identifier):
for image in self.images:
if image.identifier == identifier:
return image
regex_text = '^.*\.%s$' % (identifier)
regex = re.compile(regex_text)
for image in self.images:
if regex.match(image.identifier):
return image
return None
def create_target(self):
#print 'crashlog.create_target()...'
if self.target is None:
self.target = symbolication.Symbolicator.create_target(self)
if self.target:
return self.target
            # We weren't able to open the main executable as a target, but we can still symbolicate
print 'crashlog.create_target()...2'
if self.idents:
for ident in self.idents:
image = self.find_image_with_identifier (ident)
if image:
self.target = image.create_target ()
if self.target:
return self.target # success
print 'crashlog.create_target()...3'
for image in self.images:
self.target = image.create_target ()
if self.target:
return self.target # success
print 'crashlog.create_target()...4'
print 'error: unable to locate any executables from the crash log'
return self.target
def get_target(self):
return self.target
def usage():
print "Usage: lldb-symbolicate.py [-n name] executable-image"
sys.exit(0)
class Interactive(cmd.Cmd):
'''Interactive prompt for analyzing one or more Darwin crash logs, type "help" to see a list of supported commands.'''
image_option_parser = None
def __init__(self, crash_logs):
cmd.Cmd.__init__(self)
self.use_rawinput = False
self.intro = 'Interactive crashlogs prompt, type "help" to see a list of supported commands.'
self.crash_logs = crash_logs
self.prompt = '% '
def default(self, line):
'''Catch all for unknown command, which will exit the interpreter.'''
print "uknown command: %s" % line
return True
def do_q(self, line):
'''Quit command'''
return True
def do_quit(self, line):
'''Quit command'''
return True
def do_symbolicate(self, line):
description='''Symbolicate one or more darwin crash log files by index to provide source file and line information,
inlined stack frames back to the concrete functions, and disassemble the location of the crash
for the first frame of the crashed thread.'''
option_parser = CreateSymbolicateCrashLogOptions ('symbolicate', description, False)
command_args = shlex.split(line)
try:
(options, args) = option_parser.parse_args(command_args)
except:
return
if args:
# We have arguments, they must valid be crash log file indexes
for idx_str in args:
idx = int(idx_str)
if idx < len(self.crash_logs):
SymbolicateCrashLog (self.crash_logs[idx], options)
else:
print 'error: crash log index %u is out of range' % (idx)
else:
# No arguments, symbolicate all crash logs using the options provided
for idx in range(len(self.crash_logs)):
SymbolicateCrashLog (self.crash_logs[idx], options)
def do_list(self, line=None):
'''Dump a list of all crash logs that are currently loaded.
USAGE: list'''
print '%u crash logs are loaded:' % len(self.crash_logs)
for (crash_log_idx, crash_log) in enumerate(self.crash_logs):
print '[%u] = %s' % (crash_log_idx, crash_log.path)
def do_image(self, line):
'''Dump information about one or more binary images in the crash log given an image basename, or all images if no arguments are provided.'''
usage = "usage: %prog [options] <PATH> [PATH ...]"
description='''Dump information about one or more images in all crash logs. The <PATH> can be a full path, image basename, or partial path. Searches are done in this order.'''
command_args = shlex.split(line)
if not self.image_option_parser:
self.image_option_parser = optparse.OptionParser(description=description, prog='image',usage=usage)
self.image_option_parser.add_option('-a', '--all', action='store_true', help='show all images', default=False)
try:
(options, args) = self.image_option_parser.parse_args(command_args)
except:
return
if args:
for image_path in args:
fullpath_search = image_path[0] == '/'
for (crash_log_idx, crash_log) in enumerate(self.crash_logs):
matches_found = 0
for (image_idx, image) in enumerate(crash_log.images):
if fullpath_search:
if image.get_resolved_path() == image_path:
matches_found += 1
print '[%u] ' % (crash_log_idx), image
else:
image_basename = image.get_resolved_path_basename()
if image_basename == image_path:
matches_found += 1
print '[%u] ' % (crash_log_idx), image
if matches_found == 0:
for (image_idx, image) in enumerate(crash_log.images):
resolved_image_path = image.get_resolved_path()
if resolved_image_path and string.find(image.get_resolved_path(), image_path) >= 0:
print '[%u] ' % (crash_log_idx), image
else:
for crash_log in self.crash_logs:
for (image_idx, image) in enumerate(crash_log.images):
print '[%u] %s' % (image_idx, image)
return False
def interactive_crashlogs(options, args):
crash_log_files = list()
for arg in args:
for resolved_path in glob.glob(arg):
crash_log_files.append(resolved_path)
crash_logs = list();
for crash_log_file in crash_log_files:
#print 'crash_log_file = "%s"' % crash_log_file
crash_log = CrashLog(crash_log_file)
if crash_log.error:
print crash_log.error
continue
if options.debug:
crash_log.dump()
if not crash_log.images:
print 'error: no images in crash log "%s"' % (crash_log)
continue
else:
crash_logs.append(crash_log)
interpreter = Interactive(crash_logs)
# List all crash logs that were imported
interpreter.do_list()
interpreter.cmdloop()
def save_crashlog(debugger, command, result, dict):
usage = "usage: %prog [options] <output-path>"
description='''Export the state of current target into a crashlog file'''
parser = optparse.OptionParser(description=description, prog='save_crashlog',usage=usage)
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='display verbose debug info', default=False)
try:
(options, args) = parser.parse_args(shlex.split(command))
except:
result.PutCString ("error: invalid options");
return
if len(args) != 1:
result.PutCString ("error: invalid arguments, a single output file is the only valid argument")
return
out_file = open(args[0], 'w')
if not out_file:
result.PutCString ("error: failed to open file '%s' for writing...", args[0]);
return
target = debugger.GetSelectedTarget()
if target:
identifier = target.executable.basename
if lldb.process:
pid = lldb.process.id
if pid != lldb.LLDB_INVALID_PROCESS_ID:
out_file.write('Process: %s [%u]\n' % (identifier, pid))
out_file.write('Path: %s\n' % (target.executable.fullpath))
out_file.write('Identifier: %s\n' % (identifier))
out_file.write('\nDate/Time: %s\n' % (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
out_file.write('OS Version: Mac OS X %s (%s)\n' % (platform.mac_ver()[0], commands.getoutput('sysctl -n kern.osversion')));
out_file.write('Report Version: 9\n')
for thread_idx in range(lldb.process.num_threads):
thread = lldb.process.thread[thread_idx]
out_file.write('\nThread %u:\n' % (thread_idx))
for (frame_idx, frame) in enumerate(thread.frames):
frame_pc = frame.pc
frame_offset = 0
if frame.function:
block = frame.GetFrameBlock()
block_range = block.range[frame.addr]
if block_range:
block_start_addr = block_range[0]
frame_offset = frame_pc - block_start_addr.load_addr
else:
frame_offset = frame_pc - frame.function.addr.load_addr
elif frame.symbol:
frame_offset = frame_pc - frame.symbol.addr.load_addr
out_file.write('%-3u %-32s 0x%16.16x %s' % (frame_idx, frame.module.file.basename, frame_pc, frame.name))
if frame_offset > 0:
out_file.write(' + %u' % (frame_offset))
line_entry = frame.line_entry
if line_entry:
if options.verbose:
# This will output the fullpath + line + column
out_file.write(' %s' % (line_entry))
else:
out_file.write(' %s:%u' % (line_entry.file.basename, line_entry.line))
column = line_entry.column
if column:
out_file.write(':%u' % (column))
out_file.write('\n')
out_file.write('\nBinary Images:\n')
for module in target.modules:
text_segment = module.section['__TEXT']
if text_segment:
text_segment_load_addr = text_segment.GetLoadAddress(target)
if text_segment_load_addr != lldb.LLDB_INVALID_ADDRESS:
text_segment_end_load_addr = text_segment_load_addr + text_segment.size
identifier = module.file.basename
module_version = '???'
module_version_array = module.GetVersion()
if module_version_array:
module_version = '.'.join(map(str,module_version_array))
out_file.write (' 0x%16.16x - 0x%16.16x %s (%s - ???) <%s> %s\n' % (text_segment_load_addr, text_segment_end_load_addr, identifier, module_version, module.GetUUIDString(), module.file.fullpath))
out_file.close()
else:
result.PutCString ("error: invalid target");
def Symbolicate(debugger, command, result, dict):
try:
SymbolicateCrashLogs (shlex.split(command))
except:
result.PutCString ("error: python exception %s" % sys.exc_info()[0])
def SymbolicateCrashLog(crash_log, options):
if crash_log.error:
print crash_log.error
return
if options.debug:
crash_log.dump()
if not crash_log.images:
print 'error: no images in crash log'
return
if options.dump_image_list:
print "Binary Images:"
for image in crash_log.images:
if options.verbose:
print image.debug_dump()
else:
print image
target = crash_log.create_target ()
if not target:
return
exe_module = target.GetModuleAtIndex(0)
images_to_load = list()
loaded_images = list()
if options.load_all_images:
# --load-all option was specified, load everything up
for image in crash_log.images:
images_to_load.append(image)
else:
# Only load the images found in stack frames for the crashed threads
if options.crashed_only:
for thread in crash_log.threads:
if thread.did_crash():
for ident in thread.idents:
images = crash_log.find_images_with_identifier (ident)
if images:
for image in images:
images_to_load.append(image)
else:
print 'error: can\'t find image for identifier "%s"' % ident
else:
for ident in crash_log.idents:
images = crash_log.find_images_with_identifier (ident)
if images:
for image in images:
images_to_load.append(image)
else:
print 'error: can\'t find image for identifier "%s"' % ident
for image in images_to_load:
if not image in loaded_images:
err = image.add_module (target)
if err:
print err
else:
#print 'loaded %s' % image
loaded_images.append(image)
if crash_log.backtraces:
for thread in crash_log.backtraces:
thread.dump_symbolicated (crash_log, options)
print
for thread in crash_log.threads:
thread.dump_symbolicated (crash_log, options)
print
def CreateSymbolicateCrashLogOptions(command_name, description, add_interactive_options):
usage = "usage: %prog [options] <FILE> [FILE ...]"
option_parser = optparse.OptionParser(description=description, prog='crashlog',usage=usage)
option_parser.add_option('--verbose' , '-v', action='store_true', dest='verbose', help='display verbose debug info', default=False)
option_parser.add_option('--debug' , '-g', action='store_true', dest='debug', help='display verbose debug logging', default=False)
option_parser.add_option('--load-all' , '-a', action='store_true', dest='load_all_images', help='load all executable images, not just the images found in the crashed stack frames', default=False)
option_parser.add_option('--images' , action='store_true', dest='dump_image_list', help='show image list', default=False)
option_parser.add_option('--debug-delay' , type='int', dest='debug_delay', metavar='NSEC', help='pause for NSEC seconds for debugger', default=0)
option_parser.add_option('--crashed-only' , '-c', action='store_true', dest='crashed_only', help='only symbolicate the crashed thread', default=False)
option_parser.add_option('--disasm-depth' , '-d', type='int', dest='disassemble_depth', help='set the depth in stack frames that should be disassembled (default is 1)', default=1)
    option_parser.add_option('--disasm-all'     , '-D', action='store_true', dest='disassemble_all_threads', help='enable disassembly of frames on all threads (not just the crashed thread)', default=False)
option_parser.add_option('--disasm-before' , '-B', type='int', dest='disassemble_before', help='the number of instructions to disassemble before the frame PC', default=4)
option_parser.add_option('--disasm-after' , '-A', type='int', dest='disassemble_after', help='the number of instructions to disassemble after the frame PC', default=4)
option_parser.add_option('--source-context', '-C', type='int', metavar='NLINES', dest='source_context', help='show NLINES source lines of source context (default = 4)', default=4)
option_parser.add_option('--source-frames' , type='int', metavar='NFRAMES', dest='source_frames', help='show source for NFRAMES (default = 4)', default=4)
option_parser.add_option('--source-all' , action='store_true', dest='source_all', help='show source for all threads, not just the crashed thread', default=False)
if add_interactive_options:
option_parser.add_option('-i', '--interactive', action='store_true', help='parse all crash logs and enter interactive mode', default=False)
return option_parser
def SymbolicateCrashLogs(command_args):
description='''Symbolicate one or more darwin crash log files to provide source file and line information,
inlined stack frames back to the concrete functions, and disassemble the location of the crash
for the first frame of the crashed thread.
If this script is imported into the LLDB command interpreter, a "crashlog" command will be added to the interpreter
for use at the LLDB command line. After a crash log has been parsed and symbolicated, a target will have been
created that has all of the shared libraries loaded at the load addresses found in the crash log file. This allows
you to explore the program as if it were stopped at the locations described in the crash log and functions can
be disassembled and lookups can be performed using the addresses found in the crash log.'''
option_parser = CreateSymbolicateCrashLogOptions ('crashlog', description, True)
try:
(options, args) = option_parser.parse_args(command_args)
except:
return
if options.debug:
print 'command_args = %s' % command_args
print 'options', options
print 'args', args
if options.debug_delay > 0:
print "Waiting %u seconds for debugger to attach..." % options.debug_delay
time.sleep(options.debug_delay)
error = lldb.SBError()
if args:
if options.interactive:
interactive_crashlogs(options, args)
else:
for crash_log_file in args:
crash_log = CrashLog(crash_log_file)
SymbolicateCrashLog (crash_log, options)
if __name__ == '__main__':
# Create a new debugger instance
lldb.debugger = lldb.SBDebugger.Create()
SymbolicateCrashLogs (sys.argv[1:])
lldb.SBDebugger.Destroy (lldb.debugger)
elif getattr(lldb, 'debugger', None):
lldb.debugger.HandleCommand('command script add -f lldb.macosx.crashlog.Symbolicate crashlog')
lldb.debugger.HandleCommand('command script add -f lldb.macosx.crashlog.save_crashlog save_crashlog')
print '"crashlog" and "save_crashlog" command installed, use the "--help" option for detailed help'
| 49.280723 | 218 | 0.556854 |
714e8a46a11a60eb8c0e661eacd5f4a91b1a32bf
| 2,160 |
py
|
Python
|
util/ap/py/LongestNonIncreasingSubseq.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2017-05-18T06:11:02.000Z
|
2017-05-18T06:11:02.000Z
|
util/ap/py/LongestNonIncreasingSubseq.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
util/ap/py/LongestNonIncreasingSubseq.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
'''
You can run it directly to see results.
'''
def lnis(nums):
# res is a 0-based array where
# res[i] stores the length
# of lnis ending the number nums[i]
#
# So when we output the length of
# lnis of the whole integer array,
# we will print max(res) instead of
# the last one of res.
res = [1] * len(nums)
for i in range(1, len(nums)):
for j in range(i):
if nums[i] <= nums[j] and res[j] >= res[i]:
res[i] = res[j] + 1
return res
def find_path(res):
idx = [0]
cnt = 2
flag = True
# If res[i] stores the length of lnis
# of the first (i+1) numbers instead of
# the length of lnis ending the number nums[i]
#
# Then, res should look like:
# [1, 1, 1, 2, 2, 3, 3]
# So the idx [2, 3, 5] is what we need, i.e.
# the last 1, and every first other numbers
# where the changes of lengh happen.
#
# However, we can use the original res. That is,
# to find the last before any change, and the
# locations where each change happens.
for i in range(len(res)):
if res[i] == 1 and flag:
idx[0] = i
elif res[i] == cnt:
idx.append(i)
cnt += 1
flag = False
return idx
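# --- Worked trace (illustrative) for nums = [5, 6, 3, 6, 7, 2] ---
# lnis() fills res[i] with the length of the longest non-increasing
# subsequence ending at nums[i]:
#   res = [1, 1, 2, 2, 1, 3]  ->  longest length = max(res) = 3
# find_path(res) keeps the last index still holding 1 plus the first index of
# every new length, giving idx = [1, 2, 5], i.e. the subsequence [6, 3, 2].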
def testcase():
# test cases
# assume that there's one element at least.
tests = []
tests.append([5, 6, 3, 6, 7, 2])
n = 55
    print('#' * n)
    print('')
    print('Problem: Longest Non-Increasing Sub-sequence')
    print('')
    print('=' * n)
for i, t in enumerate(tests):
assert t != []
res = lnis(t)
path = find_path(res)
subseq = [t[idx] for idx in path]
        print('')
        print('test case #' + str(i) + ':')
        print(' Longest Length: ' + str(max(res)))
        print(' Array of Integers ' + str(t))
        print(' Indices of the path: ' + str(path))
        print(' Longest Non-increasing Subsequence: ' + str(subseq))
        print('')
        print('=' * n)
def main():
testcase()
if __name__ == "__main__":
main()
| 21.818182 | 65 | 0.513426 |
1c3a5319a9e2f955caf7dd2d1ca5fa42ac6dd3c8
| 2,226 |
py
|
Python
|
Main App/app/main.py
|
Rathore25/Sapiens-QA
|
0bf794784c3a3b26b541dbc2fa894756c979e5c4
|
[
"MIT"
] | null | null | null |
Main App/app/main.py
|
Rathore25/Sapiens-QA
|
0bf794784c3a3b26b541dbc2fa894756c979e5c4
|
[
"MIT"
] | null | null | null |
Main App/app/main.py
|
Rathore25/Sapiens-QA
|
0bf794784c3a3b26b541dbc2fa894756c979e5c4
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, jsonify
from flask_cors import CORS
import os
import pandas as pd
import requests
import wget
import json
from cdqa.pipeline import QAPipeline
app = Flask(__name__)
CORS(app)
print("Started main.py !!!")
response = requests.get('https://docs.google.com/uc?export=download&id=1oSUFKZMao_gQxGDpCZuRXV6EULjFjmoZ')
sapiens_original = response.json()
print("Fetched sapiens original")
response = requests.get('https://docs.google.com/uc?export=download&id=1b5xy1Z4EuFVXkMOQIupl27a1kRNPguVr')
sapiens_annotated = response.json()
with open('./sapiens_annotated.json', 'w') as file:
json.dump(sapiens_annotated, file)
print("Fetched sapiens annotated")
dictionary_df = []
for item in sapiens_original['data']:
title = item['title']
paragraphs = []
for paragraph in item['paragraphs']:
paragraphs.append(paragraph['context'])
dictionary_df.append({'title':title, 'paragraphs':paragraphs})
df = pd.DataFrame(dictionary_df)
# Get original Bert_qa and then train on our annotated dataset
wget.download(url='https://github.com/cdqa-suite/cdQA/releases/download/bert_qa/bert_qa.joblib', out='./')
cdqa_pipeline = QAPipeline(reader='./bert_qa.joblib')
cdqa_pipeline.fit_retriever(df=df)
cdqa_pipeline.fit_reader('./sapiens_annotated.json')
# Use the pretrained annotated Distilbert file
#wget.download(url='https://github.com/Rathore25/Sapiens-QA/raw/main/Pretrained Data/sapiens_distilbert.joblib', out='./')
#cdqa_pipeline = QAPipeline(reader='./sapiens_distilbert.joblib')
#cdqa_pipeline.fit_retriever(df=df)
# Use the pretrained annotated Bert file
#wget.download(url='https://github.com/Rathore25/Sapiens-QA/raw/main/Pretrained Data/sapiens_bert.joblib', out='./')
#cdqa_pipeline = QAPipeline(reader='./sapiens_bert.joblib')
#cdqa_pipeline.fit_retriever(df=df)
@app.route("/api", methods=["GET"])
def api():
query = request.args.get("query")
prediction = cdqa_pipeline.predict(query=query)
return jsonify(
query=query, answer=prediction[0], title=prediction[1], paragraph=prediction[2], score=prediction[3]
)
# A welcome message to test our server
@app.route('/')
def index():
return "<h1>Welcome to Sapiens AI server !!</h1>"
| 30.916667 | 122 | 0.754717 |
c777a80aa72aea59183b054b6dd45d2f639227fc
| 261 |
py
|
Python
|
src/monoalpha/main.py
|
hacker-school/Kryptografie
|
de033d435ca5bbb908968596dfa8d12c26317167
|
[
"CC0-1.0"
] | null | null | null |
src/monoalpha/main.py
|
hacker-school/Kryptografie
|
de033d435ca5bbb908968596dfa8d12c26317167
|
[
"CC0-1.0"
] | null | null | null |
src/monoalpha/main.py
|
hacker-school/Kryptografie
|
de033d435ca5bbb908968596dfa8d12c26317167
|
[
"CC0-1.0"
] | null | null | null |
import monoalphabeticCipher as mc
cipher = mc.random_monoalpha_cipher()
print(cipher)
encrypted = mc.encrypt_with_monoalpha('Hello all you hackers out there!', cipher)
decrypted = mc.decrypt_with_monoalpha(encrypted, cipher)
print(encrypted)
print(decrypted)
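# --- Hypothetical sketch of the imported module (it is not included here) ---
# monoalphabeticCipher is assumed to expose the three functions used above; a
# minimal stand-in consistent with that usage could look like this:
#
# import random, string
#
# def random_monoalpha_cipher(pool=string.ascii_letters):
#     original = list(pool)
#     shuffled = list(pool)
#     random.shuffle(shuffled)
#     return dict(zip(original, shuffled))
#
# def encrypt_with_monoalpha(message, cipher):
#     return ''.join(cipher.get(ch, ch) for ch in message)
#
# def decrypt_with_monoalpha(encrypted, cipher):
#     reverse = {v: k for k, v in cipher.items()}
#     return ''.join(reverse.get(ch, ch) for ch in encrypted)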
| 26.1 | 81 | 0.816092 |
c7b95bcbc8e3e77affcc8c9dd34811fed84b27ca
| 19,949 |
py
|
Python
|
aci_app_creator.py
|
HyechurnJang/genaci
|
aac26c8b59a5d8b3884aa6f588f40f2e741c1cc3
|
[
"Apache-2.0"
] | null | null | null |
aci_app_creator.py
|
HyechurnJang/genaci
|
aac26c8b59a5d8b3884aa6f588f40f2e741c1cc3
|
[
"Apache-2.0"
] | null | null | null |
aci_app_creator.py
|
HyechurnJang/genaci
|
aac26c8b59a5d8b3884aa6f588f40f2e741c1cc3
|
[
"Apache-2.0"
] | null | null | null |
'''
ACI APIC AppCreator
@contact: [email protected]
@version: 1.1
'''
from __future__ import print_function
import argparse
import json
import os
import re
import readline
import shutil
import signal
import sys
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
import logging
import aci_app_validator
import aci_app_packager
readline.parse_and_bind('tab: complete')
readline.parse_and_bind('set editing-mode vi')
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
@staticmethod
def bold(text):
return '{}{}{}'.format(bcolors.BOLD, text, bcolors.ENDC)
@staticmethod
def header(text):
return '{}{}{}'.format(bcolors.HEADER, text, bcolors.ENDC)
@staticmethod
def blue(text):
return '{}{}{}'.format(bcolors.OKBLUE, text, bcolors.ENDC)
@staticmethod
def green(text):
return '{}{}{}'.format(bcolors.OKGREEN, text, bcolors.ENDC)
@staticmethod
def warning(text):
return '{}{}{}'.format(bcolors.WARNING, text, bcolors.ENDC)
@staticmethod
def fail(text):
return '{}{}{}'.format(bcolors.FAIL, text, bcolors.ENDC)
SCRIPT_DIR_PATH = os.path.dirname(os.path.realpath(__file__))
TEMPLATE_DIR = os.path.join(SCRIPT_DIR_PATH, 'AppTemplate')
STATELESS_TEMPLATE_DIR = os.path.join(TEMPLATE_DIR, 'StatelessAppTemplate')
STATEFUL_TEMPLATE_DIR = os.path.join(TEMPLATE_DIR, 'StatefulAppTemplate')
STATEFULJS_TEMPLATE_DIR = os.path.join(TEMPLATE_DIR, 'StatefulJS')
ACI_PACKAGER_PATH = os.path.join(SCRIPT_DIR_PATH, 'aci_app_packager.py')
if not os.path.isfile(ACI_PACKAGER_PATH):
print(bcolors.fail('Packager not found ({0})'.format(SCRIPT_DIR_PATH)))
sys.exit(1)
class MandatoryFieldsTemplate(object):
fields = {
'version': '1.0',
'name': 'MyApp',
'shortdescr': 'My first ACI app',
'vendor': 'Vendor',
'apicversion': '2.2(1n)',
'permissions': ['admin'],
'permissionslevel': 'read',
'author': 'Author',
'category': ['Beta'],
'contact-phone': '123-4567890',
'contact-url': 'http://www.cisco.com/go/aci',
'contact-email': '[email protected]'
}
def __init__(self, arg):
super(MandatoryFieldsTemplate, self).__init__()
@classmethod
def getFields(cls):
return cls.fields
MANDATORY_FIELDS_DESCRIPTION = {
'version': 'Format: M.m',
'name': 'App name',
'shortdescr': 'Description',
'vendor': 'Vendor name',
'apicversion': 'Min APIC version',
'permissions': 'Permissions required, let field empty when finished',
'permissionslevel': 'read/write',
'author': 'Author(s)',
'category': 'Category'
}
MANDATORY_FIELDS_STATEFUL_DESCRIPTION = {
'api': 'API description'
}
OPTIONAL_FIELDS_DESCRIPTION = {
'contact': 'Contact'
}
# Validating functions
def validator_factory(json_dict):
validator = aci_app_validator.Validator()
validator.appMeta = json_dict
return validator
def validate_field(field, value, state='stateless'):
if field == 'contact-email' or field == 'contact-url' or field == 'contact-phone':
return aci_app_validator.Validator.validateJsonFieldsDuringGeneration({'contact': {field: value}}, state)
return aci_app_validator.Validator.validateJsonFieldsDuringGeneration({field: value}, state)
# App creation functions
def output_directory_path(parent_out_dir, vendordomain, appid):
return os.path.join(parent_out_dir, '{0}_{1}'.format(vendordomain, appid))
def file_replace(fname, pat, s_after):
# first, see if the pattern is even in the file.
with open(fname) as f:
if not any(re.search(pat, line) for line in f):
return # pattern does not occur in file so we are done.
# pattern is in the file, so perform replace operation.
with open(fname) as f:
out_fname = fname + ".tmp"
out = open(out_fname, "w")
for line in f:
out.write(re.sub(pat, s_after, line))
out.close()
try:
os.rename(out_fname, fname)
except:
os.remove(fname)
os.rename(out_fname, fname)
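# Illustrative example: copy_structure() below relies on file_replace() to
# substitute template placeholders, e.g.
#   file_replace('app.html', '{{APPID}}', 'MyFirstApp')
# rewrites every occurrence of {{APPID}} in app.html in place.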
# Read file
def read_content_file(fname):
s = ''
with open(fname, 'r') as f:
s = str(f.read())
return s
def copy_structure(app, misc):
state = misc['state']
outdir = misc['output_dir']
protocol = misc['protocol']
output_dir = ''
if outdir:
if not os.path.isdir(outdir):
raise Exception('Output directory doesn\'t exist')
output_dir = output_directory_path(outdir, app['vendordomain'], app['appid'])
else:
output_dir = output_directory_path(SCRIPT_DIR_PATH, app['vendordomain'], app['appid'])
shutil.copytree(STATELESS_TEMPLATE_DIR, output_dir)
if state == 'stateful':
files_in_stateful_dir = os.listdir(STATEFUL_TEMPLATE_DIR)
for item in files_in_stateful_dir:
path = os.path.join(STATEFUL_TEMPLATE_DIR, item)
if os.path.isdir(path):
shutil.copytree(path, os.path.join(output_dir, item))
uiassets_path = os.path.join(output_dir, 'UIAssets')
apphtml_path = os.path.join(uiassets_path, 'app.html')
file_replace(apphtml_path, '{{VENDORDOMAIN}}', app['vendordomain'])
file_replace(apphtml_path, '{{APPID}}', app['appid'])
file_replace(apphtml_path, '{{APPNAME}}', app['name'])
if state == 'stateful':
service_path = os.path.join(output_dir, 'Service')
serverpy_path = os.path.join(service_path, 'server.py')
file_replace(serverpy_path, '{{VENDORDOMAIN}}', app['vendordomain'])
file_replace(serverpy_path, '{{APPID}}', app['appid'])
file_replace(serverpy_path, '{{PROTOCOL}}', protocol)
query_url = 'window.BACKEND_QUERY_URL = location.protocol + "//" + window.location["host"] + "/appcenter/{}/{}";'.format(
app['vendordomain'], app['appid'])
file_replace(apphtml_path, '{{BACKEND_QUERY_URL}}', query_url)
testAPI = read_content_file(os.path.join(STATEFULJS_TEMPLATE_DIR, 'testAPI.html'))
file_replace(apphtml_path, '{{TEST_API}}', testAPI)
getTenant = read_content_file(os.path.join(STATEFULJS_TEMPLATE_DIR, 'getTenant.html'))
file_replace(apphtml_path, '{{GET_TENANT}}', getTenant)
alerts = read_content_file(os.path.join(STATEFULJS_TEMPLATE_DIR, 'alerts.html'))
file_replace(apphtml_path, '{{ALERTS_STATEFUL}}', alerts)
app['api'] = {}
app['api']['testAPI.json'] = "API to test the connectivity to the docker container."
app['api']['getTenant.json'] = "Get the list of the tenants in the docker container."
else:
file_replace(apphtml_path, '{{BACKEND_QUERY_URL}}', '')
file_replace(apphtml_path, '{{TEST_API}}', '')
file_replace(apphtml_path, '{{GET_TENANT}}', '')
file_replace(apphtml_path, '{{ALERTS_STATEFUL}}', '')
app_json_file = os.path.join(output_dir, 'app.json')
with open(app_json_file, 'w+') as appfile:
appfile.write(json.dumps(app, indent=4))
return output_dir
# Input functions
def input_custom_msg(msg):
if msg:
return raw_input('> {} '.format(msg))
else:
return raw_input('> ')
def input_line():
return input_custom_msg('')
def input_with_check(msg, check_fct, error_msg):
inp = ''
inp_correct = False
while not inp_correct:
inp = input_custom_msg(msg)
if not check_fct(inp):
print(bcolors.fail(error_msg))
else:
inp_correct = True
return inp
def yes_no_input():
def check_y_n(inp):
return inp and (inp.lower() == 'y' or inp.lower() == 'n')
return input_with_check('(y/n)', check_y_n, '"y"(es) or "n"(o)')
def input_check_json_field(field, state='stateless', skip_possible=False, custom_msg=''):
inp = ''
inp_correct = False
while not inp_correct:
inp = input_custom_msg(custom_msg)
if skip_possible and not inp:
inp_correct = True
else:
err_code = 0
err_msg = ''
if field == 'permissions':
err_code, err_msg = validate_field(field, [inp], state)
else:
err_code, err_msg = validate_field(field, inp, state)
if err_code != 0:
print(bcolors.fail(err_msg))
else:
inp_correct = True
return inp
### Packaging
def package_app(app_directory, output_dir):
#return os.system("python {0} -f {1}".format(ACI_PACKAGER_PATH, app_directory))
packager = aci_app_packager.Packager()
return packager.main(app_directory, SCRIPT_DIR_PATH, output_dir)
if __name__ == '__main__':
output_dir = '' # Where to output the app
appid = '' # appid, unique identifier of the app
vendordomain = '' # Organization
state = '' # 'staless' or 'stateful' (contains docker image)
protocol = '' # HTTPS or HTTP
parser = argparse.ArgumentParser()
parser._optionals.title = 'Optional arguments'
parser.add_argument('-o', '--output', help='Output directory', default='')
args = vars(parser.parse_args())
output_dir = ''
if not args['output']:
output_dir = SCRIPT_DIR_PATH
else:
output_dir = args['output']
if not os.path.isabs(output_dir):
output_dir = os.path.join(os.getcwd(), output_dir)
# Init the fields
fields = {}
fields['iconfile'] = 'icon.png'
# Title
print(bcolors.header('*******************************************'))
print(bcolors.bold(bcolors.header('* ACI App Creator *')))
print(bcolors.header('*******************************************'))
print('Welcome! This tool will guide you through the creation of a fully functional ACI App Center application.')
print('The information that you will provide can be changed later on, please read the ACI App Center Developer '
'Guide to learn more about it.')
print('')
print(bcolors.header('--- General information ---'))
print(bcolors.bold('Let\'s begin! What should be the name of your application? (e.g. "TestingApp", "MyFirstApp",'
'...)'))
print('Note: The name of the application will also be used as the application ID.\nThis can be changed afterwards '
'in "app.json".')
fields['name'] = input_check_json_field('appid', custom_msg='(Application name)')
fields['appid'] = fields['name']
print('')
print(bcolors.bold('What is the name of your company?'))
print('Note: The company name will also be used as the vendor domain.\nThis can be changed afterwards in '
'"app.json".')
fields['vendor'] = input_check_json_field('vendordomain', custom_msg='(Company name)')
fields['vendordomain'] = fields['vendor']
# Check if output directory exists
output_test = output_directory_path(output_dir, fields['vendordomain'], fields['appid'])
if os.path.exists(output_test):
print(bcolors.fail('The output directory ({0}) already exists...'.format(output_test)))
sys.exit(1)
print('')
print(bcolors.bold('Can you describe briefly what the application is supposed to do?'))
print('To skip it, leave the field empty. The following description will be used: "{}"'.format(
bcolors.blue(MandatoryFieldsTemplate.getFields()['shortdescr'])))
fields['shortdescr'] = input_check_json_field('shortdescr', custom_msg='(Description)', skip_possible=True)
if not fields['shortdescr']:
fields['shortdescr'] = MandatoryFieldsTemplate.getFields()['shortdescr']
print('')
print(bcolors.header('--- About you ---'))
print(bcolors.bold('What is your name?'))
fields['author'] = input_check_json_field('author', custom_msg='(Author)')
print('')
fields['contact'] = {}
print(bcolors.bold('What is your email address?'))
print('To skip it, leave the field empty. The following email address will be used: "{}"'.format(
bcolors.blue(MandatoryFieldsTemplate.getFields()['contact-email'])))
fields['contact']['contact-email'] = input_check_json_field('contact-email', custom_msg='(Email)',
skip_possible=True)
if not fields['contact']['contact-email']:
fields['contact']['contact-email'] = MandatoryFieldsTemplate.getFields()['contact-email']
print('')
print(bcolors.bold('What is the website of your company?'))
print('To skip it, leave the field empty. The following website will be used: "{}"'.format(
bcolors.blue(MandatoryFieldsTemplate.getFields()['contact-url'])))
fields['contact']['contact-url'] = input_check_json_field('contact-url', custom_msg='(URL)', skip_possible=True)
if not fields['contact']['contact-url']:
fields['contact']['contact-url'] = MandatoryFieldsTemplate.getFields()['contact-url']
print('')
print(bcolors.bold('What is the contact phone number of your company?'))
print('To skip it, leave the field empty. The following phone will be used: "{}"'.format(
bcolors.blue(MandatoryFieldsTemplate.getFields()['contact-phone'])))
fields['contact']['contact-phone'] = input_check_json_field('contact-phone', custom_msg='(Phone)',
skip_possible=True)
if not fields['contact']['contact-phone']:
fields['contact']['contact-phone'] = MandatoryFieldsTemplate.getFields()['contact-phone']
print('')
print(bcolors.header('--- App versions ---'))
print(bcolors.bold('Would you like to change the version of the application?'))
print('To skip it, leave the field empty. The following version will be used: "{}"'.format(
bcolors.blue(MandatoryFieldsTemplate.getFields()['version'])))
fields['version'] = input_check_json_field('version', custom_msg='(Version, the format is: Major.Minor)',
skip_possible=True)
if not fields['version']:
fields['version'] = MandatoryFieldsTemplate.getFields()['version']
print('')
print(bcolors.bold('Would you like to change the minimum APIC version on which the application can run?'))
print('To skip it, leave the field empty. The following APIC version will be used: "{}"'.format(
bcolors.blue(MandatoryFieldsTemplate.getFields()['apicversion'])))
fields['apicversion'] = input_check_json_field('apicversion',
custom_msg='(Min APIC version, the format is: Major.Minor(mp), where m=maintenance and p=patch)',
skip_possible=True)
if not fields['apicversion']:
fields['apicversion'] = MandatoryFieldsTemplate.getFields()['apicversion']
print('')
print(bcolors.header('--- Permissions ---'))
print(bcolors.bold('What permissions would you like the application to have?'))
print(
'Read more at this address: http://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/kb/b_KB_AAA-RBAC-roles-privileges.html')
print('To skip it, leave the field empty. The following permissions will be used: "{}"'.format(
bcolors.blue(MandatoryFieldsTemplate.getFields()['permissions'])))
fields['permissions'] = []
while True:
inp = input_check_json_field('permissions', custom_msg='(Permission)', skip_possible=True)
if len(fields['permissions']) == 0 and not inp:
fields['permissions'] = MandatoryFieldsTemplate.getFields()['permissions']
break
elif not inp:
break
else:
fields['permissions'].append(inp)
err_code, err_msg = validate_field('permissions', fields['permissions'])
if err_code != 0:
print(bcolors.fail(err_msg))
else:
inp_correct = True
print('')
print(bcolors.bold('Should the application run with read or write privileges?'))
print('To skip it, leave the field empty. The following privilege will be used: "{}"'.format(
    bcolors.blue(MandatoryFieldsTemplate.getFields()['permissionslevel'])))
fields['permissionslevel'] = input_with_check('(Privilege)',
                                              lambda inp: inp == '' or inp == 'read' or inp == 'write',
                                              'The input should be "read" or "write"')
if not fields['permissionslevel']:
fields['permissionslevel'] = MandatoryFieldsTemplate.getFields()['permissionslevel']
print('')
print(bcolors.header('--- Categories ---'))
categories = ['Tools and Utilities',
'Visibility and Monitoring',
'Optimization',
'Security',
'Networking',
'Cisco Automation and Orchestration',
'Beta']
print(bcolors.bold('Here are different categories:'))
print('')
print(bcolors.bold('\n'.join(categories)))
print('')
print(bcolors.bold('Please select the category (or categories) most suitable for your app:'))
print('If none of the categories are selected, the following category will be used: "{}"'.format(
bcolors.blue(MandatoryFieldsTemplate.getFields()['category'])))
fields['category'] = []
for i in range(0, len(categories)):
inp = input_with_check('({}, y/n)'.format(categories[i]),
lambda x: x and (x.lower() == 'y' or x.lower() == 'n'), '"y"(es) or "n"(o)')
if inp == 'y':
fields['category'].append(categories[i])
if len(fields['category']) == 0:
fields['category'] = MandatoryFieldsTemplate.getFields()['category']
print('')
print(bcolors.header('--- Other ---'))
print(bcolors.bold('Would you like to add a docker container to your application?'))
print(
'Note: There are 2 kinds of apps: "{0}" and "{1}": \n - A stateless app is only composed of a front-end part, '
    'it communicates with the APIC without keeping any state. \n - A stateful app has a back-end (docker '
'container) to keep a state amongst multiple launches of the app.'.format(
bcolors.bold('stateless'), bcolors.bold('stateful')))
inp = yes_no_input()
print('\n')
if inp.lower() == 'y':
state = 'stateful'
print(bcolors.bold('Will the container communicate with the APIC using HTTPS?'))
print('Note: If no, HTTP will be used.')
inp_https = yes_no_input()
protocol = 'https' if inp_https.lower() == 'y' else 'http'
else:
state = 'stateless'
err_code, err_msg = aci_app_validator.Validator.validateJsonFieldsDuringGeneration(fields, state)
if err_code != 0:
print(bcolors.fail('Validation failed: {}'.format(err_msg)))
sys.exit()
print('\n')
print(bcolors.green('We are now creating your fully functional app in this directory: {}...\n'.format(output_dir)))
app_directory = copy_structure(fields, {'state': state, 'output_dir': output_dir, 'protocol': protocol})
print(bcolors.green('The application has been successfully created.'))
print(
'Don\'t forget, you can modify the content of the directory and repackage the app using the command "python '
'aci_app_packager.py -f {}"'.format(
app_directory))
print('\n')
print(bcolors.header('--- Packaging ---'))
print(bcolors.bold('Would you like to package the app?'))
print('This will create a .aci application.')
inp = yes_no_input()
if inp.lower() == 'y':
rc = package_app(app_directory, output_dir)
if rc != 0:
print(bcolors.fail('Packaging of the application failed.'))
| 36.009025 | 148 | 0.632312 |
1be5cac2ead63e9044d2ba8d27c327dce3d7b124
| 576 |
py
|
Python
|
scripts/ping_database.py
|
Slanman3755/VERAS
|
07a6b26f9360e7bc605b767489cc86c683b57fae
|
[
"MIT"
] | null | null | null |
scripts/ping_database.py
|
Slanman3755/VERAS
|
07a6b26f9360e7bc605b767489cc86c683b57fae
|
[
"MIT"
] | null | null | null |
scripts/ping_database.py
|
Slanman3755/VERAS
|
07a6b26f9360e7bc605b767489cc86c683b57fae
|
[
"MIT"
] | null | null | null |
#! python
import click
import veras
@click.command()
@click.option("--protocol", "-c", default="bolt")
@click.option("--host", "-h", default="localhost")
@click.option("--port", "-p", default=7687)
@click.option("--username", "-u", default="neo4j")
@click.option("--password", "-i", default="neo4j")
def ping_database(protocol, host, port, username, password):
route_db = veras.route_database.RouteDatabase(f"{protocol}://{host}:{port}", username, password)
route_db.print_route("klax", "ksfo")
route_db.close()
if __name__ == "__main__":
ping_database()
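# Example invocation (illustrative only; the connection details are placeholders
# and assume a reachable Neo4j instance with these credentials):
#   python ping_database.py --protocol bolt --host localhost --port 7687 \
#       --username neo4j --password neo4j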
| 27.428571 | 100 | 0.670139 |
403be07bfa3783f0c2b73ba11312f3329752835d
| 15,265 |
py
|
Python
|
src/game_sim.py
|
Gandagorn/PySchnaps
|
e8cd865fd60553e36e424eb82be81462f57e6f1f
|
[
"MIT"
] | null | null | null |
src/game_sim.py
|
Gandagorn/PySchnaps
|
e8cd865fd60553e36e424eb82be81462f57e6f1f
|
[
"MIT"
] | null | null | null |
src/game_sim.py
|
Gandagorn/PySchnaps
|
e8cd865fd60553e36e424eb82be81462f57e6f1f
|
[
"MIT"
] | null | null | null |
"""
28.09.2019
Schnapsen Simulation
Rules:
4 Farben:
H Herz 0
K Karo 1
P Pik 2
K Kreuz 3
Werte:
A Ass 11
Z Zehner 10
K König 4
D Dame 3
B Bube 2
Players:
G Geber
V Vorhand
Geben:
5 cards each (= 10)
1 card A Atout (= Trumpf)
9 cards T Talon
Gameplay:
Stichzwang {T, F}, Farbzwang {T, F}:
Spieler muss:
1) mit höherer Karte der angespielten Farbe stechen, sonst
2) eine niedrigere Karte der angespielten Farbe zurückgeben, sonst
3) mit Trumpf stechen, sonst
4) beliebige Karte abwerfen
Note: Farbzwang vor Stichzwang! Nur mit Trumpf stechen, wenn man Farbe nicht hat!
Stechen:
gleiche Farbe und höherer Wert
Trumpf
Talon:
Talonsperre {T, F}
Nach Runde bekommt Stecher höchste Karte, anderer nächsthöhere
wenn aufgebraucht => Farb- und Stichzwang = True
wenn zugedreht => Farb- und Stichzwang = True, Talonsperre = True
Ansagen:
König + Dame (Trumpf) = 40
König + Dame (nicht Trumpf) = 20
König oder Dame muss gespielt werden
Note: Ansagen zählen erst ab ersten Stich
Austausch:
Spieler mit Bube der Trumpffarbe darf gegen Trumpfkarte austauschen
Gewinnen:
66 Punkte nach Stich (braucht Ausmeldung!)
letzter Stich (nicht bei Talonsperre)
Punkte bei Sieg:
Hat der Gegner keinen Stich erzielt, gewinnt der Spieler drei Punkte.
hat der Gegner 32 oder weniger Augen erhalten, gewinnt der Spieler zwei Punkte.
hat der Gegner 33 oder mehr Augen erhalten, gewinnt der Spieler einen Punkt.
Weird Rules:
Liegt nur mehr eine Karte als Talon auf der offenen Trumpfkarte, darf man austauschen, jedoch nicht zudrehen.
Kann der Spieler, der den Talon gesperrt hat, keine 66 Augen erzielen, bzw. kann sich sein Gegner zuvor ausmelden, so gewinnt der Gegner
drei Punkte, falls er zum Zeitpunkt des Zudrehens noch stichlos war, ansonsten
zwei Punkte
Programm Syntax:
Command: [Ansage, Stich, Wechsel, Sperre]
"""
import random
import numpy as np
decode_table = {
"colors": {
0: "Herz",
1: "Karo",
2: "Pik",
3: "Kreuz",
},
"values": {
2: "Bube",
3: "Dame",
4: "König",
10: "Zehner",
11: "Ass",
}
}
#%%
# util functions
def decodeCards(cards):
"""
converts numerical cards array to string cards array
"""
return [decode_table["colors"][i] + " "+ decode_table["values"][j] for i,j in cards]
def encodeCards(cards_str):
"""
converts string cards array to numerical cards array
"""
reverse_decode_table = getReverseDecodeTable()
cards = []
for card in cards_str:
card = card.strip().split(" ")
cards.append(card)
return [(reverse_decode_table["colors"][i], reverse_decode_table["values"][j]) for i,j in cards]
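# Illustrative round trip between the two helpers above (not executed here;
# card tuples are (color, value) as defined in decode_table):
#   decodeCards([(0, 11), (2, 3)])        -> ['Herz Ass', 'Pik Dame']
#   encodeCards(['Herz Ass', 'Pik Dame']) -> [(0, 11), (2, 3)]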
def generateNewDeck():
"""
generates a new random deck
"""
cards = []
for color in range(4):
for value in [2,3,4,10,11]:
cards.append( (color, value) )
random.shuffle(cards)
return cards
def getInput(player, action):
"""
handles all user input
returns the numeric encoding of the card
"""
ret = None
# get input from command line
if action == "card":
while True:
card = input(player.name+", which card to play? ")
try:
card = encodeCards([card])[0]
except:
print("card is written as: [color] [value]!")
continue
ret = card
break
elif action == "call":
while True:
first_card = input(player.name+", first card to call? ")
second_card = input(player.name+", second card to call? ")
cards = [first_card, second_card]
try:
cards = encodeCards(cards)
except:
print("card is written as: [color] [value]!")
continue
ret = cards
break
elif action == None:
ret = input(player.name+", what do you want to do?").strip()
# print(ret) # to see piped input
return ret
def getReverseDecodeTable():
"""
reverses the decoding table allow quick access with string values
"""
reverse_colors = {v:k for k, v in decode_table["colors"].items()}
reverse_values = {v:k for k, v in decode_table["values"].items()}
return {"colors": reverse_colors, "values":reverse_values}
#%%
# Game Class
class Game:
def __init__(self, cards=None):
if cards == None:
self.cards = generateNewDeck()
else:
self.cards = cards
self.playerA = Player(self.cards[:5], "Alice")
self.playerB = Player(self.cards[5:10], "Bob")
self.players = [self.playerA, self.playerB]
self.talon = self.cards[10:19]
self.trump = [self.cards[19]]
self.trump_color = self.trump[0][0]
self.talon_closed = False
self.who_closed = None
def cardsClosedValid(self, cardA, cardB, cardsB):
"""
checks whether Player B played a valid card when the talon is closed
"""
cardA_color, cardA_value = cardA
cardB_color, cardB_value = cardB
other_cards_with_A_color = [card for card in cardsB if card[0] == cardA_color]
if cardA_color == cardB_color:
if cardB_value > cardA_value:
return True
else:
# check if there was a card that could have beat A's
if len(other_cards_with_A_color) == 0:
return True
bigger_cards = np.array([value for color, value in other_cards_with_A_color]) > cardA_value
return not np.cumsum(bigger_cards)[-1]
else:
# check if player B had other cards with cardA's color
if cardA_color in [color for color, value in other_cards_with_A_color]:
return False
else:
return True
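    # Illustrative example (not executed): with the talon closed, suppose player A
    # leads cardA = (2, 10) (Pik Zehner) and player B answers with cardB = (2, 4)
    # (Pik König) while still holding (2, 11) (Pik Ass) in cardsB. cardsClosedValid()
    # then returns False, because B could have won the trick in the led suit and is
    # therefore obliged to do so (Farb- und Stichzwang).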
def playersAction(self, players):
"""
handles the players input and checks if the input is correct
"""
cards = []
for player in players:
called = False
called_cards = None
while True:
user_input = getInput(player, None)
if user_input == "quit":
exit()
if user_input == "Stechen":
card = getInput(player, "card")
if called and card not in called_cards:
print("Not allowed to play that! Play one of the called cards")
elif card not in player.cards:
print("Not allowed to play that! Card not in possession")
                    else:
                        cards.append(card)
                        break
elif user_input == "Ansage":
if not called:
called_cards = getInput(player, "call")
if player.processCall(called_cards, self.trump_color):
called = True
else:
print("Only 1 call per round!")
elif user_input == "Tausch":
self.trade(player)
elif user_input == "Zudrehen":
self.talon_closed = True
self.must_color = True
self.must_sting = True
self.who_closed = player
print("Closed talon")
else:
print("Commands are: [Stechen, Ansage, Tausch, Zudrehen]")
cardA, cardB = cards
playerA, playerB = players
playerA.cards.remove(cardA)
playerB.cards.remove(cardB)
# check if correct cards have been played when talon is closed
if self.talon_closed and not self.cardsClosedValid(cardA, cardB, playerB.cards):
print(playerA.name)
print(playerB.name)
print(playerB.cards)
print("Wrong Card from {}!".format(playerB.name))
print("{} wins the game!".format(playerA.name))
exit()
return cards
def print(self):
"""
prints the round
"""
playerA, playerB = self.players
cardsA_str = decodeCards(playerA.cards)
cardsB_str = decodeCards(playerB.cards)
talon_str = decodeCards(self.talon)
trump_str = decodeCards(self.trump)
trump_color_str = decode_table["colors"][self.trump_color]
print(2* ('#'*80 + '\n'))
print("{}'s cards are: {}".format(playerA.name, ", ".join(cardsA_str)))
print("{}'s points {} + {}".format(playerA.name, playerA.points, playerA.called_points))
print("{}'s cards are: {}".format(playerB.name, ", ".join(cardsB_str)))
print("{}'s points {} + {}".format(playerB.name, playerB.points, playerB.called_points))
print("Talon cards are: {}".format(", ".join(talon_str)))
print("Trump card is: {}".format(", ".join(trump_str)))
print("Trump color is: {}".format(trump_color_str))
print(self.players[0].name + " is starting\n")
def round(self):
"""
handles the steps of each round
"""
# print standings
self.print()
# check which player is first
playerA, playerB = self.players
# players make moves and play their cards
cardA, cardB = self.playersAction([playerA, playerB])
print("{} vs {}".format(decodeCards([cardA])[0], decodeCards([cardB])[0]))
# check which card won
if self.sting(cardA, cardB) == 0:
winner = playerA
loser = playerB
else:
winner = playerB
loser = playerA
# winner gets points
winner.points += cardA[1] + cardB[1]
print(winner.name+" won "+str(cardA[1] + cardB[1])+" points in this round!\n")
if winner.getTotalPoints() >= 66:
print("{} wins the game with {} points!".format(winner.name, str(winner.getTotalPoints())))
exit()
if loser.getTotalPoints() >= 66:
# it is possible to win the game while losing a round
print("{} wins the game with {} points!".format(loser.name, str(loser.getTotalPoints())))
exit()
# running out of cards: last sting wins
if len(playerA.cards) == 0:
if self.who_closed != None:
# last sting does not win, closing player has to have enough, otherwise he loses
# if he won, this if would not be reached
print("{} lost the game with {} points, even with closing it!".format(self.who_closed.name, str(self.who_closed.getTotalPoints())))
if playerA == self.who_closed:
print("{} wins the game with {} points!".format(playerB.name, str(playerB.getTotalPoints())))
else:
print("{} wins the game with {} points!".format(playerA.name, str(playerA.getTotalPoints())))
else:
# last sting wins
print("{} wins the game with {} points!".format(winner.name, str(winner.getTotalPoints())))
exit()
# take new cards from talon
if not self.talon_closed:
if len(self.talon) > 1:
winner.cards.append(self.talon.pop())
loser.cards.append(self.talon.pop())
else:
winner.cards.append(self.talon.pop())
loser.cards.append(self.trump[0])
self.trump = []
self.talon_closed = True
# set starting order for next round
self.players = [winner, loser]
def sting(self,cardA, cardB):
"""
returns index of winner, 0 = cardA, 1 = cardB
"""
cA_color, cA_value = cardA
cB_color, cB_value = cardB
if cA_color == cB_color:
return cB_value > cA_value # same color, value decides
else:
if cA_color == self.trump_color: # trump vs non-trump
return 0
elif cB_color == self.trump_color: # non-trump vs trump
return 1
return 0 # non-trump vs non-trump
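    # Illustrative examples (not executed), assuming self.trump_color == 2 (Pik):
    #   sting((1, 4), (1, 11))  -> True (index 1): same suit, Karo Ass beats Karo König
    #   sting((0, 10), (2, 2))  -> 1: Pik Bube trumps Herz Zehner
    #   sting((0, 10), (1, 11)) -> 0: neither card is trump, so the led card wins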
def trade(self, player):
"""
trade boy of trump color with trump card
"""
trump_boy = (self.trump_color, 2)
if not trump_boy in player.cards:
print("no trade possible! Card not in possession")
else:
index = player.cards.index(trump_boy)
player.cards[index] = self.trump[0]
self.trump[0] = trump_boy
print("Trade successfull! New Trump is "+ decodeCards(self.trump)[0])
class Player:
def __init__(self, cards, name):
self.cards = cards
self.points = 0
self.name = name
self.called_points = 0
def processCall(self, called_cards, trump_color):
"""
checks players cards if call is possible and adds points
"""
        # check if in possession
        if called_cards[0] not in self.cards or called_cards[1] not in self.cards:
            print("cards not in possession")
return False
# len 2 and same color
if len(called_cards) != 2 or called_cards[0][0] != called_cards[1][0]:
print("not same color")
return False
# check if Dame and König
values = [called_cards[0][1], called_cards[1][1]]
if 3 not in values or 4 not in values:
print("not Dame or Bube")
return False
# check if trump:
if called_cards[0][0] == trump_color:
self.called_points += 40
else:
self.called_points += 20
return True
def getTotalPoints(self):
"""
returns players total points, check if called points are valid
"""
if self.points > 0:
return self.points + self.called_points
else:
return self.points
cards = [(2, 10), (2, 4), (0, 10), (3, 4), (2, 3), (3, 3), (3, 11), (3, 2), (0, 2), (1, 2), (0, 3), (0, 11), (0, 4), (1, 11), (2, 2), (1, 10), (2, 11), (3, 10), (1, 4), (1, 3)] #generateNewDeck()
# g = Game(cards)
if __name__ == '__main__':
g = Game()
while True:
g.round()
| 31.216769 | 195 | 0.532853 |
408ad44e6e4b5f4c2aff4fbdc17258ec9a6ac689
| 24,041 |
py
|
Python
|
Contrib-Inspur/openbmc/poky/bitbake/lib/toaster/toastermain/management/commands/buildimport.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 5 |
2019-11-11T07:57:26.000Z
|
2022-03-28T08:26:53.000Z
|
Contrib-Inspur/openbmc/poky/bitbake/lib/toaster/toastermain/management/commands/buildimport.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 3 |
2019-09-05T21:47:07.000Z
|
2019-09-17T18:10:45.000Z
|
Contrib-Inspur/openbmc/poky/bitbake/lib/toaster/toastermain/management/commands/buildimport.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 11 |
2019-07-20T00:16:32.000Z
|
2022-01-11T14:17:48.000Z
|
#
# BitBake Toaster Implementation
#
# Copyright (C) 2018 Wind River Systems
#
# SPDX-License-Identifier: GPL-2.0-only
#
# buildimport: import a project for project specific configuration
#
# Usage:
# (a) Set up Toaster environent
#
# (b) Call buildimport
# $ /path/to/bitbake/lib/toaster/manage.py buildimport \
# --name=$PROJECTNAME \
# --path=$BUILD_DIRECTORY \
# --callback="$CALLBACK_SCRIPT" \
# --command="configure|reconfigure|import"
#
# (c) Return is "|Default_image=%s|Project_id=%d"
#
# (d) Open Toaster to this project using for example:
# $ xdg-open http://localhost:$toaster_port/toastergui/project_specific/$project_id
#
# (e) To delete a project:
# $ /path/to/bitbake/lib/toaster/manage.py buildimport \
# --name=$PROJECTNAME --delete-project
#
# ../bitbake/lib/toaster/manage.py buildimport --name=test --path=`pwd` --callback="" --command=import
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from orm.models import ProjectManager, Project, Release, ProjectVariable
from orm.models import Layer, Layer_Version, LayerSource, ProjectLayer
from toastergui.api import scan_layer_content
from django.db import OperationalError
import os
import re
import os.path
import subprocess
import shutil
# Toaster variable section delimiters
TOASTER_PROLOG = '#=== TOASTER_CONFIG_PROLOG ==='
TOASTER_EPILOG = '#=== TOASTER_CONFIG_EPILOG ==='
# quick development/debugging support
verbose = 2
def _log(msg):
if 1 == verbose:
print(msg)
elif 2 == verbose:
f1=open('/tmp/toaster.log', 'a')
f1.write("|" + msg + "|\n" )
f1.close()
__config_regexp__ = re.compile( r"""
^
(?P<exp>export\s+)?
(?P<var>[a-zA-Z0-9\-_+.${}/~]+?)
(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?
\s* (
(?P<colon>:=) |
(?P<lazyques>\?\?=) |
(?P<ques>\?=) |
(?P<append>\+=) |
(?P<prepend>=\+) |
(?P<predot>=\.) |
(?P<postdot>\.=) |
=
) \s*
(?!'[^']*'[^']*'$)
(?!\"[^\"]*\"[^\"]*\"$)
(?P<apo>['\"])
(?P<value>.*)
(?P=apo)
$
""", re.X)
class Command(BaseCommand):
args = "<name> <path> <release>"
help = "Import a command line build directory"
vars = {}
toaster_vars = {}
def add_arguments(self, parser):
parser.add_argument(
'--name', dest='name', required=True,
help='name of the project',
)
parser.add_argument(
'--path', dest='path', required=True,
help='path to the project',
)
parser.add_argument(
'--release', dest='release', required=False,
help='release for the project',
)
parser.add_argument(
'--callback', dest='callback', required=False,
help='callback for project config update',
)
parser.add_argument(
'--delete-project', dest='delete_project', required=False,
help='delete this project from the database',
)
parser.add_argument(
'--command', dest='command', required=False,
help='command (configure,reconfigure,import)',
)
# Extract the bb variables from a conf file
def scan_conf(self,fn):
vars = self.vars
toaster_vars = self.toaster_vars
#_log("scan_conf:%s" % fn)
if not os.path.isfile(fn):
return
f = open(fn, 'r')
#statements = ast.StatementGroup()
lineno = 0
is_toaster_section = False
while True:
lineno = lineno + 1
s = f.readline()
if not s:
break
w = s.strip()
# skip empty lines
if not w:
continue
# evaluate Toaster sections
if w.startswith(TOASTER_PROLOG):
is_toaster_section = True
continue
if w.startswith(TOASTER_EPILOG):
is_toaster_section = False
continue
s = s.rstrip()
while s[-1] == '\\':
s2 = f.readline().strip()
lineno = lineno + 1
if (not s2 or s2 and s2[0] != "#") and s[0] == "#" :
echo("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
s = s[:-1] + s2
# skip comments
if s[0] == '#':
continue
# process the line for just assignments
m = __config_regexp__.match(s)
if m:
groupd = m.groupdict()
var = groupd['var']
value = groupd['value']
if groupd['lazyques']:
if not var in vars:
vars[var] = value
continue
if groupd['ques']:
if not var in vars:
vars[var] = value
continue
# preset empty blank for remaining operators
if not var in vars:
vars[var] = ''
if groupd['append']:
vars[var] += value
elif groupd['prepend']:
vars[var] = "%s%s" % (value,vars[var])
elif groupd['predot']:
vars[var] = "%s %s" % (value,vars[var])
elif groupd['postdot']:
vars[var] = "%s %s" % (vars[var],value)
else:
vars[var] = "%s" % (value)
# capture vars in a Toaster section
if is_toaster_section:
toaster_vars[var] = vars[var]
# DONE WITH PARSING
f.close()
self.vars = vars
self.toaster_vars = toaster_vars
# Update the scanned project variables
def update_project_vars(self,project,name):
pv, create = ProjectVariable.objects.get_or_create(project = project, name = name)
if (not name in self.vars.keys()) or (not self.vars[name]):
self.vars[name] = pv.value
else:
if pv.value != self.vars[name]:
pv.value = self.vars[name]
pv.save()
# Find the git version of the installation
def find_layer_dir_version(self,path):
# * rocko ...
install_version = ''
cwd = os.getcwd()
os.chdir(path)
p = subprocess.Popen(['git', 'branch', '-av'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
out = out.decode("utf-8")
for branch in out.split('\n'):
if ('*' == branch[0:1]) and ('no branch' not in branch):
install_version = re.sub(' .*','',branch[2:])
break
if 'remotes/m/master' in branch:
install_version = re.sub('.*base/','',branch)
break
os.chdir(cwd)
return install_version
# Compute table of the installation's registered layer versions (branch or commit)
def find_layer_dir_versions(self,INSTALL_URL_PREFIX):
lv_dict = {}
layer_versions = Layer_Version.objects.all()
for lv in layer_versions:
layer = Layer.objects.filter(pk=lv.layer.pk)[0]
if layer.vcs_url:
url_short = layer.vcs_url.replace(INSTALL_URL_PREFIX,'')
else:
url_short = ''
# register the core, branch, and the version variations
lv_dict["%s,%s,%s" % (url_short,lv.dirpath,'')] = (lv.id,layer.name)
lv_dict["%s,%s,%s" % (url_short,lv.dirpath,lv.branch)] = (lv.id,layer.name)
lv_dict["%s,%s,%s" % (url_short,lv.dirpath,lv.commit)] = (lv.id,layer.name)
#_log(" (%s,%s,%s|%s) = (%s,%s)" % (url_short,lv.dirpath,lv.branch,lv.commit,lv.id,layer.name))
return lv_dict
# Apply table of all layer versions
def extract_bblayers(self):
# set up the constants
bblayer_str = self.vars['BBLAYERS']
TOASTER_DIR = os.environ.get('TOASTER_DIR')
INSTALL_CLONE_PREFIX = os.path.dirname(TOASTER_DIR) + "/"
TOASTER_CLONE_PREFIX = TOASTER_DIR + "/_toaster_clones/"
INSTALL_URL_PREFIX = ''
layers = Layer.objects.filter(name='openembedded-core')
for layer in layers:
if layer.vcs_url:
INSTALL_URL_PREFIX = layer.vcs_url
break
INSTALL_URL_PREFIX = INSTALL_URL_PREFIX.replace("/poky","/")
INSTALL_VERSION_DIR = TOASTER_DIR
INSTALL_URL_POSTFIX = INSTALL_URL_PREFIX.replace(':','_')
INSTALL_URL_POSTFIX = INSTALL_URL_POSTFIX.replace('/','_')
INSTALL_URL_POSTFIX = "%s_%s" % (TOASTER_CLONE_PREFIX,INSTALL_URL_POSTFIX)
# get the set of available layer:layer_versions
lv_dict = self.find_layer_dir_versions(INSTALL_URL_PREFIX)
# compute the layer matches
layers_list = []
for line in bblayer_str.split(' '):
if not line:
continue
if line.endswith('/local'):
continue
# isolate the repo
layer_path = line
line = line.replace(INSTALL_URL_POSTFIX,'').replace(INSTALL_CLONE_PREFIX,'').replace('/layers/','/').replace('/poky/','/')
# isolate the sub-path
path_index = line.rfind('/')
if path_index > 0:
sub_path = line[path_index+1:]
line = line[0:path_index]
else:
sub_path = ''
# isolate the version
if TOASTER_CLONE_PREFIX in layer_path:
is_toaster_clone = True
# extract version from name syntax
version_index = line.find('_')
if version_index > 0:
version = line[version_index+1:]
line = line[0:version_index]
else:
version = ''
_log("TOASTER_CLONE(%s/%s), version=%s" % (line,sub_path,version))
else:
is_toaster_clone = False
# version is from the installation
version = self.find_layer_dir_version(layer_path)
_log("LOCAL_CLONE(%s/%s), version=%s" % (line,sub_path,version))
# capture the layer information into layers_list
layers_list.append( (line,sub_path,version,layer_path,is_toaster_clone) )
return layers_list,lv_dict
#
def find_import_release(self,layers_list,lv_dict,default_release):
# poky,meta,rocko => 4;openembedded-core
release = default_release
for line,path,version,layer_path,is_toaster_clone in layers_list:
key = "%s,%s,%s" % (line,path,version)
if key in lv_dict:
lv_id = lv_dict[key]
if 'openembedded-core' == lv_id[1]:
_log("Find_import_release(%s):version=%s,Toaster=%s" % (lv_id[1],version,is_toaster_clone))
# only versions in Toaster managed layers are accepted
if not is_toaster_clone:
break
try:
release = Release.objects.get(name=version)
except:
pass
break
_log("Find_import_release:RELEASE=%s" % release.name)
return release
# Apply the found conf layers
def apply_conf_bblayers(self,layers_list,lv_dict,project,release=None):
for line,path,version,layer_path,is_toaster_clone in layers_list:
# Assert release promote if present
if release:
version = release
# try to match the key to a layer_version
key = "%s,%s,%s" % (line,path,version)
key_short = "%s,%s,%s" % (line,path,'')
lv_id = ''
if key in lv_dict:
lv_id = lv_dict[key]
lv = Layer_Version.objects.get(pk=int(lv_id[0]))
pl,created = ProjectLayer.objects.get_or_create(project=project,
layercommit=lv)
pl.optional=False
pl.save()
_log(" %s => %s;%s" % (key,lv_id[0],lv_id[1]))
elif key_short in lv_dict:
lv_id = lv_dict[key_short]
lv = Layer_Version.objects.get(pk=int(lv_id[0]))
pl,created = ProjectLayer.objects.get_or_create(project=project,
layercommit=lv)
pl.optional=False
pl.save()
_log(" %s ?> %s" % (key,lv_dict[key_short]))
else:
_log("%s <= %s" % (key,layer_path))
found = False
# does local layer already exist in this project?
try:
for pl in ProjectLayer.objects.filter(project=project):
if pl.layercommit.layer.local_source_dir == layer_path:
found = True
_log(" Project Local Layer found!")
except Exception as e:
_log("ERROR: Local Layer '%s'" % e)
pass
if not found:
# Does Layer name+path already exist?
try:
layer_name_base = os.path.basename(layer_path)
_log("Layer_lookup: try '%s','%s'" % (layer_name_base,layer_path))
layer = Layer.objects.get(name=layer_name_base,local_source_dir = layer_path)
# Found! Attach layer_version and ProjectLayer
layer_version = Layer_Version.objects.create(
layer=layer,
project=project,
layer_source=LayerSource.TYPE_IMPORTED)
layer_version.save()
pl,created = ProjectLayer.objects.get_or_create(project=project,
layercommit=layer_version)
pl.optional=False
pl.save()
found = True
# add layer contents to this layer version
scan_layer_content(layer,layer_version)
_log(" Parent Local Layer found in db!")
except Exception as e:
_log("Layer_exists_test_failed: Local Layer '%s'" % e)
pass
if not found:
# Insure that layer path exists, in case of user typo
if not os.path.isdir(layer_path):
_log("ERROR:Layer path '%s' not found" % layer_path)
continue
# Add layer to db and attach project to it
layer_name_base = os.path.basename(layer_path)
# generate a unique layer name
layer_name_matches = {}
for layer in Layer.objects.filter(name__contains=layer_name_base):
layer_name_matches[layer.name] = '1'
layer_name_idx = 0
layer_name_test = layer_name_base
while layer_name_test in layer_name_matches.keys():
layer_name_idx += 1
layer_name_test = "%s_%d" % (layer_name_base,layer_name_idx)
# create the layer and layer_verion objects
layer = Layer.objects.create(name=layer_name_test)
layer.local_source_dir = layer_path
layer_version = Layer_Version.objects.create(
layer=layer,
project=project,
layer_source=LayerSource.TYPE_IMPORTED)
layer.save()
layer_version.save()
pl,created = ProjectLayer.objects.get_or_create(project=project,
layercommit=layer_version)
pl.optional=False
pl.save()
# register the layer's content
_log(" Local Layer Add content")
scan_layer_content(layer,layer_version)
_log(" Local Layer Added '%s'!" % layer_name_test)
# Scan the project's conf files (if any)
def scan_conf_variables(self,project_path):
# scan the project's settings, add any new layers or variables
if os.path.isfile("%s/conf/local.conf" % project_path):
self.scan_conf("%s/conf/local.conf" % project_path)
self.scan_conf("%s/conf/bblayers.conf" % project_path)
# Import then disable old style Toaster conf files (before 'merged_attr')
old_toaster_local = "%s/conf/toaster.conf" % project_path
if os.path.isfile(old_toaster_local):
self.scan_conf(old_toaster_local)
shutil.move(old_toaster_local, old_toaster_local+"_old")
old_toaster_layer = "%s/conf/toaster-bblayers.conf" % project_path
if os.path.isfile(old_toaster_layer):
self.scan_conf(old_toaster_layer)
shutil.move(old_toaster_layer, old_toaster_layer+"_old")
# Scan the found conf variables (if any)
def apply_conf_variables(self,project,layers_list,lv_dict,release=None):
if self.vars:
# Catch vars relevant to Toaster (in case no Toaster section)
self.update_project_vars(project,'DISTRO')
self.update_project_vars(project,'MACHINE')
self.update_project_vars(project,'IMAGE_INSTALL_append')
self.update_project_vars(project,'IMAGE_FSTYPES')
self.update_project_vars(project,'PACKAGE_CLASSES')
# These vars are typically only assigned by Toaster
#self.update_project_vars(project,'DL_DIR')
#self.update_project_vars(project,'SSTATE_DIR')
# Assert found Toaster vars
for var in self.toaster_vars.keys():
pv, create = ProjectVariable.objects.get_or_create(project = project, name = var)
pv.value = self.toaster_vars[var]
_log("* Add/update Toaster var '%s' = '%s'" % (pv.name,pv.value))
pv.save()
# Assert found BBLAYERS
if 0 < verbose:
for pl in ProjectLayer.objects.filter(project=project):
release_name = 'None' if not pl.layercommit.release else pl.layercommit.release.name
print(" BEFORE:ProjectLayer=%s,%s,%s,%s" % (pl.layercommit.layer.name,release_name,pl.layercommit.branch,pl.layercommit.commit))
self.apply_conf_bblayers(layers_list,lv_dict,project,release)
if 0 < verbose:
for pl in ProjectLayer.objects.filter(project=project):
release_name = 'None' if not pl.layercommit.release else pl.layercommit.release.name
print(" AFTER :ProjectLayer=%s,%s,%s,%s" % (pl.layercommit.layer.name,release_name,pl.layercommit.branch,pl.layercommit.commit))
def handle(self, *args, **options):
project_name = options['name']
project_path = options['path']
project_callback = options['callback'] if options['callback'] else ''
release_name = options['release'] if options['release'] else ''
#
# Delete project
#
if options['delete_project']:
try:
print("Project '%s' delete from Toaster database" % (project_name))
project = Project.objects.get(name=project_name)
# TODO: deep project delete
project.delete()
print("Project '%s' Deleted" % (project_name))
return
except Exception as e:
print("Project '%s' not found, not deleted (%s)" % (project_name,e))
return
#
# Create/Update/Import project
#
# See if project (by name) exists
project = None
try:
# Project already exists
project = Project.objects.get(name=project_name)
except Exception as e:
pass
# Find the installation's default release
default_release = Release.objects.get(id=1)
# SANITY: if 'reconfig' but project does not exist (deleted externally), switch to 'import'
if ("reconfigure" == options['command']) and (None == project):
options['command'] = 'import'
# 'Configure':
if "configure" == options['command']:
# Note: ignore any existing conf files
# create project, SANITY: reuse any project of same name
project = Project.objects.create_project(project_name,default_release,project)
# 'Re-configure':
if "reconfigure" == options['command']:
# Scan the directory's conf files
self.scan_conf_variables(project_path)
# Scan the layer list
layers_list,lv_dict = self.extract_bblayers()
# Apply any new layers or variables
self.apply_conf_variables(project,layers_list,lv_dict)
# 'Import':
if "import" == options['command']:
# Scan the directory's conf files
self.scan_conf_variables(project_path)
# Remove these Toaster controlled variables
for var in ('DL_DIR','SSTATE_DIR'):
self.vars.pop(var, None)
self.toaster_vars.pop(var, None)
# Scan the layer list
layers_list,lv_dict = self.extract_bblayers()
# Find the directory's release, and promote to default_release if local paths
release = self.find_import_release(layers_list,lv_dict,default_release)
# create project, SANITY: reuse any project of same name
project = Project.objects.create_project(project_name,release,project)
# Apply any new layers or variables
self.apply_conf_variables(project,layers_list,lv_dict,release)
# WORKAROUND: since we now derive the release, redirect 'newproject_specific' to 'project_specific'
project.set_variable('INTERNAL_PROJECT_SPECIFIC_SKIPRELEASE','1')
# Set up the project's meta data
project.builddir = project_path
project.merged_attr = True
project.set_variable(Project.PROJECT_SPECIFIC_CALLBACK,project_callback)
project.set_variable(Project.PROJECT_SPECIFIC_STATUS,Project.PROJECT_SPECIFIC_EDIT)
if ("configure" == options['command']) or ("import" == options['command']):
# preset the mode and default image recipe
project.set_variable(Project.PROJECT_SPECIFIC_ISNEW,Project.PROJECT_SPECIFIC_NEW)
project.set_variable(Project.PROJECT_SPECIFIC_DEFAULTIMAGE,"core-image-minimal")
# Assert any extended/custom actions or variables for new non-Toaster projects
if not len(self.toaster_vars):
pass
else:
project.set_variable(Project.PROJECT_SPECIFIC_ISNEW,Project.PROJECT_SPECIFIC_NONE)
# Save the updated Project
project.save()
_log("Buildimport:project='%s' at '%d'" % (project_name,project.id))
if ('DEFAULT_IMAGE' in self.vars) and (self.vars['DEFAULT_IMAGE']):
print("|Default_image=%s|Project_id=%d" % (self.vars['DEFAULT_IMAGE'],project.id))
else:
print("|Project_id=%d" % (project.id))
| 42.02972 | 203 | 0.549104 |
46268c046a00724cf26ff86383d189c22e9d74cc
| 6,274 |
py
|
Python
|
mumath/glyph.py
|
fourpoints/mumath
|
f1c36c4a5b3c32a3e7f8e7a922eafea8b7a14fd4
|
[
"MIT"
] | null | null | null |
mumath/glyph.py
|
fourpoints/mumath
|
f1c36c4a5b3c32a3e7f8e7a922eafea8b7a14fd4
|
[
"MIT"
] | null | null | null |
mumath/glyph.py
|
fourpoints/mumath
|
f1c36c4a5b3c32a3e7f8e7a922eafea8b7a14fd4
|
[
"MIT"
] | null | null | null |
from .context.tokens import *
from .context import base
from .util import listify
from importlib import import_module
from collections import namedtuple
from types import ModuleType, SimpleNamespace
try:
from functools import cache
except ImportError:
from functools import lru_cache as cache
# For diffs
# The functions look weird, but they set the first and second bit
# to either 0 or 1. E.g. `(+1).__or__(0b00) == 0b01`.
flags = {
OPEN_NEXT: (+1).__or__,
SHUT_NEXT: (~1).__and__,
OPEN_PREV: (+2).__or__,
SHUT_PREV: (~2).__and__,
}
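# Worked example of the flag helpers above (illustrative, not executed),
# starting from the two-bit state 0b00:
#   flags[OPEN_NEXT](0b00) == 0b01   # (+1).__or__ sets bit 0
#   flags[SHUT_NEXT](0b01) == 0b00   # (~1).__and__ clears bit 0
#   flags[OPEN_PREV](0b01) == 0b11   # (+2).__or__ sets bit 1
#   flags[SHUT_PREV](0b11) == 0b01   # (~2).__and__ clears bit 1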
ttypes = {
# basic syntactical components
# KEYWORD: "keyword",
TEXT_SEP: "text_separator",
SUBB: "subb",
SUB: "sub",
SUPP: "supp",
SUP: "sup",
OPEN_NEXT: "open_next",
SHUT_NEXT: "shut_next",
OPEN_PREV: "open_prev",
SHUT_PREV: "shut_prev",
SOFT_SPACE: "soft_space",
STRING: "string",
COMMENT: "comment",
# basic functional components
MATRIX: "matrix",
BEGIN: "begin",
END: "end",
ENVIRONMENT: "environments",
OVER: "over",
CHOOSE: "choose",
SERIES: "series", # macro
SQRT: "sqrt",
CLASS_: "class_",
TEXT: "text",
NO_NUMBER: "no_number",
PRESCRIPT: "prescript",
UNDERSET: "underset",
OVERSET: "overset",
FRAC: "frac",
BINOM: "binom",
ROOT: "root",
DISPLAYSTYLE: "displaystyle",
PAD: "pad",
# general components
RELATION: "relations",
IDENTIFIER: "identifiers",
OPERATOR: "operators",
BINOP: "binary_operators",
FUNCTION: "functions",
NORM: "brackets",
NUMBER: "numbers",
HAT: "hats",
SHOE: "shoes",
OPEN: "open_brackets",
CLOSE: "close_brackets",
COL_SEP: "col_separators",
ROW_SEP: "row_separators",
VARIANT: "fonts",
ENCLOSE: "enclosures",
SPACE: "spaces",
# Must come last so it doesn't override custom words
# Maybe separate type?
WORD: "words",
}
# May add a "name" to this
Symbol = namedtuple("Symbol", ("pattern", "property"))
class Context(namedtuple("Context", ttypes.values())):
ttypes = ttypes
@staticmethod
def symbols(symbols):
return list(map(Symbol._make, symbols.items()))
@classmethod
def from_base(cls):
glyph = cls._make(cls.symbols(getattr(base, name)) for name in cls._fields)
glyph.normalize()
return glyph
@classmethod
def from_namespace(cls, context):
glyph = cls._make(cls.symbols(getattr(context, name, {})) for name in cls._fields)
glyph.normalize()
return glyph
@staticmethod
def _normalize_symbol(attrib):
if isinstance(attrib, tuple):
return attrib
elif isinstance(attrib, str):
return (attrib, {})
else:
raise TypeError(f"Unknown type {type(attrib)} for {attrib}")
def _normalize(self, symbols):
for i, symbol in enumerate(symbols):
symbols[i] = symbol._replace(
property=self._normalize_symbol(symbol.property))
def normalize(self):
for symbols in [
self.operators,
self.binary_operators,
self.relations,
self.functions,
self.identifiers,
self.brackets,
]:
self._normalize(symbols)
def items(self):
# ttype_name is aligned with Glyph
yield from zip(ttypes, self)
class Glyph:
areas = []
index = {}
flags = flags
def __init__(self, areas, index):
# Note: instance attributes are different from class attributes
self.areas = areas
self.index = index
@classmethod
def _get(cls, area):
# Lazy getter
context = cls.areas[cls.index[area]]
if not isinstance(context, Context):
if isinstance(context, str):
context = import_module(context)
elif isinstance(context, ModuleType):
pass
elif isinstance(context, dict):
context = SimpleNamespace(**context)
else:
raise TypeError(f"Invalid type '{type(context)}' for area.")
context = Context.from_namespace(context)
cls.areas[cls.index[area]] = context
return context
@classmethod
@cache
def from_area(cls, area=None, base=True):
areas = listify(area)
if base is True:
areas = areas + ("base",)
index = {area: cls.index[area] for area in areas}
areas = [cls._get(area) for area in areas]
return cls(areas, index)
@classmethod
def register_area(cls, area, context):
# TODO: Handle overwrites?
assert area not in cls.index
cls.areas.append(context)
cls.index[area] = cls.areas.index(context)
@classmethod
def register_areas(cls, area=None, **areas):
if area is None:
for area, context in areas.items():
cls.register_area(area, context)
        elif isinstance(area, dict):
for area, context in area.items():
cls.register_area(area, context)
else:
raise TypeError(f"Invalid type {type(area)} for area.")
@classmethod
def register_extensions(cls):
# from importlib import metadata
# metadata.entry_points(group="mumath.extensions")
pass
def __iter__(self):
for i, area in enumerate(self.areas):
for j, (ttype, symbols) in enumerate(area.items()):
for k, symbol in enumerate(symbols):
yield (i, j, k), ttype, symbol
def patterns(self):
for ijk, ttype, symbol in self:
yield ijk, ttype, symbol.pattern
def __getitem__(self, key):
k0, k1, k2 = key
return self.areas[k0][k1][k2]
def property(self, key, default=None):
try:
return self[key].property
except KeyError:
return default
# This should probably be defined at instance-level
# But that requires some refactoring
Glyph.register_areas({
"base": "mumath.context.base",
"physics": "mumath.context.physics",
"chemistry": "mumath.context.chemistry",
"statistics": "mumath.context.statistics",
})
Glyph.register_extensions()
| 26.472574 | 90 | 0.601371 |
d3c2bfdf03de23c30f4f5ead11f0837b38ddbcda
| 241 |
py
|
Python
|
1-two-sum/1-two-sum.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2 |
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
1-two-sum/1-two-sum.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
1-two-sum/1-two-sum.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
for i, num in enumerate(nums[:-1]):
for j in range(i+1, len(nums)):
if num+nums[j]==target:
return [i,j]
| 40.166667 | 64 | 0.497925 |
312e852e0ab25283164f049be99b9003dc3a7086
| 662 |
py
|
Python
|
src/bo4e/enum/netzebene.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/enum/netzebene.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/enum/netzebene.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
# pylint:disable=missing-module-docstring
from bo4e.enum.strenum import StrEnum
class Netzebene(StrEnum):
"""
    Enumeration of the possible grid levels (Netzebenen) within the energy types electricity (Strom) and gas.
"""
NSP = "NSP" #: Niederspannung; Strom
MSP = "MSP" #: Mittelspannung; Strom
HSP = "HSP" #: Hochspannung; Strom
HSS = "HSS" #: Hoechstspannung; Strom
MSP_NSP_UMSP = "MSP_NSP_UMSP" #: MS/NS Umspannung; Strom
HSP_MSP_UMSP = "HSP_MSP_UMSP" #: HS/MS Umspannung; Strom
HSS_HSP_UMSP = "HSS_HSP_UMSP" #: HOES/HS Umspannung; Strom
HD = "HD" #: Hochdruck; Gas
MD = "MD" #: Mitteldruck; Gas
ND = "ND" #: Niederdruck; Gas
| 31.52381 | 77 | 0.655589 |
5b04761ada7382d0d888d035ba0424a1b72c73c2
| 2,392 |
py
|
Python
|
tests/onegov/core/test_browser_session.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/core/test_browser_session.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/core/test_browser_session.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import pytest
from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE
from onegov.core.browser_session import BrowserSession
from onegov.core import cache
def test_browser_session_mangle():
session = BrowserSession({}, 'token')
assert session._cache.mangle('test')\
== 'e352b6fc45b2bc144082d507d6a51faec0bbeab5313974f7:test'
with pytest.raises(AssertionError):
session._cache.mangle('')
with pytest.raises(AssertionError):
session._cache.mangle(None)
def test_browser_session_cache():
cache = make_region().configure('dogpile.cache.memory')
session = BrowserSession(cache, 'token')
key = 'e352b6fc45b2bc144082d507d6a51faec0bbeab5313974f7:name'
assert not session.has('name')
assert 'name' not in session
with pytest.raises(AttributeError):
session.name
with pytest.raises(KeyError):
session['name']
assert cache.get(key) is NO_VALUE
session.name = 'test'
assert session.has('name')
assert 'name' in session
assert session.name == 'test'
assert session['name'] == 'test'
assert cache.get(key) == 'test'
del session.name
assert not session.has('name')
assert 'name' not in session
with pytest.raises(AttributeError):
session.name
with pytest.raises(KeyError):
session['name']
assert cache.get(key) is NO_VALUE
def test_browser_session_cache_prefix():
cache = make_region().configure('dogpile.cache.memory')
session = BrowserSession(cache, 'foo')
session.name = 'test'
assert session.name == 'test'
session = BrowserSession(cache, 'foo')
assert session.name == 'test'
session = BrowserSession(cache, 'bar')
with pytest.raises(AttributeError):
session.name
session = BrowserSession(cache, 'bar')
with pytest.raises(AttributeError):
session.name
def test_browser_session_count(redis_url):
session = BrowserSession(cache.get('sessions', 60, redis_url), 'token')
assert session.count() == 0
session['foo'] = 'bar'
assert session.count() == 1
session['foo'] = 'baz'
assert session.count() == 1
del session['foo']
assert session.count() == 0
session['asdf'] = 'qwerty'
assert session.count() == 1
session.flush()
assert session.count() == 0
session.flush()
assert session.count() == 0
| 24.408163 | 75 | 0.674749 |
818d96f792b14d7ccf99e03db9f6973b1a67e7bf
| 676 |
py
|
Python
|
crawlab/flower.py
|
anhilo/crawlab
|
363f4bf7a4ccc192a99850998c1bd0fc363832a1
|
[
"BSD-3-Clause"
] | 1 |
2019-08-20T14:26:39.000Z
|
2019-08-20T14:26:39.000Z
|
crawlab/flower.py
|
anhilo/crawlab
|
363f4bf7a4ccc192a99850998c1bd0fc363832a1
|
[
"BSD-3-Clause"
] | null | null | null |
crawlab/flower.py
|
anhilo/crawlab
|
363f4bf7a4ccc192a99850998c1bd0fc363832a1
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
import subprocess
# make sure the working directory is in system path
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(FILE_DIR, '..'))
sys.path.append(ROOT_PATH)
from utils.log import other
from config import BROKER_URL
if __name__ == '__main__':
p = subprocess.Popen([sys.executable, '-m', 'celery', 'flower', '-b', BROKER_URL],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=ROOT_PATH)
    # iterate until readline() returns an empty bytes object (EOF)
    for line in iter(p.stdout.readline, b''):
if line.decode('utf-8') != '':
other.info(line.decode('utf-8'))
| 32.190476 | 86 | 0.622781 |
81912007bd471b663a28440263224fd9aac8139e
| 3,136 |
py
|
Python
|
research/cv/inception_resnet_v2/src/dataset.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/inception_resnet_v2/src/dataset.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/inception_resnet_v2/src/dataset.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Create train or eval dataset."""
import os
import mindspore.common.dtype as mstype
import mindspore.dataset as de
import mindspore.dataset.transforms.c_transforms as C2
import mindspore.dataset.vision.c_transforms as C
def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, config=None):
"""
Create a train or eval dataset.
Args:
dataset_path (str): The path of dataset.
do_train (bool): Whether dataset is used for train or eval.
repeat_num (int): The repeat times of dataset. Default: 1.
        batch_size (int): The batch size of dataset. Default: 32.
        config: Config object providing ``work_nums`` (number of parallel workers). Default: None.
Returns:
Dataset.
"""
do_shuffle = bool(do_train)
device_num, rank_id = _get_rank_info()
if device_num == 1 or not do_train:
ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=config.work_nums, shuffle=do_shuffle)
else:
ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=config.work_nums,
shuffle=do_shuffle, num_shards=device_num, shard_id=rank_id)
image_length = 299
if do_train:
trans = [
C.RandomCropDecodeResize(image_length, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
C.RandomHorizontalFlip(prob=0.5),
C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
]
else:
trans = [
C.Decode(),
C.Resize((int(image_length / 0.875), int(image_length / 0.875))),
C.CenterCrop(image_length)
]
trans += [
C.Rescale(1.0 / 255.0, 0.0),
C.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
C.HWC2CHW()
]
type_cast_op = C2.TypeCast(mstype.int32)
ds = ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=config.work_nums)
ds = ds.map(input_columns="image", operations=trans, num_parallel_workers=config.work_nums)
# apply batch operations
ds = ds.batch(batch_size, drop_remainder=True)
# apply dataset repeat operation
ds = ds.repeat(repeat_num)
return ds
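# Minimal usage sketch (illustrative, not executed): `cfg` only needs to expose
# a `work_nums` attribute, and the dataset path is a placeholder for an
# ImageNet-style folder layout.
#
#   from types import SimpleNamespace
#   cfg = SimpleNamespace(work_nums=8)
#   train_ds = create_dataset("/path/to/imagenet/train", do_train=True,
#                             batch_size=32, config=cfg)
#   for batch in train_ds.create_dict_iterator():
#       pass  # batch["image"], batch["label"]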
def _get_rank_info():
"""
get rank size and rank id
"""
rank_size = int(os.environ.get("RANK_SIZE", 1))
if rank_size > 1:
from mindspore.communication.management import get_rank, get_group_size
rank_size = get_group_size()
rank_id = get_rank()
else:
rank_size = rank_id = None
return rank_size, rank_id
| 33.72043 | 107 | 0.655293 |
8191879558f66b4817abb137a9ce2a5b4339a098
| 32,601 |
py
|
Python
|
skripte/python/edit_tools/wortliste.py
|
hyphenation/languages-german
|
4330a0b1254e88615cb4c83e4b3dc2f0810c76d9
|
[
"MIT"
] | null | null | null |
skripte/python/edit_tools/wortliste.py
|
hyphenation/languages-german
|
4330a0b1254e88615cb4c83e4b3dc2f0810c76d9
|
[
"MIT"
] | null | null | null |
skripte/python/edit_tools/wortliste.py
|
hyphenation/languages-german
|
4330a0b1254e88615cb4c83e4b3dc2f0810c76d9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# :Copyright: © 2012 Günter Milde.
# :Licence: This work may be distributed and/or modified under
# the conditions of the `LaTeX Project Public License`,
# either version 1.3 of this license or (at your option)
# any later version.
# :Version: 0.1 (2012-02-07)
# wortliste.py
# ***********
#
# ::
"""Hilfsmittel für die Arbeit mit der `Wortliste`"""
# .. contents::
#
# Die hier versammelten Funktionen und Klassen dienen der Arbeit an und
# mit der freien `Wortliste der deutschsprachigen Trennmustermannschaft`_
# ("Lembergsche Liste")
#
# Vorspann
#
# ::
import difflib
import re
import codecs
import unicodedata
# WordFile
# ========
#
# Klasse zum Lesen und Schreiben der `Wortliste`::
class WordFile(file):
# encoding
# --------
#
# ::
encoding = 'utf8'
# Iteration
# ---------
#
# Die spezielle Funktion `__iter__` wird aufgerufen wenn über eine
# Klasseninstanz iteriert wird.
#
# Liefer einen Iterator über die "geparsten" Zeilen (Datenfelder)::
def __iter__(self):
line = self.readline().rstrip().decode(self.encoding)
while line:
yield WordEntry(line)
line = self.readline().rstrip().decode(self.encoding)
# asdict
# ------
#
# Lies Datei und trage die Zeilen mit ungetrenntem Wort
# als `key` und den Datenfeldern als `value` in ein `dictionary`
# (assoziatives Array) ein::
def asdict(self):
words = dict()
for entry in self:
words[entry[0]] = entry
return words
# writelines
# -----------
#
# Schreibe eine Liste von `unicode` Strings (Zeilen ohne Zeilenendezeichen)
# in die Datei `destination`::
def writelines(self, lines, destination, encoding=None):
outfile = codecs.open(destination, 'w',
encoding=(encoding or self.encoding))
outfile.write(u'\n'.join(lines))
outfile.write(u'\n')
# write_entry
# ------------
#
# Schreibe eine Liste von Datenfeldern (geparste Zeilen) in die Datei
# `destination`::
def write_entry(self, wortliste, destination, encoding=None):
lines = [unicode(entry) for entry in wortliste]
self.writelines(lines, destination, encoding)
# WordEntry
# =========
#
# Klasse für Einträge (Zeilen) der Wortliste
#
# Beispiel:
#
# >>> from wortliste import WordEntry
#
# >>> aalbestand = WordEntry(u'Aalbestand;Aal=be<stand # Test')
# >>> print aalbestand
# Aalbestand;Aal=be<stand # Test
#
# ::
class WordEntry(list):
# Argumente
# ---------
#
# Kommentare (aktualisiert, wenn Kommentar vorhanden)::
comment = u''
# Feldbelegung:
#
# 1. Wort ungetrennt
# 2. Wort mit Trennungen, falls für alle Varianten identisch,
# anderenfalls leer
# 3. falls Feld 2 leer, Trennung nach traditioneller Rechtschreibung
# 4. falls Feld 2 leer, Trennung nach reformierter Rechtschreibung (2006)
# 5. falls Feld 2 leer, Trennung für Wortform, die entweder in
# der Schweiz oder mit Großbuchstaben oder Kapitälchen benutzt wird
# und für traditionelle und reformierte Rechtschreibung identisch ist
# 6. falls Feld 5 leer, Trennung für Wortform, die entweder in
# der Schweiz oder mit Großbuchstaben oder Kapitälchen benutzt wird,
# traditionelle Rechtschreibung
# 7. falls Feld 5 leer, Trennung für Wortform, die entweder in
# der Schweiz oder mit Großbuchstaben oder Kapitälchen benutzt wird,
# reformierte Rechtschreibung (2006)
# 8. falls Feld 5 leer, Trennung nach (deutsch)schweizerischer
# Rechtschreibung; insbesondere Wörter mit "sss" gefolgt von
# einem Vokal, die wie andere Dreifachkonsonanten gehandhabt wurden
# (also anders, als der Duden früher vorgeschrieben hat), z.B.
# "süssauer"
#
# Sprachvarianten (Tags nach [BCP47]_) (Die Zählung der Indizes beginn in
# Python bei 0)::
sprachvarianten = {
'de': 1, # Deutsch, allgemeingültig
'de-1901': 2, # "traditionell" (nach Rechtschreibreform 1901)
'de-1996': 3, # reformierte Reformschreibung (1996)
'de-x-GROSS': 4, # ohne ß (Schweiz oder GROSS) allgemein
'de-1901-x-GROSS': 5, # ohne ß (Schweiz oder GROSS) "traditionell"
'de-1996-x-GROSS': 6, # ohne ß (Schweiz oder GROSS) "reformiert"
# 'de-CH-1996': 6, # Alias für 'de-1996-x-GROSS'
'de-CH-1901': 7, # ohne ß (Schweiz) "traditionell" ("süssauer")
}
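    # Illustratives Beispiel / illustrative example (not executed): for the entry
    #   abbeissen;-2-;-3-;-4-;-5-;ab<bei-ssen;ab<beis-sen;ab<beis-sen
    # index 5 ('de-1901-x-GROSS') selects 'ab<bei-ssen' and index 7
    # ('de-CH-1901') selects 'ab<beis-sen'; fields of the form '-N-' are empty.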
# Initialisierung::
def __init__(self, line, delimiter=';'):
self.delimiter = delimiter
# eventuell vorhandenen Kommentar abtrennen und speichern::
if '#' in line:
line = line.split(u'#')
self.comment = u'#'.join(line[1:])
line = line[0].rstrip()
# Zerlegen in Datenfelder, in Liste eintragen::
list.__init__(self, line.split(delimiter))
# Rückverwandlung in String
# -----------------------------------
#
# Erzeugen eines Eintrag-Strings (Zeile) aus der Liste der Datenfelder und
# dem Kommentar
#
# >>> unicode(aalbestand)
# u'Aalbestand;Aal=be<stand # Test'
#
# ::
def __unicode__(self):
line = ';'.join(self)
if self.comment:
line += ' #' + self.comment
return line
def __str__(self):
return unicode(self).encode('utf8')
# lang_index
# ---------------
#
# Index des zur Sprachvariante gehörenden Datenfeldes:
#
# >>> aalbestand.lang_index('de')
# 1
# >>> aalbestand.lang_index('de-1901')
# 1
# >>> aalbestand.lang_index('de-1996')
# 1
# >>> aalbestand.lang_index('de-x-GROSS')
# 1
# >>> aalbestand.lang_index('de-1901-x-GROSS')
# 1
# >>> aalbestand.lang_index('de-1996-x-GROSS')
# 1
# >>> abbeissen = WordEntry(
# ... u'abbeissen;-2-;-3-;-4-;-5-;ab<bei-ssen;ab<beis-sen;ab<beis-sen')
# >>> print abbeissen.lang_index('de')
# None
# >>> print abbeissen.lang_index('de-x-GROSS')
# None
# >>> abbeissen.lang_index('de-CH-1901')
# 7
# >>> urlaubstipp = WordEntry(u'Urlaubstipp;-2-;-3-;Ur<laubs=tipp')
# >>> print urlaubstipp.lang_index('de')
# None
# >>> print urlaubstipp.lang_index('de-1901')
# None
# >>> print urlaubstipp.lang_index('de-1996')
# 3
# >>> print urlaubstipp.lang_index('de-x-GROSS')
# None
# >>> print urlaubstipp.lang_index('de-1901-x-GROSS')
# None
# ::
def lang_index(self, lang):
assert lang in self.sprachvarianten, \
'Sprachvariante "%s" nicht in %s' % (lang,
self.sprachvarianten.keys())
# Einfacher Fall: eine allgemeine Schreibweise::
if len(self) == 2:
return 1
# Spezielle Schreibung::
try:
i = self.sprachvarianten[lang]
feld = self[i]
except IndexError:
if i > 4 and len(self) == 5:
return 4 # Allgemeine Schweiz/GROSS Schreibung:
return None # Feld nicht vorhanden
if feld.startswith('-'): # '-1-', '-2-', ...
return None # leeres Feld
return i
# Trennmuster für Sprachvariante ausgeben
#
# >>> aalbestand.get('de')
# u'Aal=be<stand'
# >>> aalbestand.get('de-1901')
# u'Aal=be<stand'
# >>> aalbestand.get('de-1996')
# u'Aal=be<stand'
# >>> aalbestand.get('de-x-GROSS')
# u'Aal=be<stand'
# >>> aalbestand.get('de-1901-x-GROSS')
# u'Aal=be<stand'
# >>> aalbestand.get('de-1996-x-GROSS')
# u'Aal=be<stand'
# >>> aalbestand.get('de-CH-1901')
# u'Aal=be<stand'
#
# >>> print abbeissen.get('de')
# None
# >>> print abbeissen.get('de-x-GROSS')
# None
# >>> print abbeissen.get('de,de-x-GROSS')
# None
# >>> abbeissen.get('de-1901-x-GROSS')
# u'ab<bei-ssen'
# >>> abbeissen.get('de,de-1901,de-1901-x-GROSS')
# u'ab<bei-ssen'
# >>> abbeissen.get('de-CH-1901')
# u'ab<beis-sen'
#
# ::
def get(self, sprachvarianten):
for lang in sprachvarianten.split(','):
i = self.lang_index(lang) # integer>0 or None
if i:
return self[i]
return None
# Trennmuster für Sprachvariante setzen
#
# >>> abbeissen.set('test', 'de-1901-x-GROSS')
# >>> print abbeissen
# abbeissen;-2-;-3-;-4-;-5-;test;ab<beis-sen;ab<beis-sen
#
# >>> abbeissen.set('test', 'de-1901')
# Traceback (most recent call last):
# ...
# IndexError: kann kein leeres Feld setzen
#
# >>> abbeissen.set('test', 'de-1901,de-1901-x-GROSS')
# >>> print abbeissen
# abbeissen;-2-;-3-;-4-;-5-;test;ab<beis-sen;ab<beis-sen
#
# ::
def set(self, wort, sprachvarianten):
for lang in sprachvarianten.split(','):
i = self.lang_index(lang)
if i is None:
continue
if wort is None:
                wort = u'-%d-' % (i + 1)
self[i] = wort
return
raise IndexError, "kann kein leeres Feld setzen"
# Felder für alle Sprachvarianten ausfüllen
#
# >>> print str(aalbestand), len(aalbestand)
# Aalbestand;Aal=be<stand # Test 2
# >>> aalbestand.expand_fields()
# >>> print len(aalbestand)
# 8
# >>> auffrass = WordEntry('auffrass;-2-;-3-;-4-;auf-frass')
# >>> auffrass.expand_fields()
# >>> print auffrass
# auffrass;-2-;-3-;-4-;auf-frass;auf-frass;auf-frass;auf-frass
#
# ::
def expand_fields(self):
fields = [self.get(sv) or '-%d-' % (self.sprachvarianten[sv] + 1)
for sv in sorted(self.sprachvarianten.keys(),
key=self.sprachvarianten.get)]
# return fields
for i, field in enumerate(fields):
try:
self[i+1] = field # Feld 1 ist "key" (ungetrennt)
except IndexError:
self.append(field)
# Felder für Sprachvarianten zusammenfassen
#
# >>> aalbestand.conflate_fields()
# >>> print aalbestand
# Aalbestand;Aal=be<stand # Test
# >>> auffrass.conflate_fields()
# >>> print auffrass
# auffrass;-2-;-3-;-4-;auf-frass
# >>> entry = WordEntry(u'distanziert;-2-;di-stan-ziert;di-stan-ziert')
# >>> entry.conflate_fields()
# >>> print entry
# distanziert;di-stan-ziert
#
# Aber nicht, wenn die Trennstellen sich unterscheiden:
#
# >>> abenddienste = WordEntry(
# ... u'Abenddienste;-2-;Abend=dien-ste;Abend=diens-te')
# >>> abenddienste.conflate_fields()
# >>> print abenddienste
# Abenddienste;-2-;Abend=dien-ste;Abend=diens-te
#
# ::
def conflate_fields(self):
if len(self) == 8:
if self[7] == self[6] == self[5]:
self[4] = self[5] # umschreiben auf GROSS-allgemein
self.pop()
self.pop()
self.pop()
if len(self) == 5:
if self[4] == self[2]: # de-x-GROSS == de-1901
self.pop()
else:
return
if len(self) >= 4:
if self[3] == self[2]: # de-1996 == de-1901
self[1] = self[2] # Umschreiben auf de (allgemein)
self.pop()
self.pop()
# Prüfe auf Vorkommen von Regeländerungen der Orthographiereform 1996.
#
# >>> entry = WordEntry(u'Würste;Wür-ste')
# >>> entry.regelaenderungen()
# >>> print unicode(entry)
# Würste;-2-;Wür-ste;Würs-te
# >>> entry = WordEntry(u'Würste;Würs-te')
# >>> entry.regelaenderungen()
# >>> print unicode(entry)
# Würste;-2-;Wür-ste;Würs-te
# >>> entry = WordEntry(u'Hecke;He-cke')
# >>> entry.regelaenderungen()
# >>> print unicode(entry)
# Hecke;-2-;He{ck/k-k}e;He-cke
# >>> entry = WordEntry(u'Ligusterhecke;Ligu-ster=he{ck/k-k}e')
# >>> entry.regelaenderungen()
# >>> print unicode(entry)
# Ligusterhecke;-2-;Ligu-ster=he{ck/k-k}e;Ligus-ter=he-cke
# >>> entry = WordEntry(u'Hass;Hass')
# >>> entry.regelaenderungen()
# >>> print unicode(entry)
# Hass;-2-;-3-;Hass;Hass
# >>> entry = WordEntry(u'fasst;fasst')
# >>> entry.regelaenderungen()
# >>> print unicode(entry)
# fasst;-2-;-3-;fasst;fasst
# >>> entry = WordEntry(u'Missbrauch;Miss<brauch')
# >>> entry.regelaenderungen()
# >>> print unicode(entry)
# Missbrauch;-2-;-3-;Miss<brauch;Miss<brauch
# >>> entry = WordEntry(u'schlifffest;schliff=fest')
# >>> entry.regelaenderungen()
# >>> print unicode(entry)
# schlifffest;-2-;-3-;schliff=fest
#
# ::
def regelaenderungen(self):
# Trennregeländerungen:
r1901 = (u'-st', u'{ck/k-k}')
r1996 = (u's-t', u'-ck')
w1901 = self.get('de-1901')
w1996 = self.get('de-1996')
w_x_GROSS = None
if w1901 is None or w1996 is None:
return
for r1, r2 in zip(r1901, r1996):
w1901 = w1901.replace(r2,r1)
w1996 = w1996.replace(r1,r2)
# kein Schluss-ss und sst in de-1901 (ungetrenntes "ss" nur in Ausnahmen)
# aber: 'ßt' und Schluß-ß auch in de-1996 möglich (langer Vokal)
if u'ss' in w1901:
w_x_GROSS = w1901
w1901 = None
# Dreikonsonantenregel:
if w1901 and re.search(ur'(.)\1=\1', w1901):
w1901 = None
# Speichern:
if w1901 == w1996: # keine Regeländerung im Wort
if len(self) > 2:
self.conflate_fields()
return
if w1901 is None:
self.extend( ['']*(4-len(self)) )
self[1] = u'-2-'
self[2] = u'-3-'
self[3] = w1996
else:
self.extend( ['']*(4-len(self)) )
self[1] = u'-2-'
self[2] = w1901
self[3] = w1996
if w_x_GROSS:
self.append(w_x_GROSS)
# Funktionen
# ==========
#
# join_word
# ---------
#
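# Beispiele (mit Wörtern aus den übrigen Doctests dieser Datei):
#
# >>> from wortliste import join_word
#
# >>> join_word(u'Aal=be<stand')
# u'Aalbestand'
# >>> join_word(u'ab<bei-ssen')
# u'abbeissen'
# >>> join_word(u'He{ck/k-k}e')
# u'Hecke'
#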
# Trennzeichen entfernen::
def join_word(word, assert_complete=False):
# Einfache Trennzeichen:
#
# == ================================================================
# \· ungewichtete Trennstelle (solche, wo sich noch niemand um die
# Gewichtung gekümmert hat)
# \. unerwünschte Trennstelle (sinnentstellend), z.B. Ur·in.stinkt
# oder ungünstige Trennstelle (verwirrend), z.B. Atom·en.er·gie
# in ungewichteten Wörtern
# \= Trennstelle an Wortfugen (Wort=fu-ge)
# \< Trennstelle nach Präfix (Vor<sil-be)
# \> Trennstelle vor Suffix (Freund>schaf-ten)
# \- Nebentrennstelle (ge-hen)
# == ================================================================
#
# ::
table = {}
for char in u'·.=|-_<>':
table[ord(char)] = None
key = word.translate(table)
# Spezielle Trennungen für die traditionelle Rechtschreibung
# (siehe ../../dokumente/README.wortliste)::
if '{' in key or '}' in key:
key = key.replace(u'{ck/kk}', u'ck')
key = key.replace(u'{ck/k', u'k')
key = key.replace(u'k}', u'k')
# Konsonanthäufungen an Wortfuge: '{xx/xxx}' -> 'xx':
key = re.sub(ur'\{(.)\1/\1\1\1\}', ur'\1\1', key)
# schon getrennt: ('{xx/xx' -> 'xx' und 'x}' -> 'x'):
key = re.sub(ur'\{(.)\1/\1\1$', ur'\1\1', key)
key = re.sub(ur'^(.)\}', ur'\1', key)
# Trennstellen in doppeldeutigen Wörtern::
if '[' in key or ']' in key:
key = re.sub(ur'\[(.*)/\1\]', ur'\1', key)
# schon getrennt:
key = re.sub(ur'\[([^/\[]+)$', ur'\1', key)
key = re.sub(ur'^([^/\]]+)\]', ur'\1', key)
# Test auf verbliebene komplexe Trennstellen::
if assert_complete:
for spez in u'[{/}]':
if spez in key:
raise AssertionError('Spezialtrennung %s, %s' %
(word.encode('utf8'), key.encode('utf8')))
return key
# zerlege
# -------
#
# Zerlege ein Wort mit Trennzeichen in eine Liste von Silben und eine Liste
# von Trennzeichen.
#
# >>> from wortliste import zerlege
#
# >>> zerlege(u'Haupt=stel-le')
# ([u'Haupt', u'stel', u'le'], [u'=', u'-'])
# >>> zerlege(u'Ge<samt=be<triebs=rats==chef')
# ([u'Ge', u'samt', u'be', u'triebs', u'rats', u'chef'], [u'<', u'=', u'<', u'=', u'=='])
# >>> zerlege(u'an<stands>los')
# ([u'an', u'stands', u'los'], [u'<', u'>'])
# >>> zerlege(u'An<al.pha-bet')
# ([u'An', u'al', u'pha', u'bet'], [u'<', u'.', u'-'])
#
# ::
def zerlege(wort):
silben = re.split(u'[-·._<>=]+', wort)
trennzeichen = re.split(u'[^-·._|<>=]+', wort)
return silben, [tz for tz in trennzeichen if tz]
# TransferError
# -------------
#
# Fehler beim Übertragen von Trennstellen mit uebertrage_::
class TransferError(ValueError):
def __init__(self, wort1, wort2):
msg = u'Inkompatibel: %s %s' % (wort1, wort2)
ValueError.__init__(self, msg.encode('utf8'))
def __unicode__(self):
return str(self).decode('utf8')
# uebertrage
# ----------
#
# Übertrage die Trennzeichen von `wort1` auf `wort2`:
#
# >>> from wortliste import uebertrage, TransferError
#
# >>> uebertrage(u'Haupt=stel-le', u'Haupt·stel·le')
# u'Haupt=stel-le'
#
# Auch teilweise Übertragung, von "kategorisiert" nach "unkategorisiert":
#
# >>> print uebertrage(u'Haupt=stel-le', u'Haupt=stel·le')
# Haupt=stel-le
#
# >>> print uebertrage(u'Haupt·stel-le', u'Haupt=stel·le')
# Haupt=stel-le
#
# >>> print uebertrage(u'Aus<stel-ler', u'Aus-stel-ler')
# Aus<stel-ler
#
# >>> print uebertrage(u'Freund>schaf·ten', u'Freund-schaf-ten')
# Freund>schaf-ten
#
# Übertragung doppelter Marker:
#
# >>> print uebertrage(u'ver<<aus<ga-be', u'ver<aus<ga-be')
# ver<<aus<ga-be
#
# >>> print uebertrage(u'freund>lich>>keit', u'freund>lich>keit')
# freund>lich>>keit
# >>> print uebertrage(u'Amts==haupt=stel-le', u'Amts=haupt=stel-le')
# Amts==haupt=stel-le
# Kein Überschreiben doppelter Marker:
# >>> print uebertrage(u'ver<aus<ga-be', u'ver<<aus<ga-be')
# ver<<aus<ga-be
#
# >>> print uebertrage(u'Amts=haupt=stel-le', u'Amts==haupt=stel·le')
# Amts==haupt=stel-le
#
# Erhalt des Markers für ungünstige Stellen:
# >>> print uebertrage(u'An·al.pha·bet', u'An<al.pha-bet')
# An<al.pha-bet
#
# Keine Übertragung, wenn die Zahl oder Position der Trennstellen
# unterschiedlich ist oder bei unterschiedlichen Wörtern:
#
# >>> try:
# ... uebertrage(u'Ha-upt=stel-le', u'Haupt=stel·le')
# ... uebertrage(u'Haupt=ste-lle', u'Haupt=stel·le')
# ... uebertrage(u'Waupt=stel-le', u'Haupt=stel·le')
# ... except TransferError:
# ... pass
#
# Übertragung auch bei unterschiedlicher Schreibung oder Position der
# Trennstellen mit `strict=False` (für Abgleich zwischen Sprachvarianten):
#
# >>> uebertrage(u'er-ster', u'ers·ter', strict=False)
# u'ers-ter'
# >>> uebertrage(u'Fluß=bett', u'Fluss·bett', strict=False)
# u'Fluss=bett'
# >>> uebertrage(u'ab>bei-ßen', u'ab>beis·sen', strict=False)
# u'ab>beis-sen'
# >>> print uebertrage(u'Aus<tausch=dien-stes', u'Aus-tausch=diens-tes', False)
# Aus<tausch=diens-tes
#
# Auch mit `strict=False` muß die Zahl der Trennstellen übereinstimmen
# (Ausnahmen siehe unten):
#
# >>> try:
# ... uebertrage(u'Ha-upt=ste-lle', u'Haupt=stel·le', strict=False)
# ... except TransferError:
# ... pass
#
# Akzeptiere unterschiedliche Anzahl von Trennungen bei st und ck nach
# Selbstlaut:
#
# >>> uebertrage(u'acht=ecki-ge', u'acht·e{ck/k·k}i·ge', strict=False)
# u'acht=e{ck/k-k}i-ge'
# >>> uebertrage(u'As-to-ria', u'Asto·ria', strict=False)
# u'Asto-ria'
# >>> uebertrage(u'Asto-ria', u'As·to·ria', strict=False)
# u'As-to-ria'
# >>> uebertrage(u'So-fa=ecke', u'So·fa=e{ck/k-k}e', strict=False)
# u'So-fa=e{ck/k-k}e'
# >>> uebertrage(u'Drei=ecks=ecke', u'Drei=ecks==e{ck/k-k}e', strict=False)
# u'Drei=ecks==e{ck/k-k}e'
#
# Mit ``upgrade=False`` werden nur unspezifische Trennstellen überschrieben:
#
# >>> print uebertrage(u'an=stel-le', u'an<stel·le', upgrade=False)
# an<stel-le
#
# >>> print uebertrage(u'Aus<stel-ler', u'Aus-stel-ler', upgrade=False)
# Aus-stel-ler
#
# >>> print uebertrage(u'Aus-stel-ler', u'Aus<stel-ler', upgrade=False)
# Aus<stel-ler
#
# >>> print uebertrage(u'vor<an<<stel-le', u'vor-an<stel·le', upgrade=False)
# vor-an<stel-le
#
# ::
selbstlaute = u'aeiouäöüAEIOUÄÖÜ'
def uebertrage(wort1, wort2, strict=True, upgrade=True):
silben1, trennzeichen1 = zerlege(wort1)
silben2, trennzeichen2 = zerlege(wort2)
# Prüfe strikte Übereinstimmung:
if silben1 != silben2 and strict:
if u'<' in trennzeichen1 or u'·' in trennzeichen2:
raise TransferError(wort1, wort2)
else:
return wort2
# Prüfe ungefähre Übereinstimmung:
if len(trennzeichen1) != len(trennzeichen2):
# Selbstlaut + st oder ck?
for s in selbstlaute:
if (wort2.find(s+u'{ck/k·k}') != -1 or
wort2.find(s+u'{ck/k-k}') != -1):
wort1 = re.sub(u'%sck([%s])'%(s,selbstlaute),
ur'%s-ck\1'%s, wort1)
silben1, trennzeichen1 = zerlege(wort1)
if wort2.find(s+u's·t') != -1:
wort1 = wort1.replace(s+u'st', s+u's-t')
silben1, trennzeichen1 = zerlege(wort1)
elif wort1.find(s+u's-t') != -1:
wort1 = wort1.replace(s+u's-t', s+u'st')
silben1, trennzeichen1 = zerlege(wort1)
# print u'retry:', silben1, trennzeichen1
# immer noch ungleiche Zahl an Trennstellen?
if len(trennzeichen1) != len(trennzeichen2):
raise TransferError(wort1, wort2)
# Baue wort3 aus silben2 und spezifischeren Trennzeichen:
wort3 = silben2.pop(0)
for t1,t2 in zip(trennzeichen1, trennzeichen2):
if ((t2 == u'·' and t1 != u'.') # unspezifisch
or upgrade and
((t2 in (u'-', u'<') and t1 in (u'<', u'<<', u'<=')) # Praefixe
or (t2 in (u'-', u'>') and t1 in (u'>', u'>>', u'=>')) # Suffixe
or (t2 in (u'-', u'=') and t1 in (u'=', u'==', u'===')) # W-fugen
)
):
wort3 += t1
elif t2 == u'.' and t1 not in u'·.':
wort3 += t1 + t2
else:
wort3 += t2
wort3 += silben2.pop(0)
return wort3
# Übertrag kategorisierter Trennstellen zwischen den Feldern aller Einträge
# in `wortliste`::
def sprachabgleich(entry, vorbildentry=None):
if len(entry) <= 2:
return # allgemeine Schreibung
mit_affix = None # < oder >
kategorisiert = None # kein ·
unkategorisiert = None # mindestens ein ·
gewichtet = None # == oder <= oder =>
for field in entry[1:]:
if field.startswith('-'): # -2-, -3-, ...
continue
if u'{' in field and u'[' in field: # Bi-ber==be[t=t/{tt/tt=t}]uch
continue # zu komplex
if u'·' in field:
unkategorisiert = field
elif u'<' in field or u'>' in field:
mit_affix = field
else:
kategorisiert = field
if u'==' in field or u'<=' in field or u'=>' in field:
gewichtet = field
if vorbildentry:
for field in vorbildentry[1:]:
if field.startswith('-'): # -2-, -3-, ...
continue
if u'{' in field and u'[' in field: # Bi-ber==be[t=t/{tt/tt=t}]uch
continue # zu komplex
if not mit_affix and u'<' in field or u'>' in field :
mit_affix = field
elif not kategorisiert and unkategorisiert and u'·' not in field:
kategorisiert = field
if not gewichtet and u'==' in field or u'<=' in field or u'=>' in field:
gewichtet = field
# print 've:', mit_affix, kategorisiert, unkategorisiert
if mit_affix and (kategorisiert or unkategorisiert or gewichtet):
for i in range(1,len(entry)):
if entry[i].startswith('-'): # -2-, -3-, ...
continue
if u'<' not in entry[i] or u'·' in entry[i]:
try:
entry[i] = uebertrage(mit_affix, entry[i], strict=False)
except TransferError, e:
if not '/' in entry[i]:
print u'Sprachabgleich:', unicode(e)
# print mit_affix+u':', unicode(entry)
elif kategorisiert and unkategorisiert:
for i in range(1,len(entry)):
if u'·' in entry[i]:
try:
entry[i] = uebertrage(kategorisiert, entry[i], strict=False)
except TransferError, e:
print u'Sprachabgleich:', unicode(e)
# print kategorisiert, unicode(entry)
elif gewichtet:
for i in range(1,len(entry)):
if u'=' in entry[i] and not (
u'{' in entry[i] and u'[' in entry[i]):
try:
entry[i] = uebertrage(gewichtet, entry[i], strict=False)
except TransferError, e:
print u'Sprachabgleich:', unicode(e)
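# Beispiel (konstruierter Eintrag): die kategorisierte Trennung aus Feld 3
# wird auf das unkategorisierte Feld 4 übertragen:
#
# >>> from wortliste import WordEntry, sprachabgleich
# >>> entry = WordEntry(u'Abtastzeit;-2-;Ab<tast=zeit;Ab·tast=zeit')
# >>> sprachabgleich(entry)
# >>> print unicode(entry)
# Abtastzeit;-2-;Ab<tast=zeit;Ab<tast=zeit
#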
# Großschreibung in Kleinschreibung wandeln und umgekehrt
#
# Diese Version funktioniert auch für Wörter mit Trennzeichen (während
# str.title() nach jedem Trennzeichen wieder groß anfängt)
#
# >>> from wortliste import toggle_case
# >>> toggle_case(u'Ha-se')
# u'ha-se'
# >>> toggle_case(u'arm')
# u'Arm'
# >>> toggle_case(u'frei=bier')
# u'Frei=bier'
# >>> toggle_case(u'L}a-ger')
# u'l}a-ger'
#
# Keine Änderung bei Wörtern mit Großbuchstaben im Inneren:
#
# >>> toggle_case(u'USA')
# u'USA'
# >>> toggle_case(u'iRFD')
# u'iRFD'
#
# >>> toggle_case(u'gri[f-f/{ff/ff')
# u'Gri[f-f/{ff/ff'
# >>> toggle_case(u'Gri[f-f/{ff/ff')
# u'gri[f-f/{ff/ff'
#
# ::
def toggle_case(wort):
try:
key = join_word(wort, assert_complete=True)
except AssertionError:
key = wort[0]
if key.istitle():
return wort.lower()
elif key.islower():
return wort[0].upper() + wort[1:]
else:
return wort
# Sortierschlüssel
# ================
#
# Duden-Sortierung für die Wortliste
#
# >>> from wortliste import sortkey_duden
# >>> sortkey_duden([u"Abflußröhren"])
# u'abflussrohren a*bflu*szroehren'
# >>> sortkey_duden([u"Abflußrohren"])
# u'abflussrohren a*bflu*szro*hren'
# >>> sortkey_duden([u"Abflussrohren"])
# u'abflussrohren'
#
# >>> s = sorted([[u"Abflußröhren"], [u"Abflußrohren"], [u"Abflussrohren"]],
# ... key=sortkey_duden)
# >>> print ', '.join(e[0] for e in s)
# Abflussrohren, Abflußrohren, Abflußröhren
#
# Umschreibung
#
# Ligaturen auflösen und andere "normalisierende" Ersetzungen für den
# (Haupt-)Sortierschlüssel (Akzente werden über ``unicodedata.normalize``
# entfernt)::
umschrift_skey = {
ord(u'æ'): u'ae',
ord(u'œ'): u'oe',
ord(u'ſ'): u's',
}
# "Zweitschlüssel" zur Unterscheidung von Umlauten/SZ und Basisbuchstaben::
umschrift_subkey = {
ord(u'a'): u'a*',
ord(u'å'): u'aa',
ord(u'ä'): u'ae',
ord(u'o'): u'o*',
ord(u'ö'): u'oe',
ord(u'ø'): u'oe',
ord(u'u'): u'u*',
ord(u'ü'): u'ue',
ord(u'ß'): u'sz',
}
# sortkey_duden
# -------------
#
# Sortiere nach erstem Feld, alphabetisch gemäß Duden-Regeln::
def sortkey_duden(entry):
# Sortieren nach erstem Feld (ungetrenntes Wort)::
key = entry[0]
if len(entry) == 1: # ein Muster pro Zeile, siehe z.B. pre-1901
key = join_word(key)
# Großschreibung ignorieren:
#
# Der Duden sortiert Wörter, die sich nur in der Großschreibung unterscheiden
# "klein vor groß" (ASCII sortiert "groß vor klein"). In der
# `Trennmuster-Wortliste` kommen Wörter nur mit der häufiger anzutreffenden
# Großschreibung vor, denn der TeX-Trennalgorithmus ignoriert Großschreibung.
# ::
key = key.lower()
# Ersetzungen:
#
# ß -> ss ::
skey = key.replace(u'ß', u'ss')
# Restliche Akzente weglassen: Wandeln in Darstellung von Buchstaben mit
# Akzent als "Grundzeichen + kombinierender Akzent". Anschließend alle
# nicht-ASCII-Zeichen ignorieren::
skey = skey.translate(umschrift_skey)
skey = unicodedata.normalize('NFKD', skey)
skey = unicode(skey.encode('ascii', 'ignore'))
# "Zweitschlüssel" für das eindeutige Einsortieren von Wörtern mit
# gleichem Schlüssel (Masse/Maße, waren/wären, ...):
#
# * "*" nach aou für die Unterscheidung Grund-/Umlaut
# * ß->sz
#
# ::
if key != skey:
subkey = key.translate(umschrift_subkey)
skey = u'%s %s' % (skey,subkey)
# Gib den Sortierschlüssel zurück::
return skey
# udiff
# ------------
#
# Vergleiche zwei Sequenzen von `WordEntries`, gib einen "unified diff" als
# Byte-String zurück (weil difflib nicht mit Unicode-Strings arbeiten kann).
#
# Beispiel:
#
# >>> from wortliste import udiff
# >>> print udiff([abbeissen, aalbestand], [abbeissen], 'alt', 'neu')
# --- alt
# +++ neu
# @@ -1,2 +1 @@
# abbeissen;-2-;-3-;-4-;-5-;test;ab<beis-sen;ab<beis-sen
# -Aalbestand;Aal=be<stand # Test
#
# ::
def udiff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=1, encoding='utf8'):
a = [unicode(entry).encode(encoding) for entry in a]
b = [unicode(entry).encode(encoding) for entry in b]
    diff = list(difflib.unified_diff(a, b, fromfile, tofile,
                                     fromfiledate, tofiledate, n, lineterm=''))
if diff:
return '\n'.join(diff)
else:
return None
def test_keys(wortliste):
"""Teste Übereinstimmung des ungetrennten Wortes in Feld 1
mit den Trennmustern nach Entfernen der Trennmarker.
Schreibe Inkonsistenzen auf die Standardausgabe.
`wortliste` ist ein Iterator über die Einträge (Klasse `WordEntry`)
"""
is_OK = True
for entry in wortliste:
# Test der Übereinstimmung ungetrenntes/getrenntes Wort
# für alle Felder:
key = entry[0]
for wort in entry[1:]:
if wort.startswith(u'-'): # leere Felder
continue
if key != join_word(wort):
is_OK = False
print u"\nkey '%s' != join_word('%s')" % (key, wort),
if key.lower() == join_word(wort).lower():
                    print(u" Abgleich der Großschreibung mit "
u"`prepare-patch.py grossabgleich`."),
return is_OK
# Test
# ====
#
# ::
if __name__ == '__main__':
import sys
# sys.stdout mit UTF8 encoding (wie in Python 3)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
print u"Test der Werkzeuge und inneren Konsistenz der Wortliste\n"
wordfile = WordFile('../../../wortliste')
# print 'Dateiobjekt:', wordfile
# Liste der Datenfelder (die Klasseninstanz als Argument für `list` liefert
# den Iterator über die Felder, `list` macht daraus eine Liste)::
wortliste = list(wordfile)
print len(wortliste), u"Einträge\n"
# Sprachauswahl::
# Sprachtags:
#
# sprache = 'de-1901' # traditionell
# sprache = 'de-1996' # Reformschreibung
# sprache = 'de-x-GROSS' # ohne ß (Schweiz oder GROSS) allgemein
# sprache = 'de-1901-x-GROSS' # ohne ß (Schweiz oder GROSS) "traditionell"
# sprache = 'de-1996-x-GROSS' # ohne ß (Schweiz oder GROSS) "reformiert"
# sprache = 'de-CH-1901' # ohne ß (Schweiz) "traditionell" ("süssauer")
#
    # worte = [entry.get(sprache) for entry in wortliste
    #          if entry.get(sprache) is not None]
# print len(worte), u"Einträge für Sprachvariante", sprache
# Test keys::
print u"Teste Schlüssel-Trennmuster-Übereinstimmung:",
if test_keys(wortliste):
print u"OK",
print
# Doppeleinträge::
doppelte = 0
words = set()
for entry in wortliste:
key = entry[0].lower()
if key in words:
doppelte += 1
print unicode(entry)
words.add(key)
print doppelte,
print u"Doppeleinträge (ohne Berücksichtigung der Großschreibung)."
if doppelte:
print u" Entfernen mit `prepare-patch.py doppelte`."
print u" Patch vor Anwendung durchsehen!"
# Ein Wörterbuch (dict Instanz)::
# wordfile.seek(0) # Pointer zurücksetzen
# words = wordfile.asdict()
#
# print len(words), u"Wörterbucheinträge"
# Zeilenrekonstruktion::
# am Beispiel der Scheiterbeige:
# original = u'beige;beige # vgl. Scheiter-bei-ge'
# entry = words[u"beige"]
# line = unicode(entry)
# assert original == line, "Rejoined %s != %s" % (line, original)
# komplett:
wordfile.seek(0) # Pointer zurücksetzen
OK = 0
line = wordfile.readline().rstrip().decode(wordfile.encoding)
while line:
entry = WordEntry(line)
if line == unicode(entry):
OK +=1
else:
print u'-', line,
print u'+', unicode(entry)
line = wordfile.readline().rstrip().decode(wordfile.encoding)
print OK, u"Einträge rekonstruiert"
# Quellen
# =======
#
# .. [BCP47] A. Phillips und M. Davis, (Editoren.),
# `Tags for Identifying Languages`, http://www.rfc-editor.org/rfc/bcp/bcp47.txt
#
# .. _Wortliste der deutschsprachigen Trennmustermannschaft:
# http://mirrors.ctan.org/language/hyphenation/dehyph-exptl/projektbeschreibung.pdf
| 29.529891 | 89 | 0.58851 |
c4d4a4be09ac53d98f5066b2ab316b38c36ca046
| 674 |
py
|
Python
|
easy/4/python/config.py
|
carlan/dailyprogrammer
|
f8448c6a35277c567d0f1ecab781d45b294c8d0f
|
[
"MIT"
] | 1 |
2019-02-26T16:34:06.000Z
|
2019-02-26T16:34:06.000Z
|
easy/4/python/config.py
|
carlan/dailyprogrammer
|
f8448c6a35277c567d0f1ecab781d45b294c8d0f
|
[
"MIT"
] | null | null | null |
easy/4/python/config.py
|
carlan/dailyprogrammer
|
f8448c6a35277c567d0f1ecab781d45b294c8d0f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""config.py: challenge #4"""
__author__ = "Carlan Calazans"
__copyright__ = "Copyright 2016, Carlan Calazans"
__credits__ = ["Carlan Calazans"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Carlan Calazans"
__email__ = "carlancalazans at gmail dot com"
__status__ = "Development"
class Config(object):
TEMPLATE = 'sldlswswswsldls'
LEGEND = {
"s": '!@$%^&*-_+=:|~?/.;',
"d": '0123456789',
"w": 'sister offense temporary sock finish experience issue mouth position deck seminar begin live blonde impound foot ambiguity smile breed lung'.split(),
"l": 'abcdefghijklmnoprstuvwxyz'
}
| 30.636364 | 158 | 0.676558 |
48217dfaf1efc2c3038caec0116cfdc8b9f7f904
| 610 |
py
|
Python
|
aoc2020/day_06/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_06/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_06/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
from .part_1 import Solution as Part1
class Solution(Part1):
expected = 6
count = lambda s, f, l: len(list(filter(f, l)))
def solve(self) -> any:
grp_size, rt, yes_answers = 0, 0, [0] * 26
for line in self.resource_lines("input"):
if line == "":
rt += self.count(lambda x: x == grp_size, yes_answers)
grp_size, yes_answers = 0, [0] * 26
else:
grp_size += 1
for c in line:
yes_answers[self.c2i(c)] += 1
return rt + self.count(lambda x: x == grp_size, yes_answers)
| 32.105263 | 70 | 0.514754 |
485a95b7fb38df76b358e2331f5333070b3b8a5d
| 21,436 |
py
|
Python
|
wz/ui_modules/PUPILS.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/ui_modules/PUPILS.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/ui_modules/PUPILS.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
ui_modules/PUPILS.py
Last updated: 2021-05-23
Pupil table management.
=+LICENCE=============================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""
### Messages
### Labels, etc.
_UPDATE_PUPILS = "Schülerdaten verwalten"
_UPDATE_PUPILS_TEXT = """## Keine Klasse (bzw. alle Klassen)
Auf dieser Seite gibt es zwei Optionen:
- Die gesamten Schülerdaten können als Tabellendatei (xlsx oder tsv)
gespeichert werden.
- Die gesamten Schülerdaten können von einer externen Tabelle
aktualisiert werden.
Bei der Aktualisierung werden nur die Felder, die in der Tabellendatei
vorhanden sind, berücksichtigt. Alle anderen Felder in der Datenbank
bleiben unverändert. Zusätzliche Felder in der Eingabetabelle, die für
die Datenbank nicht relevant sind, werden ignoriert.
Die Änderungen zu den bestehenden Daten werden angezeigt. Anhand dieser
Baum-Anzeige können Sie wählen, welche dann tatsächlich umgesetzt werden.
## Klasse auswählen
Zuerst wird eine Tabelle mit den Daten von allen Schülern in der gewählten
Klasse angezeigt.
Diese Daten können auch geändert werden. Wenn Änderungen eingegeben werden,
werden sie nicht sofort gespeichert. Das Abspeichern muss durch
Anklicken der entsprechenden Schaltfläche ausgelöst werden.
Durch weitere Schaltflächen können Sie
- einen neuen Schüler aufnehmen. Durch Anklicken dieser Schaltfläche
erscheint eine Tabelle, in die Sie die Daten des Schülers eingeben können.
- einen Schüler aus der Datenbank entfernen.
*Achtung:* Normalerweise sollten Schüler nicht aus der Datenbank entfernt
werden, sondern durch ein Datum im Feld „Schulaustritt“ als „nicht
mehr vorhanden“ gekennzeichnet werden.
Es gibt auch die Möglichkeit die Daten dieser Klasse als Tabellendatei
zu speichern. Eine solche Tabelle kann auch eingelesen werden um die
Klassendaten aus einer externen Quelle zu aktualisieren.
## Schüler auswählen
Wenn ein Schüler im Menü unter dem Klassenmenü ausgewählt wird, werden
nur die Daten des gewählten Schülers angezeigt. Auch die Einträge in
dieser Tabelle können geändert werden. Für manche Felder (z.B. Datum)
gibt es hier angepasste Eingabe-Methoden.
"""
_DELTA_TEXT = """Die möglichen Änderungen werden als Baum dargestellt.
Indem Sie auf die entsprechenden Kontrollfelder klicken, können Sie
einzelne Änderungen (ab)wählen. Durch das Kontrollfeld einer Klasse
können alle Änderungen einer Klasse (ab)gewählt werden.
Um die Änderungen auszuführen, klicken Sie auf die Schaltfläche „Speichern“.
"""
_DELTA_QUIT = "Abbrechen"
_CLASS = "Klasse:"
_SAVE = "Änderungen Speichern"
_NEW_PUPIL = "Neuer Schüler"
_REMOVE_PUPIL = "Schüler entfernen"
_EXPORT = "Tabelle exportieren"
_IMPORT = "Tabelle importieren"
_ALL_CLASSES = "* Alle Klassen *"
_ALL_PUPILS = "* Ganze Klasse *"
_ENTER_PID_TITLE = "Neue Schülerkennung"
_ENTER_PID = "Wählen Sie eine neue,\neindeutige Schülerkennung"
_REMOVE_TITLE = "Schülerdaten löschen"
_REMOVE = "Wollen Sie wirklich {name} aus der Datenbank entfernen?"
_REMOVE_PID = "Wollen Sie wirklich Schüler {pid} aus der Datenbank entfernen?"
_NO_CHANGES = "Fertig?"
_WARN_NO_CHANGES = "Sie haben keine Änderungen gewählt.\n" \
"Wollen Sie diese Ansicht verlassen?"
_FILEOPEN = "Datei öffnen"
_TABLE_FILE = "Tabellendatei (*.xlsx *.ods *.tsv)"
_SAVE_TABLE_FILE = "Tabellendatei (*.xlsx *.tsv)"
_TABLE_FILE_NAME = "Schuelerdaten_{klass}"
_FULL_TABLE_FILE_NAME = "Schuelerdaten"
# Maximum display length (characters) of a pupil delta:
_DELTA_LEN_MAX = 80
#####################################################
import os
from qtpy.QtWidgets import QStackedWidget, QLabel, QTreeWidget, \
QTreeWidgetItem, QPushButton, QHBoxLayout, QVBoxLayout, \
QWidget, QInputDialog, QTextEdit
from qtpy.QtCore import Qt
from ui.ui_support import TabPage, openDialog, VLine, KeySelect, \
QuestionDialog, GuiError, saveDialog
from ui.table import TableWidget
from ui.pupil_grid import PupilGrid, GridView
### +++++
class StackedWidget_info(QTextEdit):
def __init__(self, tab_widget):
self._tab = tab_widget
super().__init__()
self.setReadOnly(True)
self.setMarkdown(_UPDATE_PUPILS_TEXT)
#
def is_modified(self):
return False
#
# def changes(self):
# return False
#
def activate(self):
for pb in ('EXPORT', 'IMPORT', 'C_CHOOSE'):
self._tab.enable(pb, True)
#
def deactivate(self):
return True
###
class StackedWidget_class(QWidget):
def __init__(self, tab_widget):
self._tab = tab_widget
super().__init__()
vbox = QVBoxLayout(self)
self.table = TableWidget(paste = True, on_changed = self.val_changed)
vbox.addWidget(self.table)
self._changes = None
self._row = -1
self.table.itemSelectionChanged.connect(self.selection_changed)
#
def selection_changed(self):
"""Selection changes are used to enable and disable the "remove
pupil data" button.
"""
tsr = self.table.selectedRanges()
if len(tsr) == 1:
tsr1 = tsr[0]
if tsr1.rowCount() == 1:
self._row = tsr1.topRow()
self._tab.enable('REMOVE', True)
return
self._tab.enable('REMOVE', False)
self._row = -1
#
def is_modified(self):
return bool(self._changes)
#
def activate(self, fields, pupil_list):
for pb in ('EXPORT', 'IMPORT', 'ADD', 'C_CHOOSE', 'P_CHOOSE'):
self._tab.enable(pb, True)
# Translated headers:
self.flist, tlist = [], []
for f, t in fields:
if f == 'PID':
tpid = t
continue
self.flist.append(f)
tlist.append(t)
self.table.setColumnCount(len(self.flist))
self.table.setRowCount(len(pupil_list))
self.table.setHorizontalHeaderLabels(tlist)
# Use the pupil-ids as row headers
self.pidlist = [pdata['PID'] for pdata in pupil_list]
self.table.setVerticalHeaderLabels(self.pidlist)
self.rows = []
for pdata in pupil_list:
r = len(self.rows)
cols = []
for f in self.flist:
val = pdata.get(f) or ''
self.table.set_text(r, len(cols), val)
cols.append(val)
self.rows.append(cols)
#?
self.table.resizeColumnsToContents()
self._changes = set()
#
def deactivate(self):
self.table.clear()
self._changes = None
self.pidlist = None
self.rows = None
self.flist = None
#
def val_changed(self, row, col, text):
if self._changes == None: # table not active
return
tag = f'{row:02}:{col:02}'
old = self.rows[row][col]
if text == old:
self._changes.discard(tag)
else:
self._changes.add(tag)
self._tab.enable('SAVE', self.is_modified())
#
def remove_pupil(self):
pid = self.pidlist[self._row]
if QuestionDialog(_REMOVE_TITLE,
_REMOVE_PID.format(pid = pid)):
BACKEND('PUPILS_remove', pid = pid)
#
def save(self):
"""Update pupils with modified fields.
"""
data = []
rows = {int(tag.split(':', 1)[0]) for tag in self._changes}
for row in rows:
pdata = {'PID': self.pidlist[row]}
col = 0
for f in self.flist:
pdata[f] = self.table.get_text(row, col)
col += 1
data.append(pdata)
#for pdata in data:
# print("§§§", pdata)
BACKEND('PUPILS_new_table_data', data = data)
###
class StackedWidget_delta(QWidget):
def __init__(self, tab_widget):
self._tab = tab_widget
super().__init__()
vbox = QVBoxLayout(self)
hbox = QHBoxLayout()
vbox.addLayout(hbox)
hbox.addWidget(QLabel(_DELTA_TEXT))
hbox.addWidget(VLine())
qb = QPushButton(_DELTA_QUIT)
hbox.addWidget(qb)
qb.clicked.connect(self.done)
self.tree = QTreeWidget()
vbox.addWidget(self.tree)
self.tree.setHeaderHidden(True)
self.tree.setWordWrap(True)
self.changes = None
self.elements = None
FUNCTIONS['pupils_DELTA'] = self.DELTA
FUNCTIONS['pupils_DELTA_COMPLETE'] = self.DELTA_COMPLETE
#
def is_modified(self):
raise BUG("This method should not be called")
#
def activate(self):
self.tree.clear()
self.elements = []
self.error = False
#
def deactivate(self):
self.tree.clear()
self.elements = None
#
def DELTA(self, klass, delta):
"""Add elements (changes) for the given class.
"""
if self.error:
return
items = []
self.elements.append((klass, items))
parent = QTreeWidgetItem(self.tree)
parent.setText(0, "Klasse {}".format(klass))
parent.setFlags(parent.flags() | Qt.ItemIsTristate
| Qt.ItemIsUserCheckable)
for d in delta:
child = QTreeWidgetItem(parent)
items.append((child, d))
child.setFlags(child.flags() | Qt.ItemIsUserCheckable)
op, pdata = d[0], d[1]
name = pdata['FIRSTNAME'] + ' ' + pdata['LASTNAME']
if op == 'NEW':
text = 'Neu: %s' % name
elif op == 'DELTA':
text = 'Ändern %s: %s' % (name, str(d[2]))
if len(text) > _DELTA_LEN_MAX:
child.setToolTip(0, '<p>' + text + '</p>')
text = text[:_DELTA_LEN_MAX - 4] + ' ...'
elif op == 'REMOVE':
text = 'Entfernen: %s' % name
else:
SHOW_ERROR("Unexpected pupil-delta operator: %s" % op)
self.error = True
return
child.setText(0, text)
child.setCheckState(0, Qt.Checked)
#
def DELTA_COMPLETE(self):
if self.error:
BACKEND('PUPILS_get_classes', reset = '')
else:
self._tab.enable('SAVE', True)
#
def save(self):
"""Perform the selected changes.
Transmit the change info class-for-class, so that the data chunks
don't get too big.
"""
changes = False
for k, items in self.elements:
# Filter the changes lists
dlist = [d for child, d in items
if child.checkState(0) == Qt.Checked]
if dlist:
changes = True
BACKEND('PUPILS_class_update', klass = k,
delta_list = dlist)
if changes:
# Now perform the actual update
BACKEND('PUPILS_table_update')
elif QuestionDialog(_NO_CHANGES, _WARN_NO_CHANGES):
self.done()
#
def done(self):
BACKEND('PUPILS_get_classes', reset = '')
###
class StackedWidget_pupil(GridView):
def __init__(self, tab_widget):
self._tab = tab_widget
super().__init__()
self.pupil_scene = None
#
def is_modified(self):
return bool(self.pupil_scene.changes())
#
def set_changed(self, show):
self._tab.enable('SAVE', show)
#
def activate(self, pdata, name, exists):
self.pupil_scene = PupilGrid(self, self._tab.INFO)
self.pupil_scene.set_pupil(pdata, name)
self.set_scene(self.pupil_scene)
self.pupil_scene.view()
for pb in ('ADD', 'REMOVE', 'C_CHOOSE'):
self._tab.enable(pb, True)
self._tab.enable('P_CHOOSE', exists)
self.exists = exists
#
def deactivate(self):
self.pupil_scene = None
self.set_scene(None)
#
def remove_pupil(self):
if QuestionDialog(_REMOVE_TITLE,
_REMOVE.format(name = self.pupil_scene.text('title'))):
if self.exists:
BACKEND('PUPILS_remove',
pid = self.pupil_scene.pupil_data['PID'])
else:
BACKEND('PUPILS_get_classes', reset = '')
#
def save(self):
BACKEND('PUPILS_new_data', data = self.pupil_scene.pupil_data)
###
class ManagePupils(TabPage):
"""Manage the pupil data on an individual basis, or else via tables
for a class or even the whole school (e.g. when the data is to be
synchronized with a master database).
For class/school editing, the entries to be changed are shown and
may be deselected.
"""
def __init__(self):
super().__init__(_UPDATE_PUPILS)
self._widgets = {}
self.INFO = None # set on entry
topbox = QHBoxLayout()
self.vbox.addLayout(topbox)
#*********** The "main" widget ***********
self.main = QStackedWidget()
topbox.addWidget(self.main)
### The stacked widgets:
# 0) Text describing the available functions
_w = StackedWidget_info(self)
self.main.addWidget(_w)
self._widgets['INFO'] = _w
# 1) Table with data for all pupils in a class
_w = StackedWidget_class(self)
self.main.addWidget(_w)
self._widgets['CLASS'] = _w
# 2) Tree showing pending changes (update from table)
_w = StackedWidget_delta(self)
self.main.addWidget(_w)
self._widgets['DELTA'] = _w
# 3) Custom editor-table for individual pupil-data
_w = StackedWidget_pupil(self)
self.main.addWidget(_w)
self._widgets['PUPIL'] = _w
topbox.addWidget(VLine())
cbox = QVBoxLayout()
topbox.addLayout(cbox)
### Select class
self.class_select = KeySelect(changed_callback = self.class_changed)
self._widgets['C_CHOOSE'] = self.class_select
cbox.addWidget(QLabel(_CLASS))
cbox.addWidget(self.class_select)
### List of pupils
self.pselect = KeySelect(changed_callback = self.pupil_changed)
self._widgets['P_CHOOSE'] = self.pselect
cbox.addWidget(self.pselect)
cbox.addSpacing(30)
### Save (changed) data
_w = QPushButton(_SAVE)
self._widgets['SAVE'] = _w
cbox.addWidget(_w)
_w.clicked.connect(self.save)
cbox.addSpacing(30)
### Add a new pupil, or delete the entry for an existing pupil
_w = QPushButton(_NEW_PUPIL)
self._widgets['ADD'] = _w
cbox.addWidget(_w)
_w.clicked.connect(self.new_pupil)
cbox.addSpacing(10)
_w = QPushButton(_REMOVE_PUPIL)
self._widgets['REMOVE'] = _w
cbox.addWidget(_w)
_w.clicked.connect(self.remove_pupil)
cbox.addStretch(1)
### Export the current class table (or complete table)
_w = QPushButton(_EXPORT)
self._widgets['EXPORT'] = _w
cbox.addWidget(_w)
_w.clicked.connect(self.export)
cbox.addSpacing(30)
### Import table (class or whole school)
_w = QPushButton(_IMPORT)
self._widgets['IMPORT'] = _w
cbox.addWidget(_w)
_w.clicked.connect(self.update)
#
def set_widget(self, tag, **params):
"""Select the widget to be displayed in the "main" stack.
"""
current = self.main.currentWidget()
if current:
current.deactivate()
new = self._widgets[tag]
self.main.setCurrentWidget(new)
# Allow each function group to decide which buttons are enabled
for pb in ('SAVE', 'ADD', 'REMOVE', 'EXPORT', 'IMPORT',
'P_CHOOSE', 'C_CHOOSE'):
self.enable(pb, False)
new.activate(**params)
#
def is_modified(self):
return self.main.currentWidget().is_modified()
#
def enable(self, tag, on):
"""Enable or disable the widget with given tag.
"""
self._widgets[tag].setEnabled(on)
#
def enter(self):
"""Called when the tab is selected.
"""
self.set_widget('INFO')
BACKEND('PUPILS_get_info') # -> SET_INFO(...)
#
def SET_INFO(self, **params):
"""CALLBACK: set up basic info for pupils.
Expected parameters:
<fields> is a list of field names:
[[field1_internal_name, field1_local_name], ... ]
<SEX> is a list of possible values.
"""
self.INFO = params
#print("INFO: ", self.INFO)
BACKEND('PUPILS_get_classes') # -> SET_CLASSES(..., '')
#
#
def leave(self):
"""Called when the tab is deselected.
"""
self.main.currentWidget().deactivate()
#
def SET_CLASSES(self, classes, klass):
"""CALLBACK: Supplies the classes as a list: [class10, class9, ...]
and the selected class. Set the class selection widget
and trigger a "change of class" signal.
"""
self.set_widget('INFO')
try:
ix = classes.index(klass) + 1
except ValueError:
ix = 0
self.class_select.set_items([('', _ALL_CLASSES)] +
[(c, c) for c in classes], index = ix)
self.class_select.trigger()
#
def class_changed(self, klass):
"""Manual selection of a class (including the 'empty' class,
meaning "no class" or "all classes", according to usage ...).
"""
if self.leave_ok():
self.leave()
BACKEND('PUPILS_set_class', klass = klass) # -> SET_PUPILS(...)
return True
return False
#
def SET_PUPILS(self, pupils, pid):
"""CALLBACK: Supplies the pupils as a list of (pid, name) pairs.
<pid> is the id of the selected pupil (it may be invalid).
Set the pupil selection widget and trigger a "change of pupil"
signal.
"""
self.pselect.set_items([('', _ALL_PUPILS)] + pupils)
try:
self.pselect.reset(pid)
except GuiError:
pass
BACKEND('PUPILS_set_pupil', pid = pid)
#
def pupil_changed(self, pid):
"""A new pupil has been selected: reset the grid accordingly.
"""
if not self.leave_ok():
return False
BACKEND('PUPILS_set_pupil', pid = pid)
return True
#
def SET_INFO_VIEW(self):
"""Show info page.
"""
self.set_widget('INFO')
#
def SET_CLASS_VIEW(self, pdata_list):
"""Show class editor.
"""
self.set_widget('CLASS', fields = self.INFO['fields'],
pupil_list = pdata_list)
#
def SET_PUPIL_VIEW(self, pdata, name, exists = True):
self.set_widget('PUPIL', pdata = pdata, name = name, exists = exists)
#
def new_pupil(self):
if not self.leave_ok():
return
# First enter pid (which is not editable).
# The back-end call is necessary to get a pid suggestion (as
# well as the rest of the "dummy" data).
BACKEND('PUPILS_new_pupil')
#
def NEW_PUPIL(self, data, ask_pid = None):
if ask_pid:
etext = data.get('__ERROR__')
mtext = _ENTER_PID
if etext:
mtext = etext + '\n\n' + mtext
pid, ok = QInputDialog.getText(self, _ENTER_PID_TITLE,
mtext, text = ask_pid)
if ok:
if pid != ask_pid:
# Need to check validity of the pid
BACKEND('PUPILS_new_pupil', pid = pid)
return
else:
return
self.SET_PUPIL_VIEW(data, _NEW_PUPIL, exists = False)
#
def save(self):
self.main.currentWidget().save()
#
def remove_pupil(self):
self.main.currentWidget().remove_pupil()
#
def export(self):
if not self.leave_ok():
return
klass = self.class_select.selected()
fpath = saveDialog(_SAVE_TABLE_FILE,
_TABLE_FILE_NAME.format(klass = klass) if klass
else _FULL_TABLE_FILE_NAME)
if fpath:
BACKEND('PUPILS_export_data', filepath = fpath, klass = klass)
#
def update(self):
if self.leave_ok():
fpath = openDialog(_TABLE_FILE)
if fpath:
# Ask for the changes
BACKEND('PUPILS_table_delta', filepath = fpath)
#
def DELTA_START(self):
self.set_widget('DELTA')
tab_pupils = ManagePupils()
TABS.append(tab_pupils)
FUNCTIONS['pupils_SET_INFO'] = tab_pupils.SET_INFO
FUNCTIONS['pupils_SET_CLASSES'] = tab_pupils.SET_CLASSES
FUNCTIONS['pupils_SET_PUPILS'] = tab_pupils.SET_PUPILS
FUNCTIONS['pupils_SET_CLASS_VIEW'] = tab_pupils.SET_CLASS_VIEW
FUNCTIONS['pupils_SET_PUPIL_VIEW'] = tab_pupils.SET_PUPIL_VIEW
FUNCTIONS['pupils_SET_INFO_VIEW'] = tab_pupils.SET_INFO_VIEW
FUNCTIONS['pupils_NEW_PUPIL'] = tab_pupils.NEW_PUPIL
FUNCTIONS['pupils_DELTA_START'] = tab_pupils.DELTA_START
| 33.598746 | 78 | 0.608836 |
6f9335269b376a8d3e3bf661559622e426576304
| 4,666 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v4_2/party_model.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v4_2/party_model.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v4_2/party_model.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
def execute():
frappe.reload_doc("accounts", "doctype", "account")
frappe.reload_doc("setup", "doctype", "company")
frappe.reload_doc("accounts", "doctype", "gl_entry")
frappe.reload_doc("accounts", "doctype", "journal_entry_account")
receivable_payable_accounts = create_receivable_payable_account()
if receivable_payable_accounts:
set_party_in_jv_and_gl_entry(receivable_payable_accounts)
delete_individual_party_account()
remove_customer_supplier_account_report()
def create_receivable_payable_account():
receivable_payable_accounts = frappe._dict()
def _create_account(args):
if args["parent_account"] and frappe.db.exists("Account", args["parent_account"]):
account_id = frappe.db.get_value("Account",
{"account_name": args["account_name"], "company": args["company"]})
if not account_id:
account = frappe.new_doc("Account")
account.is_group = 0
account.update(args)
account.insert()
account_id = account.name
frappe.db.set_value("Company", args["company"], ("default_receivable_account"
if args["account_type"]=="Receivable" else "default_payable_account"), account_id)
receivable_payable_accounts.setdefault(args["company"], {}).setdefault(args["account_type"], account_id)
for company in frappe.db.sql_list("select name from tabCompany"):
_create_account({
"account_name": "Debtors",
"account_type": "Receivable",
"company": company,
"parent_account": get_parent_account(company, "Customer")
})
_create_account({
"account_name": "Creditors",
"account_type": "Payable",
"company": company,
"parent_account": get_parent_account(company, "Supplier")
})
return receivable_payable_accounts
def get_parent_account(company, master_type):
parent_account = None
if "receivables_group" in frappe.db.get_table_columns("Company"):
parent_account = frappe.db.get_value("Company", company,
"receivables_group" if master_type=="Customer" else "payables_group")
if not parent_account:
parent_account = frappe.db.get_value("Account", {"company": company,
"account_name": "Accounts Receivable" if master_type=="Customer" else "Accounts Payable"})
if not parent_account:
parent_account = frappe.db.sql_list("""select parent_account from tabAccount
where company=%s and ifnull(master_type, '')=%s and ifnull(master_name, '')!='' limit 1""",
(company, master_type))
parent_account = parent_account[0][0] if parent_account else None
return parent_account
def set_party_in_jv_and_gl_entry(receivable_payable_accounts):
accounts = frappe.db.sql("""select name, master_type, master_name, company from `tabAccount`
where ifnull(master_type, '') in ('Customer', 'Supplier') and ifnull(master_name, '') != ''""", as_dict=1)
account_map = frappe._dict()
for d in accounts:
account_map.setdefault(d.name, d)
if not account_map:
return
for dt in ["Journal Entry Account", "GL Entry"]:
records = frappe.db.sql("""select name, account from `tab%s`
where account in (%s) and ifnull(party, '') = '' and docstatus < 2""" %
(dt, ", ".join(['%s']*len(account_map))), tuple(account_map.keys()), as_dict=1)
for i, d in enumerate(records):
account_details = account_map.get(d.account, {})
account_type = "Receivable" if account_details.get("master_type")=="Customer" else "Payable"
new_account = receivable_payable_accounts[account_details.get("company")][account_type]
frappe.db.sql("update `tab{0}` set account=%s, party_type=%s, party=%s where name=%s".format(dt),
(new_account, account_details.get("master_type"), account_details.get("master_name"), d.name))
if i%500 == 0:
frappe.db.commit()
def delete_individual_party_account():
frappe.db.sql("""delete from `tabAccount`
where ifnull(master_type, '') in ('Customer', 'Supplier')
and ifnull(master_name, '') != ''
and not exists(select gle.name from `tabGL Entry` gle
where gle.account = tabAccount.name)""")
accounts_not_deleted = frappe.db.sql_list("""select tabAccount.name from `tabAccount`
where ifnull(master_type, '') in ('Customer', 'Supplier')
and ifnull(master_name, '') != ''
and exists(select gle.name from `tabGL Entry` gle where gle.account = tabAccount.name)""")
if accounts_not_deleted:
print("Accounts not deleted: " + "\n".join(accounts_not_deleted))
def remove_customer_supplier_account_report():
for d in ["Customer Account Head", "Supplier Account Head"]:
frappe.delete_doc("Report", d)
| 39.542373 | 108 | 0.729104 |
fba41cb91da20a3719f2c7d150788a524e270931
| 864 |
py
|
Python
|
bot/exts/fun/conversation_starters.py
|
thecoderkitty/fluffington-bot
|
f518e7b66487aaf9e6c507ced43e15760d604be2
|
[
"MIT"
] | null | null | null |
bot/exts/fun/conversation_starters.py
|
thecoderkitty/fluffington-bot
|
f518e7b66487aaf9e6c507ced43e15760d604be2
|
[
"MIT"
] | null | null | null |
bot/exts/fun/conversation_starters.py
|
thecoderkitty/fluffington-bot
|
f518e7b66487aaf9e6c507ced43e15760d604be2
|
[
"MIT"
] | null | null | null |
import pathlib
import random
import discord
from discord.ext import commands
import yaml
from bot.bot import Bot
FORM_URL = "https://forms.gle/sb2jNbvVcTorNPTX6"
with pathlib.Path("bot/resources/fun/starters.yaml").open("r", encoding="utf8") as f:
STARTERS = yaml.load(f, Loader=yaml.FullLoader)
class ConversationStarters(commands.Cog):
    """Cog that sends random conversation-starter topics."""
def __init__(self, bot: Bot):
self.bot = bot
@commands.command(name="topic")
async def topic(self, ctx: commands.Context):
random_topic = random.choice(STARTERS)
topic_embed = discord.Embed(
            description=f"Want to suggest a new topic? [Click here]({FORM_URL})"
)
topic_embed.title = random_topic
await ctx.send(embed=topic_embed)
def setup(bot: Bot):
"""load the ConversationStarters cog"""
bot.add_cog(ConversationStarters(bot))
| 26.181818 | 85 | 0.694444 |
83b7a3131f12c060d9d815727925d0311b38b78f
| 17,094 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/traci/_simulation.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/traci/_simulation.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/traci/_simulation.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
# -*- coding: utf-8 -*-
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2011-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file _simulation.py
# @author Daniel Krajzewicz
# @author Jakob Erdmann
# @author Michael Behrisch
# @date 2011-03-15
from __future__ import absolute_import
import warnings
from . import constants as tc
from .domain import Domain
from .exceptions import FatalTraCIError
class Stage(object):
def __init__(self,
type=tc.INVALID_INT_VALUE,
vType="",
line="",
destStop="",
edges=[],
travelTime=tc.INVALID_DOUBLE_VALUE,
cost=tc.INVALID_DOUBLE_VALUE,
length=tc.INVALID_DOUBLE_VALUE,
intended="",
depart=tc.INVALID_DOUBLE_VALUE,
departPos=tc.INVALID_DOUBLE_VALUE,
arrivalPos=tc.INVALID_DOUBLE_VALUE,
description=""):
self.type = type
self.vType = vType
self.line = line
self.destStop = destStop
self.edges = edges
self.travelTime = travelTime
self.cost = cost
self.length = length
self.intended = intended
self.depart = depart
self.departPos = departPos
self.arrivalPos = arrivalPos
self.description = description
def __attr_repr__(self, attrname, default=""):
if getattr(self, attrname) == default:
return ""
else:
val = getattr(self, attrname)
if val == tc.INVALID_DOUBLE_VALUE:
val = "INVALID"
return "%s=%s" % (attrname, val)
def __repr__(self):
return "Stage(%s)" % ', '.join([v for v in [
self.__attr_repr__("type"),
self.__attr_repr__("vType"),
self.__attr_repr__("line"),
self.__attr_repr__("destStop"),
self.__attr_repr__("edges"),
self.__attr_repr__("travelTime"),
self.__attr_repr__("cost"),
self.__attr_repr__("length"),
self.__attr_repr__("intended"),
self.__attr_repr__("depart"),
self.__attr_repr__("departPos"),
self.__attr_repr__("arrivalPos"),
self.__attr_repr__("description"),
] if v != ""])
def _readStage(result):
# compound size and type
assert(result.read("!i")[0] == 13)
stageType = result.readTypedInt()
vType = result.readTypedString()
line = result.readTypedString()
destStop = result.readTypedString()
edges = result.readTypedStringList()
travelTime = result.readTypedDouble()
cost = result.readTypedDouble()
length = result.readTypedDouble()
intended = result.readTypedString()
depart = result.readTypedDouble()
departPos = result.readTypedDouble()
arrivalPos = result.readTypedDouble()
description = result.readTypedString()
return Stage(stageType, vType, line, destStop, edges, travelTime, cost,
length, intended, depart, departPos, arrivalPos, description)
def _writeStage(stage):
format = "tisssldddsddds"
values = [13, stage.type, stage.vType, stage.line, stage.destStop, stage.edges,
stage.travelTime, stage.cost, stage.length, stage.intended,
stage.depart, stage.departPos, stage.arrivalPos, stage.description]
return format, values
_RETURN_VALUE_FUNC = {tc.FIND_ROUTE: _readStage}
class SimulationDomain(Domain):
Stage = Stage
def __init__(self):
Domain.__init__(self, "simulation", tc.CMD_GET_SIM_VARIABLE, tc.CMD_SET_SIM_VARIABLE,
tc.CMD_SUBSCRIBE_SIM_VARIABLE, tc.RESPONSE_SUBSCRIBE_SIM_VARIABLE,
tc.CMD_SUBSCRIBE_SIM_CONTEXT, tc.RESPONSE_SUBSCRIBE_SIM_CONTEXT,
_RETURN_VALUE_FUNC)
@staticmethod
def walkingStage(edges, arrivalPos, destStop="", description=""):
return Stage(2, "", "", destStop, edges, 0, 0, 0, "", 0, 0, arrivalPos, description)
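    # Usage sketch (edge and person ids are placeholders; assumes a person
    # "pedestrian0" already exists in the simulation): the resulting Stage
    # can be appended to a person's plan, e.g.
    #
    #   stage = traci.simulation.walkingStage(["edgeA", "edgeB"], arrivalPos=20.0)
    #   traci.person.appendStage("pedestrian0", stage)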
def getTime(self):
"""getTime() -> double
Returns the current simulation time in s.
"""
return self._getUniversal(tc.VAR_TIME)
def step(self, time=0.):
"""step(double) -> None
Make a simulation step and simulate up to the given sim time (in seconds).
If the given value is 0 or absent, exactly one step is performed.
Values smaller than or equal to the current sim time result in no action.
"""
if self._connection is None:
raise FatalTraCIError("Not connected.")
return self._connection.simulationStep(time)
def getCurrentTime(self):
"""getCurrentTime() -> integer
Returns the current simulation time in ms.
"""
warnings.warn("getCurrentTime is deprecated, please use getTime which returns floating point seconds",
stacklevel=2)
return self._getUniversal(tc.VAR_TIME_STEP)
def getLoadedNumber(self):
"""getLoadedNumber() -> integer
Returns the number of vehicles which were loaded in this time step.
"""
return self._getUniversal(tc.VAR_LOADED_VEHICLES_NUMBER)
def getLoadedIDList(self):
"""getLoadedIDList() -> list(string)
Returns a list of ids of vehicles which were loaded in this time step.
"""
return self._getUniversal(tc.VAR_LOADED_VEHICLES_IDS)
def getDepartedNumber(self):
"""getDepartedNumber() -> integer
Returns the number of vehicles which departed (were inserted into the road network) in this time step.
"""
return self._getUniversal(tc.VAR_DEPARTED_VEHICLES_NUMBER)
def getDepartedIDList(self):
"""getDepartedIDList() -> list(string)
Returns a list of ids of vehicles which departed (were inserted into the road network) in this time step.
"""
return self._getUniversal(tc.VAR_DEPARTED_VEHICLES_IDS)
def getArrivedNumber(self):
"""getArrivedNumber() -> integer
Returns the number of vehicles which arrived (have reached their destination and are removed from the road
network) in this time step.
"""
return self._getUniversal(tc.VAR_ARRIVED_VEHICLES_NUMBER)
def getArrivedIDList(self):
"""getArrivedIDList() -> list(string)
Returns a list of ids of vehicles which arrived (have reached their destination and are removed from the road
network) in this time step.
"""
return self._getUniversal(tc.VAR_ARRIVED_VEHICLES_IDS)
def getParkingStartingVehiclesNumber(self):
"""getParkingStartingVehiclesNumber() -> integer
        Returns the number of vehicles which started to park in this time step.
"""
return self._getUniversal(tc.VAR_PARKING_STARTING_VEHICLES_NUMBER)
def getParkingStartingVehiclesIDList(self):
"""getParkingStartingVehiclesIDList() -> list(string)
        Returns a list of ids of vehicles which started to park in this time step.
"""
return self._getUniversal(tc.VAR_PARKING_STARTING_VEHICLES_IDS)
def getParkingEndingVehiclesNumber(self):
"""getParkingEndingVehiclesNumber() -> integer
        Returns the number of vehicles which ended parking in this time step.
"""
return self._getUniversal(tc.VAR_PARKING_ENDING_VEHICLES_NUMBER)
def getParkingEndingVehiclesIDList(self):
"""getParkingEndingVehiclesIDList() -> list(string)
        Returns a list of ids of vehicles which ended parking in this time step.
"""
return self._getUniversal(tc.VAR_PARKING_ENDING_VEHICLES_IDS)
def getStopStartingVehiclesNumber(self):
"""getStopStartingVehiclesNumber() -> integer
        Returns the number of vehicles which started a stop in this time step.
"""
return self._getUniversal(tc.VAR_STOP_STARTING_VEHICLES_NUMBER)
def getStopStartingVehiclesIDList(self):
"""getStopStartingVehiclesIDList() -> list(string)
        Returns a list of ids of vehicles which started a stop in this time step.
"""
return self._getUniversal(tc.VAR_STOP_STARTING_VEHICLES_IDS)
def getStopEndingVehiclesNumber(self):
"""getStopEndingVehiclesNumber() -> integer
        Returns the number of vehicles that ended a stop in this time step.
"""
return self._getUniversal(tc.VAR_STOP_ENDING_VEHICLES_NUMBER)
def getStopEndingVehiclesIDList(self):
"""getStopEndingVehiclesIDList() -> list(string)
        Returns a list of ids of vehicles that ended a stop in this time step.
"""
return self._getUniversal(tc.VAR_STOP_ENDING_VEHICLES_IDS)
def getCollidingVehiclesNumber(self):
"""getCollidingVehiclesNumber() -> integer
Return number of vehicles involved in a collision (typically 2 per
collision).
"""
return self._getUniversal(tc.VAR_COLLIDING_VEHICLES_NUMBER)
def getCollidingVehiclesIDList(self):
"""getCollidingVehiclesIDList() -> list(string)
Return Ids of vehicles involved in a collision (typically 2 per
collision).
"""
return self._getUniversal(tc.VAR_COLLIDING_VEHICLES_IDS)
def getEmergencyStoppingVehiclesNumber(self):
"""getEmergencyStoppingVehiclesNumber() -> integer
Return number of vehicles that performed an emergency stop in the last step
"""
return self._getUniversal(tc.VAR_EMERGENCYSTOPPING_VEHICLES_NUMBER)
def getEmergencyStoppingVehiclesIDList(self):
"""getEmergencyStoppingVehiclesIDList() -> list(string)
        Return Ids of vehicles that performed an emergency stop in the last step
"""
return self._getUniversal(tc.VAR_EMERGENCYSTOPPING_VEHICLES_IDS)
def getMinExpectedNumber(self):
"""getMinExpectedNumber() -> integer
Returns the number of vehicles which are in the net plus the
ones still waiting to start. This number may be smaller than
the actual number of vehicles still to come because of delayed
route file parsing. If the number is 0 however, it is
guaranteed that all route files have been parsed completely
and all vehicles have left the network.
"""
return self._getUniversal(tc.VAR_MIN_EXPECTED_VEHICLES)
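    # Usage sketch (illustrative only, not part of the original module): a typical
    # TraCI client loop drives the simulation with simulationStep() and uses
    # getMinExpectedNumber() as its termination condition. The sumo binary name and
    # config file below are assumptions made purely for illustration.
    #
    #   import traci
    #   traci.start(["sumo", "-c", "scenario.sumocfg"])
    #   while traci.simulation.getMinExpectedNumber() > 0:
    #       traci.simulationStep()
    #   traci.close()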
    def getBusStopIDList(self):
        """getBusStopIDList() -> list(string)
        Returns a list of all existing bus stop ids.
        """
        return self._getUniversal(tc.VAR_BUS_STOP_ID_LIST)
def getBusStopWaiting(self, stopID):
"""getBusStopWaiting() -> integer
Get the total number of waiting persons at the named bus stop.
"""
return self._getUniversal(tc.VAR_BUS_STOP_WAITING, stopID)
def getBusStopWaitingIDList(self, stopID):
"""getBusStopWaiting() -> list(string)
Get the IDs of waiting persons at the named bus stop.
"""
return self._getUniversal(tc.VAR_BUS_STOP_WAITING_IDS, stopID)
def getStartingTeleportNumber(self):
"""getStartingTeleportNumber() -> integer
Returns the number of vehicles which started to teleport in this time step.
"""
return self._getUniversal(tc.VAR_TELEPORT_STARTING_VEHICLES_NUMBER)
def getStartingTeleportIDList(self):
"""getStartingTeleportIDList() -> list(string)
Returns a list of ids of vehicles which started to teleport in this time step.
"""
return self._getUniversal(tc.VAR_TELEPORT_STARTING_VEHICLES_IDS)
def getEndingTeleportNumber(self):
"""getEndingTeleportNumber() -> integer
        Returns the number of vehicles whose teleport ended in this time step.
"""
return self._getUniversal(tc.VAR_TELEPORT_ENDING_VEHICLES_NUMBER)
def getEndingTeleportIDList(self):
"""getEndingTeleportIDList() -> list(string)
        Returns a list of ids of vehicles whose teleport ended in this time step.
"""
return self._getUniversal(tc.VAR_TELEPORT_ENDING_VEHICLES_IDS)
def getDeltaT(self):
"""getDeltaT() -> double
Returns the length of one simulation step in seconds
"""
return self._getUniversal(tc.VAR_DELTA_T)
def getNetBoundary(self):
"""getNetBoundary() -> ((double, double), (double, double))
The boundary box of the simulation network.
"""
return self._getUniversal(tc.VAR_NET_BOUNDING_BOX)
def convert2D(self, edgeID, pos, laneIndex=0, toGeo=False):
posType = tc.POSITION_2D
if toGeo:
posType = tc.POSITION_LON_LAT
return self._getUniversal(tc.POSITION_CONVERSION, "", "trB", 2, (edgeID, pos, laneIndex), posType)
def convert3D(self, edgeID, pos, laneIndex=0, toGeo=False):
posType = tc.POSITION_3D
if toGeo:
posType = tc.POSITION_LON_LAT_ALT
return self._getUniversal(tc.POSITION_CONVERSION, "", "trB", 2, (edgeID, pos, laneIndex), posType)
def convertRoad(self, x, y, isGeo=False, vClass="ignoring"):
format = "toBs"
if isGeo:
format = "tgBs"
result = self._getCmd(tc.POSITION_CONVERSION, "", format, 3, (x, y), tc.POSITION_ROADMAP, vClass)
result.read("!B")
return result.readString(), result.readDouble(), result.read("!B")[0]
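    # Note (reconstructed from the read calls above): convertRoad maps x/y (or
    # lon/lat with isGeo=True) onto the road network and returns an
    # (edgeID, lanePosition, laneIndex) triple. Illustrative call with invented
    # coordinates:
    #   edge_id, lane_pos, lane_idx = traci.simulation.convertRoad(510.3, 42.7)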
def convertGeo(self, x, y, fromGeo=False):
format = "toB"
toType = tc.POSITION_LON_LAT
if fromGeo:
format = "tgB"
toType = tc.POSITION_2D
return self._getUniversal(tc.POSITION_CONVERSION, "", format, 2, (x, y), toType)
def getDistance2D(self, x1, y1, x2, y2, isGeo=False, isDriving=False):
"""getDistance2D(double, double, double, double, boolean, boolean) -> double
Returns the distance between the two coordinate pairs (x1,y1) and (x2,y2)
If isGeo=True, coordinates are interpreted as longitude and latitude rather
than cartesian coordinates in meters.
If isDriving=True, the coordinates are mapped onto the road network and the
length of the shortest route in the network is returned. Otherwise, the
straight-line distance is returned.
"""
format = "tggu" if isGeo else "toou"
distType = tc.REQUEST_AIRDIST
if isDriving:
distType = tc.REQUEST_DRIVINGDIST
return self._getUniversal(tc.DISTANCE_REQUEST, "", format, 3, (x1, y1), (x2, y2), distType)
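    # Illustrative example (coordinates invented): air-line vs. driving distance.
    #   air   = traci.simulation.getDistance2D(0.0, 0.0, 100.0, 0.0)
    #   drive = traci.simulation.getDistance2D(0.0, 0.0, 100.0, 0.0, isDriving=True)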
def getDistanceRoad(self, edgeID1, pos1, edgeID2, pos2, isDriving=False):
"""getDistanceRoad(string, double, string, double, boolean) -> double
Reads two positions on the road network and an indicator whether the air or the driving distance shall be
computed. Returns the according distance.
"""
distType = tc.REQUEST_AIRDIST
if isDriving:
distType = tc.REQUEST_DRIVINGDIST
return self._getUniversal(tc.DISTANCE_REQUEST, "", "trru", 3,
(edgeID1, pos1, 0), (edgeID2, pos2, 0), distType)
def findRoute(self, fromEdge, toEdge, vType="", depart=-1., routingMode=0):
return self._getUniversal(tc.FIND_ROUTE, "", "tsssdi", 5, fromEdge, toEdge, vType, depart, routingMode)
def findIntermodalRoute(self, fromEdge, toEdge, modes="", depart=-1., routingMode=0, speed=-1.,
walkFactor=-1., departPos=0., arrivalPos=tc.INVALID_DOUBLE_VALUE, departPosLat=0.,
pType="", vType="", destStop=""):
answer = self._getCmd(tc.FIND_INTERMODAL_ROUTE, "", "tsssdidddddsss", 13,
fromEdge, toEdge, modes, depart, routingMode, speed, walkFactor,
departPos, arrivalPos, departPosLat, pType, vType, destStop)
answer.read("!B") # Type
result = []
for _ in range(answer.readInt()):
answer.read("!B") # Type
result.append(_readStage(answer))
return tuple(result)
def clearPending(self, routeID=""):
self._setCmd(tc.CMD_CLEAR_PENDING_VEHICLES, "", "s", routeID)
def saveState(self, fileName):
self._setCmd(tc.CMD_SAVE_SIMSTATE, "", "s", fileName)
def loadState(self, fileName):
self._setCmd(tc.CMD_LOAD_SIMSTATE, "", "s", fileName)
def writeMessage(self, msg):
self._setCmd(tc.CMD_MESSAGE, "", "s", msg)
def subscribe(self, varIDs=(tc.VAR_DEPARTED_VEHICLES_IDS,), begin=0, end=2**31 - 1):
"""subscribe(list(integer), double, double) -> None
Subscribe to one or more simulation values for the given interval.
"""
Domain.subscribe(self, "", varIDs, begin, end)
def getSubscriptionResults(self):
"""getSubscriptionResults() -> dict(integer: <value_type>)
Returns the subscription results for the last time step.
It is not possible to retrieve older subscription results than the ones
from the last time step.
"""
return Domain.getSubscriptionResults(self, "")
| 37.986667 | 117 | 0.648707 |
79105e9db751863b607396d1a212e15dddfc2736
| 2,450 |
py
|
Python
|
Python/Buch_ATBS/Teil_2/Kapitel_15_Aufgaben_zeitlich_Planen_und_Programme_starten/03_xkcd_multithread_download/03_xkcd_multithread_download.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | null | null | null |
Python/Buch_ATBS/Teil_2/Kapitel_15_Aufgaben_zeitlich_Planen_und_Programme_starten/03_xkcd_multithread_download/03_xkcd_multithread_download.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | 6 |
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
Python/Buch_ATBS/Teil_2/Kapitel_15_Aufgaben_zeitlich_Planen_und_Programme_starten/03_xkcd_multithread_download/03_xkcd_multithread_download.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
# 03_xkcd_multithread_download.py
# This exercise is about speeding up the comic downloads
# by using multiple threads for downloading.
import os, threading, requests, bs4
os.chdir(os.path.dirname(__file__))
target_dir='.\\comics'
source_url='https://xkcd.com'
# Check whether the site is reachable
url_content=requests.get(source_url)
try:
url_content.raise_for_status()
except:
print('URL xkcd.com kann nicht aufgerufen werden. Script wird beendet.')
exit()
# Download a comic inside a thread
def download_comic(comic_url):
file_name=comic_url.split('/')[-1]
new_file=open(target_dir+'\\'+file_name, 'wb')
get_comic=requests.get(comic_url)
try:
get_comic.raise_for_status()
for chunk in get_comic.iter_content(10**6):
new_file.write(chunk)
new_file.close()
except:
        print('Bild-URL %s ist fehlerhaft' % comic_url)
# Collect the links to the comics and to the preceding pages
link_counter=0
threads=[]
def scrape_comic_links(url_name):
global link_counter, threads
while link_counter != int(comic_target_amount):
url_content=requests.get(url_name)
try:
url_content.raise_for_status()
bs4_object=bs4.BeautifulSoup(url_content.text, features='html.parser')
bs4_next_result=bs4_object.select('a[rel="prev"]')
next_url=bs4_next_result[0].get('href')
bs4_comic_result=bs4_object.select('div #comic img')
comic_url=bs4_comic_result[0].get('src')
comic_url='https://'+comic_url.lstrip('/')
url_name=source_url+next_url
link_counter+=1
            # Start the download thread
thread_object=threading.Thread(name='Download_Comic', target=download_comic, args=[comic_url])
thread_object.start()
            # Add this thread to a list so we can check later that everything has been processed.
threads.append(thread_object)
except:
print('URL nicht gefunden.')
return
else:
link_counter=0
return
while True:
print('Wieviele Comics sollen heruntergeladen werden?')
comic_target_amount=input()
if comic_target_amount.isdecimal():
scrape_comic_links(source_url)
        # Wait until all download threads have finished.
for thread in threads:
thread.join()
print('Downloads abgeschlossen')
break
| 34.507042 | 106 | 0.672653 |
f73d121b6b27b06f03eb59a172064c7c0937ce64
| 6,791 |
py
|
Python
|
lib/moerdergraphall.py
|
orithena/moerderspiel
|
7a11598cd80f26824376207805d3a937b9c1d831
|
[
"MIT"
] | 16 |
2015-09-30T13:45:07.000Z
|
2022-01-31T16:45:50.000Z
|
lib/moerdergraphall.py
|
orithena/moerderspiel
|
7a11598cd80f26824376207805d3a937b9c1d831
|
[
"MIT"
] | 5 |
2015-09-16T13:44:08.000Z
|
2018-02-06T11:24:59.000Z
|
lib/moerdergraphall.py
|
orithena/moerderspiel
|
7a11598cd80f26824376207805d3a937b9c1d831
|
[
"MIT"
] | 4 |
2015-05-28T17:39:55.000Z
|
2022-01-29T01:27:42.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os.path
import yapgvb as graph
from yapgvb import RenderingContext, CLIRenderError
import textwrap
import math
import colorsys
import pickle
from moerderklassen import *
from utils import colorgen
import utils
class MyRenderingContext(RenderingContext):
""" Used only if yapgvb does not use libboost.
W/o libboost, this class is used to modify the parameters passed to graphviz/dot.
"""
def render(self, graph, output_type, destfile):
from yapgvb import tempfile
if isinstance(destfile,file):
filename = destfile.name
destfile.close()
elif isinstance(destfile,str):
filename = destfile
else:
raise Exception
temp = tempfile('.dot')
graph._write_dot(temp)
cmd = "%s -Gsize=100,50 -T%s -o%s %s" % (self._engine_executable, output_type, filename, temp)
ier = os.system(cmd)
if ier:
#check_graphviz_working()
raise CLIRenderError("Error code %s rendering %s" % (ier, temp))
#os.remove(temp)
def moerdergraphall(game, filename, alledges=False, nodefontsize=8.0, edgefontsize=8.0, rounds=None):
if rounds is None:
rounds = game.rounds.values()
elif type(rounds) is not list:
rounds = [rounds]
# G is the main Graph object
G = graph.Digraph("Moerder")
G.model = 'subset'
G.overlap = 'compress'
G.splines = True
G.normalize = True
G.packmode = 'graph'
G.rankdir = 'LR'
# a dict for indexing all nodes
nodes = {}
# we need to keep some of the nodes in mind
prev_node = first_node = node = None
# make a copy of the participant list so we don't jumble up the original list
participants = sorted(rounds, key=lambda x: len(x.participants))[-1].participants[:]
gmnode = G.add_node('Game Master')
gmnode.label = 'Game Master'
gmnode.fontsize = nodefontsize
gmnode.fontname = 'arial'
gmnode.color = 'gray'
gmnode.fontcolor = 'gray'
gmnode.style = 'rounded'
hnode = inode = G.add_node('invisible')
inode.style = 'invisible'
inode.pos = (0.0, 0.0)
if len(participants) > 120:
sorrynode = G.add_node(u'Sorry, zu viele Nodes in diesem Graph...')
sorrynode.label = u'Sorry, zu viele Nodes in diesem Graph...'
sorrynode.style = 'rounded,filled'
sorrynode.fontsize = nodefontsize
sorrynode.style = 'rounded,filled'
sorrynode.penwidth = 2
sorrynode.color = '#00003380'
sorrynode.fillcolor = '#FFFFFF00'
sorrynode.margin = 0.01
# do the layout math and save to file
if graph.__dict__.has_key('_yapgvb_py'):
# if yapgvb works in python-only mode
rc = MyRenderingContext()
G.layout(graph.engines.dot, rendering_context=rc)
G.render(filename, rendering_context=rc)
else:
# if yapgvb has libboost support compiled in
G.layout(graph.engines.dot)
G.render(filename)
return
massmurderers = game.getMassMurderer()
massmurdererlist = [ player.public_id for player in massmurderers['killers'] ] if len(massmurderers) > 0 else []
if not alledges:
# if not admin/gameover view: sort nodes prior to adding them to the graph
participants.sort(key = lambda p: p.player.name + p.player.info)
nodecount = len(participants)
nodesperline = math.trunc(math.sqrt(nodecount))
# for each participant, add a node to the graph bearing his name
nodenumber = 0
for participant in participants:
nodenumber += 1
name = participant.player.name
if len(participant.player.info) > 0:
name += "\\n" + participant.player.info
name = utils.dotescape(name)
node = G.add_node(participant.player.public_id)
node.label = name.encode('utf-8')
node.fontsize = nodefontsize
node.style = 'rounded,filled'
node.penwidth = 2
node.color = '#00003380'
node.fillcolor = '#FFFFFF00'
node.margin = 0.01
nodeweight = game.getDeathsCount(participant) + game.getKillsCount(participant)
#node.group = str(nodeweight)
node.pos = ( nodenumber % nodesperline, nodenumber / nodesperline)
if nodeweight == 0:
iedge = G.add_edge(inode, node)
iedge.style = 'invisible'
iedge.arrowhead = 'none'
iedge.weight = 0.1
node.pos = (0.0, 0.0)
#iedge.constraint = False
if not prev_node:
first_node = node
# put all the nodes into a dict so we could find them fast by the player's id (needed later)
nodes[participant.player.public_id] = node
prev_node = node
node.fontname = 'arial'
# kicked participants are gray
if participant.killed() and participant.killedby.killer is None:
#node.color = '#FF6666FF'
#node.fontcolor = '#33333388'
#node.fillcolor = '#66666622'
node.style += ',dashed'
# mass murderers are black
if participant.player.public_id in massmurdererlist:
node.color = 'black'
node.fillcolor = 'black'
node.fontcolor = 'white'
# dead participants are red
if (game.getDeathsCount(participant) >= len(game.rounds)):
node.color = '#FF0000FF'
node.penwidth = 2
#node.fontcolor = '#FFFFFFFF'
#node.fillcolor = '#FF0000FF'
colorgenerator = colorgen(0.86)
for round in game.rounds.values():
edgecolor = next(colorgenerator)
if round not in rounds:
continue
for participant in round.participants:
if alledges or participant.killed():
edge = G.add_edge(nodes[participant.getInitialKiller().player.public_id], nodes[participant.player.public_id])
edge.color = edgecolor
edge.style = 'dashed'
edge.penwidth = 2
edge.weight = 6.0
#edge.constraint = False
if participant.killed():
if not participant.killedby.killer is None:
# normal case
edge = G.add_edge(nodes[participant.killedby.killer.player.public_id], nodes[participant.player.public_id])
else:
# special case of a game master kill
edge = G.add_edge(gmnode, nodes[participant.player.public_id])
edge.color = edgecolor
edge.fontcolor = 'red'
edge.style = 'solid'
edge.penwidth = 4
edge.weight = 10.0
# set edge label to kill description
label = utils.dateformat(participant.killedby.date) + ":\\n"
maxlinelen = max(24, math.trunc(math.ceil(math.sqrt(6 * len(participant.killedby.reason)))))
label += "\\n".join(textwrap.wrap(participant.killedby.reason, maxlinelen)).replace('"', "'")
edge.label = ''.join([ c for c in label.encode('utf-8') if ord(c) < 2048])
edge.fontsize = edgefontsize
edge.fontname = 'arial'
# do the layout math and save to file
if graph.__dict__.has_key('_yapgvb_py'):
# if yapgvb works in python-only mode
rc = MyRenderingContext()
G.layout(graph.engines.dot, rendering_context=rc)
G.render(filename, rendering_context=rc)
else:
# if yapgvb has libboost support compiled in
G.layout(graph.engines.dot)
G.render(filename)
def _loadgame(gamefile):
	input = open(gamefile, 'rb')
ret = pickle.load(input)
input.close()
return ret
if __name__ == "__main__":
import sys
game = _loadgame(sys.argv[1])
moerdergraphall(game, sys.argv[2], alledges=True)
| 33.453202 | 114 | 0.709616 |
e39a5d6c0dd8a167e618783026d23eebbd516c13
| 386 |
py
|
Python
|
zencad/examples/1.GeomPrim/2.prim2d/rectangle.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | 5 |
2018-04-11T14:11:40.000Z
|
2018-09-12T19:03:36.000Z
|
zencad/examples/1.GeomPrim/2.prim2d/rectangle.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
zencad/examples/1.GeomPrim/2.prim2d/rectangle.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
ZenCad API example: rectangle
last update: 04.10.2019
"""
from zencad import *
m0 = rectangle(a=10, b=6, center=True)
m1 = rectangle(a=10, b=6)
m2 = rectangle(6, center=True)
m3 = square(6)
display(m0, color=color.yellow)
display(m1.up(0.3), color=color.green)
display(m2.forw(10), color=color.red)
display(m3.forw(10).up(0.3), color=color.white)
show()
| 18.380952 | 47 | 0.69171 |
e3c9369aafadd50d504b5c7d342598f8599cf3ed
| 5,265 |
py
|
Python
|
app/datasources/stac/query.py
|
geospatial-jeff/cognition-datasources-api
|
9c2f0332f231638e5fb615d2293c32a3ad96654d
|
[
"Apache-2.0"
] | 18 |
2019-03-27T20:09:37.000Z
|
2022-02-04T17:38:07.000Z
|
app/datasources/stac/query.py
|
geospatial-jeff/cognition-datasources-api
|
9c2f0332f231638e5fb615d2293c32a3ad96654d
|
[
"Apache-2.0"
] | 3 |
2019-06-05T15:12:17.000Z
|
2020-04-30T17:54:37.000Z
|
app/datasources/stac/query.py
|
geospatial-jeff/cognition-datasources-api
|
9c2f0332f231638e5fb615d2293c32a3ad96654d
|
[
"Apache-2.0"
] | 3 |
2019-06-29T01:16:37.000Z
|
2020-04-30T17:25:26.000Z
|
import operator
from datetime import datetime
import os
from schema import Schema, And
from geomet import wkt
class STACQueryError(BaseException):
pass
class STACQuery(object):
@staticmethod
def load_spatial(spatial):
schema = Schema({
"type": And(
str,
lambda n: n == "Polygon"
),
"coordinates": And(
# Checking type of coordinates
list,
# Confirming the geometry is closed (the first and last positions are equivalent)
lambda n: n[0][0] == n[0][-1],
                # Confirming individual coordinates are [lon, lat] ordered (GeoJSON order)
lambda n: len(list(filter(lambda x: -180 <= x[0] <= 180, n[0]))) == len(n[0]),
lambda n: len(list(filter(lambda y: -90 <= y[1] <= 90, n[0]))) == len(n[0])
)
})
schema.validate(spatial)
return spatial
@staticmethod
def load_temporal(temporal):
type_schema = Schema((str, str))
type_schema.validate(temporal)
# Validate the dates individually
for idx, item in enumerate(temporal):
# Checking for full-date notation "YYYY-MM-DD"
if len(item) == 10:
date_schema = Schema(
And(
lambda n: 0 <= int(n.split('-')[0]) <= 2100,
lambda n: 1 <= int(n.split('-')[1]) <= 12,
lambda n: 1 <= int(n.split('-')[2]) <= 31,
error="Invalid configuration, must be of format `YYYY-MM-DD`"
)
)
date_schema.validate(item)
_date = datetime.strptime("{}T00:00:00.000Z".format(item), "%Y-%m-%dT%H:%M:%S.%fZ")
            # Checking for full-date-full-time notation "YYYY-MM-DDThh:mm:ss.msZ"
elif len(item) == 24:
date_schema = Schema(
And(
lambda n: 0 <= int(n.split('-')[0]) <= 2100, # YYYY
lambda n: 1 <= int(n.split('-')[1]) <= 12, # MM
lambda n: 1 <= int(n.split('-')[2].split('T')[0]) <= 31, # DD
lambda n: n[10] == 'T',
lambda n: 0 <= int(n.split(':')[0].split('T')[-1]) <= 24, # hh
lambda n: 0 <= int(n.split(':')[1]) <= 60, # mm
lambda n: 0 <= int(n.split(':')[-1].split('.')[0]) <= 60, # ss
lambda n: 0 <= int(n.split(':')[-1].split('.')[-1][:-1]) <= 999, # ms
lambda n: n[-1] == 'Z',
error="Invalid temporal configuration, must be of format `YYYY-MM-DDThh:mm:ss.mssZ`"
)
)
date_schema.validate(item)
_date = datetime.strptime(item, "%Y-%m-%dT%H:%M:%S.%fZ")
else:
raise STACQueryError("Temporal must be of form `YYYY-MM-DD` or `YYYY-MM-DDThh:mm:ss.msZ`")
if idx == 0:
start_date = _date
elif idx == 1:
end_date = _date
if start_date > end_date:
raise STACQueryError("Temporal must be of form (start_date, end_date)")
return (start_date, end_date)
def __init__(self, spatial, temporal=None, properties=None):
self.spatial = self.load_spatial(spatial)
if temporal:
self.temporal = self.load_temporal(temporal)
if properties:
self.properties = properties
def bbox(self):
"""
:return: Standard STAC bounding box of [xmin, ymin, xmax, ymax]
"""
return [
min([x[0] for x in self.spatial['coordinates'][0]]),
min([x[1] for x in self.spatial['coordinates'][0]]),
max([x[0] for x in self.spatial['coordinates'][0]]),
max([x[1] for x in self.spatial['coordinates'][0]]),
]
def wkt(self):
return wkt.dumps(self.spatial)
def check_temporal(self, date_time):
if self.temporal[0] <= date_time <= self.temporal[1]:
return True
else:
return False
def check_properties(self, asset):
for item in self.properties:
equality = next(iter(self.properties[item]))
comparison_operator = getattr(operator, equality)
if not comparison_operator(asset[item], self.properties[item][equality]):
return False
return True
# def check_spatial(self, name):
# static_dir = os.path.join(os.path.dirname(__file__), '..', 'static')
# rtree_location = os.path.join(static_dir, '{}_rtree'.format(name))
#
# try:
# idx = index.Rtree(rtree_location)
# return [x.object for x in idx.intersection(self.bbox(), objects=True)]
# except:
# # Look for rtree in current directory
# try:
# idx = index.Rtree('index')
# return [x.object for x in idx.intersection(self.bbox(), objects=True)]
# except:
# raise FileNotFoundError("Could not find rtree for the datasource at the following path: {}".format(rtree_location))
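# Usage sketch (illustrative only, not part of the original module; the polygon,
# dates and property filter below are invented for illustration):
#
#   query = STACQuery(
#       spatial={"type": "Polygon",
#                "coordinates": [[[-101.0, 37.0], [-101.0, 38.0], [-100.0, 38.0],
#                                 [-100.0, 37.0], [-101.0, 37.0]]]},
#       temporal=("2019-01-01", "2019-12-31"),
#       properties={"eo:cloud_cover": {"lt": 10}})
#   query.bbox()   # -> [-101.0, 37.0, -100.0, 38.0]
#   query.wkt()    # WKT representation of the polygon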
| 38.430657 | 133 | 0.501235 |
540ff5820e3c35112a173c155f318b432f0522a1
| 94 |
py
|
Python
|
Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/000Exercicios/03funcionario/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/000Exercicios/03funcionario/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/000Exercicios/03funcionario/main.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
from funcionario import Funcionario
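# Note (assumption): funcionario.py, which is not included here, is expected to
# define a Funcionario class whose constructor takes a name, a salary and a raise
# percentage and which exposes an aumento() method.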
f1 = Funcionario('Pedro', 1000, 30)
print(f1.aumento())
| 15.666667 | 35 | 0.744681 |
58a5dbcb6f54bba29816586a72929ce30ada4ee5
| 23,205 |
py
|
Python
|
tests/notest_server_tcp.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | 2 |
2020-12-25T19:37:42.000Z
|
2021-03-26T04:59:12.000Z
|
tests/notest_server_tcp.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | 6 |
2018-08-07T22:48:52.000Z
|
2021-10-07T18:38:20.000Z
|
tests/notest_server_tcp.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""notest_server_tcp.py - tests of the server module of DHParser,
connections via tcp.
Author: Eckhart Arnold <[email protected]>
Copyright 2019 Bavarian Academy of Sciences and Humanities
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Quite slow under MS Windows! Therefore, renamed to notest_server_tcp.py
# so that it is not regularly called when running pytest/nosetest on
# the test directory.
import asyncio
import functools
import json
import multiprocessing
import os
import sys
import time
from typing import Callable, Union
if __name__ == "__main__":
import multiprocessing
multiprocessing.freeze_support()
if sys.platform.lower().startswith('win'):
multiprocessing.set_start_method('spawn')
# else:
# multiprocessing.set_start_method('forkserver')
scriptpath = os.path.abspath(os.path.dirname(__file__) or '.')
sys.path.append(os.path.abspath(os.path.join(scriptpath, '..')))
from DHParser.configuration import set_config_value
from DHParser.server import Server, spawn_tcp_server, stop_tcp_server, asyncio_run, asyncio_connect, \
split_header, has_server_stopped, STOP_SERVER_REQUEST_BYTES, IDENTIFY_REQUEST, \
SERVER_OFFLINE, connection_cb_dummy
from DHParser.lsp import gen_lsp_table
from DHParser.testing import unique_name
TEST_PORT = 8000 + os.getpid() % 1000
# print('>>> ', sys.version, TEST_PORT)
# adding pid % 100 hopefully prevents interference, if `test_server.py` is run in
# parallel with several different python versions, as done by `run.py`
def compiler_dummy(src: str, log_dir: str='') -> str:
return src
def long_running(duration: float, **kwargs) -> float:
time.sleep(float(duration))
return duration
def send_request(request: str, expect_response: bool = True) -> str:
response = ''
async def send(request):
nonlocal response
reader, writer = await asyncio_connect('127.0.0.1', TEST_PORT)
writer.write(request.encode())
if expect_response:
response = (await reader.read(8192)).decode()
writer.close()
if sys.version_info >= (3, 7): await writer.wait_closed()
asyncio_run(send(request))
return response
jrpc_id = 0
def json_rpc(method: str, params: dict) -> str:
global jrpc_id
jrpc_id += 1
s = json.dumps({'jsonrpc': '2.0', 'id': jrpc_id, 'method': method, 'params': params})
return 'Content-Length: %i\n\n' % len(s) + s
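# Note (illustration, not part of the test suite): json_rpc frames a JSON-RPC 2.0
# payload with the Content-Length header the server expects, e.g. (id and length
# vary between calls):
#
#   Content-Length: 63
#
#   {"jsonrpc": "2.0", "id": 1, "method": "identify", "params": {}}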
class TestServer:
spawn = multiprocessing.get_start_method() in ["spawn", "forkserver"]
def setup(self):
stop_tcp_server('127.0.0.1', TEST_PORT)
def teardown(self):
stop_tcp_server('127.0.0.1', TEST_PORT)
def test_server_process(self):
"""Basic Test of server module."""
async def compile_remote(src):
reader, writer = await asyncio_connect('127.0.0.1', TEST_PORT)
writer.write(src.encode())
data = await reader.read(500)
writer.close()
if sys.version_info >= (3, 7): await writer.wait_closed()
assert data.decode() == "Test", data.decode()
p = None
try:
p = spawn_tcp_server('127.0.0.1', TEST_PORT,
(compiler_dummy, set()))
asyncio_run(compile_remote('Test'))
finally:
stop_tcp_server('127.0.0.1', TEST_PORT)
if p is not None:
p.join()
def test_service_call(self):
async def identify_server():
main_reader, main_writer = await asyncio_connect('127.0.0.1', TEST_PORT)
main_writer.write(IDENTIFY_REQUEST.encode())
data = await main_reader.read(500)
assert b'already connected' not in data
service_reader, service_writer = await asyncio_connect('127.0.0.1', TEST_PORT)
service_writer.write(IDENTIFY_REQUEST.encode())
data = await service_reader.read(500)
assert b'already connected' in data
await asyncio.sleep(0.01)
assert service_reader.at_eof()
service_writer.close()
if sys.version_info >= (3, 7): await service_writer.wait_closed()
service_reader, service_writer = await asyncio_connect('127.0.0.1', TEST_PORT)
service_writer.write(json_rpc('identify', {}).encode())
data = await service_reader.read(500)
assert b'already connected' in data
await asyncio.sleep(0.01)
assert service_reader.at_eof()
service_writer.close()
if sys.version_info >= (3, 7): await service_writer.wait_closed()
main_writer.close()
if sys.version_info >= (3, 7): await main_writer.wait_closed()
p = None
try:
p = spawn_tcp_server('127.0.0.1', TEST_PORT)
asyncio_run(identify_server())
finally:
stop_tcp_server('127.0.0.1', TEST_PORT)
if p is not None:
p.join()
def test_identify(self):
"""Test server's 'identify/'-command."""
async def send_request(request):
reader, writer = await asyncio_connect('127.0.0.1', TEST_PORT)
writer.write(request.encode() if isinstance(request, str) else request)
data = await reader.read(500)
await writer.drain()
writer.close()
if sys.version_info >= (3, 7): await writer.wait_closed()
return data.decode()
p = None
try:
from timeit import timeit
p = spawn_tcp_server('127.0.0.1', TEST_PORT, compiler_dummy)
result = asyncio_run(send_request(IDENTIFY_REQUEST))
assert isinstance(result, str) and result.startswith('DHParser'), result
finally:
stop_tcp_server('127.0.0.1', TEST_PORT)
if p is not None:
p.join()
def test_terminate(self):
"""Test different ways of sending a termination message to server:
http-request, plain-text and json-rpc."""
async def terminate_server(termination_request, expected_response):
reader, writer = await asyncio_connect('127.0.0.1', TEST_PORT)
writer.write(termination_request)
data = await reader.read(500)
writer.close()
if sys.version_info >= (3, 7): await writer.wait_closed()
assert data.find(expected_response) >= 0, str(data)
p = None
try:
# plain text stop request
p = spawn_tcp_server('127.0.0.1', TEST_PORT, (compiler_dummy, set()))
asyncio_run(terminate_server(STOP_SERVER_REQUEST_BYTES,
b'DHParser server at 127.0.0.1:%i stopped!' % TEST_PORT))
assert asyncio_run(has_server_stopped('127.0.0.1', TEST_PORT))
# http stop request
p = spawn_tcp_server('127.0.0.1', TEST_PORT, (compiler_dummy, set()))
asyncio_run(terminate_server(b'GET ' + STOP_SERVER_REQUEST_BYTES + b' HTTP',
b'DHParser server at 127.0.0.1:%i stopped!' % TEST_PORT))
assert asyncio_run(has_server_stopped('127.0.0.1', TEST_PORT))
# json_rpc stop request
p = spawn_tcp_server('127.0.0.1', TEST_PORT, (compiler_dummy, set()))
jsonrpc = json.dumps({"jsonrpc": "2.0", "method": STOP_SERVER_REQUEST_BYTES.decode(),
'id': 1})
asyncio_run(terminate_server(jsonrpc.encode(),
b'DHParser server at 127.0.0.1:%i stopped!' % TEST_PORT))
assert asyncio_run(has_server_stopped('127.0.0.1', TEST_PORT))
finally:
stop_tcp_server('127.0.0.1', TEST_PORT)
if p is not None:
p.join()
def test_long_running_task(self):
"""Test, whether delegation of (long-running) tasks to
processes or threads works."""
sequence = []
if self.spawn:
SLOW, FAST = 0.1, 0.01
else:
SLOW, FAST = 0.02, 0.001
async def run_tasks():
def extract_result(data: bytes):
header, data, backlog = split_header(data)
return json.loads(data.decode())['result']
reader, writer = await asyncio_connect('127.0.0.1', TEST_PORT)
sequence.append(SLOW)
sequence.append(FAST)
writer.write(json_rpc('long_running', {'duration': SLOW}).encode())
writer.write(json_rpc('long_running', {'duration': FAST}).encode())
await writer.drain()
sequence.append(extract_result(await reader.read(500)))
sequence.append(extract_result(await reader.read(500)))
writer.close()
if sys.version_info >= (3, 7): await writer.wait_closed()
if sys.version_info >= (3, 6):
p = None
try:
p = spawn_tcp_server('127.0.0.1', TEST_PORT,
(long_running, frozenset(['long_running']), frozenset(),
connection_cb_dummy, 'Long-Running-Test', False))
asyncio_run(run_tasks())
assert sequence == [SLOW, FAST, FAST, SLOW], str(sequence)
finally:
stop_tcp_server('127.0.0.1', TEST_PORT)
if p is not None:
p.join()
sequence = []
p = None
try:
p = spawn_tcp_server('127.0.0.1', TEST_PORT,
(long_running, frozenset(), frozenset(['long_running']),
connection_cb_dummy, 'Long-Running-Test', False))
asyncio_run(run_tasks())
assert sequence == [SLOW, FAST, FAST, SLOW], str(sequence)
finally:
stop_tcp_server('127.0.0.1', TEST_PORT)
if p is not None:
p.join()
sequence = []
p = None
try:
p = spawn_tcp_server('127.0.0.1', TEST_PORT,
(long_running, frozenset(), frozenset(),
connection_cb_dummy, 'Long-Running-Test', False))
asyncio_run(run_tasks())
assert sequence.count(SLOW) == 2 and sequence.count(FAST) == 2
finally:
stop_tcp_server('127.0.0.1', TEST_PORT)
if p is not None:
p.join()
sequence = []
class TestSpawning:
"""Tests spawning a server by starting a script via subprocess.Popen."""
def setup(self):
stop_tcp_server('127.0.0.1', TEST_PORT)
def teardown(self):
stop_tcp_server('127.0.0.1', TEST_PORT)
def test_spawn(self):
spawn_tcp_server('127.0.0.1', TEST_PORT)
async def identify():
try:
reader, writer = await asyncio_connect('127.0.0.1', TEST_PORT)
writer.write(IDENTIFY_REQUEST.encode())
data = await reader.read(500)
await writer.drain()
writer.close()
if sys.version_info >= (3, 7): await writer.wait_closed()
return data.decode()
except ConnectionRefusedError:
return ''
result = asyncio_run(identify())
assert result.startswith('DHParser'), result
def lsp_rpc(f: Callable):
"""A decorator for LanguageServerProtocol-methods. This wrapper
filters out calls that are made before initializing the server and
after shutdown and returns an error message instead.
This decorator should only be used on methods of
LanguageServerProtocol-objects as it expects the first parameter
    to be the `self`-reference of this object.
All LSP-methods should be decorated with this decorator except
    `initialize` and `exit`.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
self = args[0]
except IndexError:
self = kwargs['self']
if self.shared.shutdown:
return {'code': -32600, 'message': 'language server already shut down'}
elif not self.shared.initialized:
return {'code': -32002, 'message': 'language server not initialized'}
else:
return f(*args, **kwargs)
return wrapper
class LSP:
def __init__(self):
manager = multiprocessing.Manager() # saving this in an object-variable would make objects unpickleable due to weak references
self.shared = manager.Namespace()
self.shared.initialized = False
self.shared.shutdown = False
self.shared.processId = 0
self.shared.rootUri = ''
self.shared.clientCapabilities = ''
self.shared.serverCapabilities = json.dumps('{}')
def lsp_initialize(self, **kwargs):
if self.shared.initialized or self.shared.processId != 0:
return {"code": -32002, "message": "Server has already been initialized."}
self.shared.processId = kwargs['processId']
self.shared.rootUri = kwargs['rootUri']
self.shared.clientCapabilities = json.dumps(kwargs['capabilities'])
return {'capabilities': json.loads(self.shared.serverCapabilities)}
def lsp_initialized(self, **kwargs):
assert self.shared.processId != 0
self.shared.initialized = True
return None
@lsp_rpc
def lsp_custom(self, *args, **kwargs):
if args and not kwargs:
return {'args': args}
elif kwargs and not args:
return kwargs
else:
return {'args': args, 'kwargs': kwargs }
@lsp_rpc
def lsp_check(self, **kwargs):
return {'processId': self.shared.processId}
@lsp_rpc
def lsp_shutdown(self, **kwargs):
self.shared.shutdown = True
return {}
def lsp_exit(self, **kwargs):
self.shared.shutdown = True
return None
class TestLanguageServer:
"""Tests for the generic LanguageServer-class."""
def setup(self):
stop_tcp_server('127.0.0.1', TEST_PORT)
self.p = None
self.DEBUG = False
if self.DEBUG:
from DHParser import log
log.start_logging('LOGS')
set_config_value('log_server', True)
def teardown(self):
stop_tcp_server('127.0.0.1', TEST_PORT)
if self.p is not None:
self.p.join()
if self.DEBUG:
from DHParser import log
log.suspend_logging()
def start_server(self):
stop_tcp_server('127.0.0.1', TEST_PORT)
if self.p is not None:
self.p.join()
self.lsp = LSP()
lsp_table = gen_lsp_table(self.lsp, prefix='lsp_')
self.p = spawn_tcp_server('127.0.0.1', TEST_PORT, (lsp_table, frozenset(), frozenset()))
def test_initialize(self):
self.start_server()
async def sequence_test():
reader, writer = await asyncio_connect('127.0.0.1', TEST_PORT)
async def send(request: str, expect_response: bool = True) -> str:
writer.write(request.encode())
await writer.drain()
if expect_response:
return (await reader.read(8192)).decode()
return ''
response = await send(json_rpc('initialize',
{'processId': 701,
'rootUri': 'file://~/tmp',
'capabilities': {}}))
i = response.find('{') - 1
res = json.loads(response[i:])
assert 'result' in res and 'capabilities' in res['result'], str(res)
r2, w2 = await asyncio_connect('127.0.0.1', TEST_PORT)
w2.write(json_rpc('initialize', {'processId': 701,
'rootUri': 'file://~/tmp', 'capabilities': {}}).encode())
fail = await r2.read(8192)
assert b'error' in fail and b'already connected' in fail
w2.write_eof(); w2.close()
if sys.version_info >= (3, 7): await w2.wait_closed()
r2, w2 = await asyncio_connect('127.0.0.1', TEST_PORT)
w2.write(json_rpc('custom', {}).encode())
fail = await r2.read(8192)
assert b'result' not in fail
assert b'not a service function' in fail
w2.write_eof(); w2.close()
if sys.version_info >= (3, 7): await w2.wait_closed()
response = await send(json_rpc('custom', {}))
assert response.find('error') >= 0
response = await send(json_rpc('initialized', {}), expect_response=False)
assert response == '', response
response = await send(json_rpc('custom', {'test': 1}))
assert response.find('test') >= 0, str(response)
response = await send(json_rpc('check', {}))
assert response.find('701') >= 0
response = await send(json_rpc('non_existant_function', {}))
assert response.find('-32601') >= 0 # method not found
response = await send(json_rpc('non_existant_function', {'a': 1, 'b': 2, 'c': 3}))
assert response.find('-32601') >= 0 # method not found
# test plain-data call
response = await send('custom(1)')
assert response.find('1') >= 0
# test plain-data false call
response = await send('non_existant_function()')
            assert response.find('No function named "non_existant_function"')
response = await send('non_existant_function(1)')
            assert response.find('No function named "non_existant_function"')
response = await send(json_rpc('shutdown', {}))
assert response.find('error') < 0
# after shutdown, any function call except "exit()" should yield error
response = await send(json_rpc('custom', {}))
assert response.find('error') >= 0
response = await send(json_rpc('exit', {}))
assert response == '', response
writer.close()
if sys.version_info >= (3, 7): await writer.wait_closed()
asyncio_run(sequence_test())
def test_initialization_sequence(self):
self.start_server()
async def initialization_sequence():
reader, writer = await asyncio_connect('127.0.0.1', TEST_PORT)
writer.write(json_rpc('initialize',
{'processId': 702,
'rootUri': 'file://~/tmp',
'capabilities': {}}).encode())
response = (await reader.read(8192)).decode()
i = response.find('{')
res = json.loads(response[i:])
assert 'result' in res and 'capabilities' in res['result'], str(res)
writer.write(json_rpc('initialized', {}).encode())
writer.write(json_rpc('custom', {'test': 1}).encode())
response = (await reader.read(8192)).decode()
assert response.find('test') >= 0
writer.close()
if sys.version_info >= (3, 7): await writer.wait_closed()
asyncio_run(initialization_sequence())
def test_varying_data_chunk_sizes(self):
self.start_server()
async def initialization_sequence():
reader, writer = await asyncio_connect('127.0.0.1', TEST_PORT)
writer.write(json_rpc('initialize',
{'processId': 702,
'rootUri': 'file://~/tmp',
'capabilities': {}}).encode())
response = (await reader.read(8192)).decode()
i = response.find('{')
res = json.loads(response[i:])
assert 'result' in res and 'capabilities' in res['result'], str(res)
# several commands in one chunk
writer.write(json_rpc('initialized', {}).encode() + json_rpc('custom', {'test': 1}).encode())
response = (await reader.read(8192)).decode()
assert response.find('test') >= 0
data = json_rpc('custom', {'test': 2}).encode()
i = data.find(b'\n\n')
assert i > 0, str(data)
writer.write(data[:i + 2])
await asyncio.sleep(1)
writer.write(data[i + 2:])
response = (await reader.read(8192)).decode()
assert response.find('test') >= 0
data = json_rpc('custom', {'test': 3}).encode()
i = data.find(b'\n\n')
assert i > 0, str(data)
writer.write(data[:i + 2])
await asyncio.sleep(0.1)
writer.write(data[i + 2:] + json_rpc('custom', {'test': 4}).encode())
response = (await reader.read(8192)).decode()
assert response.find('test') >= 0
writer.write(b'')
await writer.drain()
writer.write_eof()
await writer.drain()
writer.close()
if sys.version_info >= (3, 7): await writer.wait_closed()
asyncio_run(initialization_sequence())
# def test_multiple_connections(self):
# self.start_server()
# async def initialization_sequence():
# reader, writer = await asyncio_connect('127.0.0.1', TEST_PORT)
# writer.write(json_rpc('initialize',
# {'processId': 702,
# 'rootUri': 'file://~/tmp',
# 'capabilities': {}}).encode())
# response = (await reader.read(8192)).decode()
# i = response.find('{')
# # print(len(response), response)
# res = json.loads(response[i:])
# assert 'result' in res and 'capabilities' in res['result'], str(res)
#
# writer.write(json_rpc('initialized', {}).encode())
#
# writer.write(json_rpc('custom', {'test': 1}).encode())
# response = (await reader.read(8192)).decode()
# assert response.find('test') >= 0
#
# writer.close()
# if sys.version_info >= (3, 7): await writer.wait_closed()
#
# asyncio_run(initialization_sequence())
if __name__ == "__main__":
    # BROKEN, because TEST_PORT is not fixed anymore
# if "--killserver" in sys.argv:
# result = stop_tcp_server('127.0.0.1', TEST_PORT)
# print('server stopped' if result is None else "server wasn't running")
# sys.exit(0)
from DHParser.testing import runner
runner("", globals())
| 39.397284 | 135 | 0.573195 |
58a948501ba68feaeb97009250070f3c3e45c847
| 9,719 |
py
|
Python
|
examples/readme_example/readme_exampleParser.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | 2 |
2020-12-25T19:37:42.000Z
|
2021-03-26T04:59:12.000Z
|
examples/readme_example/readme_exampleParser.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | 6 |
2018-08-07T22:48:52.000Z
|
2021-10-07T18:38:20.000Z
|
examples/readme_example/readme_exampleParser.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#######################################################################
#
# SYMBOLS SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
import collections
from functools import partial
import os
import sys
try:
scriptpath = os.path.dirname(__file__)
except NameError:
scriptpath = ''
dhparser_parentdir = os.path.abspath(os.path.join(scriptpath, r'../..'))
if scriptpath not in sys.path:
sys.path.append(scriptpath)
if dhparser_parentdir not in sys.path:
sys.path.append(dhparser_parentdir)
try:
import regex as re
except ImportError:
import re
from DHParser import start_logging, suspend_logging, resume_logging, is_filename, load_if_file, \
Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, Drop, AnyChar, \
Lookbehind, Lookahead, Alternative, Pop, Text, Synonym, Counted, Interleave, INFINITE, \
Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \
ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \
grammar_changed, last_value, matching_bracket, PreprocessorFunc, is_empty, remove_if, \
Node, TransformerCallable, TransformationDict, transformation_factory, traverse, \
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
replace_by_children, remove_empty, remove_tokens, flatten, all_of, any_of, \
merge_adjacent, collapse, collapse_children_if, transform_content, WHITESPACE_PTYPE, \
TOKEN_PTYPE, remove_children, remove_content, remove_brackets, change_tag_name, \
remove_anonymous_tokens, keep_children, is_one_of, not_one_of, has_content, apply_if, peek, \
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \
transform_content, replace_content_with, forbid, assert_content, remove_infix_operator, \
add_error, error_on, recompile_grammar, left_associative, lean_left, set_config_value, \
get_config_value, node_maker, access_thread_locals, access_presets, \
finalize_presets, ErrorCode, RX_NEVER_MATCH, set_tracer, resume_notices_on, \
trace_history, has_descendant, neg, has_ancestor, optional_last_value, insert, \
positions_of, replace_tag_names, add_attributes, delimit_children, merge_connected, \
has_attr, has_parent, ThreadLocalSingletonFactory
#######################################################################
#
# PREPROCESSOR SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def get_preprocessor() -> PreprocessorFunc:
return nil_preprocessor
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class readme_exampleGrammar(Grammar):
r"""Parser for a readme_example source file.
"""
source_hash__ = "e58ae4ad1239eea54c3eb9c605fb8d43"
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r'#.*'
comment_rx__ = re.compile(COMMENT__)
WHITESPACE__ = r'\s*'
WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
wsp__ = Whitespace(WSP_RE__)
dwsp__ = Drop(Whitespace(WSP_RE__))
value = Series(RegExp('\\"[^"\\n]*\\"'), dwsp__)
key = Series(RegExp('\\w+'), dwsp__)
entry = Series(key, Series(Drop(Text("=")), dwsp__), value)
key_store = Series(dwsp__, ZeroOrMore(entry))
root__ = key_store
_raw_grammar = ThreadLocalSingletonFactory(readme_exampleGrammar, ident=1)
def get_grammar() -> readme_exampleGrammar:
grammar = _raw_grammar()
if get_config_value('resume_notices'):
resume_notices_on(grammar)
elif get_config_value('history_tracking'):
set_tracer(grammar, trace_history)
try:
if not grammar.__class__.python_src__:
grammar.__class__.python_src__ = get_grammar.python_src__
except AttributeError:
pass
return grammar
def parse_readme_example(document, start_parser = "root_parser__", *, complete_match=True):
return get_grammar()(document, start_parser, complete_match)
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
readme_example_AST_transformation_table = {
# AST Transformations for the readme_example-grammar
"<": flatten,
"key_store": [],
"entry": [],
"key": [],
"value": [],
"*": replace_by_single_child
}
def Createreadme_exampleTransformer() -> TransformerCallable:
"""Creates a transformation function that does not share state with other
threads or processes."""
return partial(traverse, processing_table=readme_example_AST_transformation_table.copy())
def get_transformer() -> TransformerCallable:
"""Returns a thread/process-exclusive transformation function."""
THREAD_LOCALS = access_thread_locals()
try:
transformer = THREAD_LOCALS.readme_example_00000001_transformer_singleton
except AttributeError:
THREAD_LOCALS.readme_example_00000001_transformer_singleton = Createreadme_exampleTransformer()
transformer = THREAD_LOCALS.readme_example_00000001_transformer_singleton
return transformer
#######################################################################
#
# COMPILER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
class readme_exampleCompiler(Compiler):
"""Compiler for the abstract-syntax-tree of a readme_example source file.
"""
def __init__(self):
super(readme_exampleCompiler, self).__init__()
def reset(self):
super().reset()
# initialize your variables here, not in the constructor!
def on_key_store(self, node):
return self.fallback_compiler(node)
# def on_entry(self, node):
# return node
# def on_key(self, node):
# return node
# def on_value(self, node):
# return node
def get_compiler() -> readme_exampleCompiler:
"""Returns a thread/process-exclusive readme_exampleCompiler-singleton."""
THREAD_LOCALS = access_thread_locals()
try:
compiler = THREAD_LOCALS.readme_example_00000001_compiler_singleton
except AttributeError:
THREAD_LOCALS.readme_example_00000001_compiler_singleton = readme_exampleCompiler()
compiler = THREAD_LOCALS.readme_example_00000001_compiler_singleton
return compiler
#######################################################################
#
# END OF DHPARSER-SECTIONS
#
#######################################################################
def compile_src(source: str):
"""Compiles ``source`` and returns (result, errors, ast).
"""
result_tuple = compile_source(source, get_preprocessor(), get_grammar(), get_transformer(),
get_compiler())
return result_tuple
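# Usage sketch (illustrative only, not part of the generated file; the key/value
# source string below is invented for illustration):
#
#   result, errors, _ = compile_src('title = "readme example"\n')
#   if not errors:
#       print(result.serialize())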
if __name__ == "__main__":
# recompile grammar if needed
if __file__.endswith('Parser.py'):
grammar_path = os.path.abspath(__file__).replace('Parser.py', '.ebnf')
else:
grammar_path = os.path.splitext(__file__)[0] + '.ebnf'
parser_update = False
def notify():
global parser_update
parser_update = True
print('recompiling ' + grammar_path)
if os.path.exists(grammar_path) and os.path.isfile(grammar_path):
if not recompile_grammar(grammar_path, force=False, notify=notify):
error_file = os.path.basename(__file__).replace('Parser.py', '_ebnf_ERRORS.txt')
with open(error_file, encoding="utf-8") as f:
print(f.read())
sys.exit(1)
elif parser_update:
print(os.path.basename(__file__) + ' has changed. '
'Please run again in order to apply updated compiler')
sys.exit(0)
else:
print('Could not check whether grammar requires recompiling, '
'because grammar was not found at: ' + grammar_path)
from argparse import ArgumentParser
parser = ArgumentParser(description="Parses a readme_example-file and shows its syntax-tree.")
parser.add_argument('files', nargs=1)
parser.add_argument('-d', '--debug', action='store_const', const='debug')
parser.add_argument('-x', '--xml', action='store_const', const='xml')
args = parser.parse_args()
file_name, log_dir = args.files[0], ''
if not os.path.exists(file_name):
print('File "%s" not found!' % file_name)
sys.exit(1)
if not os.path.isfile(file_name):
print('"%s" is not a file!' % file_name)
sys.exit(1)
if args.debug is not None:
log_dir = 'LOGS'
set_config_value('history_tracking', True)
set_config_value('resume_notices', True)
set_config_value('log_syntax_trees', set(['cst', 'ast'])) # don't use a set literal, here
start_logging(log_dir)
result, errors, _ = compile_src(file_name)
if errors:
cwd = os.getcwd()
rel_path = file_name[len(cwd):] if file_name.startswith(cwd) else file_name
for error in errors:
print(rel_path + ':' + str(error))
sys.exit(1)
else:
print(result.serialize(how='default' if args.xml is None else 'xml')
if isinstance(result, Node) else result)
| 37.380769 | 103 | 0.642967 |
49ff16c4b485979ee35f59bc06e7ca34e0b7a947
| 1,604 |
py
|
Python
|
Python/Buch_ATBS/Teil_2/Kapitel_13_Arbeiten_mit_Word_und_PDF_Dokumenten/06_pdf_seiten_mit_bestimmtem_textinhalt_finden/06_pdf_seiten_mit_bestimmtem_textinhalt_finden.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | null | null | null |
Python/Buch_ATBS/Teil_2/Kapitel_13_Arbeiten_mit_Word_und_PDF_Dokumenten/06_pdf_seiten_mit_bestimmtem_textinhalt_finden/06_pdf_seiten_mit_bestimmtem_textinhalt_finden.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | 6 |
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
Python/Buch_ATBS/Teil_2/Kapitel_13_Arbeiten_mit_Word_und_PDF_Dokumenten/06_pdf_seiten_mit_bestimmtem_textinhalt_finden/06_pdf_seiten_mit_bestimmtem_textinhalt_finden.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
# 06_pdf_seiten_mit_bestimmtem_textinhalt_finden.py
# This exercise script searches all PDF files of a subfolder, reads them,
# and saves every page that contains the search string into a new file.
import os, PyPDF2, re
os.chdir(os.path.dirname(__file__))
target_path='.\\search_me'
target_file='.\\results.pdf'
if os.path.exists(target_file):
os.remove(target_file)
print('Bitte Suchbegriff eingeben:')
search_string=input()
# Find all PDFs in the search_me folder and its subfolders
file_list=[]
for path in os.walk(target_path):
for i in range(len(path)):
if len(path[i]) > 0 and type(path[i]) == list:
for file_name in path[i]:
possible_pdf=(path[0]+'\\'+file_name)
if os.path.isfile(possible_pdf):
file_list+=[possible_pdf]
# Open all PDFs in read mode
write_pdf=PyPDF2.PdfFileWriter()
counter=0
for file_name in file_list:
print('öffne File: '+file_name)
pdf_file_open=open(file_name, 'rb')
pdf_content=PyPDF2.PdfFileReader(pdf_file_open)
for page in range(pdf_content.numPages):
current_page=pdf_content.getPage(page)
extracted=current_page.extractText()
search_pattern=re.compile(r'.?'+search_string.lower()+r'.?')
search_results=search_pattern.findall(extracted.lower())
if len(search_results) > 0:
write_pdf.addPage(current_page)
counter+=1
target_file_open=open(target_file, 'wb')
write_pdf.write(target_file_open)
target_file_open.close()
print('Gefundene Einträge: '+str(counter))
| 34.12766 | 91 | 0.703865 |
4d73a64f545af771e2745579aa71d4923068637b
| 29,106 |
py
|
Python
|
research/cvtmodel/regnet/src/regnet_x_400mf.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cvtmodel/regnet/src/regnet_x_400mf.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cvtmodel/regnet/src/regnet_x_400mf.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.ops as P
from mindspore import nn
class Module3(nn.Cell):
def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
conv2d_0_padding, conv2d_0_pad_mode, conv2d_0_group):
super(Module3, self).__init__()
self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
out_channels=conv2d_0_out_channels,
kernel_size=conv2d_0_kernel_size,
stride=conv2d_0_stride,
padding=conv2d_0_padding,
pad_mode=conv2d_0_pad_mode,
dilation=(1, 1),
group=conv2d_0_group,
has_bias=True)
self.relu_1 = nn.ReLU()
def construct(self, x):
opt_conv2d_0 = self.conv2d_0(x)
opt_relu_1 = self.relu_1(opt_conv2d_0)
return opt_relu_1
class Module8(nn.Cell):
def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module3_0_conv2d_0_in_channels,
module3_0_conv2d_0_out_channels, module3_0_conv2d_0_kernel_size, module3_0_conv2d_0_stride,
module3_0_conv2d_0_padding, module3_0_conv2d_0_pad_mode, module3_0_conv2d_0_group,
module3_1_conv2d_0_in_channels, module3_1_conv2d_0_out_channels, module3_1_conv2d_0_kernel_size,
module3_1_conv2d_0_stride, module3_1_conv2d_0_padding, module3_1_conv2d_0_pad_mode,
module3_1_conv2d_0_group):
super(Module8, self).__init__()
self.module3_0 = Module3(conv2d_0_in_channels=module3_0_conv2d_0_in_channels,
conv2d_0_out_channels=module3_0_conv2d_0_out_channels,
conv2d_0_kernel_size=module3_0_conv2d_0_kernel_size,
conv2d_0_stride=module3_0_conv2d_0_stride,
conv2d_0_padding=module3_0_conv2d_0_padding,
conv2d_0_pad_mode=module3_0_conv2d_0_pad_mode,
conv2d_0_group=module3_0_conv2d_0_group)
self.module3_1 = Module3(conv2d_0_in_channels=module3_1_conv2d_0_in_channels,
conv2d_0_out_channels=module3_1_conv2d_0_out_channels,
conv2d_0_kernel_size=module3_1_conv2d_0_kernel_size,
conv2d_0_stride=module3_1_conv2d_0_stride,
conv2d_0_padding=module3_1_conv2d_0_padding,
conv2d_0_pad_mode=module3_1_conv2d_0_pad_mode,
conv2d_0_group=module3_1_conv2d_0_group)
self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
out_channels=conv2d_0_out_channels,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=True)
def construct(self, x):
module3_0_opt = self.module3_0(x)
module3_1_opt = self.module3_1(module3_0_opt)
opt_conv2d_0 = self.conv2d_0(module3_1_opt)
return opt_conv2d_0
class Module0(nn.Cell):
def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
conv2d_2_group, conv2d_4_in_channels, conv2d_4_out_channels):
super(Module0, self).__init__()
self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
out_channels=conv2d_0_out_channels,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=True)
self.relu_1 = nn.ReLU()
self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
out_channels=conv2d_2_out_channels,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1, 1, 1),
pad_mode="pad",
dilation=(1, 1),
group=conv2d_2_group,
has_bias=True)
self.relu_3 = nn.ReLU()
self.conv2d_4 = nn.Conv2d(in_channels=conv2d_4_in_channels,
out_channels=conv2d_4_out_channels,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=True)
self.relu_6 = nn.ReLU()
def construct(self, x):
opt_conv2d_0 = self.conv2d_0(x)
opt_relu_1 = self.relu_1(opt_conv2d_0)
opt_conv2d_2 = self.conv2d_2(opt_relu_1)
opt_relu_3 = self.relu_3(opt_conv2d_2)
opt_conv2d_4 = self.conv2d_4(opt_relu_3)
opt_add_5 = P.Add()(x, opt_conv2d_4)
opt_relu_6 = self.relu_6(opt_add_5)
return opt_relu_6
class Module11(nn.Cell):
def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
module0_0_conv2d_2_out_channels, module0_0_conv2d_2_group, module0_0_conv2d_4_in_channels,
module0_0_conv2d_4_out_channels, module0_1_conv2d_0_in_channels, module0_1_conv2d_0_out_channels,
module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels, module0_1_conv2d_2_group,
module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels, module0_2_conv2d_0_in_channels,
module0_2_conv2d_0_out_channels, module0_2_conv2d_2_in_channels, module0_2_conv2d_2_out_channels,
module0_2_conv2d_2_group, module0_2_conv2d_4_in_channels, module0_2_conv2d_4_out_channels,
module0_3_conv2d_0_in_channels, module0_3_conv2d_0_out_channels, module0_3_conv2d_2_in_channels,
module0_3_conv2d_2_out_channels, module0_3_conv2d_2_group, module0_3_conv2d_4_in_channels,
module0_3_conv2d_4_out_channels, module0_4_conv2d_0_in_channels, module0_4_conv2d_0_out_channels,
module0_4_conv2d_2_in_channels, module0_4_conv2d_2_out_channels, module0_4_conv2d_2_group,
module0_4_conv2d_4_in_channels, module0_4_conv2d_4_out_channels, module0_5_conv2d_0_in_channels,
module0_5_conv2d_0_out_channels, module0_5_conv2d_2_in_channels, module0_5_conv2d_2_out_channels,
module0_5_conv2d_2_group, module0_5_conv2d_4_in_channels, module0_5_conv2d_4_out_channels):
super(Module11, self).__init__()
self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
conv2d_2_group=module0_0_conv2d_2_group,
conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
conv2d_4_out_channels=module0_0_conv2d_4_out_channels)
self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
conv2d_2_group=module0_1_conv2d_2_group,
conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
conv2d_4_out_channels=module0_1_conv2d_4_out_channels)
self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
conv2d_2_group=module0_2_conv2d_2_group,
conv2d_4_in_channels=module0_2_conv2d_4_in_channels,
conv2d_4_out_channels=module0_2_conv2d_4_out_channels)
self.module0_3 = Module0(conv2d_0_in_channels=module0_3_conv2d_0_in_channels,
conv2d_0_out_channels=module0_3_conv2d_0_out_channels,
conv2d_2_in_channels=module0_3_conv2d_2_in_channels,
conv2d_2_out_channels=module0_3_conv2d_2_out_channels,
conv2d_2_group=module0_3_conv2d_2_group,
conv2d_4_in_channels=module0_3_conv2d_4_in_channels,
conv2d_4_out_channels=module0_3_conv2d_4_out_channels)
self.module0_4 = Module0(conv2d_0_in_channels=module0_4_conv2d_0_in_channels,
conv2d_0_out_channels=module0_4_conv2d_0_out_channels,
conv2d_2_in_channels=module0_4_conv2d_2_in_channels,
conv2d_2_out_channels=module0_4_conv2d_2_out_channels,
conv2d_2_group=module0_4_conv2d_2_group,
conv2d_4_in_channels=module0_4_conv2d_4_in_channels,
conv2d_4_out_channels=module0_4_conv2d_4_out_channels)
self.module0_5 = Module0(conv2d_0_in_channels=module0_5_conv2d_0_in_channels,
conv2d_0_out_channels=module0_5_conv2d_0_out_channels,
conv2d_2_in_channels=module0_5_conv2d_2_in_channels,
conv2d_2_out_channels=module0_5_conv2d_2_out_channels,
conv2d_2_group=module0_5_conv2d_2_group,
conv2d_4_in_channels=module0_5_conv2d_4_in_channels,
conv2d_4_out_channels=module0_5_conv2d_4_out_channels)
def construct(self, x):
module0_0_opt = self.module0_0(x)
module0_1_opt = self.module0_1(module0_0_opt)
module0_2_opt = self.module0_2(module0_1_opt)
module0_3_opt = self.module0_3(module0_2_opt)
module0_4_opt = self.module0_4(module0_3_opt)
module0_5_opt = self.module0_5(module0_4_opt)
return module0_5_opt
class Module10(nn.Cell):
def __init__(self):
super(Module10, self).__init__()
self.module0_0 = Module0(conv2d_0_in_channels=400,
conv2d_0_out_channels=400,
conv2d_2_in_channels=400,
conv2d_2_out_channels=400,
conv2d_2_group=25,
conv2d_4_in_channels=400,
conv2d_4_out_channels=400)
self.module0_1 = Module0(conv2d_0_in_channels=400,
conv2d_0_out_channels=400,
conv2d_2_in_channels=400,
conv2d_2_out_channels=400,
conv2d_2_group=25,
conv2d_4_in_channels=400,
conv2d_4_out_channels=400)
self.module0_2 = Module0(conv2d_0_in_channels=400,
conv2d_0_out_channels=400,
conv2d_2_in_channels=400,
conv2d_2_out_channels=400,
conv2d_2_group=25,
conv2d_4_in_channels=400,
conv2d_4_out_channels=400)
self.module0_3 = Module0(conv2d_0_in_channels=400,
conv2d_0_out_channels=400,
conv2d_2_in_channels=400,
conv2d_2_out_channels=400,
conv2d_2_group=25,
conv2d_4_in_channels=400,
conv2d_4_out_channels=400)
def construct(self, x):
module0_0_opt = self.module0_0(x)
module0_1_opt = self.module0_1(module0_0_opt)
module0_2_opt = self.module0_2(module0_1_opt)
module0_3_opt = self.module0_3(module0_2_opt)
return module0_3_opt
class MindSporeModel(nn.Cell):
def __init__(self):
super(MindSporeModel, self).__init__()
self.conv2d_0 = nn.Conv2d(in_channels=3,
out_channels=32,
kernel_size=(3, 3),
stride=(2, 2),
padding=(1, 1, 1, 1),
pad_mode="pad",
dilation=(1, 1),
group=1,
has_bias=True)
self.relu_1 = nn.ReLU()
self.conv2d_2 = nn.Conv2d(in_channels=32,
out_channels=32,
kernel_size=(1, 1),
stride=(2, 2),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=True)
self.module8_0 = Module8(conv2d_0_in_channels=32,
conv2d_0_out_channels=32,
module3_0_conv2d_0_in_channels=32,
module3_0_conv2d_0_out_channels=32,
module3_0_conv2d_0_kernel_size=(1, 1),
module3_0_conv2d_0_stride=(1, 1),
module3_0_conv2d_0_padding=0,
module3_0_conv2d_0_pad_mode="valid",
module3_0_conv2d_0_group=1,
module3_1_conv2d_0_in_channels=32,
module3_1_conv2d_0_out_channels=32,
module3_1_conv2d_0_kernel_size=(3, 3),
module3_1_conv2d_0_stride=(2, 2),
module3_1_conv2d_0_padding=(1, 1, 1, 1),
module3_1_conv2d_0_pad_mode="pad",
module3_1_conv2d_0_group=2)
self.relu_9 = nn.ReLU()
self.conv2d_10 = nn.Conv2d(in_channels=32,
out_channels=64,
kernel_size=(1, 1),
stride=(2, 2),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=True)
self.module8_1 = Module8(conv2d_0_in_channels=64,
conv2d_0_out_channels=64,
module3_0_conv2d_0_in_channels=32,
module3_0_conv2d_0_out_channels=64,
module3_0_conv2d_0_kernel_size=(1, 1),
module3_0_conv2d_0_stride=(1, 1),
module3_0_conv2d_0_padding=0,
module3_0_conv2d_0_pad_mode="valid",
module3_0_conv2d_0_group=1,
module3_1_conv2d_0_in_channels=64,
module3_1_conv2d_0_out_channels=64,
module3_1_conv2d_0_kernel_size=(3, 3),
module3_1_conv2d_0_stride=(2, 2),
module3_1_conv2d_0_padding=(1, 1, 1, 1),
module3_1_conv2d_0_pad_mode="pad",
module3_1_conv2d_0_group=4)
self.relu_17 = nn.ReLU()
self.module0_0 = Module0(conv2d_0_in_channels=64,
conv2d_0_out_channels=64,
conv2d_2_in_channels=64,
conv2d_2_out_channels=64,
conv2d_2_group=4,
conv2d_4_in_channels=64,
conv2d_4_out_channels=64)
self.conv2d_25 = nn.Conv2d(in_channels=64,
out_channels=160,
kernel_size=(1, 1),
stride=(2, 2),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=True)
self.module8_2 = Module8(conv2d_0_in_channels=160,
conv2d_0_out_channels=160,
module3_0_conv2d_0_in_channels=64,
module3_0_conv2d_0_out_channels=160,
module3_0_conv2d_0_kernel_size=(1, 1),
module3_0_conv2d_0_stride=(1, 1),
module3_0_conv2d_0_padding=0,
module3_0_conv2d_0_pad_mode="valid",
module3_0_conv2d_0_group=1,
module3_1_conv2d_0_in_channels=160,
module3_1_conv2d_0_out_channels=160,
module3_1_conv2d_0_kernel_size=(3, 3),
module3_1_conv2d_0_stride=(2, 2),
module3_1_conv2d_0_padding=(1, 1, 1, 1),
module3_1_conv2d_0_pad_mode="pad",
module3_1_conv2d_0_group=10)
self.relu_32 = nn.ReLU()
self.module11_0 = Module11(module0_0_conv2d_0_in_channels=160,
module0_0_conv2d_0_out_channels=160,
module0_0_conv2d_2_in_channels=160,
module0_0_conv2d_2_out_channels=160,
module0_0_conv2d_2_group=10,
module0_0_conv2d_4_in_channels=160,
module0_0_conv2d_4_out_channels=160,
module0_1_conv2d_0_in_channels=160,
module0_1_conv2d_0_out_channels=160,
module0_1_conv2d_2_in_channels=160,
module0_1_conv2d_2_out_channels=160,
module0_1_conv2d_2_group=10,
module0_1_conv2d_4_in_channels=160,
module0_1_conv2d_4_out_channels=160,
module0_2_conv2d_0_in_channels=160,
module0_2_conv2d_0_out_channels=160,
module0_2_conv2d_2_in_channels=160,
module0_2_conv2d_2_out_channels=160,
module0_2_conv2d_2_group=10,
module0_2_conv2d_4_in_channels=160,
module0_2_conv2d_4_out_channels=160,
module0_3_conv2d_0_in_channels=160,
module0_3_conv2d_0_out_channels=160,
module0_3_conv2d_2_in_channels=160,
module0_3_conv2d_2_out_channels=160,
module0_3_conv2d_2_group=10,
module0_3_conv2d_4_in_channels=160,
module0_3_conv2d_4_out_channels=160,
module0_4_conv2d_0_in_channels=160,
module0_4_conv2d_0_out_channels=160,
module0_4_conv2d_2_in_channels=160,
module0_4_conv2d_2_out_channels=160,
module0_4_conv2d_2_group=10,
module0_4_conv2d_4_in_channels=160,
module0_4_conv2d_4_out_channels=160,
module0_5_conv2d_0_in_channels=160,
module0_5_conv2d_0_out_channels=160,
module0_5_conv2d_2_in_channels=160,
module0_5_conv2d_2_out_channels=160,
module0_5_conv2d_2_group=10,
module0_5_conv2d_4_in_channels=160,
module0_5_conv2d_4_out_channels=160)
self.conv2d_75 = nn.Conv2d(in_channels=160,
out_channels=400,
kernel_size=(1, 1),
stride=(2, 2),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=True)
self.module8_3 = Module8(conv2d_0_in_channels=400,
conv2d_0_out_channels=400,
module3_0_conv2d_0_in_channels=160,
module3_0_conv2d_0_out_channels=400,
module3_0_conv2d_0_kernel_size=(1, 1),
module3_0_conv2d_0_stride=(1, 1),
module3_0_conv2d_0_padding=0,
module3_0_conv2d_0_pad_mode="valid",
module3_0_conv2d_0_group=1,
module3_1_conv2d_0_in_channels=400,
module3_1_conv2d_0_out_channels=400,
module3_1_conv2d_0_kernel_size=(3, 3),
module3_1_conv2d_0_stride=(2, 2),
module3_1_conv2d_0_padding=(1, 1, 1, 1),
module3_1_conv2d_0_pad_mode="pad",
module3_1_conv2d_0_group=25)
self.relu_82 = nn.ReLU()
self.module10_0 = Module10()
self.module11_1 = Module11(module0_0_conv2d_0_in_channels=400,
module0_0_conv2d_0_out_channels=400,
module0_0_conv2d_2_in_channels=400,
module0_0_conv2d_2_out_channels=400,
module0_0_conv2d_2_group=25,
module0_0_conv2d_4_in_channels=400,
module0_0_conv2d_4_out_channels=400,
module0_1_conv2d_0_in_channels=400,
module0_1_conv2d_0_out_channels=400,
module0_1_conv2d_2_in_channels=400,
module0_1_conv2d_2_out_channels=400,
module0_1_conv2d_2_group=25,
module0_1_conv2d_4_in_channels=400,
module0_1_conv2d_4_out_channels=400,
module0_2_conv2d_0_in_channels=400,
module0_2_conv2d_0_out_channels=400,
module0_2_conv2d_2_in_channels=400,
module0_2_conv2d_2_out_channels=400,
module0_2_conv2d_2_group=25,
module0_2_conv2d_4_in_channels=400,
module0_2_conv2d_4_out_channels=400,
module0_3_conv2d_0_in_channels=400,
module0_3_conv2d_0_out_channels=400,
module0_3_conv2d_2_in_channels=400,
module0_3_conv2d_2_out_channels=400,
module0_3_conv2d_2_group=25,
module0_3_conv2d_4_in_channels=400,
module0_3_conv2d_4_out_channels=400,
module0_4_conv2d_0_in_channels=400,
module0_4_conv2d_0_out_channels=400,
module0_4_conv2d_2_in_channels=400,
module0_4_conv2d_2_out_channels=400,
module0_4_conv2d_2_group=25,
module0_4_conv2d_4_in_channels=400,
module0_4_conv2d_4_out_channels=400,
module0_5_conv2d_0_in_channels=400,
module0_5_conv2d_0_out_channels=400,
module0_5_conv2d_2_in_channels=400,
module0_5_conv2d_2_out_channels=400,
module0_5_conv2d_2_group=25,
module0_5_conv2d_4_in_channels=400,
module0_5_conv2d_4_out_channels=400)
self.module0_1 = Module0(conv2d_0_in_channels=400,
conv2d_0_out_channels=400,
conv2d_2_in_channels=400,
conv2d_2_out_channels=400,
conv2d_2_group=25,
conv2d_4_in_channels=400,
conv2d_4_out_channels=400)
self.avgpool2d_160 = nn.AvgPool2d(kernel_size=(7, 7))
self.flatten_161 = nn.Flatten()
self.dense_162 = nn.Dense(in_channels=400, out_channels=1000, has_bias=True)
def construct(self, input_1):
opt_conv2d_0 = self.conv2d_0(input_1)
opt_relu_1 = self.relu_1(opt_conv2d_0)
opt_conv2d_2 = self.conv2d_2(opt_relu_1)
module8_0_opt = self.module8_0(opt_relu_1)
opt_add_8 = P.Add()(opt_conv2d_2, module8_0_opt)
opt_relu_9 = self.relu_9(opt_add_8)
opt_conv2d_10 = self.conv2d_10(opt_relu_9)
module8_1_opt = self.module8_1(opt_relu_9)
opt_add_16 = P.Add()(opt_conv2d_10, module8_1_opt)
opt_relu_17 = self.relu_17(opt_add_16)
module0_0_opt = self.module0_0(opt_relu_17)
opt_conv2d_25 = self.conv2d_25(module0_0_opt)
module8_2_opt = self.module8_2(module0_0_opt)
opt_add_31 = P.Add()(opt_conv2d_25, module8_2_opt)
opt_relu_32 = self.relu_32(opt_add_31)
module11_0_opt = self.module11_0(opt_relu_32)
opt_conv2d_75 = self.conv2d_75(module11_0_opt)
module8_3_opt = self.module8_3(module11_0_opt)
opt_add_81 = P.Add()(opt_conv2d_75, module8_3_opt)
opt_relu_82 = self.relu_82(opt_add_81)
module10_0_opt = self.module10_0(opt_relu_82)
module11_1_opt = self.module11_1(module10_0_opt)
module0_1_opt = self.module0_1(module11_1_opt)
opt_avgpool2d_160 = self.avgpool2d_160(module0_1_opt)
opt_flatten_161 = self.flatten_161(opt_avgpool2d_160)
opt_dense_162 = self.dense_162(opt_flatten_161)
return opt_dense_162
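# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original converted model). It
# shows how this RegNetX-400MF graph could be instantiated and run on a dummy
# batch; the 7x7 average pool above implies a 224x224 input. The random NumPy
# input below is an assumption for the smoke test, not taken from this file.
if __name__ == "__main__":
    import numpy as np
    from mindspore import Tensor

    net = MindSporeModel()
    dummy_batch = Tensor(np.random.randn(1, 3, 224, 224).astype(np.float32))
    logits = net(dummy_batch)
    print(logits.shape)  # expected: (1, 1000) for the 1000-class head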
| 60.26087 | 119 | 0.517969 |
4d96a5fc4581769bfa4cb282a742bdb3dcd6371c
| 5,109 |
py
|
Python
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/spline.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/spline.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/spline.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2 |
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
#!/usr/bin/env python
# Author : Pierre Schnizer
"""
Wrapper for the splines of gsl. This solver wraps all features as described
in Chapter 26 of the gsl documentation.
Difference between spline and interpolation module:
--------------------------------------------------
In the interpolation module the data for the independent and dependent data are
kept as reference in the various objects,whereas the spline module copies these
data into the internal C gsl_spline struct.
"""
from . import errors
from . import gslwrap
from . import interpolation
_acceleration = interpolation._acceleration
_common = interpolation._common
#linear = interpolation.linear
#polynomial = interpolation.polynomial
#cspline = interpolation.cspline
#cspline_periodic = interpolation.cspline_periodic
#akima = interpolation.akima
#akima_periodic = interpolation.akima_periodic
class _spline(_common):
_type = None
def __init__(self, n):
        assert self._type is not None
if n <= 0:
msg = "Number of elements must be positive but was %d!"
raise errors.gsl_InvalidArgumentError(msg % (n,))
tmp = gslwrap.pygsl_spline(self._type, n)
        if tmp is None:
raise errors.gsl_GenericError("Failed to allocate spline!")
self._object = tmp
def init(self, xa, ya):
"""
input : xa, ya
xa ... array of independent values
ya ... array of dependent values
        This method initializes the spline for the data (xa, ya), where xa and
        ya are arrays of the size that was set when this object was initialised.
        The spline object copies the data arrays xa and ya and stores the
        static state computed from the data. The xa data array is always
        assumed to be strictly ordered; the behavior for other arrangements
        is not defined.
"""
self._object.init((xa,ya))
def eval_vector(self, x):
"""
input : x
a vector of independent values
"""
return self._object.eval_vector(x)
def eval_e_vector(self, x):
"""
input : x
a vector of independent values
"""
return self._object.eval_e_vector(x)
def eval_deriv_vector(self, x):
"""
input : x
a vector of independent values
"""
return self._object.eval_deriv_vector(x)
def eval_deriv_e_vector(self, x):
"""
input : x
a vector of independent values
"""
return self._object.eval_deriv_e_vector(x)
def eval_deriv2_vector(self, x):
"""
input : x
a vector of independent values
"""
return self._object.eval_deriv2_vector(x)
def eval_deriv2_e_vector(self, x):
"""
input : x
a vector of independent values
"""
return self._object.eval_deriv2_e_vector(x)
def eval_integ_vector(self, a, b):
"""
input : a, b
             two vectors of independent values of the same length
        """
        return self._object.eval_integ_vector(a, b)
def eval_integ_e_vector(self, a, b):
"""
input : a, b
             two vectors of independent values of the same length
        """
        return self._object.eval_integ_e_vector(a, b)
def name(self):
"""
Returns the name of the interpolation type used
"""
# The spline struct has no name method, thus I use the class name.
return self.__class__.__name__
def GetSplineCObject(self):
"""
Useful when using GSL in C code.
"""
        raise NotImplementedError("GetSplineCObject is not implemented")
def GetAccelCObject(self):
"""
Useful when using GSL in C code.
"""
        raise NotImplementedError("GetAccelCObject is not implemented")
class linear(_spline):
"""
Linear interpolation.
"""
_type = gslwrap.cvar.gsl_interp_linear
class polynomial(_spline):
"""
Polynomial interpolation. This method should only be used for
interpolating small numbers of points because polynomial interpolation
introduces large oscillations, even for well-behaved datasets. The number
of terms in the interpolating polynomial is equal to the number of points.
"""
_type = gslwrap.cvar.gsl_interp_polynomial
class cspline(_spline):
"""
Cubic spline with natural boundary conditions.
"""
_type = gslwrap.cvar.gsl_interp_cspline
class cspline_periodic(_spline):
"""
Cubic spline with periodic boundary conditions
"""
_type = gslwrap.cvar.gsl_interp_cspline_periodic
class akima(_spline):
"""
Akima spline with natural boundary conditions
"""
_type = gslwrap.cvar.gsl_interp_akima
class akima_periodic(_spline):
"""
Akima spline with periodic boundary conditions
"""
_type = gslwrap.cvar.gsl_interp_akima_periodic
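# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes
# numpy is available and simply exercises the cubic-spline wrapper defined
# above: allocate for n knots, init with a strictly ordered xa, then evaluate.
if __name__ == "__main__":
    import numpy

    xa = numpy.linspace(0.0, 2.0 * numpy.pi, 16)
    ya = numpy.sin(xa)

    sp = cspline(len(xa))      # natural cubic spline for 16 knots
    sp.init(xa, ya)            # copies (xa, ya) into the gsl_spline struct
    xs = numpy.linspace(0.0, 2.0 * numpy.pi, 5)
    print(sp.eval_vector(xs))  # interpolated sin values at the points xs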
| 29.531792 | 82 | 0.609121 |
4d9fa403ce6b252624dbc04df0771323c80ccee8
| 20,327 |
py
|
Python
|
create_delete_transaction_from_search_bundle.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
create_delete_transaction_from_search_bundle.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
create_delete_transaction_from_search_bundle.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
from json import dumps
# put search bundle inline here
dict_b = {
"resourceType": "Bundle",
"id": "5d592a0c-1d5a-4de0-8eb8-619f1fb8391e",
"meta": {
"lastUpdated": "2021-01-07T05:37:25.388+00:00"
},
"type": "searchset",
"total": 25,
"link": [
{
"relation": "self",
"url": "http://hapi.fhir.org/baseR4/Group?_elements=id&_tag=2020-Sep&_tag%3Anot=2021-Jan"
},
{
"relation": "next",
"url": "http://hapi.fhir.org/baseR4?_getpages=5d592a0c-1d5a-4de0-8eb8-619f1fb8391e&_getpagesoffset=20&_count=20&_pretty=true&_bundletype=searchset&_elements=id"
}
],
"entry": [
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-location-202",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-location-202",
"meta": {
"versionId": "5",
"lastUpdated": "2020-12-16T00:16:01.438+00:00",
"source": "Health_eData_Inc#nHzuB8UULkvS8LF6",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-practitioner-301",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-practitioner-301",
"meta": {
"versionId": "6",
"lastUpdated": "2020-12-16T00:09:50.373+00:00",
"source": "Health_eData_Inc#PMPix0aQoI3J8h27",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-practitioner-300",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-practitioner-300",
"meta": {
"versionId": "6",
"lastUpdated": "2020-12-16T00:07:45.984+00:00",
"source": "Health_eData_Inc#uW2JIdfJnr4e6IsI",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-location-200",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-location-200",
"meta": {
"versionId": "5",
"lastUpdated": "2020-12-15T23:49:49.764+00:00",
"source": "Health_eData_Inc#kTv3yYG3uFIhABTb",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-all-1002",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-all-1002",
"meta": {
"versionId": "5",
"lastUpdated": "2020-09-30T00:31:32.053+00:00",
"source": "Health_eData_Inc#xXG4ePql1TNKMBIT",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-all-1001",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-all-1001",
"meta": {
"versionId": "5",
"lastUpdated": "2020-09-30T00:31:32.053+00:00",
"source": "Health_eData_Inc#xXG4ePql1TNKMBIT",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-location-2007",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-location-2007",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:22:09.041+00:00",
"source": "Health_eData_Inc#xQnDXntef8Ihowbs",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-location-2006",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-location-2006",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:22:09.041+00:00",
"source": "Health_eData_Inc#xQnDXntef8Ihowbs",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-location-2005",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-location-2005",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:22:09.041+00:00",
"source": "Health_eData_Inc#xQnDXntef8Ihowbs",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-location-2004",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-location-2004",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:22:09.041+00:00",
"source": "Health_eData_Inc#xQnDXntef8Ihowbs",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-location-2000",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-location-2000",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:22:09.041+00:00",
"source": "Health_eData_Inc#xQnDXntef8Ihowbs",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-location-2008",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-location-2008",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:22:09.041+00:00",
"source": "Health_eData_Inc#xQnDXntef8Ihowbs",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-practitioner-3008",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-practitioner-3008",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:21:22.823+00:00",
"source": "Health_eData_Inc#avf4s5AkqVLaJF0N",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-practitioner-3007",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-practitioner-3007",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:21:22.823+00:00",
"source": "Health_eData_Inc#avf4s5AkqVLaJF0N",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-practitioner-3000",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-practitioner-3000",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:21:22.823+00:00",
"source": "Health_eData_Inc#avf4s5AkqVLaJF0N",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-practitioner-3001",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-practitioner-3001",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:21:22.823+00:00",
"source": "Health_eData_Inc#avf4s5AkqVLaJF0N",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-practitioner-3002",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-practitioner-3002",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:21:22.823+00:00",
"source": "Health_eData_Inc#avf4s5AkqVLaJF0N",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/argo-pl-group-practitioner-3003",
"resource": {
"resourceType": "Group",
"id": "argo-pl-group-practitioner-3003",
"meta": {
"versionId": "2",
"lastUpdated": "2020-09-30T00:21:22.823+00:00",
"source": "Health_eData_Inc#avf4s5AkqVLaJF0N",
"tag": [
{
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/1494065",
"resource": {
"resourceType": "Group",
"id": "1494065",
"meta": {
"versionId": "1",
"lastUpdated": "2020-09-29T19:23:37.270+00:00",
"source": "#K1gzzIXvyOn8IR0x",
"tag": [
{
"system": "http://hl7.org/Connectathon",
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
},
{
"fullUrl": "http://hapi.fhir.org/baseR4/Group/1494064",
"resource": {
"resourceType": "Group",
"id": "1494064",
"meta": {
"versionId": "1",
"lastUpdated": "2020-09-29T19:23:36.975+00:00",
"source": "#gfxqly5bkf2BCzCf",
"tag": [
{
"system": "http://hl7.org/Connectathon",
"code": "2020-Sep"
},
{
"system": "http://terminology.hl7.org/CodeSystem/v3-ObservationValue",
"code": "SUBSETTED",
"display": "Resource encoded in summary mode"
}
]
}
},
"search": {
"mode": "match"
}
}
]
}
dict_del_b = {
"resourceType": "Bundle",
"type": "transaction",
"entry":[],
}
for e in dict_b['entry']:
request = dict(
method = 'DELETE',
url = '/'.join(e['fullUrl'].split('/')[-2:]),
)
entry = dict(
request = request
)
dict_del_b['entry'].append(entry)
print(len(dict_del_b['entry']))
# get transaction bundle from console here
print(dumps(dict_del_b, indent=2))
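# ---------------------------------------------------------------------------
# Illustrative follow-up sketch (not part of the original script). Posting the
# transaction bundle back to the server's base URL would execute the DELETE
# entries in a single request. The use of `requests` and the exact headers are
# assumptions; the base URL is taken from the fullUrl values above.
POST_TO_SERVER = False  # guard so the sketch has no side effects by default
if POST_TO_SERVER:
    import requests

    response = requests.post(
        "http://hapi.fhir.org/baseR4",
        json=dict_del_b,
        headers={"Content-Type": "application/fhir+json"},
    )
    print(response.status_code)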
| 37.093066 | 172 | 0.348797 |
678627b10248706b9bfd99f6c69f2414e1874355
| 1,704 |
py
|
Python
|
terminal_blog/models/menu.py
|
jcromerohdz/FlaskDev
|
29539259cba3a0e18c205fb439ee916fb12e5318
|
[
"MIT"
] | null | null | null |
terminal_blog/models/menu.py
|
jcromerohdz/FlaskDev
|
29539259cba3a0e18c205fb439ee916fb12e5318
|
[
"MIT"
] | null | null | null |
terminal_blog/models/menu.py
|
jcromerohdz/FlaskDev
|
29539259cba3a0e18c205fb439ee916fb12e5318
|
[
"MIT"
] | null | null | null |
from models.database import Database
from models.blog import Blog
class Menu(object):
def __init__(self):
self.user = input("Enter your author name: ")
self.user_blog = None
if self._user_has_account():
print("Welcome back {}".format(self.user))
else:
self._prompt_user_for_account()
def _user_has_account(self):
blog = Database.find_one('posts', {'author': self.user})
if blog is not None:
self.user_blog = Blog.from_mongo(blog['id'])
return True
else:
return False
def _prompt_user_for_account(self):
title = input("Enter a title: ")
description = input("Enter a blog description: ")
blog = Blog(author=self.user, title=title, description=description)
blog.save_to_mongo()
self.user_blog=blog
def run_menu(self):
read_or_write = input("Do you want to read (R) or write (W) blogs? ")
if read_or_write == 'R':
self._list_blogs()
self._view_blog()
elif read_or_write == 'W':
self.user_blog.new_post()
else:
print("Thank you for blogging!")
def _list_blogs(self):
blogs = Database.find(collection='posts', query={})
for b in blogs:
print("ID: {}, Title: {}, Author: {}".format(b['id'], b['title'], b['author']))
def _view_blog(self):
        blog_to_read = input("Enter the ID of the blog you'd like to read (copy and paste from the list): ")
blog = Database.find_one('posts', {'id':blog_to_read})
print("DATE: {}, Title: {}\n\n{}".format(blog['created_date'], blog['title'], blog['content']))
| 35.5 | 109 | 0.587441 |
67c17a1c17c00d3caed7a53ce51ec94727f86b6d
| 5,893 |
py
|
Python
|
lale/lib/lale/map.py
|
vishalbelsare/lale
|
654ca29ec0234b478d26724a25df28b28f5c0bc0
|
[
"Apache-2.0"
] | null | null | null |
lale/lib/lale/map.py
|
vishalbelsare/lale
|
654ca29ec0234b478d26724a25df28b28f5c0bc0
|
[
"Apache-2.0"
] | null | null | null |
lale/lib/lale/map.py
|
vishalbelsare/lale
|
654ca29ec0234b478d26724a25df28b28f5c0bc0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import lale.datasets.data_schemas
import lale.docstrings
import lale.operators
from lale.helpers import (
_is_ast_attribute,
_is_ast_subscript,
_is_pandas_df,
_is_spark_df,
)
class _MapImpl:
def __init__(self, columns, remainder="passthrough"):
self.columns = columns
self.remainder = remainder
def transform(self, X):
table_name = lale.datasets.data_schemas.get_table_name(X)
columns_to_keep = []
def get_map_function_output(column, new_column_name):
functions_module = importlib.import_module("lale.lib.lale.functions")
if _is_ast_subscript(column._expr) or _is_ast_attribute(column._expr):
function_name = "identity"
else:
function_name = column._expr.func.id
map_func_to_be_called = getattr(functions_module, function_name)
return map_func_to_be_called(X, column, new_column_name)
if isinstance(self.columns, list):
for column in self.columns:
new_column_name, X = get_map_function_output(column, None)
columns_to_keep.append(new_column_name)
elif isinstance(self.columns, dict):
for new_column_name, column in self.columns.items():
new_column_name, X = get_map_function_output(column, new_column_name)
columns_to_keep.append(new_column_name)
else:
raise ValueError("columns must be either a list or a dictionary.")
mapped_df = X # Do nothing as X already has the right columns
if self.remainder == "drop":
if _is_pandas_df(X):
mapped_df = X[columns_to_keep]
elif _is_spark_df(X):
mapped_df = X.select(columns_to_keep)
else:
raise ValueError(
"Only Pandas or Spark dataframe are supported as inputs. Please check that pyspark is installed if you see this error for a Spark dataframe."
)
mapped_df = lale.datasets.data_schemas.add_table_name(mapped_df, table_name)
return mapped_df
_hyperparams_schema = {
"allOf": [
{
"description": "This first sub-object lists all constructor arguments with their types, one at a time, omitting cross-argument constraints, if any.",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": [],
"properties": {
"columns": {
"description": "Mappings for producing output columns.",
"anyOf": [
{
"description": "Dictionary of output column names and mapping expressions.",
"type": "object",
"additionalProperties": {"laleType": "expression"},
},
{
"description": "List of mapping expressions. The output column name is determined by a heuristic based on the input column name and the transformation function.",
"type": "array",
"items": {"laleType": "expression"},
},
],
"default": [],
},
"remainder": {
"description": "Transformation for the remaining columns.",
"anyOf": [
{"enum": ["passthrough", "drop"]},
{"description": "Mapping expression.", "laleType": "operator"},
],
"default": "passthrough",
},
},
}
]
}
_input_transform_schema = {
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {
"X": {
"description": "The outer array is over rows.",
"anyOf": [
{"laleType": "Any"},
{
"type": "array",
"items": {
"description": "The inner array is over columns.",
"type": "array",
"items": {"laleType": "Any"},
},
},
],
}
},
}
_output_transform_schema = {
"description": "The outer array is over rows.",
"anyOf": [
{
"type": "array",
"items": {
"description": "The inner array is over columns.",
"type": "array",
"items": {"laleType": "Any"},
},
},
{"laleType": "Any"},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Relational algebra map operator.",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.map.html",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
Map = lale.operators.make_operator(_MapImpl, _combined_schemas)
lale.docstrings.set_docstrings(Map)
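# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It demonstrates
# the two accepted forms of `columns` on a small pandas frame. The column names
# and the use of `lale.expressions.it` are illustrative assumptions, and it is
# assumed here that `transform` can be called without an explicit `fit`.
if __name__ == "__main__":
    import pandas as pd

    from lale.expressions import it

    df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})

    # Dictionary form: output column name -> mapping expression.
    print(Map(columns={"a_copy": it.a}, remainder="drop").transform(df))

    # List form: output names are derived heuristically from the expressions.
    print(Map(columns=[it.b]).transform(df))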
| 36.83125 | 190 | 0.555405 |
c05ae130bc62cfb660f95c99a67eedcfe2179a96
| 7,599 |
py
|
Python
|
covidash/dashboard/app.py
|
CRitter93/covidash
|
9daaa8e17c2487068bfd7a7b581880ee6698cedd
|
[
"Apache-2.0"
] | 1 |
2020-05-11T17:54:58.000Z
|
2020-05-11T17:54:58.000Z
|
covidash/dashboard/app.py
|
CRitter93/covidash
|
9daaa8e17c2487068bfd7a7b581880ee6698cedd
|
[
"Apache-2.0"
] | null | null | null |
covidash/dashboard/app.py
|
CRitter93/covidash
|
9daaa8e17c2487068bfd7a7b581880ee6698cedd
|
[
"Apache-2.0"
] | null | null | null |
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from covidash.dashboard.data_store import DataStore
import covidash.dashboard.figures as figures
from covidash.dashboard.figures import DATA_TYPE_MAPPING
data_store = DataStore('../../data')
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(children='Covid-19 Ausbreitung in Deutschland'),
html.Div(children=[
html.Div(children=[
html.Div(children='''
Dieses Dashboard dient der Visualisierung und Analyse der zeitlichen und geographischen Ausbreitung von Covid-19 in Deutschland.\n
Die Daten stammen vom Robert-Koch Institut und können auf der folgenden Seite abgerufen werden (Stand {}):
'''.format(data_store.get('latest_date')), className='ten columns'),
dcc.Link('NPGEO Corona Hub 2020',
href="https://npgeo-corona-npgeo-de.hub.arcgis.com/", className='two columns')
], className='row'),
html.Div(children=[
dcc.DatePickerSingle(
id='date-picker',
min_date_allowed=data_store.get('first_date'),
max_date_allowed=data_store.get('latest_date'),
initial_visible_month=data_store.get('latest_date'),
date=str(data_store.get('latest_date')),
display_format='DD.MM.YYYY',
className='two columns'
),
dcc.Dropdown(
id='data-dropdown',
options=[
{'label': DATA_TYPE_MAPPING[type], 'value': type} for type in DATA_TYPE_MAPPING
],
value='total',
clearable=False,
className='two columns'
),
dcc.Dropdown(
id='granularity-dropdown',
options=[
{'label': 'Landkreise', 'value': 'landkreis'},
{'label': 'Bundesländer', 'value': 'bundesland'}
],
value='landkreis',
clearable=False,
className='two columns'
),
], className='row')
],
className='row'),
html.Div(children=[
dcc.Graph(
id='map-graph',
figure=figures.get_main_map_landkreise(data_store),
className="six columns",
),
html.Div(children=[
dcc.Graph(
id='the-curve-graph',
figure=figures.the_curve_line(data_store, {}),
className='row'
),
html.Div(children=[
dcc.Graph(
id='gender-graph',
figure=figures.bar_chart(data_store, 'gender', {}),
className='six columns'
),
dcc.Graph(
id='age-graph',
figure=figures.bar_chart(data_store, 'age', {}),
className='six columns'
)
],
className='row')],
className="six columns")],
className='row',
style={'height': 'auto'}),
# hidden div for storing filtering
html.Div(children='{}', id='filter-values', style={'display': 'none'})
], style={'height': 'auto'})
@app.callback(
Output('filter-values', 'children'),
[Input('map-graph', 'selectedData'), Input('gender-graph', 'selectedData'), Input('age-graph', 'selectedData'), Input('date-picker', 'date'),
Input('data-dropdown', 'value'), Input('granularity-dropdown', 'value')],
[State('filter-values', 'children')]
)
def update_filtering(geo_selection, gender_selection, age_selection, date, data, granularity, current_filters):
current_filters = json.loads(current_filters)
current_granularity = current_filters.get('granularity')
current_data = current_filters.get('data')
filters = {}
if geo_selection:
filters['geo'] = [data['location'] for data in geo_selection['points']]
if gender_selection:
filters['gender'] = [data['x'] for data in gender_selection['points']]
if age_selection:
filters['age'] = [data['x'] for data in age_selection['points']]
if date:
filters['date'] = date
if data:
filters['data'] = data
if current_data != data:
filters['data_changed'] = True
if granularity:
filters['granularity'] = granularity
if current_granularity != granularity:
filters['granularity_changed'] = True
# clear geo filters
if filters.get('geo') is not None:
del filters['geo']
return json.dumps(filters)
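# Editorial note: the hidden 'filter-values' div holds a JSON blob whose keys
# come from the callback above, for example (values are made up):
#   {"gender": ["M"], "age": ["A35-A59"], "date": "2020-05-10",
#    "data": "total", "data_changed": true,
#    "granularity": "landkreis", "granularity_changed": true}
# Any 'geo' selection is added and then deleted again before the JSON is
# stored, so it never reaches the downstream filter_graphs callback.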
@app.callback(
[Output('the-curve-graph', 'figure'), Output('gender-graph', 'figure'), Output('age-graph', 'figure'), Output('map-graph', 'figure')],
[Input('filter-values', 'children')],
[State('the-curve-graph', 'figure'), State('gender-graph', 'figure'), State('age-graph', 'figure'), State('map-graph', 'figure')])
def filter_graphs(filter_data, the_curve, gender_bar, age_bar, map_graph):
filters = json.loads(filter_data)
if filters.get('granularity_changed'):
if filters.get('granularity') == 'landkreis':
map_figure = figures.get_main_map_landkreise(data_store)
elif filters.get('granularity') == 'bundesland':
map_figure = figures.get_main_map_bundeslaender(data_store)
else:
raise ValueError
# update map anyways to apply the correct filter
map_figure = figures.update_map(data_store, map_figure, filters)
else:
map_figure = figures.update_map(data_store, map_graph, filters)
if filters.get('data_changed'):
if filters['data'] == 'total':
the_curve_figure = figures.the_curve_line(data_store, filters)
else:
the_curve_figure = figures.the_curve_bar(data_store, filters)
else:
the_curve_figure = figures.update_the_curve(data_store, the_curve, filters)
return [the_curve_figure,
figures.update_bar(data_store, gender_bar, 'gender', filters),
figures.update_bar(data_store, age_bar, 'age', filters),
map_figure]
if __name__ == '__main__':
app.run_server(debug=True)
| 45.232143 | 162 | 0.504803 |
97d804b6b5c7ff541e4e008c3133e89951c5fdb4
| 280 |
py
|
Python
|
weibo/test/testZZ.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
weibo/test/testZZ.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
weibo/test/testZZ.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
#coding:utf8
'''
Created on June 6, 2016
@author: wb-zhaohaibo
'''
# import re
# detail_cont = "411023199112300511"
# pattern = re.compile(ur'/^(\d{15}$|^\d{18}$|^\d{17}(\d|X|x))$/')
# aa = re.match(pattern, detail_cont, 0)
# print aa
import this
print this
| 14.736842 | 67 | 0.592857 |
3f37e5c2431a14543279c7f67d969c5845bd49e2
| 10,720 |
py
|
Python
|
Packs/PhishLabs/Integrations/PhishLabsIOC/PhishLabsIOC_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/PhishLabs/Integrations/PhishLabsIOC/PhishLabsIOC_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/PhishLabs/Integrations/PhishLabsIOC/PhishLabsIOC_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from CommonServerPython import *
def test_create_indicator_no_date():
from PhishLabsIOC import create_indicator_content
files_json = """
{
"attributes": [
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "xyz",
"name": "md5",
"value": "c8092abd8d581750c0530fa1fc8d8318"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "abc",
"name": "filetype",
"value": "application/zip"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "qwe",
"name": "name",
"value": "Baycc.zip"
}
],
"createdAt": "2019-05-14T13:03:45Z",
"falsePositive": false,
"id": "def",
"type": "Attachment",
"value": "c8092abd8d581750c0530fa1fc8d8318"
} """
result = {
'ID': 'def',
'Indicator': 'c8092abd8d581750c0530fa1fc8d8318',
'Type': 'Attachment',
'CreatedAt': '2019-05-14T13:03:45Z',
'UpdatedAt': '',
'FalsePositive': False,
}
indicator = json.loads(files_json)
actual = create_indicator_content(indicator)
assert actual == result
def test_create_indicator_with_none_date():
from PhishLabsIOC import create_indicator_content
files_json = """
{
"attributes": [
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "xyz",
"name": "md5",
"value": "c8092abd8d581750c0530fa1fc8d8318"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "abc",
"name": "filetype",
"value": "application/zip"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "qwe",
"name": "name",
"value": "Baycc.zip"
}
],
"createdAt": "2019-05-14T13:03:45Z",
"updatedAt": "0001-01-01T00:00:00Z",
"falsePositive": false,
"id": "def",
"type": "Attachment",
"value": "c8092abd8d581750c0530fa1fc8d8318"
} """
result = {
'ID': 'def',
'Indicator': 'c8092abd8d581750c0530fa1fc8d8318',
'Type': 'Attachment',
'CreatedAt': '2019-05-14T13:03:45Z',
'UpdatedAt': '',
'FalsePositive': False,
}
indicator = json.loads(files_json)
actual = create_indicator_content(indicator)
assert actual == result
def test_create_indicator_with_date():
from PhishLabsIOC import create_indicator_content
files_json = """
{
"attributes": [
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "xyz",
"name": "md5",
"value": "c8092abd8d581750c0530fa1fc8d8318"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "abc",
"name": "filetype",
"value": "application/zip"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "qwe",
"name": "name",
"value": "Baycc.zip"
}
],
"createdAt": "2019-05-14T13:03:45Z",
"updatedAt": "2019-05-14T13:03:45Z",
"falsePositive": false,
"id": "def",
"type": "Attachment",
"value": "c8092abd8d581750c0530fa1fc8d8318"
} """
result = {
'ID': 'def',
'Indicator': 'c8092abd8d581750c0530fa1fc8d8318',
'Type': 'Attachment',
'CreatedAt': '2019-05-14T13:03:45Z',
'UpdatedAt': '2019-05-14T13:03:45Z',
'FalsePositive': False,
}
indicator = json.loads(files_json)
actual = create_indicator_content(indicator)
assert actual == result
def test_populate_context_files():
from PhishLabsIOC import populate_context, get_file_properties, create_phishlabs_object
files_json = """
{
"attributes": [
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "xyz",
"name": "md5",
"value": "c8092abd8d581750c0530fa1fc8d8318"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "abc",
"name": "filetype",
"value": "application/zip"
},
{
"createdAt": "2019-05-14T13:03:45Z",
"id": "qwe",
"name": "name",
"value": "Baycc.zip"
}
],
"createdAt": "2019-05-14T13:03:45Z",
"falsePositive": false,
"id": "def",
"type": "Attachment",
"updatedAt": "0001-01-01T00:00:00Z",
"value": "c8092abd8d581750c0530fa1fc8d8318"
} """
file = json.loads(files_json)
file_md5, file_name, file_type = get_file_properties(file)
phishlabs_entry = create_phishlabs_object(file)
phishlabs_entry['Name'] = file_name
phishlabs_entry['Type'] = file_type
phishlabs_entry['MD5'] = file_md5
phishlabs_result = [{
'ID': 'def',
'CreatedAt': '2019-05-14T13:03:45Z',
'Name': 'Baycc.zip',
'Type': 'application/zip',
'MD5': 'c8092abd8d581750c0530fa1fc8d8318',
'Attribute': [
{
'CreatedAt': '2019-05-14T13:03:45Z',
'Type': None,
'Name': 'md5',
'Value': 'c8092abd8d581750c0530fa1fc8d8318'
},
{
'CreatedAt': '2019-05-14T13:03:45Z',
'Type': None,
'Name': 'filetype',
'Value': 'application/zip'
},
{
'CreatedAt': '2019-05-14T13:03:45Z',
'Type': None,
'Name': 'name',
'Value': 'Baycc.zip'
}
]
}]
global_entry = {
'Name': file_name,
'Type': file_type,
'MD5': file_md5
}
global_result = [{
'Name': 'Baycc.zip',
'Type': 'application/zip',
'MD5': 'c8092abd8d581750c0530fa1fc8d8318'
}]
context = populate_context([], [], [(global_entry, phishlabs_entry)], [])
assert len(context.keys()) == 2
assert context[outputPaths['file']] == global_result
assert context['PhishLabs.File(val.ID && val.ID === obj.ID)'] == phishlabs_result
def test_populate_context_emails():
from PhishLabsIOC import populate_context, get_email_properties, create_phishlabs_object
emails_json = """
{
"attributes":[
{
"createdAt":"2019-05-13T16:54:18Z",
"id":"abc",
"name":"email-body",
"value":"-----Original Message-----From: A Sent: Monday, May 13, 2019 12:22 PMTo:"
},
{
"createdAt":"2019-05-13T16:54:18Z",
"id":"def",
"name":"from",
"value":"[email protected]"
},
{
"createdAt":"2019-05-13T16:54:18Z",
"id":"cf3182ca-92ec-43b6-8aaa-429802a99fe5",
"name":"to",
"value":"[email protected]"
}
],
"createdAt":"2019-05-13T16:54:18Z",
"falsePositive":false,
"id":"ghi",
"type":"E-mail",
"updatedAt":"0001-01-01T00:00:00Z",
"value":"FW: Task"
} """
email = json.loads(emails_json)
email_body, email_to, email_from = get_email_properties(email)
phishlabs_entry = create_phishlabs_object(email)
phishlabs_entry['To'] = email_to,
phishlabs_entry['From'] = email_from,
phishlabs_entry['Body'] = email_body
phishlabs_entry['Subject'] = email.get('value')
phishlabs_result = [{
'ID': 'ghi',
'CreatedAt': '2019-05-13T16:54:18Z',
'To': ('[email protected]',),
'From': ('[email protected]',),
'Body': '-----Original Message-----From: A Sent: Monday, May 13, 2019 12:22 PMTo:',
'Subject': 'FW: Task',
'Attribute':
[{
'CreatedAt': '2019-05-13T16:54:18Z',
'Type': None,
'Name': 'email-body',
'Value': '-----Original Message-----From: A Sent: Monday, May 13, 2019 12:22 PMTo:'
},
{
'CreatedAt': '2019-05-13T16:54:18Z',
'Type': None,
'Name': 'from',
'Value': '[email protected]'
},
{
'CreatedAt': '2019-05-13T16:54:18Z',
'Type': None,
'Name': 'to',
'Value': '[email protected]'
}]
}]
global_entry = {
'To': email_to,
'From': email_from,
'Body': email_body,
'Subject': email.get('value')
}
global_result = [{
'To': '[email protected]',
'From': '[email protected]',
'Body': '-----Original Message-----From: A Sent: Monday, May 13, 2019 12:22 PMTo:',
'Subject': 'FW: Task'
}]
context = populate_context([], [], [], [], [(global_entry, phishlabs_entry)])
assert len(context.keys()) == 2
assert context['Email'] == global_result
assert context['PhishLabs.Email(val.ID && val.ID === obj.ID)'] == phishlabs_result
def test_indicator_type_finder():
from PhishLabsIOC import indicator_type_finder
indicator_data_1 = {
'value': '[email protected]',
'type': "Sender"
}
indicator_data_2 = {
'value': 'https://www.some.path/[email protected]',
'type': "URL"
}
assert indicator_type_finder(indicator_data_1) == 'Email'
assert indicator_type_finder(indicator_data_2) == 'URL'
| 31.43695 | 99 | 0.445336 |
e13e0016be4333c45b0e4ead7742b3210fc1cb51
| 12,505 |
py
|
Python
|
Packs/ApiModules/Scripts/GSuiteApiModule/GSuiteApiModule_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/ApiModules/Scripts/GSuiteApiModule/GSuiteApiModule_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/ApiModules/Scripts/GSuiteApiModule/GSuiteApiModule_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import json
import pytest
from GSuiteApiModule import DemistoException, COMMON_MESSAGES, GSuiteClient
with open('test_data/service_account_json.txt') as f:
TEST_JSON = f.read()
PROXY_METHOD_NAME = 'GSuiteApiModule.handle_proxy'
CREDENTIAL_SUBJECT = '[email protected]'
MOCKER_HTTP_METHOD = 'GSuiteApiModule.GSuiteClient.http_request'
@pytest.fixture
def gsuite_client():
headers = {
'Content-Type': 'application/json'
}
return GSuiteClient(GSuiteClient.safe_load_non_strict_json(TEST_JSON), base_url='https://www.googleapis.com/',
verify=False, proxy=False, headers=headers)
def test_safe_load_non_strict_json():
"""
Scenario: Dictionary should be prepared from json string.
Given:
- json as string.
When:
- Preparing dictionary from string.
Then:
- Ensure valid json should be loaded successfully.
"""
excepted_json = json.loads(TEST_JSON, strict=False)
assert GSuiteClient.safe_load_non_strict_json(TEST_JSON) == excepted_json
def test_safe_load_non_strict_json_parse_error():
"""
Scenario: Failed to load json when invalid json string is given.
Given:
    - Invalid json string.
When:
- Preparing dictionary from string.
Then:
- Ensure Exception is raised with proper error message.
"""
with pytest.raises(ValueError, match=COMMON_MESSAGES['JSON_PARSE_ERROR']):
GSuiteClient.safe_load_non_strict_json('Invalid json')
def test_safe_load_non_strict_json_empty():
"""
Scenario: Returns {}(blank) dictionary when empty json string is given.
Given:
    - Empty json string.
When:
- Preparing dictionary from string.
Then:
- Ensure {}(blank) dictionary should be returned.
"""
assert GSuiteClient.safe_load_non_strict_json('') == {}
def test_validate_and_extract_response(mocker):
"""
Scenario: Parse response when status code is 200 or 204.
Given:
- Tuple containing response object and content.
When:
- Validating and loading json from response.
Then:
- Ensure content json should be parsed successfully.
"""
from GSuiteApiModule import httplib2, demisto
mocker.patch.object(demisto, 'debug')
response = httplib2.Response({'status': 200})
expected_content = {'response': {}}
assert GSuiteClient.validate_and_extract_response((response, b'{"response": {}}')) == expected_content
def test_validate_and_extract_response_error(mocker):
"""
Scenario: Should raise exception when status code is not 200 or 204.
Given:
- Tuple containing response object and content.
When:
- Validating and loading json from response.
Then:
- Ensure the Demisto exception should be raised respective to status code.
"""
from GSuiteApiModule import httplib2, demisto
mocker.patch.object(demisto, 'debug')
response = httplib2.Response({'status': 400})
with pytest.raises(DemistoException, match=COMMON_MESSAGES['BAD_REQUEST_ERROR'].format('BAD REQUEST')):
GSuiteClient.validate_and_extract_response((response, b'{"error": {"message":"BAD REQUEST"}}'))
response = httplib2.Response({'status': 509})
with pytest.raises(DemistoException, match=COMMON_MESSAGES['UNKNOWN_ERROR'].format(509, 'error')):
GSuiteClient.validate_and_extract_response((response, b'{"error": {"message":"error"}}'))
def test_get_http_client(mocker):
"""
Scenario: Should return http client object with configured proxy, verify and timeout parameters.
Given:
- proxy: Boolean indicates whether to use proxy or not.
- verify: Boolean indicates whether to use ssl certification.
- timeout: Timeout value for request.
When:
- Initializing httplib2.Http object when proxy, timeout and verify parameters provided.
Then:
- Ensure configured Http() object should be return.
"""
from GSuiteApiModule import httplib2
mocker.patch(PROXY_METHOD_NAME, return_value={'https': 'http url'})
http = GSuiteClient.get_http_client(proxy=True, verify=False, timeout=60)
assert isinstance(http, httplib2.Http)
assert http.disable_ssl_certificate_validation is True
assert http.timeout == 60
def test_get_http_client_prefix_https_addition(mocker):
"""
Scenario: Should return Http object with proxy configured with prefix https.
Given:
- proxy: Boolean indicates whether to use proxy or not.
- verify: Boolean indicates whether to use ssl certification.
- timeout: Timeout value for request.
When:
- Initializing httplib2.Http object when proxy, timeout and verify parameters provided.
Then:
- Ensure prefix https should be added before given https proxy value.
"""
from GSuiteApiModule import httplib2
    mocker.patch(PROXY_METHOD_NAME, return_value={'https': 'demisto:admin@0.0.0.0:3128'})
http = GSuiteClient.get_http_client(proxy=True, verify=True)
assert isinstance(http, httplib2.Http)
assert http.proxy_info.proxy_host == '0.0.0.0'
assert http.proxy_info.proxy_port == 3128
assert http.proxy_info.proxy_user == 'demisto'
assert http.proxy_info.proxy_pass == 'admin'
def test_set_authorized_http(gsuite_client):
"""
Scenario: Initialize AuthorizedHttp with given subject, scopes and timeout.
Given:
- scopes: List of scopes needed to make request.
- subject: To link subject with credentials.
- timeout: Timeout value for request.
When:
- Initializing AuthorizedHttp with the parameters provided.
Then:
- Ensure AuthorizedHttp is returned with configuration.
"""
from GSuiteApiModule import AuthorizedHttp
gsuite_client.set_authorized_http(scopes=['scope1', 'scope2'], subject=CREDENTIAL_SUBJECT)
assert isinstance(gsuite_client.authorized_http, AuthorizedHttp)
def test_http_request(mocker, gsuite_client):
"""
Scenario: Request to API call should give response.
Given:
- url_suffix: url_suffix of url.
- params: Parameters to pass in request url.
- method: Method to use while making http request.
- body: Request body.
When:
    - Making an http request with the given url_suffix and params.
    Then:
    - Ensure the parsed response JSON is returned.
"""
from GSuiteApiModule import httplib2, AuthorizedHttp
content = '{"items": {}}'
response = httplib2.Response({'status': 200, 'content': content})
mocker.patch.object(AuthorizedHttp, 'request', return_value=(response, content))
gsuite_client.set_authorized_http(scopes=['scope1', 'scope2'], subject=CREDENTIAL_SUBJECT)
expected_response = gsuite_client.http_request(url_suffix='url_suffix', params={'userId': 'abc'}, )
assert expected_response == {'items': {}}
def test_http_request_http_error(mocker, gsuite_client):
"""
    Scenario: Proxy setup is invalid; the API request should raise the respective proxy error message.
Given:
- url_suffix: url_suffix of url.
- params: Parameters to pass in request url.
When:
- Initializing AuthorizedHttp with the parameters provided.
Then:
- Ensure Demisto exception is raised with respective proxy error.
"""
from GSuiteApiModule import httplib2, AuthorizedHttp
gsuite_client.set_authorized_http(scopes=['scope1', 'scope2'], subject=CREDENTIAL_SUBJECT)
# Proxy Error
mocker.patch.object(AuthorizedHttp, 'request', side_effect=httplib2.socks.HTTPError((407, b'proxy error')))
with pytest.raises(DemistoException):
gsuite_client.http_request(url_suffix='url_suffix', params={'userId': 'abc'})
# HTTP Error
mocker.patch.object(AuthorizedHttp, 'request', side_effect=httplib2.socks.HTTPError((409, b'HTTP error')))
with pytest.raises(DemistoException):
gsuite_client.http_request(url_suffix='url_suffix', params={'userId': 'abc'})
# HTTP Error no tuple
mocker.patch.object(AuthorizedHttp, 'request', side_effect=httplib2.socks.HTTPError('HTTP error'))
with pytest.raises(DemistoException):
gsuite_client.http_request(url_suffix='url_suffix', params={'userId': 'abc'})
def test_http_request_timeout_error(mocker, gsuite_client):
"""
    Scenario: URL is invalid; the API request should raise the respective connection timeout message.
Given:
- url_suffix: url_suffix of url.
- params: Parameters to pass in request url.
When:
- Initializing AuthorizedHttp with the parameters provided.
Then:
- Ensure Demisto exception is raised with respective connection timeout error.
"""
from GSuiteApiModule import AuthorizedHttp
gsuite_client.set_authorized_http(scopes=['scope1', 'scope2'], subject=CREDENTIAL_SUBJECT)
mocker.patch.object(AuthorizedHttp, 'request', side_effect=TimeoutError('timeout error'))
with pytest.raises(DemistoException, match=COMMON_MESSAGES['TIMEOUT_ERROR'].format('timeout error')):
gsuite_client.http_request(url_suffix='url_suffix', params={'userId': 'abc'})
def test_http_request_transport_error(mocker, gsuite_client):
"""
    Scenario: URL is invalid; the API request should raise the respective transport error message.
Given:
- url_suffix: url_suffix of url.
- params: Parameters to pass in request url.
When:
- Initializing AuthorizedHttp with the parameters provided.
Then:
- Ensure Demisto exception is raised with respective transport error.
"""
from GSuiteApiModule import AuthorizedHttp, exceptions
gsuite_client.set_authorized_http(scopes=['scope1', 'scope2'], subject=CREDENTIAL_SUBJECT)
mocker.patch.object(AuthorizedHttp, 'request', side_effect=exceptions.TransportError('proxyerror'))
with pytest.raises(DemistoException, match=COMMON_MESSAGES['PROXY_ERROR']):
gsuite_client.http_request(url_suffix='url_suffix', params={'userId': 'abc'})
mocker.patch.object(AuthorizedHttp, 'request', side_effect=exceptions.TransportError('new error'))
with pytest.raises(DemistoException, match=COMMON_MESSAGES['TRANSPORT_ERROR'].format('new error')):
gsuite_client.http_request(url_suffix='url_suffix', params={'userId': 'abc'})
def test_http_request_refresh_error(mocker, gsuite_client):
"""
    Scenario: Token generation/refresh failed; the API request should raise the respective error message.
Given:
- url_suffix: url_suffix of url.
- params: Parameters to pass in request url.
When:
- Initializing AuthorizedHttp with the parameters provided.
Then:
- Ensure Demisto exception is raised with respective refresh error message.
"""
from GSuiteApiModule import AuthorizedHttp, exceptions
gsuite_client.set_authorized_http(scopes=['scope1', 'scope2'], subject=CREDENTIAL_SUBJECT)
mocker.patch.object(AuthorizedHttp, 'request', side_effect=exceptions.RefreshError(
"invalid_request: Invalid impersonation & quot; sub & quot; field."))
with pytest.raises(DemistoException, match=COMMON_MESSAGES['REFRESH_ERROR'].format(
"invalid_request: Invalid impersonation & quot; sub & quot; field.")):
gsuite_client.http_request(url_suffix='url_suffix', params={'userId': 'abc'})
def test_http_request_error(mocker, gsuite_client):
"""
    Scenario: An unknown error occurred during the API request; the respective error message should be raised.
Given:
- url_suffix: url_suffix of url.
- params: Parameters to pass in request url.
When:
- Initializing AuthorizedHttp with the parameters provided.
Then:
- Ensure Demisto exception is raised with respective error.
"""
from GSuiteApiModule import AuthorizedHttp
gsuite_client.set_authorized_http(scopes=['scope1', 'scope2'], subject=CREDENTIAL_SUBJECT)
mocker.patch.object(AuthorizedHttp, 'request', side_effect=Exception('error'))
with pytest.raises(DemistoException, match='error'):
gsuite_client.http_request(url_suffix='url_suffix', params={'userId': 'abc'})
def test_strip_dict():
"""
    Scenario: Whitespace is stripped from dictionary values and entries with empty values are removed.
Given:
- A dictionary with entries having whitespaces and empty values
When:
- Calling strip_dict() method.
Then:
- Ensure returned dictionary has stripped values and entries with empty values are removed.
"""
sample_input = {"key1": " VALUE_1 ", "key2": ""}
sample_output = {"key1": "VALUE_1"}
assert GSuiteClient.strip_dict(sample_input) == sample_output
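# --- Illustrative extra check (editor's addition, not part of the original test file) ---
# A minimal parametrized sketch that relies only on the behaviour exercised above:
# values are stripped and entries whose value is empty are dropped. It assumes
# pytest is already imported at the top of this test module (pytest.raises is used
# earlier in the file).
@pytest.mark.parametrize('given,expected', [
    ({'key1': ' VALUE_1 ', 'key2': ''}, {'key1': 'VALUE_1'}),
    ({'key1': 'VALUE_1'}, {'key1': 'VALUE_1'}),
])
def test_strip_dict_parametrized_sketch(given, expected):
    assert GSuiteClient.strip_dict(given) == expected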
| 33.257979 | 114 | 0.722671 |
e1882bb8873cd8b16dc3263d1574bf6fcbfe54ae
| 138 |
py
|
Python
|
backend/parser.py
|
giliam/turbo-songwriter
|
3d100b08dc19c60d7a1324120e06bd9f971eea5a
|
[
"MIT"
] | null | null | null |
backend/parser.py
|
giliam/turbo-songwriter
|
3d100b08dc19c60d7a1324120e06bd9f971eea5a
|
[
"MIT"
] | 12 |
2017-09-25T20:13:29.000Z
|
2020-02-12T00:12:41.000Z
|
backend/parser.py
|
giliam/turbo-songwriter
|
3d100b08dc19c60d7a1324120e06bd9f971eea5a
|
[
"MIT"
] | null | null | null |
from songwriter.parser import Parser
parser = Parser("../data/file/word/document.xml")
parser.parse()
parser.save_songs()
parser.compile()
| 27.6 | 49 | 0.775362 |
8311e94e16aac9bbe67ce428d4f4a2e4bea2f7c6
| 103 |
py
|
Python
|
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Lab/Solutions/P06_NumberInRange.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Lab/Solutions/P06_NumberInRange.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L03_ConditionalStatementsAdvanced/Lab/Solutions/P06_NumberInRange.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | 1 |
2022-02-23T13:03:14.000Z
|
2022-02-23T13:03:14.000Z
|
number = int(input())
if -100 <= number <= 100 and number != 0:
print('Yes')
else:
print('No')
| 17.166667 | 41 | 0.553398 |
831fa34d1ba29f04e0ddb0a332450fbc8c6ccbfe
| 3,786 |
py
|
Python
|
official/cv/ADNet/src/utils/precision_plot.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/ADNet/src/utils/precision_plot.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/ADNet/src/utils/precision_plot.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from matplotlib import pyplot as plt
from src.utils.overlap_ratio import overlap_ratio
def plot_result(Z, title, show=True, save_plot=None, xlabel=None, ylabel=None) -> None:
plt.plot(Z)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
plt.title(title)
plt.ylim([0, 1])
if save_plot:
plt.savefig(save_plot)
if show:
plt.show()
plt.clf()
def distance_precision_plot(bboxes, ground_truth, title, show=True, save_plot=None):
# PRECISION_PLOT
# Calculates precision for a series of distance thresholds (percentage of frames where the distance to the ground
# truth is within the threshold). The results are shown in a new figure if SHOW is true.
# Accepts positions and ground truth as Nx2 matrices(for N frames), and a title string.
# matlab code credit:
# Joao F.Henriques, 2014
# http: // www.isr.uc.pt / ~henriques /
positions = bboxes[:, [1, 0]] + bboxes[:, [3, 2]] / 2
ground_truth = ground_truth[:, [1, 0]] + ground_truth[:, [3, 2]] / 2
max_threshold = 50 # used for graphs in the paper
precisions = np.zeros([max_threshold, 1])
if len(positions) != len(ground_truth):
print("WARNING: the size of positions and ground_truth are not same")
# just ignore any extra frames, in either results or ground truth
n = min(len(positions), len(ground_truth))
positions = positions[:n]
ground_truth = ground_truth[:n]
# calculate distances to ground truth over all frames
distances = np.sqrt(
np.square(positions[:, 0] - ground_truth[:, 0]) + np.square(positions[:, 1] - ground_truth[:, 1]))
distances = distances[~np.isnan(distances)]
# compute precision
precisions = []
for p in range(max_threshold):
precisions.append(len(distances[distances <= p]) / len(distances))
# plot
if show or save_plot:
if save_plot is not None:
save_plot += '-distance'
plot_result(precisions, title, show=show, save_plot=save_plot, xlabel='distance threshold', ylabel='precision')
return precisions
def iou_precision_plot(bboxes, ground_truth, title, show=True, save_plot=None):
max_threshold = 100 # used for graphs in the paper
# precisions = np.zeros([max_threshold, 1])
if len(bboxes) != len(ground_truth):
print("WARNING: the size of iou and ground_truth are not same")
# just ignore any extra frames, in either results or ground truth
n = min(len(bboxes), len(ground_truth))
ground_truth = ground_truth[:n]
iou = overlap_ratio(bboxes, ground_truth)
iou = np.array(iou)
# compute precision
precisions = []
for p in range(max_threshold):
precisions.append(len(iou[iou >= p/100.0]) / len(iou))
# plot
if show or save_plot:
if save_plot is not None:
save_plot += '-iou'
plot_result(precisions, title,
show=show, save_plot=save_plot, xlabel='iou threshold (x0.01)', ylabel='precision')
return precisions
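# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Assumes the repo layout shown above (official/cv/ADNet/src/utils/precision_plot.py)
# and uses made-up [x, y, w, h] bounding boxes purely for demonstration; with
# show=False and no save path the functions only return the precision curves.
import numpy as np
from src.utils.precision_plot import distance_precision_plot, iou_precision_plot

tracked = np.array([[10., 10., 20., 20.], [12., 11., 20., 20.]])
truth = np.array([[10., 10., 20., 20.], [11., 11., 20., 20.]])
dist_prec = distance_precision_plot(tracked, truth, 'demo', show=False)  # 50 distance thresholds
iou_prec = iou_precision_plot(tracked, truth, 'demo', show=False)        # 100 IoU thresholds
print(dist_prec[20], iou_prec[50])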
| 35.716981 | 119 | 0.658214 |
7fecff4e502e25dcfb1a0010472a0d8f6da1744f
| 1,133 |
py
|
Python
|
Aggregator/agg_zugdaten.py
|
socialdistancingdashboard/virushack
|
6ef69d26c5719d0bf257f4594ed2488dd73cdc40
|
[
"Apache-2.0"
] | 29 |
2020-03-21T00:47:51.000Z
|
2021-07-17T15:50:33.000Z
|
Aggregator/agg_zugdaten.py
|
socialdistancingdashboard/virushack
|
6ef69d26c5719d0bf257f4594ed2488dd73cdc40
|
[
"Apache-2.0"
] | 7 |
2020-03-21T14:04:26.000Z
|
2022-03-02T08:05:40.000Z
|
Aggregator/agg_zugdaten.py
|
socialdistancingdashboard/virushack
|
6ef69d26c5719d0bf257f4594ed2488dd73cdc40
|
[
"Apache-2.0"
] | 13 |
2020-03-21T01:08:08.000Z
|
2020-04-08T17:21:11.000Z
|
import os
import re
import pandas as pd
from datetime import datetime, timedelta, date
# compatibility with ipython
#os.chdir(os.path.dirname(__file__))
import json
import boto3
from pathlib import Path
from coords_to_kreis import coords_convert
import settings
def aggregate(date):
client_s3 = boto3.client("s3")
s3 = boto3.resource('s3')
content_object = s3.Object(settings.BUCKET, "aggdata/live/{}/{}/{}/zugdata.json".format(str(date.year).zfill(4), str(date.month).zfill(2), str(date.day).zfill(2)))
file_content = content_object.get()['Body'].read().decode('utf-8')
json_content = json.loads(file_content)
df = pd.DataFrame(json_content)
df["landkreis"] = coords_convert(df)
print(df.shape)
print(df["landkreis"].unique().shape)
#df["district"] = coords_convert(df)
#print(df.columns)
df.drop(["lon", "lat", 'geometry', "name", "date"], inplace = True, axis = 1)
df = df.set_index("landkreis")
df = 1 - df
df = df.reset_index()
    # aggregate by region
return df.to_dict()
#pd.DataFrame(aggregate(date.today() - timedelta(days = 4)))
| 35.40625 | 167 | 0.68579 |
a1280907fe5372581a6729c778029088ec482c1f
| 3,431 |
py
|
Python
|
deploy/deployctl/subcommands/elasticsearch.py
|
broadinstitute/gnomadjs
|
00da72cdc2cb0753f822c51456ec15147c024a1d
|
[
"MIT"
] | 38 |
2018-02-24T02:33:52.000Z
|
2020-03-03T23:17:04.000Z
|
deploy/deployctl/subcommands/elasticsearch.py
|
broadinstitute/gnomadjs
|
00da72cdc2cb0753f822c51456ec15147c024a1d
|
[
"MIT"
] | 385 |
2018-02-21T16:53:13.000Z
|
2020-03-04T00:52:40.000Z
|
deploy/deployctl/subcommands/elasticsearch.py
|
broadinstitute/gnomadjs
|
00da72cdc2cb0753f822c51456ec15147c024a1d
|
[
"MIT"
] | 13 |
2020-05-01T13:03:54.000Z
|
2022-02-28T13:12:57.000Z
|
import argparse
import os
import subprocess
import sys
import typing
import jinja2
from deployctl.shell import kubectl
def deployment_directory() -> str:
return os.path.realpath(os.path.join(os.path.dirname(__file__), "../../manifests/elasticsearch"))
def render_template_and_apply(
template_path: str, context: typing.Optional[typing.Dict[str, typing.Any]] = None
) -> None:
if not context:
context = {}
with open(template_path) as template_file:
template = jinja2.Template(template_file.read())
manifest = template.render(**context)
kubectl(["apply", "-f", "-"], input=manifest)
def apply_elasticsearch(**kwargs) -> None:
render_template_and_apply(os.path.join(deployment_directory(), "elasticsearch.yaml.jinja2"), kwargs)
render_template_and_apply(os.path.join(deployment_directory(), "elasticsearch.load-balancer.yaml.jinja2"), kwargs)
def get_elasticsearch_cluster(cluster_name: str) -> None:
print(kubectl(["get", "elasticsearch", cluster_name]), end="")
def get_elasticsearch_password(cluster_name: str) -> None:
# ECK creates this secret when the cluster is created.
print(
kubectl(["get", "secret", f"{cluster_name}-es-elastic-user", "-o=go-template={{.data.elastic | base64decode}}"])
)
def load_datasets(cluster_name: str, dataproc_cluster: str, secret: str, datasets: str):
# Matches service name in deploy/manifests/elasticsearch.load-balancer.yaml.jinja2
elasticsearch_load_balancer_ip = kubectl(
["get", "service", f"{cluster_name}-elasticsearch-lb", "--output=jsonpath={.status.loadBalancer.ingress[0].ip}"]
)
subprocess.check_call(
[
sys.argv[0],
"data-pipeline",
"run",
"export_to_elasticsearch",
f"--cluster={dataproc_cluster}",
"--",
f"--host={elasticsearch_load_balancer_ip}",
f"--secret={secret}",
f"--datasets={datasets}",
]
)
def main(argv: typing.List[str]) -> None:
parser = argparse.ArgumentParser(prog="deployctl")
subparsers = parser.add_subparsers()
apply_parser = subparsers.add_parser("apply")
apply_parser.set_defaults(action=apply_elasticsearch)
apply_parser.add_argument("--cluster-name", default="gnomad")
apply_parser.add_argument("--n-ingest-pods", type=int, default=0)
get_parser = subparsers.add_parser("get")
get_parser.set_defaults(action=get_elasticsearch_cluster)
get_parser.add_argument("--cluster-name", default="gnomad")
get_parser = subparsers.add_parser("get-password")
get_parser.set_defaults(action=get_elasticsearch_password)
get_parser.add_argument("--cluster-name", default="gnomad")
load_parser = subparsers.add_parser("load-datasets")
load_parser.set_defaults(action=load_datasets)
load_parser.add_argument("--cluster-name", default="gnomad")
load_parser.add_argument("--dataproc-cluster", required=True)
load_parser.add_argument("--secret", default="gnomad-elasticsearch-password")
load_parser.add_argument("datasets")
args = parser.parse_args(argv)
if "action" not in args:
parser.print_usage()
sys.exit(1)
action = args.action
del args.action
try:
action(**vars(args))
except Exception as err: # pylint: disable=broad-except
print(f"Error: {err}", file=sys.stderr)
sys.exit(1)
| 33.31068 | 120 | 0.686097 |
a1d2649c25a7509b7550a1be1ffdfccaab1a95f9
| 5,407 |
py
|
Python
|
docker/api/api/endpoints/measures.py
|
healthIMIS/aha-kompass
|
7b7cae24502c0c0e5635c587cfef797a93ae02b5
|
[
"MIT"
] | 2 |
2021-03-23T20:32:38.000Z
|
2021-04-21T11:20:12.000Z
|
docker/api/api/endpoints/measures.py
|
healthIMIS/aha-kompass
|
7b7cae24502c0c0e5635c587cfef797a93ae02b5
|
[
"MIT"
] | 4 |
2021-04-19T11:00:55.000Z
|
2021-04-20T08:21:48.000Z
|
docker/api/api/endpoints/measures.py
|
healthIMIS/aha-kompass
|
7b7cae24502c0c0e5635c587cfef797a93ae02b5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Corona-Info-App
# measures endpoint
# © 2020 Tobias Höpp.
# Include dependencies
from flask import request, jsonify
# Include modules
from main import api, db
from models.measures import displayGroup, display, displayGroupHasDisplay
from models.districts import districts
from utils.flexstring import flexstringParse, mergeConfig
import json
# Endpoint definition
@api.route("/data/measures/lk/<KrS>", methods=["GET"])
def measures_lk(KrS):
if request.method == "GET":
d = districts.query.get(KrS)
if(d == None):
return "KrS not found", 404
if(request.args.get("noLKDetails") == "1"):
result = {
"measures" : []
}
else:
result = {
"incidence" : d.incidence,
"color" : d.color,
"name" : d.name,
"categroy": d.category,
"links": d.links,
"measures" : [],
"deactivate_region": d.deactivate_region, # Only for debugging!!!
}
if d.deactivate_region:
defaultGroupsRequest = "select distinct dg1.displayGroup_id from districtHasGroup dg1 where dg1.district_id = :district_id and dg1.is_deleted = False"
else:
defaultGroupsRequest = "select distinct rg1.displayGroup_id from regionHasGroup rg1 where rg1.region_id = :region_id and rg1.is_deleted = False UNION select distinct dg1.displayGroup_id from districtHasGroup dg1 where dg1.district_id = :district_id and dg1.is_deleted = False"
GroupsRequest = "select distinct ggd1.displayGroup_id from displayGroupHasDefault ggd1 where (select count(ggd2.default_id) from displayGroupHasDefault ggd2 where ggd1.displayGroup_id = ggd2.displayGroup_id and ggd2.default_id not in("+defaultGroupsRequest+")) = 0 UNION "+defaultGroupsRequest
ConfigDisplayRequest = "select distinct dgd1.display_id, dgd1.configuration, dgd1.displayGroup_id from displayGroupHasDisplay dgd1 where dgd1.displayGroup_id in("+GroupsRequest+") and dgd1.displayGroup_id not in (select ov.overwrite_id from displayGroupOverwrites ov where ov.displayGroup_id in ("+GroupsRequest+")) ORDER BY (select di.weight from display di where di.id = dgd1.display_id), (select c1.name from display di left join categories c1 on di.category_id = c1.id where di.id = dgd1.display_id)"
daten = db.session.execute(ConfigDisplayRequest, {"region_id":d.region_id,"district_id":d.id})
lastDisplayID = None
lastConfiguration = None
for row in daten:
config = json.loads(row[1])
di = display.query.get(row[0])
if not di:
return "Internal Server Error: display_id '"+str(row[0])+"' selected but not found", 500
if request.args.get("measures") == None or (request.args.get("measures") in ["ofp", "all"] and di.is_OFP) or (request.args.get("measures") in ["detail", "all"] and not di.is_OFP):
merged = False
if di.is_mergable:
if lastDisplayID == di.id:
c = mergeConfig(config,lastConfiguration)
if c:
merged = True
config = c
                    # Merge displays. If the merge is not successful, show both.
languageString = None
languageSubtitle = None
languageCategory = None
                # initialize
if(request.args.get("lang") == "de"):
languageString = di.flexstring_german
languageSubtitle = di.subtitle_german
languageCategory = di.category.name
elif(request.args.get("lang") == "en"):
languageString = di.flexstring_english
languageSubtitle = di.subtitle_english
languageCategory = di.category.name_english
print(str(languageCategory))
#Default German if no argument is given or string for requested language is empty
if languageString in ["", None]:
languageString = di.flexstring_german
if languageSubtitle in ["", None]:
languageSubtitle = di.subtitle_german
if languageCategory in ["", None]:
languageCategory = di.category.name
ok, res, epos = flexstringParse(languageString, config)
if not ok:
return jsonify({"status": "InternalServerError", "error": "flexstringParseError for string '"+languageString+"' with configuration '"+str(config)+"': "+res+" at Position "+str(epos)}), 500
appendend = {
"title" : languageCategory,
"text" : res,
"subtitle" : languageSubtitle,
"display_id" : di.id,
"displayGroup_id": row[2],
"isOFP": di.is_OFP
}
if merged:
result["measures"][-1] = appendend
else:
result["measures"].append(appendend)
lastDisplayID = di.id
lastConfiguration = config
return jsonify(result), 200
#TODO: Use proper join and select instead of iterating
| 51.009434 | 512 | 0.591825 |
62f105101b0379598784d69cb1714d2fd448b2c7
| 237 |
py
|
Python
|
PYTHON/Itertools/product.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
PYTHON/Itertools/product.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
PYTHON/Itertools/product.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from itertools import product
if __name__ == '__main__':
x = list(map(int, (input().split())))
y = list(map(int, (input().split())))
for n in list(product(x, y)):
print(n, end=" ")
print()
| 23.7 | 41 | 0.56962 |
c5657079b338cccc8e5d7010bf817e347f462ced
| 818 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v7_0/setup_account_table_for_expense_claim_type_if_exists.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v7_0/setup_account_table_for_expense_claim_type_if_exists.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v7_0/setup_account_table_for_expense_claim_type_if_exists.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("hr", "doctype", "expense_claim_type")
frappe.reload_doc("hr", "doctype", "expense_claim_account")
if not frappe.db.has_column('Expense Claim Type', 'default_account'):
return
for expense_claim_type in frappe.get_all("Expense Claim Type", fields=["name", "default_account"]):
if expense_claim_type.default_account \
and frappe.db.exists("Account", expense_claim_type.default_account):
doc = frappe.get_doc("Expense Claim Type", expense_claim_type.name)
doc.append("accounts", {
"company": frappe.db.get_value("Account", expense_claim_type.default_account, "company"),
"default_account": expense_claim_type.default_account,
})
doc.flags.ignore_mandatory = True
doc.save(ignore_permissions=True)
| 40.9 | 100 | 0.761614 |
9a9dc3d1911158b36b5e225e7ff4f5208ac444ba
| 5,486 |
py
|
Python
|
data_acquire_store/netease/user_data.py
|
PatrickNgZh/RecommenderSystem
|
0dbd3ac05ca8c1b5948a2605f1a9407f1ec59d39
|
[
"Apache-2.0"
] | 1 |
2020-03-18T12:34:39.000Z
|
2020-03-18T12:34:39.000Z
|
data_acquire_store/netease/user_data.py
|
PatrickNgZh/RecommenderSystem
|
0dbd3ac05ca8c1b5948a2605f1a9407f1ec59d39
|
[
"Apache-2.0"
] | 1 |
2021-06-02T01:30:56.000Z
|
2021-06-02T01:30:56.000Z
|
data_acquire_store/netease/user_data.py
|
PatrickNgZh/RecommenderSystem
|
0dbd3ac05ca8c1b5948a2605f1a9407f1ec59d39
|
[
"Apache-2.0"
] | null | null | null |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# author: humingk
# ----------------------
import time
from concurrent.futures import ThreadPoolExecutor
import config
import sys
from my_tools.database_tool import database_tool
from netease.user_ranklist_songs import user_ranklist_songs
from netease.playlist_songs import playlist_songs
from netease.song_comments import song_comments
from my_tools.logger_tool import loggler_tool
logger = loggler_tool()
class user_data:
"""
    Batch retrieval of user data.
    Popular playlists -> popular songs -> popular comments -> users
    1. User -> listening ranking list -> songs (artists)
    2. User -> liked-music playlist, created playlists, collected playlists -> songs (artists)
"""
def get_playlist_songs(self, playlist_id):
"""
        Fetch the songs of a playlist.
        :param playlist_id: playlist id
:return:
"""
_database_tool = database_tool()
_database_tool.insert_many_playlist([[playlist_id, '', 0, 0, '']])
_database_tool.commit()
_database_tool.close()
playlist_songs().get_playlist_songs_by_playlist_id(playlist_id=playlist_id,
playlist_type=config.normal_playlist,
playlist_songs_max=sys.maxsize)
def get_song_comments(self, song_start, song_count):
"""
        Fetch hot comments of songs (also fills the user and artist tables).
        :param song_start: offset into the song table
        :param song_count: number of songs to read from the song table
:return:
"""
song_list = database_tool().select_list_limit(table="song", start=song_start, count=song_count)
if song_list[0]:
for song in song_list[1]:
song_comments().get_song_comments_hot(song_id=song[0], song_comments_hot_max=1000, thread_count=10,
thread_inteval_time=2)
def get_user_songs(self, user_start, user_count, thread_count=20, thread_inteval_time=5):
"""
        Fetch users' ranking lists and playlists with multiple threads.
        :param user_start: offset into the user table
        :param user_count: number of users to read from the user table
        :param thread_count: number of threads
        :param thread_inteval_time: interval between thread submissions (seconds)
:return:
"""
user_list = database_tool().select_list_limit(table="user", start=user_start, count=user_count)[1]
try:
success_count = 0
_user_ranklist_songs = user_ranklist_songs()
_playlist_songs = playlist_songs()
with ThreadPoolExecutor(thread_count) as executer:
future_list = []
for user in user_list:
future_rank_all = executer.submit(_user_ranklist_songs.get_user_ranklist_songs,
user[0], config.rank_type_all, config.all_rank_max)
future_rank_week = executer.submit(_user_ranklist_songs.get_user_ranklist_songs,
user[0], config.rank_type_week, config.week_rank_max)
future_playlist = executer.submit(_playlist_songs.get_playlist_songs_by_user_id, user[0])
future_list.append(future_rank_all)
future_list.append(future_rank_week)
future_list.append(future_playlist)
time.sleep(thread_inteval_time)
for future in future_list:
if future.result()[0]:
success_count += 1
return True
except Exception as e:
logger.error("get_user_songs failed", "error_type:{},error:{}"
.format(type(e), e))
def get_user_target(self, user, thread_count=20, thread_inteval_time=5):
"""
        Fetch a single user's ranking lists and playlists with multiple threads.
        :param user: user id
        :param thread_count: number of threads
        :param thread_inteval_time: interval between thread submissions (seconds)
:return:
"""
try:
success_count = 0
_user_ranklist_songs = user_ranklist_songs()
_playlist_songs = playlist_songs()
with ThreadPoolExecutor(thread_count) as executer:
future_list = []
if user:
future_rank_all = executer.submit(_user_ranklist_songs.get_user_ranklist_songs,
user, config.rank_type_all, config.all_rank_max)
future_rank_week = executer.submit(_user_ranklist_songs.get_user_ranklist_songs,
user, config.rank_type_week, config.week_rank_max)
future_playlist = executer.submit(_playlist_songs.get_playlist_songs_by_user_id, user)
future_list.append(future_rank_all)
future_list.append(future_rank_week)
future_list.append(future_playlist)
time.sleep(thread_inteval_time)
for future in future_list:
if future.result()[0]:
success_count += 1
return True
except Exception as e:
logger.error("get_user_songs failed", "error_type:{},error:{}"
.format(type(e), e))
if __name__ == '__main__':
u = user_data()
    # playlist of songs with more than 100,000 comments
# u.get_playlist_songs(376493212)
    # playlist of songs with more than 50,000 comments
# u.get_playlist_songs(455717860)
# u.get_song_comments(song_start=100, song_count=200)
    # start at offset 200, 1000 users
# u.get_user_songs(user_start=200, user_count=1000)
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    # long running
    u.get_user_target(user=1356839524)
    # do something other
    end = time.perf_counter()
    print(end - start)
| 38.097222 | 115 | 0.585308 |
b14c7cc6a5baabd76609ac9139f1e6d2ff0ca70a
| 170 |
py
|
Python
|
twitoff/__init__.py
|
Pdugovich/TwitOff
|
9c06677ed763cf5d14fec53a10024126792681c4
|
[
"MIT"
] | null | null | null |
twitoff/__init__.py
|
Pdugovich/TwitOff
|
9c06677ed763cf5d14fec53a10024126792681c4
|
[
"MIT"
] | 1 |
2021-06-02T00:47:17.000Z
|
2021-06-02T00:47:17.000Z
|
twitoff/__init__.py
|
Pdugovich/TwitOff
|
9c06677ed763cf5d14fec53a10024126792681c4
|
[
"MIT"
] | null | null | null |
"""Entry point for TwitOff."""
from .app import create_app
#APP is a global variable
APP = create_app()
# run this in terminal with
# set FLASK_APP=TWITOFF
# flask run
| 17 | 30 | 0.729412 |
6207cdeeb519029022e3cd898bb388fd2e0c2afe
| 566 |
py
|
Python
|
Packs/DeveloperTools/Integrations/CustomIndicatorDemo/CustomIndicatorDemo_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/DeveloperTools/Integrations/CustomIndicatorDemo/CustomIndicatorDemo_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/DeveloperTools/Integrations/CustomIndicatorDemo/CustomIndicatorDemo_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from CustomIndicatorDemo import Client, custom_indicator_creation
def test_custom_indicator_test():
"""
Given
- Dummy result
When
- dummy client is passed
Then
- return enriched indicator and result
"""
client = Client(base_url='some_mock_url', verify=False)
res = custom_indicator_creation(client)
indicator = res.indicator
assert indicator.data['param1'] == 'value1'
assert indicator.data['param2'] == 'value2'
assert indicator.value == 'custom_value'
assert res.outputs['dummy'] == 'test'
| 28.3 | 65 | 0.674912 |
6562af1cc42aa3ea520841a9fe8ecb554a7b021b
| 775 |
py
|
Python
|
misc/update_portfolio.py
|
Floozutter/coursework
|
244548f415553f058098cae84ccdd4ce3f58c245
|
[
"Unlicense"
] | null | null | null |
misc/update_portfolio.py
|
Floozutter/coursework
|
244548f415553f058098cae84ccdd4ce3f58c245
|
[
"Unlicense"
] | null | null | null |
misc/update_portfolio.py
|
Floozutter/coursework
|
244548f415553f058098cae84ccdd4ce3f58c245
|
[
"Unlicense"
] | null | null | null |
"""
A function to update a bad representation of a portfolio, for Rayan.
"Write a function update_portfolio that takes a portfolio (as a list of tuples), a ticker, and the
number of shares. The function should return the updated portfolio. A positive value for the
number of shares means 'buy', a negative value for the number of shares means 'sell'.
"""
Portfolio = list[tuple[str, int]] # why not just use a dictionary...
def update_portfolio(portfolio: Portfolio, ticker: str, shares: int) -> Portfolio:
d = dict(portfolio)
d[ticker] += shares
return list(d.items())
assert update_portfolio([("AAPL", 5), ("AMZN", 3)], "AAPL", -4) == [("AAPL", 1), ("AMZN", 3)]
assert update_portfolio([("AAPL", 5), ("AMZN", 3)], "AAPL", +2) == [("AAPL", 7), ("AMZN", 3)]
| 43.055556 | 98 | 0.673548 |
028e7f4cad8c5de957d700cf6e535d684c530b2e
| 3,203 |
py
|
Python
|
src/ztc/php/fpm.py
|
magistersart/ZTC_fork
|
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
|
[
"PostgreSQL"
] | null | null | null |
src/ztc/php/fpm.py
|
magistersart/ZTC_fork
|
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
|
[
"PostgreSQL"
] | null | null | null |
src/ztc/php/fpm.py
|
magistersart/ZTC_fork
|
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
|
[
"PostgreSQL"
] | null | null | null |
#!/usr/bin/env python
# pylint: disable = W0613
"""
<description>
This file is part of ZTC and distributed under the same license.
http://bitbucket.org/rvs/ztc/
Copyright (c) 2011 Vladimir Rusinov <[email protected]>
"""
import time
from ztc.check import ZTCCheck, CheckFail
import ztc.lib.flup_fcgi_client as fcgi_client
class PHPFPMCheck(ZTCCheck):
""" PHP FPM (FastCGI Process Manager) check class """
name = "php-fpm"
OPTPARSE_MIN_NUMBER_OF_ARGS = 1
OPTPARSE_MAX_NUMBER_OF_ARGS = 2
def _myinit(self):
self.fcgi_port = self.config.get('fpm_port', 9000)
self.fcgi_host = self.config.get('fpm_host', '127.0.0.1')
def _get(self, metric, *arg, **kwarg):
if metric == 'ping':
return self.ping
elif metric == 'status':
m = arg[0]
return self.get_status(m)
else:
raise CheckFail("uncknown metric")
def _load_page(self, url):
""" load fastcgi page """
try:
fcgi = fcgi_client.FCGIApp(host=self.fcgi_host,
port=self.fcgi_port)
env = {
'SCRIPT_FILENAME': url,
'QUERY_STRING': '',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': url,
'REQUEST_URI': url,
'GATEWAY_INTERFACE': 'CGI/1.1',
'SERVER_SOFTWARE': 'ztc',
'REDIRECT_STATUS': '200',
'CONTENT_TYPE': '',
'CONTENT_LENGTH': '0',
#'DOCUMENT_URI': url,
                #'DOCUMENT_ROOT': '/',  # duplicate key removed; the value below is used
                'DOCUMENT_ROOT': '/var/www/',
#'SERVER_PROTOCOL' : ???
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '123',
'SERVER_ADDR': self.fcgi_host,
'SERVER_PORT': str(self.fcgi_port),
'SERVER_NAME': self.fcgi_host}
ret = fcgi(env)
return ret
except:
self.logger.exception('fastcgi load failed')
return '500', [], '', ''
@property
def ping(self):
""" calls php-fpm ping resource """
st = time.time()
code, headers, out, err = self._load_page('/ping') # @UnusedVariable
if code.startswith('200') and out == 'pong':
return time.time() - st
else:
self.logger.error('ping: got response, but not correct')
return 0
def get_status(self, metric):
""" get php-fpm status metric """
metric = metric.replace('_', ' ')
page = self.get_status_page()
if not page:
raise CheckFail("unable to get status page")
for line in page.splitlines():
if line.startswith(metric):
return line.split()[-1]
# no such metric found
raise CheckFail("no such metric found")
def get_status_page(self):
""" return php-ftm status page text """
code, headers, out, err = self._load_page('/status') # @UnusedVariable
if code.startswith('200'):
return out
else:
            self.logger.error('status: got response, but not correct')
return None
| 31.712871 | 79 | 0.53606 |
f303fc453e5bd388b82d179080888aac435cbde0
| 589 |
py
|
Python
|
library/tests/test_buttons.py
|
ptphp/PtServer
|
d590360f853a64e989ba52591548b8a67390f27c
|
[
"BSD-3-Clause"
] | 1 |
2017-08-09T23:15:15.000Z
|
2017-08-09T23:15:15.000Z
|
library/tests/test_buttons.py
|
ptphp/PtServer
|
d590360f853a64e989ba52591548b8a67390f27c
|
[
"BSD-3-Clause"
] | null | null | null |
library/tests/test_buttons.py
|
ptphp/PtServer
|
d590360f853a64e989ba52591548b8a67390f27c
|
[
"BSD-3-Clause"
] | null | null | null |
__author__ = 'Amy'
from library.TestApp import BaseTestCase
from library.core.controls import NginxControl,PhpControl
class TestApp(BaseTestCase):
def setUp(self):
super(TestApp, self).setUp()
def tearDown(self):
super(TestApp, self).tearDown()
def test_nginx_start(self):
nginx = NginxControl(self.win)
nginx._do_stop()
nginx._do_start()
def test_nginx_reload(self):
nginx = NginxControl(self.win)
nginx._do_reload()
def test_php_reload(self):
php = PhpControl(self.win)
php._do_restart()
| 22.653846 | 57 | 0.660441 |
b88e0c4b99797be5f9c2480abf50aefa87a0f41e
| 9,427 |
py
|
Python
|
KassenberichtGenerator.py
|
EEaglEEyEE/JTLKassenberichtGenerator
|
52d425f6e35758364b56de7512e05ce4d1d056e8
|
[
"WTFPL"
] | null | null | null |
KassenberichtGenerator.py
|
EEaglEEyEE/JTLKassenberichtGenerator
|
52d425f6e35758364b56de7512e05ce4d1d056e8
|
[
"WTFPL"
] | null | null | null |
KassenberichtGenerator.py
|
EEaglEEyEE/JTLKassenberichtGenerator
|
52d425f6e35758364b56de7512e05ce4d1d056e8
|
[
"WTFPL"
] | null | null | null |
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# Version 2, December 2004
#
# Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
#
# Everyone is permitted to copy and distribute verbatim or modified
# copies of this license document, and changing it is allowed as long
# as the name is changed.
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
# 0. You just DO WHAT THE FUCK YOU WANT TO.
#
import os
import pyodbc
import ssl
import smtplib
import email
from fpdf import FPDF
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# PDF table layout - optional
Titel = 'Kassenbericht - Firma - Kasse'
Bonnummer_width = 22
Anzahl_width = 12
Artikelname_width = 105
Preis_width = 15
Rabatt_width = 16
Uhrzeit_width = 20
border = 1
header_row_height = 10
# PDF - required
path = 'C:\Kassenberichte\\'  # directory where the PDF is saved
Kassenname = 'Kasse'  # name of the cash register
# Mail - required
sender_email = "Sender e-mail"
receiver_email = "Recipient e-mail"
password = "Sender password"
mail_server = "smtp.server.com"
port = 465
# SQL: connection setup - required
conn = pyodbc.connect('Driver={ODBC Driver 17 for SQL Server};'
                      'Server=Serveradresse,Port\Instanz;'  # enter SQL server, port and instance
                      'Database=Datenbankname;'  # enter the database name
                      'UID=user;'  # enter the SQL user
                      'PWD=Password;')  # enter the SQL password
cursor = conn.cursor()
# SQL: Query
cursor.execute('SELECT b.cInetBestellNr, '
'cast(bp.nAnzahl as float), '
'bp.cString, '
'ROUND(cast((bp.fVKNetto * 1.16) as float), 2), '
'convert(varchar, b.dErstellt, 8) '
'FROM [eazybusiness].[dbo].[tBestellung] AS b '
'INNER JOIN [eazybusiness].[dbo].[tbestellpos] AS bp '
'ON bp.tBestellung_kBestellung = b.kBestellung '
'INNER JOIN [eazybusiness].[dbo].[tPlattform] AS p '
'ON b.nPlatform = p.nPlattform '
'Where p.nPlattform = 7 '
'AND b.dErstellt >= cast (GETDATE() as DATE)'
'AND bp.cString <> \'Selbstabholer\' '
)
# 'Where p.nPlattform = 7 ' # first register, JTL-POS
# 'Where p.nPlattform = 151 ' # first register, LS-POS (subsequent registers continue as 152, 153, ...)
# 'AND b.dErstellt >= cast (GETDATE() as DATE)' # today
# 'AND b.dErstellt >= dateadd(day,-1, cast(getdate() as date))' # yesterday through today
data = list(cursor)
Umsatz = 0
AnzahlVerkaufteArtikel = 0
i = 0
n = len(data)
# Discount handling
while i < n:
data[i] = list(data[i])
data[i].insert(4, '')
row = data[i]
    # discount calculation; use data[i - 1][4] = row[3] if the discount should be shown as an absolute price instead
if "%" in row[2]:
data[i - 1][4] = str(round(row[3] / data[i - 1][3] / data[i - 1][1] * 100)) + '%'
del data[i]
else:
i = i + 1
AnzahlVerkaufteArtikel = AnzahlVerkaufteArtikel + 1
n = len(data)
Umsatz = Umsatz + row[3]
print("Umsatz: " + str(round(Umsatz, 2)))
print("AnzahlVerkaufteArtikel: " + str(AnzahlVerkaufteArtikel))
# SQL: date
Datum = "Fehler"
Datum = cursor.execute('SELECT CAST(GETDATE() AS Date)').fetchone()[0]
print("Datum: " + str(Datum))
# SQL: number of sales (AnzahlVerkaeufe)
AnzahlVerkaeufe = "Fehler"
AnzahlVerkaeufe = cursor.execute('SELECT COUNT(DISTINCT b.cInetBestellNr) '
'FROM [eazybusiness].[dbo].[tBestellung] AS b '
'INNER JOIN [eazybusiness].[dbo].[tPlattform] AS p '
'ON b.nPlatform = p.nPlattform '
'Where p.nPlattform = 7 '
'AND b.dErstellt >= cast (GETDATE() as DATE) '
).fetchone()[0]
print("AnzahlVerkaeufe: " + str(AnzahlVerkaeufe))
# PDF
class PDF(FPDF):
# Page Header
def header(self):
self.set_font("Helvetica", size=18)
self.cell(185, 25, txt=Titel, border=0, align="C")
self.ln(20)
self.set_font("Helvetica", size=10)
self.cell(10, 20, txt="", border=0, align="L")
self.cell(130, 20, txt="Umsatz:" + str(round(Umsatz, 2)), border=0, align="L")
self.cell(60, 20, txt="Datum:" + str(Datum), border=0, align="L")
self.ln(10)
self.cell(10, 20, txt="", border=0, align="L")
self.cell(130, 20, txt="Anzahl der verkauften Artikel:" + str(AnzahlVerkaufteArtikel), border=0, align="L")
self.cell(60, 20, txt="Anzahl der Verkäufe:" + str(AnzahlVerkaeufe), border=0, align="L")
self.ln(18)
self.cell(Bonnummer_width, header_row_height, txt="Bonnummer", border=border, align="C")
self.cell(Anzahl_width, header_row_height, txt="Menge", border=border, align="C")
self.cell(Artikelname_width, header_row_height, txt="Artikelname", border=border, align="L")
self.cell(Preis_width, header_row_height, txt="Preis", border=border, align="C")
self.cell(Rabatt_width, header_row_height, txt="Rabatt", border=border, align="C")
self.cell(Uhrzeit_width, header_row_height, txt="Uhrzeit", border=border, align="C")
self.ln(header_row_height)
# Page footer
def footer(self):
# Position at 1.5 cm from bottom
self.set_y(-15)
# Arial italic 8
self.set_font('Helvetica', 'I', 8)
# Page number
self.cell(0, 10, 'Seite ' + str(self.page_no()) + '/{nb}', 0, 0, 'C')
pdf = PDF()
pdf.alias_nb_pages()
pdf.add_page()
# PDF Body
pdf.set_font("Helvetica", size=10)
row_height = pdf.font_size
spacing = 2.1
for row in data:
col_width = Bonnummer_width
align = "C"
pdf.set_fill_color(255)
row[3] = "{:.2f}".format(row[3]) # Der Preis wird mit zwei Nachkommastellen angezeigt
row[1] = int(row[1]) # die Menge wird ohne Kommastellen angezeigt
# Die Nummer aus der Bonnummer wird extrahiert
count = int(row[0][4:])
for item in row:
        # re-encode for fpdf (windows-1252 -> latin-1)
        if isinstance(item, str):
            item = item.encode('windows-1252').decode('latin-1')
        # colour highlighting of cells
        if (count % 2 == 0):
            pdf.set_fill_color(230)  # every other receipt is shaded light grey for readability
        if row[1] < 0:
            pdf.set_fill_color(255, 170, 170)  # negative quantities (returns) are highlighted in red
        # fill cells
pdf.cell(col_width, row_height * spacing, txt=str(item), border=border, align=align, fill=1)
if col_width == Bonnummer_width:
col_width = Anzahl_width
align = "C"
elif col_width == Anzahl_width:
col_width = Artikelname_width
align = "L"
elif col_width == Artikelname_width:
col_width = Preis_width
align = "R"
elif col_width == Preis_width:
col_width = Rabatt_width
align = "R"
elif col_width == Rabatt_width:
col_width = Uhrzeit_width
align = "R"
pdf.ln(row_height * spacing)
# write the PDF file
filename = 'Kassenbericht_' + Kassenname + '-' + str(Datum) + '.pdf'
pdf.output(str(path + filename), 'F')
# Mail
subject = 'Kassenbericht_' + Kassenname + '-' + str(Datum)
body = "Dies ist der automatisch generierte Kassenbericht der Kasse " + Kassenname + "vom " + str(Datum)
# Create a multipart message and set headers
message = MIMEMultipart()
message["From"] = sender_email
message["To"] = receiver_email
message["Subject"] = subject
message["Bcc"] = receiver_email # Recommended for mass emails
# Add body to email
message.attach(MIMEText(body, "plain"))
# Open PDF file in binary mode
with open(path + filename, "rb") as attachment:
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
# Add attachment to message and convert message to string
message.attach(part)
text = message.as_string()
# Log in to server using secure context and send email
# print(os.getenv('HOMEPATH'))
# print(os.getenv('LOCALAPPDATA'))
# context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) # Use this if you have certificate issues
# context.load_verify_locations(os.getenv('LOCALAPPDATA')+'\\Programs\\Python\\Python38\\Lib\\site-packages\\pip\\_vendor\\certifi\\cacert.pem') # Use this if you have certificate issues
context = ssl.create_default_context() # Dont't use this if you have certificate issues
with smtplib.SMTP_SSL(mail_server, port, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, text)
| 35.980916 | 188 | 0.610693 |
b217a42354efe7142e427e945e402d0af8cc4703
| 4,490 |
py
|
Python
|
zencad/geom/operations.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | 5 |
2018-04-11T14:11:40.000Z
|
2018-09-12T19:03:36.000Z
|
zencad/geom/operations.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
zencad/geom/operations.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
from zencad.lazifier import *
from zencad.geom.shape import Shape, nocached_shape_generator, shape_generator
from OCC.Core.BRepExtrema import BRepExtrema_DistShapeShape
from OCC.Core.BRepFilletAPI import BRepFilletAPI_MakeChamfer, BRepFilletAPI_MakeFillet, BRepFilletAPI_MakeFillet2d
from OCC.Core.BRep import BRep_Tool  # needed for BRep_Tool.Triangulation below
from OCC.Core.BRepMesh import BRepMesh_IncrementalMesh
from OCC.Core.TopAbs import TopAbs_REVERSED
from OCC.Core.TopLoc import TopLoc_Location
from zencad.geom.near import _near_vertex
from zencad.util import *
import itertools
def _restore_shapetype(shp):
if len(shp.solids()) == 1:
return shp.solids()[0]
if len(shp.shells()) == 1:
return shp.shells()[0]
elif len(shp.faces()) == 1:
return shp.faces()[0]
elif len(shp.wires()) == 1:
return shp.wires()[0]
elif len(shp.edges()) == 1:
return shp.edges()[0]
return shp
@lazy.lazy(cls=shape_generator)
def restore_shapetype(shp):
return _restore_shapetype(shp)
def _fillet(shp, r, refs=None):
if (shp.shapetype() == "face"):
return _fillet2d(shp, r, refs)
if refs:
refs = points(refs)
if shp.is_solid() or shp.is_compound() or shp.is_compsolid():
mk = BRepFilletAPI_MakeFillet(shp.Shape())
if refs:
for p in refs:
minimum = float("inf")
vtx = p.Vtx()
for edg in shp.edges():
extrema = BRepExtrema_DistShapeShape(edg.Edge(), vtx)
if minimum > extrema.Value():
ret = edg
minimum = extrema.Value()
mk.Add(r, ret.Edge())
else:
for edg in shp.edges():
mk.Add(r, edg.Edge())
return Shape(mk.Shape())
else:
raise Exception("Fillet argument has unsuported type.")
def _chamfer(shp, r, refs=None):
if refs:
refs = points(refs)
if shp.is_solid() or shp.is_compound() or shp.is_compsolid():
mk = BRepFilletAPI_MakeChamfer(shp.Shape())
if refs:
for p in refs:
minimum = float("inf")
vtx = p.Vtx()
for edg in shp.edges():
extrema = BRepExtrema_DistShapeShape(edg.Edge(), vtx)
if minimum > extrema.Value():
ret = edg
minimum = extrema.Value()
mk.Add(r, ret.Edge())
else:
for edg in shp.edges():
mk.Add(r, edg.Edge())
return Shape(mk.Shape())
else:
raise Exception("Fillet argument has unsuported type.")
@lazy.lazy(cls=shape_generator)
def chamfer(shp, r, refs=None):
return _chamfer(shp, r, refs)
@lazy.lazy(cls=shape_generator)
def fillet(shp, r, refs=None):
return _fillet(shp, r, refs)
def _fillet2d(shp, r, refs=None):
mk = BRepFilletAPI_MakeFillet2d(shp.Face())
if refs is None:
refs = shp.vertices()
for p in refs:
mk.AddFillet(_near_vertex(shp, p).Vertex(), r)
return Shape(mk.Shape())
@lazy.lazy(cls=shape_generator)
def fillet2d(shp, r, refs=None):
return _fillet2d(shp, r, refs)
def _triangulate_face(shp, deflection):
mesh = BRepMesh_IncrementalMesh(shp.Shape(), deflection)
reverse_orientation = shp.Face().Orientation() == TopAbs_REVERSED
L = TopLoc_Location()
triangulation = BRep_Tool.Triangulation(shp.Face(), L)
Nodes = triangulation.Nodes()
Triangles = triangulation.Triangles()
triangles = []
for i in range(1, triangulation.NbTriangles() + 1):
tri = Triangles(i)
a, b, c = tri.Get()
if reverse_orientation:
triangles.append((b-1, a-1, c-1))
else:
triangles.append((a-1, b-1, c-1))
nodes = []
for i in range(1, triangulation.NbNodes() + 1):
nodes.append(point3(Nodes(i)))
return nodes, triangles
@lazy.lazy(cls=shape_generator)
def triangulate_face(shp, deflection):
return _triangulate_face(shp, deflection)
def _triangulate(shp, deflection):
results = []
nodes = []
triangles = []
for f in shp.faces():
results.append(_triangulate_face(f, deflection))
for r in results:
nsize = len(nodes)
nodes.extend(r[0])
for t in r[1]:
triangles.append([t[0]+nsize, t[1]+nsize, t[2]+nsize])
return nodes, triangles
@lazy
def triangulate(shp, deflection):
return _triangulate(shp, deflection)
| 24.67033 | 114 | 0.599555 |
a23480f6f46a5826fae88d710b9fa71492bcb71f
| 425 |
py
|
Python
|
LeetCode_problems/Array Partition 1/solution(1).py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 165 |
2020-10-03T08:01:11.000Z
|
2022-03-31T02:42:08.000Z
|
LeetCode_problems/Array Partition 1/solution(1).py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 383 |
2020-10-03T07:39:11.000Z
|
2021-11-20T07:06:35.000Z
|
LeetCode_problems/Array Partition 1/solution(1).py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 380 |
2020-10-03T08:05:04.000Z
|
2022-03-19T06:56:59.000Z
|
# We first sort the array, then traverse it with a step of 2 and sum those elements,
# which picks the minimum value of each pair.
# For example, [2,1,3,4] sorts to [1,2,3,4]; summing 1 and 3 gives the required answer.
class Solution:
def arrayPairSum(self, nums: List[int]) -> int:
nums=sorted(nums)
s=0
for i in range(0,len(nums),2):
s=s+nums[i]
return s
| 42.5 | 134 | 0.632941 |
02767bacd9c58666bc909df1fc25b4195a0a01d3
| 34,476 |
py
|
Python
|
Packs/CortexXDR/Integrations/XQLQueryingEngine/XQLQueryingEngine.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CortexXDR/Integrations/XQLQueryingEngine/XQLQueryingEngine.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CortexXDR/Integrations/XQLQueryingEngine/XQLQueryingEngine.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import copy
import gzip
import hashlib
import secrets
import string
import traceback
from typing import Any, Dict, Tuple
import requests
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
DEFAULT_LIMIT = 100
''' CLIENT CLASS '''
class Client(BaseClient):
"""Client class to interact with the service API
This Client implements API calls, and does not contain any XSOAR logic.
Should only do requests and return data.
It inherits from BaseClient defined in CommonServer Python.
Most calls use _http_request() that handles proxy, SSL verification, etc.
For this implementation, no special attributes defined
"""
def start_xql_query(self, data: dict) -> str:
res = self._http_request(method='POST', url_suffix='/xql/start_xql_query', json_data=data)
execution_id = res.get('reply', "")
return execution_id
def get_xql_query_results(self, data: dict) -> dict:
res = self._http_request(method='POST', url_suffix='/xql/get_query_results', json_data=data)
query_results = res.get('reply', "")
return query_results
def get_query_result_stream(self, data: dict) -> bytes:
res = self._http_request(method='POST', url_suffix='/xql/get_query_results_stream', json_data=data,
resp_type='response')
return res.content
def get_xql_quota(self, data: dict) -> dict:
res = self._http_request(method='POST', url_suffix='/xql/get_quota', json_data=data)
return res
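# --- Illustrative usage of the client above (editor's sketch, not part of the
# original integration). The payload keys ('request_data', 'query', 'query_id')
# and the 'status'/'PENDING' fields are assumptions made for the example; the
# real Cortex XDR API contract may differ.
def run_query_sketch(client: Client, query: str) -> dict:
    import time
    execution_id = client.start_xql_query({'request_data': {'query': query}})  # assumed payload shape
    while True:
        results = client.get_xql_query_results({'request_data': {'query_id': execution_id}})  # assumed payload shape
        if results.get('status') != 'PENDING':  # assumed status field
            return results
        time.sleep(10)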
# =========================================== Built-In Queries Helpers ===========================================#
def wrap_list_items_in_double_quotes(string_of_argument: str):
"""receive a string of arguments and return a string with each argument wrapped in double quotes.
example:
string_of_argument: '12345678, 87654321'
output: '"12345678","87654321"'
string_of_argument: ''
output: '""'
Args:
string_of_argument (str): The string s of_argument to format.
Returns:
str: The new formatted string
"""
if not string_of_argument:
string_of_argument = ''
list_of_args = argToList(string_of_argument) if string_of_argument != '' else ['']
return ','.join(f'"{item}"' for item in list_of_args)
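# Quick illustration of the helper above (editor's addition), mirroring the
# docstring examples:
assert wrap_list_items_in_double_quotes('12345678, 87654321') == '"12345678","87654321"'
assert wrap_list_items_in_double_quotes('') == '""'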
def get_file_event_query(endpoint_ids: str, args: dict) -> str:
"""Create the file event query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
    Returns:
        str: The created query.
"""
file_sha256_list = args.get('file_sha256', '')
if not file_sha256_list:
raise DemistoException('Please provide a file_sha256 argument.')
file_sha256_list = wrap_list_items_in_double_quotes(file_sha256_list)
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = FILE and action_file_sha256
in ({file_sha256_list})| fields agent_hostname, agent_ip_addresses, agent_id, action_file_path, action_file_sha256,
actor_process_file_create_time'''
def get_process_event_query(endpoint_ids: str, args: dict) -> str:
"""Create the process event query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
process_sha256_list = args.get('process_sha256', '')
if not process_sha256_list:
raise DemistoException('Please provide a process_sha256 argument.')
process_sha256_list = wrap_list_items_in_double_quotes(process_sha256_list)
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = PROCESS and
action_process_image_sha256 in ({process_sha256_list}) | fields agent_hostname, agent_ip_addresses, agent_id,
action_process_image_sha256, action_process_image_name,action_process_image_path, action_process_instance_id,
action_process_causality_id, action_process_signature_vendor, action_process_signature_product,
action_process_image_command_line, actor_process_image_name, actor_process_image_path, actor_process_instance_id,
actor_process_causality_id'''
def get_dll_module_query(endpoint_ids: str, args: dict) -> str:
"""Create the DLL module query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
loaded_module_sha256 = args.get('loaded_module_sha256', '')
if not loaded_module_sha256:
raise DemistoException('Please provide a loaded_module_sha256 argument.')
loaded_module_sha256 = wrap_list_items_in_double_quotes(loaded_module_sha256)
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = LOAD_IMAGE and
action_module_sha256 in ({loaded_module_sha256})| fields agent_hostname, agent_ip_addresses, agent_id,
actor_effective_username, action_module_sha256, action_module_path, action_module_file_info,
action_module_file_create_time, actor_process_image_name, actor_process_image_path, actor_process_command_line,
actor_process_image_sha256, actor_process_instance_id, actor_process_causality_id'''
def get_network_connection_query(endpoint_ids: str, args: dict) -> str:
"""Create the network connection query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
remote_ip_list = args.get('remote_ip', '')
if not remote_ip_list:
raise DemistoException('Please provide a remote_ip argument.')
remote_ip_list = wrap_list_items_in_double_quotes(remote_ip_list)
local_ip_filter = ''
if args.get('local_ip'):
local_ip_list = wrap_list_items_in_double_quotes(args.get('local_ip', ''))
local_ip_filter = f'and action_local_ip in({local_ip_list})'
port_list = args.get('port')
port_list_filter = f'and action_remote_port in({port_list})' if port_list else ''
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = STORY
{local_ip_filter} and action_remote_ip in({remote_ip_list}) {port_list_filter}|
fields agent_hostname, agent_ip_addresses, agent_id, actor_effective_username, action_local_ip, action_remote_ip,
action_remote_port, dst_action_external_hostname, action_country, actor_process_image_name, actor_process_image_path,
actor_process_command_line, actor_process_image_sha256, actor_process_instance_id, actor_process_causality_id'''
def get_registry_query(endpoint_ids: str, args: dict) -> str:
"""Create the registry query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
reg_key_name = args.get('reg_key_name', '')
if not reg_key_name:
raise DemistoException('Please provide a reg_key_name argument.')
reg_key_name = wrap_list_items_in_double_quotes(reg_key_name)
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = REGISTRY and
action_registry_key_name in ({reg_key_name}) | fields agent_hostname, agent_id, agent_ip_addresses, agent_os_type,
agent_os_sub_type, event_type, event_sub_type, action_registry_key_name, action_registry_value_name,
action_registry_data'''
def get_event_log_query(endpoint_ids: str, args: dict) -> str:
"""Create the event log query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
event_id = args.get('event_id', '')
if not event_id:
        raise DemistoException('Please provide an event_id argument.')
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = EVENT_LOG and
action_evtlog_event_id in ({event_id}) | fields agent_hostname, agent_id, agent_ip_addresses, agent_os_type,
agent_os_sub_type, action_evtlog_event_id, event_type, event_sub_type, action_evtlog_message,
action_evtlog_provider_name'''
def get_dns_query(endpoint_ids: str, args: dict) -> str:
"""Create the DNS query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
if not args.get('external_domain') and not args.get('dns_query'):
raise DemistoException('Please provide at least one of the external_domain, dns_query arguments.')
external_domain_list = wrap_list_items_in_double_quotes(args.get('external_domain', ''))
dns_query_list = wrap_list_items_in_double_quotes(args.get('dns_query', ''))
return f'''dataset = xdr_data | filter (agent_id in ({endpoint_ids}) and event_type = STORY) and
(dst_action_external_hostname in ({external_domain_list}) or dns_query_name in ({dns_query_list}))| fields
agent_hostname, agent_id, agent_ip_addresses, agent_os_type, agent_os_sub_type, action_local_ip, action_remote_ip,
action_remote_port, dst_action_external_hostname, dns_query_name, action_app_id_transitions, action_total_download,
action_total_upload, action_country, action_as_data, os_actor_process_image_path, os_actor_process_command_line,
os_actor_process_instance_id, os_actor_process_causality_id'''
def get_file_dropper_query(endpoint_ids: str, args: dict) -> str:
"""Create the file dropper query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
if not args.get('file_path') and not args.get('file_sha256'):
raise DemistoException('Please provide at least one of the file_path, file_sha256 arguments.')
file_path_list = wrap_list_items_in_double_quotes(args.get('file_path', ''))
file_sha256_list = wrap_list_items_in_double_quotes(args.get('file_sha256', ''))
return f'''dataset = xdr_data | filter (agent_id in ({endpoint_ids}) and event_type = FILE and event_sub_type in (
FILE_WRITE, FILE_RENAME)) and (action_file_path in ({file_path_list}) or action_file_sha256 in ({file_sha256_list})) |
fields agent_hostname, agent_ip_addresses, agent_id, action_file_sha256, action_file_path, actor_process_image_name,
actor_process_image_path, actor_process_image_path, actor_process_command_line, actor_process_signature_vendor,
actor_process_signature_product, actor_process_image_sha256, actor_primary_normalized_user,
os_actor_process_image_path, os_actor_process_command_line, os_actor_process_signature_vendor,
os_actor_process_signature_product, os_actor_process_image_sha256, os_actor_effective_username,
causality_actor_remote_host,causality_actor_remote_ip'''
def get_process_instance_network_activity_query(endpoint_ids: str, args: dict) -> str:
"""Create the process instance networks activity query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
process_instance_id_list = args.get('process_instance_id', '')
if not process_instance_id_list:
raise DemistoException('Please provide a process_instance_id argument.')
process_instance_id_list = wrap_list_items_in_double_quotes(process_instance_id_list)
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = NETWORK and
actor_process_instance_id in ({process_instance_id_list}) | fields agent_hostname, agent_ip_addresses, agent_id,
action_local_ip, action_remote_ip, action_remote_port, dst_action_external_hostname, dns_query_name,
action_app_id_transitions, action_total_download, action_total_upload, action_country, action_as_data,
actor_process_image_sha256, actor_process_image_name , actor_process_image_path, actor_process_signature_vendor,
actor_process_signature_product, actor_causality_id, actor_process_image_command_line, actor_process_instance_id'''
def get_process_causality_network_activity_query(endpoint_ids: str, args: dict) -> str:
"""Create the process causality network activity query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
process_causality_id_list = args.get('process_causality_id', '')
if not process_causality_id_list:
raise DemistoException('Please provide a process_causality_id argument.')
process_causality_id_list = wrap_list_items_in_double_quotes(process_causality_id_list)
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = NETWORK
and actor_process_causality_id in ({process_causality_id_list}) | fields agent_hostname, agent_ip_addresses,agent_id,
action_local_ip, action_remote_ip, action_remote_port, dst_action_external_hostname,dns_query_name,
action_app_id_transitions, action_total_download, action_total_upload, action_country,action_as_data,
actor_process_image_sha256, actor_process_image_name , actor_process_image_path,actor_process_signature_vendor,
actor_process_signature_product, actor_causality_id,actor_process_image_command_line, actor_process_instance_id'''
# =========================================== Helper Functions ===========================================#
def convert_timeframe_string_to_json(time_to_convert: str) -> Dict[str, int]:
"""Convert a timeframe string to a json requred for XQL queries.
Args:
time_to_convert (str): The time frame string to convert (supports seconds, minutes, hours, days, months, years, between).
Returns:
dict: The timeframe parameters in JSON.
"""
try:
time_to_convert_lower = time_to_convert.strip().lower()
if time_to_convert_lower.startswith('between '):
tokens = time_to_convert_lower[len('between '):].split(' and ')
if len(tokens) == 2:
time_from = dateparser.parse(tokens[0], settings={'TIMEZONE': 'UTC'})
time_to = dateparser.parse(tokens[1], settings={'TIMEZONE': 'UTC'})
return {'from': int(time_from.timestamp()) * 1000, 'to': int(time_to.timestamp()) * 1000}
else:
relative = dateparser.parse(time_to_convert, settings={'TIMEZONE': 'UTC'})
return {'relativeTime': int((datetime.utcnow() - relative).total_seconds()) * 1000}
raise ValueError(f'Invalid timeframe: {time_to_convert}')
except Exception as exc:
raise DemistoException(f'Please enter a valid time frame (seconds, minutes, hours, days, weeks, months, '
f'years, between).\n{str(exc)}')
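# Illustrative examples (values assumed; dateparser resolves relative phrases against the current UTC time):
#   convert_timeframe_string_to_json('1 hour') -> approximately {'relativeTime': 3600000}  (milliseconds)
#   convert_timeframe_string_to_json('between 2021-01-01 12:00 and 2021-01-01 13:00')
#       -> {'from': 1609502400000, 'to': 1609506000000}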
def start_xql_query(client: Client, args: Dict[str, Any]) -> str:
"""Execute an XQL query.
Args:
client (Client): The XDR Client.
args (dict): The arguments to pass to the API call.
Returns:
str: The query execution ID.
"""
query = args.get('query', '')
if not query:
raise ValueError('query is not specified')
if 'limit' not in query: # if user did not provide a limit in the query, we will use the default one.
query = f'{query} \n| limit {str(DEFAULT_LIMIT)}'
data: Dict[str, Any] = {
'request_data': {
'query': query,
}
}
time_frame = args.get('time_frame')
if time_frame:
data['request_data']['timeframe'] = convert_timeframe_string_to_json(time_frame)
tenant_ids = argToList(args.get('tenant_ids'))
if tenant_ids:
data['request_data']['tenants'] = tenant_ids
# call the client function and get the raw response
execution_id = client.start_xql_query(data)
return execution_id
def get_xql_query_results(client: Client, args: dict) -> Tuple[dict, Optional[bytes]]:
"""Retrieve results of an executed XQL query API. returns the general response and
a file data if the query has more than 1000 results.
Args:
client (Client): The XDR Client.
args (dict): The arguments to pass to the API call.
    Returns:
        Tuple[dict, Optional[bytes]]: The query results and the result file data, if any.
"""
query_id = args.get('query_id')
if not query_id:
raise ValueError('query ID is not specified')
data = {
'request_data': {
'query_id': query_id,
'pending_flag': True,
'format': 'json',
}
}
# Call the Client function and get the raw response
response = client.get_xql_query_results(data)
response['execution_id'] = query_id
results = response.get('results', {})
stream_id = results.get('stream_id')
if stream_id:
file_data = get_query_result_stream(client, stream_id)
return response, file_data
response['results'] = results.get('data')
return response, None
def get_query_result_stream(client: Client, stream_id: str) -> bytes:
"""Retrieve XQL query results with more than 1000 results.
Args:
client (Client): The XDR Client.
stream_id (str): The stream ID of the query.
Returns:
bytes: The query results.
"""
if not stream_id:
raise ValueError('stream_id is not specified')
data = {
'request_data': {
'stream_id': stream_id,
'is_gzip_compressed': True,
}
}
# Call the Client function and get the raw response
return client.get_query_result_stream(data)
def format_item(item_to_format: Any) -> Any:
"""
    Convert the given string token ('TRUE', 'FALSE', 'NULL') to its Python value; other items are returned unchanged.
Args:
item_to_format (Any): Item to format.
Returns:
Any: Formatted item.
"""
mapper = {
'FALSE': False,
'TRUE': True,
'NULL': None,
}
return mapper[item_to_format] if item_to_format in mapper else item_to_format
def is_empty(item_to_check: Any) -> bool:
"""
Checks if a given item is empty or not
Args:
item_to_check (Any): The item to check.
Returns:
bool: True if empty, False otherwise.
"""
return item_to_check is not False and not item_to_check
def handle_timestamp_item(item_to_convert: Any) -> Union[Any, str]:
"""
Try to convert a given value to datestring.
Args:
item_to_convert (Any): The item to convert.
Returns:
Union[Any, str]: The converted timestamp if convert was successful, otherwise return the original item.
"""
try:
return timestamp_to_datestring(item_to_convert)
except Exception: # cannot convert item
return item_to_convert
def format_results(list_to_format: list, remove_empty_fields: bool = True) -> list:
"""
Recursively format a list of dictionaries and remove empty lists, empty dicts, or None elements from it if desired.
Args:
list_to_format (list): Input list to format.
remove_empty_fields (bool): True if the user wants to remove the empty fields.
Returns:
list: Formatted list.
"""
def format_dict(item_to_format: Any) -> Any:
if not isinstance(item_to_format, (dict, list)): # recursion stopping condition, formatting field
return format_item(item_to_format)
elif isinstance(item_to_format, list):
return [v for v in (format_dict(v) for v in item_to_format) if v]
else:
new_dict = {}
for key, value in item_to_format.items():
formatted_res = format_dict(value)
if is_empty(formatted_res) and remove_empty_fields:
continue # do not add item to the new dict
if 'time' in key:
new_dict[key] = handle_timestamp_item(formatted_res)
else:
new_dict[key] = formatted_res
return new_dict
for i, item in enumerate(list_to_format):
list_to_format[i] = format_dict(item)
return list_to_format
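# Illustrative example (hypothetical field names; timestamp_to_datestring comes from CommonServerPython):
#   format_results([{'agent_id': 'abc', 'is_vpn': 'FALSE', 'dst_host': 'NULL', 'event_timestamp': 1609502400000}])
#   -> roughly [{'agent_id': 'abc', 'is_vpn': False, 'event_timestamp': '2021-01-01T12:00:00.000Z'}]
#   ('NULL' becomes None and is dropped, while keys containing 'time' are converted to date strings.)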
def get_outputs_prefix(command_name: str) -> str:
"""
    Get the correct output prefix.
Args:
command_name (str): The executed command.
Returns:
str: The output prefix.
"""
if command_name in GENERIC_QUERY_COMMANDS:
return 'PaloAltoNetworksXQL.GenericQuery'
else: # built in command
query_name = BUILT_IN_QUERY_COMMANDS[command_name].get('name')
return f'PaloAltoNetworksXQL.{query_name}'
def get_nonce() -> str:
"""
    Generate a random string of 64 characters.
    Returns:
        str: The 64-character random string.
"""
return "".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)])
def add_context_to_integration_context(context: dict):
"""
Add the given context to the integration context.
Args:
        context (dict): The context to add.
"""
if context:
integration_context = get_integration_context()
integration_context.update(context)
set_integration_context(integration_context)
def remove_query_id_from_integration_context(query_id: str):
"""
Remove the given query_id from the integration context.
Args:
query_id (str): The query ID to remove.
"""
if query_id:
integration_context = get_integration_context()
integration_context.pop(query_id, None)
set_integration_context(integration_context)
# ========================================== Generic Query ===============================================#
def test_module(client: Client, args: Dict[str, Any]) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
Args:
client (Client): The XDR Client.
args (dict): The arguments to pass to the API call.
Returns:
str: 'ok' if test passed, anything else will fail the test.
"""
try:
client.get_xql_quota({'request_data': {}})
return 'ok'
except Exception as err:
if any(error in str(err) for error in ['Forbidden', 'Authorization', 'Unauthorized']):
raise DemistoException('Authorization failed, make sure API Key is correctly set')
elif 'Not Found' in str(err):
raise DemistoException('Authorization failed, make sure the URL is correct')
else:
raise err
def start_xql_query_polling_command(client: Client, args: dict) -> Union[CommandResults, list]:
"""Execute an XQL query as a scheduled command.
Args:
client (Client): The XDR Client.
args (dict): The arguments to pass to the API call.
Returns:
CommandResults: The command results.
"""
if not (query_name := args.get('query_name')):
raise DemistoException('Please provide a query name')
execution_id = start_xql_query(client, args)
if not execution_id:
raise DemistoException('Failed to start query\n')
args['query_id'] = execution_id
    # the query data is saved in the integration context for the next scheduled command run.
try:
add_context_to_integration_context({
execution_id: {
'query': args.get('query'),
'time_frame': args.get('time_frame'),
'command_name': demisto.command(),
'query_name': query_name,
}
})
return get_xql_query_results_polling_command(client, args)
except Exception:
remove_query_id_from_integration_context(execution_id)
raise
def get_xql_query_results_polling_command(client: Client, args: dict) -> Union[CommandResults, list]:
"""Retrieve results of an executed XQL query API executes as a scheduled command.
Args:
client (Client): The XDR Client.
args (dict): The arguments to pass to the API call.
Returns:
        Union[CommandResults, list]: The command results.
"""
    # get the query data either from the integration context (if it's not the first run) or from the given args.
query_id = args.get('query_id', '')
parse_result_file_to_context = argToBoolean(args.get('parse_result_file_to_context', 'false'))
integration_context = get_integration_context()
command_data = integration_context.get(query_id, args)
command_name = command_data.get('command_name', demisto.command())
interval_in_secs = int(args.get('interval_in_seconds', 10))
max_fields = arg_to_number(args.get('max_fields', 20))
if max_fields is None:
raise DemistoException('Please provide a valid number for max_fields argument.')
outputs, file_data = get_xql_query_results(client, args) # get query results with query_id
outputs.update({'query_name': command_data.get('query_name', '')})
outputs_prefix = get_outputs_prefix(command_name)
command_results = CommandResults(outputs_prefix=outputs_prefix, outputs_key_field='execution_id', outputs=outputs,
raw_response=copy.deepcopy(outputs))
    # if there are more than 1000 results
if file_data:
if not parse_result_file_to_context:
# Extracts the results into a file only
file = fileResult(filename="results.gz", data=file_data)
remove_query_id_from_integration_context(query_id)
return [file, command_results]
else:
# Parse the results to context:
data = gzip.decompress(file_data).decode()
outputs['results'] = [json.loads(line) for line in data.split("\n") if len(line) > 0]
    # if status is pending, in versions 6.2.0 and above, the command will be called again in the next run until success.
if outputs.get('status') == 'PENDING':
if not is_demisto_version_ge('6.2.0'): # only 6.2.0 version and above support polling command.
remove_query_id_from_integration_context(query_id)
return command_results
scheduled_command = ScheduledCommand(command='xdr-xql-get-query-results', next_run_in_seconds=interval_in_secs,
args=args, timeout_in_seconds=600)
command_results.scheduled_command = scheduled_command
command_results.readable_output = 'Query is still running, it may take a little while...'
return command_results
results_to_format = outputs.pop('results')
# create Human Readable output
query = command_data.get('query', '')
time_frame = command_data.get('time_frame')
extra_for_human_readable = ({'query': query, 'time_frame': time_frame})
outputs.update(extra_for_human_readable)
command_results.readable_output = tableToMarkdown('General Information', outputs,
headerTransform=string_to_table_header,
removeNull=True)
    for key in extra_for_human_readable:
        outputs.pop(key)
# if no fields were given in the query then the default fields are returned (without empty fields).
if results_to_format:
formatted_list = format_results(results_to_format, remove_empty_fields=False) \
if 'fields' in query else format_results(results_to_format)
if formatted_list and command_name == 'xdr-xql-generic-query' and len(formatted_list[0].keys()) > max_fields:
raise DemistoException('The number of fields per result has exceeded the maximum number of allowed fields, '
'please select specific fields in the query or increase the maximum number of '
'allowed fields.')
outputs.update({'results': formatted_list})
command_results.outputs = outputs
command_results.readable_output += tableToMarkdown('Data Results', outputs.get('results'),
headerTransform=string_to_table_header)
remove_query_id_from_integration_context(query_id)
return command_results
def get_xql_quota_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""Retrieve the amount of query quota available and used.
Args:
client (Client): The XDR Client.
args (dict): The arguments to pass to the API call.
    Returns:
        CommandResults: The quota results.
"""
data: dict = {
'request_data': {
}
}
# Call the Client function and get the raw response
result = client.get_xql_quota(data).get('reply', {})
readable_output = tableToMarkdown('Quota Results', result, headerTransform=string_to_table_header, removeNull=True)
return CommandResults(
outputs_prefix='PaloAltoNetworksXQL.Quota',
outputs_key_field='',
outputs=result,
readable_output=readable_output
)
# =========================================== Built-In Queries ===========================================#
def get_built_in_query_results_polling_command(client: Client, args: dict) -> Union[CommandResults, list]:
"""Retrieve results of a built in XQL query, execute as a scheduled command.
Args:
client (Client): The XDR Client.
args (dict): The arguments to pass to the API call.
Returns:
        Union[CommandResults, list]: The command results.
"""
    # build the query; if no endpoint_id was given, the query will search every endpoint_id (*).
endpoint_id_list = wrap_list_items_in_double_quotes(args.get('endpoint_id', '*'))
built_in_func = BUILT_IN_QUERY_COMMANDS.get(demisto.command(), {}).get('func')
query = built_in_func(endpoint_id_list, args) if callable(built_in_func) else ''
# add extra fields to query
extra_fields = argToList(args.get('extra_fields', []))
if extra_fields:
extra_fields_list = ", ".join(str(e) for e in extra_fields)
query = f'{query}, {extra_fields_list}'
# add limit to query
if 'limit' in args:
query = f"{query} | limit {args.get('limit')}"
query_args = {
'query': query,
'query_name': args.get('query_name'),
'tenants': argToList(args.get('tenants', [])),
'time_frame': args.get('time_frame', '')
}
return start_xql_query_polling_command(client, query_args)
''' MAIN FUNCTION '''
# COMMAND CONSTANTS
BUILT_IN_QUERY_COMMANDS = {
'xdr-xql-file-event-query': {
'func': get_file_event_query,
'name': 'FileEvent',
},
'xdr-xql-process-event-query': {
'func': get_process_event_query,
'name': 'ProcessEvent',
},
'xdr-xql-dll-module-query': {
'func': get_dll_module_query,
'name': 'DllModule',
},
'xdr-xql-network-connection-query': {
'func': get_network_connection_query,
'name': 'NetworkConnection',
},
'xdr-xql-registry-query': {
'func': get_registry_query,
'name': 'Registry',
},
'xdr-xql-event-log-query': {
'func': get_event_log_query,
'name': 'EventLog',
},
'xdr-xql-dns-query': {
'func': get_dns_query,
'name': 'DNS',
},
'xdr-xql-file-dropper-query': {
'func': get_file_dropper_query,
'name': 'FileDropper',
},
'xdr-xql-process-instance-network-activity-query': {
'func': get_process_instance_network_activity_query,
'name': 'ProcessInstanceNetworkActivity',
},
'xdr-xql-process-causality-network-activity-query': {
'func': get_process_causality_network_activity_query,
'name': 'ProcessCausalityNetworkActivity',
},
}
GENERIC_QUERY_COMMANDS = {
'test-module': test_module,
'xdr-xql-generic-query': start_xql_query_polling_command,
'xdr-xql-get-query-results': get_xql_query_results_polling_command,
'xdr-xql-get-quota': get_xql_quota_command,
}
def main() -> None:
"""main function, parses params and runs command functions
"""
args = demisto.args()
params = demisto.params()
    # using two different credentials objects as both fields need to be encrypted
apikey = params.get('apikey', {}).get('password', '')
apikey_id = params.get('apikey_id', {}).get('password', '')
if not apikey:
raise DemistoException('Missing API Key. Fill in a valid key in the integration configuration.')
if not apikey_id:
raise DemistoException('Missing API Key ID. Fill in a valid key ID in the integration configuration.')
base_url = urljoin(params['url'], '/public_api/v1')
verify_cert = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
# generate a 64 bytes random string
nonce = get_nonce()
# get the current timestamp as milliseconds.
timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000)
# generate the auth key:
auth_key = f'{apikey}{nonce}{timestamp}'.encode("utf-8")
# convert to bytes object and calculate sha256
api_key_hash = hashlib.sha256(auth_key).hexdigest() # lgtm [py/weak-sensitive-data-hashing]
# generate HTTP call headers
headers = {
"x-xdr-timestamp": timestamp,
"x-xdr-nonce": nonce,
"x-xdr-auth-id": apikey_id,
"Authorization": api_key_hash,
}
client = Client(
base_url=base_url,
verify=verify_cert,
headers=headers,
proxy=proxy,
)
if command in GENERIC_QUERY_COMMANDS:
return_results(GENERIC_QUERY_COMMANDS[command](client, args))
elif command in BUILT_IN_QUERY_COMMANDS:
return_results(get_built_in_query_results_polling_command(client, args))
else:
raise NotImplementedError(f'Command {command} does not exist.')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError: {str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 39.58209 | 129 | 0.682156 |
5a2f550a120855aa680a90a6d218dd794dc46ea1
| 529 |
py
|
Python
|
py-test1/py-test2.py
|
isdev0/sel-9-test1
|
82469a87aa3804efc4881e9a37878da912d5fc3f
|
[
"Apache-2.0"
] | null | null | null |
py-test1/py-test2.py
|
isdev0/sel-9-test1
|
82469a87aa3804efc4881e9a37878da912d5fc3f
|
[
"Apache-2.0"
] | null | null | null |
py-test1/py-test2.py
|
isdev0/sel-9-test1
|
82469a87aa3804efc4881e9a37878da912d5fc3f
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
@pytest.fixture(scope='module')
def driver(request):
wd = webdriver.Firefox()
request.addfinalizer(wd.quit)
return wd
def test_adm_login(driver):
driver.get('http://rubberducks.com:8880/litecart/admin/')
driver.find_element_by_name('username').send_keys('admin')
driver.find_element_by_name('password').send_keys('admin')
driver.find_element_by_name('login').click()
WebDriverWait(driver, 5)
| 31.117647 | 62 | 0.756144 |
ce9875a5413e8399efd79336237fbdcbd3530a6c
| 509 |
py
|
Python
|
Projects/Giffy/main.py
|
miku/haw-di-bim-lv22
|
5e3dd1f7a1eb02ebbe5cc801bd8094618d6525e3
|
[
"MIT"
] | null | null | null |
Projects/Giffy/main.py
|
miku/haw-di-bim-lv22
|
5e3dd1f7a1eb02ebbe5cc801bd8094618d6525e3
|
[
"MIT"
] | null | null | null |
Projects/Giffy/main.py
|
miku/haw-di-bim-lv22
|
5e3dd1f7a1eb02ebbe5cc801bd8094618d6525e3
|
[
"MIT"
] | null | null | null |
import imageio.v3 as iio
import matplotlib.pyplot as plt
import numpy as np
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
n = 60
gif_path = "out.gif"
frames = []
for v in range(n):
data = np.random.randint(0, 255, size=(200, 200))
mask = np.fromfunction(lambda i, j: ((i + v) % 20) > 15 * (j + v % 4 > 2), (200, 200), dtype=int)
data[mask] = 0
frames.append(data)
frames = np.stack(
frames,
axis=0
)
iio.imwrite(gif_path, frames, mode="I", loop=10)
| 20.36 | 101 | 0.605108 |
0b582b3afab21efe4cf715c7cae37614f5e5159a
| 262 |
py
|
Python
|
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/08.1-Loop.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/08.1-Loop.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/08.1-Loop.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
def is_even(number):
if number % 2 == 0:
return True
return False
even_number = []
user_input = int(input("Limit: "))
for num in range(0, user_input):
if is_even(num):
even_number.append(num)
print(f"Even numbers: {even_number}")
| 17.466667 | 37 | 0.629771 |
e7ed0d07aeba7f278b6efe4c8c0d94dee7dda59c
| 734 |
py
|
Python
|
Kapitel_4/_2_basic_descriptor.py
|
Geralonx/Classes_Tutorial
|
9499db8159efce1e3c38975b66a9c649631c6727
|
[
"MIT"
] | 1 |
2020-12-24T15:42:54.000Z
|
2020-12-24T15:42:54.000Z
|
Kapitel_4/_2_basic_descriptor.py
|
Geralonx/Classes_Tutorial
|
9499db8159efce1e3c38975b66a9c649631c6727
|
[
"MIT"
] | null | null | null |
Kapitel_4/_2_basic_descriptor.py
|
Geralonx/Classes_Tutorial
|
9499db8159efce1e3c38975b66a9c649631c6727
|
[
"MIT"
] | null | null | null |
class Descriptor:
def __set_name__(self, owner_cls, name):
self.name = name
def __get__(self, instance, owner_cls=None):
print(f"Get {self.name!r}")
return instance.__dict__[self.name]
def __set__(self, instance, value):
print(f"Set {self.name!r} to {value!r}")
instance.__dict__[self.name] = value
def __delete__(self, instance):
print(f"Delete {self.name!r}")
del instance.__dict__[self.name]
class MyClass:
attr = Descriptor()
def __init__(self, attr):
self.attr = attr
instanz = MyClass('value_1')
instanz.attr
del instanz.attr
# Set 'attr' to 'value_1' -> Triggered by 'self.attr = attr' in the init
# Get 'attr'
# Delete 'attr'
| 26.214286 | 79 | 0.643052 |
685868954c949c3154e2bcea2c9e92dff6da8897
| 8,635 |
py
|
Python
|
3DNet/datasets/TReNDs.py
|
BhaveshJP25/RSNA
|
48d85faf82651b1ae4fdcd829ce2d4978a858d3f
|
[
"MIT"
] | null | null | null |
3DNet/datasets/TReNDs.py
|
BhaveshJP25/RSNA
|
48d85faf82651b1ae4fdcd829ce2d4978a858d3f
|
[
"MIT"
] | null | null | null |
3DNet/datasets/TReNDs.py
|
BhaveshJP25/RSNA
|
48d85faf82651b1ae4fdcd829ce2d4978a858d3f
|
[
"MIT"
] | null | null | null |
import os
import h5py
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV, KFold, StratifiedKFold, GroupKFold
import nilearn as nl
import torch
import random
from tqdm import tqdm
import monai
from monai.transforms import \
LoadNifti, LoadNiftid, AddChanneld, ScaleIntensityRanged, \
Rand3DElasticd, RandAffined, \
Spacingd, Orientationd
root = r'./competition_root'
train = pd.read_csv('{}/train_scores.csv'.format(root)).sort_values(by='Id')
loadings = pd.read_csv('{}/loading.csv'.format(root))
sample = pd.read_csv('{}/sample_submission.csv'.format(root))
reveal = pd.read_csv('{}/reveal_ID_site2.csv'.format(root))
ICN = pd.read_csv('{}/ICN_numbers.csv'.format(root))
"""
Load and display a subject's spatial map
"""
def load_subject(filename, mask_niimg):
"""
    Load a subject saved in .mat format with the version 7.3 flag and return the subject data array (axes reordered); building a niimg from the mask headers is left commented out below.
Args:
filename <str> the .mat filename for the subject data
mask_niimg niimg object the mask niimg object used for nifti headers
"""
subject_data = None
with h5py.File(filename, 'r') as f:
subject_data = f['SM_feature'][()]
# print(subject_data.shape)
# It's necessary to reorient the axes, since h5py flips axis order
subject_data = np.moveaxis(subject_data, [0, 1, 2, 3], [3, 2, 1, 0])
# print(subject_data.shape)
return subject_data
# subject_niimg = nl.image.new_img_like(mask_niimg, subject_data, affine=mask_niimg.affine, copy_header=True)
# return subject_niimg
def read_data_sample():
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
mask_filename = r'{}/fMRI_mask.nii'.format(root)
subject_filename = '{}/fMRI_train/10004.mat'.format(root)
mask_niimg = nl.image.load_img(mask_filename)
print("mask shape is %s" % (str(mask_niimg.shape)))
subject_niimg = load_subject(subject_filename, mask_niimg)
print("Image shape is %s" % (str(subject_niimg.shape)))
num_components = subject_niimg.shape[-1]
print("Detected {num_components} spatial maps".format(num_components=num_components))
class TReNDsDataset(Dataset):
def __init__(self, mode='train', fold_index = 0):
# print("Processing {} datas".format(len(self.img_list)))
self.mode = mode
self.fold_index = fold_index
if self.mode=='train' or self.mode=='valid' or self.mode=='valid_tta':
features = ('age', 'domain1_var1', 'domain1_var2', 'domain2_var1', 'domain2_var2')
data = pd.merge(loadings, train, on='Id').dropna()
id_train = list(data.Id)
fea_train = np.asarray(data.drop(list(features), axis=1).drop('Id', axis=1))
lbl_train = np.asarray(data[list(features)])
self.all_samples = []
for i in range(len(id_train)):
id = id_train[i]
fea = fea_train[i]
lbl = lbl_train[i]
filename = os.path.join('{}/fMRI_train_npy/{}.npy'.format(root, id))
self.all_samples.append([filename, fea, lbl, str(id)])
fold = 0
kf = KFold(n_splits=5, shuffle=True, random_state=1337)
for train_index, valid_index in kf.split(self.all_samples):
if fold_index == fold:
self.train_index = train_index
self.valid_index = valid_index
fold+=1
if self.mode=='train':
self.train_index = [tmp for tmp in self.train_index if os.path.exists(self.all_samples[tmp][0])]
self.len = len(self.train_index)
print('fold index:',fold_index)
print('train num:', self.len)
elif self.mode=='valid' or self.mode=='valid_tta':
self.valid_index = [tmp for tmp in self.valid_index if os.path.exists(self.all_samples[tmp][0])]
self.len = len(self.valid_index)
print('fold index:',fold_index)
print('valid num:', self.len)
elif self.mode=='test':
labels_df = pd.read_csv("{}/train_scores.csv".format(root))
labels_df["is_train"] = True
features = ('age', 'domain1_var1', 'domain1_var2', 'domain2_var1', 'domain2_var2')
data = pd.merge(loadings, labels_df, on="Id", how="left")
id_test = list(data[data["is_train"] != True].Id)
fea_test = np.asarray(data.drop(list(features), axis=1).drop('Id', axis=1)[data["is_train"] != True].drop("is_train", axis=1))
lbl_test = np.asarray(data[list(features)][data["is_train"] != True])
self.all_samples = []
for i in range(len(id_test)):
id = id_test[i]
fea = fea_test[i]
lbl = lbl_test[i]
filename = os.path.join('{}/fMRI_test_npy/{}.npy'.format(root, id))
if os.path.exists(filename):
self.all_samples.append([id, filename, fea, lbl])
self.len = len(self.all_samples)
print(len(id_test))
print('test num:', self.len)
def __getitem__(self, idx):
if self.mode == "train" :
filename, _, lbl, id = self.all_samples[self.train_index[idx]]
train_img = np.load(filename).astype(np.float32)
train_img = train_img.transpose((3,2,1,0))
# (53, 52, 63, 53)
train_lbl = lbl
data_dict = {'image':train_img}
rand_affine = RandAffined(keys=['image'],
mode=('bilinear', 'nearest'),
prob=0.5,
spatial_size=(52, 63, 53),
translate_range=(5, 5, 5),
rotate_range=(np.pi * 4, np.pi * 4, np.pi * 4),
scale_range=(0.15, 0.15, 0.15),
padding_mode='border')
affined_data_dict = rand_affine(data_dict)
train_img = affined_data_dict['image']
return torch.FloatTensor(train_img), \
torch.FloatTensor(train_lbl)
elif self.mode == "valid":
filename, _, lbl, id = self.all_samples[self.valid_index[idx]]
train_img = np.load(filename).astype(np.float32)
train_img = train_img.transpose((3, 2, 1, 0))
# (53, 52, 63, 53)
train_lbl = lbl
return torch.FloatTensor(train_img),\
torch.FloatTensor(train_lbl)
elif self.mode == 'test':
id, filename, fea, lbl = self.all_samples[idx]
test_img = np.load(filename).astype(np.float32)
test_img = test_img.transpose((3, 2, 1, 0))
return str(id), \
torch.FloatTensor(test_img)
def __len__(self):
return self.len
def run_check_datasets():
dataset = TReNDsDataset(mode='test')
for m in range(len(dataset)):
tmp = dataset[m]
print(m)
def convert_mat2nii2npy():
def get_data(filename):
with h5py.File(filename, 'r') as f:
subject_data = f['SM_feature'][()]
# print(subject_data.shape)
# It's necessary to reorient the axes, since h5py flips axis order
subject_data = np.moveaxis(subject_data, [0, 1, 2, 3], [3, 2, 1, 0])
return subject_data
# train_root = '{}/fMRI_train/'.format(root)
# train_npy_root = '{}/fMRI_train_npy/'.format(root)
train_root = '{}/fMRI_test/'.format(root)
train_npy_root = '{}/fMRI_test_npy/'.format(root)
os.makedirs(train_npy_root, exist_ok=True)
mats = os.listdir(train_root)
mats = [mat for mat in mats if '.mat' in mat]
random.shuffle(mats)
for mat in tqdm(mats):
mat_path = os.path.join(train_root, mat)
if os.path.exists(mat_path):
print(mat_path)
npy_path = os.path.join(train_npy_root, mat.replace('.mat','.npy'))
if os.path.exists(npy_path):
print(npy_path, 'exist')
else:
data = get_data(mat_path)
print(npy_path,data.shape)
np.save(npy_path,data.astype(np.float16))
if __name__ == '__main__':
run_check_datasets()
# convert_mat2nii2npy()
| 39.072398 | 144 | 0.590272 |
688d1702f92cf1c12416f0cff71c5e2430c9f2af
| 2,711 |
py
|
Python
|
21-fs-ias-lec/03-BACnetCore/src/core/node.py
|
cn-uofbasel/BCN
|
2d0852e00f2e7f3c4f7cf30f60c6765f2761f80a
|
[
"MIT"
] | 8 |
2020-03-17T21:12:18.000Z
|
2021-12-12T15:55:54.000Z
|
21-fs-ias-lec/03-BACnetCore/src/core/node.py
|
cn-uofbasel/BCN
|
2d0852e00f2e7f3c4f7cf30f60c6765f2761f80a
|
[
"MIT"
] | 2 |
2021-07-19T06:18:43.000Z
|
2022-02-10T12:17:58.000Z
|
21-fs-ias-lec/03-BACnetCore/src/core/node.py
|
cn-uofbasel/BCN
|
2d0852e00f2e7f3c4f7cf30f60c6765f2761f80a
|
[
"MIT"
] | 25 |
2020-03-20T09:32:45.000Z
|
2021-07-18T18:12:59.000Z
|
from .storage_controller import StorageController
from .com_link import ComLink, OperationModes
from .interface.owned_masterfeed import OwnedMasterFeed
from ..constants import SQLITE
class Node:
"""
This class is the main class for every Node in the BACnet. It creates all components such as the com-link and
Storage-controller and serves basic functionality to retrieve the components and shut down the Node.
"""
def __init__(self, operation_mode: OperationModes, channel, path="NodeBase.sqlite"):
"""
:param operation_mode: (MANUAL or AUTOSYNC)
:param channel: Communication channel to use
:param: path: the path to the database to create or load
"""
self.operation_mode = operation_mode
self.channel = channel
self.path_to_db = path
self.db_type = SQLITE
# initialize all main components
self.storage_controller = StorageController(self.path_to_db, self.db_type)
self.com_link = ComLink(channel, operation_mode, self.storage_controller)
self.storage_controller.set_com_link(self.com_link)
self.owned_master_feed = self.storage_controller.get_owned_master()
# automatically start the channel when initialization is finished
self.channel.start()
def get_master(self) -> OwnedMasterFeed:
"""
This method returns an Interface Instance of the Owned MasterFeed of this Node.
(The Masterfeed is automatically created when creating a Database for the first time)
"""
return self.owned_master_feed
def get_storage(self):
"""
Returns the Node's Storage-Controller so that the user can have better manual access to the functionality
        if needed. (E.g., another application needs in-depth access to internals or to functionality not yet served
by the Interface.)
"""
return self.storage_controller
def get_com_link(self):
"""
Returns the Node's Com-Link Instance so that the user can have better manual control/access if needed.
"""
return self.com_link
def manual_synchronize(self):
"""
No matter which operation mode the com link is in, you can trigger the synchronization manually.
        This method HAS to be used when using the MANUAL OperationMode.
"""
self.storage_controller.sync()
def shutdown(self):
"""
When shutting down a node, all threads need to be terminated. All other instances are automatically closed.
Should ALWAYS be called at the end of a program that uses this package
"""
self.com_link.stop_autosync()
self.channel.stop()
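# Minimal usage sketch (my_channel is a placeholder; OperationModes.MANUAL is taken from the docstring above):
#   node = Node(OperationModes.MANUAL, channel=my_channel, path="NodeBase.sqlite")
#   master = node.get_master()
#   node.manual_synchronize()  # required in MANUAL mode to exchange feeds
#   node.shutdown()            # always stop threads before the program exits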
| 41.075758 | 115 | 0.690151 |
cc223ff1c14b50a11e927e3f1135db0fee7f5853
| 2,470 |
py
|
Python
|
S5/CS331-SS/PGM_003 - Producer Consumer.py
|
joe247/CSE-LABS
|
bab17548562bdc9c0bc8b15679f07379a9e98dec
|
[
"MIT"
] | 2 |
2021-02-03T02:03:21.000Z
|
2021-07-03T20:24:14.000Z
|
S5/CS331-SS/PGM_003 - Producer Consumer.py
|
joe247/CSE-LABS
|
bab17548562bdc9c0bc8b15679f07379a9e98dec
|
[
"MIT"
] | null | null | null |
S5/CS331-SS/PGM_003 - Producer Consumer.py
|
joe247/CSE-LABS
|
bab17548562bdc9c0bc8b15679f07379a9e98dec
|
[
"MIT"
] | null | null | null |
#https://www.studytonight.com/python/python-threading-condition-object
#gedit font preferences Source Code Pro
'''
Note: the notify() and notifyAll() methods don't release the lock; this means that the thread or threads awakened will not return from their wait() call immediately, but only when the thread that called notify() or notifyAll() finally relinquishes ownership of the lock.
'''
from threading import Thread, Condition
import time
from random import random
queue = []
MAX_NUM, i= 10, 0
condition = Condition() #creates a default lock
class ProducerThread(Thread):
print("**Start Producer**")
def run(self):
global queue, i
while True:
            condition.acquire() #returns true and producer acquires default lock
            print("\nProducer Acquires lock...")
if len(queue) == MAX_NUM:
print("\n>>Queue full, producer is waiting..\n")
print("Producer Suspended...")
condition.wait() #releases the lock and blocks the producer thread
print("Producer Woken...")
print("\n\"Queue Freed\" - Consumer notifies the Producer**\n")
queue.append(round(random()*10,2))
print("Produced",queue[i],"kB of Data")
i+=1
condition.notify()
condition.release()
print("Producer Releases lock...")
time.sleep(1)
class ConsumerThread(Thread):
print("\n**Start Consumer**")
def run(self):
global queue, i
while True:
            condition.acquire() #returns true and consumer acquires default lock
            print("\nConsumer Acquires lock...")
            if not queue: #queue not empty if producer has produced something
print("\n>>Nothing in queue, consumer is waiting..\n")
print("Consumer Suspended...")
condition.wait() #releases the lock and blocks the consumer thread
print("Consumer Woken...")
print("\n\"Queue Occupied\" - Producer notifies the Consumer\n")
val = queue.pop(i-1)
i-=1
print("Consumed",val,"kB of Data")
condition.notify()
condition.release()
print("Consumer Releases lock...")
time.sleep(1)
ProducerThread().start()
ConsumerThread().start()
'''
condition.wait()
#This method is used to block the thread and make it wait until some other thread notifies it by calling the notify() or notifyAll() method on the same condition object or until the timeout occurs.
#This method returns True if it is released because of notify() or notifyAll() method else if timeout occurs this method will returnFalse boolean value.
#The control here goes to ConsumerThread.start()
'''
| 35.797101 | 292 | 0.715789 |
accaf4cdb6f6950c27a717b8c94d51efab4d6e31
| 2,527 |
py
|
Python
|
Backend/migrations/alembic/versions/296c837aaf67_create_survstat_table.py
|
dbvis-ukon/coronavis
|
f00374ac655c9d68541183d28ede6fe5536581dc
|
[
"Apache-2.0"
] | 15 |
2020-04-24T20:18:11.000Z
|
2022-01-31T21:05:05.000Z
|
Backend/migrations/alembic/versions/296c837aaf67_create_survstat_table.py
|
dbvis-ukon/coronavis
|
f00374ac655c9d68541183d28ede6fe5536581dc
|
[
"Apache-2.0"
] | 2 |
2021-05-19T07:15:09.000Z
|
2022-03-07T08:29:34.000Z
|
Backend/migrations/alembic/versions/296c837aaf67_create_survstat_table.py
|
dbvis-ukon/coronavis
|
f00374ac655c9d68541183d28ede6fe5536581dc
|
[
"Apache-2.0"
] | 4 |
2020-04-27T16:20:13.000Z
|
2021-02-23T10:39:42.000Z
|
"""create survstat table
Revision ID: 296c837aaf67
Revises: d5d392162842
Create Date: 2021-03-12 17:41:02.421128
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '296c837aaf67'
down_revision = 'd5d392162842'
branch_labels = None
depends_on = None
ages = ""
for a in range(0, 80):
ages += "A{:02d} int not null,".format(a)
def upgrade():
op.get_bind().execute("""
-- auto-generated definition
create table survstat_cases_agegroup
(
ags varchar(6) not null,
year int not null,
week int not null,
A00 int not null,
A01 int not null,
A02 int not null,
A03 int not null,
A04 int not null,
A05 int not null,
A06 int not null,
A07 int not null,
A08 int not null,
A09 int not null,
A10 int not null,
A11 int not null,
A12 int not null,
A13 int not null,
A14 int not null,
A15 int not null,
A16 int not null,
A17 int not null,
A18 int not null,
A19 int not null,
A20 int not null,
A21 int not null,
A22 int not null,
A23 int not null,
A24 int not null,
A25 int not null,
A26 int not null,
A27 int not null,
A28 int not null,
A29 int not null,
A30 int not null,
A31 int not null,
A32 int not null,
A33 int not null,
A34 int not null,
A35 int not null,
A36 int not null,
A37 int not null,
A38 int not null,
A39 int not null,
A40 int not null,
A41 int not null,
A42 int not null,
A43 int not null,
A44 int not null,
A45 int not null,
A46 int not null,
A47 int not null,
A48 int not null,
A49 int not null,
A50 int not null,
A51 int not null,
A52 int not null,
A53 int not null,
A54 int not null,
A55 int not null,
A56 int not null,
A57 int not null,
A58 int not null,
A59 int not null,
A60 int not null,
A61 int not null,
A62 int not null,
A63 int not null,
A64 int not null,
A65 int not null,
A66 int not null,
A67 int not null,
A68 int not null,
A69 int not null,
A70 int not null,
A71 int not null,
A72 int not null,
A73 int not null,
A74 int not null,
A75 int not null,
A76 int not null,
A77 int not null,
A78 int not null,
A79 int not null,
\"A80+\" int not null,
Unbekannt int not null,
constraint survstat_cases_agegroup_pk
primary key (ags, year, week)
);
""")
def downgrade():
op.drop_table('survstat_cases_agegroup')
| 20.884298 | 45 | 0.619707 |
680975928a048a83ee677f5dbc8d49ae33bfe253
| 680 |
py
|
Python
|
2-resources/__DATA-Structures/Code-Challenges/cc72permutationPalindrome/solution.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/__DATA-Structures/Code-Challenges/cc72permutationPalindrome/solution.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/__DATA-Structures/Code-Challenges/cc72permutationPalindrome/solution.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
#
from collections import Counter
import timeit
def is_permutation_palindrome(string):
unpaired_chars = set()
for char in string:
if char in unpaired_chars:
unpaired_chars.remove(char)
else:
unpaired_chars.add(char)
return len(unpaired_chars) <= 1
def is_permutation_palindrome2(string):
return sum(v % 2 for v in Counter(string).values()) <= 1
def wrapper(func, *args, **kwargs):
def wrapped():
return func(*args, **kwargs)
return wrapped
string1 = 'asdfdsa'
wrapped1 = wrapper(is_permutation_palindrome, string1)
wrapped2 = wrapper(is_permutation_palindrome2, string1)
print(timeit.timeit(wrapped1))
print(timeit.timeit(wrapped2))
| 21.935484 | 58 | 0.733824 |
4f35e60b84b561c40deeb817ba5b0b3a37ec6a56
| 5,275 |
py
|
Python
|
PlaidCTF/2021/web/Pearl_U-Stor/app/app.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
PlaidCTF/2021/web/Pearl_U-Stor/app/app.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
PlaidCTF/2021/web/Pearl_U-Stor/app/app.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-12-19T11:06:24.000Z
|
2021-12-19T11:06:24.000Z
|
from flask import Flask, render_template, url_for, request, send_from_directory, send_file, make_response, abort, redirect
from forms import AppFileForm
import os
from io import BytesIO
from werkzeug.utils import secure_filename
from subprocess import Popen
import uuid
import sys
from paste.translogger import TransLogger
import waitress
import time
from flask_wtf.csrf import CSRFProtect
app = Flask(__name__)
app.config['SECRET_KEY'] = os.environ.get("APP_SECRET_KEY")
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['TMP_FOLDER'] = '/tmp'
app.config['RECAPTCHA_DATA_ATTRS'] = {'bind': 'recaptcha-submit', 'callback': 'onSubmitCallback', 'size': 'invisible'}
app.config['RECAPTCHA_PUBLIC_KEY'] = os.environ.get("APP_RECAPTCHA_PUBLIC_KEY")
app.config['RECAPTCHA_PRIVATE_KEY'] = os.environ.get("APP_RECAPTCHA_PRIVATE_KEY")
csrf = CSRFProtect(app)
def get_cookie(cookies):
if 'id' in cookies:
cookie_id = cookies.get('id')
if cookie_id.strip() != '' and os.path.exists(os.path.join(app.config['TMP_FOLDER'], cookie_id)):
return (False, cookie_id)
cookie_id = uuid.uuid4().hex
os.mkdir(os.path.join(app.config['TMP_FOLDER'], cookie_id))
return (True,cookie_id)
@app.route('/', methods=["GET", "POST"])
def index():
(set_cookie, cookie_id) = get_cookie(request.cookies)
form = AppFileForm()
if request.method == "GET":
try:
file_list = os.listdir(os.path.join(app.config['TMP_FOLDER'], cookie_id))
except PermissionError:
abort(404, description="Nothing here.")
resp = make_response(render_template("index.html", form=form, files=file_list))
elif request.method == "POST":
errors = []
if form.validate_on_submit():
myfile = request.files["myfile"]
file_path = os.path.join(app.config['TMP_FOLDER'], secure_filename(cookie_id), secure_filename(myfile.filename))
if os.path.exists(file_path):
errors.append("File already exists.")
elif secure_filename(cookie_id) == '':
errors.append("Cannot store file.")
else:
try:
myfile.save(file_path)
cmd = ["chattr", "+r", file_path]
proc = Popen(cmd, stdin=None, stderr=None, close_fds=True)
except:
errors.append("Cannot store file.")
try:
file_list = os.listdir(os.path.join(app.config['TMP_FOLDER'], cookie_id))
except PermissionError:
abort(404, description="Nothing here.")
resp = make_response(render_template("index.html", form=form, files=file_list, errors=errors))
if set_cookie:
resp.set_cookie('id', cookie_id)
return resp
@app.route('/file/<path:filename>')
def get_file(filename):
(set_cookie, cookie_id) = get_cookie(request.cookies)
filename = secure_filename(filename)
if set_cookie:
abort(404, description="Nothing here.")
if not os.path.exists(os.path.join(app.config['TMP_FOLDER'], secure_filename(cookie_id), filename)):
abort(404, description="Nothing here.")
with open(os.path.join(app.config['TMP_FOLDER'], secure_filename(cookie_id), filename), "rb") as f:
memory_file = f.read()
return send_file(BytesIO(memory_file), attachment_filename=filename, as_attachment=True)
@app.errorhandler(404)
def page_not_found(error):
return render_template("error.html", message=error)
class AppLogger(TransLogger):
def write_log(self, environ, method, req_uri, start, status, bytes):
if method == 'POST' and 'myfile' in environ['werkzeug.request'].files:
filename = environ['werkzeug.request'].files["myfile"].filename
else:
filename = ''
if bytes is None:
bytes = '-'
remote_addr = '-'
if environ.get('HTTP_X_FORWARDED_FOR'):
remote_addr = environ['HTTP_X_FORWARDED_FOR']
elif environ.get('REMOTE_ADDR'):
remote_addr = environ['REMOTE_ADDR']
d = {
'REMOTE_ADDR': remote_addr,
'REMOTE_USER': environ.get('REMOTE_USER') or '-',
'REQUEST_METHOD': method,
'REQUEST_URI': req_uri,
'HTTP_VERSION': environ.get('SERVER_PROTOCOL'),
'time': time.strftime('%d/%b/%Y:%H:%M:%S', start),
'status': status.split(None, 1)[0],
'bytes': bytes,
'HTTP_REFERER': environ.get('HTTP_REFERER', '-'),
'HTTP_USER_AGENT': environ.get('HTTP_USER_AGENT', '-'),
'ID': environ['werkzeug.request'].cookies['id'] if 'id' in environ['werkzeug.request'].cookies else '',
'FILENAME': filename
}
message = self.format % d
self.logger.log(self.logging_level, message)
if __name__ == "__main__":
format_logger = ('%(REMOTE_ADDR)s - %(REMOTE_USER)s [%(time)s] '
'"%(REQUEST_METHOD)s %(REQUEST_URI)s %(HTTP_VERSION)s" '
'%(status)s %(bytes)s "%(ID)s" "%(FILENAME)s"')
waitress.serve(AppLogger(app, format=format_logger), listen="*:8000")
| 40.891473 | 125 | 0.61782 |
4f4a0ab729afd80258b75aaeebfcf85278a75a2b
| 637 |
py
|
Python
|
demos/System_Dynamics/prey_predator_sd/model/run.py
|
w-ghub/demos
|
6382676fae89bd5a190626612712fcedf17bca6d
|
[
"MIT"
] | 56 |
2020-07-08T23:23:15.000Z
|
2022-03-11T20:43:09.000Z
|
demos/System_Dynamics/prey_predator_sd/model/run.py
|
w-ghub/demos
|
6382676fae89bd5a190626612712fcedf17bca6d
|
[
"MIT"
] | 41 |
2020-07-11T23:24:06.000Z
|
2022-01-28T13:28:07.000Z
|
demos/System_Dynamics/prey_predator_sd/model/run.py
|
w-ghub/demos
|
6382676fae89bd5a190626612712fcedf17bca6d
|
[
"MIT"
] | 39 |
2020-07-15T11:35:04.000Z
|
2022-02-01T16:02:51.000Z
|
import pandas as pd
from model import config
from cadCAD.engine import ExecutionMode, ExecutionContext,Executor
from cadCAD import configs
def run():
'''
Definition:
Run simulation
'''
# Single
exec_mode = ExecutionMode()
local_mode_ctx = ExecutionContext(context=exec_mode.local_mode)
simulation = Executor(exec_context=local_mode_ctx, configs=configs)
raw_system_events, tensor_field, sessions = simulation.execute()
# Result System Events DataFrame
df = pd.DataFrame(raw_system_events)
# subset to last substep
df = df[df['substep'] == df.substep.max()]
return df
| 26.541667 | 71 | 0.711146 |
4ff39b2a5b27aca51de5fac71c57dd9ec5f13506
| 275 |
py
|
Python
|
src/using_tips/decorator/dec3.py
|
HuangHuaBingZiGe/GitHub-Demo
|
f3710f73b0828ef500343932d46c61d3b1e04ba9
|
[
"Apache-2.0"
] | null | null | null |
src/using_tips/decorator/dec3.py
|
HuangHuaBingZiGe/GitHub-Demo
|
f3710f73b0828ef500343932d46c61d3b1e04ba9
|
[
"Apache-2.0"
] | null | null | null |
src/using_tips/decorator/dec3.py
|
HuangHuaBingZiGe/GitHub-Demo
|
f3710f73b0828ef500343932d46c61d3b1e04ba9
|
[
"Apache-2.0"
] | null | null | null |
def log(func):
    def wrapper(area):
        print('call %s():' % func.__name__)
        return func(area)
    return wrapper
def now(area):
    print(area, '2016-01-23')
now = log(now)
now('Beijing')
print('The name of function now() is:', now.__name__)
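# Expected output (illustrative):
#   call now():
#   Beijing 2016-01-23
#   The name of function now() is: wrapper
# The name is 'wrapper' because log() does not use functools.wraps to copy now's metadata.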
| 13.75 | 46 | 0.570909 |
8b1ea3e1923b29fff66477bdaf9bc60977915398
| 1,122 |
py
|
Python
|
rawio/raw/metadata.py
|
hdkai/Raw-IO
|
f0fa928d7ef59a363c6f4c876d642af6dede6ae4
|
[
"Apache-2.0"
] | null | null | null |
rawio/raw/metadata.py
|
hdkai/Raw-IO
|
f0fa928d7ef59a363c6f4c876d642af6dede6ae4
|
[
"Apache-2.0"
] | null | null | null |
rawio/raw/metadata.py
|
hdkai/Raw-IO
|
f0fa928d7ef59a363c6f4c876d642af6dede6ae4
|
[
"Apache-2.0"
] | null | null | null |
#
# RawIO
# Copyright (c) 2021 Yusuf Olokoba.
#
from piexif import load as load_exif, dump as dump_exif
from PIL import Image
def exifread (image_path: str) -> dict: # INCOMPLETE # Switch to whitelist
"""
Read the EXIF dictionary from a file.
Parameters:
image_path (str): Path to image.
Returns:
dict: EXIF metadata dictionary.
"""
# Load exif
exif = load_exif(image_path)
# Strip tags
if "thumbnail" in exif:
del exif["thumbnail"]
if "0th" in exif and 700 in exif["0th"]:
del exif["0th"][700]
if "Interop" in exif:
del exif["Interop"]
if "Exif" in exif:
if 37500 in exif["Exif"]:
del exif["Exif"][37500]
if 37510 in exif["Exif"]:
del exif["Exif"][37510]
# Return
return exif
def exifwrite (image: Image.Image, metadata: dict) -> Image.Image:
"""
Write EXIF metadata to an image.
Parameters:
image (PIL.Image): Image to write metadata to.
        metadata (dict): Metadata dictionary.
    Returns:
        PIL.Image.Image: The image with the serialized EXIF attached to its info dict.
    """
image.info["exif"] = dump_exif(metadata)
return image
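# Minimal usage sketch (hypothetical file paths):
#   exif = exifread("photo.jpg")   # read and strip bulky tags
#   img = Image.open("photo.jpg")
#   img = exifwrite(img, exif)     # attach the cleaned EXIF to the PIL image
#   img.save("photo_out.jpg", exif=img.info["exif"])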
| 24.933333 | 74 | 0.599822 |
8c6b3feac4cc95e58b724ebb95a164d916e199bd
| 4,060 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/visualization/plot_net_speeds.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/visualization/plot_net_speeds.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/visualization/plot_net_speeds.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
#!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2008-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file plot_net_speeds.py
# @author Daniel Krajzewicz
# @author Michael Behrisch
# @date 2014-02-19
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
if 'SUMO_HOME' in os.environ:
sys.path.append(os.path.join(os.environ['SUMO_HOME'], 'tools'))
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import sumolib # noqa
from sumolib.visualization import helpers # noqa
import matplotlib.pyplot as plt # noqa
import matplotlib # noqa
def main(args=None):
"""The main function; parses options and plots"""
# ---------- build and read options ----------
from optparse import OptionParser
optParser = OptionParser()
optParser.add_option("-n", "--net", dest="net", metavar="FILE",
help="Defines the network to read")
optParser.add_option("--edge-width", dest="defaultWidth",
type="float", default=1, help="Defines the edge width")
optParser.add_option("--edge-color", dest="defaultColor",
default='k', help="Defines the edge color")
optParser.add_option("--minV", dest="minV",
type="float", default=None, help="Define the minimum value boundary")
optParser.add_option("--maxV", dest="maxV",
type="float", default=None, help="Define the maximum value boundary")
optParser.add_option("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="If set, the script says what it's doing")
# standard plot options
helpers.addInteractionOptions(optParser)
helpers.addPlotOptions(optParser)
# parse
options, remaining_args = optParser.parse_args(args=args)
if options.net is None:
print("Error: a network to load must be given.")
return 1
if options.verbose:
print("Reading network from '%s'" % options.net)
net = sumolib.net.readNet(options.net)
speeds = {}
minV = None
maxV = None
for e in net._id2edge:
v = net._id2edge[e]._speed
if minV is None or minV > v:
minV = v
if maxV is None or maxV < v:
maxV = v
speeds[e] = v
if options.minV is not None:
minV = options.minV
if options.maxV is not None:
maxV = options.maxV
# if options.logColors:
# helpers.logNormalise(colors, maxColorValue)
# else:
# helpers.linNormalise(colors, minColorValue, maxColorValue)
helpers.linNormalise(speeds, minV, maxV)
for e in speeds:
speeds[e] = helpers.getColor(options, speeds[e], 1.)
fig, ax = helpers.openFigure(options)
ax.set_aspect("equal", None, 'C')
helpers.plotNet(net, speeds, {}, options)
# drawing the legend, at least for the colors
print("%s -> %s" % (minV, maxV))
sm = matplotlib.cm.ScalarMappable(
cmap=matplotlib.cm.get_cmap(options.colormap), norm=matplotlib.colors.Normalize(vmin=minV, vmax=maxV))
# "fake up the array of the scalar mappable. Urgh..."
# (pelson, http://stackoverflow.com/questions/8342549/matplotlib-add-colorbar-to-a-sequence-of-line-plots)
sm._A = []
plt.colorbar(sm)
options.nolegend = True
helpers.closeFigure(fig, ax, options)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 38.666667 | 110 | 0.664778 |
8ca311cd2bc5ab67573cbe29594e505e30a2c2f1
| 1,332 |
py
|
Python
|
library/core/ptwebpage.py
|
ptphp/PtServer
|
d590360f853a64e989ba52591548b8a67390f27c
|
[
"BSD-3-Clause"
] | 1 |
2017-08-09T23:15:15.000Z
|
2017-08-09T23:15:15.000Z
|
library/core/ptwebpage.py
|
ptphp/PtServer
|
d590360f853a64e989ba52591548b8a67390f27c
|
[
"BSD-3-Clause"
] | null | null | null |
library/core/ptwebpage.py
|
ptphp/PtServer
|
d590360f853a64e989ba52591548b8a67390f27c
|
[
"BSD-3-Clause"
] | null | null | null |
from PySide.QtWebKit import QWebPage
from .ptnetworkaccessmanager import PtNetworkAccessManager
import webbrowser
class PtWebPage(QWebPage):
def __init__(self, parent):
QWebPage.__init__(self, parent)
manager = PtNetworkAccessManager(self)
manager.set_proxy("127.0.0.1:8888")
self.setNetworkAccessManager(manager)
def acceptNavigationRequest(self, frame, request, type):
if(type == QWebPage.NavigationTypeLinkClicked):
if(frame == self.mainFrame()):
self.view().load(request.url())
else:
webbrowser.open(request.url().toString())
return False
return QWebPage.acceptNavigationRequest(self, frame, request, type)
def userAgentForUrl(self, url):
return "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36"
def javaScriptConsoleMessage(self, message, line, source):
"""Prints client console message in current output stream."""
super(PtWebPage, self).javaScriptConsoleMessage(message, line,source)
log_type = "error" if "Error" in message else "info"
#Logger.log("%s(%d): %s" % (source or '<unknown>', line, message),
#sender="Frame", level=log_type)
#print message, line, source
| 44.4 | 126 | 0.662913 |
8cbcc4d4d1667278c220135b9e8d73b3174ac79c
| 417 |
py
|
Python
|
python_reference/useful_scripts/find_file.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
python_reference/useful_scripts/find_file.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
python_reference/useful_scripts/find_file.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:17:28.000Z
|
2019-05-10T09:17:28.000Z
|
# Sebastian Raschka 2014
#
# A Python function to find files in a directory based on a substring search.
import os
def find_files(substring, path):
results = []
for f in os.listdir(path):
if substring in f:
results.append(os.path.join(path, f))
return results
# E.g.
# find_files('Untitled', '/Users/sebastian/Desktop/')
# returns
# ['/Users/sebastian/Desktop/Untitled0.ipynb']
| 23.166667 | 77 | 0.669065 |
e88127452f8513b689893088aac6b48c4dbe3287
| 271 |
py
|
Python
|
Online-Judges/CodingBat/Python/Logic-01/Logic_1-04-caught_speeding.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3 |
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/CodingBat/Python/Logic-01/Logic_1-04-caught_speeding.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/CodingBat/Python/Logic-01/Logic_1-04-caught_speeding.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
def caught_speeding(speed, is_birthday):
    if speed <= 60 or (is_birthday and speed <= 65):
        return 0
    elif speed <= 80 or (is_birthday and speed <= 85):
        return 1
    else:
        return 2
| 33.875 | 60 | 0.638376 |
fa60fcccfcec138fda470cfb83d8c15d8eb2095d
| 411 |
py
|
Python
|
simple-tensorflow-demo/3.neural network/tf_3rd_2_training_data.py
|
crackedcd/Intern.MT
|
36398837af377a7e1c4edd7cbb15eabecd2c3103
|
[
"MIT"
] | 1 |
2019-07-05T03:42:17.000Z
|
2019-07-05T03:42:17.000Z
|
simple-tensorflow-demo/3.neural network/tf_3rd_2_training_data.py
|
crackedcd/Intern.MT
|
36398837af377a7e1c4edd7cbb15eabecd2c3103
|
[
"MIT"
] | null | null | null |
simple-tensorflow-demo/3.neural network/tf_3rd_2_training_data.py
|
crackedcd/Intern.MT
|
36398837af377a7e1c4edd7cbb15eabecd2c3103
|
[
"MIT"
] | 1 |
2019-06-24T05:56:55.000Z
|
2019-06-24T05:56:55.000Z
|
import numpy as np
import matplotlib.pyplot as plt
# Construct the training data
train_X = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
# Plot the training data
plt.plot(train_X, train_Y, 'ro', label='training data')
plt.legend()
plt.show()
| 29.357143 | 130 | 0.613139 |
a8eb55650d3c64a8cc7d22736d87188168bae76b
| 1,367 |
py
|
Python
|
leetcode-and-lintcode/DP/arithmetic_slices_982_lintcode.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode-and-lintcode/DP/arithmetic_slices_982_lintcode.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode-and-lintcode/DP/arithmetic_slices_982_lintcode.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
'''
Count how many new slices each extended arithmetic run contributes.
Keep in mind that the problem asks for contiguous subarrays (slices), so the
usual permutation/combination counting formulas do not apply directly.
For example, [1, 2, 3, 4] contains 3 arithmetic slices: [1,2,3], [2,3,4], [1,2,3,4].
'''
'''
Solution 1: non-DP, O(1) extra space
'''
class Solution1:
"""
@param A: an array
@return: the number of arithmetic slices in the array A.
"""
def numberOfArithmeticSlices(self, A):
# Write your code here
def getSliceNum(num):
return num * (num+1) // 2
n = len(A)
if n < 3:
return 0
res = 0
ntriples = 0
for i in range(1, n-1):
if 2 * A[i] == A[i-1] + A[i+1]:
ntriples += 1
else:
res += getSliceNum(ntriples)
ntriples = 0
return res + getSliceNum(ntriples) if ntriples > 0 else res
'''
Solution 2: DP, O(n) extra space
dp[i] is the number of arithmetic slices that end at index i; the answer is sum(dp).
NineChapter's solution follows the same idea, but less concisely.
'''
class Solution2:
"""
@param A: an array
@return: the number of arithmetic slices in the array A.
"""
def numberOfArithmeticSlices(self, A):
if A is None or len(A) < 3:
return 0
n = len(A)
dp = [0] * n
for i in range(2, n):
if 2 * A[i-1] == A[i] + A[i-2]:
dp[i] = dp[i-1]+1
return sum(dp)
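# --- Added usage sketch (not part of the original solution file) ---
# Both solutions should agree; e.g. [1, 2, 3, 4] has 3 arithmetic slices and
# [1, 3, 5, 7, 9] has 6.
if __name__ == '__main__':
    for A in ([1, 2, 3, 4], [1, 3, 5, 7, 9], [7, 7, 7, 7]):
        print(A, Solution1().numberOfArithmeticSlices(A), Solution2().numberOfArithmeticSlices(A))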
| 21.359375 | 78 | 0.529627 |
7e2473ce5ebf177c07e496ec6d4c9e282c6275ac
| 879 |
py
|
Python
|
mod/units/gpa_handler.py
|
HeraldStudio/wechat
|
b023b7460a6b4284ea782333e13f24d169ddaff4
|
[
"MIT"
] | 1 |
2015-06-28T15:26:52.000Z
|
2015-06-28T15:26:52.000Z
|
mod/units/gpa_handler.py
|
HeraldStudio/wechat
|
b023b7460a6b4284ea782333e13f24d169ddaff4
|
[
"MIT"
] | null | null | null |
mod/units/gpa_handler.py
|
HeraldStudio/wechat
|
b023b7460a6b4284ea782333e13f24d169ddaff4
|
[
"MIT"
] | 6 |
2015-03-20T16:36:22.000Z
|
2021-08-28T07:58:18.000Z
|
# -*- coding: utf-8 -*-
# @Date : 2014-07-01 22:00:36
# @Author : [email protected]
import tornado.web
from ..models.gpa import Detail as GPAD
from collections import OrderedDict
class GPAHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
def get(self, openid):
items = self.db.query(GPAD).filter(GPAD.openid == openid).all()
detail = OrderedDict()
semesters = [item.semester for item in items]
for semester in semesters:
detail[semester] = []
for item in items:
detail[item.semester].append([
item.course, item.credit, item.score,
item.score_type, item.extra])
self.render('gpa.html', detail=detail)
self.db.close()
def on_finish(self):
self.db.close()
| 28.354839 | 72 | 0.588168 |
0e297b2c95c4f81d337027512d3154e1afab7019
| 913 |
py
|
Python
|
DCM/programs/bilinearModel.py
|
l-althueser/NiMoNa_DCM16
|
d93ee253ade8ccb4da1f17a91f064258adcf29c0
|
[
"BSD-2-Clause"
] | 1 |
2018-04-20T07:44:11.000Z
|
2018-04-20T07:44:11.000Z
|
DCM/programs/bilinearModel.py
|
l-althueser/NiMoNa_DCM16
|
d93ee253ade8ccb4da1f17a91f064258adcf29c0
|
[
"BSD-2-Clause"
] | null | null | null |
DCM/programs/bilinearModel.py
|
l-althueser/NiMoNa_DCM16
|
d93ee253ade8ccb4da1f17a91f064258adcf29c0
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Tobias
Timo
Description:
Implementation of the bilinear model for simulating brain activity in response
to an external stimulus.
How it works:
The state equations are solved with the RK4 or Euler method. To start a simulation, the following parameters must be passed:
u: excitations/stimulus
theta: [intrinsic coupling, induced coupling(s), external influence]
The parameters z and tstep are taken over from the Runge-Kutta scheme.
Python version:
3.5.1
"""
import numpy as np
def bilinearModel(z, u, theta, tstep):
    # Compute the time derivative of z
    D = 0
    for i in range(len(theta[1])):  # compute sum_j u_j * B_j
D = D + np.dot(theta[1][i], u[i,tstep])
z_dot = np.dot((theta[0] + D),z[:,tstep]) + np.dot(theta[2],u[:,tstep])
return np.array([z_dot]).T
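# --- Added usage sketch (not from the original file; shapes and values are assumptions) ---
# Two regions, one stimulus: theta[0] is the 2x2 intrinsic coupling A, theta[1] a
# list with one 2x2 induced-coupling matrix B, theta[2] the 2x1 external influence C.
# z and u hold one column per time step.
if __name__ == '__main__':
    A = np.array([[-1.0, 0.0], [0.2, -1.0]])
    B = [np.array([[0.0, 0.0], [0.5, 0.0]])]
    C = np.array([[1.0], [0.0]])
    z = np.zeros((2, 1))   # state at time step 0
    u = np.ones((1, 1))    # stimulus at time step 0
    print(bilinearModel(z, u, [A, B, C], 0))  # time derivative of z, shape (2, 1)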
| 29.451613 | 149 | 0.685652 |
7ee6db1839dca7ddd5a36d20b1c6e72ed0026fb1
| 1,016 |
py
|
Python
|
OMH/2021/crypto/cry_for_help/cry_for_help.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
OMH/2021/crypto/cry_for_help/cry_for_help.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
OMH/2021/crypto/cry_for_help/cry_for_help.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
import random
import flag
import collections
import os
import pow
def xorshift64(x):
x ^= (x << 13) & 0xffffffffffffffff
x ^= (x >> 7) & 0xffffffffffffffff
x ^= (x << 17) & 0xffffffffffffffff
return x
def main():
r = os.urandom(10)
random.seed(r)
SEEDS = 18
seed = input("give me the seed: ")
seed = seed.strip()
if(len(seed)) != SEEDS:
print("seed should be "+str(SEEDS)+" bytes long!")
exit()
seed = list(seed)
random.shuffle(seed)
counts = collections.Counter(seed)
if counts.most_common()[0][1] > 3:
print ("You can't use the same number more than 3 times!")
exit()
int16 = lambda x: int(x,16)
seed = list(map(int16,seed))
S = 0x0
for i in range(SEEDS):
S*=16
S+=seed[i]
count = 2+seed[0]+seed[1]
for i in range(count):
S=xorshift64(S)
last = S & 0xFFFF
print("The last 2 bytes are: "+str(last))
check = int(input("give me the number: "))
if check == S:
print(flag.flag)
else:
print("Nope!")
if __name__ == '__main__':
pow.check_pow(27)
main()
| 18.142857 | 61 | 0.628937 |
382c38ad3bc07f410983f32922400f6bfc98e253
| 4,119 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_vlan.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_vlan.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_vlan.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.onyx import onyx_vlan
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..onyx_module import TestOnyxModule, load_fixture
class TestOnyxVlanModule(TestOnyxModule):
module = onyx_vlan
def setUp(self):
super(TestOnyxVlanModule, self).setUp()
self.mock_get_config = patch.object(
onyx_vlan.OnyxVlanModule, "_get_vlan_config")
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible_collections.community.general.plugins.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_version = patch.object(
onyx_vlan.OnyxVlanModule, "_get_os_version")
self.get_version = self.mock_get_version.start()
def tearDown(self):
super(TestOnyxVlanModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_get_version.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_vlan_show.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
self.get_version.return_value = "3.6.5000"
def test_vlan_no_change(self):
set_module_args(dict(vlan_id=20))
self.execute_module(changed=False)
def test_vlan_remove_name(self):
set_module_args(dict(vlan_id=10, name=''))
commands = ['vlan 10 no name']
self.execute_module(changed=True, commands=commands)
def test_vlan_change_name(self):
set_module_args(dict(vlan_id=10, name='test-test'))
commands = ['vlan 10 name test-test']
self.execute_module(changed=True, commands=commands)
def test_vlan_create(self):
set_module_args(dict(vlan_id=30))
commands = ['vlan 30', 'exit']
self.execute_module(changed=True, commands=commands)
def test_vlan_create_with_name(self):
set_module_args(dict(vlan_id=30, name='test-test'))
commands = ['vlan 30', 'exit', 'vlan 30 name test-test']
self.execute_module(changed=True, commands=commands)
def test_vlan_remove(self):
set_module_args(dict(vlan_id=20, state='absent'))
commands = ['no vlan 20']
self.execute_module(changed=True, commands=commands)
def test_vlan_remove_not_exist(self):
set_module_args(dict(vlan_id=30, state='absent'))
self.execute_module(changed=False)
def test_vlan_aggregate(self):
aggregate = list()
aggregate.append(dict(vlan_id=30))
aggregate.append(dict(vlan_id=20))
set_module_args(dict(aggregate=aggregate))
commands = ['vlan 30', 'exit']
self.execute_module(changed=True, commands=commands)
def test_vlan_aggregate_purge(self):
aggregate = list()
aggregate.append(dict(vlan_id=30))
aggregate.append(dict(vlan_id=20))
set_module_args(dict(aggregate=aggregate, purge=True))
commands = ['vlan 30', 'exit', 'no vlan 10', 'no vlan 1']
self.execute_module(changed=True, commands=commands)
| 38.495327 | 103 | 0.706239 |