| column | dtype | values | nullable (⌀) |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 6 .. 782k | no |
| ext | string | 7 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 4 .. 237 | no |
| max_stars_repo_name | string | length 6 .. 72 | no |
| max_stars_repo_head_hexsha | string | length 40 | no |
| max_stars_repo_licenses | list | | no |
| max_stars_count | int64 | 1 .. 53k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 4 .. 184 | no |
| max_issues_repo_name | string | length 6 .. 72 | no |
| max_issues_repo_head_hexsha | string | length 40 | no |
| max_issues_repo_licenses | list | | no |
| max_issues_count | int64 | 1 .. 27.1k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 4 .. 184 | no |
| max_forks_repo_name | string | length 6 .. 72 | no |
| max_forks_repo_head_hexsha | string | length 40 | no |
| max_forks_repo_licenses | list | | no |
| max_forks_count | int64 | 1 .. 12.2k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 6 .. 782k | no |
| avg_line_length | float64 | 2.75 .. 664k | no |
| max_line_length | int64 | 5 .. 782k | no |
| alphanum_fraction | float64 | 0 .. 1 | no |
hexsha: 57e11bef0ed4bb5532b0fb2f87ab3defa3ee0f08 | size: 17,229 | ext: py | lang: Python
max_stars:  path=python/etc/preprocessing/jisc/jisc_preprocessing.py | repo=sma-h/openapc-de @ 0ec2d42d525219d801f71538f5b30ca6fecd9d3a | licenses=["Cube"] | count=89 | events=2015-02-13T13:46:06.000Z .. 2022-03-13T16:42:44.000Z
max_issues: path=python/etc/preprocessing/jisc/jisc_preprocessing.py | repo=sma-h/openapc-de @ 0ec2d42d525219d801f71538f5b30ca6fecd9d3a | licenses=["Cube"] | count=91 | events=2015-03-12T13:31:36.000Z .. 2022-01-14T07:37:37.000Z
max_forks:  path=python/etc/preprocessing/jisc/jisc_preprocessing.py | repo=sma-h/openapc-de @ 0ec2d42d525219d801f71538f5b30ca6fecd9d3a | licenses=["Cube"] | count=138 | events=2015-03-04T15:23:43.000Z .. 2022-03-09T15:11:52.000Z
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
import csv
import datetime
import json
from os import path
import re
import sys
from urllib.error import HTTPError, URLError
ARG_HELP_STRINGS = {
"source_file": "The jisc csv file",
"exchange_rates_cache_file": "An optional cache file for ECB exchange rates",
"no_decorations": "Do not use ANSI coded colors in console output",
"jisc_file_format": "The format type of the Jisc input file"
}
FIELDNAMES = {
"2014_16": {
"article": [
"APC paid (actual currency) including VAT if charged",
"APC paid (£) including VAT (calculated)",
"APC paid (£) including VAT if charged",
"Currency of APC",
"DOI",
"Date of APC payment",
"Date of initial application by author",
"ISSN0",
"Institution",
"Journal",
"Licence",
"PubMed Central (PMC) ID",
"PubMed ID",
"Publisher",
"TCO year",
"Type of publication",
"Drop?",
"Year of publication",
"period",
"is_hybrid",
"euro"
],
"book": [
"Line number",
"APC paid (actual currency) including VAT if charged",
"APC paid (£) including VAT (calculated)",
"APC paid (£) including VAT if charged",
"Article title",
"Currency of APC",
"DOI",
"Date of APC payment",
"Date of initial application by author",
"Institution",
"Journal",
"Licence",
"Publisher",
"TCO year",
"Type of publication",
"Year of publication",
"period",
"euro",
"ISBN"
]
},
"2017": {
"article": [
"APC paid (£) including VAT if charged",
"DOI",
"Date of APC payment",
"ISSN0",
"Institution",
"Journal",
"Licence",
"PubMed ID",
"Publisher",
"TCO year",
"Type of publication",
"Drop?",
"Period of APC payment",
"period",
"is_hybrid",
"euro"
],
"book": [
"Line number",
"APC paid (£) including VAT if charged",
"Article title",
"DOI",
"Date of APC payment",
"Institution",
"Journal",
"Licence",
"Publisher",
"TCO year",
"Type of publication",
"Period of APC payment",
"period",
"euro",
"ISBN"
]
},
"2018": {
"article": [
"Institution",
"Date of acceptance",
"PubMed ID",
"DOI",
"Publisher",
"Journal",
"Type of publication",
"Date of publication",
"Date of APC payment",
"APC paid (£) including VAT if charged",
"period",
"is_hybrid",
"euro"
],
"book": [
"Line number",
"Institution",
"Date of acceptance",
"DOI",
"Publisher",
"Journal",
"Type of publication",
"Article title",
"Date of publication",
"Date of APC payment",
"APC paid (£) including VAT if charged",
"period",
"euro",
"ISBN"
]
}
}
PUBLICATION_TYPES_BL = [
"Book chapter",
"Book edited",
"Conference Paper/Proceeding/Abstract",
"Letter"
]
PUBLICATION_TYPES_BOOKS = [
"Book",
"Monograph"
]
DATE_DAY_RE = {
"2014_16": re.compile("(?P<year>[0-9]{4})-?(?P<month>[0-9]{2})?-?(?P<day>[0-9]{2})?"),
"2017": re.compile("(?P<year>[0-9]{4})-?(?P<month>[0-9]{2})?-?(?P<day>[0-9]{2})?"),
"2018": re.compile("(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/(?P<year>[0-9]{4})")
}
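# A quick sketch of what these patterns accept (illustrative values, not taken
# from actual Jisc data):
#   DATE_DAY_RE["2014_16"].match("2015-03-12")  # year=2015, month=03, day=12
#   DATE_DAY_RE["2014_16"].match("2015")        # year=2015, month/day are None
#   DATE_DAY_RE["2018"].match("3/12/2018")      # month=3, day=12, year=2018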
DATE_STRPTIME = {
"2014_16": "%Y-%m-%d",
"2017": "%Y-%m-%d",
"2018": "%m/%d/%Y"
}
PERIOD_FIELD_SOURCE = {
"2014_16": [
"Date of APC payment",
"Year of publication",
"Date of initial application by author",
"TCO year"
],
"2017": [
"Date of APC payment",
"Period of APC payment",
"TCO year"
],
"2018": [
"Date of APC payment",
"Date of publication",
"Date of acceptance"
]
}
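# The field lists above are tried in order: the first field whose value yields
# a parseable year via DATE_DAY_RE becomes the row's "period" value (see the
# period field generation loop in main()).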
EXCHANGE_RATES_CACHE = {}
EXCHANGE_RATES_CACHE_FILE = None
DELETE_REASONS = {}
CURRENT_YEAR = 2017
#CURRENT_YEAR = datetime.datetime.now().year
NO_DECORATIONS = False
def delete_line(line_dict, reason):
_print("r", " - " + reason + ", line deleted")
if reason not in DELETE_REASONS:
DELETE_REASONS[reason] = 1
else:
DELETE_REASONS[reason] += 1
for key in line_dict:
line_dict[key] = ""
def line_as_list(line_dict, pub_type):
return [line_dict[field] for field in FIELDNAMES[FORMAT][pub_type]]
def is_money_value(string):
try:
number = float(string)
return number > 0
except ValueError:
return False
def is_valid_date(date_match_obj):
gd = date_match_obj.groupdict()
if gd["year"] is None or gd["month"] is None or gd["day"] is None:
return False
try:
date = datetime.datetime(int(gd["year"]), int(gd["month"]), int(gd["day"]))
if date > datetime.datetime.now():
return False
return True
except ValueError:
return False
def shutdown():
_print("r", "Updating exchange rates cache...")
with open(EXCHANGE_RATES_CACHE_FILE, "w") as f:
f.write(json.dumps(EXCHANGE_RATES_CACHE, sort_keys=True, indent=4, separators=(',', ': ')))
f.flush()
_print("r", "Done.")
sys.exit()
def _print(color, s):
if color in ["r", "y", "g", "b"] and not NO_DECORATIONS:
getattr(oat, "print_" + color)(s)
else:
print(s)
def get_exchange_rate(currency, frequency, date, jisc_format):
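    # frequency is an ECB series frequency: "D" looks up a daily rate (with the
    # weekend/holiday lookahead below), "A" looks up an average annual rate, in
    # which case `date` is just a year string.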
if currency not in EXCHANGE_RATES_CACHE:
EXCHANGE_RATES_CACHE[currency] = {}
if frequency not in EXCHANGE_RATES_CACHE[currency]:
EXCHANGE_RATES_CACHE[currency][frequency] = {}
if not len(EXCHANGE_RATES_CACHE[currency][frequency]):
try:
rates = oat.get_euro_exchange_rates(currency, frequency)
EXCHANGE_RATES_CACHE[currency][frequency] = rates
except HTTPError as httpe:
_print("r", "HTTPError while querying the ECB data warehouse: " + httpe.reason)
shutdown()
except URLError as urle:
_print("r", "URLError while querying the ECB data warehouse: " + urle.reason)
shutdown()
except ValueError as ve:
_print("r", "ValueError while querying the ECB data warehouse: " + ve.reason)
shutdown()
if frequency == "D":
        # The ECB does not report exchange rates for all dates because of
        # weekends and holidays, so in some cases we have to look a few days
        # ahead to find the next available rate.
day = datetime.datetime.strptime(date, DATE_STRPTIME[jisc_format])
for i in range(6):
future_day = day + datetime.timedelta(days=i)
search_day = future_day.strftime("%Y-%m-%d")
if search_day in EXCHANGE_RATES_CACHE[currency][frequency]:
_print("y", " [Exchange rates: Cached value used]")
if i > 0:
msg = " [Exchange rates: No rate found for date {}, used value for {} instead]"
_print("y", msg.format(date, search_day))
return EXCHANGE_RATES_CACHE[currency][frequency][search_day]
_print("r", "Error during Exchange rates lookup: No rate for " + date + " or any following day!")
shutdown()
else:
return EXCHANGE_RATES_CACHE[currency][frequency][date]
def calculate_euro_value(line, jisc_format):
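    # Conversion strategy, in order: (1) if an original-currency APC exists
    # (2014_16 format only), use it directly when it is EUR, otherwise convert
    # it via an ECB daily rate (or an average annual rate when the payment date
    # is unusable); (2) otherwise convert the pound value via the GBP rate;
    # (3) if neither works, delete the line.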
payment_date = line["Date of APC payment"]
date_match = DATE_DAY_RE[jisc_format].match(payment_date)
if jisc_format in ["2017", "2018"]:
apc_pound = line["APC paid (£) including VAT if charged"]
field_used_for_pound_value = "APC paid (£) including VAT if charged"
elif jisc_format == "2014_16":
apc_orig = line["APC paid (actual currency) including VAT if charged"]
apc_pound = ""
field_used_for_pound_value = ""
for field in ["APC paid (£) including VAT (calculated)", "APC paid (£) including VAT if charged"]:
if is_money_value(line[field]):
apc_pound = line[field]
field_used_for_pound_value = field
break
if is_money_value(apc_orig):
currency = line["Currency of APC"].strip()
if currency == "EUR":
line["euro"] = apc_orig
msg = " - Created euro field ('{}') by using the value in 'APC paid (actual currency) including VAT if charged' directly since the currency is EUR"
_print("g", msg.format(apc_orig))
elif len(currency) == 3:
if date_match and is_valid_date(date_match):
rate = get_exchange_rate(currency, "D", payment_date, jisc_format)
euro_value = round(float(apc_orig) / float(rate), 2)
line["euro"] = str(euro_value)
msg = " - Created euro field ('{}') by dividing the value in 'APC paid (actual currency) including VAT if charged' ({}) by {} (EUR -> {} conversion rate on {}) [ECB]"
msg = msg.format(euro_value, apc_orig, rate, currency, payment_date)
_print("g", msg)
else:
year = line["period"]
if int(year) >= CURRENT_YEAR:
del_msg = "period ({}) too recent to determine average yearly conversion rate".format(year)
delete_line(line, del_msg)
return
try:
rate = get_exchange_rate(currency, "A", year, jisc_format)
except KeyError:
_print("r", "KeyError: An average yearly conversion rate is missing (" + currency + ", " + year + ")")
shutdown()
euro_value = round(float(apc_orig) / float(rate), 2)
line["euro"] = str(euro_value)
msg = " - Created euro field ('{}') by dividing the value in 'APC paid (actual currency) including VAT if charged' ({}) by {} (avg EUR -> {} conversion rate in {}) [ECB]"
msg = msg.format(euro_value, apc_orig, rate, currency, year)
_print("g", msg)
if line["euro"] == "" and is_money_value(apc_pound):
if date_match and is_valid_date(date_match):
rate = get_exchange_rate("GBP", "D", payment_date, jisc_format)
euro_value = round(float(apc_pound) / float(rate), 2)
line["euro"] = str(euro_value)
msg = " - Created euro field ('{}') by dividing the value in '{}' ({}) by {} (EUR -> GBP conversion rate on {}) [ECB]"
msg = msg.format(euro_value, field_used_for_pound_value, apc_pound, rate, payment_date)
_print("g", msg)
else:
year = line["period"]
if int(year) > CURRENT_YEAR:
del_msg = "period ({}) too recent to determine average yearly conversion rate".format(year)
delete_line(line, del_msg)
return
try:
rate = get_exchange_rate("GBP", "A", year, jisc_format)
except KeyError:
_print("r", "KeyError: An average yearly conversion rate is missing (GBP, " + year + ")")
shutdown()
euro_value = round(float(apc_pound) / float(rate), 2)
line["euro"] = str(euro_value)
msg = " - Created euro field ('{}') by dividing the value in '{}' ({}) by {} (avg EUR -> GBP conversion rate in {}) [ECB]"
msg = msg.format(euro_value, field_used_for_pound_value, apc_pound, rate, year)
_print("g", msg)
if line["euro"] == "":
delete_line(line, "Unable to properly calculate a converted euro value")
def main():
global EXCHANGE_RATES_CACHE, EXCHANGE_RATES_CACHE_FILE, NO_DECORATIONS, FORMAT
parser = argparse.ArgumentParser()
parser.add_argument("source_file", help=ARG_HELP_STRINGS["source_file"])
parser.add_argument("jisc_file_format", choices=list(FIELDNAMES), help=ARG_HELP_STRINGS["jisc_file_format"])
parser.add_argument("-c", "--exchange_rates_cache_file", help=ARG_HELP_STRINGS["exchange_rates_cache_file"], default="_exchange_rates_cache.json")
parser.add_argument("-n", "--no-decorations", help=ARG_HELP_STRINGS["no_decorations"], action="store_true")
args = parser.parse_args()
NO_DECORATIONS = args.no_decorations
EXCHANGE_RATES_CACHE_FILE = args.exchange_rates_cache_file
FORMAT = args.jisc_file_format
if path.isfile(args.exchange_rates_cache_file):
with open(EXCHANGE_RATES_CACHE_FILE, "r") as f:
try:
EXCHANGE_RATES_CACHE = json.loads(f.read())
except ValueError:
_print("r", "Could not decode a cache structure from " + EXCHANGE_RATES_CACHE_FILE + ", starting with an empty cache.")
f = open(args.source_file, "r", encoding="utf-8")
reader = csv.DictReader(f)
article_content = [list(FIELDNAMES[FORMAT]["article"])]
book_content = [list(FIELDNAMES[FORMAT]["book"])]
empty_article_line = ["" for i in range(len(FIELDNAMES[FORMAT]["article"]))]
empty_book_line = ["" for i in range(len(FIELDNAMES[FORMAT]["book"]))]
for line in reader:
line["period"] = ""
line["euro"] = ""
line["Journal"] = line["Journal"].replace("\n", " ")
_print("b", "--- Analysing line " + str(reader.line_num) + " ---")
is_book = False
pub_type = line["Type of publication"]
if pub_type in PUBLICATION_TYPES_BOOKS:
line["Line number"] = str(reader.line_num)
line["ISBN"] = ""
is_book = True
else:
line["is_hybrid"] = ""
# Publication blacklist checking
if pub_type in PUBLICATION_TYPES_BL and not is_book:
delete_line(line, "Blacklisted pub type ('" + pub_type + "')")
article_content.append(list(empty_article_line))
continue
# DOI checking
if len(line["DOI"].strip()) == 0 and not is_book:
delete_line(line, "Empty DOI")
article_content.append(list(empty_article_line))
continue
# Drop checking
if "Drop?" in FIELDNAMES[FORMAT]["article"] and line["Drop?"] == "1":
delete_line(line, "Drop mark found")
article_content.append(list(empty_article_line))
continue
# period field generation
for source_field in PERIOD_FIELD_SOURCE[FORMAT]:
content = line[source_field].strip()
match = DATE_DAY_RE[FORMAT].match(content)
if match:
year = match.groupdict()["year"]
if int(year) > CURRENT_YEAR:
continue
line["period"] = year
msg = " - Created period field ('{}') by parsing value '{}' in column '{}'".format(year, content, source_field)
_print("g", msg)
break
else:
delete_line(line, "Unable to determine payment date for period column")
article_content.append(list(empty_article_line))
continue
# euro field generation
calculate_euro_value(line, FORMAT)
if is_book:
if line["Line number"] != "":
book_content.append(line_as_list(line, "book"))
delete_line(line, "Book content (extracted to separate file)")
article_content.append(list(empty_article_line))
else:
article_content.append(line_as_list(line, "article"))
with open('out.csv', 'w') as out:
writer = oat.OpenAPCUnicodeWriter(out, None, False, True)
writer.write_rows(article_content)
with open('out_books.csv', 'w') as out:
writer = oat.OpenAPCUnicodeWriter(out, None, False, True)
writer.write_rows(book_content)
print("\n\nPreprocessing finished, deleted articles overview:")
sorted_reasons = sorted(DELETE_REASONS.items(), key=lambda x: x[1])
sorted_reasons.reverse()
for item in sorted_reasons:
_print("r", item[0].ljust(72) + str(item[1]))
_print("r,", "-------------------------------------------------")
_print("r", "Total".ljust(72) + str(sum(DELETE_REASONS.values())))
shutdown()
if __name__ == '__main__' and __package__ is None:
sys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))))
import openapc_toolkit as oat
main()
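# Usage sketch (hypothetical input file name; the script writes out.csv and
# out_books.csv to the current directory):
#   python jisc_preprocessing.py apc_report.csv 2017 -c _exchange_rates_cache.json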
avg_line_length: 38.371938 | max_line_length: 192 | alphanum_fraction: 0.555111
hexsha: 57ea82b16b274f5350cda5f8ffb8033e48a01085 | size: 1,548 | ext: py | lang: Python
max_stars:  path=methods/transformers/examples/seq2seq/save_len_file.py | repo=INK-USC/RiddleSense @ a3d57eaf084da9cf6b77692c608e2cd2870fbd97 | licenses=["MIT"] | count=3 | events=2021-07-06T20:02:31.000Z .. 2022-03-27T13:13:01.000Z
max_issues: path=methods/transformers/examples/seq2seq/save_len_file.py | repo=INK-USC/RiddleSense @ a3d57eaf084da9cf6b77692c608e2cd2870fbd97 | licenses=["MIT"] | count=null | events=null
max_forks:  path=methods/transformers/examples/seq2seq/save_len_file.py | repo=INK-USC/RiddleSense @ a3d57eaf084da9cf6b77692c608e2cd2870fbd97 | licenses=["MIT"] | count=null | events=null
#!/usr/bin/env python
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
"""Save max(src_len, tgt_len) for each example to allow dynamic batching."""
tok = AutoTokenizer.from_pretrained(tokenizer_name)
train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
pad = tok.pad_token_id
def get_lens(ds):
dl = tqdm(
DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
desc=str(ds.len_file),
)
max_lens = []
for batch in dl:
src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
if consider_target:
for src, tgt in zip(src_lens, tgt_lens):
max_lens.append(max(src, tgt))
else:
max_lens.extend(src_lens)
return max_lens
train_lens = get_lens(train_ds)
val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
val_lens = get_lens(val_ds)
pickle_save(train_lens, train_ds.len_file)
pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
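# Usage sketch via the fire CLI (assumes data_dir follows the layout expected
# by Seq2SeqDataset, e.g. train.source/train.target and val.source/val.target):
#   python save_len_file.py facebook/bart-base ./cnn_dm --consider_target True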
avg_line_length: 35.181818 | max_line_length: 112 | alphanum_fraction: 0.654393
hexsha: 57ec6408fc157866d0a81c58f4feac352152e619 | size: 3,450 | ext: py | lang: Python
max_stars:  path=research/cv/pointnet2/src/pointnet2.py | repo=leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | licenses=["Apache-2.0"] | count=1 | events=2021-11-18T08:17:44.000Z .. 2021-11-18T08:17:44.000Z
max_issues: path=research/cv/pointnet2/src/pointnet2.py | repo=leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | licenses=["Apache-2.0"] | count=null | events=null
max_forks:  path=research/cv/pointnet2/src/pointnet2.py | repo=leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | licenses=["Apache-2.0"] | count=2 | events=2019-09-01T06:17:04.000Z .. 2019-10-04T08:39:45.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""network definition"""
import mindspore.nn as nn
import mindspore.ops as P
from mindspore.nn.loss.loss import _Loss
from mindspore.ops import functional as F
from src.layers import Dense
from src.pointnet2_utils import PointNetSetAbstraction
class PointNet2(nn.Cell):
"""PointNet2"""
def __init__(self, num_class, normal_channel=False):
super(PointNet2, self).__init__()
in_channel = 6 if normal_channel else 3
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstraction(npoint=512, radius=0.2, nsample=32,
in_channel=in_channel, mlp=[64, 64, 128],
group_all=False)
self.sa2 = PointNetSetAbstraction(npoint=128, radius=0.4, nsample=64,
in_channel=128 + 3, mlp=[128, 128, 256],
group_all=False)
self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None,
in_channel=256 + 3, mlp=[256, 512, 1024],
group_all=True)
self.fc1 = Dense(1024, 512)
self.bn1 = nn.BatchNorm1d(512)
self.drop1 = nn.Dropout(0.6)
self.fc2 = Dense(512, 256)
self.bn2 = nn.BatchNorm1d(256)
self.drop2 = nn.Dropout(0.5)
self.fc3 = Dense(256, num_class)
self.relu = P.ReLU()
self.reshape = P.Reshape()
self.log_softmax = P.LogSoftmax()
self.transpose = P.Transpose()
def construct(self, xyz):
"""
construct method
"""
if self.normal_channel:
norm = self.transpose(xyz[:, :, 3:], (0, 2, 1))
xyz = xyz[:, :, :3]
else:
norm = None
l1_xyz, l1_points = self.sa1(xyz, norm) # [B, 3, 512], [B, 128, 512]
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points) # [B, 3, 128], [B, 256, 128]
_, l3_points = self.sa3(l2_xyz, l2_points) # [B, 3, 1], [B, 1024, 1]
x = self.reshape(l3_points, (-1, 1024))
x = self.drop1(self.relu(self.bn1(self.fc1(x))))
x = self.drop2(self.relu(self.bn2(self.fc2(x))))
x = self.fc3(x)
x = self.log_softmax(x)
return x
class NLLLoss(_Loss):
"""NLL loss"""
def __init__(self, reduction='mean'):
super(NLLLoss, self).__init__(reduction)
self.one_hot = P.OneHot()
self.reduce_sum = P.ReduceSum()
def construct(self, logits, label):
"""
construct method
"""
label_one_hot = self.one_hot(label, F.shape(logits)[-1], F.scalar_to_array(1.0), F.scalar_to_array(0.0))
loss = self.reduce_sum(-1.0 * logits * label_one_hot, (1,))
return loss
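# Minimal usage sketch (assumed shapes: xyz is a [B, N, 3] point cloud, or
# [B, N, 6] when normal_channel=True; label holds integer class indices):
#   net = PointNet2(num_class=40)
#   loss_fn = NLLLoss()
#   logits = net(xyz)              # [B, num_class] log-probabilities
#   loss = loss_fn(logits, label)  # per-sample negative log-likelihood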
avg_line_length: 37.5 | max_line_length: 112 | alphanum_fraction: 0.576232
hexsha: a4f4e23a6d735a570fdd38b211528a6cdd0610d9 | size: 1,189 | ext: py | lang: Python
max_stars:  path=Problems/Depth-First Search/medium/NumberEnclaves/number_of_enclaves.py | repo=dolong2110/Algorithm-By-Problems-Python @ 31ecc7367aaabdd2b0ac0af7f63ca5796d70c730 | licenses=["MIT"] | count=1 | events=2021-08-16T14:52:05.000Z .. 2021-08-16T14:52:05.000Z
max_issues: path=Problems/Depth-First Search/medium/NumberEnclaves/number_of_enclaves.py | repo=dolong2110/Algorithm-By-Problems-Python @ 31ecc7367aaabdd2b0ac0af7f63ca5796d70c730 | licenses=["MIT"] | count=null | events=null
max_forks:  path=Problems/Depth-First Search/medium/NumberEnclaves/number_of_enclaves.py | repo=dolong2110/Algorithm-By-Problems-Python @ 31ecc7367aaabdd2b0ac0af7f63ca5796d70c730 | licenses=["MIT"] | count=null | events=null
from typing import List
def numEnclaves(grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
def dfs(row: int, col: int) -> int:
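        # Walking off the grid means this region touches the border; returning
        # -(m * n) forces the region's running total negative, so the caller
        # discards it (only fully enclosed regions stay positive).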
if row < 0 or row >= m or col < 0 or col >= n:
return - (m * n)
if grid[row][col] == 0:
return 0
grid[row][col] = 0
top = dfs(row - 1, col)
down = dfs(row + 1, col)
left = dfs(row, col - 1)
right = dfs(row, col + 1)
return 1 + top + down + left + right
ans = 0
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
enclosed = dfs(i, j)
ans += enclosed if enclosed > 0 else 0
return ans
# def numEnclaves(A: List[List[int]]) -> int:
# def dfs(i, j):
# A[i][j] = 0
# for x, y in (i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1):
# if 0 <= x < m and 0 <= y < n and A[x][y]:
# dfs(x, y)
#
# m, n = len(A), len(A[0])
# for i in range(m):
# for j in range(n):
# if A[i][j] == 1 and (i == 0 or j == 0 or i == m - 1 or j == n - 1):
# dfs(i, j)
# return sum(sum(row) for row in A)
avg_line_length: 27.022727 | max_line_length: 81 | alphanum_fraction: 0.417998
hexsha: 3527e785eb348ee6938e6cb836b183317a0e5f7c | size: 3,688 | ext: py | lang: Python
max_stars:  path=plugins/tff_backend/bizz/rogerthat.py | repo=threefoldfoundation/app_backend @ b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | licenses=["Apache-2.0"] | count=null | events=null
max_issues: path=plugins/tff_backend/bizz/rogerthat.py | repo=threefoldfoundation/app_backend @ b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | licenses=["Apache-2.0"] | count=178 | events=2017-08-02T12:58:06.000Z .. 2017-12-20T15:01:12.000Z
max_forks:  path=plugins/tff_backend/bizz/rogerthat.py | repo=threefoldfoundation/app_backend @ b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | licenses=["Apache-2.0"] | count=2 | events=2018-01-10T10:43:12.000Z .. 2018-03-18T10:42:23.000Z
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import json
import logging
from google.appengine.api import users
from mcfw.rpc import arguments, returns
from plugins.rogerthat_api.api import system, messaging, RogerthatApiException
from plugins.rogerthat_api.to import MemberTO
from plugins.rogerthat_api.to.messaging import Message, AnswerTO
from plugins.rogerthat_api.to.messaging.service_callback_results import TYPE_FLOW, FlowCallbackResultTypeTO, \
FlowMemberResultCallbackResultTO
from plugins.tff_backend.bizz import get_tf_token_api_key
from plugins.tff_backend.bizz.service import get_main_branding_hash
from plugins.tff_backend.plugin_consts import FLOW_ERROR_MESSAGE
from plugins.tff_backend.utils.app import get_app_user_tuple
def put_user_data(api_key, user_email, app_id, updated_user_data, retry=True):
# type: (unicode, unicode, unicode, dict, bool) -> None
try:
system.put_user_data(api_key, user_email, app_id, updated_user_data)
except RogerthatApiException as e:
if retry and e.code == 60011: # user not in friend list
raise Exception(e.message) # ensure task is retried
raise
@returns(unicode)
@arguments(member=MemberTO, message=unicode, answers=(None, [AnswerTO]), flags=(int, long), api_key=unicode)
def send_rogerthat_message(member, message, answers=None, flags=None, api_key=None):
# type: (MemberTO, unicode, list[AnswerTO], int, unicode) -> unicode
flags = flags if flags is not None else Message.FLAG_AUTO_LOCK
if not answers:
flags = flags | Message.FLAG_ALLOW_DISMISS
answers = []
return messaging.send(api_key=api_key or get_tf_token_api_key(),
parent_message_key=None,
members=[member],
message=message,
answers=answers or [],
flags=flags,
alert_flags=Message.ALERT_FLAG_VIBRATE,
branding=get_main_branding_hash(),
tag=None)
@returns(unicode)
@arguments(member=(MemberTO, users.User), flow=unicode)
def send_rogerthat_flow(member, flow):
if isinstance(member, users.User):
human_user, app_id = get_app_user_tuple(member)
member = MemberTO(member=human_user.email(), app_id=app_id, alert_flags=Message.ALERT_FLAG_VIBRATE)
messaging.start_local_flow(api_key=get_tf_token_api_key(),
xml=None,
members=[member],
flow=flow)
def create_error_message(message=None):
logging.debug('Sending error message')
if not message:
message = u'Oh no! An error occurred.\nHow embarrassing :-(\n\nPlease try again later.'
result = FlowCallbackResultTypeTO(flow=FLOW_ERROR_MESSAGE,
tag=None,
force_language=None,
flow_params=json.dumps({'message': message}))
return FlowMemberResultCallbackResultTO(type=TYPE_FLOW, value=result)
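# Usage sketch (hypothetical member values; when api_key is omitted,
# send_rogerthat_message falls back to get_tf_token_api_key()):
#   member = MemberTO(member=u'user@example.com', app_id=u'tff',
#                     alert_flags=Message.ALERT_FLAG_VIBRATE)
#   send_rogerthat_message(member, u'Welcome!')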
avg_line_length: 43.388235 | max_line_length: 110 | alphanum_fraction: 0.68167
hexsha: 1077af5d072f08e925497a9eb5ae52458f753edd | size: 1,830 | ext: py | lang: Python
max_stars:  path=challenge/Thiemo/test.py | repo=florianletsch/kinect-juggling @ f320cc0b55adf65d338d25986a03106a7e3f46ef | licenses=["Unlicense", "MIT"] | count=7 | events=2015-11-27T09:53:32.000Z .. 2021-01-13T17:35:54.000Z
max_issues: path=challenge/Thiemo/test.py | repo=florianletsch/kinect-juggling @ f320cc0b55adf65d338d25986a03106a7e3f46ef | licenses=["Unlicense", "MIT"] | count=null | events=null
max_forks:  path=challenge/Thiemo/test.py | repo=florianletsch/kinect-juggling @ f320cc0b55adf65d338d25986a03106a7e3f46ef | licenses=["Unlicense", "MIT"] | count=null | events=null
import timeit
tests = [
('Summierung uint8',
'''
import pyximport;
import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import summierungInt as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.uint8)
b = np.random.random_integers(0,255, %(dim)s).astype(np.uint8)
''',
'alg.sum(a, b)'),
('Summierung float32',
'''
import pyximport;
import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import summierungFloat as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.float32)
b = np.random.random_integers(0,255, %(dim)s).astype(np.float32)
''',
'alg.sum(a, b)'),
('Schwellenwert uint8',
'''
import pyximport;
import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import schwellenwertInt as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.uint8)
''',
'alg.scalar(a, 125)'),
('Schwellenwert float32',
'''
import pyximport;
import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import schwellenwertFloat as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.float32)
''',
'alg.scalar(a, 125.0)'),
('Histogramm uint8',
'''
import pyximport;
import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import histogrammInt as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.uint8)
''',
'alg.histogramm(a)'),
('Histogramm float32',
'''
import pyximport;
import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import histogrammFloat as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.float32)
''',
'alg.histogramm(a)')
]
for name, setup, cmd in tests:
for dim in ('(204,204)','(409,409)'):
print '%s %s: %f' % (name, dim, timeit.timeit(cmd, setup=setup % {'dim' : dim}, number=100))
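# Each setup string is a template: '%(dim)s' is substituted with the array
# shape before timeit runs it. The arrays are built once per setup, then the
# statement is executed number=100 times against them.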
avg_line_length: 26.142857 | max_line_length: 100 | alphanum_fraction: 0.708743
hexsha: dc47572676cafedf0e1f868c7160abfa4617d015 | size: 4,013 | ext: py | lang: Python
max_stars:  path=Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/data.py | repo=linuxonly801/awesome-DeepLearning @ b063757fa130c4d56aea5cce2e592610f1e169f9 | licenses=["Apache-2.0"] | count=1 | events=2022-01-12T06:52:43.000Z .. 2022-01-12T06:52:43.000Z
max_issues: path=Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/data.py | repo=linuxonly801/awesome-DeepLearning @ b063757fa130c4d56aea5cce2e592610f1e169f9 | licenses=["Apache-2.0"] | count=null | events=null
max_forks:  path=Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/data.py | repo=linuxonly801/awesome-DeepLearning @ b063757fa130c4d56aea5cce2e592610f1e169f9 | licenses=["Apache-2.0"] | count=null | events=null
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import json
from collections import OrderedDict
from paddle.io import Dataset
import numpy as np
class ATISDataset(Dataset):
def __init__(self, path, vocab_path, intent_path, slot_path):
self.examples = self.load_data(path)
self.token2id, self.id2token = self.load_dict(vocab_path)
self.intent2id, self.id2intent = self.load_dict(intent_path)
self.slot2id, self.id2slot = self.load_dict(slot_path)
def __getitem__(self, idx):
example = self.examples[idx]
tokens, tags, intent = self.convert_example_to_id(example)
return np.array(tokens), np.array(tags), intent, len(tokens)
def __len__(self):
return len(self.examples)
@property
def vocab_size(self):
return len(self.token2id)
@property
def num_intents(self):
return len(self.intent2id)
@property
def num_slots(self):
return len(self.slot2id)
def convert_example_to_id(self, example):
tokens = example["text"].split()
tags = example["tag"].split()
intent = example["intent"]
assert len(tokens) == len(tags)
        # Fall back to the ids of the "[unk]" token / the "O" tag rather than
        # the raw strings, so the returned lists stay homogeneous int lists.
        tokens = [self.token2id.get(token, self.token2id["[unk]"]) for token in tokens]
        tags = [self.slot2id.get(tag, self.slot2id["O"]) for tag in tags]
intent = self.intent2id[intent]
return tokens, tags, intent
def load_dict(self, dict_path):
with open(dict_path, "r", encoding="utf-8") as f:
words = [word.strip() for word in f.readlines()]
dict2id = dict(zip(words, range(len(words))))
id2dict = {v:k for k,v in dict2id.items()}
return dict2id, id2dict
def _split_with_id(self, text, start=0):
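        # Maps each whitespace-delimited word to its running index, keyed by
        # "<char_start>_<char_end>_<word>". Illustration (assumed input):
        # _split_with_id("show flights") -> {"0_3_show": 0, "5_11_flights": 1}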
word2sid = OrderedDict()
word = ""
count = 0
for i in range(len(text)):
if text[i] == " ":
continue
else:
word += text[i]
if (i < len(text) - 1 and text[i + 1] == " ") or i == len(text) - 1:
# get whole word
key = str(i - len(word) + 1 + start) + "_" + str(i + start) + "_" + word
word2sid[key] = count
count += 1
word = ""
return word2sid
def load_data(self, path):
examples = []
raw_examples = []
with open(path, "r", encoding="utf-8") as f:
for example in f.readlines():
raw_examples.append(json.loads(example))
for raw_example in raw_examples:
example = {}
example["text"] = raw_example["text"]
example["intent"] = raw_example["intent"]
splited_text = raw_example["text"].split()
tags = ['O'] * len(splited_text)
word2sid = self._split_with_id(raw_example["text"])
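            # BIO tagging: the first word of an entity span is tagged
            # "B-<entity>", the remaining words "I-<entity>"; all other
            # words keep the default "O".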
for entity in raw_example["entities"]:
start, end, value, entity_name = entity["start"], entity["end"] - 1, entity["value"], entity["entity"]
entity2sid = self._split_with_id(value, start=start)
for i, word in enumerate(entity2sid.keys()):
if i == 0:
tags[word2sid[word]] = "B-" + entity_name
else:
tags[word2sid[word]] = "I-" + entity_name
example["tag"] = " ".join(tags)
examples.append(example)
return examples
avg_line_length: 34.299145 | max_line_length: 118 | alphanum_fraction: 0.585098
hexsha: 10f85d1081fec9c0fe3b1cde987d69b3cccacd2d | size: 1,707 | ext: py | lang: Python
max_stars:  path=Openharmony v1.0/vendor/hisi/hi35xx/third_party/uboot/tools/binman/image_test.py | repo=clkbit123/TheOpenHarmony @ 0e6bcd9dee9f1a2481d762966b8bbd24baad6159 | licenses=["MIT"] | count=1 | events=2022-02-15T08:51:55.000Z .. 2022-02-15T08:51:55.000Z
max_issues: path=hihope_neptune-oh_hid/00_src/v0.1/device/hisilicon/third_party/uboot/u-boot-2020.01/tools/binman/image_test.py | repo=dawmlight/vendor_oh_fun @ bc9fb50920f06cd4c27399f60076f5793043c77d | licenses=["Apache-2.0"] | count=null | events=null
max_forks:  path=hihope_neptune-oh_hid/00_src/v0.1/device/hisilicon/third_party/uboot/u-boot-2020.01/tools/binman/image_test.py | repo=dawmlight/vendor_oh_fun @ bc9fb50920f06cd4c27399f60076f5793043c77d | licenses=["Apache-2.0"] | count=null | events=null
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2017 Google, Inc
# Written by Simon Glass <[email protected]>
#
# Test for the image module
import unittest
from image import Image
from test_util import capture_sys_output
class TestImage(unittest.TestCase):
def testInvalidFormat(self):
image = Image('name', 'node', test=True)
with self.assertRaises(ValueError) as e:
image.LookupSymbol('_binman_something_prop_', False, 'msg', 0)
self.assertIn(
"msg: Symbol '_binman_something_prop_' has invalid format",
str(e.exception))
def testMissingSymbol(self):
image = Image('name', 'node', test=True)
image._entries = {}
with self.assertRaises(ValueError) as e:
image.LookupSymbol('_binman_type_prop_pname', False, 'msg', 0)
self.assertIn("msg: Entry 'type' not found in list ()",
str(e.exception))
def testMissingSymbolOptional(self):
image = Image('name', 'node', test=True)
image._entries = {}
with capture_sys_output() as (stdout, stderr):
val = image.LookupSymbol('_binman_type_prop_pname', True, 'msg', 0)
self.assertEqual(val, None)
self.assertEqual("Warning: msg: Entry 'type' not found in list ()\n",
stderr.getvalue())
self.assertEqual('', stdout.getvalue())
def testBadProperty(self):
image = Image('name', 'node', test=True)
image._entries = {'u-boot': 1}
with self.assertRaises(ValueError) as e:
image.LookupSymbol('_binman_u_boot_prop_bad', False, 'msg', 0)
self.assertIn("msg: No such property 'bad", str(e.exception))
avg_line_length: 37.933333 | max_line_length: 79 | alphanum_fraction: 0.627417
hexsha: 337a7618e5b3ffaa46eaf54af32dd998cfb7e5e2 | size: 9,642 | ext: py | lang: Python
max_stars:  path=Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/plugins/mapmatching/wxgui-02_stillbrokesave.py | repo=uruzahe/carla @ 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | licenses=["MIT"] | count=4 | events=2020-11-13T02:35:56.000Z .. 2021-03-29T20:15:54.000Z
max_issues: path=Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/plugins/mapmatching/wxgui-02_stillbrokesave.py | repo=uruzahe/carla @ 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | licenses=["MIT"] | count=9 | events=2020-12-09T02:12:39.000Z .. 2021-02-18T00:15:28.000Z
max_forks:  path=Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/plugins/mapmatching/wxgui-02_stillbrokesave.py | repo=uruzahe/carla @ 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | licenses=["MIT"] | count=1 | events=2020-11-20T19:31:26.000Z .. 2020-11-20T19:31:26.000Z
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2016-2020 German Aerospace Center (DLR) and others.
# SUMOPy module
# Copyright (C) 2012-2017 University of Bologna - DICAM
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file wxgui-02_stillbrokesave.py
# @author Joerg Schweizer
# @date
import os
import wx
import numpy as np
from agilepy.lib_wx.modulegui import ModuleGui
from agilepy.lib_wx.ogleditor import *
from agilepy.lib_base.processes import Process
from agilepy.lib_wx.processdialog import ProcessDialog
from coremodules.network import routing
from coremodules.demand import demand
import mapmatching
class WxGui(ModuleGui):
"""Contains functions that communicate between the widgets of the main wx gui
and the functions of the plugin.
"""
def __init__(self, ident):
self._mapmatching = None
self._scenario = None
self._init_common(ident, priority=100001,
icondirpath=os.path.join(os.path.dirname(__file__), 'images'))
def get_module(self):
return self._mapmatching
def get_scenario(self):
return self._mainframe.get_modulegui('coremodules.scenario').get_scenario()
def get_neteditor(self):
return self._mainframe.get_modulegui('coremodules.network').get_neteditor()
def init_widgets(self, mainframe):
"""
Set mainframe and initialize widgets to various places.
"""
self._mainframe = mainframe
#self._neteditor = mainframe.add_view("Network", Neteditor)
# mainframe.browse_obj(self._module)
self.make_menu()
self.make_toolbar()
def refresh_widgets(self):
"""
Check through mainframe what the state of the application is
and reset widgets. For exampe enable/disable widgets
dependent on the availability of data.
"""
scenario = self.get_scenario()
# print 'demand refresh_widgets',scenario.net
is_refresh = False
if self._scenario != scenario:
del self._scenario
del self._mapmatching
self._scenario = scenario
self._mapmatching = mapmatching.Mapmatching('mapmatching', self._scenario)
#self._mapmatching = self._demand.add_demandobject(ident = 'mapmatching', DemandClass = mapmatching.Mapmatching)
is_refresh = True
def make_menu(self):
menubar = self._mainframe.menubar
menubar.append_menu('plugins/mapmatching',
bitmap=self.get_icon("icon_gps.png"),
)
menubar.append_item('plugins/mapmatching/browse',
self.on_browse, # common function in modulegui
info='View and browse mapmatching in object panel.',
bitmap=self.get_agileicon('icon_browse_24px.png'), # ,
)
menubar.append_menu('plugins/mapmatching/import',
bitmap=self.get_agileicon("Document_Import_24px.png"),
)
        menubar.append_item('plugins/mapmatching/import/European cycling challenge...',
self.on_import_ecc,
info=self.on_import_ecc.__doc__.strip(),
bitmap=self.get_agileicon("Document_Import_24px.png"),
)
menubar.append_item('plugins/mapmatching/project points',
self.on_project_points,
bitmap=wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE_AS, wx.ART_MENU),
)
        menubar.append_item('plugins/mapmatching/save as...',
self.on_save_as,
info='Save all mapmatching data in a new Python binary file.',
bitmap=wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE_AS, wx.ART_MENU),
)
menubar.append_item('plugins/mapmatching/open...',
self.on_open,
info='Open previousely saved mapmatching data from a Python binary file.',
bitmap=wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_MENU),
)
def on_import_ecc(self, event=None):
"""
Import and filter data from a European cycling challange.
"""
p = mapmatching.EccTracesImporter(self._mapmatching, logger=self._mainframe.get_logger())
dlg = ProcessDialog(self._mainframe, p, immediate_apply=True)
dlg.CenterOnScreen()
# this does not return until the dialog is closed.
val = dlg.ShowModal()
# print ' val,val == wx.ID_OK',val,wx.ID_OK,wx.ID_CANCEL,val == wx.ID_CANCEL
# print ' status =',dlg.get_status()
if dlg.get_status() != 'success': # val == wx.ID_CANCEL:
# print ">>>>>>>>>Unsuccessful\n"
dlg.Destroy()
if dlg.get_status() == 'success':
# print ">>>>>>>>>successful\n"
# apply current widget values to scenario instance
dlg.apply()
dlg.Destroy()
self._mainframe.browse_obj(self._mapmatching.trips)
def on_project_points(self, event=None):
self._mapmatching.points.project()
self._mainframe.browse_obj(self._mapmatching.points)
if event:
event.Skip()
def on_browse(self, event=None):
self._mainframe.browse_obj(self._mapmatching)
if event:
event.Skip()
def on_save_as(self, event=None):
if self._mapmatching is None:
return
scenario = self.get_scenario()
wildcards_all = "All files (*.*)|*.*"
wildcards_obj = "Python binary result files (*.mmatch.obj)|*.mmatch.obj|Python binary files (*.obj)|*.obj"
wildcards = wildcards_obj+"|"+wildcards_all
# Finally, if the directory is changed in the process of getting files, this
# dialog is set up to change the current working directory to the path chosen.
dlg = wx.FileDialog(
self._mainframe, message="Save mapmatching to file",
defaultDir=scenario.get_workdirpath(),
defaultFile=scenario.get_rootfilepath()+'.mmatch.obj',
wildcard=wildcards,
style=wx.SAVE | wx.CHANGE_DIR
)
val = dlg.ShowModal()
# Show the dialog and retrieve the user response. If it is the OK response,
# process the data.
if val == wx.ID_OK:
# This returns a Python list of files that were selected.
filepath = dlg.GetPath()
if len(filepath) > 0:
# now set new filename and workdir
self._mapmatching.save(filepath)
# Destroy the dialog. Don't do this until you are done with it!
# BAD things can happen otherwise!
dlg.Destroy()
def on_open(self, event=None):
wildcards_all = "All files (*.*)|*.*"
wildcards_obj = "Python binary mapmatching files (*.mmatch.obj)|*.mmatch.obj|Python binary files (*.obj)|*.obj"
wildcards = wildcards_obj+"|"+wildcards_all
# Finally, if the directory is changed in the process of getting files, this
# dialog is set up to change the current working directory to the path chosen.
dlg = wx.FileDialog(
self._mainframe, message="Open mapmatching file",
defaultDir=self.get_scenario().get_workdirpath(),
#defaultFile = os.path.join(scenario.get_workdirpath(), scenario.format_ident()+'.obj'),
wildcard=wildcards,
style=wx.OPEN | wx.CHANGE_DIR
)
# Show the dialog and retrieve the user response. If it is the OK response,
# process the data.
is_new = False
if dlg.ShowModal() == wx.ID_OK:
# This returns a Python list of files that were selected.
filepath = dlg.GetPath()
if len(filepath) > 0:
if self._mapmatching is not None:
# browse away from results
# self._mainframe.browse_obj(self._results.get_scenario())
del self._mapmatching
self._mapmatching = mapmatching.load_mapmatching(filepath,
self.get_scenario(),
logger=self._mainframe.get_logger()
)
is_new = True
# Destroy the dialog. Don't do this until you are done with it!
# BAD things can happen otherwise!
dlg.Destroy()
if is_new:
# this should update all widgets for the new scenario!!
# print 'call self._mainframe.refresh_moduleguis()'
self._mainframe.browse_obj(self._mapmatching)
self._mainframe.select_view(name="Network") # !!!!!!!!tricky, crashes without
self.refresh_widgets()
avg_line_length: 41.560345 | max_line_length: 124 | alphanum_fraction: 0.601846
hexsha: 1d3beb1500f67cee98b3bd4f352519dd91475e5e | size: 1,159 | ext: py | lang: Python
max_stars:  path=sso-db/ssodb/common/models/service_model.py | repo=faical-yannick-congo/sso-backend @ e962006b0fecd68e4da94e54b4dc63547a5a2c21 | licenses=["MIT"] | count=null | events=null
max_issues: path=sso-db/ssodb/common/models/service_model.py | repo=faical-yannick-congo/sso-backend @ e962006b0fecd68e4da94e54b4dc63547a5a2c21 | licenses=["MIT"] | count=null | events=null
max_forks:  path=sso-db/ssodb/common/models/service_model.py | repo=faical-yannick-congo/sso-backend @ e962006b0fecd68e4da94e54b4dc63547a5a2c21 | licenses=["MIT"] | count=null | events=null
import datetime
from ..core import db
import json
from bson import ObjectId
class Service(db.Document):
    # Use callable defaults so the timestamp is evaluated per document rather
    # than once at class-definition time.
    created_at = db.StringField(default=lambda: str(datetime.datetime.utcnow()))
    updated_at = db.StringField(default=lambda: str(datetime.datetime.utcnow()))
name = db.StringField(required=True, unique=True)
host = db.StringField()
possible_status = ["active", "innactive"]
status = db.StringField(default="innactive", choices=possible_status)
menu_endpoint = db.StringField() # Endpoint that take care of providing the sms menu to the service.
def save(self, *args, **kwargs):
self.updated_at = str(datetime.datetime.utcnow())
self.day = str(datetime.date.today().isoformat())
return super(Service, self).save(*args, **kwargs)
def info(self):
data = {'updated-at':str(self.updated_at), 'id':str(self.id),
'created-at':str(self.created_at), 'status':self.status, 'name':self.name, 'host':self.host,
'menu-endpoint':self.menu_endpoint}
return data
def to_json(self):
data = self.info()
return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
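# Usage sketch (assumes a mongoengine connection has been set up by ..core.db):
#   svc = Service(name="sms-gateway", host="10.0.0.5", menu_endpoint="/menu")
#   svc.save()
#   print(svc.to_json())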
avg_line_length: 39.965517 | max_line_length: 104 | alphanum_fraction: 0.672994
hexsha: 89208cce1c70cc78da035c74743a23606b906167 | size: 2,660 | ext: py | lang: Python
max_stars:  path=examples/relationship/manytoonefield/models.py | repo=zhengtong0898/django-decode @ 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | licenses=["MIT"] | count=5 | events=2020-07-14T07:48:10.000Z .. 2021-12-20T21:20:10.000Z
max_issues: path=examples/relationship/manytoonefield/models.py | repo=zhengtong0898/django-decode @ 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | licenses=["MIT"] | count=7 | events=2021-03-26T03:13:38.000Z .. 2022-03-12T00:42:03.000Z
max_forks:  path=examples/relationship/manytoonefield/models.py | repo=zhengtong0898/django-decode @ 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | licenses=["MIT"] | count=1 | events=2021-02-16T07:04:25.000Z .. 2021-02-16T07:04:25.000Z
from django.db import models
# CREATE TABLE `manytoonefield_reporter` (   -- DDL generated by Django
#     `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
#     `first_name` varchar(30) NOT NULL,
#     `last_name` varchar(30) NOT NULL,
#     `email` varchar(254) NOT NULL
# );
#
#
# CREATE TABLE `manytoonefield_reporter` (
#     `id` int(11) NOT NULL AUTO_INCREMENT,   -- added automatically by Django: auto-increment ID
#     `first_name` varchar(30) NOT NULL,
#     `last_name` varchar(30) NOT NULL,
#     `email` varchar(254) NOT NULL,
#     PRIMARY KEY (`id`)                      -- primary key (clustered index)
# ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
# CREATE TABLE `manytoonefield_article` (   -- DDL generated by Django
#     `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
#     `headline` varchar(100) NOT NULL,
#     `pub_date` date NOT NULL,
#     `reporter_id` integer NOT NULL
# );
#
# ALTER TABLE `manytoonefield_article`
#     ADD CONSTRAINT `manytoonefield_artic_reporter_id_01692140_fk_manytoone` FOREIGN KEY (`reporter_id`)
#     REFERENCES `manytoonefield_reporter` (`id`)   -- foreign-key statement added by Django
#
#
# Question: what is the difference between a secondary index and a foreign key?
# https://stackoverflow.com/questions/1732467/what-is-the-difference-between-an-index-and-a-foreign-key
#     A secondary index is internally a B+ tree data structure; it speeds up random lookups.
#     A foreign key is merely a reference to another table's primary key.
# CREATE TABLE `manytoonefield_article` (
#     `id` int(11) NOT NULL AUTO_INCREMENT,   -- added automatically by Django: auto-increment ID
#     `headline` varchar(100) NOT NULL,
#     `pub_date` date NOT NULL,
#     `reporter_id` int(11) NOT NULL,
#     PRIMARY KEY (`id`),   -- primary key (clustered index)
#     KEY `manytoonefield_artic_reporter_id_01692140_fk_manytoone` (`reporter_id`),   -- secondary index
#     CONSTRAINT `manytoonefield_artic_reporter_id_01692140_fk_manytoone` FOREIGN KEY (`reporter_id`) \
#         REFERENCES `manytoonefield_reporter` (`id`)   -- foreign-key constraint
# ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
reporter = models.ForeignKey(Reporter, on_delete=models.CASCADE) # Many-to-one
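# Usage sketch of the many-to-one relation (hypothetical data; run inside a
# configured Django project):
#   from datetime import date
#   r = Reporter.objects.create(first_name="John", last_name="Smith",
#                               email="john@example.com")
#   a = Article.objects.create(headline="Hi", pub_date=date(2020, 1, 1), reporter=r)
#   a.reporter.email       # forward lookup: "john@example.com"
#   r.article_set.count()  # reverse lookup: 1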
avg_line_length: 48.363636 | max_line_length: 117 | alphanum_fraction: 0.566165
hexsha: 89ac690609f1e3d0a1501b1e64be3237dc585594 | size: 92 | ext: py | lang: Python
max_stars:  path=2014/07/men-life-gap/graphic_config.py | repo=nprapps/graphics-archive @ 97b0ef326b46a959df930f5522d325e537f7a655 | licenses=["FSFAP"] | count=14 | events=2015-05-08T13:41:51.000Z .. 2021-02-24T12:34:55.000Z
max_issues: path=2014/07/men-life-gap/graphic_config.py | repo=nprapps/graphics-archive @ 97b0ef326b46a959df930f5522d325e537f7a655 | licenses=["FSFAP"] | count=null | events=null
max_forks:  path=2014/07/men-life-gap/graphic_config.py | repo=nprapps/graphics-archive @ 97b0ef326b46a959df930f5522d325e537f7a655 | licenses=["FSFAP"] | count=7 | events=2015-04-04T04:45:54.000Z .. 2021-02-18T11:12:48.000Z
#!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '16FywAcZFB7xGfCtrHMWmUkidEV8ikFwMgSgJf5LWxxI'
avg_line_length: 23 | max_line_length: 68 | alphanum_fraction: 0.847826
hexsha: 7fa1485d45de4609f2ca861fc3816ac0c76217b5 | size: 1,723 | ext: py | lang: Python
max_stars:  path=scripts/component_graph/server/query/query_handler.py | repo=opensource-assist/fuschia @ 66646c55b3d0b36aae90a4b6706b87f1a6261935 | licenses=["BSD-3-Clause"] | count=null | events=null
max_issues: path=scripts/component_graph/server/query/query_handler.py | repo=opensource-assist/fuschia @ 66646c55b3d0b36aae90a4b6706b87f1a6261935 | licenses=["BSD-3-Clause"] | count=null | events=null
max_forks:  path=scripts/component_graph/server/query/query_handler.py | repo=opensource-assist/fuschia @ 66646c55b3d0b36aae90a4b6706b87f1a6261935 | licenses=["BSD-3-Clause"] | count=null | events=null
#!/usr/bin/env python3
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""QueryHandler services all graph and package queries.
The QueryHandler is the business logic layer between the PackageManager and
the ComponentGraphGenerator. It is responsible for processing the data provided
by the PackageManager and forwarding it through for graph generation.
"""
import sys
import json
from server.util.logging import get_logger
from server.graph import ComponentGraphGenerator
class ComponentQueryError(Exception):
"""Raised when an unrecoverable query exception occurs"""
pass
class QueryHandler:
""" Core handler to respond to different queries """
def __init__(self, package_manager):
""" Verifies the package manager is online. """
self.logger = get_logger(__name__)
self.package_manager = package_manager
self.graph_generator = ComponentGraphGenerator()
if not self.package_manager.ping():
self.logger.error(
"Failed to connect to package manager please run fx serve.")
sys.exit(1)
def services(self, packages):
""" Returns the list of service to component url mappings """
return self.package_manager.get_services(packages)
def packages(self):
""" Returns a list of available packages """
return self.package_manager.get_packages()
def component_graph(self):
""" Returns the component graph that shows all component connections """
packages = self.packages()
return self.graph_generator.generate(packages, self.services(packages)).export()
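# Usage sketch (assumes a package manager object exposing the ping(),
# get_packages() and get_services() methods this class calls):
#   handler = QueryHandler(package_manager)
#   graph = handler.component_graph()  # exported component graph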
avg_line_length: 36.659574 | max_line_length: 88 | alphanum_fraction: 0.720255
hexsha: 12062a73da81e781e475f26cc8fd1813ec241c8a | size: 1,908 | ext: py | lang: Python
max_stars:  path=tests/rbac/common/addresser/addresser_test.py | repo=akgunkel/sawtooth-next-directory @ a88833033ab30e9091479a38947f04c5e396ca46 | licenses=["Apache-2.0"] | count=null | events=null
max_issues: path=tests/rbac/common/addresser/addresser_test.py | repo=akgunkel/sawtooth-next-directory @ a88833033ab30e9091479a38947f04c5e396ca46 | licenses=["Apache-2.0"] | count=1 | events=2019-07-08T22:32:43.000Z .. 2019-07-08T22:32:43.000Z
max_forks:  path=tests/rbac/common/addresser/addresser_test.py | repo=akgunkel/sawtooth-next-directory @ a88833033ab30e9091479a38947f04c5e396ca46 | licenses=["Apache-2.0"] | count=null | events=null
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Test Addresser"""
import logging
import pytest
from rbac.common import addresser
from tests.rbac.common.assertions import TestAssertions
LOGGER = logging.getLogger(__name__)
@pytest.mark.addressing
@pytest.mark.library
class TestAddresser(TestAssertions):
"""Test Addresser"""
def test_family_props(self):
"""Test the addresser family has the expected properties"""
self.assertIsInstance(addresser.family.name, str)
self.assertIsInstance(addresser.family.version, str)
self.assertIsInstance(addresser.family.pattern.pattern, str)
def test_unique_id(self):
"""Test unique_id returns unique identifiers"""
unique_id1 = addresser.role.unique_id()
unique_id2 = addresser.role.unique_id()
self.assertIsIdentifier(unique_id1)
self.assertIsIdentifier(unique_id2)
self.assertNotEqual(unique_id1, unique_id2)
def test_hash(self):
"""Test hash returns unique identifiers"""
hash1 = addresser.role.hash(addresser.role.unique_id())
hash2 = addresser.role.hash(addresser.role.unique_id())
self.assertIsIdentifier(hash1)
self.assertIsIdentifier(hash2)
self.assertNotEqual(hash1, hash2)
avg_line_length: 36 | max_line_length: 79 | alphanum_fraction: 0.70283
hexsha: 126022b5d66c9c3625d207e1cae9dbfb33a68a04 | size: 2,702 | ext: py | lang: Python
max_stars:  path=tests/google/test_primary_google_service_account.py | repo=scottyellis/fence @ 012ba76a58853169e9ee8e3f44a0dc510f4b2543 | licenses=["Apache-2.0"] | count=31 | events=2018-01-05T22:49:33.000Z .. 2022-02-02T10:30:23.000Z
max_issues: path=tests/google/test_primary_google_service_account.py | repo=scottyellis/fence @ 012ba76a58853169e9ee8e3f44a0dc510f4b2543 | licenses=["Apache-2.0"] | count=737 | events=2017-12-11T17:42:11.000Z .. 2022-03-29T22:42:52.000Z
max_forks:  path=tests/google/test_primary_google_service_account.py | repo=scottyellis/fence @ 012ba76a58853169e9ee8e3f44a0dc510f4b2543 | licenses=["Apache-2.0"] | count=46 | events=2018-02-23T09:04:23.000Z .. 2022-02-09T18:29:51.000Z
import pytest
from unittest.mock import MagicMock, patch
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
def test_primary_google_service_account_valid(
client,
app,
db_session,
encoded_jwt_google_data_access,
primary_google_service_account_google,
):
"""
Test that given valid credentials, the endpoint responds with the user's primary
google SA in the response and it matches the mocked value setup in the fixture
"""
encoded_creds_jwt = encoded_jwt_google_data_access["jwt"]
mock = primary_google_service_account_google["get_or_create_service_account_mock"]
email = primary_google_service_account_google["email"]
response = client.post(
"/google/primary_google_service_account",
headers={"Authorization": "Bearer " + encoded_creds_jwt},
content_type="application/json",
)
assert response.status_code == 200
assert response.json.get("primary_google_service_account") == email
def test_primary_google_service_account_invalid(
client,
app,
db_session,
encoded_jwt_service_accounts_access,
primary_google_service_account_google,
):
"""
Test that given invalid credentials (e.g. doesn't have the right scope),
this endpoint responds with an HTTP error code and no data
NOTE: encoded_jwt_service_accounts_access does not have the expected claim in the
mocked token.
"""
encoded_creds_jwt = encoded_jwt_service_accounts_access["jwt"]
mock = primary_google_service_account_google["get_or_create_service_account_mock"]
email = primary_google_service_account_google["email"]
response = client.post(
"/google/primary_google_service_account",
headers={"Authorization": "Bearer " + encoded_creds_jwt},
content_type="application/json",
)
assert response.status_code == 401
assert not (response.json or {}).get("primary_google_service_account")
def test_primary_google_service_account_no_creds(
client,
app,
db_session,
primary_google_service_account_google,
):
"""
Test that given no creds, this endpoint responds with an HTTP error code and no data
"""
mock = primary_google_service_account_google["get_or_create_service_account_mock"]
email = primary_google_service_account_google["email"]
response = client.post(
"/google/primary_google_service_account",
content_type="application/json",
)
assert response.status_code == 401
assert not (response.json or {}).get("primary_google_service_account")
avg_line_length: 32.554217 | max_line_length: 88 | alphanum_fraction: 0.734641
hexsha: c3e71626d20fc18a6728e7e99e05a5d7cf3045ab | size: 3,380 | ext: py | lang: Python
max_stars:  path=Uebung4/Uebung4_Aufgabe12.py | repo=B0mM3L6000/EiP @ f68718f95a2d3cde8ead62b6134ac1b5068881a5 | licenses=["MIT"] | count=1 | events=2018-04-18T19:10:06.000Z .. 2018-04-18T19:10:06.000Z
max_issues: path=Uebung4/Uebung4_Aufgabe12.py | repo=B0mM3L6000/EiP @ f68718f95a2d3cde8ead62b6134ac1b5068881a5 | licenses=["MIT"] | count=null | events=null
max_forks:  path=Uebung4/Uebung4_Aufgabe12.py | repo=B0mM3L6000/EiP @ f68718f95a2d3cde8ead62b6134ac1b5068881a5 | licenses=["MIT"] | count=1 | events=2018-04-29T08:48:00.000Z .. 2018-04-29T08:48:00.000Z
# import:
import random
# the list of drawn lottery numbers
picks = list()
# code:
# list of candidate numbers for the draw (here 1-49)
zahlen = list(range(1,50))
# number of draws (here 6 out of 49)
anzahl = 6
# draws: pick one number per round, append it to picks and remove it from
# zahlen to avoid duplicates; since zahlen shrinks every round, the valid
# index range is always 0 .. len(zahlen)-1
for i in range(anzahl):
    j = random.randint(0, len(zahlen)-1)
    picks.append(zahlen[j])
    zahlen.remove(zahlen[j])
# sort
picks.sort()
# output:
print("#test if 6 different numbers were drawn")
if(len(set(picks)) == 6):
print("all picks are unique")
else:
print("at least two picks are the same")
print("your picks:", picks)
print("#test if all picks are in range from 1 to 49")
if(all(k in range(1,50) for k in picks)):
print("picks in valid range")
else:
print("picks are not in valid range")
print("your picks:", picks)
print("#test if picks are sorted")
if(sorted(picks) == picks):
print("picks are sorted")
else:
print("picks are not sorted!")
print("your picks:", picks)
"""
Exercise 12.1:
Since this is a draw without replacement, every permutation is equally likely.
"""
"""
# Exercise 12.3:
getippt = False
while getippt == False:
    tipp = [int(i) for i in (input("Enter 6 numbers:").split())]
    if (len(set(tipp)) == 6) and all(k in range(1, 50) for k in tipp):  # validate the tip itself, with a proper boolean 'and'
        getippt = True
    else:
        print("Please enter exactly 6 different numbers between 1 and 49.")
tipp.sort()
tippset = set(tipp)
picksset = set(picks)
treffer = list(picksset.intersection(tippset))
count_treffer = len(treffer)
print("The following numbers are correct:", treffer)
print("You got", count_treffer, "correct!")
"""
"""
# Exercise 12.4:
# Simulate random tips until they match the 6 random lottery picks
# repeat x times to obtain a mean value
x = int(input("How many rounds for the mean value?"))
# round counter
zaehler = 0
for _ in range(x):
    richtig = False
    while richtig == False:
        # the picks from part 2:
        picks = list()
        zahlen = list(range(1,50))
        anzahl = 6
        for i in range(anzahl):
            j = random.randint(0, len(zahlen)-1)
            picks.append(zahlen[j])
            zahlen.remove(zahlen[j])
        picks.sort()
        # the tips:
        tipps = list()
        zahlen = list(range(1,50))
        for i in range(anzahl):
            j = random.randint(0, len(zahlen)-1)
            tipps.append(zahlen[j])
            zahlen.remove(zahlen[j])
        tipps.sort()
        # check whether the tip was correct:
        tippsset = set(tipps)
        picksset = set(picks)
        treffer = list(picksset.intersection(tippsset))
        count_treffer = len(treffer)
        if count_treffer == 6:
            richtig = True
        zaehler += 1
mittel = zaehler/x
print(mittel)
# Result with x = 1: on average 10011063.0 tips were needed for 6 correct numbers.
# Result with x = 10: on average 11212583.5 tips were needed for 6 correct numbers.
# Too expensive to run more rounds.
# Mathematically:
# possible permutations of the draw: (49 choose 6) = 49!/(6!*43!) = 13 983 816
# matching tips: (6 choose 6) = 6!/6! = 1
# => P(6 correct) = 1/13983816, i.e. on average 13 983 816 tries are needed for 6 correct numbers.
"""
| 24.316547 | 110 | 0.644083 |
7f247104a82e8fad0e1f63f31b0d8269e65797f9
| 2,003 |
py
|
Python
|
RESTApi/flaskapp/recommend.py
|
Peacecake/HoloMu
|
98b422b226c2274e6d7e96df31724b0d2abd8ebb
|
[
"MIT"
] | null | null | null |
RESTApi/flaskapp/recommend.py
|
Peacecake/HoloMu
|
98b422b226c2274e6d7e96df31724b0d2abd8ebb
|
[
"MIT"
] | 32 |
2018-06-19T15:27:04.000Z
|
2018-09-30T20:17:23.000Z
|
RESTApi/flaskapp/recommend.py
|
Peacecake/HoloMu
|
98b422b226c2274e6d7e96df31724b0d2abd8ebb
|
[
"MIT"
] | null | null | null |
import json
from db import get_db
import random
def calcRecommendation(watched_name, watched_cat):
db = get_db()
watchedSameCat = 2
watchedExhibit = 0.9
otherExhibit = 1
weights_sum = 0
lastRow = db.execute("SELECT * FROM recommend WHERE ID=(SELECT MAX(ID) FROM recommend)").fetchone()
newData = json.loads(lastRow["data"])
for exhibit_data in newData:
if watched_name != exhibit_data["e_name"] and watched_cat == exhibit_data["e_cat"]:
exhibit_data["e_prop"] = exhibit_data["e_prop"] * watchedSameCat
weights_sum += exhibit_data["e_prop"]
elif watched_name == exhibit_data["e_name"]:
exhibit_data["e_prop"] = exhibit_data["e_prop"] * watchedExhibit
weights_sum += exhibit_data["e_prop"]
else:
exhibit_data["e_prop"] = exhibit_data["e_prop"] * otherExhibit
weights_sum += exhibit_data["e_prop"]
for exhibit_data in newData:
exhibit_data["e_prop"] = exhibit_data["e_prop"] / weights_sum
return newData
def recommendExhibit(watched_exhibit):
db = get_db()
exhibitNames = []
weights = []
currentRow = db.execute("SELECT * FROM recommend WHERE ID=(SELECT MAX(ID) FROM recommend)")
for data_set in currentRow:
data = json.loads(data_set["data"])
for exhibit_data in data:
if exhibit_data["e_name"] != watched_exhibit:
weights.append(exhibit_data["e_prop"])
exhibitNames.append(exhibit_data["e_name"])
recommendedExhibit = weightedRands(exhibitNames, weights)
return recommendedExhibit
# returns random value from exhibitNames list weighted by the e_props
# Retrieved from: https://stackoverflow.com/questions/12096819
def weightedRands(exhibits, weights):
r = random.uniform(0, sum(weights))
    for n, v in zip(exhibits, [sum(weights[:x + 1]) for x in range(len(weights))]):  # zip replaces the Python 2-only map(None, ...)
if r < v:
return n
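# Alternative sketch (assumption: Python 3.6+): random.choices performs the same weighted pick.
# def weightedRands(exhibits, weights):
#     return random.choices(exhibits, weights=weights, k=1)[0]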
| 40.06 | 104 | 0.648527 |
9ccb7ea5fedf5bc0b68bae94dbd5a5c09c9d9c76
| 108 |
py
|
Python
|
python/python_new/Python 3/untitled4x.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_new/Python 3/untitled4x.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_new/Python 3/untitled4x.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import numpy as np
a = np.array([[1, 2], [3, 4], [5, 8]])
b = a[-1, 0:2]  # first two entries of the last row -> [5 8]
print(b)
c = a[-1]       # the entire last row -> [5 8]
print(c)
d = a[:, 1:3]   # columns 1:3 of every row (only column 1 exists) -> [[2] [4] [8]]
print(d)
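# Minimal shape checks for the slices above (sketch, not part of the original snippet):
assert b.shape == (2,)
assert c.shape == (2,)
assert d.shape == (3, 1)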
| 13.5 | 31 | 0.546296 |
2c28d23538ff750b39c70a97536878ac52e6fc71
| 1,076 |
py
|
Python
|
setup.py
|
KeepSafe/html-structure-diff
|
83ea2b8ad4b78b140bb5f69e3f2966c9fda01a89
|
[
"Apache-2.0"
] | 3 |
2016-05-10T13:57:14.000Z
|
2016-09-29T21:01:53.000Z
|
setup.py
|
KeepSafe/html-structure-diff
|
83ea2b8ad4b78b140bb5f69e3f2966c9fda01a89
|
[
"Apache-2.0"
] | 3 |
2015-10-20T22:29:37.000Z
|
2022-01-18T18:20:06.000Z
|
setup.py
|
KeepSafe/html-structure-diff
|
83ea2b8ad4b78b140bb5f69e3f2966c9fda01a89
|
[
"Apache-2.0"
] | 1 |
2016-11-05T04:23:05.000Z
|
2016-11-05T04:23:05.000Z
|
import os
from setuptools import setup, find_packages
version = '0.4.1'
def read(f):
return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
install_requires = [
'mistune <= 1',
]
tests_require = [
'nose',
'flake8',
'autopep8',
]
devtools_require = [
'twine',
'build',
]
setup(name='sdiff',
version=version,
description=('sdiff compares the structure of two markdown texts'),
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Programming Language :: Python'],
author='Keepsafe',
author_email='[email protected]',
url='https://github.com/KeepSafe/html-structure-diff/',
license='Apache',
packages=find_packages(exclude=['tests']),
package_data={},
namespace_packages=[],
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'tests': tests_require,
'devtools': devtools_require,
},
include_package_data=False)
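# Illustrative local install using the extras declared above (usage sketch, not part of the package):
#   pip install -e ".[tests]"
#   pip install -e ".[devtools]"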
| 22.416667 | 74 | 0.624535 |
9fac8f3c828695c8cff839c5969097866b2f76b4
| 514 |
py
|
Python
|
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/07.0-Condition.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/07.0-Condition.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/07.0-Condition.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
marks = int(input("What is you marks in Math: "))
def show_grade(grade):
print(f"You got: {grade}")
if marks >= 80:
show_grade("A+")
elif marks >= 70:
show_grade("A")
elif marks >= 60:
show_grade("A-")
elif marks >= 50:
show_grade("B")
elif marks >= 40:
show_grade("C")
else:
show_grade("F")
if marks > 80 or marks < 10:
print("You are very good or very bad")
if marks > 80:
print("Excellent")
else:
print("Not so good")
else:
print("You are okay")
| 17.724138 | 49 | 0.577821 |
2c6a60c9c8ae76a52cd66a8d5cb2289cc09c49cf
| 2,147 |
py
|
Python
|
python_experiments/data_analysis/vldbj_data_parsing/generate_accuracy_markdown.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | 8 |
2020-04-14T23:17:00.000Z
|
2021-06-21T12:34:04.000Z
|
python_experiments/data_analysis/vldbj_data_parsing/generate_accuracy_markdown.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | null | null | null |
python_experiments/data_analysis/vldbj_data_parsing/generate_accuracy_markdown.py
|
RapidsAtHKUST/SimRank
|
3a601b08f9a3c281e2b36b914e06aba3a3a36118
|
[
"MIT"
] | 1 |
2021-01-17T16:26:50.000Z
|
2021-01-17T16:26:50.000Z
|
from data_analysis.vldbj_data_parsing.querying_time_accuracy_statistics import *
from data_analysis.vldbj_data_parsing.reads_accuracy_statistics import *
import decimal
def get_accuracy_dict(root_dir='.', file_name='accuracy_result'):
with open(root_dir + os.sep + 'data-json' + os.sep + file_name + '.json') as ifs:
return json.load(ifs)
def get_accuracy_dict_with_reads(root_dir='.', file_name='accuracy_result'):
accuracy_dict = get_accuracy_dict(root_dir)
for file in ['accuracy_result_reads', 'accuracy_result_probesim']:
read_dict = get_accuracy_dict(root_dir, file_name=file)
assert isinstance(read_dict, dict)
for key, val in read_dict.items():
accuracy_dict[key] = val
return accuracy_dict
def format_str(float_num):
my_str = str(decimal.Decimal.from_float(float_num * (10 ** 2)).quantize(decimal.Decimal('0.000')))
if (float_num < 0.01):
return my_str
else:
return '**' + my_str + '**'
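# Illustrative values (sketch): format_str(0.005) -> '0.500'; format_str(0.05) -> '**5.000**'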
if __name__ == '__main__':
algorithm_tag_lst = [bflpmc_tag, flpmc_tag, bprw_tag, sling_tag,
reads_tag, reads_d_tag, reads_rq_tag,
isp_tag, tsf_tag, lind_tag, cw_tag]
accuracy_dict = get_accuracy_dict_with_reads()
def get_time_table(round_lst, data_set):
table_lines = []
        header = ['algo\\data'] + list(map(str, round_lst))  # list() is required on Python 3, where map is lazy
        table_lines.append(' | '.join(header))
        table_lines.append(' | '.join(['---'] * (len(round_lst) + 1)))
        lines = map(lambda algorithm:
                    ' | '.join([algorithm] +
                               list(map(lambda num: format_str(num),
                                        accuracy_dict[algorithm][data_set][str(10 ** 5)]))), algorithm_tag_lst)
table_lines.extend(lines)
return '\n'.join(table_lines)
os.system('mkdir -p data-markdown')
with open('data-markdown/accuracy_result.md', 'w') as ofs:
ofs.writelines(['# Max Err, Unit: 0.01\n\n'])
for data_set in accuracy_data_set_lst:
ofs.writelines(['## ' + data_set, '\n\n', get_time_table(range(10), data_set), '\n\n'])
| 38.339286 | 107 | 0.630182 |
646a745e6ac3daf4d9d0c6d9836370483a70e293
| 230 |
py
|
Python
|
Algorithms/Implementation/service_lane.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/service_lane.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/service_lane.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
N, T = list(map(int, input().strip().split(' ')))
width = list(map(int, input().strip().split(' ')))
for _ in range(T):
i, j = list(map(int, input().strip().split(' ')))
print(min(width[i:j + 1]))
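# Illustrative run (sketch): with width = [2, 3, 1, 2, 3, 2, 3, 3] and a test case "0 3",
# the answer is min(width[0:4]) == 1, i.e. the narrowest segment on the route.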
| 25.555556 | 53 | 0.556522 |
6484edd6039e15fcfb7d0ccf9cd6a117867e0839
| 250 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v5_1/default_bom.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v5_1/default_bom.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v5_1/default_bom.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
from __future__ import unicode_literals
import frappe
def execute():
    # Clear default_bom on Items whose default BOM no longer exists as a submitted (docstatus = 1) BOM.
    frappe.db.sql("""Update `tabItem` as item set default_bom = NULL where
        not exists(select name from `tabBOM` as bom where item.default_bom = bom.name and bom.docstatus = 1)""")
| 35.714286 | 106 | 0.752 |
64b334b0e627ffd9f97989fbdbc08774a19c7319
| 734 |
py
|
Python
|
AutosClasificados/core/test2.py
|
joaquinpunales1992/Python-Django-WatsonVisualRecognition-WatsonNLU
|
2997359150236a7d897a3f9201f8e9404f3d7f02
|
[
"MIT"
] | null | null | null |
AutosClasificados/core/test2.py
|
joaquinpunales1992/Python-Django-WatsonVisualRecognition-WatsonNLU
|
2997359150236a7d897a3f9201f8e9404f3d7f02
|
[
"MIT"
] | null | null | null |
AutosClasificados/core/test2.py
|
joaquinpunales1992/Python-Django-WatsonVisualRecognition-WatsonNLU
|
2997359150236a7d897a3f9201f8e9404f3d7f02
|
[
"MIT"
] | null | null | null |
import json
#json = {{"watsonVisualRecognition":{"vAPIKey": "868b9f7ba1beb9cd3ef77236760fea74bac9af26", "vAPIVersion": "2016-05-20", "vIdClasificador": "vehiculos_260725218", "vUmbralMinScore_WVR": 0.5}, "watsonNLU":{"preprocessing_queue":{"vAPIUser": "e477af0a-db2f-4753-8b2d-14a084e607cf", "vAPIPass": "qbHBxkSPhwPA","vAPIVersion": "2017-10-03", "vUmbralMinScore_WNLU": 0.1}, "otros":{"vUmbralMinDescripcion": 10}}}}
with open('AutosClasificados/core/config.json') as json_data_file:  # forward slashes avoid the invalid "\c" escape and also work on Windows
vConfig = json.load(json_data_file)
vAPIVersion = vConfig["watsonVisualRecognition"]["vAPIVersion"]
vAPIKey = vConfig["watsonVisualRecognition"]["vAPIKey"]
vAPIClasificador = vConfig["watsonVisualRecognition"]["vIdClasificador"]
| 66.727273 | 406 | 0.757493 |
37beb8be587eda2253830cd5e5c76fc4dc369c96
| 10,739 |
py
|
Python
|
official/cv/east/src/east.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/east/src/east.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/east/src/east.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore import dtype as mstype
import mindspore.nn as nn
import mindspore.ops as P
def _conv(
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=0,
pad_mode='pad'):
"""Conv2D wrapper."""
weights = 'ones'
layers = []
layers += [nn.Conv2d(in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
pad_mode=pad_mode,
weight_init=weights,
has_bias=False)]
layers += [nn.BatchNorm2d(out_channels)]
return nn.SequentialCell(layers)
class VGG16FeatureExtraction(nn.Cell):
"""VGG16FeatureExtraction for deeptext"""
def __init__(self):
super(VGG16FeatureExtraction, self).__init__()
self.relu = nn.ReLU()
self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)
self.conv1_1 = _conv(
in_channels=3,
out_channels=64,
kernel_size=3,
padding=1)
self.conv1_2 = _conv(
in_channels=64,
out_channels=64,
kernel_size=3,
padding=1)
self.conv2_1 = _conv(
in_channels=64,
out_channels=128,
kernel_size=3,
padding=1)
self.conv2_2 = _conv(
in_channels=128,
out_channels=128,
kernel_size=3,
padding=1)
self.conv3_1 = _conv(
in_channels=128,
out_channels=256,
kernel_size=3,
padding=1)
self.conv3_2 = _conv(
in_channels=256,
out_channels=256,
kernel_size=3,
padding=1)
self.conv3_3 = _conv(
in_channels=256,
out_channels=256,
kernel_size=3,
padding=1)
self.conv4_1 = _conv(
in_channels=256,
out_channels=512,
kernel_size=3,
padding=1)
self.conv4_2 = _conv(
in_channels=512,
out_channels=512,
kernel_size=3,
padding=1)
self.conv4_3 = _conv(
in_channels=512,
out_channels=512,
kernel_size=3,
padding=1)
self.conv5_1 = _conv(
in_channels=512,
out_channels=512,
kernel_size=3,
padding=1)
self.conv5_2 = _conv(
in_channels=512,
out_channels=512,
kernel_size=3,
padding=1)
self.conv5_3 = _conv(
in_channels=512,
out_channels=512,
kernel_size=3,
padding=1)
self.cast = P.Cast()
def construct(self, out):
""" Construction of VGG """
f_0 = out
out = self.cast(out, mstype.float32)
out = self.conv1_1(out)
out = self.relu(out)
out = self.conv1_2(out)
out = self.relu(out)
out = self.max_pool(out)
out = self.conv2_1(out)
out = self.relu(out)
out = self.conv2_2(out)
out = self.relu(out)
out = self.max_pool(out)
f_2 = out
out = self.conv3_1(out)
out = self.relu(out)
out = self.conv3_2(out)
out = self.relu(out)
out = self.conv3_3(out)
out = self.relu(out)
out = self.max_pool(out)
f_3 = out
out = self.conv4_1(out)
out = self.relu(out)
out = self.conv4_2(out)
out = self.relu(out)
out = self.conv4_3(out)
out = self.relu(out)
out = self.max_pool(out)
f_4 = out
out = self.conv5_1(out)
out = self.relu(out)
out = self.conv5_2(out)
out = self.relu(out)
out = self.conv5_3(out)
out = self.relu(out)
out = self.max_pool(out)
f_5 = out
return f_0, f_2, f_3, f_4, f_5
class Merge(nn.Cell):
def __init__(self):
super(Merge, self).__init__()
self.conv1 = nn.Conv2d(1024, 128, 1, has_bias=True)
self.bn1 = nn.BatchNorm2d(128)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(
128,
128,
3,
padding=1,
pad_mode='pad',
has_bias=True)
self.bn2 = nn.BatchNorm2d(128)
self.relu2 = nn.ReLU()
self.conv3 = nn.Conv2d(384, 64, 1, has_bias=True)
self.bn3 = nn.BatchNorm2d(64)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(
64,
64,
3,
padding=1,
pad_mode='pad',
has_bias=True)
self.bn4 = nn.BatchNorm2d(64)
self.relu4 = nn.ReLU()
self.conv5 = nn.Conv2d(192, 32, 1)
self.bn5 = nn.BatchNorm2d(32)
self.relu5 = nn.ReLU()
self.conv6 = nn.Conv2d(
32,
32,
3,
padding=1,
pad_mode='pad',
has_bias=True)
self.bn6 = nn.BatchNorm2d(32)
self.relu6 = nn.ReLU()
self.conv7 = nn.Conv2d(
32,
32,
3,
padding=1,
pad_mode='pad',
has_bias=True)
self.bn7 = nn.BatchNorm2d(32)
self.relu7 = nn.ReLU()
self.concat = P.Concat(axis=1)
def construct(self, x, f1, f2, f3, f4):
        img_height = P.Shape()(x)[2]
        img_width = P.Shape()(x)[3]
        out = P.ResizeBilinear((img_height / 16, img_width / 16), True)(f4)
        out = self.concat((out, f3))
        out = self.relu1(self.bn1(self.conv1(out)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = P.ResizeBilinear((img_height / 8, img_width / 8), True)(out)
        out = self.concat((out, f2))
        out = self.relu3(self.bn3(self.conv3(out)))
        out = self.relu4(self.bn4(self.conv4(out)))
        out = P.ResizeBilinear((img_height / 4, img_width / 4), True)(out)
out = self.concat((out, f1))
out = self.relu5(self.bn5(self.conv5(out)))
out = self.relu6(self.bn6(self.conv6(out)))
out = self.relu7(self.bn7(self.conv7(out)))
return out
class Output(nn.Cell):
def __init__(self, scope=512):
super(Output, self).__init__()
self.conv1 = nn.Conv2d(32, 1, 1)
self.sigmoid1 = nn.Sigmoid()
self.conv2 = nn.Conv2d(32, 4, 1)
self.sigmoid2 = nn.Sigmoid()
self.conv3 = nn.Conv2d(32, 1, 1)
self.sigmoid3 = nn.Sigmoid()
self.scope = scope
self.concat = P.Concat(axis=1)
self.PI = 3.1415926535898
def construct(self, x):
score = self.sigmoid1(self.conv1(x))
loc = self.sigmoid2(self.conv2(x)) * self.scope
angle = (self.sigmoid3(self.conv3(x)) - 0.5) * self.PI
geo = self.concat((loc, angle))
return score, geo
class EAST(nn.Cell):
def __init__(self):
super(EAST, self).__init__()
self.extractor = VGG16FeatureExtraction()
self.merge = Merge()
self.output = Output()
def construct(self, x_1):
f_0, f_1, f_2, f_3, f_4 = self.extractor(x_1)
x_1 = self.merge(f_0, f_1, f_2, f_3, f_4)
score, geo = self.output(x_1)
return score, geo
class DiceCoefficient(nn.Cell):
def __init__(self):
super(DiceCoefficient, self).__init__()
self.sum = P.ReduceSum()
self.eps = 1e-5
def construct(self, true_cls, pred_cls):
intersection = self.sum(true_cls * pred_cls, ())
union = self.sum(true_cls, ()) + self.sum(pred_cls, ()) + self.eps
loss = 1. - (2 * intersection / union)
return loss
class MyMin(nn.Cell):
def __init__(self):
super(MyMin, self).__init__()
self.abs = P.Abs()
def construct(self, a, b):
return (a + b - self.abs(a - b)) / 2
class EastLossBlock(nn.Cell):
def __init__(self):
super(EastLossBlock, self).__init__()
self.split = P.Split(1, 5)
self.min = MyMin()
self.log = P.Log()
self.cos = P.Cos()
self.mean = P.ReduceMean(keep_dims=False)
self.sum = P.ReduceSum()
self.eps = 1e-5
self.dice = DiceCoefficient()
def construct(
self,
y_true_cls,
y_pred_cls,
y_true_geo,
y_pred_geo,
training_mask):
ans = self.sum(y_true_cls)
classification_loss = self.dice(
y_true_cls, y_pred_cls * (1 - training_mask))
# n * 5 * h * w
d1_gt, d2_gt, d3_gt, d4_gt, theta_gt = self.split(y_true_geo)
d1_pred, d2_pred, d3_pred, d4_pred, theta_pred = self.split(y_pred_geo)
area_gt = (d1_gt + d3_gt) * (d2_gt + d4_gt)
area_pred = (d1_pred + d3_pred) * (d2_pred + d4_pred)
w_union = self.min(d2_gt, d2_pred) + self.min(d4_gt, d4_pred)
h_union = self.min(d1_gt, d1_pred) + self.min(d3_gt, d3_pred)
area_intersect = w_union * h_union
area_union = area_gt + area_pred - area_intersect
iou_loss_map = -self.log((area_intersect + 1.0) /
(area_union + 1.0)) # iou_loss_map
angle_loss_map = 1 - self.cos(theta_pred - theta_gt) # angle_loss_map
angle_loss = self.sum(angle_loss_map * y_true_cls) / ans
iou_loss = self.sum(iou_loss_map * y_true_cls) / ans
geo_loss = 10 * angle_loss + iou_loss
return geo_loss + classification_loss
class EastWithLossCell(nn.Cell):
def __init__(self, network):
super(EastWithLossCell, self).__init__()
self.east_network = network
self.loss = EastLossBlock()
def construct(self, img, true_cls, true_geo, training_mask):
        score, geometry = self.east_network(img)
        loss = self.loss(
            true_cls,
            score,
            true_geo,
            geometry,
            training_mask)
return loss
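# Minimal smoke-test sketch (assumption: a configured MindSpore environment; not part of the original file):
# import numpy as np
# from mindspore import Tensor
# net = EAST()
# score, geo = net(Tensor(np.zeros((1, 3, 256, 256), np.float32)))
# print(score.shape, geo.shape)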
| 29.502747 | 79 | 0.538039 |
208edc51685c65faff0900a1cfaeda349445762a
| 298 |
py
|
Python
|
src/network/bo/messages/message.py
|
TimHabeck/blockchain-lab
|
3cd050ee43f26cf0a1f70869100f0b40a6abae07
|
[
"RSA-MD"
] | null | null | null |
src/network/bo/messages/message.py
|
TimHabeck/blockchain-lab
|
3cd050ee43f26cf0a1f70869100f0b40a6abae07
|
[
"RSA-MD"
] | null | null | null |
src/network/bo/messages/message.py
|
TimHabeck/blockchain-lab
|
3cd050ee43f26cf0a1f70869100f0b40a6abae07
|
[
"RSA-MD"
] | null | null | null |
from abc import ABC
class Message(ABC):
def __init__(self) -> None:
self._name = None
def get_name(self):
return self._name
def set_name(self, name):
self._name = name
def to_dict(self):
pass
@staticmethod
def from_dict():
pass
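# Hypothetical concrete subclass (sketch, not part of the original module):
class PingMessage(Message):
    def __init__(self) -> None:
        super().__init__()
        self.set_name("ping")
    def to_dict(self):
        return {"name": self.get_name()}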
| 14.9 | 31 | 0.573826 |
20d46ae887ed60a533225b195bd966d42cf1e440
| 493 |
py
|
Python
|
3kCTF/2021/web/pawnshop/apache/src/index.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
3kCTF/2021/web/pawnshop/apache/src/index.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
3kCTF/2021/web/pawnshop/apache/src/index.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from funcs import *
form = cgi.FieldStorage()
action = form.getvalue('action')
if action=='list':
list_items()
elif action=='bid':
mail = form.getvalue('mail')
item_id = form.getvalue('item_id')
amount = form.getvalue('amount')
if(mail != None and item_id != None and amount != None):
verify_email(mail)
save_bid(mail+"|"+item_id+"|"+amount+"\n\n")
api({'msg':'bid saved, we will contact winners when auction ends'})
api({'msg':'error'})
else:
api(False)
| 25.947368 | 69 | 0.665314 |
b30e40b036ccd20726e930a9caacc645f345a20a
| 2,840 |
py
|
Python
|
Packs/CommonScripts/Scripts/GetDomainDNSDetails/GetDomainDNSDetails.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CommonScripts/Scripts/GetDomainDNSDetails/GetDomainDNSDetails.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CommonScripts/Scripts/GetDomainDNSDetails/GetDomainDNSDetails.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa # pylint: disable=unused-wildcard-import
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa # pylint: disable=unused-wildcard-import
from typing import (
Dict, Any, Optional,
Union, List
)
import traceback
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdataclass
import dns.rdata
DNS_QUERY_TTL = 10.0
QTYPES = ["CNAME", "NS", "A", "AAAA"]
''' STANDALONE FUNCTION '''
def make_query(resolver: dns.resolver.Resolver, qname: str, qtype: str, use_tcp: bool) -> Dict[str, Any]:
q_rdtype = dns.rdatatype.from_text(qtype)
q_rdclass = dns.rdataclass.from_text("IN")
try:
ans = resolver.resolve(
qname,
q_rdtype, q_rdclass,
tcp=use_tcp,
lifetime=DNS_QUERY_TTL,
raise_on_no_answer=True
)
except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
return {}
if ans.rrset is None:
return {}
result: Dict[str, List[str]] = {}
result[qtype] = [
rr.to_text()
for rr in ans.rrset
if (rr is not None and rr.rdtype == q_rdtype and rr.rdclass == q_rdclass)
]
return result
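# Illustrative result shape (sketch): make_query(resolver, "example.com", "A", use_tcp=False)
# would return something like {"A": ["93.184.216.34"]} (address shown for illustration only).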
''' COMMAND FUNCTION '''
def get_domain_dns_details_command(args: Dict[str, Any]) -> CommandResults:
outputs: Optional[Dict[str, Dict[str, Any]]]
answer: Union[str, Dict[str, Any]]
server = args.get('server')
use_tcp = argToBoolean(args.get('use_tcp', 'Yes'))
qtypes = QTYPES
if (arg_qtype := args.get('qtype')) is not None:
qtypes = argToList(arg_qtype)
qname = args.get('domain')
if qname is None:
raise ValueError("domain is required")
resolver = dns.resolver.Resolver()
if server is not None:
resolver.nameservers = [server]
answer = {
'domain': qname,
'server': server if server is not None else 'system'
}
    # each requested record type is queried explicitly, including CNAME
for qtype in qtypes:
answer.update(make_query(resolver, qname, qtype, use_tcp=use_tcp))
outputs = {
'DomainDNSDetails': answer
}
markdown = tableToMarkdown(
        f'Domain DNS Details for {qname}',
answer,
headers=["domain", "server"] + qtypes
)
return CommandResults(
readable_output=markdown,
outputs=outputs,
outputs_key_field=['domain', 'server']
)
''' MAIN FUNCTION '''
def main():
try:
return_results(get_domain_dns_details_command(demisto.args()))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute GetDomainDNSDetails. Error: {str(ex)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 23.865546 | 105 | 0.635915 |
5b76f0f58bd77a10887debdf519f7d1a031584da
| 451 |
py
|
Python
|
crypto/crypto-unrandompad/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | 2 |
2021-08-09T17:08:12.000Z
|
2021-08-09T17:08:17.000Z
|
crypto/crypto-unrandompad/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
crypto/crypto-unrandompad/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | 1 |
2021-10-09T16:51:56.000Z
|
2021-10-09T16:51:56.000Z
|
from pwn import *
from sympy.ntheory.modular import crt
from gmpy2 import iroot
from Crypto.Util.number import long_to_bytes
ns = []
cs = []
# Collect the same plaintext encrypted under three different moduli (e = 3 broadcast).
for _ in range(3):
    s = remote(sys.argv[1], int(sys.argv[2]))
    s.recvuntil("n: ")
    ns.append(int(s.recvline().decode()))
    s.sendlineafter("opt: ", "2")
    s.recvuntil("c: ")
    cs.append(int(s.recvline().decode()))
    s.close()
# Hastad's broadcast attack: CRT the ciphertexts to recover m^3, then take the integer cube root.
ptc = int(crt(ns, cs)[0])
print(long_to_bytes(int(iroot(ptc, 3)[0])).decode())
| 23.736842 | 52 | 0.654102 |
947aaf98d219695563566c85c2549bd230e445ac
| 245 |
py
|
Python
|
Algorithms/Warmup/mini_max_sum.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Warmup/mini_max_sum.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Warmup/mini_max_sum.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from functools import reduce
from itertools import combinations  # permutations and product were imported but never used
arr = [1, 2, 3, 4, 5]
n = len(arr)
ret = [reduce(lambda x, y: x + y, i) for i in combinations(arr, n - 1)]
print(min(ret), max(ret))
| 24.5 | 71 | 0.677551 |
947f3eed6772f22eda5ef430e9b3191c16ac374f
| 2,687 |
py
|
Python
|
___Python/Torsten/Python-Kurs/p05_random/m01_wuerfeln.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Torsten/Python-Kurs/p05_random/m01_wuerfeln.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Torsten/Python-Kurs/p05_random/m01_wuerfeln.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
import random
r = random.Random()
def wuerfeln():
return r.randint(1, 6)
def muenzwurf():
    return r.randint(0, 1)  # 0 -> heads, 1 -> tails
def kugel():
return r.randint(1, 49)
def lottoziehung(anzahl):
zahl = r.randint(1, 49)
if zahl not in ziehung:
ziehung.append(zahl)
anzahl += 1
if anzahl != 6:
lottoziehung(anzahl)
d = {}
for i in range(100000):
augenzahl = wuerfeln()
if augenzahl in d:
d[augenzahl] += 1
else:
d[augenzahl] = 1
print("Würfel klassisch:",d)
# 1) Lottozahlen ermittel für '6 aus 49'
# Ansatz A: WHILE
anzahl = 0
ziehung = []
while anzahl < 6:
zahl = r.randint(1, 49)
if zahl not in ziehung:
ziehung.append(zahl)
anzahl += 1
print("While-Schleife:",sorted(ziehung))
# Approach B: recursive
anzahl = 0
ziehung = []
lottoziehung(anzahl)
print("Recursive:", sorted(ziehung))
# Approach C:
kugeln = 0
menge = set()  # set of balls drawn so far
lottoziehung = []
while kugeln < 6:
    ziehung = kugel()
    if ziehung not in menge:  # this ball has not been drawn yet
        lottoziehung.append(ziehung)
        menge.add(ziehung)  # this ball must not be drawn again, so it goes into the set of previously drawn balls
        kugeln += 1
print(sorted(lottoziehung))
# Approach D: "lottery fairy"
urne = list(range(1, 50)) # urne = [1, 2, 3, ..., 49]
lottoziehung = []
for i in range(6):
    ziehung = urne.pop(r.randint(0, len(urne) - 1))  # draw a valid index; the original off-by-one favoured the last ball
    lottoziehung.append(ziehung)
print(sorted(lottoziehung))
# Approach E: use a suitable method from random
lottoziehung = r.sample(range(1, 50), 6)
print(sorted(lottoziehung))
# 2) Write a function wuerfeln2 that rolls a fair die.
# Only the function muenzwurf may be used in the implementation.
# rolling a die via coin flips
# Binary  Decimal
# 0 0 0 0
# 1 0 0 1
# 0 1 0 2
# 1 1 0 3
# 0 0 1 4
# 1 0 1 5
# 0 1 1 6
# 1 1 1 7
def wuerfeln2():
while True:
        i_dual = 1  # place value of the current binary digit
        i_augenzahl = 0
        while i_dual <= 4:
            i_augenzahl += muenzwurf() * i_dual
            i_dual *= 2  # binary place value doubles after each flip (1, 2, 4)
if 0 < i_augenzahl < 7:
break
return i_augenzahl
dict_mw = {}
i_schleife = 0  # loop counter
while i_schleife < 100000:
    i_augenzahl = wuerfeln2()
    if i_augenzahl in dict_mw:
        dict_mw[i_augenzahl] += 1
    else:
        dict_mw[i_augenzahl] = 1
    i_schleife += 1
print("Coin-flip die: " + str(dict(sorted(dict_mw.items()))))
| 21.669355 | 139 | 0.596948 |
8469b52da50e995d6953b3f007086e6d9fc96c76
| 1,082 |
py
|
Python
|
frappe-bench/env/lib/python2.7/site-packages/cli_helpers/tabular_output/terminaltables_adapter.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/cli_helpers/tabular_output/terminaltables_adapter.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/cli_helpers/tabular_output/terminaltables_adapter.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Format adapter for the terminaltables module."""
import terminaltables
from cli_helpers.utils import filter_dict_by_key
from .preprocessors import (convert_to_string, override_missing_value,
style_output)
supported_formats = ('ascii', 'double', 'github')
preprocessors = (override_missing_value, convert_to_string, style_output)
def adapter(data, headers, table_format=None, **kwargs):
"""Wrap terminaltables inside a function for TabularOutputFormatter."""
keys = ('title', )
table_format_handler = {
'ascii': terminaltables.AsciiTable,
'double': terminaltables.DoubleTable,
'github': terminaltables.GithubFlavoredMarkdownTable,
}
table = table_format_handler[table_format]
t = table([headers] + list(data), **filter_dict_by_key(kwargs, keys))
dimensions = terminaltables.width_and_alignment.max_dimensions(
t.table_data,
t.padding_left,
t.padding_right)[:3]
for r in t.gen_table(*dimensions):
yield u''.join(r)
| 30.914286 | 75 | 0.698706 |
b6d65f7817d4f4ae0d0fe72cc3c4ecb679fb86e4
| 1,010 |
py
|
Python
|
getData/accv.py
|
Dong-Ki-Lee/emotionFinder
|
54dca62993bb87f1994be3fafaf1b7c6c60d6d95
|
[
"MIT"
] | null | null | null |
getData/accv.py
|
Dong-Ki-Lee/emotionFinder
|
54dca62993bb87f1994be3fafaf1b7c6c60d6d95
|
[
"MIT"
] | null | null | null |
getData/accv.py
|
Dong-Ki-Lee/emotionFinder
|
54dca62993bb87f1994be3fafaf1b7c6c60d6d95
|
[
"MIT"
] | null | null | null |
'''
Automatically combine consonants and vowels in a sentence
Author: Dongki Lee
Module purpose: the hgtk library can only compose items that are fully decomposed (and decompose whole items).
This module uses hgtk's decompose and compose functions so that a sentence in which only a final consonant has been
split off (e.g. 가ㄴ다 -> 간다) can be restored to the original sentence.
Created: 2018.07.05
Change log:
2018.07.05: first version of the module
'''
import hgtk
import re
def combine(input_string):
    need_to_bind = re.compile('[ㄱ-ㅎ]')
    m = need_to_bind.search(input_string)
    if m is None:
        return input_string
    else:
        start = m.start()
        decompose = list(hgtk.letter.decompose(input_string[start - 1]))
        remove_key = input_string[start]
        if decompose[2] == '':
            # the preceding syllable has no final consonant yet: attach the stray jamo to it
            decompose[2] = remove_key
            composed_letter = hgtk.letter.compose(decompose[0], decompose[1], decompose[2])
            input_string = input_string[0:start-1] + composed_letter + input_string[start+1:]
        else:
            # the preceding syllable is already complete: drop the stray jamo
            input_string = input_string[0:start] + input_string[start+1:]
        return combine(input_string)  # recurse so every stray jamo in the sentence gets handled
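# Illustrative use, based on the docstring example:
# print(combine('가ㄴ다'))  # -> '간다'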
| 34.827586 | 109 | 0.657426 |
b63a894a11177ac8639ed1aa90a4802fd5dfd07a
| 523 |
py
|
Python
|
examples/mult.py
|
enordquist/Go100x
|
327a8f767a68d7b42e6cd0aea11b2d9cc2d8b5ea
|
[
"MIT"
] | null | null | null |
examples/mult.py
|
enordquist/Go100x
|
327a8f767a68d7b42e6cd0aea11b2d9cc2d8b5ea
|
[
"MIT"
] | null | null | null |
examples/mult.py
|
enordquist/Go100x
|
327a8f767a68d7b42e6cd0aea11b2d9cc2d8b5ea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import go100x
import numpy as np
os.environ["TIMEMORY_PRECISION"] = "8"
n = int(2.0e8)
a = np.zeros([n], dtype=np.float64)  # np.float was removed in NumPy 1.24; use the explicit dtype
b = np.zeros([n], dtype=np.float64)
a += 2
b += 3
c = go100x.calculate_cpu(a, b)
go100x.set_device(0)
o = [32, 64, 128, 256, 512, 1024]
for block in o:
for ngrid in o + [-1]:
if ngrid < 0:
ngrid = int((n + block - 1) / block)
d = go100x.calculate_gpu([ngrid], [block], a, b)
print("\nResults for array of size {} ({})", n, float(n))
| 19.37037 | 57 | 0.585086 |
b664a3da3d375dc5fe45b712d376cb50db9fed2a
| 4,702 |
py
|
Python
|
plugins/tff_backend/dal/node_orders.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | null | null | null |
plugins/tff_backend/dal/node_orders.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 178 |
2017-08-02T12:58:06.000Z
|
2017-12-20T15:01:12.000Z
|
plugins/tff_backend/dal/node_orders.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 2 |
2018-01-10T10:43:12.000Z
|
2018-03-18T10:42:23.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import logging
from datetime import datetime
from google.appengine.api import search
from google.appengine.api.search import SortExpression
from google.appengine.ext import ndb
from framework.bizz.job import run_job, MODE_BATCH
from mcfw.exceptions import HttpNotFoundException
from mcfw.rpc import returns, arguments
from plugins.tff_backend.consts.hoster import NODE_ORDER_SEARCH_INDEX
from plugins.tff_backend.models.hoster import NodeOrder
from plugins.tff_backend.plugin_consts import NAMESPACE
from plugins.tff_backend.utils.search import remove_all_from_index
NODE_ORDER_INDEX = search.Index(NODE_ORDER_SEARCH_INDEX, namespace=NAMESPACE)
@returns(NodeOrder)
@arguments(order_id=(int, long))
def get_node_order(order_id):
# type: (int) -> NodeOrder
order = NodeOrder.get_by_id(order_id)
if not order:
raise HttpNotFoundException('order_not_found')
return order
def index_all_node_orders():
remove_all_from_index(NODE_ORDER_INDEX)
run_job(_get_all_node_orders, [], multi_index_node_order, [], mode=MODE_BATCH, batch_size=200)
def _get_all_node_orders():
return NodeOrder.query()
def index_node_order(order):
# type: (NodeOrder) -> list[search.PutResult]
logging.info('Indexing node order %s', order.id)
document = create_node_order_document(order)
return NODE_ORDER_INDEX.put(document)
def multi_index_node_order(order_keys):
logging.info('Indexing %s node orders', len(order_keys))
orders = ndb.get_multi(order_keys) # type: list[NodeOrder]
return NODE_ORDER_INDEX.put([create_node_order_document(order) for order in orders])
def create_node_order_document(order):
order_id_str = '%s' % order.id
fields = [
search.AtomField(name='id', value=order_id_str),
search.AtomField(name='socket', value=order.socket),
search.NumberField(name='so', value=order.odoo_sale_order_id or -1),
search.NumberField(name='status', value=order.status),
search.DateField(name='order_time', value=datetime.utcfromtimestamp(order.order_time)),
search.TextField(name='username', value=order.username),
]
if order.shipping_info:
fields.extend([search.TextField(name='shipping_name', value=order.shipping_info.name),
search.TextField(name='shipping_email', value=order.shipping_info.email),
search.TextField(name='shipping_phone', value=order.shipping_info.phone),
search.TextField(name='shipping_address', value=order.shipping_info.address.replace('\n', ''))])
if order.billing_info:
fields.extend([search.TextField(name='billing_name', value=order.billing_info.name),
search.TextField(name='billing_email', value=order.billing_info.email),
search.TextField(name='billing_phone', value=order.billing_info.phone),
search.TextField(name='billing_address', value=order.billing_info.address.replace('\n', ''))])
return search.Document(order_id_str, fields)
def search_node_orders(query=None, page_size=20, cursor=None):
# type: (unicode, int, unicode) -> tuple[list[NodeOrder], search.Cursor, bool]
options = search.QueryOptions(limit=page_size,
cursor=search.Cursor(cursor),
ids_only=True,
sort_options=search.SortOptions(
expressions=[SortExpression(expression='order_time',
direction=SortExpression.DESCENDING)]))
search_results = NODE_ORDER_INDEX.search(search.Query(query, options=options)) # type: search.SearchResults
results = search_results.results # type: list[search.ScoredDocument]
node_orders = ndb.get_multi([NodeOrder.create_key(long(result.doc_id)) for result in results])
return node_orders, search_results.cursor, search_results.cursor is not None
def list_node_orders_by_user(username):
return NodeOrder.list_by_user(username)
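# Illustrative call (sketch, not part of the original module): fetch the first page of matching orders;
# the third return value is True while more results remain.
# orders, cursor, has_more = search_node_orders(query='status:1', page_size=20, cursor=None)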
| 44.358491 | 119 | 0.711825 |
b67a93dbdc6dae113acee0d8fa2424ce07a736e0
| 1,746 |
py
|
Python
|
checks/duplicate_content_test.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 19 |
2018-04-20T11:03:41.000Z
|
2022-01-12T20:58:56.000Z
|
checks/duplicate_content_test.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 160 |
2018-04-05T16:12:59.000Z
|
2022-03-01T13:01:27.000Z
|
checks/duplicate_content_test.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 8 |
2018-11-05T13:07:57.000Z
|
2021-06-11T11:46:43.000Z
|
import httpretty
from httpretty import httprettified
import unittest
from checks import duplicate_content
from checks import page_content
from checks.config import Config
@httprettified
class TestDuplicateContent(unittest.TestCase):
def test_identical(self):
page_body = """
<html>
<head>
<title>Title</title>
</head>
<body>
<h1 class="title">Headline</h1>
<p class="intro">Second paragraph with <strong>strong words</strong></p>
<p class="text">Third paragraph</p>
<ul class="somelist">
<li>A list item</li>
</ul>
</body>
</html>
"""
url1 = 'http://example.com/'
httpretty.register_uri(httpretty.GET, url1, body=page_body)
url2 = 'http://www.example.com/'
httpretty.register_uri(httpretty.GET, url2, body=page_body)
results = {}
config = Config(urls=[url1, url2])
page_content_checker = page_content.Checker(config=config, previous_results={})
results['page_content'] = page_content_checker.run()
checker = duplicate_content.Checker(config=page_content_checker.config,
previous_results=results)
result = checker.run()
urls_after = checker.config.urls
self.assertEqual(result, {
'http://example.com/ http://www.example.com/': {
'exception': None,
'similarity': 1.0
}
})
self.assertEqual(urls_after, ['http://example.com/'])
if __name__ == '__main__':
unittest.main()
| 30.631579 | 92 | 0.548683 |
1e5477fff3f0d8e4e575997d9206dddf56821f65
| 189 |
py
|
Python
|
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/ph-7.11-list-in-function.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/ph-7.11-list-in-function.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/ph-7.11-list-in-function.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
def add_numbers(numbers):
result = 0
for i in numbers:
result += i
#print("number =", i)
return result
result = add_numbers([1, 2, 30, 4, 5, 9])
print(result)
| 17.181818 | 41 | 0.566138 |
1e8ce7e39fa909fe47e567c835da6345fb42a02c
| 485 |
py
|
Python
|
infrastructure/azwrapper.py
|
lizzyTheLizard/homeserver-azure
|
e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57
|
[
"MIT"
] | null | null | null |
infrastructure/azwrapper.py
|
lizzyTheLizard/homeserver-azure
|
e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57
|
[
"MIT"
] | null | null | null |
infrastructure/azwrapper.py
|
lizzyTheLizard/homeserver-azure
|
e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57
|
[
"MIT"
] | null | null | null |
from az.cli import az
import sys
# General Wrapper for azure cli calls
def azSafe(command):
exit_code, result_dict, logs = az(command)
if exit_code != 0:
print("az " + command + " failed")
print(logs)
sys.exit(-1)
return result_dict
# General call to check if a resource already exists
def resourceExists(group, name):
    command = "resource list -g {} -n {}"  # avoid shadowing the function name itself
    result = azSafe(command.format(group, name))  # avoid shadowing the built-in list
    return len(result) != 0
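# Illustrative use (hypothetical group and resource names):
# if resourceExists("my-resource-group", "my-vm"):
#     print("resource already provisioned")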
| 28.529412 | 53 | 0.659794 |
1ea0c610076601a406a8cb69dd2c1e21593a40b9
| 1,835 |
py
|
Python
|
Packs/CommonScripts/Scripts/RegexGroups/RegexGroups.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CommonScripts/Scripts/RegexGroups/RegexGroups.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CommonScripts/Scripts/RegexGroups/RegexGroups.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def main():
args = demisto.args()
match_target = args['value']
capture_groups = args.get('groups')
dict_keys = args.get('keys')
regex_flags = 0
for flag in argToList(args.get('flags', '')):
if flag in ('dotall', 's'):
regex_flags |= re.DOTALL
elif flag in ('multiline', 'm'):
regex_flags |= re.MULTILINE
elif flag in ('ignorecase', 'i'):
regex_flags |= re.IGNORECASE
elif flag in ('unicode', 'u'):
regex_flags |= re.UNICODE
else:
raise ValueError(f'Unknown flag: {flag}')
regex_pattern = re.compile(r'{}'.format(args['regex']), regex_flags)
if capture_groups:
capture_groups = capture_groups.split(',')
# Validating groups input to be integers
if not all(x.isdigit() for x in capture_groups):
raise ValueError('Error: groups must be integers')
if dict_keys:
dict_keys = dict_keys.split(',')
pattern_match = re.search(regex_pattern, match_target)
matches = []
if pattern_match:
for i in pattern_match.groups():
matches.append(i)
if capture_groups:
for j in capture_groups:
if len(matches) - 1 < int(j):
raise ValueError('Error: Regex group (' + j + ') out of range')
matches = [matches[int(x)] for x in capture_groups]
if dict_keys:
if len(dict_keys) != len(matches):
raise ValueError('Error: Number of keys does not match number of items')
else:
dict_matches = dict(zip(dict_keys, matches))
demisto.results(dict_matches)
else:
demisto.results(matches)
if __name__ in ('__builtin__', 'builtins'):
main()
| 32.192982 | 84 | 0.595095 |
7897cac0ea4d830a3b8371de9199f19f03d22f71
| 2,632 |
py
|
Python
|
2_DeepLearning-Keras/04_Tensorboard/1-tensorboard.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
2_DeepLearning-Keras/04_Tensorboard/1-tensorboard.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
2_DeepLearning-Keras/04_Tensorboard/1-tensorboard.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
import os
import numpy as np
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
from tensorflow.keras.callbacks import *  # for TensorBoard
# create/store the log directory
dir_path = os.path.abspath("../DeepLearning/logs")  # Linux and Windows
# Dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Cast to np.float32
x_train = x_train.astype(np.float32)
y_train = y_train.astype(np.float32)
x_test = x_test.astype(np.float32)
y_test = y_test.astype(np.float32)
# dataset variables
train_size = x_train.shape[0]
test_size = x_test.shape[0]
num_features = 784 # 28x28
num_classes = 10
# one-hot encode the labels
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
# reshape the input data
x_train = x_train.reshape(train_size, num_features)
x_test = x_test.reshape(test_size, num_features)
# model parameters
init_w = TruncatedNormal(mean=0.0, stddev=0.01)
init_b = Constant(value=0.0)
lr = 0.001
optimizer = Adam(lr=lr)
epochs = 20
batch_size = 256  # typical range [32, 1024]; number of samples processed in parallel during training
# define the model
model = Sequential()
model.add(Dense(units=500, kernel_initializer=init_w, bias_initializer=init_b, input_shape=(num_features, )))
model.add(Activation("relu"))
model.add(Dense(units=300, kernel_initializer=init_w, bias_initializer=init_b))
model.add(Activation("relu"))
model.add(Dense(units=100, kernel_initializer=init_w, bias_initializer=init_b))
model.add(Activation("relu"))
model.add(Dense(units=num_classes, kernel_initializer=init_w, bias_initializer=init_b))
model.add(Activation("softmax"))
model.summary()
# compile, train and evaluate the model
model.compile(
loss="categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
# Tensorboard Callback
tb = TensorBoard(
log_dir=dir_path,
    histogram_freq=1,  # every epoch; 2 = every 2 epochs, etc.
write_graph=True)
model.fit(
x=x_train,
y=y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=[x_test, y_test],
    callbacks=[tb])  # required for TensorBoard
score = model.evaluate(
x_test,
y_test,
verbose=0)
print("Score: ", score)
# USE: in the console, run: tensorboard --logdir LOGDIR
# to compare several models, create one subfolder per model inside the logs folder and point TensorBoard at the parent folder
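# Sketch for comparing runs (assumption: one subfolder per model under the logs directory):
# run_dir = os.path.join(dir_path, "model_v1")
# tb = TensorBoard(log_dir=run_dir, histogram_freq=1, write_graph=True)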
| 28.301075 | 122 | 0.765957 |
15b74e5fe7101d3d254659d6739b6dff810a68af
| 489 |
py
|
Python
|
python/decorator/class_based_decorators_with_arguments.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/decorator/class_based_decorators_with_arguments.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/decorator/class_based_decorators_with_arguments.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
import functools
class ClassDecorator(object):
def __init__(self, arg1, arg2):
print(f'Arguments of decorators {arg1}, {arg2}')
self.arg1 = arg1
self.arg2 = arg2
    def __call__(self, func):
        @functools.wraps(func)  # copy the metadata onto the wrapper that is actually returned (update_wrapper(self, ...) decorated a discarded object)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
@ClassDecorator("arg1", "arg2")
def print_args(*args):
for arg in args:
print(arg)
print_args(1, 2, 3)
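# Expected console output (sketch):
#   Arguments of decorators arg1, arg2
#   1
#   2
#   3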
| 20.375 | 56 | 0.607362 |
01be7042a6f0b68cc5fa4de5ee66956770d65fa3
| 794 |
py
|
Python
|
docs/API/Users_Guide/scripts/BAM_Slice.py
|
ZhenyuZ/gdc-docs
|
f024d5d4cd86dfa2c9e7d63850eee94d975b7948
|
[
"Apache-2.0"
] | 67 |
2016-06-09T14:11:51.000Z
|
2022-03-16T07:54:44.000Z
|
docs/API/Users_Guide/scripts/BAM_Slice.py
|
ZhenyuZ/gdc-docs
|
f024d5d4cd86dfa2c9e7d63850eee94d975b7948
|
[
"Apache-2.0"
] | 19 |
2016-06-21T15:51:11.000Z
|
2021-06-07T09:22:20.000Z
|
docs/API/Users_Guide/scripts/BAM_Slice.py
|
ZhenyuZ/gdc-docs
|
f024d5d4cd86dfa2c9e7d63850eee94d975b7948
|
[
"Apache-2.0"
] | 32 |
2016-07-15T01:24:19.000Z
|
2019-03-25T10:42:28.000Z
|
import requests
import json
'''
This script will not work until $TOKEN_FILE_PATH
is replaced with an actual path.
'''
token_file = "$TOKEN_FILE_PATH"
file_id = "11443f3c-9b8b-4e47-b5b7-529468fec098"
data_endpt = "https://api.gdc.cancer.gov/slicing/view/{}".format(file_id)
with open(token_file,"r") as token:
token_string = str(token.read().strip())
params = {"gencode": ["BRCA1", "BRCA2"]}
response = requests.post(data_endpt,
data = json.dumps(params),
headers = {
"Content-Type": "application/json",
"X-Auth-Token": token_string
})
file_name = "brca_slices.bam"
with open(file_name, "wb") as output_file:
output_file.write(response.content)
| 26.466667 | 73 | 0.600756 |
da7a049f44bfb52f1ea5a044260e1bee670f6bac
| 1,567 |
py
|
Python
|
MockServer/server.py
|
mcteacraft/MovingSpirit
|
90fb85809a46f286b55ecc4e1d2adbe9579ca713
|
[
"MIT"
] | null | null | null |
MockServer/server.py
|
mcteacraft/MovingSpirit
|
90fb85809a46f286b55ecc4e1d2adbe9579ca713
|
[
"MIT"
] | null | null | null |
MockServer/server.py
|
mcteacraft/MovingSpirit
|
90fb85809a46f286b55ecc4e1d2adbe9579ca713
|
[
"MIT"
] | null | null | null |
from http.server import BaseHTTPRequestHandler, HTTPServer
hostName = "localhost"
serverPort = 8080
class MyServer(BaseHTTPRequestHandler):
state = "Stopped"
def __init__(self, request, client_address, server):
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
self.send_response(200)
if(self.path == "/minecraft/start"):
if(MyServer.state == "Stopped"):
MyServer.state = "Starting"
elif(self.path == "/minecraft/stop"):
if(MyServer.state == "Running"):
MyServer.state = "Stopping"
        else:
            # any other GET (including /minecraft/status polls) advances the mock state machine
            if(MyServer.state == "Starting"):
                MyServer.state = "Running"
            if(MyServer.state == "Stopping"):
                MyServer.state = "Stopped"
        if (self.path == "/minecraft/status"):
            responseJson = '{"status" : "' + MyServer.state + '"}'
            self.send_header("Content-type", "application/json")
            self.end_headers()
            self.wfile.write(bytes(responseJson, "utf-8"))
        else:
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(bytes(MyServer.state, "utf-8"))
if __name__ == "__main__":
webServer = HTTPServer((hostName, serverPort), MyServer)
print("Server started http://%s:%s" % (hostName, serverPort))
try:
webServer.serve_forever()
except KeyboardInterrupt:
pass
webServer.server_close()
print("Server stopped.")
| 33.340426 | 78 | 0.589024 |
16f23c8bc19c75ec9e1270d93c6f8d77b4b97e77
| 9,916 |
py
|
Python
|
pythonProj/FZPython/pyquant/db_models/__init__.py
|
iHamburg/FZQuant
|
86b750ec33d01badfd3f324d6f1599118b9bf8ff
|
[
"MIT"
] | null | null | null |
pythonProj/FZPython/pyquant/db_models/__init__.py
|
iHamburg/FZQuant
|
86b750ec33d01badfd3f324d6f1599118b9bf8ff
|
[
"MIT"
] | null | null | null |
pythonProj/FZPython/pyquant/db_models/__init__.py
|
iHamburg/FZQuant
|
86b750ec33d01badfd3f324d6f1599118b9bf8ff
|
[
"MIT"
] | 2 |
2019-04-10T10:05:00.000Z
|
2021-11-24T17:17:23.000Z
|
#!/usr/bin/env python
# coding: utf8
import json
import datetime
from pprint import pprint
from sqlalchemy import Column, String,Integer, Float, DateTime
from sqlalchemy import Table, Text
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
import pandas as pd
from pyquant.libs.mysqllib import session
from pyquant.libs.mysqllib import BaseModel as Base
import pyquant.libs.utillib as utillib
from pyquant.libs.cachelib import cache
from pyquant.utils.monitor import listener, Monitor, addCache
# Many-Many Relation
symbolgroup_symbol = Table('symbolgroup_symbol', Base.metadata,
Column('symbol_id', ForeignKey('symbol.id'), primary_key=True),
Column('symbolgroup_id', ForeignKey('symbolgroup.id'), primary_key=True))
stockIndex_symbol = Table('stockIndex_symbol', Base.metadata,
Column('stockIndex_id', ForeignKey('stockIndex.id'), primary_key=True),
Column('symbol_id', ForeignKey('symbol.id'), primary_key=True))
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
username = Column(String)
# fullname = Column(String)
password = Column(String)
# def __repr__(self):
# return "<User(name='%s', fullname='%s', password='%s')>" % (
# self.name, self.fullname, self.password)
class Symbol(Base):
__tablename__ = 'symbol'
__table_args__ = {
'mysql_engine': 'InnoDB',
'mysql_charset': 'utf8',
}
id = Column(Integer, primary_key=True)
exchange_id = Column(String)
ticker = Column(String)
instrument = Column(String)
name = Column(String)
sector = Column(String)
symbolgroup = relationship('SymbolGroup',secondary = symbolgroup_symbol,back_populates = 'symbol')
stockIndex = relationship('StockIndex',secondary = stockIndex_symbol,back_populates = 'symbol')
def __repr__(self):
return "<Symbol(id='%s', exchange_id='%s', ticker='%s', instrument='%s', name='%s', sector='%s')>" % (
self.id, self.exchange_id, self.ticker, self.instrument, self.name, self.sector)
@classmethod
# @listener(Monitor)
def get_by_ticker(cls, ticker, index=False, lock_mode=None):
cache_key = '%s-%s-%s-%s-%s' % (cls.__name__,'get_stock_by_ticker',ticker, index, lock_mode)
cache_value = cache.get(cache_key)
        if cache_value:  # if a cached value exists, return it directly
return cache_value
query = session.query(cls)
if lock_mode:
query = query.with_lockmode(lock_mode)
query = query.filter(cls.ticker==ticker, cls.instrument==('index' if index else 'stock'))
obj = query.first()
cache.set(cache_key, obj)
return obj
@staticmethod
def get_list_by_symbolgroup_id(symbolgroup_id, limit=30, offset=0):
return session.query(Symbol).filter(Symbol.symbolgroup.any(id=symbolgroup_id)).\
limit(limit).offset(offset).all()
@classmethod
def get_index_list(cls):
return session.query(cls).filter(Symbol.instrument == 'index').all()
@property
def index(self):
return True if self.instrument == 'index' else False
def _test_get_all():
objs = Symbol.get_all(columns='id', limit=None)
for row in objs:
print(row)
def _test_index():
symbol = Symbol.get_by_id(2408)
print('index', symbol.index)
class DailyPrice(Base):
"""
    Daily price
"""
__tablename__ = 'dailyPrice'
id = Column(Integer, primary_key=True)
symbol_id = Column(Integer, ForeignKey('symbol.id'))
price_date = Column(DateTime)
open_price = Column(Float)
high_price = Column(Float)
low_price = Column(Float)
close_price = Column(Float)
volume = Column(Integer)
symbol = relationship("Symbol",back_populates="daily_price")
def __repr__(self):
return "<Daily_price( symbol_id='%s', price_date='%s', o='%s', h='%s', l='%s', c='%s', v=='%s')>" % (
self.symbol_id, self.price_date, self.open_price, self.high_price, self.low_price, self.close_price, self.volume)
def to_dict(self):
obj = super(DailyPrice, self).to_dict()
obj['price_date'] = str(obj['price_date'])
return obj
@classmethod
# @listener(Monitor)
def get_by_symbol_id(cls, symbol_id, fromdate=None, todate=None, output = 'dict', isCache=True):
"""
        Look up daily prices by symbol_id.
:param symbol_id:
:param fromdate:
:param todate:
:param output:
:return:
"""
if not todate:
todate = str(datetime.date.today())
cache_key = '%s-%s-%s-%s-%s-%s' % (cls.__name__, 'get_by_symbol_id', symbol_id, fromdate, todate, output)
# print('cache key', cache_key)
        if isCache:  # use the cache
cache_value = cache.get(cache_key)
if isinstance(cache_value, pd.DataFrame):
if not cache_value.empty:
return cache_value
            if cache_value:  # return the cached value directly if present
return cache_value
where = []
        if isinstance(symbol_id, (list, tuple)):  # symbol_id may be a single id or a collection
where.append(DailyPrice.symbol_id.in_(symbol_id))
else:
where.append(DailyPrice.symbol_id == symbol_id)
if fromdate:
where.append(DailyPrice.price_date >= fromdate)
if todate:
where.append(DailyPrice.price_date < todate)
# print(where)
if output == 'df':
df = pd.read_sql(session.query(DailyPrice).filter(*where).statement, session.bind)
del df['id']
del df['symbol_id']
            # set the DataFrame index
            df['price_date'] = df['price_date'].astype('datetime64[ns]')
            df = df.set_index('price_date')
            # rename the columns
            df.columns = ['open', 'high', 'low', 'close', 'volume']
            # reorder the columns (.ix was removed from pandas; .loc is the modern equivalent)
            cols = ['open', 'high', 'close', 'low', 'volume']
            objs = df.loc[:, cols]
elif output == 'dict':
            # print('saving to cache', cache_key)
objs = [row.to_dict() for row in session.query(DailyPrice).filter(*where).all()]
        else:  # return model instances
objs = session.query(DailyPrice).filter(*where).all()
cache.set(cache_key, objs)
return objs
def _test_multi_symbol():
print(DailyPrice.get_by_symbol_id([17,18], fromdate='2017-01-01', output='df')[:5])
class StockIndex(Base):
__tablename__ = 'stockIndex'
id = Column(Integer, primary_key=True)
name = Column(String)
symbol_id = Column(Integer, ForeignKey('symbol.id'))
symbol = relationship('Symbol', secondary=stockIndex_symbol, back_populates='stockIndex')
def _test_get_symbols():
stockindex = StockIndex.get_by_id(2)
print(stockindex.symbol)
class SymbolGroup(Base):
__tablename__ = 'symbolgroup'
id = Column(Integer, primary_key=True)
name = Column(String)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship("User", back_populates="symbolgroup")
symbol = relationship('Symbol', secondary=symbolgroup_symbol, back_populates='symbolgroup')
@staticmethod
def get_system_groups():
return session.query(__class__).filter(SymbolGroup.user_id == 0).all()
class Strategy(Base):
__tablename__ = 'strategy'
id = Column(Integer, primary_key=True)
name = Column(String)
user_id = Column(Integer, ForeignKey('user.id'))
filePath = Column(String)
desc = Column(String)
# One-Many Relations
Symbol.daily_price = relationship("DailyPrice", back_populates="symbol")
User.symbolgroup = relationship("SymbolGroup", back_populates="user")
def _query_relation():
query = session.query(DailyPrice).filter(DailyPrice.id == '7798525').all()
pprint.pprint([row.to_dict() for row in query])
def _query_join():
pprint.pprint(session.query(DailyPrice).join(Symbol).filter(Symbol.id == 2433).limit(10).all())
def _symbol_find_all():
# print(Symbol.find_all((Symbol.id > 200),10))
print(Symbol.find_all(limit = 10))
def _add_user_symbolgroup():
user = User(username='new user', password='123')
print(user)
sg = SymbolGroup(name='上证50')
user.symbolgroup = [sg]
session.add(user)
session.commit()
def _test_m_m_relation1():
# sd = SymbolGroup.get_by_id(3)
# print(sd.symbol)
si = StockIndex.get_by_id(2)
print(si.symbol)
def _test_add_m_m_relation():
sd = SymbolGroup.get(3)
sd.symbol.append(Symbol.get(19))
session.commit()
def _test_delete_m_m_relation():
sd = SymbolGroup.get(3)
sd.symbol.remove(Symbol.get(19))
session.commit()
def _test_add_user():
user = User(username='new user222', password='123')
session.add(user)
session.commit()
if __name__ == '__main__':
""""""
import pprint
# _query()
#
# _get()
# print(Symbol.get_by_ticker('000001', True))
# query_relation()
# _query_join()
# _symbol_find_all()
# print(Symbol.query().limit(10).all())
# print(User.get(1).to_dict())
# print(Symbol.get(17).to_dict())
# query = session.query(Symbol). \
# filter(Symbol.symbolgroup.any(id=3)). \
# all()
#
# print(query)
# _test_add_m_m_relation()
# _test_delete_m_m_relation()
# print(session.query(SymbolGroup).filter(SymbolGroup.user_id == 0).all())
# _symbol_find_all()
# print(DailyPrice.get_by_id(100).to_dict())
# _test_m_m_relation1()
# print(StockIndex.get_all())
# _test_m_m_relation1()
# print(Symbol.get_stock_by_ticker('000001', index=True))
# print(Symbol.get_by_id(20))
# print(DailyPrice.get_by_symbol_id(17, fromdate='2017-01-01', output='df'))
# _test_multi_symbol()
# arr = [12,34]
# s = 'key:%s' % arr
# print(s)
# _test_get_max_date()
# _test_get_all()
# _test_index()
# _test_m_m_relation1()
# _test_add_user()
_test_get_symbols()
| 26.655914 | 125 | 0.640581 |
c146650f80b6d9b0345e1e6e8e2d34975d1f6326
| 1,149 |
py
|
Python
|
api/clean/sequence_num.py
|
Latent-Lxx/dazhou-dw
|
902b4b625cda4c9e4eb205017b8955b81f37a0b5
|
[
"MIT"
] | null | null | null |
api/clean/sequence_num.py
|
Latent-Lxx/dazhou-dw
|
902b4b625cda4c9e4eb205017b8955b81f37a0b5
|
[
"MIT"
] | null | null | null |
api/clean/sequence_num.py
|
Latent-Lxx/dazhou-dw
|
902b4b625cda4c9e4eb205017b8955b81f37a0b5
|
[
"MIT"
] | 1 |
2022-02-11T04:44:37.000Z
|
2022-02-11T04:44:37.000Z
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time    : 2021/7/19 7:00 PM
# @Author : Latent
# @Email : [email protected]
# @File : sequence_num.py
# @Software: PyCharm
# @class   : cleaning of inventory data
"""
Field notes:
    1. inventory_id ----> auto-increment database key
    2. num ---> current stock
    3. num_level ---> stock-level grade
"""
class Sequence_Num(object):
    # 1. Stock level mapping ------> stock 0-50 -> '紧张' (tight), 50-100 -> '正常' (normal), above 100 -> '充足' (ample)
@classmethod
def sequence_num_level(cls, data):
platform = data['platform']
if platform != 'pdd':
_func_none = (lambda x: x if type(x) == int else 0)
item_num = int(_func_none(data['public']['num']))
if item_num <= 50:
num_level = '紧张'
elif 50 < item_num <= 100:
num_level = '正常'
else:
num_level = '充足'
else:
item_num = int(data['public']['num'])
if item_num <= 300:
num_level = '紧张'
elif 300 < item_num <= 999:
num_level = '正常'
else:
num_level = '充足'
num_info = {'num': item_num,
'num_level': num_level}
return num_info
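# Hedged usage sketch; the payload shape is inferred from the code above, and
# the platform/num values here are made up:
if __name__ == '__main__':
    sample = {'platform': 'taobao', 'public': {'num': 75}}
    print(Sequence_Num.sequence_num_level(sample))  # {'num': 75, 'num_level': '正常'}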
| 24.978261 | 63 | 0.491732 |
c1e4b11d7e74da3041c02009a83ef0d1d92d36f0
| 1,936 |
py
|
Python
|
main.py
|
alexpod1000/TF_PoseNet
|
0329a16275ec974d660e99564949ca95d71389ff
|
[
"MIT"
] | 1 |
2020-03-04T02:32:07.000Z
|
2020-03-04T02:32:07.000Z
|
main.py
|
alexpod1000/TF_PoseNet
|
0329a16275ec974d660e99564949ca95d71389ff
|
[
"MIT"
] | null | null | null |
main.py
|
alexpod1000/TF_PoseNet
|
0329a16275ec974d660e99564949ca95d71389ff
|
[
"MIT"
] | null | null | null |
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
from utils.model_utils import perform_prediction, decode_predictions
parts = [
"nose",
"leftEye",
"rightEye",
"leftEar",
"rightEar",
"leftShoulder",
"rightShoulder",
"leftElbow",
"rightElbow",
"leftWrist",
"rightWrist",
"leftHip",
"rightHip",
"leftKnee",
"rightKnee",
"leftAnkle",
"rightAnkle"
]
min_conf_score = 0.2
model_path = 'models/posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite'
# Resolution = ((InputImageSize - 1) / OutputStride) + 1
# ((513 - 1) / 32) + 1 = 17 (our case), so we are using the "worst" output stride, accuracy-wise
interpreter = tf.lite.Interpreter(model_path=model_path)
image = cv2.imread('images/1.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
heatmaps, offsets, displacements_fwd, displacements_bwd, resized_image = perform_prediction(image, interpreter)
keypoints = decode_predictions(heatmaps, offsets, output_stride=32)
resize_y_ratio = image.shape[0]/resized_image.shape[0]
resize_x_ratio = image.shape[1]/resized_image.shape[1]
image_cpy = np.copy(image)
pose_conf = np.array([keypoint["confidence"] for keypoint in keypoints]).mean()
for keypoint in keypoints:
scale = 5
# rescale to original (not resized by model) image coordinates
pos_y = int(keypoint["y"] * resize_y_ratio)
pos_x = int(keypoint["x"] * resize_x_ratio)
confidence_score = keypoint["confidence"]
if confidence_score > min_conf_score:
cv2.circle(image_cpy, (pos_x, pos_y), scale, (255, 0, 0), thickness=cv2.FILLED)
cv2.putText(image_cpy, parts[keypoint["part_index"]], (pos_x, pos_y), 0, 0.4, (0, 255, 0))
print("Confidence for {}: {}".format(parts[keypoint["part_index"]], confidence_score))
print("Confidence for pose {}".format(pose_conf))
plt.imshow(image_cpy)
plt.show()
| 28.470588 | 111 | 0.708678 |
a9b755b3d21103a71c140e3ae9bbb69470f88938
| 2,157 |
py
|
Python
|
7-assets/past-student-repos/LambdaSchool-master/m7/71a1/hashtable/test_hashtable_no_collisions.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/LambdaSchool-master/m7/71a1/hashtable/test_hashtable_no_collisions.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/LambdaSchool-master/m7/71a1/hashtable/test_hashtable_no_collisions.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
"""
This is the same test, but with big hash tables that are _unlikely_ to
have collisions after the 3 inserts we do.
Does not collide with DJB2 or FNV-1-64. But could collide with other hashes.
"""
import unittest
from hashtable import HashTable
class TestHashTable(unittest.TestCase):
def test_hash_table_insertion_and_retrieval(self):
ht = HashTable(0x10000)
ht.put("key-0", "val-0")
ht.put("key-1", "val-1")
ht.put("key-2", "val-2")
return_value = ht.get("key-0")
self.assertTrue(return_value == "val-0")
return_value = ht.get("key-1")
self.assertTrue(return_value == "val-1")
return_value = ht.get("key-2")
self.assertTrue(return_value == "val-2")
    def test_hash_table_insertion_overwrites_correctly(self):
ht = HashTable(0x10000)
ht.put("key-0", "val-0")
ht.put("key-1", "val-1")
ht.put("key-2", "val-2")
ht.put("key-0", "new-val-0")
ht.put("key-1", "new-val-1")
ht.put("key-2", "new-val-2")
return_value = ht.get("key-0")
self.assertTrue(return_value == "new-val-0")
return_value = ht.get("key-1")
self.assertTrue(return_value == "new-val-1")
return_value = ht.get("key-2")
self.assertTrue(return_value == "new-val-2")
def test_hash_table_removes_correctly(self):
ht = HashTable(0x10000)
ht.put("key-0", "val-0")
ht.put("key-1", "val-1")
ht.put("key-2", "val-2")
return_value = ht.get("key-0")
self.assertTrue(return_value == "val-0")
return_value = ht.get("key-1")
self.assertTrue(return_value == "val-1")
return_value = ht.get("key-2")
self.assertTrue(return_value == "val-2")
ht.delete("key-2")
ht.delete("key-1")
ht.delete("key-0")
return_value = ht.get("key-0")
self.assertTrue(return_value is None)
return_value = ht.get("key-1")
self.assertTrue(return_value is None)
return_value = ht.get("key-2")
self.assertTrue(return_value is None)
if __name__ == '__main__':
unittest.main()
| 29.958333 | 76 | 0.590635 |
8243304ed31524da22fd730f6960d925ef517d1c
| 4,232 |
py
|
Python
|
vkapp/bot/migrations/0001_initial.py
|
ParuninPavel/lenta4_hack
|
6d3340201deadf5757e37ddd7cf5580b928d7bda
|
[
"MIT"
] | 1 |
2017-11-23T13:33:13.000Z
|
2017-11-23T13:33:13.000Z
|
vkapp/bot/migrations/0001_initial.py
|
ParuninPavel/lenta4_hack
|
6d3340201deadf5757e37ddd7cf5580b928d7bda
|
[
"MIT"
] | null | null | null |
vkapp/bot/migrations/0001_initial.py
|
ParuninPavel/lenta4_hack
|
6d3340201deadf5757e37ddd7cf5580b928d7bda
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-21 13:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='AdminReview',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('rating', models.IntegerField()),
('date_time', models.DateTimeField(auto_now_add=True)),
('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Admin')),
],
),
migrations.CreateModel(
name='Blogger',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('balance', models.FloatField(default=0)),
],
),
migrations.CreateModel(
name='Income',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('type', models.CharField(choices=[('PROP', 'Предложение новости'), ('PUB', 'Опубликование новости')], default='PROP', max_length=4)),
('amount', models.FloatField(default=0)),
('date_time', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('link', models.CharField(blank=True, max_length=300, null=True)),
('media', models.CharField(blank=True, max_length=3000, null=True)),
('date_time', models.DateTimeField(auto_now_add=True)),
('blogger', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bot.Blogger')),
],
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('mount', models.FloatField(default=0)),
('date_time', models.DateTimeField(auto_now_add=True)),
('blogger', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Blogger')),
],
),
migrations.CreateModel(
name='Publication',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('date_time', models.DateTimeField(auto_now_add=True)),
('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Admin')),
('news', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.News')),
],
),
migrations.CreateModel(
name='VKUser',
fields=[
('vk_id', models.IntegerField(primary_key=True, serialize=False)),
],
),
migrations.AddField(
model_name='payment',
name='payer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.VKUser'),
),
migrations.AddField(
model_name='income',
name='news',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.News'),
),
migrations.AddField(
model_name='blogger',
name='vk_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.VKUser'),
),
migrations.AddField(
model_name='adminreview',
name='news',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.News'),
),
migrations.AddField(
model_name='admin',
name='vk_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.VKUser'),
),
]
| 39.185185 | 150 | 0.553639 |
6b7e59d167fcf735ba0f1030602e287f95102618
| 112 |
py
|
Python
|
comp/microsoft/has_solution/009_longest_semi_alernating_substr.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2017-05-18T06:11:02.000Z
|
2017-05-18T06:11:02.000Z
|
comp/microsoft/has_solution/009_longest_semi_alernating_substr.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
comp/microsoft/has_solution/009_longest_semi_alernating_substr.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
'''
Same as problem 005:
- 002 asks for the substring itself
- 005 asks for its length
'''
class Solution:
    def longest_substr(self, s):
        # Completion sketch (the body was missing): length of the longest
        # run with no three equal consecutive characters, via a sliding window.
        if len(s) < 3:
            return len(s)
        start, best = 0, 2
        for i in range(2, len(s)):
            if s[i] == s[i - 1] == s[i - 2]:
                start = i - 1
            best = max(best, i - start + 1)
        return best
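# Hypothetical quick check for the completion sketch above:
if __name__ == '__main__':
    print(Solution().longest_substr('baaabbabbb'))  # -> 7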
| 10.181818 | 32 | 0.633929 |
d407e07335c75b4a785c579550380e60429aee7c
| 4,724 |
py
|
Python
|
research/cv/Pix2Pix/src/utils/config.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 1 |
2021-11-18T08:17:44.000Z
|
2021-11-18T08:17:44.000Z
|
research/cv/Pix2Pix/src/utils/config.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
research/cv/Pix2Pix/src/utils/config.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""
Define the common options that are used in both training and test.
"""
import argparse
import ast
def get_args():
'''
get args.
'''
parser = argparse.ArgumentParser(description='Pix2Pix Model')
# parameters
parser.add_argument('--device_target', type=str, default='Ascend', choices=('Ascend', 'GPU'),
help='device where the code will be implemented (default: Ascend)')
parser.add_argument('--run_distribute', type=int, default=0, help='distributed training, default is 0.')
parser.add_argument('--device_num', type=int, default=1, help='device num, default is 1.')
parser.add_argument('--device_id', type=int, default=6, help='device id, default is 0.')
parser.add_argument('--save_graphs', type=ast.literal_eval, default=False,
help='whether save graphs, default is False.')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization, default is normal.')
parser.add_argument('--init_gain', type=float, default=0.02,
help='scaling factor for normal, xavier and orthogonal, default is 0.02.')
parser.add_argument('--pad_mode', type=str, default='CONSTANT', choices=('CONSTANT', 'REFLECT', 'SYMMETRIC'),
                        help='padding mode, default is CONSTANT.')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size, default is 286.')
parser.add_argument('--batch_size', type=int, default=1, help='batch_size, default is 1.')
parser.add_argument('--LAMBDA_Dis', type=float, default=0.5, help='weight for Discriminator Loss, default is 0.5.')
parser.add_argument('--LAMBDA_GAN', type=int, default=1, help='weight for GAN Loss, default is 1.')
parser.add_argument('--LAMBDA_L1', type=int, default=100, help='weight for L1 Loss, default is 100.')
parser.add_argument('--beta1', type=float, default=0.5, help='adam beta1, default is 0.5.')
parser.add_argument('--beta2', type=float, default=0.999, help='adam beta2, default is 0.999.')
parser.add_argument('--lr', type=float, default=0.0002, help='the initial learning rate, default is 0.0002.')
parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy, default is linear.')
parser.add_argument('--epoch_num', type=int, default=200, help='epoch number for training, default is 200.')
parser.add_argument('--n_epochs', type=int, default=100,
help='number of epochs with the initial learning rate, default is 100.')
parser.add_argument('--n_epochs_decay', type=int, default=100,
help='number of epochs with the dynamic learning rate, default is 100.')
parser.add_argument('--dataset_size', type=int, default=400, choices=(400, 1096),
help='for Facade_dataset,the number is 400; for Maps_dataset,the number is 1096.')
# The location of input and output data
parser.add_argument('--train_data_dir', type=str, default=None, help='the file path of input data during training.')
parser.add_argument('--val_data_dir', type=str, default=None, help='the file path of input data during validating.')
parser.add_argument('--train_fakeimg_dir', type=str, default='./results/fake_img/',
help='during training, the file path of stored fake img.')
parser.add_argument('--loss_show_dir', type=str, default='./results/loss_show',
help='during training, the file path of stored loss img.')
parser.add_argument('--ckpt_dir', type=str, default='./results/ckpt/',
help='during training, the file path of stored CKPT.')
parser.add_argument('--ckpt', type=str, default=None, help='during validating, the file path of the CKPT used.')
parser.add_argument('--predict_dir', type=str, default='./results/predict/',
help='during validating, the file path of Generated image.')
args = parser.parse_args()
return args
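# Hedged usage sketch: with no CLI flags passed, get_args() returns the
# defaults declared above (path arguments such as train_data_dir stay None):
if __name__ == '__main__':
    args = get_args()
    print(args.batch_size, args.lr, args.epoch_num)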
| 63.837838 | 120 | 0.671253 |
d474d5eb551dacae95e039d18d26a387b48c2cc2
| 2,903 |
py
|
Python
|
indl/metrics.py
|
SachsLab/indl
|
531d2e0c2ee765004aedc553af40e258262f86cb
|
[
"Apache-2.0"
] | 1 |
2021-02-22T01:39:50.000Z
|
2021-02-22T01:39:50.000Z
|
indl/metrics.py
|
SachsLab/indl
|
531d2e0c2ee765004aedc553af40e258262f86cb
|
[
"Apache-2.0"
] | null | null | null |
indl/metrics.py
|
SachsLab/indl
|
531d2e0c2ee765004aedc553af40e258262f86cb
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
__all__ = ['dprime', 'quickplot_history']
def dprime(y_true, y_pred, pmarg: float = 0.01, outputs: List[str] = ['dprime', 'bias', 'accuracy']) -> tuple:
"""
Calculate D-Prime for binary data.
70% for both classes is d=1.0488.
Highest possible is 6.93, but effectively 4.65 for 99%
http://www.birmingham.ac.uk/Documents/college-les/psych/vision-laboratory/sdtintro.pdf
This function is not designed to behave as a valid 'Tensorflow metric'.
Args:
y_true (array-like): True labels.
y_pred (array-like): Predicted labels.
        pmarg: Probability margin used to clip the hit/false-alarm rates away from 0 and 1.
        outputs: List of outputs among 'dprime', 'bias', 'accuracy'.
Returns:
Calculated d-prime value.
"""
import numpy as np
from scipy.stats import norm
# TODO: Adapt this function for tensorflow
# y_pred = ops.convert_to_tensor(y_pred)
# y_true = math_ops.cast(y_true, y_pred.dtype)
# return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)
# TODO: Check that true_y only has 2 classes, and test_y is entirely within true_y classes.
b_true = y_pred == y_true
b_pos = np.unique(y_true, return_inverse=True)[1].astype(bool)
true_pos = np.sum(np.logical_and(b_true, b_pos))
true_neg = np.sum(np.logical_and(b_true, ~b_pos))
    false_neg = np.sum(np.logical_and(~b_true, b_pos))   # label positive, predicted negative -> FN
    false_pos = np.sum(np.logical_and(~b_true, ~b_pos))  # label negative, predicted positive -> FP
tpr = true_pos / (true_pos + false_neg)
tpr = max(pmarg, min(tpr, 1-pmarg))
fpr = false_pos / (false_pos + true_neg)
fpr = max(pmarg, min(fpr, 1 - pmarg))
ztpr = norm.ppf(tpr, loc=0, scale=1)
zfpr = norm.ppf(fpr, loc=0, scale=1)
# Other measures of performance:
# sens = tp ./ (tp+fp)
# spec = tn ./ (tn+fn)
# balAcc = (sens+spec)/2
# informedness = sens+spec-1
output = tuple()
for out in outputs:
if out == 'dprime':
dprime = ztpr - zfpr
output += (dprime,)
elif out == 'bias':
bias = -(ztpr + zfpr) / 2
output += (bias,)
elif out == 'accuracy':
accuracy = 100 * (true_pos + true_neg) / (true_pos + false_pos + false_neg + true_neg)
output += (accuracy,)
return output
def quickplot_history(history) -> None:
"""
A little helper function to do a quick plot of model fit results.
Args:
history (tf.keras History):
"""
import matplotlib.pyplot as plt
if hasattr(history, 'history'):
history = history.history
hist_metrics = [_ for _ in history.keys() if not _.startswith('val_')]
for m_ix, m in enumerate(hist_metrics):
plt.subplot(len(hist_metrics), 1, m_ix + 1)
plt.plot(history[m], label='Train')
plt.plot(history['val_' + m], label='Valid.')
plt.xlabel('Epoch')
plt.ylabel(m)
plt.legend()
plt.tight_layout()
plt.show()
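# Hedged usage sketch for dprime() with synthetic binary labels (the values
# below are illustrative only):
if __name__ == '__main__':
    import numpy as np
    y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    y_pred = np.array([0, 0, 0, 1, 1, 1, 1, 0])
    d, bias, acc = dprime(y_true, y_pred, outputs=['dprime', 'bias', 'accuracy'])
    print(d, bias, acc)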
| 31.554348 | 110 | 0.617637 |
2e0c32714f6d997eca9a7323c26f7d7c44b13150
| 1,007 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch02_math/ex03_perfectnumber_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch02_math/ex03_perfectnumber_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch02_math/ex03_perfectnumber_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch02_math.solutions.ex03_perfectnumber import is_perfect_number_simple, calc_perfect_numbers, is_perfect_number_based_on_proper_divisors
@pytest.mark.parametrize("n, expected",
[(6, True), (28, True),
(496, True), (8128, True)])
def test_is_perfect_number_simple(n, expected):
assert is_perfect_number_simple(n) == expected
@pytest.mark.parametrize("n, expected", [(50, [6, 28]),
(1000, [6, 28, 496]),
(10000, [6, 28, 496, 8128])])
def test_calc_perfect_numbers(n, expected):
assert calc_perfect_numbers(n) == expected
@pytest.mark.parametrize("n, expected", [
(6, True), (28, True), (496, True), (8128, True)])
def test_is_perfect_number_based_on_proper_divisors(n, expected):
assert is_perfect_number_based_on_proper_divisors(n) == expected
| 34.724138 | 141 | 0.647468 |
d84fb925e5f198a242b49a3625906021e8cf6205
| 3,412 |
py
|
Python
|
src/torch/npu/random.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
src/torch/npu/random.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
src/torch/npu/random.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from . import _lazy_init, _lazy_call, device_count, current_device
__all__ = ['manual_seed', 'manual_seed_all',
'seed', 'seed_all', 'initial_seed']
def manual_seed(seed):
r"""Sets the seed for generating random numbers for the current NPU.
It's safe to call this function if NPU is not available; in that
case, it is silently ignored.
Args:
seed (int): The desired seed.
.. warning::
If you are working with a multi-NPU model, this function is insufficient
to get determinism. To seed all NPUs, use :func:`manual_seed_all`.
"""
seed = int(seed)
def cb():
idx = current_device()
default_generator = torch.npu.default_generators[idx]
default_generator.manual_seed(seed)
_lazy_call(cb)
def manual_seed_all(seed):
r"""Sets the seed for generating random numbers on all NPUs.
It's safe to call this function if NPU is not available; in that
case, it is silently ignored.
Args:
seed (int): The desired seed.
"""
seed = int(seed)
def cb():
for i in range(device_count()):
default_generator = torch.npu.default_generators[i]
default_generator.manual_seed(seed)
_lazy_call(cb)
def seed():
r"""Sets the seed for generating random numbers to a random number for the current NPU.
It's safe to call this function if NPU is not available; in that
case, it is silently ignored.
.. warning::
If you are working with a multi-NPU model, this function will only initialize
the seed on one NPU. To initialize all NPUs, use :func:`seed_all`.
"""
def cb():
idx = current_device()
default_generator = torch.npu.default_generators[idx]
default_generator.seed()
_lazy_call(cb)
def seed_all():
r"""Sets the seed for generating random numbers to a random number on all NPUs.
It's safe to call this function if NPU is not available; in that
case, it is silently ignored.
"""
def cb():
random_seed = 0
seeded = False
for i in range(device_count()):
default_generator = torch.npu.default_generators[i]
if not seeded:
default_generator.seed()
random_seed = default_generator.initial_seed()
seeded = True
else:
default_generator.manual_seed(random_seed)
_lazy_call(cb)
def initial_seed():
r"""Returns the current random seed of the current NPU.
.. warning::
This function eagerly initializes NPU.
"""
_lazy_init()
idx = current_device()
default_generator = torch.npu.default_generators[idx]
return default_generator.initial_seed()
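# Hedged usage sketch (assumes an Ascend NPU runtime is available; the calls
# below are the functions defined in this module, nothing else is real API):
if __name__ == '__main__':
    manual_seed_all(1234)   # queue deterministic seeding for all NPUs
    print(initial_seed())   # eagerly initializes the NPU, then reports the seed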
| 30.738739 | 91 | 0.668816 |
d8b8db492830d698e6b17933d3730996e12d5da3
| 4,114 |
py
|
Python
|
Scarky2/builder/views.py
|
kopringo/Scarky2
|
93c59cd31113749045caff68274f779a61360167
|
[
"MIT"
] | null | null | null |
Scarky2/builder/views.py
|
kopringo/Scarky2
|
93c59cd31113749045caff68274f779a61360167
|
[
"MIT"
] | null | null | null |
Scarky2/builder/views.py
|
kopringo/Scarky2
|
93c59cd31113749045caff68274f779a61360167
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
from django.shortcuts import render
from django.http.response import HttpResponseRedirect, HttpResponse
from django.conf import settings
from django.core.urlresolvers import reverse
import uuid
import json
from models import Problem, Language
# Create your views here.
def home(request):
return HttpResponseRedirect(reverse('builder', args=['new',]))
def builder(request, pid):
params = {}
secret = ''
problem = None
if pid == 'new':
params['new'] = True
if request.POST:
user = None
if request.user.is_authenticated():
user = request.user
problem = Problem.create_problem(user)
problem.name = request.POST.get('name', '')
problem.content = request.POST.get('content', '')
problem.save()
if request.is_ajax():
return HttpResponse(json.dumps({'pid': problem.code, 'secret': problem.secret}), content_type='application/json')
else:
return HttpResponseRedirect('%s?secret=%s' % (reverse('problem', args=[problem.code]), problem.secret))
else:
secret = request.GET.get('secret', request.POST.get('secret', ''))
try:
problem = Problem.objects.get(code=pid)
if (not request.user.is_authenticated() and secret != problem.secret) or \
(request.user.is_authenticated() and problem.user != request.user):
raise Exception('access-denied')
except Problem.DoesNotExist as e:
return HttpResponseRedirect('/?not-found')
except Exception as e:
return HttpResponseRedirect('/?access-denied')
if request.POST:
name = request.POST.get('name', '')
content = request.POST.get('content', '')
input = request.POST.get('input', '')
output = request.POST.get('output', '')
problem.name = name
problem.content = content
problem.input = input
problem.output = output
problem.save()
return HttpResponseRedirect('/builder/%s?secret=%s' % (pid, problem.secret))
languages = Language.objects.all().filter(visible=True)
if len(languages) == 0:
Language.sync_languages()
languages = Language.objects.all().filter(visible=True)
params['languages'] = languages
params['problem'] = problem
params['problem_code'] = pid
params['problem_secret'] = secret
return render(request, 'builder/home.html', params)
def builder_upload(request):
    file = 'sdf'  # placeholder value; the actual upload handling is not implemented
return HttpResponse(json.dumps({'file': file}), content_type='application/json')
def problem(request, pid):
params = {}
try:
problem = Problem.objects.get(code=pid)
except Problem.DoesNotExist:
return HttpResponseRedirect(reverse('problems'))
#if problem.secret != request.GET.get('secret', '~!@#$%^#@#$@#!@!...'):
# pass
    # if a secret is provided, allow editing and statistics
params['problem'] = problem
params['host'] = request.META['HTTP_HOST']
return render(request, 'builder/problem.html', params)
def widget_js(request, pid):
    params = {'pid': pid}
    problem = None  # avoid a NameError below if the lookup fails
    try:
        problem = Problem.objects.get(code=pid)
    except Problem.DoesNotExist:
        pass
params['host'] = request.META['HTTP_HOST']
params['problem'] = problem
return render(request, 'builder/widget_js.html', params)
def widget(request, pid):
params = {}
try:
problem = Problem.objects.get(code=pid)
except Problem.DoesNotExist:
pass
return render(request, 'builder/widget.html', params)
def problems(request):
params = {}
return render(request, 'builder/problems.html', params)
def api_1_problems(request):
pass
def api_1_problem(request, pid):
pass
def api_1_submissions(request, pid):
pass
def api_1_submission(request, pid, sid):
pass
| 28.769231 | 129 | 0.59893 |
993259bb8f1d2c008f6332049af9e6a0cef4bdf4
| 10,355 |
py
|
Python
|
paddlenlp/transformers/ppminilm/tokenizer.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/transformers/ppminilm/tokenizer.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/transformers/ppminilm/tokenizer.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import six
import shutil
from paddle.utils import try_import
from paddlenlp.utils.env import MODEL_HOME
from .. import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer
__all__ = ['PPMiniLMTokenizer']
class PPMiniLMTokenizer(PretrainedTokenizer):
r"""
    Constructs a PPMiniLM tokenizer. It uses a basic tokenizer to do punctuation
splitting, lower casing and so on, and follows a WordPiece tokenizer to
tokenize as subwords.
This tokenizer inherits from :class:`~paddlenlp.transformers.tokenizer_utils.PretrainedTokenizer`
which contains most of the main methods. For more information regarding those methods,
please refer to this superclass.
Args:
vocab_file (str):
The vocabulary file path (ends with '.txt') required to instantiate
a `WordpieceTokenizer`.
do_lower_case (str, optional):
Whether or not to lowercase the input when tokenizing.
Defaults to`True`.
unk_token (str, optional):
A special token representing the *unknown (out-of-vocabulary)* token.
            An unknown token is set to be `unk_token` in order to be converted to an ID.
Defaults to "[UNK]".
sep_token (str, optional):
A special token separating two different sentences in the same input.
Defaults to "[SEP]".
pad_token (str, optional):
A special token used to make arrays of tokens the same size for batching purposes.
Defaults to "[PAD]".
cls_token (str, optional):
A special token used for sequence classification. It is the last token
of the sequence when built with special tokens. Defaults to "[CLS]".
mask_token (str, optional):
A special token representing a masked token. This is the token used
in the masked language modeling task which the model tries to predict the original unmasked ones.
Defaults to "[MASK]".
Examples:
.. code-block::
from paddlenlp.transformers import PPMiniLMTokenizer
tokenizer = PPMiniLMTokenizer.from_pretrained('ppminilm-6l-768h')
encoded_inputs = tokenizer('He was a puppeteer')
# encoded_inputs:
# { 'input_ids': [1, 4444, 4385, 1545, 6712, 10062, 9568, 9756, 9500, 2],
# 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
# }
"""
resource_files_names = {"vocab_file": "vocab.txt"} # for save_pretrained
pretrained_resource_files_map = {
"vocab_file": {
"ppminilm-6l-768h":
"https://bj.bcebos.com/paddlenlp/models/transformers/ppminilm-6l-768h/vocab.txt",
}
}
pretrained_init_configuration = {
"ppminilm-6l-768h": {
"do_lower_case": True
},
}
def __init__(self,
vocab_file,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
**kwargs):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the "
"vocabulary from a pretrained model please use "
"`tokenizer = PPMiniLMTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
.format(vocab_file))
self.do_lower_case = do_lower_case
self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab,
unk_token=unk_token)
@property
def vocab_size(self):
"""
Return the size of vocabulary.
Returns:
int: The size of vocabulary.
"""
return len(self.vocab)
def _tokenize(self, text):
r"""
        End-to-end tokenization for PPMiniLM models.
Args:
text (str): The text to be tokenized.
Returns:
List[str]: A list of string representing converted tokens.
"""
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_string(self, tokens):
r"""
Converts a sequence of tokens (list of string) in a single string. Since
the usage of WordPiece introducing `##` to concat subwords, also remove
`##` when converting.
Args:
tokens (List[str]): A list of string representing tokens to be converted.
Returns:
str: Converted string from tokens.
Examples:
.. code-block::
from paddlenlp.transformers import PPMiniLMTokenizer
tokenizer = PPMiniLMTokenizer.from_pretrained('ppminilm-6l-768h')
tokens = tokenizer.tokenize('He was a puppeteer')
strings = tokenizer.convert_tokens_to_string(tokens)
#he was a puppeteer
"""
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def num_special_tokens_to_add(self, pair=False):
r"""
Returns the number of added tokens when encoding a sequence with special tokens.
Note:
This encodes inputs and checks the number of added tokens, and is therefore not efficient.
Do not put this inside your training loop.
Args:
pair (bool, optional):
Whether the input is a sequence pair or a single sequence.
Defaults to `False` and the input is a single sequence.
Returns:
int: Number of tokens added to sequences
"""
token_ids_0 = []
token_ids_1 = []
return len(
self.build_inputs_with_special_tokens(
token_ids_0, token_ids_1 if pair else None))
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
r"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens.
A sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (List[int]):
List of IDs to which the special tokens will be added.
token_ids_1 (List[int], optional):
Optional second list of IDs for sequence pairs.
Defaults to `None`.
Returns:
List[int]: List of input_id with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
_cls = [self.cls_token_id]
_sep = [self.sep_token_id]
return _cls + token_ids_0 + _sep + token_ids_1 + _sep
def build_offset_mapping_with_special_tokens(self,
offset_mapping_0,
offset_mapping_1=None):
r"""
Build offset map from a pair of offset map by concatenating and adding offsets of special tokens.
An offset_mapping has the following format:
- single sequence: ``(0,0) X (0,0)``
- pair of sequences: ``(0,0) A (0,0) B (0,0)``
Args:
offset_mapping_ids_0 (List[tuple]):
List of char offsets to which the special tokens will be added.
offset_mapping_ids_1 (List[tuple], optional):
Optional second list of wordpiece offsets for offset mapping pairs.
Defaults to `None`.
Returns:
List[tuple]: A list of wordpiece offsets with the appropriate offsets of special tokens.
"""
if offset_mapping_1 is None:
return [(0, 0)] + offset_mapping_0 + [(0, 0)]
return [(0, 0)] + offset_mapping_0 + [(0, 0)
] + offset_mapping_1 + [(0, 0)]
def create_token_type_ids_from_sequences(self,
token_ids_0,
token_ids_1=None):
r"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task.
A sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (List[int]):
A list of `inputs_ids` for the first sequence.
token_ids_1 (List[int], optional):
Optional second list of IDs for sequence pairs.
Defaults to `None`.
Returns:
List[int]: List of token_type_id according to the given sequence(s).
"""
_sep = [self.sep_token_id]
_cls = [self.cls_token_id]
if token_ids_1 is None:
return len(_cls + token_ids_0 + _sep) * [0]
return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
_sep) * [1]
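# Hedged usage sketch of the special-token helpers above (assumes the
# 'ppminilm-6l-768h' vocabulary can be downloaded in this environment):
if __name__ == '__main__':
    tokenizer = PPMiniLMTokenizer.from_pretrained('ppminilm-6l-768h')
    ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('first text'))
    ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('second text'))
    print(tokenizer.build_inputs_with_special_tokens(ids_a, ids_b))
    print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))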
| 38.494424 | 119 | 0.592854 |
cfd871f28f6c1b0bbbc8ffe42207aa261daa1915
| 549 |
py
|
Python
|
Organisation/Recherche/Datenvisualisierung/matplotlib_plot/Wetterplots_Giessen.py
|
mxsph/Data-Analytics
|
c82ff54b78f50b6660d7640bfee96ea68bef598f
|
[
"MIT"
] | 3 |
2020-08-24T19:02:09.000Z
|
2021-05-27T20:22:41.000Z
|
Organisation/Recherche/Datenvisualisierung/matplotlib_plot/Wetterplots_Giessen.py
|
mxsph/Data-Analytics
|
c82ff54b78f50b6660d7640bfee96ea68bef598f
|
[
"MIT"
] | 342 |
2020-08-13T10:24:23.000Z
|
2021-08-12T14:01:52.000Z
|
Organisation/Recherche/Datenvisualisierung/matplotlib_plot/Wetterplots_Giessen.py
|
visuanalytics/visuanalytics
|
f9cce7bc9e3227568939648ddd1dd6df02eac752
|
[
"MIT"
] | 8 |
2020-09-01T07:11:18.000Z
|
2021-04-09T09:02:11.000Z
|
import matplotlib.pyplot as plt
# Data to plot
hoch = [11, 11, 12, 14, 18, 22, 20, 23, 23, 21, 20, 23, 11, 11, 16, 23, 24, 23, 15, 17, 18, 21, 22, 22, 16, 20, 22, 17,
18, 16]
tief = [-5, -3, -2, 1, 1, 4, 6, 5, 6, 2, 5, 2, -2, -2, 1, 6, 8, 8, 5, 6, 7, 5, 3, 4, 2, 3, 6, 8, 7, 5]
tage = list(range(1, 31))
plt.plot(tage, hoch, ":r", tage, hoch, "or", tage, tief, ":b", tage, tief, "ob")
plt.xlabel("Tag im April 2020")
plt.ylabel("Temperatur in Grad Celsius")
plt.title("Temperaturen im April 2020")
plt.grid()
plt.show()
| 36.6 | 120 | 0.54827 |
cfe40386467f5a82bc05967bf49df0d025067384
| 10,250 |
py
|
Python
|
src/onegov/election_day/upgrade.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/election_day/upgrade.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/election_day/upgrade.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
""" Contains upgrade tasks that are executed when the application is being
upgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`.
"""
from onegov.ballot import Election
from onegov.ballot import Vote
from onegov.core.orm.types import JSON
from onegov.core.orm.types import UTCDateTime
from onegov.core.upgrade import upgrade_task
from onegov.election_day.collections import ArchivedResultCollection
from onegov.election_day.models import ArchivedResult
from onegov.election_day.models import Subscriber
from sqlalchemy import Column
from sqlalchemy import Enum
from sqlalchemy import Text
@upgrade_task('Create archived results')
def create_archived_results(context):
""" Create an initial archived result entry for all existing votes
and elections.
    Because we don't have a real request here, the generated URLs are wrong!
To fix the links, login after the update and call the 'update-results'
view.
"""
ArchivedResultCollection(context.session).update_all(context.request)
@upgrade_task('Add ID to archived results')
def add_id_to_archived_results(context):
""" Add the IDs of the elections/votes as meta information to the results.
    Normally, the right election and vote should be found. To be sure, call
    the 'update-results' view and verify that everything is correct.
"""
session = context.session
results = session.query(ArchivedResult)
results = results.filter(ArchivedResult.schema == context.app.schema)
for result in results:
if result.type == 'vote':
vote = session.query(Vote).filter(
Vote.date == result.date,
Vote.domain == result.domain,
Vote.shortcode == result.shortcode,
Vote.title_translations == result.title_translations
).first()
if vote and vote.id in result.url:
result.external_id = vote.id
if result.type == 'election':
election = session.query(Election).filter(
Election.date == result.date,
Election.domain == result.domain,
Election.shortcode == result.shortcode,
Election.title_translations == result.title_translations,
Election.counted_entities == result.counted_entities,
Election.total_entities == result.total_entities,
).first()
if election and election.id in result.url:
result.external_id = election.id
@upgrade_task('Update vote progress')
def update_vote_progress(context):
""" Recalculate the vote progress for the archived results.
"""
session = context.session
results = session.query(ArchivedResult)
results = results.filter(
ArchivedResult.schema == context.app.schema,
ArchivedResult.type == 'vote'
)
for result in results:
vote = session.query(Vote).filter_by(id=result.external_id)
vote = vote.first()
if vote:
result.counted_entities, result.total_entities = vote.progress
@upgrade_task('Add elected candidates to archived results')
def add_elected_candidates(context):
""" Adds the elected candidates to the archived results,
"""
session = context.session
results = session.query(ArchivedResult)
results = results.filter(
ArchivedResult.schema == context.app.schema,
ArchivedResult.type == 'election'
)
for result in results:
election = session.query(Election).filter_by(id=result.external_id)
election = election.first()
if election:
result.elected_candidates = election.elected_candidates
@upgrade_task('Add content columns to archived results')
def add_content_columns_to_archived_results(context):
if not context.has_column('archived_results', 'content'):
context.operations.add_column(
'archived_results', Column('content', JSON)
)
@upgrade_task('Change last change columns')
def change_last_change_columns(context):
if not context.has_column('archived_results', 'last_modified'):
context.operations.add_column(
'archived_results',
Column('last_modified', UTCDateTime, nullable=True)
)
if context.has_column('archived_results', 'last_result_change'):
context.operations.execute(
'ALTER TABLE {} ALTER COLUMN {} DROP NOT NULL;'.format(
'archived_results', 'last_result_change'
)
)
if (
context.has_column('notifications', 'last_change')
and not context.has_column('notifications', 'last_modified')
):
context.operations.execute(
'ALTER TABLE {} RENAME COLUMN {} TO {};'.format(
'notifications', 'last_change', 'last_modified'
)
)
if context.has_column('notifications', 'last_modified'):
context.operations.execute(
'ALTER TABLE {} ALTER COLUMN {} DROP NOT NULL;'.format(
'notifications', 'last_modified'
)
)
@upgrade_task('Make subscriber polymorphic')
def make_subscriber_polymorphic(context):
if not context.has_column('subscribers', 'type'):
context.operations.add_column(
'subscribers',
Column('type', Text, nullable=True)
)
if (
context.has_column('subscribers', 'phone_number')
and not context.has_column('subscribers', 'address')
):
context.operations.execute(
'ALTER TABLE {} RENAME COLUMN {} TO {};'.format(
'subscribers', 'phone_number', 'address'
)
)
if context.has_column('subscribers', 'type'):
        subscribers = context.session.query(Subscriber)
        subscribers = subscribers.filter(Subscriber.type.is_(None))
        for subscriber in subscribers:
subscriber.type = 'sms'
@upgrade_task('Make notifications polymorphic')
def make_notifications_polymorphic(context):
if (
context.has_column('notifications', 'action')
and not context.has_column('notifications', 'type')
):
context.operations.execute(
'ALTER TABLE {} RENAME COLUMN {} TO {};'.format(
'notifications', 'action', 'type'
)
)
context.operations.execute(
'ALTER TABLE {} ALTER COLUMN {} DROP NOT NULL;'.format(
'notifications', 'type'
)
)
@upgrade_task(
'Apply static data',
requires='onegov.ballot:Replaces results group with name and district'
)
def apply_static_data(context):
principal = getattr(context.app, 'principal', None)
if not principal:
return
for vote in context.session.query(Vote):
for ballot in vote.ballots:
assert vote.date and vote.date.year in principal.entities
for result in ballot.results:
assert (
result.entity_id in principal.entities[vote.date.year]
or result.entity_id == 0
)
result.name = principal.entities.\
get(vote.date.year, {}).\
get(result.entity_id, {}).\
get('name', '')
result.district = principal.entities.\
get(vote.date.year, {}).\
get(result.entity_id, {}).\
get('district', '')
for election in context.session.query(Election):
assert election.date and election.date.year in principal.entities
for result in election.results:
assert (
result.entity_id in principal.entities[election.date.year]
or result.entity_id == 0
)
result.name = principal.entities.\
get(election.date.year, {}).\
get(result.entity_id, {}).\
get('name', '')
result.district = principal.entities.\
get(election.date.year, {}).\
get(result.entity_id, {}).\
get('district', '')
@upgrade_task('Add election compound to archive')
def add_election_compound_to_archive(context):
old_type = Enum('election', 'vote', name='type_of_result')
new_type = Enum(
'election', 'election_compound', 'vote', name='type_of_result'
)
tmp_type = Enum(
'election', 'election_compound', 'vote', name='_type_of_result'
)
tmp_type.create(context.operations.get_bind(), checkfirst=False)
context.operations.execute(
'ALTER TABLE archived_results ALTER COLUMN type '
'TYPE _type_of_result USING type::text::_type_of_result'
)
old_type.drop(context.operations.get_bind(), checkfirst=False)
new_type.create(context.operations.get_bind(), checkfirst=False)
context.operations.execute(
'ALTER TABLE archived_results ALTER COLUMN type '
'TYPE type_of_result USING type::text::type_of_result'
)
tmp_type.drop(context.operations.get_bind(), checkfirst=False)
@upgrade_task('Add contraints to notifications and sources')
def add_contraints_to_notifications_and_sources(context):
# We use SQL (rather than operations.xxx) so that we can drop and add
# the constraints in one statement
for ref in ('election', 'vote'):
for table in ('notifications', 'upload_data_source_item'):
context.operations.execute(
f'ALTER TABLE {table} '
f'DROP CONSTRAINT {table}_{ref}_id_fkey, '
f'ADD CONSTRAINT {table}_{ref}_id_fkey'
f' FOREIGN KEY ({ref}_id) REFERENCES {ref}s (id)'
f' ON UPDATE CASCADE'
)
@upgrade_task('Enable expats on votes and elections')
def enable_expats(context):
principal = getattr(context.app, 'principal', None)
if not principal:
return
for vote in context.session.query(Vote):
ballot = vote.ballots.first()
if ballot:
if ballot.results.filter_by(entity_id=0).first():
vote.expats = True
for election in context.session.query(Election):
if election.results.filter_by(entity_id=0).first():
election.expats = True
| 34.863946 | 78 | 0.633659 |
cff8c74b564e9aa23283843ea7fd5738bfa7ce69
| 3,009 |
py
|
Python
|
timetable.py
|
jerluebke/SOWAS
|
d606bcd6757503257d01381da56602016261f578
|
[
"MIT"
] | null | null | null |
timetable.py
|
jerluebke/SOWAS
|
d606bcd6757503257d01381da56602016261f578
|
[
"MIT"
] | null | null | null |
timetable.py
|
jerluebke/SOWAS
|
d606bcd6757503257d01381da56602016261f578
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable = C, R
from collections import namedtuple
import matplotlib as mpl
from matplotlib.patches import Patch
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
mpl.rcParams["font.size"] = 12
# colors
cmap = plt.get_cmap("Set1")
color_mapping = {
"th" : cmap(0), # theory
"ex" : cmap(1), # experiment
"ev" : cmap(2) # evaluation
}
class item(namedtuple("item",
["description", "start", "duration", "domain"])):
"""
item in the timetable containing startpoint (starting with 0), duration,
domain (theory, experiment, evaluation) and description (as it appears in
the plot)
"""
__slots__ = ()
@property
def color(self):
return color_mapping[self.domain]
#############
# data #
#############
items = [
# item: description, start, duration, domain
item("Einarbeitung\n in Python", 0, 2, "th"),
item("Erstellen der\n Simulation", 1, 4, "th"),
# item("Ergänzung der\n Simulation", 5, 2, "th"),
item("Bau der Mess-\n vorrichtung", 0, 2, "ex"),
item("Aufnahme der\n Messreihen", 2, 4, "ex"),
item("Anpassung des\n Aufbaus", 6, 3, "ex"),
item("Auswertung der\nDaten", 7, 3, "ev"),
item("Erstellung des\nPosters", 8, 11, "ev")
]
data_as_array = np.array([[item.start, item.duration] for item in items])
y_values = np.arange(len(items))
starting_points = data_as_array[:,0]
durations = data_as_array[:,1]
y_labels = [item.description for item in items]
colors = [item.color for item in items]
kwargs = {
"height" : .4,
"align" : "center"
}
#################
# plotting #
#################
fig = plt.figure(figsize=(8, 6))
# make two subplots - actual plot and legend
gs = GridSpec(2, 1, height_ratios=[11, 1])
# make timetable
ax = plt.subplot(gs[0])
ax.barh(y_values, durations, left=starting_points, color=colors, **kwargs)
# adjust yaxis
ax.invert_yaxis()
ax.set_yticks(y_values)
ax.set_yticklabels(y_labels)
# hide yticks
ax.tick_params(axis='y', length=0)
# xaxis: set ticks, label and limits
ax.set_xticks(np.arange(12))
ax.set_xlabel("Wochen (09.04.2018 - 22.06.2018)")
ax.set_xlim((0, 11))
# turn grid off to avoid conflicts with local settings
ax.grid(False)
# place grid below elements in plot
ax.set_axisbelow(True)
ax.grid(axis='x')
# make legend in second subplot
legend_ax = plt.subplot(gs[1])
# remove ticks and boundary box
legend_ax.set(xticks=[], yticks=[])
legend_ax.set_axis_off()
# make and map proxy artists to legend
plt.legend(handles=[Patch(color=c, label=l)
for c, l in zip(color_mapping.values(),
("Theorie", "Experiment", "Auswertung"))],
loc="lower center", ncol=3)
# align subplots
plt.tight_layout()
plt.savefig("timetable.png", format="png", dpi=300)
plt.savefig("timetable.eps", format="eps", dpi=1000)
| 26.628319 | 78 | 0.63011 |
5c8ba138654ebae397537c0aba3ec6dff61d9382
| 771 |
py
|
Python
|
01_class/transfer_parent_class_method.py
|
wuyueCreator/python-test
|
6072ac9264a257c89925469238c14fff3bda5630
|
[
"MIT"
] | 1 |
2019-03-25T03:44:54.000Z
|
2019-03-25T03:44:54.000Z
|
01_class/transfer_parent_class_method.py
|
wuyueCreator/python-test
|
6072ac9264a257c89925469238c14fff3bda5630
|
[
"MIT"
] | null | null | null |
01_class/transfer_parent_class_method.py
|
wuyueCreator/python-test
|
6072ac9264a257c89925469238c14fff3bda5630
|
[
"MIT"
] | null | null | null |
class A:
def spam(self):
print('A.spam')
class B(A):
def spam(self):
print('B.spam')
super().spam() # Call parent spam()
class C:
def __init__(self):
self.x = 0
class D(C):
def __init__(self):
super().__init__()
self.y = 1
# Another common use of super() is in code that overrides Python special methods, for example:
class Proxy:
def __init__(self, obj):
self._obj = obj
# Delegate attribute lookup to internal obj
def __getattr__(self, name):
return getattr(self._obj, name)
# Delegate attribute assignment
def __setattr__(self, name, value):
if name.startswith('_'):
super().__setattr__(name, value) # Call original __setattr__
else:
setattr(self._obj, name, value)
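# Hedged demo of the Proxy delegation above (Point is a made-up example class):
if __name__ == '__main__':
    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y
    p = Proxy(Point(2, 3))
    print(p.x)       # attribute reads are delegated to the wrapped Point
    p.x = 10         # non-underscore writes go to the wrapped Point
    print(p._obj.x)  # -> 10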
| 19.275 | 73 | 0.583658 |
5cdbb678909c4b3e3437233198e313bdeb3e63d0
| 4,260 |
py
|
Python
|
quant/api/kkex.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 7 |
2017-10-22T15:00:09.000Z
|
2019-09-19T11:45:43.000Z
|
quant/api/kkex.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 1 |
2018-01-19T16:19:40.000Z
|
2018-01-19T16:19:40.000Z
|
quant/api/kkex.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 5 |
2017-12-11T15:10:29.000Z
|
2018-12-21T17:40:58.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from urllib import urlencode
from urlparse import urljoin
import requests
from hashlib import md5
BASE_URL = 'https://kkex.com/api/v1'
TIMEOUT = 5
class PublicClient(object):
def __init__(self):
super(PublicClient, self).__init__()
@classmethod
def _build_parameters(cls, parameters):
# sort the keys so we can test easily in Python 3.3 (dicts are not
# ordered)
keys = list(parameters.keys())
keys.sort()
return '&'.join(["%s=%s" % (k, parameters[k]) for k in keys])
def url_for(self, path, path_arg=None, parameters=None):
# build the basic url
url = "%s/%s" % (BASE_URL, path)
        # If there is a path_arg, interpolate it into the URL.
# In this case the path that was provided will need to have string
# interpolation characters in it, such as PATH_TICKER
if path_arg:
url = url % (path_arg)
# Append any parameters to the URL.
if parameters:
url = "%s?%s" % (url, self._build_parameters(parameters))
return url
@classmethod
def _get(cls, url, params=None):
try:
resp = requests.get(url, timeout=TIMEOUT, params=params)
except requests.exceptions.RequestException as e:
raise e
else:
if resp.status_code == requests.codes.ok:
return resp.json()
def depth(self, symbol):
url = self.url_for('depth')
params = {
'symbol': symbol
}
return self._get(url, params)
class PrivateClient(PublicClient):
def __init__(self, api_key, api_secret):
super(PrivateClient, self).__init__()
self._key = api_key
self._secret = api_secret
self.api_root = 'https://kkex.com'
def _sign(self, params):
sign = list(sorted(params.items()) + [('secret_key', self._secret)])
signer = md5()
signer.update(urlencode(sign).encode('utf-8'))
return signer.hexdigest().upper()
def _post(self, path, params=None):
if params is None:
params = {}
params['api_key'] = self._key
sign = self._sign(params)
params['sign'] = sign
url = urljoin(self.api_root, path)
try:
resp = requests.post(url, data=params, timeout=5)
except requests.exceptions.RequestException as e:
raise e
else:
if resp.status_code == requests.codes.ok:
return resp.json()
def profile(self):
return self._post('/api/v1/profile')
def balance(self):
return self._post('/api/v1/userinfo')
def buy_limit(self, symbol, amount, price):
params = {
'symbol': symbol,
'type': 'buy',
'price': price,
'amount': amount
}
return self._post('/api/v1/trade', params)
def sell_limit(self, symbol, amount, price):
params = {
'symbol': symbol,
'type': 'sell',
'price': price,
'amount': amount
}
return self._post('/api/v1/trade', params)
def cancel_order(self, symbol, order_id):
params = {'symbol': symbol,
'order_id': order_id}
return self._post('/api/v1/cancel_order', params)
def cancel_all_orders(self, symbol):
params = {
'symbol': symbol
}
return self._post('/api/v1/cancel_all_orders', params)
def order_info(self, symbol, order_id):
params = {
'symbol': symbol,
'order_id': order_id
}
return self._post('/api/v1/order_info', params)
def orders_info(self, symbol, order_ids):
order_id_p = ','.join(order_ids)
params = {
'symbol': symbol,
'order_id': order_id_p
}
return self._post('/api/v1/orders_info', params)
def _get_orders_history(self, symbol, status=0, page=1, pagesize=10):
params = {
'symbol': symbol,
'status': status,
'current_page': page,
'page_length': pagesize
}
return self._post('/api/v1/order_history', params)
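# Hedged usage sketch (not part of the original file): the endpoints come from the
# classes above, but the symbol and credentials below are placeholder assumptions.
if __name__ == '__main__':
    public = PublicClient()
    print public.depth('BTCUSDT')   # public order book, no credentials needed
    private = PrivateClient('YOUR_API_KEY', 'YOUR_API_SECRET')
    print private.balance()         # account balances, requires valid keys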
| 28.590604 | 76 | 0.562911 |
a4603d1b25c4c01de3ede09c65761ebeeef0fc49
| 991 |
py
|
Python
|
initial-settings.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 2 |
2020-03-18T18:23:27.000Z
|
2020-08-02T15:59:16.000Z
|
initial-settings.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 5 |
2019-07-07T16:47:47.000Z
|
2020-08-10T16:20:00.000Z
|
initial-settings.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 1 |
2020-03-18T18:23:30.000Z
|
2020-03-18T18:23:30.000Z
|
from conans import ConanFile, CMake, tools
import os.path
import sys
class LocalConanFile(ConanFile):
settings = "arch_build"
generator = []
def requirements(self):
profile = "default"
if "--profile" in sys.argv:
profile = sys.argv[sys.argv.index("--profile") + 1]
profile = os.path.abspath(profile) if os.path.exists(profile) else profile
print(f"Configuring CMake arch to {self.settings.arch_build} for {profile} profile. Helps cross-compiling.")
self.run(f"conan profile update settings.cmake:arch={self.settings.arch_build} {profile}")
print(f"Adding the bincrafters remote @ https://bincrafters.jfrog.io/artifactory/api/conan/public-conan.")
self.run(f"conan remote add bincrafters https://bincrafters.jfrog.io/artifactory/api/conan/public-conan --force")
print("Configuring settings to work correctly with bincrafters.")
self.run(f"conan config set general.revisions_enabled=1")
| 41.291667 | 121 | 0.696266 |
778b53e8126b091296e7e1d7fb2989f969872569
| 1,067 |
py
|
Python
|
zencad/examples/1.GeomPrim/2.prim2d/textshape.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | 5 |
2018-04-11T14:11:40.000Z
|
2018-09-12T19:03:36.000Z
|
zencad/examples/1.GeomPrim/2.prim2d/textshape.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
zencad/examples/1.GeomPrim/2.prim2d/textshape.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
ZenCad API example: textshape
date: 04.10.2019
"""
import os
from zencad import *
zencad_example_directory = zencad.moduledir + "/examples"
testfont = os.path.join(zencad_example_directory, "fonts/testfont.ttf")
mandarinc = os.path.join(zencad_example_directory, "fonts/mandarinc.ttf")
register_font(testfont)
register_font(mandarinc)
m0 = textshape(text="ZenCad", fontname="Ubuntu Mono", size=100)
m1 = textshape(text="ZenCad", fontname="Mandarinc", size=100)
disp(m0, color.white)
disp(m0.rotateX(deg(90)).translate(0, 70, 0))
disp(m1.translate( 0, 200, 0), color.green)
disp(m1.rotateX(deg(90)).translate( 0, 270, 0), color.yellow)
#########################Advanced Example########################################
x = 400
y = 100
z = 50
deep = 10
#find the geometric center of the textshape
m1center = m1.center()
m2 = (
box(x, y, z)
- m1.extrude(deep).up(z-deep).translate(x/2 - m1center.x, y/2 - m1center.y, 0)
)
disp(m2.forw(400))
################################################################################
show()
| 24.25 | 81 | 0.615745 |
24af93a81c7b2b92ca665acd7e7576d818f94d98
| 2,054 |
py
|
Python
|
toolsparty-master/information-gathering/ip-extender.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
toolsparty-master/information-gathering/ip-extender.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
toolsparty-master/information-gathering/ip-extender.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
#!/usr/bin/env python
# coding: utf-8
# -**- Author: LandGrey -**-
import os
import sys
def ip_extender(ips=None, files=None, switch=3, extend=5, is_format=False):
results = []
cidr_groups = []
none_cidr_groups = []
cidr_dict = {}
if not ips and files:
ips = []
with open(files, 'r') as f:
for line in f.readlines():
if line.strip():
ips.append(line.strip())
ips = list(set(ips))
for ip in ips:
prefix = ".".join(ip.split(".")[:3])
if prefix not in cidr_dict.keys():
cidr_dict[prefix] = [1, ip]
else:
cidr_dict[prefix][0] += 1
cidr_dict[prefix].append(ip)
for k, v in cidr_dict.items():
if v[0] >= switch:
cidr_groups.append(k)
else:
for _ in v[1:]:
none_cidr_groups.append(_)
if not is_format:
for _ in cidr_groups:
results.extend(extend_ips([_ + ".128"]))
results.extend(extend_ips(none_cidr_groups, extend=extend))
else:
for _ in cidr_groups:
results.append(_ + ".0/24")
for _ in none_cidr_groups:
r = extend_ips([_], extend=extend)
results.append(r[0] + "-" + r[-1])
return results
def extend_ips(ips, extend=128):
results = ips
var0 = []
for ip in ips:
ip_chunk = ip.split(".")
for chunk in range(min(int(ip_chunk[3]) - int(extend), int(ip_chunk[3]) - 1)
if int(ip_chunk[3]) - int(extend) > 0 else 1, min(int(ip_chunk[3]) + int(extend) + 1, 256)):
var0.append("{0}.{1}.{2}.{3}".format(ip_chunk[0], ip_chunk[1], ip_chunk[2], str(chunk)))
results.extend(var0)
return sorted(list(set(results)), key=lambda x: (len(x), str(x)))
if __name__ == "__main__":
if len(sys.argv) != 2 or not os.path.isfile(sys.argv[1]):
exit("[*] Usage: python ip-extender.py single_ip_list.txt")
for ip in ip_extender(files=sys.argv[1], is_format=True):
print(ip)
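# Hedged usage sketch (not part of the original file), with made-up addresses:
# >>> ip_extender(ips=['10.0.0.1', '10.0.0.2', '10.0.0.3'], is_format=True)
# ['10.0.0.0/24']               # three hosts share a /24 prefix, so they collapse to CIDR form
# >>> ip_extender(ips=['192.168.1.10'], extend=2, is_format=True)
# ['192.168.1.8-192.168.1.12']  # a lone host becomes a small surrounding range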
| 31.121212 | 119 | 0.543817 |
24d549d8e10dc17807c702172c3db31e6793dde3
| 31 |
py
|
Python
|
frappe-bench/env/lib/python2.7/site-packages/bleach_whitelist/__init__.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/bleach_whitelist/__init__.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/bleach_whitelist/__init__.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
from bleach_whitelist import *
| 15.5 | 30 | 0.83871 |
d9198234423e5d3c84ef6a9aac1201bc80bc1c51
| 1,605 |
py
|
Python
|
etl/io_config/server_protocol.py
|
cloud-cds/cds-stack
|
d68a1654d4f604369a071f784cdb5c42fc855d6e
|
[
"Apache-2.0"
] | 6 |
2018-06-27T00:09:55.000Z
|
2019-03-07T14:06:53.000Z
|
etl/io_config/server_protocol.py
|
cloud-cds/cds-stack
|
d68a1654d4f604369a071f784cdb5c42fc855d6e
|
[
"Apache-2.0"
] | 3 |
2021-03-31T18:37:46.000Z
|
2021-06-01T21:49:41.000Z
|
etl/io_config/server_protocol.py
|
cloud-cds/cds-stack
|
d68a1654d4f604369a071f784cdb5c42fc855d6e
|
[
"Apache-2.0"
] | 3 |
2020-01-24T16:40:49.000Z
|
2021-09-30T02:28:55.000Z
|
import asyncio
import json
import logging
import socket, errno
from etl.io_config.core import get_environment_var
SRV_LOG_FMT = '%(asctime)s|%(name)s|%(process)s-%(thread)s|%(levelname)s|%(message)s'
logging.basicConfig(level=logging.INFO, format=SRV_LOG_FMT)
MAGIC_NUMBER = b'trews_magic_number'
CONNECTION_CLOSED = 'Connection Closed'
LMC_ALERT_SERVER_IP = get_environment_var('LMC_ALERT_SERVER_IP',
'alerts.default.svc.cluster.local')
LMC_ALERT_SERVER_PORT = 31000
TREWS_ALERT_SERVER_IP = get_environment_var('TREWS_ALERT_SERVER_IP',
'trews-alerts.default.svc.cluster.local')
TREWS_ALERT_SERVER_PORT = 31000
async def read_message(reader, writer):
try:
data = await reader.readuntil(MAGIC_NUMBER)
except asyncio.streams.IncompleteReadError:
return CONNECTION_CLOSED
# Decode and return message
EOM = -1 * len(MAGIC_NUMBER)
data = data[:EOM]
logging.debug('Receiving from {}: {}'.format(writer.get_extra_info('peername'), data))
return json.loads(data.decode())
async def write_message(writer, message):
logging.debug('Sending to {}: {}'.format(writer.get_extra_info('sockname'), message))
if type(message) != dict:
raise ValueError('write_message takes a dictionary as the second argument')
try:
writer.write(json.dumps(message).encode() + MAGIC_NUMBER)
await writer.drain()
return True
except (socket.error, IOError) as e:
if e.errno == errno.EPIPE:
logging.error(e)
else:
logging.error("Other error: {}".format(e))
writer.close()
return False
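# Hedged usage sketch (not part of the original file): a toy echo server built on
# the framing helpers above; the loopback host and port are assumptions.
if __name__ == '__main__':
    async def _echo_handler(reader, writer):
        msg = await read_message(reader, writer)
        if msg != CONNECTION_CLOSED:
            await write_message(writer, {'echo': msg})
        writer.close()

    async def _serve():
        server = await asyncio.start_server(_echo_handler, '127.0.0.1', 31000)
        async with server:
            await server.serve_forever()

    asyncio.run(_serve())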
| 32.1 | 89 | 0.709657 |
d9504bf6bd55f14bbf36d7503f8289596841f780
| 108 |
py
|
Python
|
packages/watchmen-meta/src/watchmen_meta/analysis/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-meta/src/watchmen_meta/analysis/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-meta/src/watchmen_meta/analysis/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from .pipeline_index_service import PipelineIndexService
from .topic_index_service import TopicIndexService
| 36 | 56 | 0.907407 |
d95f44159ce85d870bd47768845befcafc69f3e9
| 933 |
py
|
Python
|
INBa/2015/Chinkirov_V_V/task_4_29.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Chinkirov_V_V/task_4_29.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
INBa/2015/Chinkirov_V_V/task_4_29.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 4. Variant 28.
# Write a program that prints the name
# behind which Emile Herzog is hidden.
# Additionally, print the person's field of interest,
# place of birth, years of birth and death (if the person has died),
# and compute the current age (or the age at the moment of death).
# Variables must be used to store all the required data.
# After printing the information, the program must wait until the user
# presses Enter to exit.
# Chinkirov V.V.
# 28.03.2016
name = "Emile Herzog"
city = "Elbeuf, France"
rod = 1895   # year of birth
dead = 1934  # year of death
age = dead - rod
interest = "Writer"
print(name + " is best known as Andre Maurois, a French writer and member of the French Academy. ")
print("Place of birth: " + city)
print("Year of birth: " + str(rod))
print("Year of death: " + str(dead))
print("Age at death: " + str(age))
print("Field of interest: " + interest)
input("Press Enter to close")
| 35.884615 | 102 | 0.757771 |
79d3fba129da163bfef8d0d4759998082ab4c008
| 1,331 |
py
|
Python
|
profiles/urls.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
profiles/urls.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
profiles/urls.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
app_name = 'profiles'
urlpatterns = [
url(
r'^$',
views.profile,
name='profile'
),
# Login mechanism
url(
r'^login/$',
views.login_user,
name='login'
),
# Logout mechanism
url(
r'^logout/$',
views.logout_user,
name='logout'
),
# Signup mechanism
url(
r'^signup/$',
views.signup,
name='signup'
),
# Change Password
url(
r'^change_password/$',
views.change_password,
name='change_password'
),
# General terms and conditions
url(
r'^general_terms_and_conditions/$',
views.general_terms_and_conditions_view,
name='general_terms_and_conditions'
),
# Privacy policy
url(
r'^privacy_policy/$',
views.privacy_policy_view,
name='privacy_policy'
),
# Provide feedback
url(
r'^feedback/$',
views.feedback,
name='feedback'
),
# Resolve Feedback
url(
r'^feedback/(?P<id>[0-9]+)/resolve/$',
views.resolve_feedback,
name='resolve_feedback'
),
# Deposit funds in account
url(
r'^payment/$',
views.payment,
name='payment'
),
]
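# Hedged usage note (not part of the original file): with app_name = 'profiles',
# each route can be reversed through its namespaced name, e.g.
#     reverse('profiles:login')
#     reverse('profiles:resolve_feedback', kwargs={'id': 42})
# (reverse() lives in django.urls on recent versions, django.core.urlresolvers on older ones).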
| 17.285714 | 48 | 0.525169 |
ccde64f635ab8dae261e9151bd5fa024ac46889a
| 6,096 |
py
|
Python
|
pymantic/tests/test_primitives.py
|
dnswd/blazegraph-python
|
046a6b47406b0f56d71abc6039f4d7586a1708d2
|
[
"BSD-3-Clause"
] | 42 |
2016-01-15T14:31:48.000Z
|
2022-03-10T14:32:25.000Z
|
pymantic/tests/test_primitives.py
|
igor-kim/blazegraph-python
|
7be8d219e00acb51d949bf49aaaed90c2c2344e5
|
[
"BSD-3-Clause"
] | 3 |
2016-10-02T18:36:42.000Z
|
2019-09-18T15:48:58.000Z
|
pymantic/tests/test_primitives.py
|
igor-kim/blazegraph-python
|
7be8d219e00acb51d949bf49aaaed90c2c2344e5
|
[
"BSD-3-Clause"
] | 11 |
2016-08-18T09:47:52.000Z
|
2021-12-26T06:22:18.000Z
|
from nose.tools import *
from pymantic.primitives import *
import random
def en(s):
return Literal(s, "en")
def test_to_curie_multi_match():
"""Test that the longest match for prefix is used"""
namespaces = {'short': "aa", 'long': "aaa"}
curie = to_curie("aaab", namespaces)
    print(curie)
assert curie == 'long:b'
def test_simple_add():
t = Triple(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),en("Never!"))
g = Graph()
g.add(t)
assert t in g
def test_simple_remove():
t = Triple(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),en("Never!"))
g = Graph()
g.add(t)
g.remove(t)
assert t not in g
def test_match_VVV_pattern():
t = Triple(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),en("Never!"))
g = Graph()
g.add(t)
matches = g.match(None, None, None)
assert t in matches
def test_match_sVV_pattern():
t = Triple(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),en("Never!"))
g = Graph()
g.add(t)
matches = g.match(NamedNode("http://example.com"), None, None)
assert t in matches
def test_match_sVo_pattern():
t = Triple(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),en("Never!"))
g = Graph()
g.add(t)
matches = g.match(NamedNode("http://example.com"), None, en("Never!"))
assert t in matches
def test_match_spV_pattern():
t = Triple(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),en("Never!"))
g = Graph()
g.add(t)
matches = g.match(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"), None)
assert t in matches
def test_match_Vpo_pattern():
t = Triple(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),en("Never!"))
g = Graph()
g.add(t)
matches = g.match(None, NamedNode("http://purl.org/dc/terms/issued"), en("Never!"))
assert t in matches
def test_match_VVo_pattern():
t = Triple(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),en("Never!"))
g = Graph()
g.add(t)
matches = g.match(None, None, en("Never!"))
assert t in matches
def test_match_VpV_pattern():
t = Triple(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),en("Never!"))
g = Graph()
g.add(t)
matches = g.match(None, NamedNode("http://purl.org/dc/terms/issued"), None)
assert t in matches
def generate_triples(n=10):
for i in range(1,n):
yield Triple(NamedNode("http://example/" + str(random.randint(1,1000))),
NamedNode("http://example/terms/" + str(random.randint(1,1000))),
Literal(random.randint(1,1000)))
def test_10000_triples():
n = 10000
g = Graph()
for t in generate_triples(n):
g.add(t)
assert len(g) > n * .9
matches = g.match(NamedNode("http://example.com/42"), None, None)
matches = g.match(None, NamedNode("http://example/terms/42"), None)
matches = g.match(None, None, Literal(42))
def test_iter_10000_triples():
n = 10000
g = Graph()
triples = set()
for t in generate_triples(n):
g.add(t)
triples.add(t)
assert len(g) > n * .9
for t in g:
triples.remove(t)
assert len(triples) == 0
# Dataset Tests
def test_add_quad():
q = Quad(NamedNode("http://example.com/graph"),NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),Literal("Never!"))
ds = Dataset()
ds.add(q)
assert q in ds
def test_remove_quad():
q = Quad(NamedNode("http://example.com/graph"),NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),Literal("Never!"))
ds = Dataset()
ds.add(q)
ds.remove(q)
assert q not in ds
def test_ds_len():
n = 10
ds = Dataset()
for q in generate_quads(n):
ds.add(q)
assert len(ds) == 10
def test_match_ds_sVV_pattern():
q = Quad(NamedNode("http://example.com"),
NamedNode("http://purl.org/dc/terms/issued"),Literal("Never!"),
NamedNode("http://example.com/graph"))
ds = Dataset()
ds.add(q)
matches = ds.match(subject=NamedNode("http://example.com"))
assert q in matches
def test_match_ds_quad_pattern():
q = Quad(NamedNode("http://example.com"),
NamedNode("http://purl.org/dc/terms/issued"),Literal("Never!"),
NamedNode("http://example.com/graph"))
ds = Dataset()
ds.add(q)
matches = ds.match(graph="http://example.com/graph")
assert q in matches
def test_add_graph():
t = Triple(NamedNode("http://example.com"), NamedNode("http://purl.org/dc/terms/issued"),Literal("Never!"))
g = Graph("http://example.com/graph")
g.add(t)
ds = Dataset()
ds.add_graph(g)
assert t in ds
def generate_quads(n):
for i in range(n):
yield Quad(NamedNode("http://example/" + str(random.randint(1,1000))),
NamedNode("http://purl.org/dc/terms/" + str(random.randint(1,100))),
Literal(random.randint(1,1000)),
NamedNode("http://example/graph/"+str(random.randint(1,1000))))
def test_10000_quads():
n = 10000
ds = Dataset()
for q in generate_quads(n):
ds.add(q)
assert len(ds) > n * .9
matches = ds.match(subject=NamedNode("http://example.com/42"),
graph=NamedNode("http://example/graph/42"))
def test_iter_10000_quads():
n = 10000
ds = Dataset()
quads = set()
for q in generate_quads(n):
ds.add(q)
quads.add(q)
assert len(ds) > n * .9
for quad in ds:
quads.remove(quad)
assert len(quads) == 0
def test_interfaceName():
assert Literal("Bob", "en").interfaceName == "Literal"
assert NamedNode().interfaceName == "NamedNode"
def test_BlankNode_id():
b1 = BlankNode()
b2 = BlankNode()
assert b1.value != b2.value
| 32.253968 | 147 | 0.60876 |
033cebcbd7d667e2cf8eb5c214611871d49fe72b
| 20,012 |
py
|
Python
|
wz/ui/gridbase.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/ui/gridbase.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
wz/ui/gridbase.py
|
gradgrind/WZ
|
672d93a3c9d7806194d16d6d5b9175e4046bd068
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
ui/gridbase.py
Last updated: 2021-10-10
Widget with tiles on grid layout (QGraphicsScene/QGraphicsView).
=+LICENCE=============================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""
##### Configuration #####################
FONT_DEFAULT = 'Droid Sans'
FONT_SIZE_DEFAULT = 11
FONT_COLOUR = '442222' # rrggbb
BORDER_COLOUR = '000088' # rrggbb
MARK_COLOUR = 'E00000' # rrggbb
# Line width for borders
UNDERLINE_WIDTH = 3.0
BORDER_WIDTH = 1.0
SCENE_MARGIN = 10.0 # Margin around content in GraphicsView widgets
#####################
### Messages
_TILE_OUT_OF_BOUNDS = ("Kachel außerhalb Tabellenbereich:\n"
" Zeile {row}, Höhe {rspan}, Spalte {col}, Breite {cspan}")
_NOTSTRING = "In <grid::Tile>: Zeichenkette erwartet: {val}"
#####################################################
import sys, os, copy
from PySide6.QtWidgets import QGraphicsView, QGraphicsScene, \
QGraphicsRectItem, QGraphicsSimpleTextItem, QGraphicsLineItem
from PySide6.QtGui import (QFont, QPen, QColor, QBrush, QTransform,
QPainter, QPdfWriter, QPageLayout)
from PySide6.QtCore import Qt, QMarginsF, QRectF, QBuffer, QByteArray
class GridError(Exception):
pass
### ---
class GridView(QGraphicsView):
"""This is the "view" widget for the grid.
The actual grid is implemented as a "scene".
"""
def __init__(self):
self._scale = 1.0
super ().__init__()
# Change update mode: The default, MinimalViewportUpdate, seems
# to cause artefacts to be left, i.e. it updates too little.
# Also BoundingRectViewportUpdate seems not to be 100% effective.
#self.setViewportUpdateMode(self.BoundingRectViewportUpdate)
self.setViewportUpdateMode(self.FullViewportUpdate)
self.ldpi = self.logicalDpiX()
if self.logicalDpiY() != self.ldpi:
REPORT('WARNING', "LOGICAL DPI different for x and y")
self.MM2PT = self.ldpi / 25.4
#
def set_scene(self, scene):
"""Set the QGraphicsScene for this view. The size will be fixed
to that of the initial <sceneRect> (to prevent it from being
altered by pop-ups).
<scene> may be <None>, to remove the current scene.
"""
self.setScene(scene)
if scene:
self.setSceneRect(scene._sceneRect)
#
def mousePressEvent(self, event):
point = event.pos()
# print("POS:", point, self.mapToGlobal(point), self.itemAt(point))
# The Tile may not be the top item.
items = self.items(point)
button = event.button()
if items:
for item in items:
# Give all items at this point a chance to react, starting
# with the topmost. An item can break the chain by
# returning a false value.
try:
if button == Qt.LeftButton:
if not item.leftclick():
return
elif button == Qt.RightButton:
if not item.rightclick():
return
except AttributeError:
pass
#
### View scaling
def scaleUp (self):
self.scale(1)
#
def scaleDn (self):
self.scale(-1)
#
def scale(self, delta):
t = QTransform()
self._scale += self._scale * delta / 10
t.scale(self._scale, self._scale)
self.setTransform(t)
### ---------------
###
class GridViewRescaling(GridView):
"""An QGraphicsView that automatically adjusts the scaling of its
scene to fill the viewing window.
"""
def __init__(self):
super().__init__()
# Apparently it is a good idea to disable scrollbars when using
# this resizing scheme. With this resizing scheme they would not
# appear anyway, so this doesn't lose any features!
self.setHorizontalScrollBarPolicy (Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy (Qt.ScrollBarAlwaysOff)
def resizeEvent(self, event):
self.resize()
return super().resizeEvent(event)
def resize(self, qrect=None):
if qrect == None:
qrect = self.sceneRect()
self.fitInView(qrect, Qt.KeepAspectRatio)
###
class GridBase(QGraphicsScene):
def __init__(self, gview, rowheights, columnwidths):
"""Set the grid size.
<columnwidths>: a list of column widths (mm)
<rowheights>: a list of row heights (mm)
Rows and columns are 0-indexed.
"""
super().__init__()
self._gview = gview
self._styles = {'*': CellStyle(FONT_DEFAULT, FONT_SIZE_DEFAULT,
align = 'c', border = 1, mark = MARK_COLOUR)
}
self.xmarks = [0.0]
x = 0.0
for c in columnwidths:
x += c * self._gview.MM2PT
self.xmarks.append(x)
self.ymarks = [0.0]
y = 0.0
for r in rowheights:
y += r * self._gview.MM2PT
self.ymarks.append(y)
# Allow a little margin
self._sceneRect = QRectF(-SCENE_MARGIN, -SCENE_MARGIN,
x + 2 * SCENE_MARGIN, y + 2 * SCENE_MARGIN)
#
def style(self, name):
return self._styles[name]
#
def new_style(self, name, base = None, **params):
if base:
style0 = self._styles[base]
self._styles[name] = style0.copy(**params)
else:
self._styles[name] = CellStyle(params.pop('font', None),
params.pop('size', None), **params)
#
def ncols(self):
return len(self.xmarks) - 1
#
def nrows(self):
return len(self.ymarks) - 1
#
def screen_coordinates(self, x, y):
"""Return the screen coordinates of the given scene point.
"""
viewp = self._gview.mapFromScene(x, y)
return self._gview.mapToGlobal(viewp)
#
def basic_tile(self, row, col, tag, text, style, cspan = 1, rspan = 1):
"""Add a basic tile to the grid, checking coordinates and
converting row + col to x + y point-coordinates for the
<Tile> class.
"""
# Check bounds
if (row < 0 or col < 0
or (row + rspan) >= len(self.ymarks)
or (col + cspan) >= len(self.xmarks)):
raise GridError(_TILE_OUT_OF_BOUNDS.format(
row = row, col = col, cspan = cspan, rspan = rspan))
x = self.xmarks[col]
y = self.ymarks[row]
w = self.xmarks[col + cspan] - x
h = self.ymarks[row + rspan] - y
t = Tile(self, tag, x, y, w, h, text, self._styles[style])
self.addItem(t)
return t
#
### pdf output
def setPdfMargins(self, left = 15, top = 15, right = 15, bottom = 15):
self._pdfmargins = (left, top, right, bottom)
return self._pdfmargins
#
def pdfMargins(self):
try:
return self._pdfmargins
except AttributeError:
return self.setPdfMargins()
#
def to_pdf(self, filepath):
"""Produce and save a pdf of the table.
The output orientation is selected according to the aspect ratio
of the table. If the table is too big for the page area, it will
be shrunk to fit.
"""
if not filepath.endswith('.pdf'):
filepath += '.pdf'
printer = QPdfWriter(filepath)
printer.setPageSize(printer.A4)
printer.setPageMargins(QMarginsF(*self.pdfMargins()),
QPageLayout.Millimeter)
sceneRect = self._sceneRect
sw = sceneRect.width()
sh = sceneRect.height()
if sw > sh:
printer.setPageOrientation(QPageLayout.Orientation.Landscape)
painter = QPainter()
painter.begin(printer)
scaling = printer.logicalDpiX() / self._gview.ldpi
# Do drawing with painter
page_layout = printer.pageLayout()
pdf_rect = page_layout.paintRect(QPageLayout.Point)
pdf_w = pdf_rect.width()
pdf_h = pdf_rect.height()
if sw > pdf_w or sh > pdf_h:
# Shrink to fit page
self.render(painter)
else:
# Scale resolution to keep size
pdf_rect.setWidth(sw * scaling)
pdf_rect.setHeight(sh * scaling)
self.render(painter, pdf_rect)
painter.end()
return filepath
#
# An earlier, alternative implementation of the pdf writer:
def to_pdf0(self, filepath):
"""Produce and save a pdf of the table.
The output orientation is selected according to the aspect ratio
of the table. If the table is too big for the page area, it will
be shrunk to fit.
"""
qbytes = QByteArray()
qbuf = QBuffer(qbytes)
qbuf.open(qbuf.WriteOnly)
printer = QPdfWriter(qbuf)
printer.setPageSize(printer.A4)
printer.setPageMargins(QMarginsF(*self.pdfMargins()),
QPageLayout.Millimeter)
sceneRect = self._sceneRect
sw = sceneRect.width()
sh = sceneRect.height()
if sw > sh:
printer.setPageOrientation(QPageLayout.Orientation.Landscape)
pdf_dpmm = printer.resolution() / 25.4 # pdf resolution, dots per mm
scene_dpmm = self._gview.MM2PT # scene resolution, dots per mm
natural_scale = pdf_dpmm / scene_dpmm
page_layout = printer.pageLayout()
pdf_rect = page_layout.paintRect(QPageLayout.Millimeter)
swmm = sw / self._gview.MM2PT
shmm = sh / self._gview.MM2PT
painter = QPainter(printer)
pdf_wmm = pdf_rect.width()
pdf_hmm = pdf_rect.height()
if swmm > pdf_wmm or shmm > pdf_hmm:
# Shrink to fit page
self.render(painter)
else:
# Scale resolution to keep size
pdf_rect.setWidth(sw * natural_scale)
pdf_rect.setHeight(sh * natural_scale)
self.render(painter, pdf_rect)
painter.end()
qbuf.close()
# Write resulting file
if not filepath.endswith('.pdf'):
filepath += '.pdf'
with open(filepath, 'wb') as fh:
fh.write(bytes(qbytes))
return filepath
###
class CellStyle:
"""Handle various aspects of cell styling.
Also manage caches for fonts, pens and brushes.
"""
_fonts = {}
_brushes = {}
_pens = {}
#
@classmethod
def getFont(cls, fontFamily, fontSize, fontBold, fontItalic):
ftag = (fontFamily, fontSize, fontBold, fontItalic)
try:
return cls._fonts[ftag]
except:
pass
font = QFont()
if fontFamily:
font.setFamily(fontFamily)
if fontSize:
font.setPointSizeF(fontSize)
if fontBold:
font.setBold(True)
if fontItalic:
font.setItalic(True)
cls._fonts[ftag] = font
return font
#
@classmethod
def getPen(cls, width, colour = None):
"""Manage a cache for pens of different width and colour.
"""
if width:
wc = (width, colour or BORDER_COLOUR)
try:
return cls._pens[wc]
except AttributeError:
cls._pens = {}
except KeyError:
pass
pen = QPen('#FF' + wc[1])
pen.setWidthF(wc[0])
cls._pens[wc] = pen
return pen
else:
try:
return cls._noPen
except AttributeError:
cls._noPen = QPen()
cls._noPen.setStyle(Qt.NoPen)
return cls._noPen
#
@classmethod
def getBrush(cls, colour):
"""Manage a cache for brushes of different colour.
<colour> is a colour in the form 'RRGGBB'.
"""
try:
return cls._brushes[colour or FONT_COLOUR]
except:
pass
brush = QBrush(QColor('#FF' + (colour or FONT_COLOUR)))
        # Cache under the resolved key so default-colour lookups hit the cache
        cls._brushes[colour or FONT_COLOUR] = brush
return brush
#
def __init__(self, font, size, align = 'c', highlight = None,
bg = None, border = 1, border_colour = None, mark = None):
"""
<font> is the name of the font (<None> => default, not recommended,
unless the cell is to contain no text).
<size> is the size of the font (<None> => default, not recommended,
unless the cell is to contain no text).
<align> is the horizontal (l, c or r) OR vertical (b, m, t) alignment.
Vertical alignment is for rotated text (-90° only).
<highlight> can set bold, italic and font colour: 'bi:RRGGBB'. All bits
are optional, but the colon must be present if a colour is given.
<bg> can set the background colour ('RRGGBB').
<border>: Only three border types are supported here:
0: none
1: all sides
2: (thicker) underline
<border_colour>: 'RRGGBB', default is <BORDER_COLOUR>.
<mark> is a colour ('RRGGBB') which can be selected as an
"alternative" font colour.
"""
# Font
self.setFont(font, size, highlight)
self.colour_marked = mark
# Alignment
self.setAlign(align)
# Background colour
self.bgColour = self.getBrush(bg) if bg else None
# Border
self.border = border
self.border_colour = border_colour
#
def setFont(self, font, size, highlight):
self._font, self._size, self._highlight = font, size, highlight
try:
emph, clr = highlight.split(':')
except:
emph, clr = highlight or '', None
self.fontColour = self.getBrush(clr)
self.font = self.getFont(font, size, 'b' in emph, 'i' in emph)
#
def setAlign(self, align):
if align in 'bmt':
# Vertical
self.alignment = ('c', align, True)
else:
self.alignment = (align, 'm', False)
#
def copy(self, font = None, size = None, align = None,
highlight = None, mark = None, bg = None, border = None):
"""Make a copy of this style, but with changes specified by the
parameters.
Note that a change to a 'None' parameter value is not possible.
"""
newstyle = copy.copy(self)
if font or size or highlight:
newstyle.setFont(font or self._font,
size or self._size, highlight or self._highlight)
if mark:
newstyle.colour_marked = mark
if align:
newstyle.setAlign(align)
if bg:
newstyle.bgColour = self.getBrush(bg)
if border != None:
newstyle.border = border
return newstyle
###
class Tile(QGraphicsRectItem):
"""The graphical representation of a table cell.
This cell can span rows and columns.
It contains a simple text element.
Both cell and text can be styled to a limited extent (see <CellStyle>).
"""
def __init__(self, grid, tag, x, y, w, h, text, style):
self._style = style
self._grid = grid
self.tag = tag
self.height0 = h
self.width0 = w
super().__init__(0, 0, w, h)
self.setFlag(self.ItemClipsChildrenToShape, True)
self.setPos(x, y)
# Background colour
if style.bgColour != None:
self.setBrush(style.bgColour)
# Border
if style.border == 1:
# Set the pen for the rectangle boundary
pen0 = CellStyle.getPen(BORDER_WIDTH, style.border_colour)
else:
# No border for the rectangle
pen0 = CellStyle.getPen(None)
if style.border != 0:
# Thick underline
line = QGraphicsLineItem(self)
line.setPen(CellStyle.getPen(UNDERLINE_WIDTH,
style.border_colour))
line.setLine(0, h, w, h)
self.setPen(pen0)
# Alignment and rotation
self.halign, self.valign, self.rotation = style.alignment
# Text
self.textItem = QGraphicsSimpleTextItem(self)
self.textItem.setFont(style.font)
self.textItem.setBrush(style.fontColour)
self.setText(text or '')
#
def mark(self):
if self._style.colour_marked:
self.textItem.setBrush(self._style.getBrush(self._style.colour_marked))
#
def unmark(self):
self.textItem.setBrush(self._style.fontColour)
#
def margin(self):
return 0.4 * self._grid._gview.MM2PT
#
def value(self):
return self._text
#
def setText(self, text):
if type(text) != str:
raise GridError(_NOTSTRING.format(val = repr(text)))
self._text = text
self.textItem.setText(text)
self.textItem.setScale(1)
w = self.textItem.boundingRect().width()
h = self.textItem.boundingRect().height()
if text:
scale = 1
maxw = self.width0 - self.margin() * 2
maxh = self.height0 - self.margin() * 2
if self.rotation:
maxh -= self.margin() * 4
if w > maxh:
scale = maxh / w
if h > maxw:
_scale = maxw / h
if _scale < scale:
scale = _scale
if scale < 0.6:
self.textItem.setText('###')
scale = (maxh /
self.textItem.boundingRect().width())
if scale < 1:
self.textItem.setScale(scale)
trf = QTransform().rotate(-90)
self.textItem.setTransform(trf)
else:
maxw -= self.margin() * 4
if w > maxw:
scale = maxw / w
if h > maxh:
_scale = maxh / h
if _scale < scale:
scale = _scale
if scale < 0.6:
self.textItem.setText('###')
scale = (maxw /
self.textItem.boundingRect().width())
if scale < 1:
self.textItem.setScale(scale)
# This print line can help find box size problems:
# print("BOX-SCALE: %5.3f (%s) *** w: %6.2f / %6.2f *** h: %6.2f / %6.2f"
# % (scale, text, w, maxw, h, maxh))
bdrect = self.textItem.mapRectToParent(
self.textItem.boundingRect())
yshift = - bdrect.top() if self.rotation else 0.0
w = bdrect.width()
h = bdrect.height()
xshift = 0.0
if self.halign == 'l':
xshift += self.margin()
elif self.halign == 'r':
xshift += self.width0 - self.margin() - w
else:
xshift += (self.width0 - w) / 2
if self.valign == 't':
yshift += self.margin()
elif self.valign == 'b':
yshift += self.height0 - self.margin() - h
else:
yshift += (self.height0 - h) / 2
self.textItem.setPos(xshift, yshift)
#
def leftclick(self):
return self._grid.tile_left_clicked(self)
#
def rightclick(self):
return self._grid.tile_right_clicked(self)
#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#
#TODO ...
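# Hedged usage sketch (not part of the original file): a minimal 2x2 grid with one
# spanned header tile, exported to pdf; sizes, colours and the path are assumptions.
if __name__ == '__main__':
    from PySide6.QtWidgets import QApplication
    app = QApplication([])
    view = GridView()
    grid = GridBase(view, rowheights=[10, 10], columnwidths=[40, 40])
    grid.new_style('head', base='*', highlight='b:FFFFFF', bg='446688')
    grid.basic_tile(0, 0, 'h', 'Header', 'head', cspan=2)
    grid.basic_tile(1, 0, 'a', 'cell A', '*')
    grid.basic_tile(1, 1, 'b', 'cell B', '*')
    view.set_scene(grid)
    grid.to_pdf('grid_demo')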
| 34.803478 | 84 | 0.560164 |
064ba62a570f85c226bd1d351ff184276e78c21f
| 10,283 |
py
|
Python
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladebios_lib.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 5 |
2019-11-11T07:57:26.000Z
|
2022-03-28T08:26:53.000Z
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladebios_lib.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 3 |
2019-09-05T21:47:07.000Z
|
2019-09-17T18:10:45.000Z
|
Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladebios_lib.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 11 |
2019-07-20T00:16:32.000Z
|
2022-01-11T14:17:48.000Z
|
# Copyright (C) Microsoft Corporation. All rights reserved.
# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ipmicmd_library import *
def get_server_bios_config(serverid):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48",completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface,completion_code.failure)
ipmi_cmd = 'ocsoem biosconfig' # IPMI command to get server bios config details
cmdinterface = interface + ' ' + ipmi_cmd
bios_config = parse_get_bios_config(cmdinterface, "getserverbiosconfig")
if bios_config is None or not bios_config: # Check empty or none
#return set_failure_dict("Empty data for biosconfig", "-1")
return set_failure_dict("Empty data for biosconfig",completion_code.failure)
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("getbiosconfig Exception: ", e),completion_code.failure)
return bios_config
def set_server_bios_config(serverid, majorconfig, minorconfig):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48",completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface,completion_code.failure)
ipmi_cmd = 'ocsoem setbiosconfig' + ' ' + str(majorconfig) + ' ' + str(minorconfig) # IPMI command to set server bios config details
cmdinterface = interface + ' ' + ipmi_cmd
bios_config = parse_set_bios_config(cmdinterface, "setserverbiosconfig")
if bios_config is None or not bios_config: # Check empty or none
return set_failure_dict("Empty data for setbiosconfig",completion_code.failure)
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("setbiosconfig Exception: ", e),completion_code.failure)
return bios_config
def get_bios_code(serverid, version):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48",completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface,completion_code.failure)
ipmi_cmd = 'ocsoem bioscode' + ' ' + version # IPMI command to get server bios code details
cmdinterface = interface + ' ' + ipmi_cmd
bios_code = parse_bioscode(cmdinterface, "getserverbioscode")
if bios_code is None or not bios_code: # Check empty or none
return set_failure_dict("Empty data for getserverbioscode",completion_code.failure)
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("getbioscode Exception: ", e),completion_code.failure)
return bios_code
def parse_get_bios_config(interface, command):
try:
completionstate = True
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return output
biosconfigrsp = {}
biosconfigrsp["AvailableConfigurations"] = {}
if(output['status_code'] == 0):
biosdata = output['stdout'].split('\n\n')
#Gets current and chosen config details from output
current_config_details = biosdata.pop(0)
currentconfig = current_config_details.split('\n')
for cfgval in currentconfig:
if "Current BIOS Configuration" in cfgval:
biosconfigrsp["Current BIOS Configuration"] = cfgval.split(":")[-1]
elif "Chosen BIOS Configuration" in cfgval:
biosconfigrsp["Chosen BIOS Configuration"] = cfgval.split(":")[-1]
elif "Available Configuration Name" in cfgval:
biosconfigrsp["AvailableConfigName"] = cfgval.split(":")[-1]
# Gets all available configuration details
for availablecfg in biosdata:
configdata = availablecfg.split('\n')
config_value= filter(None, configdata)
# Skipping empty lists if any
if len(config_value) == 0:
break
else:
if config_value[0].lower().strip('-').strip() == "Available Configurations".lower():
available_config_data = availablecfg.split('*')
available_config_value= filter(None, available_config_data)
config_info = get_config_data(available_config_value)
if completion_code.cc_key in config_info.keys():
completionstate &= False
biosconfigrsp["AvailableConfigurations"] = None
else:
biosconfigrsp["AvailableConfigurations"] = config_info
if(completionstate):
biosconfigrsp[completion_code.cc_key] = completion_code.success
else:
biosconfigrsp[completion_code.cc_key] = completion_code.failure
return biosconfigrsp
else:
error_data = output['stderr'].split('\n')
biosconfigrsp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
biosconfigrsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
biosconfigrsp[completion_code.ipmi_code] = data.split(":")[-1]
return biosconfigrsp
except Exception,e:
#log.exception("GetserverBiosConfig: Exception error:" ,e)
return set_failure_dict(("parse_get_bios_config() Exception: ",e),completion_code.failure)
def get_config_data(configdata):
try:
config_rsp = {}
config_id = 1
for value in configdata:
config_data = value.split('\n')
config_info = filter(None, config_data) # Removes empty strings
# Skipping empty lists if any
if len(config_info) == 0:
break
config_rsp[config_id] = {}
for value in config_info:
if "ConfigName" in value:
config_rsp[config_id]["Config Name"] = value.split(":")[-1].strip()
elif "ConfigValue" in value:
config_rsp[config_id]["Config Value"] = value.split(":")[-1].strip()
config_id = config_id + 1
except Exception,e:
config_rsp[completion_code.cc_key] = completion_code.failure
config_rsp[completion_code.desc] = "Get available config data, Exception: ", e
return config_rsp
def parse_set_bios_config(interface, command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return output
setbiosconfigrsp = {}
if(output['status_code'] == 0):
sdata = output['stdout'].split('\n')
completionstate = sdata.pop(0)
if "Completion Status" in completionstate:
setbiosconfigrsp[completion_code.cc_key] = completionstate.split(":")[-1]
return setbiosconfigrsp
else:
error_data = output['stderr'].split('\n')
setbiosconfigrsp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
setbiosconfigrsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
setbiosconfigrsp[completion_code.ipmi_code] = data.split(":")[-1]
return setbiosconfigrsp
except Exception, e:
#log.exception("Exception error is: ",e)
return set_failure_dict(("parse_set_bios_config() Exception ",e),completion_code.failure)
def parse_bioscode(interface, command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return output
biosrsp = {}
if(output['status_code'] == 0):
sdata = output['stdout'].split('\n')
biosrsp["Bios Code"] = str(sdata[0])
biosrsp[completion_code.cc_key] = completion_code.success
return biosrsp
else:
error_data = output['stderr'].split('\n')
biosrsp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
biosrsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
biosrsp[completion_code.ipmi_code] = data.split(":")[-1]
return biosrsp
except Exception, e:
#log.exception("Exception error is: %s " %e)
#print "Exception: ", e
return set_failure_dict(("ParseGetBiosCodeResult() Exception: ",e),completion_code.failure)
| 42.143443 | 142 | 0.561704 |
d1cb1afdc983b8fa4778f32f8458e3c566262013
| 6,272 |
py
|
Python
|
official/cv/unet/src/unet_nested/unet_model.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/unet/src/unet_nested/unet_model.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/unet/src/unet_nested/unet_model.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Model of UnetPlusPlus
import mindspore.nn as nn
import mindspore.ops as P
from .unet_parts import UnetConv2d, UnetUp
class NestedUNet(nn.Cell):
"""
Nested unet
"""
def __init__(self, in_channel, n_class=2, feature_scale=2, use_deconv=True, use_bn=True, use_ds=True):
super(NestedUNet, self).__init__()
self.in_channel = in_channel
self.n_class = n_class
self.feature_scale = feature_scale
self.use_deconv = use_deconv
self.use_bn = use_bn
self.use_ds = use_ds
filters = [64, 128, 256, 512, 1024]
filters = [int(x / self.feature_scale) for x in filters]
# Down Sample
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="same")
self.conv00 = UnetConv2d(self.in_channel, filters[0], self.use_bn)
self.conv10 = UnetConv2d(filters[0], filters[1], self.use_bn)
self.conv20 = UnetConv2d(filters[1], filters[2], self.use_bn)
self.conv30 = UnetConv2d(filters[2], filters[3], self.use_bn)
self.conv40 = UnetConv2d(filters[3], filters[4], self.use_bn)
# Up Sample
self.up_concat01 = UnetUp(filters[1], filters[0], self.use_deconv, 2)
self.up_concat11 = UnetUp(filters[2], filters[1], self.use_deconv, 2)
self.up_concat21 = UnetUp(filters[3], filters[2], self.use_deconv, 2)
self.up_concat31 = UnetUp(filters[4], filters[3], self.use_deconv, 2)
self.up_concat02 = UnetUp(filters[1], filters[0], self.use_deconv, 3)
self.up_concat12 = UnetUp(filters[2], filters[1], self.use_deconv, 3)
self.up_concat22 = UnetUp(filters[3], filters[2], self.use_deconv, 3)
self.up_concat03 = UnetUp(filters[1], filters[0], self.use_deconv, 4)
self.up_concat13 = UnetUp(filters[2], filters[1], self.use_deconv, 4)
self.up_concat04 = UnetUp(filters[1], filters[0], self.use_deconv, 5)
        # Final Convolution
self.final1 = nn.Conv2d(filters[0], n_class, 1)
self.final2 = nn.Conv2d(filters[0], n_class, 1)
self.final3 = nn.Conv2d(filters[0], n_class, 1)
self.final4 = nn.Conv2d(filters[0], n_class, 1)
self.stack = P.Stack(axis=0)
def construct(self, inputs):
x00 = self.conv00(inputs) # channel = filters[0]
x10 = self.conv10(self.maxpool(x00)) # channel = filters[1]
x20 = self.conv20(self.maxpool(x10)) # channel = filters[2]
x30 = self.conv30(self.maxpool(x20)) # channel = filters[3]
x40 = self.conv40(self.maxpool(x30)) # channel = filters[4]
x01 = self.up_concat01(x10, x00) # channel = filters[0]
x11 = self.up_concat11(x20, x10) # channel = filters[1]
x21 = self.up_concat21(x30, x20) # channel = filters[2]
x31 = self.up_concat31(x40, x30) # channel = filters[3]
x02 = self.up_concat02(x11, x00, x01) # channel = filters[0]
x12 = self.up_concat12(x21, x10, x11) # channel = filters[1]
x22 = self.up_concat22(x31, x20, x21) # channel = filters[2]
x03 = self.up_concat03(x12, x00, x01, x02) # channel = filters[0]
x13 = self.up_concat13(x22, x10, x11, x12) # channel = filters[1]
x04 = self.up_concat04(x13, x00, x01, x02, x03) # channel = filters[0]
final1 = self.final1(x01)
final2 = self.final2(x02)
final3 = self.final3(x03)
final4 = self.final4(x04)
if self.use_ds:
final = self.stack((final1, final2, final3, final4))
return final
return final4
class UNet(nn.Cell):
"""
Simple UNet with skip connection
"""
def __init__(self, in_channel, n_class=2, feature_scale=2, use_deconv=True, use_bn=True):
super(UNet, self).__init__()
self.in_channel = in_channel
self.n_class = n_class
self.feature_scale = feature_scale
self.use_deconv = use_deconv
self.use_bn = use_bn
filters = [64, 128, 256, 512, 1024]
filters = [int(x / self.feature_scale) for x in filters]
# Down Sample
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="same")
self.conv0 = UnetConv2d(self.in_channel, filters[0], self.use_bn)
self.conv1 = UnetConv2d(filters[0], filters[1], self.use_bn)
self.conv2 = UnetConv2d(filters[1], filters[2], self.use_bn)
self.conv3 = UnetConv2d(filters[2], filters[3], self.use_bn)
self.conv4 = UnetConv2d(filters[3], filters[4], self.use_bn)
# Up Sample
self.up_concat1 = UnetUp(filters[1], filters[0], self.use_deconv, 2)
self.up_concat2 = UnetUp(filters[2], filters[1], self.use_deconv, 2)
self.up_concat3 = UnetUp(filters[3], filters[2], self.use_deconv, 2)
self.up_concat4 = UnetUp(filters[4], filters[3], self.use_deconv, 2)
        # Final Convolution
self.final = nn.Conv2d(filters[0], n_class, 1)
def construct(self, inputs):
x0 = self.conv0(inputs) # channel = filters[0]
x1 = self.conv1(self.maxpool(x0)) # channel = filters[1]
x2 = self.conv2(self.maxpool(x1)) # channel = filters[2]
x3 = self.conv3(self.maxpool(x2)) # channel = filters[3]
x4 = self.conv4(self.maxpool(x3)) # channel = filters[4]
up4 = self.up_concat4(x4, x3)
up3 = self.up_concat3(up4, x2)
up2 = self.up_concat2(up3, x1)
up1 = self.up_concat1(up2, x0)
final = self.final(up1)
return final
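# Hedged usage sketch (not part of the original file): one forward pass on random
# data; the input size just needs to be divisible by 16 for the four poolings.
if __name__ == "__main__":
    import numpy as np
    from mindspore import Tensor
    net = NestedUNet(in_channel=1, n_class=2, use_ds=False)
    x = Tensor(np.random.rand(1, 1, 96, 96).astype(np.float32))
    print(net(x).shape)  # expected: (1, 2, 96, 96)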
| 42.378378 | 106 | 0.615912 |
ee3d6d3e804f9ef1a9f8dd5e3ba587f570138ac4
| 90 |
py
|
Python
|
examples/helloWorld.py
|
Devoxx4KidsDE/workshop-minecraft-modding-raspberry-pi
|
7fcae0e43de51843565c2403fa66da26cb79a04b
|
[
"MIT"
] | 3 |
2016-02-29T09:22:05.000Z
|
2018-05-16T23:10:38.000Z
|
examples/helloWorld.py
|
Devoxx4KidsDE/workshop-minecraft-modding-raspberry-pi
|
7fcae0e43de51843565c2403fa66da26cb79a04b
|
[
"MIT"
] | 3 |
2016-01-20T20:58:28.000Z
|
2017-02-06T08:28:30.000Z
|
examples/helloWorld.py
|
Devoxx4KidsDE/workshop-minecraft-modding-raspberry-pi
|
7fcae0e43de51843565c2403fa66da26cb79a04b
|
[
"MIT"
] | 3 |
2016-01-20T20:02:57.000Z
|
2021-03-10T20:21:59.000Z
|
from mcpi import minecraft
mc = minecraft.Minecraft.create()
mc.postToChat("Hello World")
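# Hedged extension sketch (not part of the original file): the same connection can
# also read the player position and place a block next to it, e.g.
#     pos = mc.player.getTilePos()
#     mc.setBlock(pos.x, pos.y, pos.z - 1, 1)   # block id 1 = stone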
| 22.5 | 33 | 0.788889 |
c99b1c8bbe063bb3a62611598e7d312d2aee21ba
| 298 |
py
|
Python
|
spht/urls.py
|
consbio/spht
|
96ec6a0931851b33eace08720d4d18ab34775a52
|
[
"BSD-2-Clause"
] | 1 |
2019-08-04T21:13:41.000Z
|
2019-08-04T21:13:41.000Z
|
spht/urls.py
|
consbio/spht
|
96ec6a0931851b33eace08720d4d18ab34775a52
|
[
"BSD-2-Clause"
] | 23 |
2018-04-12T20:43:15.000Z
|
2022-02-10T12:10:53.000Z
|
spht/urls.py
|
consbio/spht
|
96ec6a0931851b33eace08720d4d18ab34775a52
|
[
"BSD-2-Clause"
] | null | null | null |
from django.conf.urls import url
from django.views.generic import TemplateView
from spht.views import IntersectView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='spht/tool.html')),
url(r'^intersect/tiles/(?P<z>\d+)/(?P<x>\d+)/(?P<y>\d+).png$', IntersectView.as_view())
]
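# Hedged usage note (not part of the original file): the second pattern captures
# slippy-map tile coordinates, so GET /intersect/tiles/4/7/5.png is dispatched to
# IntersectView.as_view() with kwargs z='4', x='7', y='5' (named groups arrive as strings).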
| 29.8 | 91 | 0.697987 |
094ea321a49ca2dc60c8a40c13f143c9e2cd5be6
| 5,306 |
py
|
Python
|
kts/ui/static.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 18 |
2019-02-14T13:10:07.000Z
|
2021-11-26T07:10:13.000Z
|
kts/ui/static.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 2 |
2019-02-17T14:06:42.000Z
|
2019-09-15T18:05:54.000Z
|
kts/ui/static.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 2 |
2019-09-15T13:12:42.000Z
|
2020-04-15T14:05:54.000Z
|
CSS_STYLE = """
.kts {{
line-height: 1.6;
}}
.kts * {{
box-sizing: content-box;
}}
.kts-wrapper {{
display: inline-flex;
flex-direction: column;
background-color: {first};
padding: 10px;
border-radius: 20px;
}}
.kts-wrapper-border {{
border: 0px solid {second};
}}
.kts-pool {{
display: flex;
flex-wrap: wrap;
background-color: {second};
padding: 5px;
border-radius: 20px;
margin: 5px;
}}
.kts-field {{
text-align: left;
border-radius: 15px;
padding: 5px 15px;
margin: 5px;
display: inline-block;
}}
.kts-field-bg {{
background-color: {second};
}}
.kts-field-bold {{
font-weight: bold;
}}
.kts-field-third {{
color: {third};
}}
.kts-field-accent {{
color: {accent};
}}
.kts-field-bg:hover {{
background-color: {fourth};
}}
.kts-annotation {{
text-align: left;
margin-left: 20px;
margin-bottom: -5px;
display: inline-block;
color: {third};
}}
.kts-title {{
text-align: center;
display: inline-block;
font-weight: bold;
color: {third};
}}
.kts-code {{
background-color: {second};
text-align: left;
border-radius: 15px;
padding: 0.5em 15px;
margin: 5px;
color: white;
display: inline-block;
}}
.kts-code:hover {{
background-color: {fourth};
}}
.kts-code > pre {{
background-color: {second};
overflow: auto;
white-space: pre-wrap;
}}
.kts-code:hover > pre {{
background-color: {fourth};
}}
.kts-output {{
background-color: {second};
text-align: left;
border-radius: 15px;
padding: 5px 15px;
margin: 5px;
font-weight: bold;
font-family: monospace;
color: {accent};
overflow: auto;
max-height: 4.8em;
display: flex;
flex-direction: column-reverse;
}}
.kts-df {{
background-color: {second};
text-align: left;
border-radius: 15px;
padding: 5px 15px;
margin: 5px;
display: inline-block;
color: {accent};
}}
.kts-title-with-cross {{
display: grid;
grid-template-columns: 1em auto 1em;
margin-left: 5px;
margin-right: 5px;
}}
.kts-cross-circle {{
background-color: {second};
width: 1em;
height: 1em;
position: relative;
border-radius: 50%;
cursor: pointer;
z-index: 2;
margin-top: 2px;
max-width: none;
}}
.kts-cross-before,
.kts-cross-after {{
background-color: {third};
content: '';
position: absolute;
width: 0.75em;
height: 2px;
border-radius: 0;
top: calc((1em - 2px) / 2);
z-index: 0;
}}
.kts-cross-before {{
-webkit-transform: rotate(-45deg);
-moz-transform: rotate(-45deg);
transform: rotate(-45deg);
left: calc(1em / 8);
}}
.kts-cross-after {{
-webkit-transform: rotate(-135deg);
-moz-transform: rotate(-135deg);
transform: rotate(-135deg);
right: calc(1em / 8);
}}
#kts-hidden {{
display: none
}}
.kts-thumbnail {{
margin: 0;
cursor: pointer;
}}
.kts-thumbnail-first {{
background-color: {first};
}}
.kts-thumbnail-second {{
background-color: {second};
}}
#kts-collapsible {{
-webkit-transition: max-height {anim_height}, padding {anim_padding};
-moz-transition: max-height {anim_height}, padding {anim_padding};
-ms-transition: max-height {anim_height}, padding {anim_padding};
-o-transition: max-height {anim_height}, padding {anim_padding};
transition: max-height {anim_height}, padding {anim_padding};
padding: 0;
margin: 2px;
align-self: flex-start;
max-height: 100px;
overflow: hidden;
}}
.kts-check {{
display: none;
}}
.kts-check:checked + #kts-collapsible {{
padding: 10px;
max-height: {max_height_expanded};
}}
.kts-check:checked + #kts-collapsible > #kts-hidden {{
display: inline-flex;
}}
.kts-check:checked + #kts-collapsible > .kts-thumbnail {{
display: none;
}}
.kts-check:checked + .kts-wrapper-border {{
border: 2px solid {second};
}}
.kts-check-outer {{
display: none;
}}
.kts-check-outer:checked + #kts-collapsible {{
padding: 10px;
max-height: {max_height_expanded};
}}
.kts-check-outer:checked + #kts-collapsible > #kts-hidden {{
display: inline-flex;
}}
.kts-check-outer:checked + #kts-collapsible > .kts-thumbnail {{
display: none;
}}
.kts-check-outer:checked + .kts-wrapper-border {{
border: 2px solid {second};
}}
.kts-inner-wrapper {{
flex-direction: column;
}}
.kts-progressbar-wrapper {{
display: flex;
flex-direction: row;
align-items: center;
height: 1.6em;
}}
.kts-progressbar-outer {{
box-sizing: padding-box;
display: flex;
flex-direction: row;
background-color: {second};
align-items: center;
padding: 3px;
border-radius: 15px;
width: 100%;
}}
.kts-progressbar-inner {{
background-color: {third};
height: 0.7em;
border-radius: 15px;
}}
.kts-hbar-container {{
display: block;
position: relative;
height: min(calc(100% - 3px), 1.5em);
margin: 2px;
}}
.kts-hbar {{
position: absolute;
display: inline-block;
background-color: {third};
text-align: left;
height: 100%;
border-radius: 15px;
}}
.kts-hbar-line {{
position: absolute;
display: inline-block;
background-color: {accent};
text-align: left;
height: 1px;
top: 50%;
}}
.kts-inner-column {{
display: flex;
flex-direction: column;
padding: auto;
}}
.kts-row {{
display: flex;
flex-direction: row;
}}
.kts-hoverable-line, .kts-hoverable-line * {{
pointer-events: all;
transition: all 0.1s ease-out;
}}
.kts-hoverable-line:hover * {{
stroke: {second_accent};
stroke-width: 10;
}}
"""
| 19.224638 | 72 | 0.650773 |
0959f644ddbc06b75da28e53ec3916db243825e1
| 2,087 |
py
|
Python
|
ecrire_json.py
|
Maxim01/Programmes
|
dbe5b83b3c65776ccc00049793fa85313fb76065
|
[
"Apache-2.0"
] | null | null | null |
ecrire_json.py
|
Maxim01/Programmes
|
dbe5b83b3c65776ccc00049793fa85313fb76065
|
[
"Apache-2.0"
] | null | null | null |
ecrire_json.py
|
Maxim01/Programmes
|
dbe5b83b3c65776ccc00049793fa85313fb76065
|
[
"Apache-2.0"
] | null | null | null |
import json
import time
import subprocess
import sys
MAC_ARG = "VIDE"
ACTION_ARG = "VIDE"
ADD_ARG = "VIDE"
MDP_ARG = "VIDE"
DEST_ARG = "VIDE"
MDP_SERR_ARG = "VIDE"
def mdp_serrure():
global MAC_ARG
global MDP_SERR_ARG
with open('/home/Devismes_Bridge/Equipements/' + MAC_ARG + '/Pass.json') as f:
dataa = json.load(f)
dataa['Password']['Pass'] = MDP_SERR_ARG
with open('/home/Devismes_Bridge/Equipements/' + MAC_ARG + '/Pass.json', 'w') as f:
json.dump(dataa, f, indent=2)
def mail_dest():
global DEST_ARG
with open('/home/Devismes_Bridge/JSON_List/mail.json') as f:
dataa = json.load(f)
    print("OK: ", DEST_ARG)
dataa['mail']['Dest'] = DEST_ARG
with open('/home/Devismes_Bridge/JSON_List/mail.json', 'w') as f:
json.dump(dataa, f, indent=2)
def mail_origine(): # SQL database !!!!
global ADD_ARG
global MDP_ARG
with open('/home/Devismes_Bridge/JSON_List/mail.json') as f:
dataa = json.load(f)
dataa['mail']['adresse'] = ADD_ARG
with open('/home/Devismes_Bridge/JSON_List/mail.json', 'w') as f:
json.dump(dataa, f, indent=2)
def Arguments():
global MAC_ARG
global ADD_ARG
global MDP_ARG
global DEST_ARG
global MDP_SERR_ARG
    print("Arguments: ", sys.argv)
    print("Number of arguments: ", len(sys.argv))
    if (len(sys.argv) == 4) and (sys.argv[1] == '1'): # update the selected lock's password
        print("updating lock password")
MAC_ARG = sys.argv[2]
MDP_SERR_ARG = sys.argv[3]
mdp_serrure()
    if (len(sys.argv) == 3) and (sys.argv[1] == '2'): # update the destination e-mail address
        print("updating destination e-mail")
DEST_ARG = sys.argv[2]
mail_dest()
    if (len(sys.argv) == 3) and (sys.argv[1] == '3'): # update the source e-mail address and password
        print("updating source e-mail and password")
ADD_ARG = sys.argv[2]
mail_origine()
def main():
    print("MAIN")
Arguments()
if __name__ == "__main__":
main()
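# Hedged usage sketch (assumed CLI, inferred from the argument handling above):
#   python ecrire_json.py 1 <lock_mac> <new_lock_password>
#   python ecrire_json.py 2 <destination_email>
#   python ecrire_json.py 3 <source_email>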
| 22.202128 | 106 | 0.615716 |
1195d97ce7b7fc191ee2c37ce674f6e799bdf4b2
| 107 |
py
|
Python
|
main.ru.py
|
vv31415926/python_lessons_01_4
|
f5e67d008a5401335c7b5589d9dacc125856560d
|
[
"MIT"
] | null | null | null |
main.ru.py
|
vv31415926/python_lessons_01_4
|
f5e67d008a5401335c7b5589d9dacc125856560d
|
[
"MIT"
] | null | null | null |
main.ru.py
|
vv31415926/python_lessons_01_4
|
f5e67d008a5401335c7b5589d9dacc125856560d
|
[
"MIT"
] | null | null | null |
s = input('Enter your full name, separated by spaces: ')
lst = s.split()
print('Hello,')
for si in lst:
    print(si)
| 17.833333 | 38 | 0.598131 |
e115b2e949a11782a5bb56a5b2bb6a3795f7d276
| 330 |
py
|
Python
|
menucard/admin.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | 1 |
2021-01-23T21:42:10.000Z
|
2021-01-23T21:42:10.000Z
|
menucard/admin.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | null | null | null |
menucard/admin.py
|
baniasbaabe/happy-qr
|
bf44ac19306ea6405cc7c9a100e6f83afca125b4
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from menucard.models import *
admin.site.register(Vorspeise)
admin.site.register(Hauptspeise)
admin.site.register(Nachspeise)
admin.site.register(Snacks)
admin.site.register(AlkoholfreieDrinks)
admin.site.register(AlkoholhaltigeDrinks)
admin.site.register(Besucher)
| 25.384615 | 41 | 0.833333 |
0183624fde61b9b8bb023787016c964c88412b6b
| 170 |
py
|
Python
|
flask/app/views.py
|
hou2zi0/flask-app-docker
|
0e51b1f00201fc6eb46a62d0d8f2701bc02d4031
|
[
"MIT"
] | null | null | null |
flask/app/views.py
|
hou2zi0/flask-app-docker
|
0e51b1f00201fc6eb46a62d0d8f2701bc02d4031
|
[
"MIT"
] | null | null | null |
flask/app/views.py
|
hou2zi0/flask-app-docker
|
0e51b1f00201fc6eb46a62d0d8f2701bc02d4031
|
[
"MIT"
] | null | null | null |
from app import app
@app.route('/')
def index():
return "Hello from Flask! 🐵"
@app.route('/affe')
def affe():
    return "Hello from Flask! The monkey says hello! 🐵"
| 17 | 49 | 0.611765 |
0968faab53e0aa82c8b7c026041088ebbd25206c
| 2,274 |
py
|
Python
|
python/my_sql_conn.py
|
EstherLacan/jiangfw
|
a449b1925742873c76dc1b3284aedb359204bc76
|
[
"Apache-2.0"
] | 1 |
2020-07-29T16:43:46.000Z
|
2020-07-29T16:43:46.000Z
|
python/my_sql_conn.py
|
EstherLacan/jiangfw
|
a449b1925742873c76dc1b3284aedb359204bc76
|
[
"Apache-2.0"
] | null | null | null |
python/my_sql_conn.py
|
EstherLacan/jiangfw
|
a449b1925742873c76dc1b3284aedb359204bc76
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
import MySQLdb
class DbFunctions(object):
"""
    Database connection helper
"""
def __init__(self, server, username, password, dbname):
self.server = server
self.username = username
self.password = password
self.dbname = dbname
self.db = None
self.cur = None
def connection_open(self):
self.db = MySQLdb.connect(host=self.server, user=self.username, passwd=self.password, db=self.dbname)
self.cur = self.db.cursor()
def connection_close(self):
self.db.close()
    def mysql_qry(self, sql, is_select):  # 1 for select and 0 for insert/update/delete
self.connection_open()
try:
self.cur.execute(sql)
            if is_select:
return self.cur.fetchall()
else:
self.db.commit()
return True
except MySQLdb.Error, e:
try:
print "Mysql Error:- " + str(e)
except IndexError:
print "Mysql Error:- " + str(e)
self.connection_close()
def mysql_insert(self, table, fields, values):
sql = "INSERT INTO " + table + " (" + fields + ") VALUES (" + values + ")";
return self.mysql_qry(sql, 0)
def mysql_update(self, table, values, conditions):
sql = "UPDATE " + table + " SET " + values + " WHERE " + conditions
return self.mysql_qry(sql, 0)
def mysql_delete(self, table, conditions):
sql = "DELETE FROM " + table + " WHERE " + conditions
return self.mysql_qry(sql, 0)
def mysql_select(self, table):
sql = "SELECT * FROM " + table
return self.mysql_qry(sql, 1)
def insert_by_many(self, tablname, rows):
try:
# sql = 'INSERT INTO table values(%s,%s,%s)'
            # batch insert
sql = 'INSERT INTO ' + tablname + ' values(%s,%s,%s)'
self.connection_open()
self.cur.executemany(sql, rows)
self.db.commit()
except Exception as e:
print e
self.db.rollback()
self.connection_close()
print '[insert_by_many executemany] total:', len(rows)
# db = DbFunctions("localhost", "root", "Root@123", "db")
# result = db.mysql_qry("",1)
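# Hedged usage sketch (assumed table layout, not from the original repo):
# db.mysql_insert("users", "name, age", "'alice', 30")
# rows = db.mysql_select("users")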
| 29.921053 | 109 | 0.547054 |
61e5a5b59a9e4dbc5ab5ac9e9ca08a0245f2a295
| 3,267 |
py
|
Python
|
BigData_exp/exp3/exp3/my_html.py
|
DolorHunter/hfut-exp-archived
|
c67e26c1f4fba550c8974eaba10dfa302b928868
|
[
"BSD-2-Clause"
] | 12 |
2020-12-07T05:49:05.000Z
|
2022-03-25T09:09:36.000Z
|
BigData_exp/exp3/exp3/my_html.py
|
DolorHunter/hfut-exp
|
c67e26c1f4fba550c8974eaba10dfa302b928868
|
[
"BSD-2-Clause"
] | null | null | null |
BigData_exp/exp3/exp3/my_html.py
|
DolorHunter/hfut-exp
|
c67e26c1f4fba550c8974eaba10dfa302b928868
|
[
"BSD-2-Clause"
] | 1 |
2021-01-08T08:53:53.000Z
|
2021-01-08T08:53:53.000Z
|
import requests
# filename cannot be 'html.py' which will lead to conflict to bs!!
from bs4 import BeautifulSoup
import csv
# Get raw html info
def get_html_info(url):
try:
print('url:'+url)
re = requests.get(url, timeout=30)
re.raise_for_status()
re.encoding = 'utf-8'
print("Get raw info.")
return re.text
    except requests.RequestException:
        print("[Error] Failed to get HTML info!")
        return None
# Save raw html info to the file
def save_html_info(html_info):
    with open('raw_html_info.txt', 'w', encoding='utf-8') as file:
        file.write(html_info)
    print("Saved raw info to the file.")
# Extract structured info from the raw HTML
def re_html_info(raw_html_info):
# Using bs4 to extract raw info
soup = BeautifulSoup(raw_html_info, 'html.parser')
data = soup.find_all('tr')
# print(data) # test
ready_info = []
i = 0
for info in data:
if i < 2:
            # skip the title rows
i += 1
continue
else:
info = str(info)
re_info = {}
# Save serial numbers to list
serial_num_start = info.find(
'; height: 18.75pt">')
serial_num_end = info.find('</td>')
serial_num = info[serial_num_start+19: serial_num_end]
re_info['序号'] = serial_num # " ".join(serial_num.split())
# Save student names to list
student_name_start = info.find(
'left: medium none">', serial_num_end, len(info))
student_name_end = info.find('</td>', student_name_start, len(info))
student_name = info[student_name_start+19: student_name_end]
re_info['姓名'] = student_name # " ".join(student_name.split())
# Save school names to list
school_name_start = info.find(
'left: medium none">', student_name_end, len(info))
school_name_end = info.find('</td>', school_name_start, len(info))
school_name = info[school_name_start+19: school_name_end]
re_info['录取学校'] = school_name # " ".join(school_name.split())
# Save school types to list
school_types_start = info.find(
'left: medium none">', school_name_end, len(info))
school_types_end = info.find('</td>', school_types_start, len(info))
school_types = info[school_types_start+19: school_types_end]
re_info['大学类型'] = school_types # " ".join(school_types.split())
ready_info.append(re_info)
print("Info is ready.")
return ready_info
def save_to_csv(ready_info):
    with open('re_html_info.csv', 'wt', encoding='utf-16', newline='') as csvfile:
csvout = csv.DictWriter(csvfile, ['序号', '姓名', '录取学校', '大学类型'])
csvout.writeheader()
csvout.writerows(ready_info)
print("Save to csv.")
def main():
url = 'http://www.sszx.cn/jxjy/xkjs/201802/t20180205_8967.htm'
raw_html_info = get_html_info(url) # Get raw html info
save_html_info(raw_html_info) # Save raw html info to file
ready_info = re_html_info(raw_html_info) # re raw html info from file
print(ready_info)
save_to_csv(ready_info) # Save ready info to csv
if __name__ == '__main__':
main()
| 33.680412 | 80 | 0.603612 |
28b0d798dce294187b09ea0093ced120bf38031b
| 2,144 |
py
|
Python
|
Algorithms/Implementation/Bomberman.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/Bomberman.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/Bomberman.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
#coding:utf-8
R, C, N = map(int, raw_input().split())
grid_1 = [[] for i in xrange(R)]
bomb_list = []
for i in xrange(R):
row_list = list(raw_input())
grid_1[i] = row_list
for j in xrange(len(row_list)):
if row_list[j] == 'O':
bomb_list.append((i, j))
def is_vaild(x, y):
    return x >= 0 and x < R and y >= 0 and y < C  # and (x, y) not in bomb_list -- utterly stupid!
def have_bomb(x, y, grid):
if x+1 < R and grid[x+1][y] == 'O':
return 1
elif x-1 >= 0 and grid[x-1][y] == 'O':
return 1
elif y+1 < C and grid[x][y+1] == 'O':
return 1
elif y-1 >= 0 and grid[x][y-1] == 'O':
return 1
else:
return 0
if N % 2 == 0:
for _ in xrange(R):
print ''.join(['O'] * C)
else:
if N == 1:
for row in grid_1:
print ''.join(row)
else:
while 1:
grid_3 = [['O']*C for _ in xrange(R)]
for bomb in bomb_list:
#middle
x, y = bomb
grid_3[x][y] = '.'
#up
i, j = x-1, y
if is_vaild(i, j):
grid_3[i][j] = '.'
#down
i, j = x+1, y
if is_vaild(i, j):
grid_3[i][j] = '.'
#left
i, j = x, y-1
if is_vaild(i, j):
grid_3[i][j] = '.'
#right
i, j = x, y+1
if is_vaild(i, j):
grid_3[i][j] = '.'
if N/2 % 2 != 0:
for row in grid_3:
print ''.join(row)
break
for i in xrange(R):
row = []
for j in xrange(C):
if grid_3[i][j] == 'O':
row.append('.')
elif have_bomb(i, j, grid_3):
row.append('.')
else:
row.append('O')
print ''.join(row)
break
| 25.831325 | 83 | 0.35028 |
3a7d6a374822bd399b7fc85dc349d7dbd4212ce5
| 1,228 |
py
|
Python
|
pytestDemo/common/read_data.py
|
lthinktime/testdemo
|
509656d62535ed06e222c08671db11e31d9b3162
|
[
"Apache-2.0"
] | null | null | null |
pytestDemo/common/read_data.py
|
lthinktime/testdemo
|
509656d62535ed06e222c08671db11e31d9b3162
|
[
"Apache-2.0"
] | null | null | null |
pytestDemo/common/read_data.py
|
lthinktime/testdemo
|
509656d62535ed06e222c08671db11e31d9b3162
|
[
"Apache-2.0"
] | null | null | null |
import yaml
import json
from configparser import ConfigParser
from common.logger import logger
class MyConfigParser(ConfigParser):
    # Override ConfigParser.optionxform so option keys in .ini files are not automatically lower-cased
def __init__(self, defaults=None):
ConfigParser.__init__(self, defaults=defaults)
def optionxform(self, optionstr):
return optionstr
class ReadFileData():
def __init__(self):
pass
    def load_yaml(self, file_path):
        logger.info("Loading file {} ...".format(file_path))
        with open(file_path, encoding='utf-8') as f:
            data = yaml.safe_load(f)
        logger.info("Read data ==>> {}".format(data))
        return data
    def load_json(self, file_path):
        logger.info("Loading file {} ...".format(file_path))
        with open(file_path, encoding='utf-8') as f:
            data = json.load(f)
        logger.info("Read data ==>> {}".format(data))
        return data
    def load_ini(self, file_path):
        logger.info("Loading file {} ...".format(file_path))
        config = MyConfigParser()
        config.read(file_path, encoding="UTF-8")
        data = dict(config._sections)
        # print("Read data ==>> {}".format(data))
        return data
data = ReadFileData()
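# Hedged usage sketch (assumed file paths, not from the original repo):
# cfg = data.load_ini("config.ini")
# cases = data.load_yaml("testdata.yaml")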
| 28.55814 | 69 | 0.614821 |
a33ece80edcfc6ff8944ba4518931f91f0b1ccbe
| 2,094 |
py
|
Python
|
research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 1 |
2021-07-03T06:52:20.000Z
|
2021-07-03T06:52:20.000Z
|
research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | null | null | null |
research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 2 |
2019-09-01T06:17:04.000Z
|
2019-10-04T08:39:45.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
RandomCrop operator.
"""
from mindspore.dataset.vision import py_transforms
from mindspore.dataset.vision import py_transforms_util
from mindspore.dataset.vision import utils
class RandomCrop(py_transforms.RandomCrop):
"""
RandomCrop inherits from py_transforms.RandomCrop but derives/uses the
original image size as the output size.
Please refer to py_transforms.RandomCrop for argument specifications.
"""
def __init__(self, padding=4, pad_if_needed=False,
fill_value=0, padding_mode=utils.Border.CONSTANT):
# Note the `1` for the size argument is only set for passing the check.
super(RandomCrop, self).__init__(1, padding=padding, pad_if_needed=pad_if_needed,
fill_value=fill_value, padding_mode=padding_mode)
def __call__(self, img):
"""
Call method.
Args:
img (PIL image): Image to be padded and then randomly cropped back
to the same size.
Returns:
img (PIL image), Randomly cropped image.
"""
if not py_transforms_util.is_pil(img):
raise TypeError(
py_transforms_util.augment_error_message.format(type(img)))
return py_transforms_util.random_crop(
img, img.size, self.padding, self.pad_if_needed,
self.fill_value, self.padding_mode,
)
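# Hedged usage sketch (assumption, not part of the original file): compose the
# operator like any other py_transforms op; it pads and crops back to the input size.
# crop = RandomCrop(padding=4)
# cropped = crop(pil_image)  # pil_image: a PIL.Image; output keeps its original size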
| 36.736842 | 90 | 0.658548 |
a37c14fdb39ad32cb41eecf0cb0e42257fe19ade
| 617 |
py
|
Python
|
quant/common/log.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 7 |
2017-10-22T15:00:09.000Z
|
2019-09-19T11:45:43.000Z
|
quant/common/log.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 1 |
2018-01-19T16:19:40.000Z
|
2018-01-19T16:19:40.000Z
|
quant/common/log.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 5 |
2017-12-11T15:10:29.000Z
|
2018-12-21T17:40:58.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
from logging.handlers import RotatingFileHandler
def get_logger(log_name, level=logging.DEBUG):
"""
    Set up a rotating file logger.
    :param level: log level
    :param log_name: logger name, also used as the log file path
    :return: the configured logger
"""
logger = logging.getLogger(log_name)
logger.setLevel(level)
rt_handler = RotatingFileHandler(log_name, maxBytes=100 * 1024 * 1024, backupCount=10)
rt_handler.setLevel(level)
formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] %(message)s')
rt_handler.setFormatter(formatter)
logger.addHandler(rt_handler)
return logger
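# Hedged usage sketch (assumption): the rotating file is created under the
# given name, so a caller might do:
# logger = get_logger('quant.log')
# logger.info('engine started')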
| 25.708333 | 90 | 0.698541 |
6e9f8f97f56424abab696c62b47c86b133739767
| 784 |
py
|
Python
|
06 APIs, Scraping I/kommetarezaehlen.py
|
manuelapaganini/20_21_Workfile
|
5ec3637d18cbd73256b56682d9b99547e21a24d9
|
[
"MIT"
] | 6 |
2019-08-06T14:53:34.000Z
|
2020-10-16T19:44:16.000Z
|
06 APIs, Scraping I/kommetarezaehlen.py
|
manuelapaganini/20_21_Workfile
|
5ec3637d18cbd73256b56682d9b99547e21a24d9
|
[
"MIT"
] | 1 |
2020-06-25T09:46:58.000Z
|
2020-06-25T09:46:58.000Z
|
06 APIs, Scraping I/kommetarezaehlen.py
|
manuelapaganini/20_21_Workfile
|
5ec3637d18cbd73256b56682d9b99547e21a24d9
|
[
"MIT"
] | 2 |
2019-09-16T13:05:51.000Z
|
2019-09-27T09:07:49.000Z
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
import datetime
import sys
def kommentarezaehlen(url):
r = requests.get(url)
soup = BeautifulSoup(r.text,'xml')
storybox = soup.find_all('div', {'class':'text'})
lst = []
for elem in storybox:
        try:
            t = elem.find('h2').text
        except AttributeError:
            t = 'No title'
        try:
            k = elem.find('a', {'class': 'standard comments'}).text.replace("\n", "")
        except AttributeError:
            k = 'No comments'
mini_dict = {'Titel': t,
'Kommentar': k}
lst.append(mini_dict)
now = datetime.datetime.now()
return pd.DataFrame(lst).to_csv(str(now)+'-watson.csv')
if __name__ == "__main__":
kommentarezaehlen(sys.argv[1])
| 23.757576 | 84 | 0.567602 |
287b71be0094ff9c0a8a646a19e81197612eb1f3
| 3,673 |
py
|
Python
|
zplus_scraper/pipelines.py
|
tstaec/ZplusScraper
|
38c4f9534b8ee3822a80b48a6827ef4f52793c0b
|
[
"MIT"
] | null | null | null |
zplus_scraper/pipelines.py
|
tstaec/ZplusScraper
|
38c4f9534b8ee3822a80b48a6827ef4f52793c0b
|
[
"MIT"
] | 1 |
2021-07-04T12:02:57.000Z
|
2021-07-04T12:02:57.000Z
|
zplus_scraper/pipelines.py
|
tstaec/ZplusScraper
|
38c4f9534b8ee3822a80b48a6827ef4f52793c0b
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import mysql.connector
from scrapy.exceptions import NotConfigured
from database import create_database
class ZplusscraperPipeline:
def process_item(self, item, spider):
return item
class DatabasePipeline(object):
def __init__(self, db, user, passwd, host):
self.db = db
self.user = user
self.passwd = passwd
self.host = host
@classmethod
def from_crawler(cls, crawler):
db_settings = crawler.settings.getdict("DB_SETTINGS")
if not db_settings: # if we don't define db config in settings
raise NotConfigured # then raise error
db = db_settings['db']
user = db_settings['user']
passwd = db_settings['passwd']
host = db_settings['host']
return cls(db, user, passwd, host) # returning pipeline instance
def open_spider(self, spider):
print('open spider was called. Initializing database')
self.context = mysql.connector.connect(
user=self.user,
passwd=self.passwd,
host=self.host,
charset='utf8mb4',
use_unicode=True)
create_database(self.context, self.db)
def close_spider(self, spider):
print('closing spider')
self.context.close()
def process_item(self, item, spider):
existing_article = self.get_existing_article(item)
if existing_article is None:
article_id = self.save_article(item)
else:
article_id = existing_article[0]
if item['datazplus'] is not None and item['article_html'] is not None:
self.update_article(item)
self.save_scrape_run(item, article_id)
return item
def get_existing_article(self, article):
href = article['href']
if href is None:
return None
cursor = self.context.cursor(buffered=True)
sql_command = "SELECT id, title FROM articles WHERE href = %s"
        cursor.execute(sql_command, (href,))
result = cursor.fetchone()
cursor.close()
return result
def save_article(self, article):
cursor = self.context.cursor(buffered=True)
sql_command = """INSERT INTO articles (created, last_modified, title, href, article_html)
VALUES (%s, %s, %s, %s, %s)"""
str_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
cursor.execute(sql_command, (str_now, str_now, article['title'], article['href'], article['article_html']))
self.context.commit()
row_id = cursor.lastrowid
cursor.close()
return row_id
def update_article(self, article):
cursor = self.context.cursor(buffered=True)
        # scope the update to this article's row
        sql_command = """UPDATE articles SET last_modified = %s, article_html = %s
                         WHERE href = %s"""
        str_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        cursor.execute(sql_command, (str_now, article['article_html'], article['href']))
self.context.commit()
cursor.close()
return None
def save_scrape_run(self, article, article_id):
cursor = self.context.cursor(buffered=True)
sql_command = """INSERT INTO scrape_run (created, datazplus, article_id)
VALUES (%s, %s, %s)"""
str_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
cursor.execute(sql_command, (str_now, article['datazplus'], article_id))
self.context.commit()
cursor.close()
return None
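# Hedged configuration sketch (assumed values): from_crawler() above reads a
# DB_SETTINGS dict from the Scrapy project's settings.py, e.g.:
# DB_SETTINGS = {
#     'db': 'zplus', 'user': 'scraper', 'passwd': 'secret', 'host': 'localhost',
# }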
| 34.327103 | 115 | 0.586986 |
95f916a0d2f23768a5613ddf564dc02ba2c599ac
| 478 |
py
|
Python
|
ggit_platform/admin.py
|
girlsgoit/GirlsGoIT
|
447cd15c44ebee4af9e942a079d681be8683239f
|
[
"MIT"
] | 1 |
2019-02-27T21:20:54.000Z
|
2019-02-27T21:20:54.000Z
|
ggit_platform/admin.py
|
girlsgoit/GirlsGoIT
|
447cd15c44ebee4af9e942a079d681be8683239f
|
[
"MIT"
] | null | null | null |
ggit_platform/admin.py
|
girlsgoit/GirlsGoIT
|
447cd15c44ebee4af9e942a079d681be8683239f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from markdownx.admin import MarkdownxModelAdmin
from .models import Event
from .models import Member
from .models import MemberRole
from .models import Region
from .models import Story
from .models import Track
admin.site.register(Track, MarkdownxModelAdmin)
admin.site.register(Region)
admin.site.register(Member)
admin.site.register(MemberRole)
admin.site.register(Event, MarkdownxModelAdmin)
admin.site.register(Story, MarkdownxModelAdmin)
| 28.117647 | 47 | 0.83682 |
66ad35acb4d63dc9937ca5276e6b238cb6b79da1
| 10,058 |
py
|
Python
|
spider/got/manager/TweetManager.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | null | null | null |
spider/got/manager/TweetManager.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | 2 |
2021-03-31T18:54:16.000Z
|
2021-12-13T19:49:08.000Z
|
spider/got/manager/TweetManager.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | null | null | null |
import urllib,urllib2,json,re,datetime,sys,cookielib
from .. import models
from pyquery import PyQuery
import requests
import random
random.seed(1)
def fetch_activities(tweet_id):
retusers = []
favorusers = []
re_url = 'https://twitter.com/i/activity/retweeted_popup?id=%s'%(tweet_id)
favor_url = 'https://twitter.com/i/activity/favorited_popup?id=%s'%(tweet_id)
headers = {
'Host':"twitter.com",
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.%s'%(random.randint(0,999)),
'Accept':"application/json, text/javascript, */*; q=0.01",
'Accept-Language':"de,en-US;q=0.7,en;q=0.3",
'X-Requested-With':"XMLHttpRequest",
'Referer':'https://twitter.com/',
'Connection':"keep-alive",
}
re_users = PyQuery(requests.get(re_url,headers=headers).json()['htmlUsers'])('ol.activity-popup-users')
for re_user in re_users('div.account'):
userPQ = PyQuery(re_user)
userd = {
'screen_name':userPQ.attr('data-screen-name'),
'user_id':userPQ.attr('data-user-id'),
'data_name':userPQ.attr('data-name'),
'avatar_src':userPQ('img.avatar').attr('src'),
'userbadges':userPQ('span.UserBadges').text(),
'bio':userPQ('p.bio').text(),
}
retusers.append({userd['screen_name']:userd})
favor_users = PyQuery(requests.get(favor_url,headers=headers).json()['htmlUsers'])('ol.activity-popup-users')
for favor_user in favor_users('div.account'):
userPQ = PyQuery(favor_user)
userd = {
'screen_name':userPQ.attr('data-screen-name'),
'user_id':userPQ.attr('data-user-id'),
'data_name':userPQ.attr('data-name'),
'avatar_src':userPQ('img.avatar').attr('src'),
'userbadges':userPQ('span.UserBadges').text(),
'bio':userPQ('p.bio').text(),
}
favorusers.append({userd['screen_name']:userd})
return retusers,favorusers
def fetch_entities(tweetPQ):
hashtags = []
urls = []
for url in tweetPQ('p.js-tweet-text a'):
d = dict(url.items())
if d.has_key('data-expanded-url'): #d['class'] == 'twitter-timeline-link'
#pdb.set_trace()
urls.append({'href':d['href'],'expanded_url':d['data-expanded-url']})
if d['href'].startswith('/hashtag/'):
hashtags.append(d['href'].split('?')[0].split('/')[-1])
tweetPQ('p.js-tweet-text a.twitter-timeline-link').remove()
return hashtags,urls
def getTweet(tweetHTML):
tweetPQ = PyQuery(tweetHTML)
tweet = models.Tweet()
#base info
id = tweetPQ.attr("data-tweet-id")
conversation_id = tweetPQ.attr('data-conversation-id')
dateSec = int(tweetPQ("small.time span.js-short-timestamp").attr("data-time"))
#permalink = tweetPQ.attr("data-permalink-path")
#user
screen_name = tweetPQ.attr('data-screen-name')
user_id = tweetPQ.attr('data-user-id')
data_name = tweetPQ.attr('data-name')
avatar_src = tweetPQ('img.avatar').attr('src')
userbadges = tweetPQ('span.UserBadges').text()
#text
hashtags,urls = fetch_entities(tweetPQ)
mentions = tweetPQ.attr("data-mentions")
lang = tweetPQ("p.js-tweet-text").attr('lang')
raw_text = re.sub(r"\s+", " ", tweetPQ("p.js-tweet-text").text().replace('# ', '#').replace('@ ', '@'))
standard_text = re.sub(r"\s+", " ", tweetPQ("p.js-tweet-text").text().replace('# ', '').replace('@ ', ''))
tweetPQ('p.js-tweet-text')('a').remove()
tweetPQ('p.js-tweet-text')('img').remove()
clean_text = tweetPQ("p.js-tweet-text").text()
#media
quote_id = tweetPQ('div.QuoteTweet a.QuoteTweet-link').attr('data-conversation-id')
has_cards = tweetPQ.attr('data-has-cards')
card_url = tweetPQ('div.js-macaw-cards-iframe-container').attr('data-card-url')
img_src = tweetPQ('div.AdaptiveMedia-container img').attr('src')
video_src = tweetPQ('div.AdaptiveMedia-container video').attr('src')
geo = ''
geoSpan = tweetPQ('span.Tweet-geo')
if len(geoSpan) > 0:
geo = geoSpan.attr('title')
#action
retweet_id = tweetPQ.attr('data-retweet-id')
retweeter = tweetPQ.attr('data-retweeter')
#retusers,favorusers = fetch_activities(id)
replies = int(tweetPQ("span.ProfileTweet-action--reply span.ProfileTweet-actionCount").attr("data-tweet-stat-count").replace(",", ""))
retweets = int(tweetPQ("span.ProfileTweet-action--retweet span.ProfileTweet-actionCount").attr("data-tweet-stat-count").replace(",", ""))
favorites = int(tweetPQ("span.ProfileTweet-action--favorite span.ProfileTweet-actionCount").attr("data-tweet-stat-count").replace(",", ""))
## tweet model
tweet.id = id
tweet.conversation_id = conversation_id
tweet.is_reply = tweet.id != tweet.conversation_id
tweet.created_at = datetime.datetime.fromtimestamp(dateSec)
#tweet.permalink = 'https://twitter.com' + permalink
#user
tweet.user = {
'screen_name':screen_name,
'user_id':user_id,
'data_name':data_name,
'avatar_src':avatar_src,
'userbadges':userbadges,
}
#media
tweet.media = {
'quote_id':quote_id,
'has_cards':has_cards,
'card_url':card_url,
'img_src':img_src,
'video_src':video_src,
'geo':geo,
}
#text
tweet.hashtags = hashtags
tweet.urls = urls
tweet.mentions = mentions.split(' ') if mentions != None else None
tweet.lang = lang
tweet.raw_text = raw_text
tweet.standard_text = standard_text
#tweet.clean_text = clean_text
#action
tweet.action = {
#'retusers':retusers,
#'favorusers':favorusers,
'replies':replies,
'retweets':retweets,
'favorites':favorites,
'retweet_id':retweet_id,
'retweeter':retweeter,
'is_retweet':True if retweet_id != None else False,
}
return tweet
class TweetManager:
def __init__(self):
pass
@staticmethod
def getTweetsById(tweet_id):
url = 'https://twitter.com/xxx/status/%s'%(tweet_id)
headers = {
'Host':"twitter.com",
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.%s'%(random.randint(0,999)),
'Accept':"application/json, text/javascript, */*; q=0.01",
'Accept-Language':"de,en-US;q=0.7,en;q=0.3",
'X-Requested-With':"XMLHttpRequest",
'Referer':'https://twitter.com/',
'Connection':"keep-alive",
}
tweets = PyQuery(requests.get(url,headers=headers).content)('div.js-original-tweet')
for tweetHTML in tweets:
return getTweet(tweetHTML)
@staticmethod
def getTweets(tweetCriteria, refreshCursor='', receiveBuffer=None, bufferLength=100, proxy=None):
results = []
resultsAux = []
cookieJar = cookielib.CookieJar()
if hasattr(tweetCriteria, 'username') and (tweetCriteria.username.startswith("\'") or tweetCriteria.username.startswith("\"")) and (tweetCriteria.username.endswith("\'") or tweetCriteria.username.endswith("\"")):
tweetCriteria.username = tweetCriteria.username[1:-1]
active = True
while active:
json = TweetManager.getJsonReponse(tweetCriteria, refreshCursor, cookieJar, proxy)
if len(json['items_html'].strip()) == 0:
break
if not json.has_key('min_position'):
break
refreshCursor = json['min_position']
if refreshCursor == None:
break
tweets = PyQuery(json['items_html'])('div.js-stream-tweet')
if len(tweets) == 0:
break
for tweetHTML in tweets:
tweet = getTweet(tweetHTML)
if hasattr(tweetCriteria, 'sinceTimeStamp'):
if tweet.created_at < tweetCriteria.sinceTimeStamp:
active = False
break
if hasattr(tweetCriteria, 'untilTimeStamp'):
if tweet.created_at <= tweetCriteria.untilTimeStamp:
results.append(tweet.__dict__)
else:
results.append(tweet.__dict__)
#resultsAux.append(tweet)
if receiveBuffer and len(resultsAux) >= bufferLength:
receiveBuffer(resultsAux)
resultsAux = []
if tweetCriteria.maxTweets > 0 and len(results) >= tweetCriteria.maxTweets:
active = False
break
if receiveBuffer and len(resultsAux) > 0:
receiveBuffer(resultsAux)
return results
@staticmethod
def getJsonReponse(tweetCriteria, refreshCursor, cookieJar, proxy):
url = "https://twitter.com/i/search/timeline?q=%s&src=typd&max_position=%s"
urlGetData = ''
if hasattr(tweetCriteria, 'username'):
urlGetData += ' from:' + tweetCriteria.username
if hasattr(tweetCriteria, 'querySearch'):
urlGetData += ' ' + tweetCriteria.querySearch
if hasattr(tweetCriteria, 'near'):
urlGetData += "&near:" + tweetCriteria.near + " within:" + tweetCriteria.within
if hasattr(tweetCriteria, 'since'):
urlGetData += ' since:' + tweetCriteria.since
if hasattr(tweetCriteria, 'until'):
urlGetData += ' until:' + tweetCriteria.until
if hasattr(tweetCriteria, 'topTweets'):
if tweetCriteria.topTweets:
url = "https://twitter.com/i/search/timeline?q=%s&src=typd&max_position=%s"
if hasattr(tweetCriteria, 'tweetType'):
url = url + tweetCriteria.tweetType
url = url % (urllib.quote(urlGetData), refreshCursor)
ua = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.%s'%(random.randint(0,999))
headers = [
('Host', "twitter.com"),
('User-Agent', ua),
# Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36
#Mozilla/5.0 (Windows NT 6.1; Win64; x64)
('Accept', "application/json, text/javascript, */*; q=0.01"),
('Accept-Language', "de,en-US;q=0.7,en;q=0.3"),
('X-Requested-With', "XMLHttpRequest"),
('Referer', url),
('Connection', "keep-alive")
]
if proxy:
opener = urllib2.build_opener(urllib2.ProxyHandler({'http': proxy, 'https': proxy}), urllib2.HTTPCookieProcessor(cookieJar))
else:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))
opener.addheaders = headers
try:
response = opener.open(url)
jsonResponse = response.read()
except Exception,e:
print "Twitter weird response. Try to see on browser: https://twitter.com/search?q=%s&src=typd" % urllib.quote(urlGetData)
raise Exception(e.message)
#sys.exit()
#return None
dataJson = json.loads(jsonResponse)
return dataJson
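# Hedged usage sketch (assumed criteria object, mirroring the hasattr checks above):
# tweet = TweetManager.getTweetsById('1234567890')
# tweets = TweetManager.getTweets(criteria)  # criteria with username/since/until set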
| 34.094915 | 214 | 0.680155 |
06d235db1b1f699e83bac4c384e8ab53dcbb9ca9
| 103 |
py
|
Python
|
dtdocker/containers/__init__.py
|
oxsoftdev/dt-docker
|
8ba30bf5ca2a7b89f3c7658a7768b16dbf8514a5
|
[
"MIT"
] | null | null | null |
dtdocker/containers/__init__.py
|
oxsoftdev/dt-docker
|
8ba30bf5ca2a7b89f3c7658a7768b16dbf8514a5
|
[
"MIT"
] | null | null | null |
dtdocker/containers/__init__.py
|
oxsoftdev/dt-docker
|
8ba30bf5ca2a7b89f3c7658a7768b16dbf8514a5
|
[
"MIT"
] | null | null | null |
from MssqlContainer import MssqlContainer
from RedisContainers import RedisContainers, RedisContainer
| 25.75 | 59 | 0.893204 |
06d61d6128d3d24a44e5f423459d0349beadddf4
| 834 |
py
|
Python
|
Raspberry Pi Pico/Pi_Pico_TrafficLight.py
|
ckuehnel/MicroPython
|
c57d0df744fe5301e755bd139b6cc56d69c442fd
|
[
"MIT"
] | 1 |
2021-03-22T18:38:43.000Z
|
2021-03-22T18:38:43.000Z
|
Raspberry Pi Pico/Pi_Pico_TrafficLight.py
|
ckuehnel/MicroPython
|
c57d0df744fe5301e755bd139b6cc56d69c442fd
|
[
"MIT"
] | null | null | null |
Raspberry Pi Pico/Pi_Pico_TrafficLight.py
|
ckuehnel/MicroPython
|
c57d0df744fe5301e755bd139b6cc56d69c442fd
|
[
"MIT"
] | 1 |
2021-02-06T10:07:36.000Z
|
2021-02-06T10:07:36.000Z
|
# Pi_Pico_TrafficLight.py
# Controlling Neopixel by PIO to simulate a traffic light
# using ws2812b library by benevpi
# https://github.com/benevpi/pico_python_ws2812b
import time
import ws2812b
NUM_PIX = 3 # this is for M5Stack RGB LED
PIN_NUM = 16
light = ws2812b.ws2812b(NUM_PIX, 0, PIN_NUM)
RED = (255, 0, 0)
YELLOW = (255, 150, 0)
GREEN = (0, 255, 0)
BLACK = (0, 0, 0)
COLORS = (RED, YELLOW, GREEN, BLACK)
def lights(L0, L1, L2, t):
color = L0
light.set_pixel(0, color[0], color[1], color[2])
color = L1
light.set_pixel(1, color[0], color[1], color[2])
color = L2
light.set_pixel(2, color[0], color[1], color[2])
light.show()
time.sleep(t)
while True:
lights(RED, BLACK, BLACK, 2)
lights(RED, YELLOW, BLACK, 1)
lights(BLACK, BLACK, GREEN, 3)
lights(BLACK, YELLOW, BLACK, 2)
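# Hedged hardware note (assumption): the loop above drives the 3-pixel strip
# defined by NUM_PIX/PIN_NUM; adjust both values for other ws2812b hardware.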
| 24.529412 | 57 | 0.655875 |
b081a38729addd1e06a8d879010ab8225a044073
| 9,011 |
py
|
Python
|
research/nlp/tprr/src/reader_downstream.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/nlp/tprr/src/reader_downstream.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/nlp/tprr/src/reader_downstream.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""downstream Model for reader"""
import numpy as np
from mindspore import nn, ops
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P
from mindspore import dtype as mstype
dst_type = mstype.float16
dst_type2 = mstype.float32
class Linear(nn.Cell):
"""module of reader downstream"""
def __init__(self, linear_weight_shape, linear_bias_shape):
"""init function"""
super(Linear, self).__init__()
self.matmul = nn.MatMul()
self.matmul_w = Parameter(Tensor(np.random.uniform(0, 1, linear_weight_shape).astype(np.float32)),
name=None)
self.add = P.Add()
self.add_bias = Parameter(Tensor(np.random.uniform(0, 1, linear_bias_shape).astype(np.float32)), name=None)
self.relu = nn.ReLU()
def construct(self, hidden_state):
"""construct function"""
output = self.matmul(ops.Cast()(hidden_state, dst_type), ops.Cast()(self.matmul_w, dst_type))
output = self.add(ops.Cast()(output, dst_type2), self.add_bias)
output = self.relu(output)
return output
class BertLayerNorm(nn.Cell):
"""Normalization module of reader downstream"""
def __init__(self, bert_layer_norm_weight_shape, bert_layer_norm_bias_shape, eps=1e-12):
"""init function"""
super(BertLayerNorm, self).__init__()
self.reducemean = P.ReduceMean(keep_dims=True)
self.sub = P.Sub()
self.pow = P.Pow()
self.add = P.Add()
self.sqrt = P.Sqrt()
self.div = P.Div()
self.mul = P.Mul()
self.variance_epsilon = eps
self.bert_layer_norm_weight = Parameter(Tensor(np.random.uniform(0, 1, bert_layer_norm_weight_shape)
.astype(np.float32)), name=None)
self.bert_layer_norm_bias = Parameter(Tensor(np.random.uniform(0, 1, bert_layer_norm_bias_shape)
.astype(np.float32)), name=None)
def construct(self, x):
"""construct function"""
u = self.reducemean(x, -1)
s = self.reducemean(self.pow(self.sub(x, u), 2), -1)
x = self.div(self.sub(x, u), self.sqrt(self.add(s, self.variance_epsilon)))
output = self.mul(self.bert_layer_norm_weight, x)
output = self.add(output, self.bert_layer_norm_bias)
return output
class SupportingOutputLayer(nn.Cell):
"""module of reader downstream"""
def __init__(self, linear_1_weight_shape, linear_1_bias_shape, bert_layer_norm_weight_shape,
bert_layer_norm_bias_shape):
"""init function"""
super(SupportingOutputLayer, self).__init__()
self.linear_1 = Linear(linear_weight_shape=linear_1_weight_shape,
linear_bias_shape=linear_1_bias_shape)
self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=bert_layer_norm_weight_shape,
bert_layer_norm_bias_shape=bert_layer_norm_bias_shape)
self.matmul = nn.MatMul()
self.matmul_w = Parameter(Tensor(np.random.uniform(0, 1, (8192, 1)).astype(np.float32)), name=None)
def construct(self, x):
"""construct function"""
output = self.linear_1(x)
output = self.bert_layer_norm(output)
output = self.matmul(ops.Cast()(output, dst_type), ops.Cast()(self.matmul_w, dst_type))
return ops.Cast()(output, dst_type2)
class PosOutputLayer(nn.Cell):
"""module of reader downstream"""
def __init__(self, linear_weight_shape, linear_bias_shape, bert_layer_norm_weight_shape,
bert_layer_norm_bias_shape):
"""init function"""
super(PosOutputLayer, self).__init__()
self.linear_1 = Linear(linear_weight_shape=linear_weight_shape,
linear_bias_shape=linear_bias_shape)
self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=bert_layer_norm_weight_shape,
bert_layer_norm_bias_shape=bert_layer_norm_bias_shape)
self.matmul = nn.MatMul()
self.linear_2_weight = Parameter(Tensor(np.random.uniform(0, 1, (4096, 1)).astype(np.float32)), name=None)
self.add = P.Add()
self.linear_2_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)
def construct(self, state):
"""construct function"""
output = self.linear_1(state)
output = self.bert_layer_norm(output)
output = self.matmul(ops.Cast()(output, dst_type), ops.Cast()(self.linear_2_weight, dst_type))
output = self.add(ops.Cast()(output, dst_type2), self.linear_2_bias)
return output
class MaskInvalidPos(nn.Cell):
"""module of reader downstream"""
def __init__(self):
"""init function"""
super(MaskInvalidPos, self).__init__()
self.squeeze = P.Squeeze(2)
self.sub = P.Sub()
self.mul = P.Mul()
def construct(self, pos_pred, context_mask):
"""construct function"""
output = self.squeeze(pos_pred)
invalid_pos_mask = self.mul(self.sub(1.0, context_mask), 1e30)
output = self.sub(output, invalid_pos_mask)
return output
class Reader_Downstream(nn.Cell):
"""Downstream model for reader"""
def __init__(self):
"""init function"""
super(Reader_Downstream, self).__init__()
self.add = P.Add()
self.para_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)
self.para_output_layer = SupportingOutputLayer(linear_1_weight_shape=(4096, 8192),
linear_1_bias_shape=(8192,),
bert_layer_norm_weight_shape=(8192,),
bert_layer_norm_bias_shape=(8192,))
self.sent_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)
self.sent_output_layer = SupportingOutputLayer(linear_1_weight_shape=(4096, 8192),
linear_1_bias_shape=(8192,),
bert_layer_norm_weight_shape=(8192,),
bert_layer_norm_bias_shape=(8192,))
self.start_output_layer = PosOutputLayer(linear_weight_shape=(4096, 4096),
linear_bias_shape=(4096,),
bert_layer_norm_weight_shape=(4096,),
bert_layer_norm_bias_shape=(4096,))
self.end_output_layer = PosOutputLayer(linear_weight_shape=(4096, 4096),
linear_bias_shape=(4096,),
bert_layer_norm_weight_shape=(4096,),
bert_layer_norm_bias_shape=(4096,))
self.mask_invalid_pos = MaskInvalidPos()
self.gather_input_weight = Tensor(np.array(0))
self.gather = P.Gather()
self.type_linear_1 = nn.Dense(in_channels=4096, out_channels=4096, has_bias=True)
self.relu = nn.ReLU()
self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=(4096,), bert_layer_norm_bias_shape=(4096,))
self.type_linear_2 = nn.Dense(in_channels=4096, out_channels=3, has_bias=True)
def construct(self, para_state, sent_state, state, context_mask):
"""construct function"""
para_logit = self.para_output_layer(para_state)
para_logit = self.add(para_logit, self.para_bias)
sent_logit = self.sent_output_layer(sent_state)
sent_logit = self.add(sent_logit, self.sent_bias)
start = self.start_output_layer(state)
start = self.mask_invalid_pos(start, context_mask)
end = self.end_output_layer(state)
end = self.mask_invalid_pos(end, context_mask)
cls_emb = self.gather(state, self.gather_input_weight, 1)
q_type = self.type_linear_1(cls_emb)
q_type = self.relu(q_type)
q_type = self.bert_layer_norm(q_type)
q_type = self.type_linear_2(q_type)
return q_type, start, end, para_logit, sent_logit
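# Hedged usage sketch (tensor shapes are assumptions inferred from the 4096
# hidden size above, not from the original repo):
# model = Reader_Downstream()
# q_type, start, end, para_logit, sent_logit = model(
#     para_state, sent_state, state, context_mask)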
| 46.932292 | 118 | 0.619021 |
7c08672a1f4b3a7a17149a2b57e2b2a120ca8857
| 1,046 |
py
|
Python
|
src/onegov/wtfs/upgrade.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/wtfs/upgrade.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/wtfs/upgrade.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
""" Contains upgrade tasks that are executed when the application is being
upgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`.
"""
from onegov.core.upgrade import upgrade_task
@upgrade_task('Add payment types')
def add_payment_types(context):
session = context.session
if context.has_table('wtfs_payment_type'):
query = session.execute('SELECT count(*) FROM wtfs_payment_type')
if not query.scalar():
session.execute("""
INSERT INTO wtfs_payment_type ("name", "price_per_quantity")
VALUES ('normal', 700), ('spezial', 850);
""")
query = session.execute("""
UPDATE groups
SET meta = CASE
WHEN meta @> '{"_price_per_quantity"\\:850}'::jsonb
THEN jsonb_set(meta, '{payment_type}', '"spezial"')
ELSE jsonb_set(meta, '{payment_type}', '"normal"')
END
WHERE groups.meta ? '_price_per_quantity';
""")
| 36.068966 | 76 | 0.578394 |
b01c8f325df0368355928771fb0cdac6ea1b83fc
| 553 |
py
|
Python
|
exercises/es/test_01_04.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/es/test_01_04.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/es/test_01_04.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
    assert (
        "if token.like_num" in __solution__
    ), "Are you checking the token's like_num attribute?"
    assert (
        'next_token.text == "%"' in __solution__
    ), "Are you checking whether the next token's text is a percent sign?"
    __msg__.good(
        "Well done! As you can see, there are many powerful analyses you can "
        "do using tokens and their attributes."
    )
| 36.866667 | 120 | 0.65642 |
05a8c4fd95b6efca543f543621dd2cffe6ce31a4
| 1,092 |
py
|
Python
|
DataProcess/my_utils/myplot.py
|
ZhangQiHang-98/RFID_Scirpt
|
0f74087add4cf16e2d201ad4f31cc1abd287db7e
|
[
"MIT"
] | 3 |
2021-12-24T04:52:03.000Z
|
2021-12-27T02:14:49.000Z
|
DataProcess/my_utils/myplot.py
|
ZhangQiHang-98/RFID_Scirpt
|
0f74087add4cf16e2d201ad4f31cc1abd287db7e
|
[
"MIT"
] | null | null | null |
DataProcess/my_utils/myplot.py
|
ZhangQiHang-98/RFID_Scirpt
|
0f74087add4cf16e2d201ad4f31cc1abd287db7e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project :DataProcess
@File :myplot.py
@Author :Zhang Qihang
@Date :2021/11/8 14:41
"""
import seaborn as sns
import matplotlib.pyplot as plt
import os
import pandas as pd
import glob
import config
import myunwrap
import numpy as np
import scipy.constants as C
from sklearn.preprocessing import scale
def phase_heatmap(phase_mat):
sns.set_context({"figure.figsize": (8, 8)})
sns.heatmap(phase_mat)
plt.show()
def normalization(data):
_range = np.max(data) - np.min(data)
return (data - np.min(data)) / _range
def phase_scatter(df):
phases = df["phase"].values
times = df["time"].values
plt.plot(times, phases)
plt.show()
if __name__ == '__main__':
test_path = glob.glob(os.path.join(config.PEN_PATH, '*.csv'))
file_path = "../20220105104342normal.csv"
df = pd.read_csv(file_path, header=None)
df.columns = config.COMMON_COLUMNS
df["phase"] = 2 * C.pi - df["phase"]
phase_scatter(df)
# print(test_path)
# for path in test_path:
# phase_scatter(path)
| 22.285714 | 65 | 0.667582 |
bbd9406456d0020c9cdbba07ff1aed2650cbfe2a
| 1,631 |
py
|
Python
|
barcode.py
|
T9C5F/packstation-barcode
|
3d7e719d6931d2d9e4834e7aaef2a892bd564aa1
|
[
"MIT"
] | 45 |
2018-11-22T10:18:11.000Z
|
2021-06-17T07:20:25.000Z
|
barcode.py
|
T9C5F/packstation-barcode
|
3d7e719d6931d2d9e4834e7aaef2a892bd564aa1
|
[
"MIT"
] | 2 |
2018-11-18T14:55:43.000Z
|
2020-10-14T15:15:08.000Z
|
barcode.py
|
T9C5F/packstation-barcode
|
3d7e719d6931d2d9e4834e7aaef2a892bd564aa1
|
[
"MIT"
] | 4 |
2018-12-27T22:02:13.000Z
|
2020-10-18T14:29:57.000Z
|
#!/usr/bin/python3
# coding: utf-8
import os
import luhn # sudo apt install python3-pip && sudo pip3 install luhn
# Deutsche Post DHL simply removed the card reader from Packstation
# terminals and replaced it with a barcode scanner, without automatically
# sending Goldcard owners a new card in time. You can still pick up parcels
# at such stations, but now you have to type in the PostNummer manually.
# This script generates the corresponding barcode, because a new card has to
# be requested from DHL, the old card is blocked immediately, and the new
# one can take up to 2 weeks to arrive. That is about as customer-unfriendly
# as it gets.
# Security-wise this is not a violation: you can pick up parcels by typing
# in the PostNummer anyway, and the conversion from PostNummer to barcode
# has already been described many times on the net.
# The 16-digit ITF barcode has a fairly simple structure:
# "3" + [as many '0's as needed to pad the number to 16 digits]
#     + [PostNummer*631] + [Luhn check digit over 'PostNummer*631']
# http://www.frei-tag.com/index.php?/archives/445-DHL-Packstation-ohne-Goldcard.html
def generate(number):
postnummer = int(number)
number = postnummer*631
luhnnr = luhn.generate(str(number))
number = "3" + (str(number)+str(luhnnr)).zfill(15)
return(str(number))
# Example based on a randomly generated number:
# 20281557 yields 3000127976624677
assert generate("20281557") == "3000127976624677"
# Interleaved 2 of 5 ITF barcode
os.system("xdg-open https://barcode.tec-it.com/de/Code25IL?data=" + generate(20281557))
| 41.820513 | 87 | 0.774985 |
a548d4044babcf769d72c98f4c36e053030cd2ea
| 214 |
py
|
Python
|
src/bo4e/enum/landescode.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/enum/landescode.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/enum/landescode.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
"""
The ISO country code as an enumeration.
"""
from enum import Enum
from iso3166 import countries
alpha2codes = {c.alpha2: c.alpha2 for c in countries}
Landescode = Enum("Landescode", alpha2codes) # type: ignore
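# Hedged usage sketch: members are ISO 3166 alpha-2 codes that map to
# themselves, e.g. Landescode.DE.value == "DE" and Landescode["CH"].name == "CH".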
| 19.454545 | 60 | 0.742991 |
a553923330965bb513b635f568c86575b16db188
| 1,410 |
py
|
Python
|
WiSe-2122/Wiederholung/Gruppe-C/Online-Banking.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | 1 |
2022-03-23T09:40:39.000Z
|
2022-03-23T09:40:39.000Z
|
WiSe-2122/Wiederholung/Gruppe-C/Online-Banking.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
WiSe-2122/Wiederholung/Gruppe-C/Online-Banking.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
ueberweisungslimit = 50_000
kontostand = 3_500
ungueltiger_betrag = True
# Function definition
def trenner(anzahl):
    for zaehler in range(anzahl):
        print("-", end="")
    print()
# Function call
trenner(50)
print("Welcome to DKB online banking")
print("Your transfer limit is", ueberweisungslimit, "€")
print("Your current balance is", kontostand, "€")
trenner(50)
while ungueltiger_betrag:
    try:
        betrag = round(float(input("Please enter a transfer amount in €: ")), 2)
        if betrag > ueberweisungslimit:
            print("Your amount exceeds the transfer limit of", ueberweisungslimit, "€.")
        elif betrag < 0:
            print("Please enter only positive amounts for a transfer.")
        elif betrag > kontostand:
            print("Unfortunately your balance of", kontostand, "€ is not sufficient for a transfer of", betrag, "€.")
            entscheidung = input("Do you want to transfer a lower amount (yes/no): ")
            if entscheidung.lower() == "no":
                ungueltiger_betrag = False
        else:
            print("Your transfer of", betrag, "€ has been carried out.")
            kontostand -= betrag
            print("Your new balance is:", kontostand, "€.")
            ungueltiger_betrag = False
    except ValueError:
        print("Please enter numbers only.")
| 38.108108 | 122 | 0.647518 |
a5916d10fd080c93d3ed806b1372e236b4a374e6
| 1,078 |
py
|
Python
|
Vergleich-Display/upy.py
|
aboehm/CLT2019
|
51b9b5674b5ed18297c5ee7e825888d632d96a0e
|
[
"BSD-2-Clause"
] | 1 |
2019-07-01T11:59:06.000Z
|
2019-07-01T11:59:06.000Z
|
Vergleich-Display/upy.py
|
aboehm/CLT2019
|
51b9b5674b5ed18297c5ee7e825888d632d96a0e
|
[
"BSD-2-Clause"
] | null | null | null |
Vergleich-Display/upy.py
|
aboehm/CLT2019
|
51b9b5674b5ed18297c5ee7e825888d632d96a0e
|
[
"BSD-2-Clause"
] | null | null | null |
import machine
from display import ssd1306
display = None
run = 0
def setup():
global display
from machine import I2C, Pin
import uos
_, nodename, _, _, _ = uos.uname()
if nodename == 'esp32':
i2c = I2C(freq=400000, scl=machine.Pin(22), sda=machine.Pin(21))
elif nodename == 'pyboard':
i2c = I2C(freq=400000, scl=machine.Pin('X9'), sda=machine.Pin('X10'))
else:
raise Exception('No compatible board found')
display = ssd1306.SSD1306_I2C(128, 64, i2c, addr=0x3c)
def stress(show=True):
global display, run
display.fill(0)
run += 1
display.text('CLT2019 %i uPython' % (run), 0, 0)
i = 0
for y in range(8, 56, 8):
for x in range(0, 119, 8):
display.text('%c' % (ord('0') + (((run + i) * 17) % 36)), x, y)
i += 1
if show:
display.show()
def loop():
loops = 100
print('Stressing library and io ...')
for i in range(loops):
stress(show=True)
print('Done')
try:
setup()
while True:
loop()
except Exception:
    pass  # keep the REPL reachable even if setup or the display fails
| 19.25 | 77 | 0.558442 |
3c8ae15a51f7ed8aa4a4793f2527828936db7e90
| 294 |
py
|
Python
|
FUNDASTORE/APPS/PRODUCTOS/forms.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
FUNDASTORE/APPS/PRODUCTOS/forms.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
FUNDASTORE/APPS/PRODUCTOS/forms.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
from django import forms
from .models import Producto
class FormularioProducto(forms.ModelForm):
class Meta:
model = Producto
fields = '__all__'
        labels = {'pro_nombre': 'NOMBRE', 'pro_precio': 'PRECIO', 'pro_stock': 'STOCK', 'pro_descripcion': 'DESCRIPCIÓN'}
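# Hedged usage sketch (assumed view code, not from the original app):
# form = FormularioProducto(request.POST or None)
# if form.is_valid():
#     form.save()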
| 29.4 | 118 | 0.663265 |