seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
45386087756
|
"""
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO
from theory.conf import settings
from theory.core.files.base import File
from theory.core.files import temp as tempfile
from theory.utils.encoding import forceStr
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2 ** 10
def __init__(self, file=None, name=None, contentType=None, size=None, charset=None, contentTypeExtra=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.contentType = contentType
self.charset = charset
self.contentTypeExtra = contentTypeExtra
def __repr__(self):
return forceStr("<%s: %s (%s)>" % (
self.__class__.__name__, self.name, self.contentType))
def _getName(self):
return self._name
def _setName(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
ext = ext[:255]
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_getName, _setName)
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, contentType, size, charset, contentTypeExtra=None):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, contentType, size, charset, contentTypeExtra)
def temporaryFilePath(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != errno.ENOENT:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.closeCalled and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, fieldName, name, contentType, size, charset, contentTypeExtra=None):
super(InMemoryUploadedFile, self).__init__(file, name, contentType, size, charset, contentTypeExtra)
self.fieldName = fieldName
def open(self, mode=None):
self.file.seek(0)
def chunks(self, chunkSize=None):
self.file.seek(0)
yield self.read()
def multipleChunks(self, chunkSize=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, contentType='text/plain'):
content = content or b''
super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
contentType, len(content), None, None)
@classmethod
def fromDict(cls, fileDict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(fileDict['filename'],
fileDict['content'],
fileDict.get('content-type', 'text/plain'))
|
grapemix/theory
|
theory/core/files/uploadedfile.py
|
uploadedfile.py
|
py
| 3,916 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2677444598
|
# This code is based on https://github.com/openai/guided-diffusion
"""
Train a diffusion model on images.
"""
import os
import json
from mdm_utils.fixseed import fixseed
from mdm_utils.parser_util import train_args
from mdm_utils import dist_util
from train_utils.train_loop import TrainLoop
from mdm_utils.model_util import create_model_and_diffusion
from train_utils.train_platforms import ClearmlPlatform, TensorboardPlatform, NoPlatform # required for the eval operation
from train_utils.ted_loader import build_dataloader
def main():
args = train_args()
save_dir = f"{args.save_dir}/{args.exp}"
args.save_dir = save_dir
print("save_dir:", save_dir)
fixseed(args.seed)
train_platform_type = eval(args.train_platform_type)
train_platform = train_platform_type(args.save_dir)
train_platform.report_args(args, name='Args')
os.makedirs(args.save_dir, exist_ok=True)  # make sure the experiment directory exists before writing args.json
args_path = os.path.join(args.save_dir, 'args.json')
with open(args_path, 'w') as fw:
json.dump(vars(args), fw, indent=4, sort_keys=True)
dist_util.setup_dist(args.device)
print("creating data loader...")
data = build_dataloader('train', args, shuffle = True)
print("creating model and diffusion...")
lang_model = data.dataset.lang_model
args.lang_model = lang_model
model, diffusion = create_model_and_diffusion(args, '')
model.to(dist_util.dev())
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters_wo_clip()) / 1000000.0))
print("Training...")
TrainLoop(args, train_platform, model, diffusion, data).run_loop()
train_platform.close()
if __name__ == "__main__":
main()
|
zyhbili/LivelySpeaker
|
scripts/train_RAG.py
|
train_RAG.py
|
py
| 1,624 |
python
|
en
|
code
| 38 |
github-code
|
6
|
38075843165
|
import gc
from collections import defaultdict
import cupy as cp
import pandas as pd
import torch
import torch.nn.functional as F
from cuml.metrics import pairwise_distances
from cuml.neighbors import NearestNeighbors
from torch.utils.data import DataLoader, Dataset, default_collate
from tqdm import tqdm
from transformers import AutoTokenizer, TrainerCallback
from utils import clean_text, f2_score, get_pos_score
LANGUAGE_TOKENS = [
"<|lang_pnb|>",
"<|lang_tr|>",
"<|lang_ur|>",
"<|lang_bn|>",
"<|lang_hi|>",
"<|lang_en|>",
"<|lang_kn|>",
"<|lang_km|>",
"<|lang_zh|>",
"<|lang_gu|>",
"<|lang_ta|>",
"<|lang_my|>",
"<|lang_fr|>",
"<|lang_swa|>",
"<|lang_or|>",
"<|lang_mul|>",
"<|lang_fil|>",
"<|lang_sw|>",
"<|lang_es|>",
"<|lang_pt|>",
"<|lang_pl|>",
"<|lang_ru|>",
"<|lang_mr|>",
"<|lang_it|>",
"<|lang_ar|>",
"<|lang_bg|>",
"<|lang_te|>",
"<|lang_as|>",
]
CATEGORY_TOKENS = [
"<|category_supplemental|>",
"<|category_aligned|>",
"<|category_source|>",
]
LEVEL_TOKENS = [
"<|level_0|>",
"<|level_1|>",
"<|level_2|>",
"<|level_3|>",
"<|level_4|>",
"<|level_5|>",
"<|level_6|>",
"<|level_7|>",
"<|level_8|>",
"<|level_9|>",
"<|level_10|>",
]
KIND_TOKENS = [
"<|kind_document|>",
"<|kind_video|>",
"<|kind_html5|>",
"<|kind_exercise|>",
"<|kind_audio|>",
]
OTHER_TOKENS = [
"<|topic|>",
"<|content|>",
"<s_title>",
"</s_title>",
"<s_description>",
"</s_description>",
"<s_text>",
"</s_text>",
]
RELATION_TOKENS = [
"<s_parent>",
"</s_parent>",
"<s_children>",
"</s_children>",
]
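# Special tokens registered with the tokenizer below; presumably they mark the language,
# category, level, and resource kind of each record, plus the structural fields
# (title/description/text, parent/children) in the serialized topic/content texts,
# which are built outside this module.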
def init_tokenizer(tokenizer_name):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
tokenizer.add_special_tokens(
dict(
additional_special_tokens=LANGUAGE_TOKENS
+ CATEGORY_TOKENS
+ LEVEL_TOKENS
+ KIND_TOKENS
+ OTHER_TOKENS
+ RELATION_TOKENS
)
)
if "sentence-t5" in tokenizer_name:
tokenizer.add_special_tokens({"sep_token": "<sep>"})
return tokenizer
class LECRDataset(Dataset):
def __init__(
self,
supervised_df,
topic_df,
content_df,
topic_dict,
content_dict,
correlation_df,
tokenizer_name="xlm-roberta-base",
max_len=512,
use_content_pair=False,
is_training=False,
use_augmentation=False,
objective="siamese",
):
self.tokenizer = init_tokenizer(tokenizer_name)
self.max_len = max_len
self.supervised_df = supervised_df.dropna()
self.topic_df = topic_df
self.content_df = content_df
self.topic_dict, self.content_dict = topic_dict, content_dict
self.correlation_df = correlation_df
self.use_content_pair = use_content_pair
self.is_training = is_training
self.use_augmentation = use_augmentation
self.objective = objective
self.topic_texts, self.content_texts, self.labels = self.process_csv()
def process_csv(self):
# get text pairs
topic_ids = self.supervised_df.topic_id.values
content_ids = self.supervised_df.content_ids.values
labels = list(self.supervised_df.target.values)
topic_texts = []
content_texts = []
for topic_id in topic_ids:
topic_texts.append(self.topic_dict[topic_id])
for content_id in content_ids:
content_texts.append(self.content_dict[content_id])
set_topic_ids = set(topic_ids)
use_all_pairs = (
False  # use all pairs; contents need not lie in the intersection of the topics' content_ids
)
if self.use_content_pair:
# todo: create content pairs from each topic
content_to_topic = defaultdict(lambda: [])
topic_to_content = defaultdict(lambda: [])
pairs = set()
for i, row in tqdm(self.correlation_df.iterrows()):
content_list = row["content_ids"].split(" ")
if row["topic_id"] not in set_topic_ids:
continue
for content_id in content_list:
content_to_topic[content_id].append(row["topic_id"])
topic_to_content[row["topic_id"]].append(content_id)
if len(content_list) <= 1:
continue
if use_all_pairs:
for idx1 in range(len(content_list) - 1):
for idx2 in range(idx1 + 1, len(content_list)):
if (content_list[idx1], content_list[idx2],) not in pairs and (
content_list[idx2],
content_list[idx1],
) not in pairs:
pairs.add((content_list[idx1], content_list[idx2]))
if not use_all_pairs:
for content_id, topics in tqdm(content_to_topic.items()):
intersection_contents = list(
set.intersection(*[set(topic_to_content[topic_id]) for topic_id in topics])
)
for idx1 in range(len(intersection_contents) - 1):
for idx2 in range(idx1 + 1, len(intersection_contents)):
if (
intersection_contents[idx1],
intersection_contents[idx2],
) not in pairs and (
intersection_contents[idx2],
intersection_contents[idx1],
) not in pairs:
pairs.add(
(
intersection_contents[idx1],
intersection_contents[idx2],
)
)
for pair in pairs:
topic_texts.append(self.content_dict[pair[0]])
content_texts.append(self.content_dict[pair[1]])
labels.append(1)
return topic_texts, content_texts, labels
def __len__(self):
if self.is_training:
return len(self.labels)
else:
return 1
def augment(self, inputs):
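# MLM-style augmentation: sample ~15% of token positions, replace ~80% of the sampled
# positions with the mask token, then zero out padding positions via the attention mask.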
probability_matrix = torch.full(inputs["input_ids"].shape, 0.15)
masked_indices = torch.bernoulli(probability_matrix).bool()
indices_replaced = (
torch.bernoulli(torch.full(inputs["input_ids"].shape, 0.8)).bool() & masked_indices
)
inputs["input_ids"][indices_replaced] = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.mask_token
)
inputs["input_ids"] *= inputs["attention_mask"]
return inputs
def __getitem__(self, idx):
topic_text = self.topic_texts[idx]
content_text = self.content_texts[idx]
label = self.labels[idx]
if self.objective == "siamese":
# topic
if isinstance(topic_text, tuple):
topic_inputs = self.tokenizer.encode_plus(
topic_text[0],
topic_text[1],
return_tensors=None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
else:
topic_inputs = self.tokenizer.encode_plus(
topic_text,
return_tensors=None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
for k, v in topic_inputs.items():
topic_inputs[k] = torch.tensor(v, dtype=torch.long)
# content
content_inputs = self.tokenizer.encode_plus(
content_text,
return_tensors=None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
for k, v in content_inputs.items():
content_inputs[k] = torch.tensor(v, dtype=torch.long)
if isinstance(topic_text, tuple):
topic_text = topic_text[0] + topic_text[1]
if self.is_training and self.use_augmentation:
topic_inputs = self.augment(topic_inputs)
content_inputs = self.augment(content_inputs)
return topic_inputs, content_inputs, topic_inputs, label
elif self.objective == "classification":
combined_inputs = self.tokenizer.encode_plus(
topic_text,
content_text,
return_tensors=None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
for k, v in combined_inputs.items():
combined_inputs[k] = torch.tensor(v, dtype=torch.long)
if self.is_training and self.use_augmentation:
combined_inputs = self.augment(combined_inputs)
return combined_inputs, combined_inputs, combined_inputs, label
else:
raise ValueError("Only support siamese/classification for now.")
class InferenceDataset(Dataset):
def __init__(self, texts, tokenizer_name="xlm-roberta-base", max_len=512):
self.texts = texts
self.tokenizer = init_tokenizer(tokenizer_name)
self.max_len = max_len
def __len__(self):
return len(self.texts)
def __getitem__(self, idx):
text = self.texts[idx]
# topic
inputs = self.tokenizer.encode_plus(
text,
return_tensors=None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
for k, v in inputs.items():
inputs[k] = torch.tensor(v, dtype=torch.long)
return inputs
def collate_fn(inputs):
inputs = default_collate(inputs)
mask_len = int(inputs["attention_mask"].sum(axis=1).max())
for k, v in inputs.items():
inputs[k] = inputs[k][:, :mask_len]
return inputs
class DatasetUpdateCallback(TrainerCallback):
"""
Triggers re-computation of the dataset.
A hack that modifies the train/val datasets pointed to by the Trainer's dataloaders:
0. Compute new train/val topic/content embeddings, fit a KNN model, and get the new top-k candidates.
1. Compute the top-k max positive score and compare it with the current validation best; if greater, continue to step 2, otherwise do nothing.
2. Update supervised_df and rebuild the dataset:
self.topic_texts, self.content_texts, self.labels = self.process_csv()
"""
def __init__(
self,
trainer,
train_topic_ids,
val_topic_ids,
topic_df,
content_df,
topic_dict,
content_dict,
correlation_df,
tokenizer_name,
max_len,
best_score=0,
top_k=50,
use_translated=False,
mix_translated=False,
fold=0,
):
super(DatasetUpdateCallback, self).__init__()
self.trainer = trainer
self.topic_df = topic_df
self.content_df = content_df
self.correlation_df = correlation_df
self.best_score = best_score
self.top_k = top_k
self.use_translated = use_translated
self.mix_translated = mix_translated
self.fold = fold
self.tokenizer = init_tokenizer(tokenizer_name)
self.topic_dict, self.content_dict = topic_dict, content_dict
train_topic_texts = [
topic_dict[topic_id]
for topic_id in self.topic_df.id.values
if topic_id in train_topic_ids
]
self.train_topic_ids = [
topic_id for topic_id in self.topic_df.id.values if topic_id in train_topic_ids
]
self.train_topic_languages = []
for topic_id, topic_lang in zip(self.topic_df.id.values, self.topic_df.language.values):
if topic_id in train_topic_ids:
self.train_topic_languages.append(topic_lang)
val_topic_texts = [
topic_dict[topic_id]
for topic_id in self.topic_df.id.values
if topic_id in val_topic_ids
]
self.val_topic_ids = [
topic_id for topic_id in self.topic_df.id.values if topic_id in val_topic_ids
]
content_texts = [
content_dict[content_id]
for content_id in self.content_df.id.values
if content_id.startswith("c_")
]
def inference_collate_fn(inputs):
inputs = default_collate(inputs)
mask_len = int(inputs["attention_mask"].sum(axis=1).max())
for k, v in inputs.items():
inputs[k] = inputs[k][:, :mask_len]
return inputs
train_topic_dataset = InferenceDataset(
texts=train_topic_texts, tokenizer_name=tokenizer_name, max_len=max_len
)
self.train_topic_dataloader = DataLoader(
train_topic_dataset,
num_workers=self.trainer.args.dataloader_num_workers,
batch_size=32,
shuffle=False,
collate_fn=inference_collate_fn,
)
val_topic_dataset = InferenceDataset(
texts=val_topic_texts, tokenizer_name=tokenizer_name, max_len=max_len
)
self.val_topic_dataloader = DataLoader(
val_topic_dataset,
num_workers=self.trainer.args.dataloader_num_workers,
batch_size=32,
shuffle=False,
collate_fn=inference_collate_fn,
)
content_dataset = InferenceDataset(
texts=content_texts, tokenizer_name=tokenizer_name, max_len=max_len
)
self.content_dataloader = DataLoader(
content_dataset,
num_workers=self.trainer.args.dataloader_num_workers,
batch_size=32,
shuffle=False,
collate_fn=inference_collate_fn,
)
def on_train_begin(self, args, state, control, **kwargs):
self.on_epoch_end(args, state, control, **kwargs)
def on_epoch_end(self, args, state, control, **kwargs):
local_rank = args.local_rank if args.local_rank != -1 else 0
with cp.cuda.Device(local_rank):
torch.cuda.empty_cache()
print("Callback on local_rank =", local_rank)
self.trainer.model.eval()
print("On Epoch Begin")
topic_embs = []
device = f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu"
with torch.no_grad():
for inputs in tqdm(self.val_topic_dataloader):
for k, v in inputs.items():
inputs[k] = inputs[k].to(device)
out = self.trainer.model.feature(inputs)
topic_embs.extend(out.cpu().detach().numpy())
content_embs = []
# TODO: only use original content embeddings to avoid translation confusing
for inputs in tqdm(self.content_dataloader):
for k, v in inputs.items():
inputs[k] = inputs[k].to(device)
out = self.trainer.model.feature(inputs)
content_embs.extend(out.cpu().detach().numpy())
# Transfer predictions to gpu
with cp.cuda.Device(local_rank):
topic_embs_gpu = cp.array(topic_embs)
content_embs_gpu = cp.array(content_embs)
# Release memory
torch.cuda.empty_cache()
# KNN model
content_idx_to_id = {}
for idx, row in self.content_df.iterrows():
content_idx_to_id[idx] = row.id
print("Evaluating current score...")
if self.use_translated:
# get the 500 nearest contents, then select the top-k that are in the original contents; this is approximate since we can't check all of them
original_indices = [ # indices of original contents in self.content_df
i
for i, emb in enumerate(content_embs)
if self.content_df.id.values[i].startswith("c_")
]
# original_content_embs = [
# emb
# for i, emb in enumerate(content_embs)
# if self.content_df.id.values[i].startswith("c_")
# ]
# original_content_embs_gpu = cp.array(original_content_embs)
original_content_embs_gpu = content_embs_gpu
neighbors_model = NearestNeighbors(n_neighbors=500, metric="cosine")
neighbors_model.fit(original_content_embs_gpu)
indices = neighbors_model.kneighbors(topic_embs_gpu, return_distance=False)
for selected_k in [5, 10, 20, 50, 100, 200]:
predictions = []
for k in tqdm(range(len(indices))):
pred = indices[k]
# original_contents = [self.content_df.loc[ind, "id"] for ind in pred.get() if self.content_df.loc[ind, "id"].startswith("c_")]
# original_contents = [content_idx_to_id[ind] for ind in pred.get() if content_idx_to_id[ind].startswith("c_")]
original_contents = [
content_idx_to_id[original_indices[ind]] for ind in pred.get()
]
p = " ".join(original_contents[:selected_k])
predictions.append(p)
knn_preds = pd.DataFrame(
{"topic_id": self.val_topic_ids, "content_ids": predictions}
).sort_values("topic_id")
gt = self.correlation_df[
self.correlation_df.topic_id.isin(self.val_topic_ids)
].sort_values("topic_id")
score = get_pos_score(
gt["content_ids"],
knn_preds.sort_values("topic_id")["content_ids"],
selected_k,
)
print(
"Selecting",
selected_k,
"nearest contents",
"top-k score =",
f2_score(
gt["content_ids"],
knn_preds.sort_values("topic_id")["content_ids"],
),
"max positive score =",
score,
)
print("Training KNN model...")
print("Generating KNN predictions with top_k =", self.top_k)
neighbors_model = NearestNeighbors(n_neighbors=self.top_k, metric="cosine")
neighbors_model.fit(original_content_embs_gpu)
print("Generating embedding for validation topics")
indices = neighbors_model.kneighbors(topic_embs_gpu, return_distance=False)
predictions = []
for k in tqdm(range(len(indices))):
pred = indices[k]
# original_contents = [self.content_df.loc[ind, "id"] for ind in pred.get() if self.content_df.loc[ind, "id"].startswith("c_")]
# original_contents = [content_idx_to_id[ind] for ind in pred.get() if content_idx_to_id[ind].startswith("c_")]
original_contents = [
content_idx_to_id[original_indices[ind]] for ind in pred.get()
]
p = " ".join(original_contents[: self.top_k])
predictions.append(p)
else:
for selected_k in [5, 10, 20, 50, 100, 200]:
neighbors_model = NearestNeighbors(n_neighbors=selected_k, metric="cosine")
neighbors_model.fit(content_embs_gpu)
indices = neighbors_model.kneighbors(topic_embs_gpu, return_distance=False)
predictions = []
for k in tqdm(range(len(indices))):
pred = indices[k]
# p = " ".join([self.content_df.loc[ind, "id"] for ind in pred.get()])
p = " ".join([content_idx_to_id[ind] for ind in pred.get()])
predictions.append(p)
knn_preds = pd.DataFrame(
{"topic_id": self.val_topic_ids, "content_ids": predictions}
).sort_values("topic_id")
gt = self.correlation_df[
self.correlation_df.topic_id.isin(self.val_topic_ids)
].sort_values("topic_id")
score = get_pos_score(
gt["content_ids"],
knn_preds.sort_values("topic_id")["content_ids"],
selected_k,
)
print(
"Selecting",
selected_k,
"nearest contents",
"top-k score =",
f2_score(
gt["content_ids"],
knn_preds.sort_values("topic_id")["content_ids"],
),
"max positive score =",
score,
)
print("Training KNN model...")
print("Generating KNN predictions with top_k =", self.top_k)
neighbors_model = NearestNeighbors(n_neighbors=self.top_k, metric="cosine")
neighbors_model.fit(content_embs_gpu)
print("Generating embedding for validation topics")
indices = neighbors_model.kneighbors(topic_embs_gpu, return_distance=False)
predictions = []
for k in tqdm(range(len(indices))):
pred = indices[k]
# p = " ".join([self.content_df.loc[ind, "id"] for ind in pred.get()])
p = " ".join([content_idx_to_id[ind] for ind in pred.get()])
predictions.append(p)
knn_preds = pd.DataFrame(
{"topic_id": self.val_topic_ids, "content_ids": predictions}
).sort_values("topic_id")
score = get_pos_score(
gt["content_ids"],
knn_preds.sort_values("topic_id")["content_ids"],
self.top_k,
)
print("Current Score:", score, "Best Score:", self.best_score)
if score > self.best_score:
self.best_score = score
print("saving best model to data/ folder")
# torch.save(self.trainer.model.state_dict(), f"data/siamese_model_{score}.pth")
generate_new_dataset_every_epoch = True
if generate_new_dataset_every_epoch or (score == self.best_score):
# generate new pairs in dataset
print("Building new validation supervised df")
new_val_supervised_df = build_new_supervised_df(knn_preds, self.correlation_df)[
["topic_id", "content_ids", "target"]
].sort_values(["topic_id", "content_ids"])
if score == self.best_score: # only save for the best checkpoint
print("saving new_val_supervised_df to data/ folder")
new_val_supervised_df.to_csv("data/new_val_supervised_df.csv")
# get top-k for training set
# TODO: only get original content neighbors for original topics
print("Generating embedding for train topics")
train_topic_embs = []
with torch.no_grad():
for inputs in tqdm(self.train_topic_dataloader):
for k, v in inputs.items():
inputs[k] = inputs[k].to(device)
out = self.trainer.model.feature(inputs)
train_topic_embs.extend(out.cpu().detach().numpy())
with cp.cuda.Device(local_rank):
train_topic_embs_gpu = cp.array(train_topic_embs)
train_indices = neighbors_model.kneighbors(
train_topic_embs_gpu, return_distance=False
)
# if self.use_translated:
# topic_language_df = pd.DataFrame({
# "topic_id": self.train_topic_ids,
# "language": self.train_topic_languages
# })
train_predictions = []
for k in tqdm(range(len(train_indices))):
pred = train_indices[k]
# p = " ".join([self.content_df.loc[ind, "id"] for ind in pred.get()])
if self.use_translated:
p = " ".join(
[content_idx_to_id[original_indices[ind]] for ind in pred.get()]
)
else:
p = " ".join([content_idx_to_id[ind] for ind in pred.get()])
train_predictions.append(p)
train_knn_preds = pd.DataFrame(
{
"topic_id": self.train_topic_ids,
"content_ids": train_predictions,
"language": self.train_topic_languages,
}
).sort_values("topic_id")
print("Building new train supervised df")
# if self.use_translated:
# count_dict = {
# "ar": 3701,
# "as": 167,
# "bg": 2867,
# "bn": 2176,
# "en": 36161,
# "es": 13910,
# "fil": 247,
# "fr": 3701,
# "gu": 2320,
# "hi": 1786,
# "it": 866,
# "km": 121,
# "kn": 119,
# "mr": 300,
# "mul": 4,
# "my": 135,
# "or": 70,
# "pl": 43,
# "pnb": 51,
# "pt": 4177,
# "ru": 34,
# "sw": 2860,
# "swa": 35,
# "ta": 60,
# "te": 93,
# "tr": 40,
# "ur": 66,
# "zh": 862,
# }
# times_positive_samples = 4
# # select all original topics and a part of translated topics
# translated_knn_preds = (
# train_knn_preds[~train_knn_preds.topic_id.str.startswith("t_")]
# .groupby("language")
# .apply(
# lambda x: x.sample(
# n=count_dict[x["language"].iat[0]] * times_positive_samples,
# replace=True,
# )
# )
# .reset_index(drop=True)
# )
# original_knn_preds = train_knn_preds[
# train_knn_preds.topic_id.str.startswith("t_")
# ]
# train_knn_preds = pd.concat([original_knn_preds, translated_knn_preds])
new_train_supervised_df = build_new_supervised_df(
train_knn_preds, self.correlation_df
)
if self.use_translated:
# Only add positive cases in training set for translated topics
translated_supervised_df = new_train_supervised_df[
    (~new_train_supervised_df.topic_id.str.startswith("t_"))
    & (new_train_supervised_df.target == 1)
].copy()
# Only original contents for original topics
original_supervised_df = new_train_supervised_df[
new_train_supervised_df.topic_id.str.startswith("t_")
& new_train_supervised_df.content_ids.str.startswith("c_")
].copy()
# TODO: duplicate number of positive by using translated data
id_to_language = {}
for _, row in tqdm(self.topic_df.iterrows()):
id_to_language[row.id] = row.language
original_supervised_df["language"] = original_supervised_df["topic_id"].apply(
lambda x: id_to_language[x]
)
count_df = (
original_supervised_df[original_supervised_df.target == 1]
.groupby("language")
.size()
.reset_index(name="counts")
)
count_dict = {}
for _, row in count_df.iterrows():
count_dict[row.language] = row.counts
times_positive_samples = 3
translated_supervised_df["language"] = translated_supervised_df[
"topic_id"
].apply(lambda x: id_to_language[x])
translated_supervised_df = (
translated_supervised_df.groupby("language")
.apply(
lambda x: x.sample(
n=count_dict[x["language"].iat[0]] * times_positive_samples,
replace=True,
)
)
.reset_index(drop=True)
)
original_supervised_df = original_supervised_df.drop(columns=["language"])
translated_supervised_df = translated_supervised_df.drop(columns=["language"])
new_train_supervised_df = pd.concat(
[translated_supervised_df, original_supervised_df]
)[["topic_id", "content_ids", "target"]].sort_values(
["topic_id", "content_ids"]
)
if score == self.best_score: # only save for the best checkpoint
print("saving new_train_supervised_df to data/ folder")
new_train_supervised_df.to_csv("data/new_train_supervised_df.csv")
# update train_dataset and val_dataset
print("preprocess csv for train/validation topics, contents, labels")
self.trainer.train_dataset.supervised_df = new_train_supervised_df.dropna()
(
self.trainer.train_dataset.topic_texts,
self.trainer.train_dataset.content_texts,
self.trainer.train_dataset.labels,
) = self.trainer.train_dataset.process_csv()
self.trainer.eval_dataset.supervised_df = new_val_supervised_df.dropna()
(
self.trainer.eval_dataset.topic_texts,
self.trainer.eval_dataset.content_texts,
self.trainer.eval_dataset.labels,
) = self.trainer.eval_dataset.process_csv()
print("Saving knn csvs ...")
train_knn_preds.to_csv(f"data/train_knn_fold{self.fold}.csv")
knn_preds.to_csv(f"data/val_knn_fold{self.fold}.csv")
del (
train_topic_embs,
train_topic_embs_gpu,
train_knn_preds,
train_indices,
train_predictions,
)
gc.collect()
del (
topic_embs,
content_embs,
topic_embs_gpu,
content_embs_gpu,
knn_preds,
indices,
neighbors_model,
predictions,
)
gc.collect()
torch.cuda.empty_cache()
if self.mix_translated:
self.use_translated = not self.use_translated
def build_new_supervised_df(knn_df, correlations):
# Create lists for training
topics_ids = []
content_ids = []
targets = []
# Iterate over each topic in df
mapping = set()
# get all class 1 in correlations
topic_ids = set(knn_df.topic_id.values)
filtered_correlations = correlations[correlations["topic_id"].isin(topic_ids)]
for i, row in tqdm(filtered_correlations.iterrows()):
if str(row["content_ids"]) and str(row["content_ids"]) != "nan":
content_ids = str(row["content_ids"]).split(" ")
for content_id in content_ids:
mapping.add((row["topic_id"], content_id, 1))
for i, row in tqdm(knn_df.iterrows()):
if str(row["content_ids"]) and str(row["content_ids"]) != "nan":
content_ids = str(row["content_ids"]).split(" ")
for content_id in content_ids:
if (
row["topic_id"],
content_id,
1,
) not in mapping: # because mapping already contains all positive cases
mapping.add((row["topic_id"], content_id, 0))
# Build training dataset
mapping = list(mapping)
new_df = pd.DataFrame(
{
"topic_id": [item[0] for item in mapping if item[1]],
"content_ids": [item[1] for item in mapping if item[1]],
"target": [item[2] for item in mapping if item[1]],
}
)
# Release memory
del topics_ids, content_ids
gc.collect()
return new_df
def collate_fn(batch):
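# Collates (topic_inputs, content_inputs, combined_inputs, label) tuples and trims each
# input dict to its longest non-padded sequence; this definition shadows the single-input
# collate_fn defined earlier in the module.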
batch = default_collate(batch)
topic_inputs, content_inputs, combined_inputs, labels = batch
mask_len = int(topic_inputs["attention_mask"].sum(axis=1).max())
for k, v in topic_inputs.items():
topic_inputs[k] = topic_inputs[k][:, :mask_len]
mask_len = int(content_inputs["attention_mask"].sum(axis=1).max())
for k, v in content_inputs.items():
content_inputs[k] = content_inputs[k][:, :mask_len]
mask_len = int(combined_inputs["attention_mask"].sum(axis=1).max())
for k, v in combined_inputs.items():
combined_inputs[k] = combined_inputs[k][:, :mask_len]
return {
"topic_inputs": topic_inputs,
"content_inputs": content_inputs,
"combined_inputs": combined_inputs,
"labels": labels,
}
|
thanhhau097/lecr
|
dataset.py
|
dataset.py
|
py
| 35,343 |
python
|
en
|
code
| 0 |
github-code
|
6
|
369174475
|
from typing import Optional
# check whether the tree is symmetric
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def isSymmetric(self, root: Optional[TreeNode]) -> bool:
if not root or not root.left and not root.right:
return True
stack=[root.left,root.right]
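# Children are pushed in mirror-position pairs, so every two consecutive pops
# compare nodes that sit at mirrored positions in the tree.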
while stack:
num_items=len(stack)
for i in range(num_items//2):
node1=stack.pop()
node2=stack.pop()
if node1 and not node2 or node2 and not node1:
return False
elif not node1 and not node2:
continue
elif node1.val != node2.val:
return False
else:
stack.append(node1.left)
stack.append(node2.right)
stack.append(node1.right)
stack.append(node2.left)
return True
root = TreeNode("root")
root.left = TreeNode("a")
root.right = TreeNode("a")
root.left.left = TreeNode("a")
root.right.right = TreeNode("a")
if Solution().isSymmetric(root):
    print("true")
else:
    print("false")
|
ArdaiArtur/PY
|
LeetCode/SymetricTree.py
|
SymetricTree.py
|
py
| 1,405 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21485666359
|
import math
from collections import defaultdict
import heapq
from itertools import permutations
from itertools import combinations
from itertools import combinations_with_replacement
from collections import Counter
import random
def test_case():
n, a, b = list(map(int, input().split()))
arr = list(map(int, input().split()))
arr = [0, 0] + arr
count = 0
def find_par(node, x):
res = 0
while x > 0 and node > 0:
node = arr[node]
x -= 1
return node
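# Brute force: for every ordered pair (i, j), count the distinct nodes visited when i
# repeatedly jumps a ancestors at a time and j jumps b at a time (union of both paths),
# then average the counts over the n^2 pairs.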
for i in range(1, n + 1):
for j in range(1, n + 1):
vis = [0 for i in range(n + 1)]
temp = 0
node = i
while node > 0:
vis[node] = 1
temp += 1
node = find_par(node, a)
node = j
while node > 0:
if vis[node] != 1:
temp += 1
node = find_par(node, b)
count += temp
print(count / (n ** 2))
def main():
T = int(input())
for i in range(1, T+1):
print("Case #{}: ".format(i), end = "")
test_case()
if __name__=="__main__":
main()
|
bboychencan/Algorithm
|
google/kickstart/roundD2020/c.py
|
c.py
|
py
| 957 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70494302589
|
import solid as sp
import solid.utils as spu
from frame.materials import tube
from frame.utils import entrypoint
from . import column_mount, instrument_panel, throttle, arm, arm_mount, wheel, wheel_mount
from .dimensions import column_diameter, column_length
def assembly():
column = tube.volume(diameter=column_diameter, wall_thickness=2., length=column_length)
return sp.union()(
spu.up(column_length)(
spu.up(wheel.plate_thickness / 2.)(
sp.color('red')(wheel.volume()),
spu.up(wheel.plate_thickness / 2.)(
sp.color('green')(instrument_panel.assembly())
),
sp.translate((150, 80))(
sp.rotate((0., 60., 0.))(
sp.color('blue')(throttle.assembly())
)
),
),
sp.rotate((0, 180, 0))(sp.color('cyan')(wheel_mount.volume())),
),
sp.color('magenta')(column),
spu.up(440.)(sp.color('purple')(column_mount.upper.assembly())),
spu.up(60.)(sp.color('grey')(column_mount.lower.assembly())),
sp.rotate((0, 0, 0))(
sp.color('orange')(arm_mount.volume()),
sp.color('pink')(spu.down(arm.thickness)(arm.volume())),
),
)
if __name__ == '__main__':
entrypoint.main(assembly())
|
DanNixon/HackyRacer
|
cad/frame/assembly/steering/assembly.py
|
assembly.py
|
py
| 1,362 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4298091636
|
"""
Automatic trajectory drawing
"""
import turtle as t
t.title("自动轨迹绘制")
t.setup(800, 600, 0, 0)
t.pencolor("red")
t.pensize(5)
# Read the data
datals = []
f = open("../resources/data.txt", encoding="utf-8")
for line in f:
# strip the trailing newline from the current line
line = line.replace("\n", "")
# Split the current line on commas and apply eval to every element of the resulting list;
# map passes each element as an argument to the function given as its first parameter.
# Finally, collect the converted values into a list and append it to datals.
datals.append(list(map(eval, line.split(","))))
f.close()
# Automatic drawing
for i in range(len(datals)):
t.pencolor(datals[i][3], datals[i][4], datals[i][5])
t.fd(datals[i][0])
if datals[i][1]:
t.right(datals[i][2])
else:
t.left(datals[i][2])
t.done()
|
HALF-MAN/pythonlearn
|
learning/file_and_data_formatting/AutoTraceDraw.py
|
AutoTraceDraw.py
|
py
| 830 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
37555301338
|
from rest_framework.permissions import DjangoModelPermissions
class DjangoModelPermissionsWithRead(DjangoModelPermissions):
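# Unlike DRF's default DjangoModelPermissions (which leaves GET unrestricted), GET here
# requires the model's "view" permission, while PUT/PATCH are left unrestricted.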
perms_map = {
'GET': ['%(app_label)s.view_%(model_name)s'],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': [],
'PATCH': [],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
|
rabar1995/pollisterjango
|
.history/poll/permissions_20191031152258.py
|
permissions_20191031152258.py
|
py
| 335 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13138241281
|
from django.contrib import admin
from django.urls import path, include
from django.http import HttpResponse
def homepage(request):
return HttpResponse("you're in the home page, goto polls.")
urlpatterns = [
path('admin/', admin.site.urls),
path('', homepage),
path('polls/', include('polls.urls')),
]
|
callmebhawesh/100-Days-Of-Code
|
Day 31/mysite/mysite/urls.py
|
urls.py
|
py
| 321 |
python
|
en
|
code
| 3 |
github-code
|
6
|
71623457467
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 11:31:08 2020
@author: dkafkes
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('master_stdev.csv', header = 0, skiprows = list(np.arange(1, 177)))
df.drop(columns = ['Filename'], inplace = True)
df = df.set_index('Unnamed: 0')
#%%
x = df['B:IMINER']
array, bins, patches = plt.hist(x, bins = 100)
plt.title("B:IMINER Standard Deviation Spread")
plt.xlabel("Average Standard Deviation")
plt.ylabel("Log(Files)")
plt.ylim(0.1, 1000)
plt.semilogy()
plt.show()
|
dkafkes/simplified-ai-for-accelerators
|
data pipeline/histogram.py
|
histogram.py
|
py
| 577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31746854279
|
""" This module allows you to download public files from Google Drive and Dropbox """
import os
import requests
import zipfile
import logging
import patoolib
from bs4 import BeautifulSoup
import gdrivedl
# Define urls to filter by cloud service
GDRIVE_URL = 'drive.google.com'
DROPBOX_URL = 'dropbox.com'
def download_folder(url, output_folder, silent, filename=None):
"""Download Google Drive folders"""
dl = gdrivedl.GDriveDL(quiet=silent, overwrite=False, mtimes=False)
dl.process_url(url, output_folder, filename=None)
def download_file(url, output_folder, filename, silent):
""" Download Google Drive files"""
dl = gdrivedl.GDriveDL(quiet=silent, overwrite=False, mtimes=False)
dl.process_url(url, output_folder, filename)
def gd_download(url, directory, quiet):
""" Detects if url belongs to Google Drive folder or file and calls relavent function """
if 'folder' in url:
output = get_title(url)[:-15]
output_path = directory + output
logging.info(f"---> Downloading Google Drive folder to: {output_path}")
download_folder(url, output_path, quiet)
return True
elif 'file' in url:
temp_output = get_title(url)[:-15]
output = temp_output.split('.', 1)[0]
logging.info(f"---> Downloading Google Drive file to {directory + temp_output}")
download_file(url, directory, temp_output, quiet)
unzip(temp_output, output, directory)
return True
else:
return False
def get_title(url):
""" Gets file/folder title with requests library """
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')
for title in soup.find_all('title'):
return title.get_text()
def compression_type(file_name):
""" Detects file compression type """
ext = os.path.splitext(file_name)[-1].lower()
return ext
def unzip(zipped_file, unzipped_file, directory):
""" Uncompresses files and then deletes compressed folder """
if compression_type(zipped_file) == '.zip':
zip_path = directory + zipped_file
unzip_path = directory + unzipped_file
logging.info(f"--> Extracting to: {unzip_path}")
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(unzip_path)
zip_ref.close()
os.remove(zip_path)
if compression_type(zipped_file) == '.rar':
zip_path = directory + zipped_file
unzip_path = directory + unzipped_file
logging.info(f"---> Extracting to: {unzip_path}")
patoolib.extract_archive(zip_path, outdir=directory)
os.remove(zip_path)
return
def db_download(url, directory):
""" Downloads files from Dropbox URL """
url = url[:-1] + '0'
file_name = get_title(url)[:-21][10:]
logging.info(f"Dropbox file name: {file_name}")
suffix1 = file_name.endswith(".zip")
suffix2 = file_name.endswith(".rar")
dl_url = url[:-1] + '1'
filepath = directory + file_name
logging.info(f"Downloading dropbox file to: {filepath}")
output = file_name[:-4]
headers = {'user-agent': 'Wget/1.16 (linux-gnu)'}
r = requests.get(dl_url, stream=True, headers=headers)
if r.status_code == 200:
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
if suffix1 or suffix2:
unzip(file_name, output, directory)
return True
else:
return False
def grab(url, output_path, quiet=True):
"""
Detects if url belongs to Google Drive or a Dropbox url and calls the relevant method.
You may change the logging level by calling grab with quiet=False.
"""
if quiet:
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.WARNING)
else:
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.INFO)
if GDRIVE_URL in url:
if (gd_download(url, output_path, quiet)):
return True
else:
logging.warning(f"The Google Drive URL {url} is not supported")
return False
if DROPBOX_URL in url:
if(db_download(url, output_path)):
return True
else:
logging.warning(f"The Dropbox URL {url} is not supported")
return False
else:
logging.warning(f"The URL {url} is not supported")
return False
|
duckduckgrayduck/clouddl
|
src/clouddl/clouddl.py
|
clouddl.py
|
py
| 4,484 |
python
|
en
|
code
| 3 |
github-code
|
6
|
40107035382
|
version = "0.8"
import os, io
import chardet
from functools import wraps
from tempfile import mkstemp, mkdtemp
from json import JSONEncoder as _JSONEncoder
from pathlib import Path
from collections import deque
from colorama import Fore as F
markdown = None
class LabelledTree (object) :
def __init__ (self, label, children=[]) :
self.label = str(label)
self.children = list(children)
def _print (self, out, prefix=None, last=True) :
if prefix is None :
out.write(f"{self.label}\n")
elif last :
out.write(f"{prefix}{F.WHITE}└─{F.RESET} {self.label}\n")
else :
out.write(f"{prefix}{F.WHITE}├─{F.RESET} {self.label}\n")
for child in self.children :
if prefix is None :
child._print(out, "", child is self.children[-1])
elif last :
child._print(out, prefix + " ", child is self.children[-1])
else :
child._print(out, prefix + f"{F.WHITE}│{F.RESET} ", child is self.children[-1])
def __str__ (self) :
out = io.StringIO()
self._print(out)
return out.getvalue().rstrip()
class tree (dict) :
def __getattr__ (self, key) :
cls = self.__class__
val = self.get(key, None)
if isinstance(val, dict) and not isinstance(val, cls) :
val = self[key] = tree(val)
elif isinstance(val, list) :
val = self[key] = [tree(v) if isinstance(v, dict) and not isinstance(v, cls)
else v for v in val]
return val
def __setattr__ (self, key, val) :
if isinstance(val, dict) :
val = self.__class__(val)
self[key] = val
cwd = Path().absolute()
def new_path (type="file", **args) :
if type == "file" :
fd, path = mkstemp(**args)
os.close(fd)
elif type == "dir" :
path = mkdtemp(**args)
else :
raise ValueError(f"unsupported path type {type!r}")
return Path(path).absolute().relative_to(cwd)
encoding = tree(encoding="utf-8",
errors="replace")
class JSONEncoder (_JSONEncoder) :
def default (self, obj) :
handler = getattr(obj, "__json__", None)
if handler is None :
return super().default(obj)
else :
return handler()
def cached_property (method) :
@wraps(method)
def wrapper (self) :
name = method.__name__
if not hasattr(self, "__cache") :
self.__cache = {}
if name not in self.__cache :
self.__cache[name] = method(self)
return self.__cache[name]
@wraps(method)
def delete (self) :
self.__cache.pop(method.__name__, None)
return property(wrapper, None, delete, method.__doc__)
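# cached_property: the wrapped method runs once per instance and the result is cached;
# "del obj.attr" clears the cached value so it is recomputed on the next access.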
def recode (path) :
with open(path, "rb") as inf :
raw = inf.read()
try :
enc = chardet.detect(raw)
src = raw.decode(enc["encoding"], errors="replace")
except :
return
with open(path, "w", **encoding) as out :
out.write(src)
return src
def md (text, inline=True) :
# only load if necessary to speedup prog startup
global markdown
from markdown import markdown
#
try :
html = markdown(str(text))
if inline :
html = html.replace("<p>", "").replace("</p>", "")
return html.replace("§", " ")
except :
return text.replace("§", " ")
_esc = str.maketrans({c: f"\\{c}" for c in r"\`*_{}[]()#+-.!"})
def mdesc (text) :
return str(text).translate(_esc)
def chmod_r (path) :
q = deque([Path(path)])
while q :
sub = q.popleft()
if sub.is_dir() :
sub.chmod(sub.stat().st_mode | 0o750)
q.extend(sub.iterdir())
else :
sub.chmod(sub.stat().st_mode | 0o640)
|
fpom/badass
|
badass/__init__.py
|
__init__.py
|
py
| 3,857 |
python
|
en
|
code
| 4 |
github-code
|
6
|
16737781321
|
import pdb
import unittest
import json
from objbrowser import browse
import mock
from mock import patch
import music_server
from music_server import youtube_search
from music_server import config
class YoutubeSearchTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_format_query(self):
# given
search_query = "simple_query"
expected_result = "http://youtube.com/results?search_query=simple_query"
# when
query = youtube_search.format_query(search_query)
# then
self.assertEqual(query, expected_result)
def test_format_query_with_space(self):
# given
search_query = "a b"
expected_result = "http://youtube.com/results?search_query=a+b"
# when
query = youtube_search.format_query(search_query)
# then
self.assertEqual(query, expected_result)
def test_format_with_plus(self):
# given
search_query = "a+b"
expected_result = "http://youtube.com/results?search_query=a%2Bb"
# when
query = youtube_search.format_query(search_query)
# then
self.assertEqual(query, expected_result)
def test_fetch_first_result_when_empty(self):
self.assertRaises(TypeError, youtube_search.fetch_results, None)
# empty list
def test_fetch_first_result_when_no_result(self):
# given
html_content = "wrong html content"
# when
result = youtube_search.fetch_results(html_content)
# then
self.assertFalse(result, 'Result should be an empty list')
def test_fetch_results(self):
# given
with open(config.test_resources_folder + 'youtube_search_pratos_osni.html', 'r') as myfile:
html_content = myfile.read()
with open(config.test_resources_folder + 'youtube_search_pratos_osni.json', 'r') as myfile2:
expected_links = json.loads(myfile2.read())
# when
results = youtube_search.fetch_results(html_content)
# then
self.assertEqual(results, expected_links)
@patch('music_server.youtube_search.get_html')
def test_youtube_search(self, test_patch):
# given
with open(music_server.config.test_resources_folder + 'youtube_search_pratos_osni.html') as fh:
mock_html = fh.read()
test_patch.return_value = mock_html
with open(config.test_resources_folder + 'youtube_search_pratos_osni.json', 'r') as myfile2:
expected_links = json.loads(myfile2.read())
# when
results = youtube_search.YoutubeSearch("pratos osni").video_ids
# then
self.assertEqual(results, expected_links)
def test_search_empty(self):
# given
search_query = ''
# when
results = youtube_search.YoutubeSearch(search_query)
# then
self.assertTrue(results)
def test_search_none(self):
# given
search_query = None
# when
results = youtube_search.YoutubeSearch(search_query)
# then
self.assertTrue(results)
if __name__ == '__main__':
unittest.main()
|
Sun42/music_server
|
tests/youtube_search_tests.py
|
youtube_search_tests.py
|
py
| 3,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
308791926
|
# my_file = open("data.txt")
# contents = my_file.read()
# print(contents)
# my_file.close()
# automatically closes the file at the end
with open("data.txt") as my_file:
contents = my_file.read()
print(contents)
# read is the default mode
with open("data.txt", mode="w") as my_file_again:
my_file_again.write("Content changed to this.\n")
with open("data.txt") as my_file:
contents = my_file.read()
print(contents)
with open("data.txt", mode="a") as my_file_third_time:
my_file_third_time.write("Append this line to content.")
with open("data.txt") as my_file:
contents = my_file.read()
print(contents)
with open("non_existent.txt", mode="w") as some_file:
some_file.write("Write a line to a file that does not yet exist.")
with open("non_existent.txt") as my_file:
contents = my_file.read()
print(contents)
|
satuhyva/100daysOfPython
|
Day 024/practicing/files.py
|
files.py
|
py
| 856 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38592347384
|
from .atari import Atari
from .obj3d import Obj3D
from torch.utils.data import DataLoader
from object_detector import CLIPort_Dataset
__all__ = ['get_dataset', 'get_dataloader']
def get_dataset(cfg, mode):
assert mode in ['train', 'val', 'test']
return CLIPort_Dataset(cfg.dataset_roots.TABLE, mode)
def get_dataloader(cfg, mode):
assert mode in ['train', 'val', 'test']
batch_size = getattr(cfg, mode).batch_size
shuffle = (mode == 'train')
num_workers = getattr(cfg, mode).num_workers
dataset = get_dataset(cfg, mode)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
return dataloader
|
1989Ryan/paragon
|
object_detector/space/dataset/__init__.py
|
__init__.py
|
py
| 713 |
python
|
en
|
code
| 7 |
github-code
|
6
|
1104386399
|
import random
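# Note: random.choice is not cryptographically secure; for real passwords the standard
# library "secrets" module (secrets.choice) would be preferable.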
karakterler= "qwertyuıopasdfghjklzxcvbnmiIQWERTYUOPASDFGHJKLZXCVBNM1234567890é!'^+%&/()=?_-*<>£#$½{[]}"
sifresayisi = int(input("olusturmak istediginiz sifre sayisini giriniz "))
for x in range(sifresayisi):
sifre = ""
for x in range(16):
karakter = random.choice(karakterler)
sifre = sifre + karakter
print("Random Sifreniz : ", sifre)
|
quebec164/pythonodevleri
|
sifreolusturucu.py
|
sifreolusturucu.py
|
py
| 384 |
python
|
en
|
code
| 12 |
github-code
|
6
|
74387397629
|
# PROBLEM 3
# DECLARATIONS
# x, y : int
# ALGORITHM
# define the convert function
def convert(code, TC):
if code == 'F':
hasil = ((9/5)*TC)+32
elif code == 'R':
hasil = (4/5)*TC
else:
hasil = TC + 273
return f'{hasil} {code}'
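# Example: convert('F', 100.0) returns '212.0 F'; convert('K', 25.0) returns '298.0 K'.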
# read the conversion code and the temperature in Celsius
code = input('kode konversi = ')
TC = float(input('Suhu (dalam celcius): '))
# call the function and print the result
print(convert(code, TC))
|
xmriz/kuliah-main
|
Pengkom-TPB1/08 - Tugas Pengkom/PR/PR1/3.py
|
3.py
|
py
| 431 |
python
|
id
|
code
| 0 |
github-code
|
6
|
18453506476
|
from icecream import ic
from stack import Stack
from datetime import datetime
def time_format():
return f'{datetime.now().strftime("%m/%d/%Y, %I:%M:%S")}|> '
ic.configureOutput(prefix=time_format, includeContext=True)
def nextLargestElment(items):
tempStack = Stack()
returnStack = Stack()
tempStack.push(items[0])
print("Intial tempStack:", tempStack.getStack())
print("-----------------------------------------")
for current_item in items[1::]:
print("current_item:", current_item)
print("stack_top_item:", tempStack.peek())
if tempStack.isEmpty() == False:
stack_top_item = tempStack.pop()
while stack_top_item < current_item:
print(str(stack_top_item) + " -- " + str(current_item))
returnStack.push(current_item)
if tempStack.isEmpty():
break
stack_top_item = tempStack.pop()
if stack_top_item > current_item:
tempStack.push(stack_top_item)
tempStack.push(current_item)
print("tempStack:", tempStack.getStack())
print("-----------------------------------------")
while tempStack.isEmpty() == False:
element = tempStack.pop()
returnStack.push(-1)
next = -1
print(str(element) + " -- " + str(next))
return returnStack.getStack()
if __name__ == '__main__':
# ic(nextLargestElment([int(item)
# for item in input("Enter the list items : ").strip().split()]))
ic(nextLargestElment([2, 6, 5, 4, 19]))
|
beharamadhu270405/python-DS
|
stack/next_greatest_element_using_stacks.py
|
next_greatest_element_using_stacks.py
|
py
| 1,594 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27825918351
|
"""
Create asynchronous functions for making requests to the resources (use aiohttp)
- finish the `jsonplaceholder_requests` module:
- set the values of the `USERS_DATA_URL` and `POSTS_DATA_URL` constants (take the resources from https://jsonplaceholder.typicode.com/)
- create asynchronous functions that query these resources (use `aiohttp`)
- it is recommended to add basic request helpers that can be reused (for example `fetch_json`)
"""
from aiohttp import ClientSession
import asyncio
# import logging
#
# DEFAULT_FORMAT = "%(asctime)s %(levelname)-8s [%(name)-8s] (%(filename)s:%(funcName)s:%(lineno)d) %(message)s"
#
# logging.basicConfig(format=DEFAULT_FORMAT, level=logging.DEBUG)
#
# log = logging.getLogger(__name__)
USERS_DATA_URL = "https://jsonplaceholder.typicode.com/users"
POSTS_DATA_URL = "https://jsonplaceholder.typicode.com/posts"
async def fetch_json(session: ClientSession, url: str):
async with session.get(url) as response:
return await response.json()
async def fetch_users():
# log.info(f"Fetch users from {USERS_DATA_URL}")
async with ClientSession() as session:
json_data = await fetch_json(session, USERS_DATA_URL)
# log.info(f"Fetch json from {USERS_DATA_URL}: {json_data}")
return json_data
async def fetch_posts():
# log.info(f"Fetch posts from {POSTS_DATA_URL}")
async with ClientSession() as session:
json_data = await fetch_json(session, POSTS_DATA_URL)
# log.info(f"Fetch json from {POSTS_DATA_URL}: {json_data}")
return json_data
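# Example usage (sketch, mirroring the commented-out main below):
#   users = asyncio.run(fetch_users())
#   posts = asyncio.run(fetch_posts())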
# def main():
# asyncio.run(fetch_users())
# asyncio.run(fetch_posts())
#
#
# if __name__ == '__main__':
# main()
|
MikhailParkin/MikhailParkin
|
homework_04/jsonplaceholder_requests.py
|
jsonplaceholder_requests.py
|
py
| 2,013 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
72946767548
|
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import LatentDirichletAllocation
import os
from time import strftime
# Python 3.5
def load_data(filename):
return np.loadtxt(filename, skiprows=1, delimiter=' ')
def save_predictions(X, model):
filename = 'random_forest_' + strftime('%b%d%H%M%S') + '.csv'
preds = model.predict(X).reshape((len(X), 1))
ids = (np.arange(1, len(X) + 1)).reshape((len(X), 1))
np.savetxt(
os.path.join('predictions', filename),
np.hstack((ids, preds)),
fmt='%d',
delimiter=',',
header='Id,Prediction',
comments=''
)
def decompose(X, d, args={}):
pca_model = LatentDirichletAllocation(n_components=d, **args)
pca_model.fit(X)
return pca_model
def train(X, y, args={}):
model = RandomForestClassifier(**args)
model.fit(X, y)
return model
def test(X, y, model):
return np.sum(model.predict(X) == y) / len(y)
train_raw = load_data('training_data.txt')
n_train = 10000
n_val = len(train_raw) - n_train
X_train, y_train = train_raw[:, 1:][:n_train], train_raw[:, 0][:n_train]
X_val, y_val = train_raw[:, 1:][n_train:], train_raw[:, 0][n_train:]
# reduce dimensions from 1000 to 10 (note: despite the "pca_model" naming, this uses LatentDirichletAllocation, not PCA)
pca_model = decompose(X_train, 10)
X_train_red = pca_model.transform(X_train)
X_val_red = pca_model.transform(X_val)
model = train(X_train_red, y_train, args={})
print('train / val split : %d / %d' % (n_train, n_val))
print('train acc :', test(X_train_red, y_train, model))
print('val acc :', test(X_val_red, y_val, model))
test_raw = load_data('test_data.txt')
X_test = test_raw[:, :]
X_test_red = pca_model.transform(X_test)
# save_predictions(X_test_red, model)
'''
<output>
train / val split : 10000 / 10000
train acc : 0.9892
val acc : 0.6557
'''
|
bchidamb/AmazonFeels
|
shit_tier/random_forest_pca.py
|
random_forest_pca.py
|
py
| 1,890 |
python
|
en
|
code
| 3 |
github-code
|
6
|
17688731362
|
import json
import os
import gui
import wx
import addonHandler
import braille
import config
import controlTypes
import languageHandler
from .common import configDir
addonHandler.initTranslation()
CUR_LANG = languageHandler.getLanguage().split('_')[0]
PATH_JSON = os.path.join(configDir, f"roleLabels-{CUR_LANG}.json")
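# Custom role labels are stored per interface language as JSON in the add-on's config
# directory (e.g. roleLabels-en.json).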
class SettingsDlg(gui.settingsDialogs.SettingsPanel):
# Translators: title of a dialog.
title = _("Role labels")
roleLabels = {}
def makeSettings(self, settingsSizer):
self.roleLabels = roleLabels.copy()
sHelper = gui.guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
self.toggleRoleLabels = sHelper.addItem(wx.CheckBox(self, label=_("Use custom braille &role labels")))
self.toggleRoleLabels.SetValue(config.conf["brailleExtender"]["features"]["roleLabels"])
self.toggleRoleLabels.Bind(wx.EVT_CHECKBOX, self.onToggleRoleLabels)
self.categories = sHelper.addLabeledControl(_("Role cate&gory:"), wx.Choice, choices=[_("General"), _("Landmarks"), _("Positive states"), _("Negative states")])
self.categories.Bind(wx.EVT_CHOICE, self.onCategories)
self.categories.SetSelection(0)
choices = []
if hasattr(controlTypes, "roleLabels"):
choices = [controlTypes.roleLabels[int(k)] for k in braille.roleLabels.keys()]
self.labels = sHelper.addLabeledControl(_("&Role:"), wx.Choice, choices=choices)
self.labels.Bind(wx.EVT_CHOICE, self.onLabels)
self.label = sHelper.addLabeledControl(_("Braille &label"), wx.TextCtrl)
self.label.Bind(wx.EVT_TEXT, self.onLabel)
bHelper = gui.guiHelper.ButtonHelper(orientation=wx.HORIZONTAL)
self.resetLabelBtn = bHelper.addButton(self, wx.NewId(), _("&Reset this role label"), wx.DefaultPosition)
self.resetLabelBtn.Bind(wx.EVT_BUTTON, self.onResetLabelBtn)
self.resetAllLabelsBtn = bHelper.addButton(self, wx.NewId(), _("Reset a&ll role labels"), wx.DefaultPosition)
self.resetAllLabelsBtn.Bind(wx.EVT_BUTTON, self.onResetAllLabelsBtn)
sHelper.addItem(bHelper)
self.onToggleRoleLabels(None)
self.onCategories(None)
def onToggleRoleLabels(self, evt):
l = [
self.categories,
self.labels,
self.label,
self.resetLabelBtn,
self.resetAllLabelsBtn,
]
for e in l:
if self.toggleRoleLabels.IsChecked():
e.Enable()
else:
e.Disable()
def onCategories(self, event):
labels = []
idCategory = self.categories.GetSelection()
oldRoleLabels = hasattr(controlTypes, "roleLabels")
if idCategory == 0:
if oldRoleLabels:
labels = [controlTypes.roleLabels[int(k)] for k in braille.roleLabels.keys()]
else:
labels = [role.displayString for role in braille.roleLabels.keys()]
elif idCategory == 1:
labels = list(braille.landmarkLabels.keys())
elif idCategory == 2:
if oldRoleLabels:
labels = [controlTypes.stateLabels[k] for k in braille.positiveStateLabels.keys()]
else:
labels = [role.displayString for role in braille.positiveStateLabels.keys()]
elif idCategory == 3:
if oldRoleLabels:
labels = [controlTypes.stateLabels[k] for k in braille.negativeStateLabels.keys()]
else:
labels = [role.displayString for role in braille.negativeStateLabels.keys()]
for iLabel, label in enumerate(labels):
idLabel = getIDFromIndexes(idCategory, iLabel)
actualLabel = getLabelFromID(idCategory, idLabel)
originalLabel = self.getOriginalLabel(idCategory, idLabel, actualLabel)
labels[iLabel] += _(": %s") % actualLabel
if actualLabel != originalLabel: labels[iLabel] += " (%s)" % originalLabel
self.labels.SetItems(labels)
if idCategory > -1 and idCategory < 4: self.labels.SetSelection(0)
self.onLabels(None)
def onLabels(self, event):
idCategory = self.categories.GetSelection()
idLabel = getIDFromIndexes(idCategory, self.labels.GetSelection())
key = f"{idCategory}:{idLabel}"
if key in self.roleLabels.keys(): self.label.SetValue(self.roleLabels[key])
else: self.label.SetValue(self.getOriginalLabel(idCategory, idLabel))
def onLabel(self, evt):
idCategory = self.categories.GetSelection()
iLabel = self.labels.GetSelection()
idLabel = getIDFromIndexes(idCategory, iLabel)
key = "%d:%s" % (idCategory, idLabel)
label = self.label.GetValue()
if idCategory >= 0 and iLabel >= 0:
if self.getOriginalLabel(idCategory, idLabel, chr(4)) == label:
if key in self.roleLabels.keys():
self.roleLabels.pop(key)
else: self.roleLabels[key] = label
actualLabel = getLabelFromID(idCategory, idLabel)
originalLabel = self.getOriginalLabel(idCategory, idLabel, actualLabel)
if label != originalLabel: self.resetLabelBtn.Enable()
else: self.resetLabelBtn.Disable()
def onResetLabelBtn(self, event):
idCategory = self.categories.GetSelection()
iLabel = self.labels.GetSelection()
idLabel = getIDFromIndexes(idCategory, iLabel)
key = "%d:%s" % (idCategory, idLabel)
actualLabel = getLabelFromID(idCategory, idLabel)
originalLabel = self.getOriginalLabel(idCategory, idLabel, actualLabel)
self.label.SetValue(originalLabel)
self.onLabel(None)
self.label.SetFocus()
def onResetAllLabelsBtn(self, event):
nbCustomizedLabels = len(self.roleLabels)
if not nbCustomizedLabels:
msg = _("You have no customized role labels.")
res = gui.messageBox(msg, _("Reset role labels"),
wx.OK|wx.ICON_INFORMATION)
return
msg = _("You have %d customized role labels defined. Do you want to reset all labels?") % nbCustomizedLabels
flags = wx.YES|wx.NO|wx.ICON_INFORMATION
res = gui.messageBox(msg, _("Reset role labels"), flags)
if res == wx.YES:
self.roleLabels = {}
self.onCategories(None)
def getOriginalLabel(self, idCategory, idLabel, defaultValue = ''):
key = f"{idCategory}:{idLabel}"
if key in backupRoleLabels.keys():
return backupRoleLabels[key][1]
return getLabelFromID(idCategory, idLabel)
def postInit(self): self.toggleRoleLabels.SetFocus()
def onSave(self):
global roleLabels
config.conf["brailleExtender"]["features"]["roleLabels"] = self.toggleRoleLabels.IsChecked()
saveRoleLabels(self.roleLabels)
discardRoleLabels()
if config.conf["brailleExtender"]["features"]["roleLabels"]:
loadRoleLabels()
backupRoleLabels = {}
roleLabels = {}
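# Descriptive note (added for clarity; the mapping is taken from the functions below):
# category 0 = general roles (braille.roleLabels), 1 = landmarks (braille.landmarkLabels),
# 2 = positive states (braille.positiveStateLabels), 3 = negative states (braille.negativeStateLabels).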
def getIDFromIndexes(idCategory, idLabel):
oldRoleLabels = hasattr(controlTypes, "roleLabels")
if not isinstance(idCategory, int):
raise TypeError(f"Wrong type for idCategory ({idCategory})")
if not isinstance(idLabel, int):
raise TypeError(f"Wrong type for idLabel ({idLabel})")
idRole = -1
if idCategory == 0: idRole = list(braille.roleLabels.keys())[idLabel]
elif idCategory == 1: idRole = list(braille.landmarkLabels.keys())[idLabel]
elif idCategory == 2: idRole = list(braille.positiveStateLabels.keys())[idLabel]
elif idCategory == 3: idRole = list(braille.negativeStateLabels.keys())[idLabel]
else: raise ValueError(f"Wrong value for category ({idCategory})")
if not oldRoleLabels and isinstance(idRole, (controlTypes.Role, controlTypes.State)):
idRole = idRole.value
return idRole
def getLabelFromID(idCategory, idLabel):
if idCategory == 0: return braille.roleLabels[int(idLabel)]
if idCategory == 1: return braille.landmarkLabels[idLabel]
if idCategory == 2: return braille.positiveStateLabels[int(idLabel)]
if idCategory == 3: return braille.negativeStateLabels[int(idLabel)]
raise ValueError("Invalid value: %d" % idCategory)
def setLabelFromID(idCategory, idLabel, newLabel):
if idCategory == 0: braille.roleLabels[int(idLabel)] = newLabel
elif idCategory == 1: braille.landmarkLabels[idLabel] = newLabel
elif idCategory == 2: braille.positiveStateLabels[int(idLabel)] = newLabel
elif idCategory == 3: braille.negativeStateLabels[int(idLabel)] = newLabel
else:
raise ValueError(f"Unknown category {idCategory}")
def loadRoleLabels(roleLabels_=None):
global backupRoleLabels, roleLabels
roleLabels.clear()
if roleLabels_:
roleLabels.update(roleLabels_)
elif "roleLabels" in config.conf["brailleExtender"] and config.conf["brailleExtender"]["roleLabels"].copy():
roleLabels.update(config.conf["brailleExtender"]["roleLabels"].copy())
saveRoleLabels(roleLabels)
config.conf["brailleExtender"]["roleLabels"] = {}
elif os.path.exists(PATH_JSON):
f = open(PATH_JSON, "r", encoding="UTF-8")
try:
roleLabels.update(json.load(f))
except json.decoder.JSONDecodeError:
pass
f.close()
for k, v in roleLabels.items():
idCategory, idRole = k.split(':')
idCategory = int(idCategory)
backupRoleLabels[k] = (v, getLabelFromID(idCategory, idRole))
setLabelFromID(idCategory, idRole, v)
def saveRoleLabels(roleLabels_):
    with open(PATH_JSON, 'w', encoding="UTF-8") as f:
        json.dump(roleLabels_, f, ensure_ascii=False, indent=2)
def discardRoleLabels():
global backupRoleLabels, roleLabels
for k, v in backupRoleLabels.items():
idCategory, idRole = k.split(':')
idCategory = int(idCategory)
setLabelFromID(idCategory, idRole, v[1])
backupRoleLabels = {}
roleLabels = {}
|
aaclause/BrailleExtender
|
addon/globalPlugins/brailleExtender/rolelabels.py
|
rolelabels.py
|
py
| 8,877 |
python
|
en
|
code
| 15 |
github-code
|
6
|
17882061657
|
from demisto_sdk.commands.common.constants import CLASSIFIERS_DIR, PACKS_DIR
from demisto_sdk.commands.common.content.objects.pack_objects.abstract_pack_objects.json_content_object import \
JSONContentObject
from demisto_sdk.commands.common.tools import src_root
TEST_DATA = src_root() / 'tests' / 'test_files'
TEST_CONTENT_REPO = TEST_DATA / 'content_slim'
TEST_JSON_NO_FROM_VERSION = TEST_CONTENT_REPO / PACKS_DIR / 'Sample01' / CLASSIFIERS_DIR / 'classifier-sample_new.json'
def test_to_version_no_from_version(datadir):
from packaging.version import parse
obj = JSONContentObject(TEST_JSON_NO_FROM_VERSION, "classifier")
assert obj.from_version == parse("0.0.0")
assert obj.to_version == parse("4.0.0")
class TestFileWithStem:
def test_with_readme_change_log(self):
obj = JSONContentObject(TEST_JSON_NO_FROM_VERSION, "classifier")
assert obj.readme is not None
assert obj.changelog is not None
|
AdouniH/demisto-sdk
|
demisto_sdk/commands/common/content/tests/objects/pack_objects/abstract_pack_objects/json_content_object_test.py
|
json_content_object_test.py
|
py
| 952 |
python
|
en
|
code
| null |
github-code
|
6
|
33526673883
|
#Write a program that helps a MEGA SENA player create bets. The program asks how many games should be generated and draws 6 numbers between 1 and 60 for each game, storing everything in a nested list.
from random import randint
from time import sleep
jogos=int(input("Quantos jogos você deseja? "))
números_sorteados=[]
lista_jogos=[]
for c in range(jogos):
while True:
número=randint(1,60)
if número not in números_sorteados:
números_sorteados.append(número)
if len(números_sorteados)==6:
break
lista_jogos.append(números_sorteados[:])
números_sorteados.clear()
print(" MEGA SENA ")
print("-"*33)
for c in range(jogos):
print(f"{c+1}º jogo: {lista_jogos[c]}")
sleep(1)
print("-"*33)
print(" BOA SORTE!! ")
|
cauavsb/python
|
mundo-3-py/ex17.py
|
ex17.py
|
py
| 838 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
29147426513
|
'''Create a subroutine called "Login" that receives a username and a password and returns True if the username is "admin" and the password is "admin123*". It also receives the number of login attempts made so far and, if the login failed, increments that value.'''
def login(usuario="",contra=""):
    intentos=0
    while(intentos < 4):
        usuario=input("Ingrese el usuario:")
        contra=input("Ingrese la contraseña:")
        if(usuario=='admin' and contra=='admin123*'):
            print("Verdadero")
        else:
            print("Falso")
        intentos += 1
login()
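# Hedged alternative sketch (not part of the original solution): a version that
# follows the stated spec literally -- the subroutine takes the username, the
# password and the current attempt count, returns True only for "admin" /
# "admin123*", and hands back the (possibly incremented) attempt count.
# The name login_spec is hypothetical.
def login_spec(usuario, contra, intentos):
    if usuario == 'admin' and contra == 'admin123*':
        return True, intentos
    return False, intentos + 1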
|
insoul-code/proyectos-python
|
funciones/reto3.py
|
reto3.py
|
py
| 648 |
python
|
es
|
code
| 1 |
github-code
|
6
|
38044932492
|
import requests
import uuid
from datetime import datetime
import pandas as pd
# https://kcnew.ifrc.org/api/v1/forms find the kpi asset uid for forms here
#from settings import * #to import MYTOKEN and KPIASSETUID
##################
## RUN SETTINGS ##
##################
##https://kobonew.ifrc.org/token/?format=json
MYTOKEN = ""
#"kpi_asset_uid":
KPIASSETUID= ""
# https://kcnew.ifrc.org/api/v1/forms find the kpi asset uid
headers = {
'Authorization': f'Token {MYTOKEN}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
now = datetime.now()
current_time = now.strftime("%Y-%b-%d %I:%M %p")
# Specify the path to the Excel file to upload (First Qtr 2022)
file_path = 'data/ercs_base_wh_dummy.xlsx'
# Read the Excel file into a Pandas DataFrame
data_frame = pd.read_excel(file_path)
for index, row in data_frame.iterrows():
# Access values in each column for the current row
submission = {
'meta': {
'instanceID': f'uuid:{uuid.uuid4()}',
},
'Supplier_Donor':row['Supplier_Donor'],
'Local_or_Foreign_Receival':row['Local_or_Foreign_Receival'],
'Packing_List_Number':row['Packing_List_Number'],
'Certificate_of_Origin':row['Certificate_of_Origin'],
'Donation_Certificate':row['Donation_Certificate'],
'Waybill_Number':row['Waybill_Number'],
'Contract_Number':row['Contract_Number'],
'Invoice_Number':row['Invoice_Number'],
'Purchase_Requisition_Number':row['Purchase_Requisition_Number'],
'Department_Name':row['Department_Name'],
'Receiver':row['Receiver'],
'Purchase_Order':row['Purchase_Order'],
'Date_of_Reception':row['Date_of_Reception'].date().strftime("%Y-%m-%d"),
'Items_Inspected_approved':row['Items_Inspected_approved'],
'Received_By':row['Received_By'],
'Received_On':row['Received_On'].date().strftime("%Y-%m-%d"),
'Account_Number':row['Account_Number'],
'Project_code':row['Project_code'],
'Items':row['Items'],
'Remark':row['Remark'],
'EXPIRY_DATES':row['EXPIRY_DATES'].date().strftime("%Y-%m-%d"),
'Vender_Manufacturer_No':row['Vender_Manufacturer_No'],
'Vender_seiral_No':row['Vender_seiral_No'],
'Unit_of_Measure':row['Unit_of_Measure'],
'Quantity_Intial':row['Quantity_Intial'],
'Unit_Price':row['Unit_Price'],
'Currency_of_Purchase':row['Currency_of_Purchase'],
}
data_request = requests.post(
f'https://kcnew.ifrc.org/api/v1/submissions',
json={
"id": f"{KPIASSETUID}",
"submission": submission
},
headers=headers
)
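    # Hedged addition (not in the original script): a minimal per-row check,
    # assuming any non-2xx status from the submissions endpoint means the row failed.
    if not (200 <= data_request.status_code < 300):
        print(f"Row {index}: submission failed with HTTP {data_request.status_code}")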
|
aklilu/BachUploadToKobo
|
bathcuploadtokobo.py
|
bathcuploadtokobo.py
|
py
| 2,722 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24285372674
|
#! python3
# program to load current weather from api
# via cmd
# display for today and the next two days
# to run: currentWeather location
import json
import requests
import sys
if len(sys.argv) < 2:
    print('Usage: currentWeather <location>')
    sys.exit()
location = ' '.join(sys.argv[1:])
key = ''
# download
url = 'http://api.openweathermap.org/data/2.5/forecast/daily?q=%s&cnt=3&APPID=%s' % (location, key)
print(url)
try:
res = requests.get(url)
res.raise_for_status()
weatherData = json.loads(res.text)
print(weatherData)
w = weatherData['list']
print('Current Weather', w)
except Exception as e:
print(e)
|
chhatrachhorm/ABS
|
PythonStuff/JsonApi/currentWeather.py
|
currentWeather.py
|
py
| 632 |
python
|
en
|
code
| 5 |
github-code
|
6
|
19827881272
|
from flask import Flask, request
import json
import socket
import urllib.request as urllib2
import re
from functools import wraps
application = Flask(__name__)
CONFIG = json.load(open("config.json", "r"))
API_KEYS = CONFIG["api_keys"]
def requires_auth_key(func):
    @wraps(func)
    def wrapped(*args, **kwargs):
        api_key = request.form.get("api_key", None)
        if api_key not in API_KEYS:
            return "Unauthorized", 401
        else:
            if not API_KEYS[api_key]["enabled"]:
                return "Unauthorized", 401
        return func(*args, **kwargs)
    return wrapped
@application.route('/carbon/metrics', methods=["POST"])
@requires_auth_key
def post_metric():
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((CONFIG["carbon"]["host"], int(CONFIG["carbon"]["port"])))
    except Exception as e:
        return "<h2>Error: %s</h2>" % e, 500
    else:
        data = request.form.get('data')
        if data is not None:
            # Split a single "data" field into "metric value timestamp" lines
            data = re.findall(r"([\w\.]+\ [\S]+\ [\d]+)", data, re.MULTILINE)
        else:
            data = request.form.getlist('data[]')
        sentCmd = 0
        for entry in data:
            matches = re.findall(r"([\w\.]+\ [\S]+\ [\d]+)", entry)
            if not matches:
                continue
            line = matches[0]
            if len(line) < 10:
                continue
            line += "\n"
            #print(("Send:" + line).encode('utf8'))
            s.send(line.encode('utf8'))
            sentCmd += 1
        s.close()
        if sentCmd < 1:
            return "NOTHING SENT TO SERVER. BADLY FORMATTED STRING/VAR?", 202
        return "OK", 200
    return "Unknown error", 500
@application.route('/carbon/events', methods=["POST"])
@requires_auth_key
def post_event():
req = urllib2.Request('http://{host}:{port}/events'.format(**CONFIG["graphite"]),
data=request.form.get('data').encode('utf8'), headers={'Content-type': 'application/json'})
try:
urllib2.urlopen(req)
except Exception as e:
return "<h2>Error: %s</h2>" % e, 500
else:
return "OK", 200
return "Unkown error", 500
if __name__ == "__main__":
application.run(debug=False, use_reloader=False, host="127.0.0.1", port=8081, threaded=True)
|
s0lesurviv0r/graphite_http_relay
|
main.py
|
main.py
|
py
| 2,262 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21988639916
|
# update.py
import requests
import json
import tarfile
url = "https://ddragon.leagueoflegends.com/api/versions.json"
response = requests.get(url)
obj = response.json()
patch = str(obj[0])
zipUrl = "https://ddragon.leagueoflegends.com/cdn/dragontail-" + patch + ".tgz"
print(zipUrl)
data = requests.get(zipUrl)
with open("src/assets/prev-data/dragontail-" + patch + ".tgz", 'wb') as f:
# opening the file in write mode
f.write(data.content)
tgzFile = tarfile.open("src/assets/prev-data/dragontail-10.22.1.tgz", 'r')
print('Extracting one file...')
tgzFile.extractall('src/assets/prev-data/data-hold')
print('Extracting Done!')
tgzFile.close()
|
ryanweston/lol-skills
|
src/assets/update.py
|
update.py
|
py
| 659 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72066886907
|
import sys, os, re
import unittest
from itertools import product as prod
from timeit import Timer
import time
import math
import logging
import numpy as np
from scipy.optimize import fmin, fmin_bfgs
from hydrodiy.stat.transform import BoxCox2
from hydrodiy.data.containers import Vector
from pygme.model import Model, ParameterCheckValueError
from pygme.calibration import Calibration, CalibParamsVector
from pygme.calibration import ObjFunSSE, ObjFunBCSSE, \
ObjFunKGE, ObjFunBiasBCSSE
from pygme.calibration import CalibrationExplorationError
from dummy import Dummy, CalibrationDummy, ObjFunSSEargs
BC = BoxCox2()
# Set logger
LOGGER = logging.getLogger('pygme.Calibration')
fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
ft = logging.Formatter(fmt)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(ft)
LOGGER.addHandler(sh)
class ObjFunTestCase(unittest.TestCase):
def setUp(self):
print('\t=> ObjFunTestCase')
nval = 1000
obs = np.random.uniform(0., 1, size=nval)
idx = np.random.choice(np.arange(nval), nval//100)
obs[idx] = np.nan
self.obs = obs
sim = np.random.uniform(0., 1, size=nval)
idx = np.random.choice(np.arange(nval), nval//100)
sim[idx] = np.nan
self.sim = sim
def test_print(self):
of = ObjFunBCSSE(0.2)
print(of)
of = ObjFunSSE()
print(of)
of = ObjFunKGE()
print(of)
def test_SSE(self):
obs, sim = self.obs, self.sim
idx = (~np.isnan(obs)) & (~np.isnan(sim))
of = ObjFunSSE()
value = of.compute(obs[idx], sim[idx])
err = self.obs-self.sim
expected = np.nansum(err*err)
self.assertTrue(np.allclose(value, expected))
value = of.compute(obs, sim)
self.assertTrue(np.isnan(value))
def test_KGE(self):
of = ObjFunKGE()
obs, sim = self.obs, self.sim
idx = (~np.isnan(obs)) & (~np.isnan(sim))
value = of.compute(obs[idx], sim[idx])
obsok, simok = obs[idx], sim[idx]
bias = np.mean(simok)/np.mean(obsok)
rstd = np.std(simok)/np.std(obsok)
corr = np.corrcoef(obsok, simok)[0, 1]
expected = 1-math.sqrt((1-bias)**2+(1-rstd)**2+(1-corr)**2)
self.assertTrue(np.allclose(value, expected))
value = of.compute(obs, sim)
self.assertTrue(np.isnan(value))
def test_BCSSE(self):
''' test the BCSSE objfun '''
obs, sim = self.obs, self.sim
idx = (~np.isnan(obs)) & (~np.isnan(sim))
for lam, nu in prod([0.1, 0.5, 1., 2], [1e-4, 1e-2, 1]):
of = ObjFunBCSSE(lam, nu)
assert of.name == f"BCSSE{lam:0.1f}"
value = of.compute(obs[idx], sim[idx])
BC.lam = lam
BC.nu = nu
err = BC.forward(obs)-BC.forward(sim)
expected = np.nansum(err*err)
self.assertTrue(np.isclose(value, expected))
value = of.compute(obs, sim)
self.assertTrue(np.isnan(value))
def test_BiasBCSSE(self):
''' test the BiasBCSSE objfun '''
obs, sim = self.obs, self.sim
idx = (~np.isnan(obs)) & (~np.isnan(sim))
mo = obs[idx].mean()
ms = sim[idx].mean()
for lam, nu in prod([0.1, 0.5, 1., 2], [1e-4, 1e-2, 1]):
of = ObjFunBiasBCSSE(lam, nu)
assert of.name == f"BiasBCSSE{lam:0.1f}"
value = of.compute(obs[idx], sim[idx])
BC.lam = lam
BC.nu = nu
err = BC.forward(obs)-BC.forward(sim)
expected = np.nansum(err*err)*(1+abs(ms-mo)/mo)
self.assertTrue(np.isclose(value, expected))
value = of.compute(obs, sim)
self.assertTrue(np.isnan(value))
class CalibParamsVectorTestCase(unittest.TestCase):
def setUp(self):
print('\t=> CalibParamsVectorTestCase')
config = Vector([])
nval = 10
params = Vector(['X{0}'.format(k) for k in range(1, nval+1)],
defaults=np.ones(nval), mins=np.zeros(nval), \
maxs=np.ones(nval)*5)
states = Vector(['S{0}'.format(k) for k in range(1, 3)])
self.model = Model('test', config, params, states, 2, 2)
def test_default(self):
''' Test setting default values '''
calparams = CalibParamsVector(self.model)
self.assertTrue(np.all([s1==s2 for s1, s2 in zip(calparams.names, \
self.model.params.names)]))
self.assertTrue(np.allclose(calparams.defaults, \
self.model.params.defaults))
def test_errors_infinite(self):
''' Test errors for finite values in calibrated params '''
nval = self.model.params.nval
cp = Vector(['X{0}'.format(k) for k in range(1, nval+1)])
try:
calparams = CalibParamsVector(self.model, cp)
except ValueError as err:
self.assertTrue(str(err).startswith('Expected no infinite'))
else:
raise ValueError('Problem with error handling')
def test_errors_funs(self):
''' Test errors related to trans2true and true2trans '''
nval = self.model.params.nval
cp = Vector(['X{0}'.format(k) for k in range(1, nval+1)])
cp = Vector(['tX{0}'.format(k) for k in range(1, nval+1)],\
defaults=[0]*nval, mins=[-1]*nval, maxs=[1]*nval)
fun1 = lambda x: 'string1'
fun2 = lambda x: 'string2'
try:
calparams = CalibParamsVector(self.model, cp, fun1, fun2)
except ValueError as err:
self.assertTrue(str(err).startswith(\
'Problem with trans2true for'))
else:
raise ValueError('Problem with error handling')
fun = lambda x: np.column_stack([x, x])
try:
calparams = CalibParamsVector(self.model, cp, fun, fun)
except ValueError as err:
self.assertTrue(str(err).startswith(\
'Problem with trans2true for'))
else:
raise ValueError('Problem with error handling')
def test_identity(self):
nval = self.model.params.nval
cp = Vector(['tX{0}'.format(k) for k in range(1, nval+1)],\
defaults=[0]*nval, mins=[-1]*nval, maxs=[1]*nval)
calparams = CalibParamsVector(self.model, cp)
for i in range(10):
val = np.random.uniform(0, 1, nval)
calparams.values = val
self.assertTrue(np.allclose(self.model.params.values, val))
val = np.random.uniform(0, 1, nval)
calparams.truevalues = val
self.assertTrue(np.allclose(calparams.values, val))
def test_common_transform(self):
nval = self.model.params.nval
cp = Vector(['tX{0}'.format(k) for k in range(1, nval+1)],\
defaults=[0]*nval, mins=[-1]*nval, maxs=[1]*nval)
for i, trans in enumerate(['exp', 'sinh']):
calparams = CalibParamsVector(self.model, cp, trans2true=trans)
if i == 0:
trans2true = np.exp
true2trans = np.log
elif i == 1:
trans2true = np.sinh
true2trans = np.arcsinh
for i in range(10):
val = np.random.uniform(0, 1, nval)
calparams.values = val
self.assertTrue(np.allclose(calparams.truevalues, \
trans2true(val)))
self.assertTrue(np.allclose(self.model.params.values, \
trans2true(val)))
val = np.random.uniform(math.exp(-1), 1, nval)
calparams.truevalues = val
self.assertTrue(np.allclose(calparams.values, \
true2trans(val)))
def test_fixed(self):
nval = self.model.params.nval
cp = Vector(['tX{0}'.format(k) for k in range(1, nval+1)],\
defaults=[0]*nval, mins=[-5]*nval, maxs=[5]*nval)
# Choose a fixed value below the max value
x1 = 4
fixed = {'X1':x1}
calparams = CalibParamsVector(self.model, cp, fixed=fixed)
for i in range(10):
val = np.random.uniform(0, 1, nval)
calparams.values = val
val2 = val.copy()
val2[0] = x1
self.assertTrue(np.allclose(self.model.params.values, val2))
val = np.random.uniform(0, 1, nval)
calparams.truevalues = val
val2 = val.copy()
val2[0] = x1
self.assertTrue(np.allclose(calparams.truevalues, val2))
self.assertTrue(np.allclose(calparams.values, val2))
class CalibrationTestCase(unittest.TestCase):
def setUp(self):
print('\t=> CalibrationTestCase')
# Create random inputs
inputs = np.random.exponential(1, (100, 2))
# Allocate model
dum = Dummy()
dum.allocate(inputs, 2)
        # Run the model to create a pseudo obs
params = dum.params.defaults+0.1
dum.params.values = params
dum.run()
obs = dum.outputs[:, 0].copy()
# Store calibration set up
self.inputs = inputs
self.params = params
self.obs = obs
self.ical = np.arange(10, obs.shape[0])
def test_calibration_instance_print(self):
''' Test printing of calibration object '''
calib = CalibrationDummy(warmup=10)
calib.allocate(self.obs, self.inputs)
str = '{0}'.format(calib)
def test_calibration_errors(self):
''' Test calibration errors '''
inputs = np.random.uniform(0, 1, (1000, 2))
obs = np.random.uniform(0, 1, 1000)
cp = Vector(['tX1', 'tX2'], mins=[-10]*2, maxs=[10]*2, \
defaults=[1, 0])
calparams = CalibParamsVector(Dummy(), cp, trans2true='exp')
calib = Calibration(calparams)
try:
plib = calib.paramslib
except ValueError as err:
self.assertTrue(str(err).startswith(\
'Trying to access paramslib, but '))
else:
raise ValueError('Problem with error handling')
try:
calib.ical = obs==obs
except ValueError as err:
self.assertTrue(str(err).startswith('Trying to get obs, but '))
else:
raise ValueError('Problem with error handling')
def test_explore(self):
''' Test explore function '''
calib = CalibrationDummy(warmup=10)
plib = np.random.uniform(-0.1, 0.1, size=(1000, 2)) \
+ self.params[None, :]
calib.paramslib = plib
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, explo_ofun = calib.explore()
self.assertTrue(np.allclose(start, self.params, rtol=0., atol=0.05))
def test_explore_error(self):
''' Test calibration exploration error '''
class ObjFunError(ObjFunSSE):
''' Sum of squared error objective function '''
def __init__(self):
super(ObjFunError, self).__init__()
self.name = 'Error'
def compute(self, obs, sim, **kwargs):
of = super(ObjFunError, self).compute(obs, sim)
if of < 1e-1:
# This is a stupid error generation
# we use it just for testing
raise ValueError('Error in exploration')
return of
calib = CalibrationDummy(warmup=10, objfun=ObjFunError())
plib = np.random.uniform(-0.1, 0.1, size=(1000, 2)) \
+ self.params[None, :]
calib.paramslib = plib
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, explo_ofun = calib.explore()
# Check that no objective function is below 1e-1
# because the objective function does not allow it
self.assertTrue(np.all(explo_ofun > 1e-1))
# Check that we trigger an error during exploration
try:
start, _, explo_ofun = calib.explore(raise_error=True)
except CalibrationExplorationError as err:
self.assertTrue(str(err).startswith('Error in explo'))
else:
raise ValueError('Problem with error handling')
def test_explore_fit(self):
''' Test explore and fit functions '''
calib = CalibrationDummy(warmup=10)
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, _ = calib.explore()
final, _, _ = calib.fit(iprint=10,
maxfun=100000, ftol=1e-8)
ck = np.allclose(calib.model.params.values, self.params, \
atol=1e-3, rtol=0.)
self.assertTrue(ck)
def test_fit_args(self):
''' Test passing arguments to objective function '''
kwargs = {'lam':1.0, 'idx':np.arange(len(self.ical))}
calib = CalibrationDummy(objfun=ObjFunSSEargs(), \
warmup=10, \
objfun_kwargs=kwargs)
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, _ = calib.explore()
final, _, _ = calib.fit(iprint=10,
maxfun=100000, ftol=1e-8)
ck = np.allclose(calib.model.params.values, self.params, \
atol=1e-3, rtol=0.)
self.assertTrue(ck)
def test_checkvalues(self):
def fun(values):
if values[1] < 0.5:
raise ParameterCheckValueError
calib = CalibrationDummy(warmup=10, checkvalues=fun)
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, ofuns = calib.explore()
idx = calib.paramslib[:, 1] < 0.5
self.assertTrue(np.all(np.isinf(ofuns[idx])))
def test_fixed(self):
''' Test calibration with fixed parameters '''
# Test error
fixed = {'X10':self.params[0]+3}
try:
calib = CalibrationDummy(warmup=10, fixed=fixed)
except ValueError as err:
self.assertTrue(str(err).startswith('Expected names '+\
'of fixed parameters'))
else:
raise ValueError('Problem with error handling')
fixed = {'X1':self.params[0]+3}
calib = CalibrationDummy(warmup=10, fixed=fixed)
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, _ = calib.explore()
final, _, _ = calib.fit(iprint=10,
maxfun=100000, ftol=1e-8)
self.assertEqual(fixed, calib.fixed)
self.assertTrue(np.allclose(fixed['X1'], start[0]))
self.assertTrue(np.allclose(fixed['X1'], final[0]))
self.assertTrue(np.allclose(fixed['X1'], \
calib.model.params.values[0]))
def test_workflow(self):
''' Test calibration workflow (i.e. explore+fit) '''
calib = CalibrationDummy(warmup=10)
# Check parameter are not close at the beginning
ck = ~np.allclose(calib.model.params.values, self.params)
self.assertTrue(ck)
# Run calibration
calib.workflow(self.obs, self.inputs, self.ical, iprint=0,
maxfun=100000, ftol=1e-8)
# Test parameters at the end
ck = np.allclose(calib.model.params.values, self.params, \
atol=1e-5, rtol=0.)
self.assertTrue(ck)
def test_customised_objfun(self):
''' Test customised objective function '''
# Define a customized objective function
objfun = ObjFunBCSSE(lam=0.8, nu=1e-5)
        # Instantiate a new calib object and apply objfun
calib = CalibrationDummy(warmup=10, objfun=objfun)
# Check parameter are not close at the beginning
ck = ~np.allclose(calib.model.params.values, self.params)
self.assertTrue(ck)
# Run calibration
calib.workflow(self.obs, self.inputs, self.ical, iprint=0,
maxfun=100000, ftol=1e-8)
# Test parameters at the end
ck = np.allclose(calib.model.params.values, self.params, atol=1e-3)
self.assertTrue(ck)
def test_optimizers(self):
''' Test a range of optimizer from scipy '''
calib = CalibrationDummy(objfun=ObjFunSSE(), \
warmup=10)
calib.allocate(self.obs, self.inputs)
calib.ical = self.ical
start, _, _ = calib.explore()
for iopt, opt in enumerate([fmin, fmin_bfgs]):
if opt.__name__ in ['fmin', 'fmin_powell']:
kwargs = dict(maxfun=100000, ftol=1e-8)
else:
kwargs = dict(maxiter=100000, gtol=1e-8)
final, _, _ = calib.fit(start=start, iprint=10, optimizer=opt, \
**kwargs)
ck = np.allclose(calib.model.params.values, self.params, \
atol=5e-3, rtol=0.)
if not ck:
print(('Failing optimizer test {0} '+\
'expected params={1}, got {2}').format(\
iopt+1, \
' '.join(list(np.round(\
self.params, 2).astype(str))), \
' '.join(list(np.round(\
calib.model.params.values, 2).astype(str)))
))
self.assertTrue(ck)
if __name__ == "__main__":
unittest.main()
|
csiro-hydroinformatics/pygme
|
tests/test_pygme_calibration.py
|
test_pygme_calibration.py
|
py
| 17,767 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41603934185
|
from unittest import TestCase
import numpy as np
import phi
from phi import math
from phi.math import channel, batch
from phi.math._shape import CHANNEL_DIM, BATCH_DIM, shape_stack, spatial
from phi.math._tensors import TensorStack, CollapsedTensor, wrap, tensor
from phi.math.backend import Backend
BACKENDS = phi.detect_backends()
class TestTensors(TestCase):
def test_tensor_from_constant(self):
for backend in BACKENDS:
with backend:
for const in (1, 1.5, True, 1+1j):
tens = math.wrap(const)
self.assertEqual(math.NUMPY, tens.default_backend)
self.assertTrue(isinstance(tens.native(), (int, float, bool, complex)), msg=backend)
math.assert_close(tens, const)
tens = math.tensor(const)
self.assertEqual(backend, math.choose_backend(tens), f'{const} was not converted to the specified backend')
math.assert_close(tens, const)
def test_tensor_from_native(self):
for creation_backend in BACKENDS:
native = creation_backend.ones((4,))
for backend in BACKENDS:
with backend:
tens = math.tensor(native, convert=False)
self.assertEqual(creation_backend, tens.default_backend)
math.assert_close(tens, native)
tens = math.tensor(native)
self.assertEqual(backend, tens.default_backend, f'Conversion failed from {creation_backend} to {backend}')
math.assert_close(tens, native)
def test_tensor_from_tuple_of_numbers(self):
data_tuple = (1, 2, 3)
for backend in BACKENDS:
with backend:
tens = math.tensor(data_tuple, convert=False)
self.assertEqual(math.NUMPY, math.choose_backend(tens))
math.assert_close(tens, data_tuple)
tens = math.tensor(data_tuple)
self.assertEqual(backend, math.choose_backend(tens))
math.assert_close(tens, data_tuple)
def test_tensor_from_tuple_of_tensor_like(self):
native = ([1, 2, 3], math.zeros(channel(vector=3)))
for backend in BACKENDS:
with backend:
tens = wrap(native, batch(stack=2), channel(vector=3))
self.assertEqual(math.NUMPY, math.choose_backend(tens))
self.assertEqual(batch(stack=2) & channel(vector=3), tens.shape)
tens = tensor(native, batch(stack=2), channel(vector=3))
self.assertEqual(backend, math.choose_backend(tens))
self.assertEqual(batch(stack=2) & channel(vector=3), tens.shape)
def test_tensor_from_tensor(self):
ref = math.stack([math.zeros(spatial(x=5)), math.zeros(spatial(x=4))], batch('stack'))
for backend in BACKENDS:
with backend:
tens = math.tensor(ref, convert=False)
self.assertEqual(math.NUMPY, math.choose_backend(tens))
self.assertEqual(2, tens.shape.get_size('stack'))
self.assertEqual(('stack', 'x'), tens.shape.names)
tens = math.tensor(ref)
self.assertEqual(backend, math.choose_backend(tens))
self.assertEqual(backend, math.choose_backend(tens.stack[0]))
self.assertEqual(backend, math.choose_backend(tens.stack[1]))
tens = math.tensor(ref, batch('n1', 'n2'))
self.assertEqual(backend, math.choose_backend(tens))
def test_multi_dim_tensor_from_numpy(self):
v = math.tensor(np.ones([1, 4, 3, 2]), batch('batch'), spatial('x,y'), channel('vector'))
self.assertEqual((1, 4, 3, 2), v.shape.sizes)
v = math.tensor(np.ones([10, 4, 3, 2]), batch('batch'), spatial('x,y'), channel('vector'))
self.assertEqual((10, 4, 3, 2), v.shape.sizes)
def test_native_constant_ops(self):
v = math.tensor(np.ones([1, 4, 3, 2]), batch('batch'), spatial('x,y'), channel('vector'))
math.assert_close(v + 1, 2)
math.assert_close(v * 3, 3)
math.assert_close(v / 2, 0.5)
math.assert_close(v ** 2, 1)
math.assert_close(2 ** v, 2)
math.assert_close(v + [0, 1], [1, 2])
def test_native_native_ops(self):
v = math.ones(batch(batch=10) & spatial(x=4, y=3) & channel(vector=2))
d = v.unstack('vector')[0]
math.assert_close(v + d, d + v, 2)
math.assert_close(v * d, d * v, 1)
def test_native_unstack(self):
v = math.ones(batch(batch=10), spatial(x=4, y=3), channel(vector=2))
vx, vy = v.vector.unstack()
self.assertEqual((10, 4, 3), vx.shape.sizes)
self.assertEqual(4, len(v.x.unstack()))
self.assertEqual(10, len(v.batch.unstack()))
def test_native_slice(self):
v = math.ones(batch(batch=10), spatial(x=4, y=3), channel(vector=2))
self.assertEqual((10, 4, 3), v.vector[0].shape.sizes)
self.assertEqual((10, 2, 2), v.y[0:2].x[0].shape.sizes)
def test_stacked_shapes(self):
t0 = math.ones(batch(batch=10) & spatial(x=4, y=3) & channel(vector=2))
for dim in t0.shape.names:
tensors = t0.unstack(dim)
stacked = math.stack(tensors, t0.shape[dim].with_sizes([None]))
self.assertEqual(set(t0.shape.names), set(stacked.shape.names))
self.assertEqual(t0.shape.volume, stacked.shape.volume)
def test_stacked_native(self):
t0 = math.ones(batch(batch=10) & spatial(x=4, y=3) & channel(vector=2))
tensors = t0.unstack('vector')
stacked = math.stack(tensors, channel('vector2'))
math.assert_close(stacked, t0)
self.assertEqual((10, 4, 3, 2), stacked.native(stacked.shape).shape)
self.assertEqual((4, 3, 2, 10), stacked.native(order=('x', 'y', 'vector2', 'batch')).shape)
self.assertEqual((2, 10, 3, 4), stacked.native(order=('vector2', 'batch', 'y', 'x')).shape) # this should re-stack since only the stacked dimension position is different
def test_stacked_get(self):
t0 = math.ones(batch(batch=10) & spatial(x=4, y=3) & channel(vector=2))
tensors = t0.unstack('vector')
stacked = math.stack(tensors, channel('channel'))
self.assertEqual(tensors, stacked.channel.unstack())
assert tensors[0] is stacked.channel[0]
assert tensors[1] is stacked.channel[1:2].channel.unstack()[0]
self.assertEqual(4, len(stacked.x.unstack()))
def test_shape_math(self):
vector = math.ones(spatial(x=4, y=3) & channel(vector=2))
vector *= vector.shape.spatial
math.assert_close(vector.vector[0], 4)
math.assert_close(vector.vector[1], 3)
def test_collapsed(self):
scalar = math.zeros(spatial(x=4, y=3))
math.assert_close(scalar, 0)
self.assertEqual((4, 3), scalar.shape.sizes)
self.assertEqual(4, scalar.y[0].shape.size)
self.assertEqual(0, scalar.y[0].x[0].shape.rank)
self.assertEqual(3, len(scalar.y.unstack()))
def test_collapsed_op2(self):
# Collapsed + Collapsed
a = math.zeros(channel(vector=4))
b = math.ones(batch(batch=3))
c = a + b
self.assertIsInstance(c, CollapsedTensor)
self.assertEqual(c.shape.volume, 12)
self.assertEqual(c._inner.shape.volume, 1)
# Collapsed + Native
n = math.ones(channel(vector=3)) + (0, 1, 2)
math.assert_close(n, (1, 2, 3))
def test_semi_collapsed(self):
scalar = math.ones(spatial(x=4, y=3))
scalar = CollapsedTensor(scalar, scalar.shape._expand(batch(batch=10)))
self.assertEqual((10, 4, 3), scalar.shape.sizes)
self.assertEqual(4, len(scalar.x.unstack()))
self.assertEqual(10, len(scalar.batch.unstack()))
self.assertEqual(0, scalar.y[0].batch[0].x[0].shape.rank)
def test_zeros_nonuniform(self):
nonuniform = shape_stack(batch('stack'), batch(time=1) & spatial(x=3, y=3), spatial(x=3, y=4), channel())
self.assertEqual(math.zeros(nonuniform).shape, nonuniform)
self.assertEqual(math.ones(nonuniform).shape, nonuniform)
self.assertEqual(math.random_normal(nonuniform).shape, nonuniform)
self.assertEqual(math.random_uniform(nonuniform).shape, nonuniform)
def test_repr(self):
print("--- Eager ---")
print(repr(math.zeros(batch(b=10))))
print(repr(math.zeros(batch(b=10)) > 0))
print(repr(math.ones(channel(vector=3))))
print(repr(math.ones(batch(vector=3))))
def tracable(x):
print(x)
return x
print("--- Placeholders ---")
for backend in BACKENDS:
if backend.supports(Backend.jit_compile):
with backend:
math.jit_compile(tracable)(math.ones(channel(vector=3)))
def test_tensor_like(self):
class Success(Exception): pass
class MyObjV:
def __init__(self, x):
self.x = x
def __value_attrs__(self):
return 'x',
def __with_tattrs__(self, **tattrs):
math.assert_close(tattrs['x'], 1)
raise Success
class MyObjT:
def __init__(self, x1, x2):
self.x1 = x1
self.x2 = x2
def __variable_attrs__(self):
return 'x1', 'x2'
v = MyObjV(math.wrap(0))
t = MyObjT(math.wrap(0), math.wrap(1))
self.assertIsInstance(v, math.TensorLike)
self.assertIsInstance(t, math.TensorLike)
try:
math.cos(v)
except Success:
pass
try:
math.cos(t)
except AssertionError:
pass
def test_Dict(self):
d1 = math.Dict(a=1, b=math.ones(), c=math.ones(spatial(x=3)))
math.assert_close(d1 * 2, d1 + d1, 2 * d1, 2 / d1)
math.assert_close(0 + d1, d1, d1 - 0, abs(d1), round(d1))
math.assert_close(-d1, 0 - d1)
math.assert_close(d1 // 2, d1 * 0, d1 % 1)
math.assert_close(d1 / 2, d1 * 0.5, 0.5 * d1)
math.assert_close(math.sin(d1 * 0), d1 * 0)
def test_collapsed_non_uniform_tensor(self):
non_uniform = math.stack([math.zeros(spatial(a=2)), math.ones(spatial(a=3))], batch('b'))
e = math.expand(non_uniform, channel('vector'))
assert e.shape.without('vector') == non_uniform.shape
|
Brian-Hsieh/shapeOptim
|
phiflow/tests/commit/math/test__tensors.py
|
test__tensors.py
|
py
| 10,515 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32094781612
|
import sys
sys.stdin = open("input.txt", "r")
from collections import Counter
A = int(input())
B = int(input())
C = int(input())
X = str(A*B*C)
counts = Counter(X)
for n in range(0, 10):
    N = str(n)
    if N in counts:
        print(counts[N])
    else:
        print(0)
|
doll2gom/TIL
|
KDT/week4/01.19/2577.py
|
2577.py
|
py
| 267 |
python
|
en
|
code
| 2 |
github-code
|
6
|
21254950435
|
"""
La fonction pascal renvoit une liste correspondant au triangle de Pascal de la ligne 1 à la ligne n où
n est un nombre entier supérieur ou égal à 2 (le tableau sera contenu dans la variable C).
La variable Ck doit, quant à elle, contenir, à l’étape numéro k, la k-ième ligne du tableau.
"""
def pascal(n):
C= [[1]]
for k in range(1,n+1):
Ck = [1]
for i in range(1,k):
Ck.append(C[k-1][i-1]+C[k-1][i] )
Ck.append(1)
C.append(Ck)
return C
pascal(10)
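# Illustrative check (added note, not in the original file): pascal(3) returns
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]], i.e. each row k is built from the
# pairwise sums of row k-1 with a 1 at both ends.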
|
SwordLoveDev/AlgorithmBasicPython
|
tableauPascal.py
|
tableauPascal.py
|
py
| 542 |
python
|
fr
|
code
| 3 |
github-code
|
6
|
27132126928
|
import logging
import logging.config
import redis
from rq import Connection, Queue
from agent.agents import get_agent_info
from plugins.patching.os_apps.incoming_updates import \
incoming_packages_from_agent
from plugins.patching.custom_apps.custom_apps import \
add_custom_app_to_agents
from plugins.patching.supported_apps.syncer import \
get_all_supported_apps_for_agent, get_all_agent_apps_for_agent
rq_host = 'localhost'
rq_port = 6379
rq_db = 0
rq_pool = redis.StrictRedis(host=rq_host, port=rq_port, db=rq_db)
logging.config.fileConfig('/opt/TopPatch/conf/logging.config')
logger = logging.getLogger('rvapi')
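# Descriptive note (added for clarity): RvHandOff fans agent data out to background
# workers -- each add_* method below enqueues a job on the 'incoming_updates' RQ queue
# (custom apps, supported apps, agent apps, and the packages reported by the agent).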
class RvHandOff():
def __init__(self, username, customer_name, uri, method,
agentid, rv_plugin, agent_data=None,
oper_type='newagent', delete_afterwards=True):
self.delete_afterwards = delete_afterwards
self.customer_name = customer_name
if not agent_data:
agent_data = get_agent_info(
agentid=agentid
)
self.add_packages_from_agent(
username, agentid,
agent_data, rv_plugin
)
if oper_type == 'newagent':
self.add_custom_apps(
username, customer_name,
uri, method, agentid
)
self.add_supported_apps(agentid)
self.add_agent_apps(agentid)
elif oper_type == 'updatesapplications':
self.add_supported_apps(agentid)
self.add_agent_apps(agentid)
def add_custom_apps(self, username, customer_name,
uri, method, agentid):
rv_q = Queue('incoming_updates', connection=rq_pool)
rv_q.enqueue_call(
func=add_custom_app_to_agents,
args=(
username, customer_name,
uri, method, None, agentid
),
timeout=3600
)
def add_supported_apps(self, agentid):
rv_q = Queue('incoming_updates', connection=rq_pool)
rv_q.enqueue_call(
func=get_all_supported_apps_for_agent,
args=(
agentid,
),
timeout=3600
)
def add_agent_apps(self, agentid):
rv_q = Queue('incoming_updates', connection=rq_pool)
rv_q.enqueue_call(
func=get_all_agent_apps_for_agent,
args=(
agentid,
),
timeout=3600
)
def add_packages_from_agent(self, username, agent_id, agent_data, apps):
rv_q = Queue('incoming_updates', connection=rq_pool)
rv_q.enqueue_call(
func=incoming_packages_from_agent,
args=(
username, agent_id,
self.customer_name,
agent_data['os_code'], agent_data['os_string'],
apps, self.delete_afterwards
),
timeout=3600
)
|
SteelHouseLabs/vFense
|
tp/src/receiver/rvhandler.py
|
rvhandler.py
|
py
| 2,924 |
python
|
en
|
code
| 5 |
github-code
|
6
|
23327135383
|
import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import settings
logging.basicConfig(filename='bot.log', level=logging.INFO)
# Proxy settings. Used purely out of interest
PROXY = {'proxy_url': settings.PROXY_URL,
'urllib3_proxy_kwargs': {'username': settings.PROXY_USERNAME, 'password': settings.PROXY_PASSWORD}}
def greet_user(update, context):
print('Вызван /start')
# print(update)
update.message.reply_text('Привет, пользователь! Ты вызвал команду /start')
def talk_to_me(update, context):
user_text = update.message.text
print(user_text)
update.message.reply_text(user_text)
def main():
    # Create the bot and pass it the token issued by BotFather when our bot was registered
    mybot = Updater(settings.API_KEY, use_context=True, request_kwargs=PROXY)
    dp = mybot.dispatcher  # start the dispatcher
    dp.add_handler(CommandHandler('start', greet_user))  # register the handler
    dp.add_handler(MessageHandler(Filters.text, talk_to_me))
    # Enable logging
    logging.info("Бот стартовал")
    # Start polling Telegram for new messages
    mybot.start_polling()
    # Run the bot. It will keep working until it is stopped manually.
    mybot.idle()
if __name__ == "__main__":
main()
|
SanuNak/mybot
|
bot.py
|
bot.py
|
py
| 1,646 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
14993235685
|
# Import the url module
from django.conf.urls import url
# Import the view functions
from .views import *
app_name="booktest"
urlpatterns=[
# url('myurl/',myview)
# url(r'^index/$',index),
#
url(r'^$',index,name="index"),
# url(r'^$',indexView.as_view(),name="index"),
# url(r'^$',indexTemplateView.as_view(),name="index"),
# url(r'^list/$',listView.as_view(),name="list"),
url(r'^list/$',list,name="list"),
url(r'^detail/(\d+)/$',detail,name="detail"),
url(r'^deletebook/(\d+)/$',deletebook,name="deletebook"),
url(r'^addhero/(\d+)/$',addhero,name="addhero"),
url(r'^deletehero/(\d+)/$',deletehero,name="deletehero"),
url(r'^addads/$',addads,name="addads"),
]
|
pan0527/chenpan
|
demo1/booktest/urls.py
|
urls.py
|
py
| 712 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22757452562
|
# stdlib
import unittest
# project
from stackstate_checks.splunk.config import AuthType, SplunkInstanceConfig
from stackstate_checks.base.errors import CheckException
mock_defaults = {
'default_request_timeout_seconds': 5,
'default_search_max_retry_count': 3,
'default_search_seconds_between_retries': 1,
'default_verify_ssl_certificate': False,
'default_batch_size': 1000,
'default_saved_searches_parallel': 3,
'default_app': "search",
'default_parameters': {
"force_dispatch": True,
"dispatch.now": True
}
}
class TestSplunkInstanceConfig(unittest.TestCase):
def test_check_token_auth_preferred_over_basic_auth(self):
"""
Splunk topology check should prefer Token based authentication over Basic auth mechanism
"""
instance = {
'url': 'http://localhost:8089',
'authentication': {
'basic_auth': {
'username': "admin",
'password': "admin"
},
'token_auth': {
'name': "api-admin",
'initial_token': "dsfdgfhgjhkjuyr567uhfe345ythu7y6tre456sdx",
'audience': "admin",
'renewal_days': 10
}
},
'component_saved_searches': [{
"name": "components",
"parameters": {}
}],
'relation_saved_searches': [],
'tags': ['mytag', 'mytag2']
}
instance_config = SplunkInstanceConfig(instance, {}, mock_defaults)
assert instance_config.auth_type == AuthType.TokenAuth
def test_checks_backward_compatibility(self):
"""
Test whether username/password without the authentication block is still accepted
"""
instance = {
'url': 'http://localhost:8089',
'username': 'admin',
'password': 'admin',
'component_saved_searches': [{
"name": "components",
"parameters": {}
}],
'relation_saved_searches': [{
"name": "relations",
"parameters": {}
}],
'tags': ['mytag', 'mytag2']
}
instance_config = SplunkInstanceConfig(instance, {}, mock_defaults)
assert instance_config.auth_type == AuthType.BasicAuth
def test_combine_old_and_new_conf(self):
instance = {
'url': 'http://localhost:8089',
'username': 'admin',
'password': 'admin',
'authentication': {
'basic_auth': {
'username': "adminNew",
'password': "adminNew"
}
},
'component_saved_searches': [{
"name": "components",
"parameters": {}
}],
'relation_saved_searches': [{
"name": "relations",
"parameters": {}
}],
'tags': ['mytag', 'mytag2']
}
instance_config = SplunkInstanceConfig(instance, {}, mock_defaults)
assert instance_config.auth_type == AuthType.BasicAuth
assert instance_config.username == "adminNew"
assert instance_config.password == "adminNew"
def test_check_audience_param_not_set(self):
"""
Splunk topology check should fail and raise exception when audience param is not set
"""
instance = {
'url': 'http://localhost:8089',
'authentication': {
'token_auth': {
'name': "admin",
'initial_token': "dsfdgfhgjhkjuyr567uhfe345ythu7y6tre456sdx",
'renewal_days': 10
}
},
'component_saved_searches': [{
"name": "components",
"parameters": {}
}],
'relation_saved_searches': [],
'tags': ['mytag', 'mytag2']
}
try:
SplunkInstanceConfig(instance, {}, mock_defaults)
assert False
except CheckException as e:
assert str(e) == 'Instance missing "authentication.token_auth.audience" value'
def test_check_name_param_not_set(self):
"""
Splunk topology check should fail and raise exception when name param is not set
"""
instance = {
'url': 'http://localhost:8089',
'authentication': {
'token_auth': {
'initial_token': "dsfdgfhgjhkjuyr567uhfe345ythu7y6tre456sdx",
'audience': "search",
'renewal_days': 10
}
},
'component_saved_searches': [{
"name": "components",
"parameters": {}
}],
'relation_saved_searches': [],
'tags': ['mytag', 'mytag2']
}
try:
SplunkInstanceConfig(instance, {}, mock_defaults)
assert False
except CheckException as e:
assert str(e) == 'Instance missing "authentication.token_auth.name" value'
|
StackVista/stackstate-agent-integrations
|
splunk_base/tests/test_splunk_instance_config.py
|
test_splunk_instance_config.py
|
py
| 5,203 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74795559226
|
from django.db import models
from Pages.models import Page
import urllib
from .special_character_table import TABLE
def get_report_url(post_hashtag):
return "http://c8763.webutu.com?hashtag="+str(post_hashtag)
# Create your models here.
class Record(models.Model):
submit_type=models.IntegerField(default=0)
post_id=models.IntegerField(blank=False)
fb_post_id=models.TextField(blank=False)
class Report(models.Model):
REPORTER_TYPE=(
("S","Submitter"),
("R","Related"),
("F","Friend"),
("O","Other")
)
reporter=models.CharField(max_length=10,choices=REPORTER_TYPE,default="S")
reason=models.TextField(blank=False)
post_hashtag=models.IntegerField(blank=False)
fb_post_id=models.TextField(blank=False)
class Submission(models.Model):
context=models.TextField(blank=False)
submit_type=models.IntegerField(default=0)
submit_time=models.DateTimeField(auto_now_add=True)
def publish(self,manager):
page=Page.objects.all()[0]
fb_api_url="https://graph.facebook.com/v2.12/"+page.page_id
post_context="#"
post_context+=page.prefix+str(page.post_count)
# post_context+="\n檢舉這篇文章:"
# post_context+=get_report_url(page.post_count)
page.post_count=page.post_count+1
page.save()
response=None
if self.submit_type==0:
fb_api_url+="/feed"
post_context+="\n\n"+self.context+"\n\n"
post_context+=manager
values={
'message':post_context,
'access_token':page.access_token
}
data=urllib.parse.urlencode(values)
byte_data=data.encode('utf8')
response=urllib.request.urlopen(fb_api_url,byte_data)
else:
fb_api_url+="/photos"
image_text=self.context+"\n"
watermark=manager
for tup in TABLE:
image_text=image_text.replace(tup[0],tup[1])
watermark=watermark.replace(tup[0],tup[1])
param=urllib.parse.urlencode({'text':image_text,'line_length':16,'watermark':watermark})
image_url="http://complain-kskg.ga/texttoimage/?%s"%param
values={
'caption':post_context,
'url':image_url,
'access_token':page.access_token
}
data=urllib.parse.urlencode(values)
byte_data=data.encode('utf8')
response=urllib.request.urlopen(fb_api_url,byte_data)
return response.read()
|
austin880625/KSKGcomplain
|
Submissions/models.py
|
models.py
|
py
| 2,572 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32188022347
|
from itertools import permutations
def primenumber(x):
if x < 2:
return False
for i in range(2, x):
if x % i == 0:
return False
return True
def solution(numbers):
answer = 0
num = []
for i in range(1, len(numbers)+1) :
num.append(list(set(map(''.join, permutations(numbers, i)))))
per = list(set(map(int, set(sum(num, [])))))
for p in per :
if primenumber(p) == True :
answer += 1
return answer
# ========================================================================
# Re-solved the problem on April 16, 2023.
from itertools import permutations
def primenumber(x):
if x < 2:
return False
for i in range(2, x):
if x % i == 0:
return False
return True
def solution(numbers):
answer = 0
result = []
for number in range(1, len(numbers)+1):
first = list(set(map(''.join, permutations(numbers, number))))
result.append(first)
unduplicated_numbers = list(set(map(int, sum(result, []))))
for i in unduplicated_numbers:
if primenumber(i) == True:
answer += 1
return answer
|
kcw0331/python-for-coding-test
|
programmers-coding/소수찾기.py
|
소수찾기.py
|
py
| 1,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73652386109
|
# Given an array nums containing n numbers from the range [0, n], find the one number in that range that does not appear in the array
class Solution(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums = nums + [len(nums) + 1] * 2
for i in range(len(nums) - 1):
nums[abs(nums[i])] = -abs(nums[abs(nums[i])])
for i in range(len(nums)):
if nums[i] > 0:
return i
for i in range(len(nums)):
if nums[i] == 0:
return i
nums = [2,0]
a = Solution()
print(a.missingNumber(nums))
|
xxxxlc/leetcode
|
array/missingNumber.py
|
missingNumber.py
|
py
| 642 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41933031591
|
import pyautogui
import time
pyautogui.moveTo(3530, 983) # Move the cursor to the chat input location
pyautogui.click()
# Spam 100 chat messages.
for i in range(100):
    pyautogui.write("PING!!!") # The spam message text
    time.sleep(0.01) # Delay between messages
    pyautogui.press("Enter")
|
arvandha121/SPAM_CHAT_WHATSAPP
|
spam.py
|
spam.py
|
py
| 268 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15018796065
|
from View.GUI.Windows.ParameterWindow.ComponentSections.AbstractParameterSection import AbstractParameterSection
from View.GUI.Windows.ParameterWindow.ComponentSections.TkParameterSection import TkParameterSection
class EdgeParameterSection(AbstractParameterSection):
def __init__(self, root, edge, controller):
super().__init__(root, controller, edge, TkParameterSection.ParameterType.Edge)
edge.start.subscribe(self)
edge.end.subscribe(self)
def update_value_dictionary(self):
super().update_value_dictionary()
self.value_dictionary["name"] = self.observed_subject.name
self.value_dictionary["start node"] = self.observed_subject.start.name
self.value_dictionary["end node"] = self.observed_subject.end.name
def destroy(self):
self.observed_subject.start.unsubscribe(self)
self.observed_subject.end.unsubscribe(self)
super().destroy()
|
Moni5656/npba
|
View/GUI/Windows/ParameterWindow/ComponentSections/EdgeParameterSection.py
|
EdgeParameterSection.py
|
py
| 933 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23777296235
|
import numpy as np
import tensorflow as tf
from models import vgg
class network():
def __init__(self, batch_size=1):
self._batch_size = None
self.x = tf.placeholder(dtype=tf.float32, shape=[self._batch_size, None, None, 3], name="input_image")
self.cls_plc = tf.placeholder(tf.float32, shape=[self._batch_size, None, None, 18], name="rpn_cls")
self.box_plc = tf.placeholder(tf.float32, shape=[self._batch_size, None, None, 72], name="rpn_box")
def build_network(self):
initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)
vgg_16 = vgg.ConvNetVgg16('vgg16.npy')
cnn = vgg_16.inference(self.x)
features = vgg_16.get_features()
rpn_cls_score, rpn_bbox_pred = self.build_rpn(features, initializer)
return [rpn_cls_score, rpn_bbox_pred, features]
def build_rpn(self, net, initializer):
num_anchors = 9
rpn1 = tf.layers.conv2d(net,
filters=512,
kernel_size=(3, 3),
padding='same',
kernel_initializer = initializer,
                            name='rpn_conv/3x3')
rpn_cls_score = tf.layers.conv2d(rpn1,
filters=num_anchors,
kernel_size=(1, 1),
activation='sigmoid',
kernel_initializer = initializer,
name="rpn_out_class")
rpn_bbox_pred = tf.layers.conv2d(rpn1,
filters=num_anchors * 4,
kernel_size=(1, 1),
activation='linear',
kernel_initializer = initializer,
name='rpn_out_regre')
rpn_cls = tf.reshape(rpn_cls_score, [-1, 14, 14, 9], name='rpn_cls_pred')
rpn_bbox = tf.reshape(rpn_bbox_pred, [-1, 14, 14, 36], name='rpn_bbox_pred')
# num = 2
# rpn_cls_score_reshape = self._reshape(rpn_cls_score, num, 'rpn_cls_scores_reshape')
# rpn_cls_score_reshape = self._softmax(rpn_cls_score_reshape, 'rpn_cls_softmax')
# rpn_cls_score_reshape = self._softmax(rpn_cls_score_reshape, 'rpn_cls_softmax')
# rpn_cls_prob = self._reshape(rpn_cls_score, num_anchors , "rpn_cls_prob")
return rpn_cls_score, rpn_bbox_pred
def get_placeholder(self):
return self.x, self.cls_plc, self.box_plc
|
anandhupvr/rpn-tf
|
models/net.py
|
net.py
|
py
| 2,694 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23896439023
|
# repeat_bot.py
from bot.common import verify_user, job_name
from dotenv import load_dotenv
from bot.messages import account_summary
from telegram import Update
from telegram.ext import Application, CommandHandler, ContextTypes
from data_model import BotConfig
from utils import load_config
load_dotenv()
class PostHelp:
def __init__(self, cfg: BotConfig):
self.cfg = cfg
async def post_help_info(self, update: Update, context: ContextTypes.DEFAULT_TYPE): # pylint: disable=W0613
if await verify_user(update=update, auth_users=self.cfg.auth.telegram.users):
text = [
"/help to view this text",
"/set [number] to set how often the message should be posted",
"/stop to stop the repeating message",
"/jobs to see what repeating message is currently working",
]
text = "\n".join(text)
await update.message.reply_text(text)
class RepeatMessage:
def __init__(self, cfg: BotConfig):
self.cfg = cfg
async def send_message(self, context: ContextTypes.DEFAULT_TYPE):
job = context.job
text = await account_summary(cfg=self.cfg)
await context.bot.send_message(
job.chat_id,
message_thread_id=self.cfg.chat.message_thread_id,
text=text
)
class StopRepeatMessage:
def __init__(self, cfg: BotConfig):
self.cfg = cfg
async def stop(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
current_jobs = context.job_queue.get_jobs_by_name(self.cfg.name)
if len(current_jobs) > 0:
for job in current_jobs:
job.schedule_removal()
await update.effective_message.reply_text(
"succesfully stopped repeat message"
)
return
await update.effective_message.reply_text(
"there are no repeating message jobs to stop"
)
class SetTimer:
def __init__(self, cfg: BotConfig):
self.cfg = cfg
async def set_timer(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
if await verify_user(update=update, auth_users=self.cfg.auth.telegram.users):
try:
interval = float(context.args[0])
if interval < 0:
await update.effective_message.reply_text(
"interval must be numeric and greater than zero"
)
return
message_function = RepeatMessage(cfg=self.cfg)
context.job_queue.run_repeating(
message_function.send_message,
interval=interval,
chat_id=self.cfg.chat.chat_id,
name=self.cfg.name,
data=interval
)
text = f"repeating message every {interval} seconds"
await update.effective_message.reply_text(text)
except (IndexError, ValueError):
await update.effective_message.reply_text(
"The interval has to be a number, interpreted as seconds"
)
class Jobs:
def __init__(self, cfg: BotConfig):
self.cfg = cfg
async def post_job_status(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
if await verify_user(update=update, auth_users=self.cfg.auth.telegram.users):
current_jobs = context.job_queue.get_jobs_by_name(self.cfg.name)
if len(current_jobs) > 0:
text = job_name(cfg=self.cfg)
await update.effective_message.reply_text(text=text)
return
text = "idle, no jobs"
await update.effective_message.reply_text(text=text)
def repeat_bot(cfg: BotConfig):
# cfg = load_config(bot_name=bot_name)
post_help = PostHelp(cfg=cfg)
set_timer = SetTimer(cfg=cfg)
jobs = Jobs(cfg=cfg)
stop_message = StopRepeatMessage(cfg=cfg)
application = Application.builder().token(cfg.auth.telegram.token).build()
application.add_handler(CommandHandler("help", post_help.post_help_info))
application.add_handler(CommandHandler("set", set_timer.set_timer))
application.add_handler(CommandHandler("stop", stop_message.stop))
application.add_handler(CommandHandler("jobs", jobs.post_job_status))
application.run_polling()
|
KD6-Dash-37/telegram-chat-bot
|
bot/repeat_bot.py
|
repeat_bot.py
|
py
| 4,481 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1715742701
|
from __future__ import print_function
import os
import sys
from py2gcode import gcode_cmd
from py2gcode import cnc_dxf
feedrate = 0.4*0.10
depth_per_360 = 0.4*0.03
zero_pos = {'x': 0.0, 'y': 0.0, 'z': 0.0, 'a': 0.0}
start_pos = {'x': 0.0, 'y': 0.0, 'z': 0.0, 'a': 0.0}
final_pos = {'x': 0.0, 'y': 0.0, 'z': -0.6}
#start_pos = {'x': 0.0, 'y': 0.0, 'z': -0.5, 'a': 0.0}
#final_pos = {'x': 0.0, 'y': 0.0, 'z': -0.9}
final_pos['a'] = 360*abs(final_pos['z']-start_pos['z'])/depth_per_360
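# Note (added for clarity): the rotary travel is chosen so that the tool advances
# depth_per_360 in z for every full 360-degree turn, i.e. a = 360 * |dz| / depth_per_360.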
total_t = abs(final_pos['z'] - start_pos['z'])/feedrate
angle_rate = abs(final_pos['a'] - start_pos['a'])/total_t
print('start_pos: ', start_pos)
print('final_pos: ', final_pos)
print('total_t: ', total_t)
print('angle_rate: ', angle_rate)
prog = gcode_cmd.GCodeProg()
prog.add(gcode_cmd.GenericStart())
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.FeedRate(feedrate))
prog.add(gcode_cmd.RapidMotion(**start_pos))
prog.add(gcode_cmd.LinearFeed(**final_pos))
del zero_pos['a']
prog.add(gcode_cmd.RapidMotion(**zero_pos))
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.End(),comment=True)
baseName, dummy = os.path.splitext(__file__)
fileName = '{0}.ngc'.format(baseName)
print('generating: {0}'.format(fileName))
prog.write(fileName)
| willdickson/sphere_w_rotary_axis | sphere.py | sphere.py | py | 1,227 | python | en | code | 0 | github-code | 6 | 3516700430 |
#********************* BGINFO_MULTI ***************************
# Developed by Frederico de Jesus Almeida
# Mid-level Support Analyst - Multi
#******************* 06/06/2023 ****************************
import os
import re
import psutil
import socket
import subprocess
import tkinter as tk
def get_ip_address():
ip_local = socket.gethostbyname(socket.gethostname())
return ip_local
def get_mac_address():
    # Gets the MAC address of the main network adapter
mac_address = ''
for iface in psutil.net_if_addrs().values():
for addr in iface:
if addr.family == psutil.AF_LINK:
mac_address = addr.address
break
if mac_address:
break
return mac_address
def get_hostname():
    # Gets the computer's hostname
    return socket.gethostname()
def get_username():
    # Gets the logged-in user's name
    return os.getlogin()
def get_domain():
    # Gets the computer's domain name
    texto = socket.getfqdn()
    if "MLTBR.LOCAL" in texto:
        return ("Domínio: 'MLTBR.LOCAL'")
    else:
        return ("Domínio: NONE")
def update_data():
    # Updates the data shown in the GUI widgets
hostname_label.config(text='Hostname: ' + get_hostname())
mac_address_label.config(text='MAC: ' + get_mac_address())
ip_address_label.config(text='IP: ' + get_ip_address())
username_label.config(text='Usuário : ' + get_username())
domain_label.config(text=get_domain())
network_type = get_network_type()
network_type_label.config(text='' + network_type)
    # Waits 5 minutes and then calls update_data again
    root.after(300000, update_data)
# Function that checks whether the connection is Wi-Fi or wired
def verificar_conectado(linha):
padrao = r"\bConectado\b"
resultado = re.search(padrao, linha)
if resultado:
return False
else:
return True
# Function that returns the connection type
def get_network_type():
    # Runs the command in CMD
    output = subprocess.check_output('netsh interface show interface | findstr "Ethernet"', shell=True)
    # Decodes the output into a readable string
    output = output.decode('utf-8')
    # Checks whether the connection is Wi-Fi or wired
if verificar_conectado(output):
wifi = subprocess.check_output('netsh wlan show interfaces | findstr "Faixa"', shell=True)
wifi = wifi.decode('utf-8')
wifi = wifi.replace(" ", "")
return (wifi)
else:
wifi = 'Conexão: Cabeada'
return (wifi)
get_network_type()
# Creates the main window
root = tk.Tk()
root.title('Sistema')
# Makes the window background semi-transparent (50% opacity)
root.attributes('-alpha', 0.5)
# Hides the title bar
root.overrideredirect(True)
# Places the window in the bottom-right corner
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
window_width = 300
window_height = 180
x_position = screen_width - window_width
y_position = screen_height - window_height
root.geometry('{}x{}+{}+{}'.format(window_width, window_height, x_position, y_position))
# Creates the interface widgets
hostname_label = tk.Label(root, text='Hostname: ' + get_hostname(), anchor='w', justify='left')
mac_address_label = tk.Label(root, text='MAC: ' + get_mac_address(), anchor='w', justify='left')
ip_address_label = tk.Label(root, text='IP: ' + get_ip_address(), anchor='w', justify='left')
username_label = tk.Label(root, text='Usuário: ' + get_username(), anchor='w', justify='left')
domain_label = tk.Label(root, text=get_domain(), anchor='w', justify='left')
network_type_label = tk.Label(root, text='' + get_network_type(), anchor='w', justify='left')
# Places the widgets in the window
hostname_label.pack()
mac_address_label.pack()
ip_address_label.pack()
username_label.pack()
domain_label.pack()
network_type_label.pack()
# Waits 30 seconds and then calls update_data for the first time
root.after(30000, update_data)
# Starts the GUI event loop
root.mainloop()
| Frederico02/info-sistema | main_final.py | main_final.py | py | 4,077 | python | pt | code | 1 | github-code | 6 | 5753443462 |
# O(n) Time, O(n) Space :-
# def findDuplicate(List):
# myDict = {}
# for _ in List:
# if _ in myDict:
# myDict[_]+=1
# else:
# myDict[_] = 1
# for ele in myDict:
# if myDict[ele]>1:
# return ele
# O(n) Time, O(1) Space : Floyd's cycle-detection algorithm
def findDuplicate(List):
slow = fast = List[0]
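    # Phase 1: advance slow by one step and fast by two until they meet inside the cycle formed by the duplicate value.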
while True:
slow = List[slow]
fast = List[List[fast]]
if (slow == fast):
break
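    # Phase 2: restart one pointer from the start; moving both one step at a time, they meet at the cycle entrance, which is the duplicate.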
fast = List[0]
while(slow != fast):
slow = List[slow]
fast = List[fast]
return fast
List = list(map(int, input().split()))
print(findDuplicate(List))
| Abhrajyoti00/Data-Structures-and-Algorithms | 450 Questions for DSA/Array/11_Find_the_Duplicate_Number.py | 11_Find_the_Duplicate_Number.py | py | 663 | python | en | code | 3 | github-code | 6 | 70766850107 |
from fastapi import APIRouter, Depends
from app.model.param import (
ListTaskParams,
NewTasksListParams,
StopTaskParams,
)
from app.model.response import (
NewTasksResp,
ListTasksResp,
StopTasksResp,
)
from exception import DataExistsError, APIBaseError
from app.model.data import TaskModel, StopTaskModel
from .helper import task as taskhelper
from traceback import format_exc
task_router = APIRouter()
@task_router.get(
'/list',
response_model=ListTasksResp
)
async def list_task(
param: ListTaskParams = Depends(ListTaskParams)
):
""" 任务列表。"""
# data = _list_task(param.offset, param.limit)
data = taskhelper.list(param.offset, param.limit, param.active)
return ListTasksResp(
data=data
)
@task_router.post(
'/new',
response_model=NewTasksResp
)
async def create_tasks(
params: NewTasksListParams,
):
""" 批量添加任务。"""
data = []
for url in params.urls:
try:
t = taskhelper.create(url, params.options)
t.run_async()
errcode = 0
errmsg = None
except APIBaseError as err:
t = taskhelper.get(err.data)
errcode = err.code
errmsg = err.msg
data.append(TaskModel(
sign=t.sign,
title=t.title,
url=t.url,
errcode=errcode,
errmsg=errmsg
))
return NewTasksResp(data=data)
@task_router.post(
'/stop',
response_model=StopTasksResp
)
async def stop_tasks(
params: StopTaskParams
):
data = []
for key in params.keys:
try:
result = taskhelper.stop(key)
errcode = 0
errmsg = None
except APIBaseError as err:
errcode = err.code
errmsg = err.msg
data.append(StopTaskModel(
errcode=errcode,
errmsg=errmsg
))
return StopTasksResp(data=data)
| ZSAIm/VideoCrawlerEngine | app/taskflow/routers/task.py | task.py | py | 1,962 | python | en | code | 420 | github-code | 6 | 33188473740 |
# -*-coding:utf-8-*-
import logging
from datetime import datetime
class MyLogger():
def __init__(self, name):
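        # Writes to logging/<name>.log; the logging/ directory must already exist when the handler is created.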
self.logger = logging.getLogger(name)
self.handler = logging.FileHandler(filename='logging/%s.log' % name)
self.logger.addHandler(self.handler)
def warning(self, info):
msg = '%s : %s \n==========================\n' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), info)
self.logger.warning(msg)
if __name__ == '__main__':
logger = MyLogger('test')
logger.warning('test msg')
| xxxx-hhhh/spider | baojianhui_spider/my_logging.py | my_logging.py | py | 546 | python | en | code | 0 | github-code | 6 | 8660192902 |
import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
#global set of stopwords
english_stopwords = set(stopwords.words('english'))
def tokenizeText(content):
global english_stopwords
#returns a list of tokens found in the given pathname
tokens = word_tokenize(content)
tokensWithoutStopWords = []
for word in tokens:
if word not in english_stopwords:
tokensWithoutStopWords.append(word)
#print(Simhash(tokensWithoutStopWords))
return tokensWithoutStopWords
def computeWordFrequencies(tokens):
mydict = dict()
for token in tokens:
frequency = 1
if(token not in mydict.keys()):
mydict[token] = frequency
else:
mydict[token] += frequency
return mydict
| daveA420/ics121Crawler | newParser.py | newParser.py | py | 857 | python | en | code | 0 | github-code | 6 | 30804216456 |
import sys,tty,termios
class _Getch:
def __call__(self):
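        # Read up to three bytes in raw mode so arrow keys, which arrive as escape sequences like '\x1b[A', come back as one string.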
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(3)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def get():
inkey = _Getch()
while(1):
k=inkey()
if k!='':break
if k=='\x1b[A':
return "up"
elif k=='\x1b[B':
return "down"
elif k=='\x1b[C':
return "right"
elif k=='\x1b[D':
return "left"
else:
return "not an arrow key!"
if __name__ == "__main__":
for i in range(10):
print(get())
| AAmir007-code/Game-2048 | keyboard.py | keyboard.py | py | 817 | python | en | code | 5 | github-code | 6 | 72469437949 |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 13:00:31 2023
@author: samir
"""
import pandas as pd
dat = pd.read_csv('School Data.csv')
print("PART ONE++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print('Shape',dat.shape)
'''
for c in dat.columns:
print( c, dat[c].isnull().sum() )
'''
#I want to drop the cols with the most missing data. First going for ones over 100
toDrop = ['Offers Electives?','Sports Rank','Mental Health Services?','Math Score',\
'English Score','Suicide Data',\
'Crime-related Data','Lunch%-Free','Lunch%-Reduced',\
'Lunch%-Paid','Unnamed: 27','Teaching/Educational Method']
print("Deleted Every Column with missing values over 95")
for i in range(0,len(toDrop)):
dat = dat.drop(toDrop[i],axis=1)
print('New Shape',dat.shape)
'''
for c in dat.columns:
print( c, dat[c].isnull().sum() )
'''
print("Dropping all rows with empty values")
dat = dat.dropna()
print('New Shape',dat.shape)
print("PART TWO++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
dat = dat.drop_duplicates(subset=['School Name', 'Zip Code'])
print("Removed Duplicates if they had the same name/zipcode")
print('New Shape',dat.shape)
print("PART THREE++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
dat['2022 Student Enrollments'] = pd.to_numeric(dat['2022 Student Enrollments'],\
errors='coerce')
dat = dat.dropna()
print("I made the [2022 Student Enrollment] part numeric and dropped all rows",\
"that could not be converted as some had things like 'cant find'")
print('New Shape',dat.shape)
print("")
dat['National Rank'] = pd.to_numeric(dat['National Rank'],\
errors='coerce')
dat = dat.dropna()
print("I made the [National Rank] part numeric and dropped all rows",\
"that could not be converted as some had things like 'Unranked'")
print('New Shape',dat.shape)
print("")
dat['AZ Rank'] = pd.to_numeric(dat['AZ Rank'],\
errors='coerce')
dat = dat.dropna()
print("I made the [AZ Rank] part numeric and dropped all rows",\
"that could not be converted as some had things like 'Unranked'")
print('New Shape',dat.shape)
racialL = ['Racial%-White','Racial%-Black','Racial%-Native','Racial%-Hispanic',\
'Racial%-Asian','Racial%-Other']
print("")
for n in racialL:
dat[n] = dat[n].str.replace('%','')
dat[n] = pd.to_numeric(dat[n],\
errors='coerce')
dat = dat.dropna()
print("I made the [Racial%-XXXXX] parts numeric and dropped all rows",\
"that could not be converted as some had things like 'Not Found'")
print('New Shape',dat.shape)
print("")
print("Next I get rid of ', AZ' and similar strings in the City tab as we",\
" know all data is in arizona. Size does not change ")
dat['City'] = dat['City'].str.replace(', AZ','')
dat['City'] = dat['City'].str.replace(',AZ','')
dat['City'] = dat['City'].str.replace(', Arizona','')
print("")
print("Now I want to clean the YES and NO columns. First I need to make all Y/N's the same")
a = dat['AP Classes?'].value_counts()
print("For example this is what the column [AP Classes?] looks like if we value count it\n",a)
WaysOfYes = ['Yes','yes','YES','Yes ','Y ','YEs','AP CLASSES - AP CLASSES - Yes',\
'Dual Enrollment - Dual Enrollment - Yes']
WaysOfNo = ['No','NO','no']
for y in WaysOfYes:
dat['AP Classes?'] = dat['AP Classes?'].str.replace(y,'1')
dat['Dual Enrollment?'] = dat['Dual Enrollment?'].str.replace(y,'1')
dat['Offers Sports?'] = dat['Offers Sports?'].str.replace(y,'1')
for n in WaysOfNo:
dat['AP Classes?'] = dat['AP Classes?'].str.replace(n,'0')
dat['Dual Enrollment?'] = dat['Dual Enrollment?'].str.replace(n,'0')
dat['Offers Sports?'] = dat['Offers Sports?'].str.replace(n,'0')
a = dat['AP Classes?'].value_counts()
print("Now it looks like\n",a)
print("I WILL DO THIS FOR ALL Y/N FEATURES BUT WILL NOT SHOW IT ALL :)")
v = dat['Student-Teacher Ratio'].value_counts()
print("\nLooking at the [Student-Teacher ratio] there are 27 missing values.")
print("At this point that is more than a 5th of our data, therefore I think it is")
print("better if we just drop the column")
dat = dat.drop('Student-Teacher Ratio',axis=1)
print('New Shape',dat.shape)
dat = dat.reset_index()
dat = dat.drop("index",axis=1)
dat['City'] = dat['City'].str.upper()
print("\nI also made the city column all uppercase so that when I divide them up")
print("catagorically the names are consitant")
#Making the new data
print("\nNow I am making the new data First we add the ratio values")
newDat = dat[['School Name','City',"2022 Student Enrollments","National Rank","AZ Rank",'Racial%-White','Racial%-Black','Racial%-Native','Racial%-Hispanic',\
'Racial%-Asian','Racial%-Other']]
print("New Data shape",newDat.shape)
cat = ['AP Classes?','Dual Enrollment?','Offers Sports?']
for c in cat :
newDat = pd.concat([newDat,dat[c].astype(int)],axis=1)
print("\nNow sorting all of the catagorical comlumns")
print("New Data shape",newDat.shape)
newDat.to_csv("NewDat.csv")
| samir-strasser/IFT511_Project_27 | DataCleaning.py | DataCleaning.py | py | 5,356 | python | en | code | 0 | github-code | 6 | 16256206871 |
__author__ = 'harrigan'
import mcmd
import glob
class WriteDirectoryListing(mcmd.Parsable):
"""List files and write them in a directory.
:param out_fn: Where to write the file
:param glob_str: How to glob files
:param limit: Max number of files or -1 for all
"""
def __init__(self, out_fn, glob_str='data/*.txt', limit=-1):
self.out_fn = out_fn
self.glob_str = glob_str
self.limit = limit
def main(self):
fns = glob.glob(self.glob_str)
limit = self.limit
if 0 < limit < len(fns):
fns = fns[:limit]
with open(self.out_fn, 'w') as f:
f.write('\n'.join(fns))
class WriteOnlyPart(WriteDirectoryListing):
"""Write only filename or dirname."""
_subcommand_shortname = 'writeonly'
def __init__(self, out_fn, glob_str='sample/*.txt', limit=-1,
which='dirname'):
pass
def parse():
c_inst = mcmd.parsify(WriteDirectoryListing)
c_inst.main()
if __name__ == "__main__":
parse()
| mpharrigan/mcmd | mcmd/test_mcmd.py | test_mcmd.py | py | 1,038 | python | en | code | 0 | github-code | 6 | 70315957309 |
"""
bony_downloader.py
module contains BonyDownloader class to provide provider specific functionality
"""
__author__ = 'Dattatraya Tembare<[email protected]>'
import datetime
import itertools
import logging
import lxml.html
import requests
from common.download_exceptions import DownloadException
from download.file_downloader import FileDownloader
class BonyDownloader(FileDownloader):
"""
BonyDownloader class has functions for parsing page source code
parse() : implementation for 'BONY' provider
"""
def authenticate(self, provider):
"""
Step 1:: Authenticate and login to provider's portal
:param provider: provider
:return: requests session
"""
logging.debug('BonyDownloader:authenticate')
auth_config = self.configs.auth_config[provider]
access_config = self.configs.access_config[provider]
session = requests.Session()
logging.debug(f':::1 Connect to {access_config["login-url"]} and get cookies')
session.get(access_config['login-url'])
logging.debug(f':::2 Call {access_config["auth-url"]} page')
# requests will use the available cookies from session
try:
res1 = session.post(access_config["auth-url"], data=auth_config)
if self._login_failed(provider, res1):
raise DownloadException('2000_AUTHENTICATION_FAILED',
custom_message=f"Authentication failed for {provider}")
logging.debug(f'Login status :: {res1.status_code}')
# BONY request need certificate key for each request
f_html = self.utils.format_html(res1.text)
tree = lxml.html.fromstring(f_html)
csrf_key = tree.xpath('//form[@name="NavForm"]/input[@name="csrfKey"]/@value')[0]
except Exception as e:
raise DownloadException('2000_AUTHENTICATION_FAILED', e) from None
return session, {'for_next_params': True, 'csrfKey': csrf_key}
def _login_failed(self, provider, response):
if 'Invalid Login' in response.text:
return True
else:
return False
def access(self, session, **opts):
"""
Step 2:: Pull access URL/s from configs file and use it to pull page source which has URLs for file download
after method execution a_url['deal_info_dict_list'] appended to opts dictionary
TODO Use namedtuple DealInfo to make current dictionary generic to all providers
:param session: session with site cookies
:param opts: user/commandline inputs
:return: None
"""
logging.debug('FileDownloader:access')
provider = opts['provider']
previous_url_results = list()
for a_url in opts['access_urls']:
logging.debug(f':::3 Send request to {a_url} page')
# Pull input parameters to append as a query string
user_config = opts['user_input_config'] if 'user_input_config' in opts else None
user_inputs = user_config['input'] if user_config else self.configs.user_input_config[provider][
'input']
deal_info_list = self._prepare_params(a_url, user_inputs)
# Update URL with values pulled from previous page response
deal_info_list = self._use_previous_url_result(deal_info_list, previous_url_results)
# After use clean the previous_url_results
previous_url_results = []
for deal_info in deal_info_list:
params = deal_info['params']
from_opts = opts['response_dict'] if 'response_dict' in opts else {}
params = {**params, **from_opts}
opts['response_dict'] = {}
try:
if a_url['method'] == 'POST':
res = session.post(deal_info['link'], data=params)
elif a_url['method'] == 'GET':
res = session.get(deal_info['link'], params=params)
except Exception as e:
raise DownloadException('3000_ACCESS_FAILED', e)
logging.debug(f'status code :: {res.status_code} history :: {res.history} response URL :: {res.url}')
f_html = self.utils.format_html(res.text)
tree = lxml.html.fromstring(f_html)
for ele_name, ele_value in a_url['result-dict'].items():
if 'for_next_params' in ele_name:
_result = self._dict_for_next_url(ele_value, tree)
_result['for_next_params'] = True
previous_url_results.append(_result)
deal_info['for_next_params'] = _result
opts['response_dict'] = {'csrfKey': _result['csrfKey']}
elif 'for_next_url' in ele_name:
_result = self._dict_for_next_url(ele_value, tree)
_result['for_next_url'] = True
previous_url_results.append(_result)
elif 'deal_info' in ele_name:
deal_info['deal_info'] = self._dict_for_next_url(ele_value, tree)
elif 'for_parsing' in ele_name:
f_html_trees = list()
for xp in ele_value:
f_html_trees.append(tree.xpath(xp))
deal_info['f_html'] = f_html_trees
a_url['deal_info_dict_list'] = deal_info_list
def _prepare_params(self, a_url, user_inputs):
# pull mandatory input parameters from access-config
input_param_dict = a_url['input-param']
# prepare links for next request/s
links_with_params = list()
for attr_name, attr_values in user_inputs.items():
for attr_value in attr_values:
req_body = input_param_dict.copy()
req_body[attr_name] = attr_value
links_with_params.append({'link': a_url['url'], 'params': req_body})
return links_with_params
def _use_previous_url_result(self, links, previous_url_results):
if len(links) == len(previous_url_results):
for link, previous_url_result in zip(links, previous_url_results):
if 'hd_deal_number' in previous_url_result:
deal_num = previous_url_result['hd_deal_number']
deal_num = deal_num[:deal_num.index('~')] if deal_num else deal_num
previous_url_result['hd_deal_number'] = deal_num
if 'for_next_params' in previous_url_result:
link['params'] = {**link['params'], **previous_url_result}
else:
for link, previous_url_result in itertools.product(links, previous_url_results):
if 'for_next_params' in previous_url_result:
link['params'] = {**link['params'], **previous_url_result}
return links
def _dict_for_next_url(self, input_dict, tree):
# print(f'table.text :: {etree.tostring(tree)}')
result_dict = dict()
for k, xp in input_dict.items():
try:
xp_result = tree.xpath(xp)
result_dict[k] = ''.join(xp_result).strip()
except Exception as e:
raise DownloadException('3000_ACCESS_FAILED', e)
return result_dict
def parse(self, **opts):
"""
method parses the 'BONY' specific page source using xpath from access-configs, after method execution
a_url['download_urls'] appended to opts dictionary
:param opts: user/commandline inputs + a_url['deal_info_dict_list']
:return:
"""
logging.debug('BonyDownloader:parse')
out_dir = opts['output']
provider = opts['provider']
for a_url in opts['access_urls']:
download_urls = list()
for deal_info_dict in a_url['deal_info_dict_list']:
if 'f_html' in deal_info_dict:
f_url = a_url['for_download_urls']['download_url']
input_dict = a_url['for_download_urls']['request_body'].copy()
for k, v in deal_info_dict['for_next_params'].items():
if 'for_next_params' not in k:
input_dict[k] = v
deal_name = deal_info_dict['deal_info']['deal_name']
for trs in deal_info_dict['f_html']:
for tr in trs:
# print(f'table.text :: {etree.tostring(tr)}')
report_id = tr.xpath('td/input[@name="cb_rpt_id"]/@value')
report_name = ''.join(tr.xpath('td[2]/a/text()')).strip()
if len(report_id) > 0:
report_id = report_id[0][:report_id[0].index('~')]
payment_date = tr.xpath('td[6]/text()')
if len(payment_date) > 0:
payment_date = payment_date[0].strip()
dt = datetime.datetime.strptime(payment_date, "%d-%b-%Y")
for span in tr.xpath('td/span[@class="RecordNormalText"]/input'):
report_ext_key = span.xpath('@name')[0]
report_ext_value = span.xpath('@value')[0]
file_extension = report_ext_value[report_ext_value.index('~') + 1:]
input_dict_copy = dict(input_dict)
input_dict_copy['hd_avl_rpt_id'] = report_id
input_dict_copy[report_ext_key] = report_ext_value
input_dict_copy['lb_reportdate'] = dt.strftime("%B") + '++' + str(dt.year)
input_dict_copy['hd_extension'] = file_extension
o_file = out_dir + '/' + str(dt.year) + '-' + str(dt.month) + '/' + provider + '/'
o_file += (deal_name + ' pay ' + payment_date + ' ' + report_name).replace(' ', '_')
o_file += '.' + file_extension
search_data = report_id + ' || ' + report_name + ' || ' + dt.strftime("%b") + ' '
search_data += str(dt.year) + ' || ' + deal_name
download_urls.append(
DownloadUrl(f_url, o_file, search_data, deal_name, input_dict_copy, 'POST'))
# del a_url['f_html']
a_url['download_urls'] = download_urls
| dattatembare/file_downloader | src/download/bony_downloader.py | bony_downloader.py | py | 10,730 | python | en | code | 0 | github-code | 6 | 38200306892 |
import turtle
star = turtle.Turtle()
star.color('red', 'yellow')
star.begin_fill()
while True:
star.forward(200)
star.left(170)
if abs(star.pos()) < 1:
break
star.end_fill()
turtle.done()
| Priyanshu360-cpu/Machine-Learning | turtlestar.py | turtlestar.py | py | 206 | python | en | code | 3 | github-code | 6 | 24229213542 |
import sys, os,shutil
import traceback
import util
new_pro_1000_info_python_list= util.load_json(util.data_root, "python3_star_10000_repos_info")
print("num of python3: ",len(new_pro_1000_info_python_list))
dict_repo_file_python = util.load_json(util.data_root, "python3_1000repos_files_info")
print("num of python3: ",len(list(dict_repo_file_python.keys())))
dict_repo_name_info=dict()
for e in new_pro_1000_info_python_list:
dict_repo_name_info[e["name"]] = e
repos_sort_by_star = sorted(dict_repo_name_info.items(), key=lambda x: x[1]["stargazers_count"])
print("num of python3: ",len(repos_sort_by_star),repos_sort_by_star[0])
pro_path= util.data_root + "python_star_2000repo/"
remove_pro_infor=[]
count=0
for repo_name,info in repos_sort_by_star:
try:
if repo_name not in dict_repo_file_python:
if count>=3200:
break
print("repo_name: ",repo_name,pro_path+repo_name)
remove_pro_infor.append(info)
if os.path.exists(pro_path+repo_name):
shutil.rmtree(pro_path+repo_name) # Removes all the subdirectories!
print("has removed the repo ",repo_name)
count+=1
# break
    except Exception:
        print("failed while removing repo:", repo_name, info)
        traceback.print_exc()
        continue
print(len(remove_pro_infor))
# util.save_pkl(util.data_root,"remove_non_python3_pro_inf",remove_pro_infor)
# util.save_pkl(util.data_root,"remove_non_python3_pro_inf_add_200",remove_pro_infor)
# util.save_pkl(util.data_root,"remove_non_python3_pro_inf_add_400",remove_pro_infor)
# util.save_pkl(util.data_root,"remove_non_python3_pro_inf_add_2600",remove_pro_infor)
util.save_pkl(util.data_root,"remove_non_python3_pro_inf_add_3000",remove_pro_infor)
| anonymousdouble/Deidiom | code/remov_non_python3_pro.py | remov_non_python3_pro.py | py | 1,738 | python | en | code | 0 | github-code | 6 | 17534446407 |
from functools import reduce
from typing import List
from project.caretaker import Caretaker
from project.cheetah import Cheetah
from project.keeper import Keeper
from project.lion import Lion
from project.tiger import Tiger
from project.vet import Vet
from project.animal import Animal
from project.worker import Worker
class Zoo:
def __init__(self,
name: str,
budget: int,
animal_capacity: int,
workers_capacity: int
):
# public instance attribute
self.name = name
# private attributes
self.__budget = budget
self.__animal_capacity = animal_capacity
self.__workers_capacity = workers_capacity
# public instance attributes
self.animals: List[Animal] = []
self.workers: List[Worker] = []
def add_animal(self, animal: Animal, price: int) -> str:
if (price <= self.__budget) and (len(self.animals) < self.__animal_capacity):
self.animals.append(animal)
self.__budget -= price
return f'{animal.name} the {animal.__class__.__name__} added to the zoo'
# or type(animal).__name__
if (price > self.__budget) and (len(self.animals) < self.__animal_capacity):
return 'Not enough budget'
return 'Not enough space for animal'
def hire_worker(self, worker):
if len(self.workers) < self.__workers_capacity:
self.workers.append(worker)
return f'{worker.name} the {worker.__class__.__name__} hired successfully'
# or {type(worker).__name__}
return 'Not enough space for worker'
def fire_worker(self, worker_name):
worker = [w for w in self.workers if w.name == worker_name]
if worker:
self.workers.remove(worker[0])
return f'{worker[0].name} fired successfully'
return f'There is no {worker_name} in the zoo'
def pay_workers(self):
# !!!!!
workers_payment = sum([w.salary for w in self.workers])
if workers_payment <= self.__budget:
self.__budget -= workers_payment
            return f'You paid your workers. They are happy. ' \
f'Budget left: {self.__budget}'
return 'You have no budget to pay your workers. They are unhappy'
def tend_animals(self):
# get_needs = self.money_for_care
amount_to_pay = sum([t.get_needs() for t in self.animals])
if self.__budget >= amount_to_pay:
self.__budget -= amount_to_pay
return f"You tended all the animals. They are happy. Budget left: {self.__budget}"
return "You have no budget to tend the animals. They are unhappy."
def profit(self, amount) -> None:
self.__budget += amount
def animals_status(self):
animals_types = ['Lion', 'Tiger', 'Cheetah']
animals_list = {idx: [] for idx in range(0, 3)}
for animal in self.animals:
idx = animals_types.index(type(animal).__name__)
animals_list[idx].append(animal)
lions, tigers, cheetahs = animals_list[0], animals_list[1], animals_list[2]
#
# lions = [animal for animal in self.animals if type(animal).__name__ == animals_types[0]]
# tigers = [animal for animal in self.animals if type(animal).__name__ == animals_types[1]]
# cheetahs = [animal for animal in self.animals if type(animal).__name__ == animals_types[2]]
result = [f'You have {len(self.animals)} animals']
result.append(f'----- {len(lions)} Lions:')
result.append('\n'.join([animal.__repr__() for animal in lions]))
result.append(f'----- {len(tigers)} Tigers:')
result.append('\n'.join([animal.__repr__() for animal in tigers]))
result.append(f'----- {len(cheetahs)} Cheetahs:')
result.append('\n'.join([animal.__repr__() for animal in cheetahs]))
return '\n'.join(result)
def workers_status(self):
keepers = [w for w in self.workers if w.__class__.__name__ == 'Keeper']
caretakers = [w for w in self.workers if w.__class__.__name__ == 'Caretaker']
vets = [w for w in self.workers if w.__class__.__name__ == 'Vet']
result = f"You have {len(self.workers)} workers\n"
result += f'----- {len(keepers)} Keepers:\n'
result += '\n'.join([k.__repr__() for k in keepers]) + '\n'
result += f'----- {len(caretakers)} Caretakers:\n'
result += '\n'.join([c.__repr__() for c in caretakers]) + '\n'
result += f'----- {len(vets)} Vets:\n'
result += '\n'.join([v.__repr__() for v in vets])
return result
| emilynaydenova/SoftUni-Python-Web-Development | Python-OOP-Oct2023/Exercises/04.Encapsulation/wild_cat_zoo/project/zoo.py | zoo.py | py | 4,687 | python | en | code | 0 | github-code | 6 | 810990786 |
'''Time Based Key-Value Store - https://leetcode.com/problems/time-based-key-value-store/
Design a time-based key-value data structure that can store multiple values for the same key at
different time stamps and retrieve the key's value at a certain timestamp.
Implement the TimeMap class:
TimeMap() Initializes the object of the data structure.
void set(String key, String value, int timestamp) Stores the key key with the value value at the given time timestamp.
String get(String key, int timestamp) Returns a value such that set was called previously, with
timestamp_prev <= timestamp. If there are multiple such values, it returns the value associated with the
largest timestamp_prev. If there are no values, it returns "".
Example 1:
Input
["TimeMap", "set", "get", "get", "set", "get", "get"]
[[], ["foo", "bar", 1], ["foo", 1], ["foo", 3], ["foo", "bar2", 4], ["foo", 4], ["foo", 5]]
Output
[null, null, "bar", "bar", null, "bar2", "bar2"]
Explanation
TimeMap timeMap = new TimeMap();
timeMap.set("foo", "bar", 1); // store the key "foo" and value "bar" along with timestamp = 1.
timeMap.get("foo", 1); // return "bar"
timeMap.get("foo", 3); // return "bar", since there is no value corresponding to foo at timestamp 3
and timestamp 2, then the only value is at timestamp 1 is "bar".
timeMap.set("foo", "bar2", 4); // store the key "foo" and value "ba2r" along with timestamp = 4.
timeMap.get("foo", 4); // return "bar2"
timeMap.get("foo", 5); // return "bar2"
'''
from collections import OrderedDict
class TimeMap:
def __init__(self):
self.time_mapping = {}
def set(self, key: str, value: str, timestamp: int) -> None:
if key not in self.time_mapping:
self.time_mapping[key] = OrderedDict()
self.time_mapping[key][timestamp] = value
def get(self, key: str, timestamp: int) -> str:
if key in self.time_mapping:
dictValues = self.time_mapping[key]
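            # Pop from the most recent entry backwards until a timestamp <= the query is found, then restore what was popped.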
temp = []
result = ""
while dictValues:
time, value = dictValues.popitem()
temp.append((time, value))
if time <= timestamp:
result = value
break
while temp:
time, value = temp.pop()
self.time_mapping[key][time] = value
return result
else:
return ""
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
# Using Binary Search
from collections import defaultdict
class TimeMap:
def __init__(self):
self.time_mapping = defaultdict(list)
def set(self, key: str, value: str, timestamp: int) -> None:
self.time_mapping[key].append((value, timestamp))
def get(self, key: str, timestamp: int) -> str:
if key not in self.time_mapping:
return ""
dictValues = self.time_mapping[key]
left = 0
right = len(dictValues) - 1
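        # Timestamps are appended in increasing order, so binary search for the rightmost timestamp <= the query.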
while left < right:
mid = left + (right - left) // 2
if dictValues[mid][1] < timestamp:
left = mid + 1
elif dictValues[mid][1] > timestamp:
right = mid - 1
else:
return dictValues[mid][0]
if dictValues[right][1] <= timestamp:
return dictValues[right][0]
return "" if right < 0 else dictValues[right - 1][0]
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
| Saima-Chaity/Leetcode | Google/Time Based Key-Value Store.py | Time Based Key-Value Store.py | py | 3,635 | python | en | code | 0 | github-code | 6 | 30170732214 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PyPDF2 import PdfWriter, PdfReader
import io
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib import pagesizes
# ======== Plotting Util ========
# assign numbers for sorting when combining outputs
export_counter = 1
def plot_amplitude_data(plot_title: str, axis1_name: str, resolution, data1, data1dots: list = [None], axis2_name: str = "", data2: list = [None], data2dots: list = [None], graph_on_same_axis: bool = False, export: bool = True, custom_prefix: str = ""):
global export_counter
x = np.linspace(0, len(data1) / resolution, len(data1))
plt.figure()
fig, ax = plt.subplots()
ax.plot(x, data1, "-b", label="data1")
if len(data1dots) > 1 and data1dots[0] != None:
ax.plot(x, data1dots, ".", color="#55AAFF", label="data1 dots")
ax.set_xlabel("Time passed [s]")
ax.set_ylabel(axis1_name, color="blue")
# set the x-spine
ax.spines['left'].set_position('zero') # type: ignore
# turn off the right spine/ticks
ax.spines['right'].set_color('none')
ax.yaxis.tick_left()
# set the y-spine
ax.spines['bottom'].set_position('zero') # type: ignore
# turn off the top spine/ticks
ax.spines['top'].set_color('none')
ax.xaxis.tick_bottom()
if len(data2) > 1 and data2[0] != None:
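            # Plot the optional second series, either on the same axis or on a twin y-axis.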
ax2 = ax
if not graph_on_same_axis:
ax2 = ax.twinx()
ax2.plot(x, data2, "-r", label="data2")
if len(data2dots) > 1 and data2dots[0] != None:
ax2.plot(x, data2dots, ".", color='#FFA500', label="data2 dots")
ax2.set_xlabel("Time passed [s]")
ax2.set_ylabel(axis2_name, color="red")
plt.title(plot_title)
if export:
name = plot_title.lower().replace(" ", "_")
plt.savefig(
f"summarized_plots/png/({custom_prefix}a_{export_counter}){name}.png")
plt.savefig(
f"summarized_plots/pdf/({custom_prefix}a_{export_counter}){name}.pdf")
export_counter += 1
plt.show()
export_counter = 1
def plot_graph(plot_title: str, axis_name: str, points_x, points_val, graph_x, graph_y, y_axis_limit, export: bool = True, custom_prefix: str = ""):
"""
Usage example:
>>> t = np.arange(0, 5, 0.2)
>>> plot_graph("", "", ..., ..., t, t ** 2)
"""
global export_counter
plt.figure()
fig, ax = plt.subplots()
ax.plot(points_x, points_val, ".", color="#55AAFF", label="points")
ax.plot(graph_x, graph_y, "-r", label="function")
ax.set_ylim(ymax=y_axis_limit)
ax.set_xlabel("Points [1]")
ax.set_ylabel(axis_name, color="blue")
plt.title(plot_title)
if export:
name = plot_title.lower().replace(" ", "_")
plt.savefig(
f"summarized_plots/png/({custom_prefix}b_{export_counter}){name}.png")
plt.savefig(
f"summarized_plots/pdf/({custom_prefix}b_{export_counter}){name}.pdf")
export_counter += 1
plt.show()
def plot_4_curves__vs_time(data1, data2, data3, data4, steps_per_second, y_axis_title):
x1 = np.linspace(0, len(data1) / steps_per_second, len(data1))
x2 = np.linspace(0, len(data2) / steps_per_second, len(data2))
x3 = np.linspace(0, len(data3) / steps_per_second, len(data3))
x4 = np.linspace(0, len(data4) / steps_per_second, len(data4))
plt.figure()
fig, ax = plt.subplots()
ax.plot(x1, data1)
ax.plot(x2, data2)
ax.plot(x3, data3)
ax.plot(x4, data4)
ax.set_xlabel("Verstrichene Zeit [s]")
ax.set_ylabel(y_axis_title)
plt.title(f"{y_axis_title} gegen Zeit")
plt.show()
def create_pdf_text_page(filename: str, text: str, page_size=pagesizes.landscape(pagesizes.A5)):
# PDF page with info data
# src: https://stackoverflow.com/a/17538003/19474335
packet = io.BytesIO()
cvs = Canvas(packet, bottomup=False, pagesize=page_size)
# utf-8 encoding support: https://stackoverflow.com/a/17011377/19474335
pdfmetrics.registerFont(TTFont('Verdana', 'Verdana.ttf'))
cvs.setFont("Verdana", 11)
line_height = 15
y_counter = 2 * line_height
for line in text.split("\n"):
cvs.drawString(40, y_counter, line)
y_counter += line_height
cvs.save()
# move to the beginning of the BytesIO buffer
# packet.seek(0)
new_pdf = PdfReader(packet)
with open(filename.replace(".pdf", "") + ".pdf", "wb") as outStream:
output = PdfWriter()
output.add_page(new_pdf.pages[0])
output.write(outStream)
| vexplained/JugendForscht2022 | programming/python-analysis/plotting_util.py | plotting_util.py | py | 4,641 | python | en | code | 0 | github-code | 6 | 14539790458 |
class Solution:
import copy
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
        ################################# TLE (Time Limit Exceeded) ##################################
# def MinSum(x,y):
#
# if x == len(triangle):
# return 0
#
# left = MinSum(x+1,y)
# right = MinSum(x+1,y+1)
# ans = min(left,right) + triangle[x][y]
# print(ans)
# return min(left,right) + triangle[x][y]
#
# ans_min = MinSum(0,0)
# return ans_min
        ################################# TLE (Time Limit Exceeded) ##################################
        # Uses O(n) extra space
        # Dynamic programming: a MinNum array records the running minimum path sum, computed bottom-up
MinNum = triangle[-1].copy()
for i in reversed(range(len(triangle))):
for j in range(i):
MinNum[j] = min(MinNum[j],MinNum[j+1]) + triangle[i-1][j]
ans = MinNum[0]
return ans
if __name__ == '__main__':
s = Solution()
triangle = [[2],[3,4],[6,5,7],[4,1,8,3]]
#triangle = [[-1],[-2,-3]]
#triangle = [[1],[1,2],[1,2,3]]
ans = s.minimumTotal(triangle)
print(ans)
| Rainphix/LeetCode | 120_triangle.py | 120_triangle.py | py | 1,267 | python | en | code | 0 | github-code | 6 | 15653063144 |
from aiogram import Bot, types, Dispatcher, executor
import logging
from config import TOKEN, html
import parser as ps
import time
import random
import os
import qrcode
def make_qr(text):
qr = qrcode.QRCode()
qr.add_data(text)
img_qr = qr.make_image(fill_color='white', back_color="black")
img_qr.save('qr.png')
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
logging.basicConfig(level=logging.INFO)
async def on_startup(_):
print('Бот онлайн')
@dp.message_handler(commands='numhent')
async def numhent(msg : types.Message):
number = msg.text.split(' ', 1)
try:
ps.get_html(html,number[1])
photo = ps.parse('content', number[1])
await msg.reply_photo(photo,caption=number[1])
except:
await msg.reply('отправь число дурак')
@dp.message_handler(commands='hent')
async def hent(msg : types.Message):
rnd = random.randint(1,6330000)
ps.get_html(html,rnd)
t = ps.parse('content', rnd)
await msg.reply_photo(t,caption=rnd)
@dp.message_handler(commands='qr')
async def test(msg : types.Message):
split = msg.text.split(' ', 1)[1]
make_qr(split)
await msg.reply_photo(open('qr.png', 'rb'), caption=split)
if __name__ == '__main__':
executor.start_polling(dp,skip_updates=True, on_startup=on_startup)
| sarenis/tg_parsing_bot | bot.py | bot.py | py | 1,329 | python | en | code | 0 | github-code | 6 | 4524699811 |
import pytest
import requests
from budget.enums import ExpensesCategoryEnum, IncomeCategoryEnum
from common.tests_fixtures.fixtures import admin_credentials, admin_id, base_url
budgets_url = f"{base_url}/budgets/"
incomes_url = f"{base_url}/incomes/"
expenses_url = f"{base_url}/expenses/"
@pytest.fixture
def create_budget():
budget_data = {
"owner": admin_id,
"name": "New budget name",
}
response = requests.post(budgets_url, json=budget_data, **admin_credentials)
assert response.status_code == 201
return response.json()
def test_creating_budget():
budget_data = {
"owner": admin_id,
"name": "New budget name",
}
response = requests.post(budgets_url, json=budget_data, **admin_credentials)
assert response.status_code == 201
created_budget_url = response.json()["url"]
response = requests.get(created_budget_url, **admin_credentials)
assert response.status_code == 200
response = response.json()
assert response["owner"] == budget_data["owner"]
assert response["name"] == budget_data["name"]
def test_add_income(create_budget):
created_budget_url = create_budget["url"]
budget_id = int(created_budget_url.split("/")[-2])
income_data = {"category": IncomeCategoryEnum.EARNED_INCOME, "amount": 1000.00, "budget": budget_id}
response = requests.post(incomes_url, json=income_data, **admin_credentials)
assert response.status_code == 201
response = response.json()
assert income_data["category"] == response["category"]
assert float(income_data["amount"]) == float(response["amount"])
assert income_data["budget"] == response["budget"]
def test_add_expense(create_budget):
created_budget_url = create_budget["url"]
budget_id = int(created_budget_url.split("/")[-2])
expense_data = {"category": ExpensesCategoryEnum.SAVING, "amount": 950.21, "budget": budget_id}
response = requests.post(expenses_url, json=expense_data, **admin_credentials)
assert response.status_code == 201
response = response.json()
assert expense_data["category"] == response["category"]
assert float(expense_data["amount"]) == float(response["amount"])
assert expense_data["budget"] == response["budget"]
def test_add_expense_with_incorrect_category(create_budget):
created_budget_url = create_budget["url"]
budget_id = int(created_budget_url.split("/")[-2])
expense_data = {"category": "incorrect_category", "amount": 950.21, "budget": budget_id}
response = requests.post(expenses_url, json=expense_data, **admin_credentials)
assert response.status_code == 400
assert response.json() == {"category": ['"incorrect_category" is not a valid choice.']}
def test_filtering_expense(create_budget):
created_budget_url = create_budget["url"]
budget_id = int(created_budget_url.split("/")[-2])
expense_data_1 = {"category": ExpensesCategoryEnum.SAVING, "amount": 950.21, "budget": budget_id}
expense_data_2 = {"category": ExpensesCategoryEnum.PERSONAL, "amount": 950.21, "budget": budget_id}
response_1 = requests.post(expenses_url, json=expense_data_1, **admin_credentials)
assert response_1.status_code == 201
response_1 = response_1.json()
response_2 = requests.post(expenses_url, json=expense_data_2, **admin_credentials)
assert response_2.status_code == 201
response_2 = response_2.json()
response = requests.get(f"{expenses_url}?category={ExpensesCategoryEnum.SAVING}", **admin_credentials)
assert response.status_code == 200
response = response.json()
responses_url = [expense["url"] for expense in response["results"]]
assert response_1["url"] in responses_url
assert response_2["url"] not in responses_url
| MaciejChalusiak/FamilyBudget | budget/tests.py | tests.py | py | 3,755 | python | en | code | 0 | github-code | 6 | 36650794154 |
from pywrap.exporter import (MethodDefinition, SetterDefinition,
GetterDefinition, ConstructorDefinition,
FunctionDefinition, CythonDeclarationExporter)
from pywrap.ast import (Param, Function, Clazz, Constructor, Method,
Field, Enum, Typedef)
from pywrap.parser import Includes, TypeInfo
from pywrap.utils import lines
from pywrap.defaultconfig import Config
from nose.tools import assert_multi_line_equal
def test_simple_function_def():
method = MethodDefinition(
"Testclass", "", "testfun", [], Includes(),
"void", TypeInfo({}), Config()).make()
assert_multi_line_equal(
method,
lines("cpdef testfun(Testclass self):",
" self.thisptr.testfun()")
)
def test_array_arg_function_def():
method = MethodDefinition(
"Testclass", "", "testfun", [Param("a", "double *"),
Param("aSize", "unsigned int")],
Includes(), "void", TypeInfo({}), Config()).make()
assert_multi_line_equal(
method,
lines("cpdef testfun(Testclass self, np.ndarray[double, ndim=1] a):",
" self.thisptr.testfun(&a[0], a.shape[0])")
)
def test_setter_definition():
field = Field("myField", "double", "MyClass")
setter = SetterDefinition(
"MyClass", field, Includes(), TypeInfo(), Config()).make()
assert_multi_line_equal(
setter,
lines(
"cpdef __set_my_field(MyClass self, double myField):",
" cdef double cpp_myField = myField",
" self.thisptr.myField = cpp_myField"
)
)
def test_getter_definition():
field = Field("myField", "double", "MyClass")
getter = GetterDefinition(
"MyClass", field, Includes(), TypeInfo(), Config()).make()
assert_multi_line_equal(
getter,
lines(
"cpdef __get_my_field(MyClass self):",
" cdef double result = self.thisptr.myField",
" return result",
""
)
)
def test_default_ctor_def():
ctor = ConstructorDefinition("MyClass", "", [], Includes(), TypeInfo(),
Config(), "MyClass").make()
assert_multi_line_equal(
ctor,
lines(
"def __init__(MyClass self):",
" self.thisptr = new cpp.MyClass()"
)
)
def test_function_def():
fun = FunctionDefinition("myFun", "", [], Includes(), "void", TypeInfo(),
Config()).make()
assert_multi_line_equal(
fun,
lines(
"cpdef my_fun():",
" cpp.myFun()"
)
)
def test_function_def_with_another_cppname():
fun = FunctionDefinition("myFunInt", "", [], Includes(), "void", TypeInfo(),
Config(), cppname="myFun").make()
assert_multi_line_equal(
fun,
lines(
"cpdef my_fun_int():",
" cpp.myFun()"
)
)
def test_function_decl():
fun = Function("test.hpp", "", "myFun", "void")
ignored_fun = Function("test.hpp", "", "myFun", "void")
ignored_fun.ignored = True
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_function(fun)
exporter.visit_function(ignored_fun)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" void myFun() except +"
)
)
def test_class_decl():
clazz = Clazz("test.hpp", "", "MyClass")
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_clazz(clazz)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" cdef cppclass MyClass:",
" pass"
)
)
def test_ctor_decl():
clazz = Clazz("test.hpp", "", "MyClass")
ctor = Constructor("MyClass")
ignored_ctor = Constructor("MyClass")
ignored_ctor.ignored = True
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_constructor(ctor)
exporter.visit_constructor(ignored_ctor)
exporter.visit_clazz(clazz)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" cdef cppclass MyClass:",
" MyClass()"
)
)
def test_method_decl():
clazz = Clazz("test.hpp", "", "MyClass")
method = Method("myMethod", "void", "MyClass")
ignored_method = Method("", "", "")
ignored_method.ignored = True
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_param(Param("myParam", "double"))
exporter.visit_method(method)
exporter.visit_method(ignored_method)
exporter.visit_clazz(clazz)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" cdef cppclass MyClass:",
" void myMethod(double myParam) except +"
)
)
def test_field_decl():
clazz = Clazz("test.hpp", "", "MyClass")
field = Field("myField", "double", "MyClass")
ignored_field = Field("myField", "double", "MyClass")
ignored_field.ignored = True
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_field(field)
exporter.visit_field(ignored_field)
exporter.visit_clazz(clazz)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" cdef cppclass MyClass:",
" double myField"
)
)
def test_enum_decl():
enum = Enum("test.hpp", "", "MyEnum")
enum.constants.append("one")
enum.constants.append("two")
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_enum(enum)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" cdef enum MyEnum:",
" one",
" two"
)
)
def test_typedef_decl():
typedef = Typedef("test.hpp", "", "MyType", "double")
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_typedef(typedef)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" ctypedef double MyType"
)
)
| AlexanderFabisch/cythonwrapper | pywrap/test/test_exporter.py | test_exporter.py | py | 6,972 | python | en | code | 37 | github-code | 6 | 73562100029 |
from Fiat.DB.mysql import mysql
from Fiat.Base.Host import BaseHost
from Fiat.Core.Utils import loggable
class westhost(BaseHost):
def __init__(self, Instance, dict):
self.config = {
"westhost_username": dict["username"],
"ssh_user": "username",
"ssh_host": "hostname.com.whsites.net",
"ssh_port": 22,
"scratch_path": "/home/%s/scratch" % dict["username"],
"install_path": "/home/%s/v1" % dict["username"],
}
super(westhost, self).__init__(Instance)
| iandennismiller/fiat | lib/Fiat/Host/westhost.py | westhost.py | py | 566 | python | en | code | 0 | github-code | 6 | 23284310692 |
from pyes.base import clock, elapsed_time, start_time, time_unit
from functools import reduce
import sys
class stats:
"""Class statistics"""
def __init__(self):
"""Class statistics c-tor"""
# Number of calls
self.__count = 0
# Current number of agents
self.__size = 0
        # Minimal number of agents
self.__min_size = sys.maxsize
# Maximum number of agents
self.__max_size = 0
# Number of 'zero wait' agents
self.__count_zw = 0
# Total waiting time
self.__total_time = 0.0
# Elapsed time per state
self.__state_time = {}
# Moment of last state change
self.__prev_ti = None
        # Dictionary of agents and their entry times
self.__memory = {}
def start(self,a,n = 1):
if not isinstance(n,int):
raise ValueError("stats.start - int is expected")
if not self.__prev_ti:
# Moment of last state change
self.__prev_ti = start_time()
# Number of calls
self.__count+=1
# Area
if not self.__size in self.__state_time:
self.__state_time[self.__size] = ((clock() - self.__prev_ti)/time_unit())
else:
self.__state_time[self.__size] += ((clock() - self.__prev_ti)/time_unit())
# Current number of agents
self.__size += n
# Remember moment of last state change
self.__prev_ti = clock()
# Maximum number of current agents during simulation
self.__max_size = max([self.__size, self.__max_size])
        # Remember each agent in the resource and the moment it entered
self.__memory[id(a)] = clock()
def stop(self,a,n = 1):
if not isinstance(n,int):
raise ValueError("stats.stop - int is expected")
if id(a) in self.__memory:
dt = (clock()-self.__memory[id(a)])/time_unit()
            # If the time the transaction spent in the queue is 0,
            # increase the number of transactions that did not wait in the queue
if abs(dt)==0.0:
self.__count_zw += 1
# Total time
self.__total_time += dt
# Calculate area
if not self.__size in self.__state_time:
self.__state_time[self.__size] = ((clock() - self.__prev_ti)/time_unit())
else:
self.__state_time[self.__size] += ((clock() - self.__prev_ti)/time_unit())
# Reduce current number of transactions
self.__size -= n
# Minimal number of current agents during simulation
self.__min_size = min([self.__size, self.__min_size])
# Remember moment of last state change
self.__prev_ti = clock()
        # Removing item from dictionary
del self.__memory[id(a)]
def finish(self):
"""Finishing statistics at the end of simulation"""
for mt in self.__memory.values():
self.__total_time += (clock()-mt)/time_unit()
if not self.__size in self.__state_time:
self.__state_time[self.__size] = ((clock() - self.__prev_ti)/time_unit())
else:
self.__state_time[self.__size] += ((clock() - self.__prev_ti)/time_unit())
def reset(self):
"""Reset statistics"""
self.__count = 0
self.__min_size = self.__size
self.__max_size = 0
self.__size = 0
self.__count_zw = 0
self.__total_time = 0.0
self.__state_time = {}
def clear(self):
"""Clear statistics"""
self.reset()
self.__min_size = sys.maxsize
self.__prev_ti = None
self.__memory.clear()
@property
def count(self):
return self.__count
@property
def size(self):
return self.__size
@property
def max_size(self):
return self.__max_size
@property
def min_size(self):
return self.__min_size
@property
def count_zw(self):
return self.__count_zw
@property
def total_time(self):
return self.__total_time
@property
def mean_time(self):
return self.__total_time / self.__count
@property
def mean_time_zw(self):
if self.__count - self.__count_zw:
return self.__total_time / (self.__count - self.__count_zw)
else:
return float('nan')
@property
def average(self):
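        # Time-weighted average number of agents: each state size weighted by the time spent in that state.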
return sum(s*t for s,t in self.__state_time.items()) / elapsed_time()
def utilization(self,num_of_servers = 1):
if not isinstance(num_of_servers,int):
raise ValueError("stats.utilization - int is expected")
return self.average/num_of_servers
@property
def percent_zw(self):
return (100.0*self.__count_zw)/self.__count
| mdjogatovic/pyes | pyes/stats.py | stats.py | py | 4,351 | python | en | code | 0 | github-code | 6 | 29214466760 |
from celery import shared_task, Celery
from django.utils import timezone
from .models import Post
app = Celery()
@shared_task
def publish_posts_task():
posts = Post.objects.filter(
status=False, published_date__lte=timezone.now()
)
for post in posts:
post.status = True
post.save()
return (
print(f"{posts.count()} published!")
if posts
else print("There is no post to publish")
)
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
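    # Register publish_posts_task to run every hour (3600 s) once the Celery app has finished configuring.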
sender.add_periodic_task(
60 * 60,
        publish_posts_task.s(),
name="published posts every one hour",
)
| smz6990/DRF-Blog | core/blog/tasks.py | tasks.py | py | 665 | python | en | code | 2 | github-code | 6 | 2441674100 |
from flask import Flask, render_template, request
from pymysql import connections
import os
import boto3
from config import *
from datetime import date
from botocore.exceptions import ClientError
app = Flask(__name__)
bucket = custombucket
region = customregion
db_conn = connections.Connection(
host=customhost,
port=3306,
user=customuser,
password=custompass,
db=customdb
)
output = {}
table = 'employee'
@app.route("/", methods=['GET', 'POST'])
@app.route("/index")
def home():
return render_template('Login.html')
@app.route("/addemp", methods=['GET'])
def addemp():
return render_template('AddEmp.html', Title="Add to Employee Database")
@app.route("/updateemp", methods=['GET'])
def updateemp():
return render_template('UpdateEmp.html', Title="Update Employee Database")
@app.route("/about", methods=['GET','POST'])
def about():
return "Hello, Flask is running"
@app.route("/leave", methods=['GET'])
def leave():
return render_template('AddLeave.html')
#get employee codes
@app.route("/getemp", methods=['GET','POST'])
def GetEmp():
return render_template('GetEmp.html')
@app.route("/addleave", methods=['POST'])
def AddLeave():
leave_id = request.form['leave_id']
emp_id = request.form['emp_id']
date = request.form['date']
reason = request.form['reason']
prove = request.files['prove_file']
insert_sql = "INSERT INTO leaves VALUES (%s, %s, %s, %s)"
cursor = db_conn.cursor()
if prove.filename == "":
return "Please select a file"
try:
cursor.execute(insert_sql, (leave_id, emp_id, date, reason))
db_conn.commit()
#emp_name = "" + first_name + " " + last_name
        # Upload image file in S3 #
prove_image_in_s3 = "leave_id-" + str(leave_id) + "_image_file"
s3 = boto3.resource('s3')
try:
print("Data inserted in MySQL RDS... uploading image to S3...")
s3.Bucket(custombucket).put_object(Key=prove_image_in_s3, Body=prove)
bucket_location = boto3.client('s3').get_bucket_location(Bucket=custombucket)
s3_location = (bucket_location['LocationConstraint'])
if s3_location is None:
s3_location = ''
else:
s3_location = '-' + s3_location
object_url = "https://s3{0}.amazonaws.com/{1}/{2}".format(
s3_location,
custombucket,
prove_image_in_s3)
except Exception as e:
return str(e)
finally:
cursor.close()
print("all modification done...")
return render_template('AddLeaveOutput.html', name=emp_id)
@app.route("/login", methods=['POST'])
def login():
id = request.form['admin_id']
password = request.form['admin_password']
sqllogin = "SELECT COUNT(*) FROM admin WHERE password= %s AND username= %s"
cursor = db_conn.cursor()
try:
cursor.execute(sqllogin, (password, id))
valid = cursor.fetchall()
db_conn.commit()
except Exception as e:
return str(e)
finally:
cursor.close()
if valid[-1][-1] == 1:
print("Login Success")
return render_template('AddEmp.html')
else :
print("Invalid User Credentials")
return render_template('Login.html')
@app.route("/addemp", methods=['POST'])
def AddEmp():
emp_id = request.form['emp_id']
first_name = request.form['first_name']
last_name = request.form['last_name']
pri_skill = request.form['pri_skill']
location = request.form['location']
emp_image_file = request.files['emp_image_file']
insert_sql = "INSERT INTO employee VALUES (%s, %s, %s, %s, %s)"
cursor = db_conn.cursor()
if emp_image_file.filename == "":
return "Please select a file"
try:
cursor.execute(insert_sql, (emp_id, first_name, last_name, pri_skill, location))
db_conn.commit()
emp_name = first_name + " " + last_name
        # Upload image file in S3 #
emp_image_file_name_in_s3 = "emp-id-" + str(emp_id) + "_image_file"
s3 = boto3.resource('s3')
try:
print("Data inserted in MySQL RDS... uploading image to S3...")
s3.Bucket(custombucket).put_object(Key=emp_image_file_name_in_s3, Body=emp_image_file)
bucket_location = boto3.client('s3').get_bucket_location(Bucket=custombucket)
s3_location = (bucket_location['LocationConstraint'])
if s3_location is None:
s3_location = ''
else:
s3_location = '-' + s3_location
object_url = "https://s3{0}.amazonaws.com/{1}/{2}".format(
s3_location,
custombucket,
emp_image_file_name_in_s3)
except Exception as e:
return str(e)
finally:
cursor.close()
print("all modification done...")
return render_template('AddEmpOutput.html', name=emp_name)
@app.route("/fetchdata", methods=['POST'])
def GetEmpOutput():
try:
emp_id = request.form['emp_id']
if(emp_id == ""):
raise ValueError("Please enter a valid employee id")
except ValueError:
emp_id, first_name, last_name, pri_skill, location = "N/A","N/A","N/A","N/A","N/A"
image_link = "../static/images/getUser.png"
return render_template('GetEmpOutput.html', id=emp_id, fname=first_name, lname=last_name, interest=pri_skill, location=location, image_url=image_link)
select_sql = "SELECT * FROM employee WHERE emp_id = %s"
cursor = db_conn.cursor()
try:
        cursor.execute(select_sql, (emp_id,))
db_conn.commit()
(emp_id, first_name, last_name, pri_skill, location) = cursor.fetchone()
emp_image_file_name_in_s3 = "emp-id-" + str(emp_id) + "_image_file"
try:
# Generate temporary URL for image file in S3
image_link = boto3.client('s3').generate_presigned_url('get_object',
Params={'Bucket': custombucket,
'Key': emp_image_file_name_in_s3},
ExpiresIn=3600)
except ClientError:
image_link = "../static/images/getUser.png"
finally:
cursor.close()
return render_template('GetEmpOutput.html', id=emp_id, fname=first_name, lname=last_name, interest=pri_skill, location=location, image_url=image_link)
#update employee code
@app.route("/updateemp", methods=['POST'])
def UpdateEmp():
emp_id = request.form['emp_id']
first_name = request.form['first_name']
last_name = request.form['last_name']
pri_skill = request.form['pri_skill']
location = request.form['location']
emp_image_file = request.files['emp_image_file']
update_sql = "UPDATE employee SET first_name = %s, last_name = %s, pri_skill = %s, location = %s WHERE emp_id = %s"
values = (first_name, last_name, pri_skill, location, emp_id)
cursor = db_conn.cursor()
try:
cursor.execute(update_sql, values)
db_conn.commit()
emp_name = "" + first_name + " " + last_name
        # Upload image file to S3 #
emp_image_file_name_in_s3 = "emp-id-" + str(emp_id) + "_image_file"
s3 = boto3.resource('s3')
try:
print("Data updated in MySQL RDS... updating image to S3...")
s3.Object(custombucket, emp_image_file_name_in_s3).delete()
s3.Bucket(custombucket).put_object(Key=emp_image_file_name_in_s3, Body=emp_image_file)
bucket_location = boto3.client('s3').get_bucket_location(Bucket=custombucket)
s3_location = (bucket_location['LocationConstraint'])
if s3_location is None:
s3_location = ''
else:
s3_location = '-' + s3_location
object_url = "https://s3{0}.amazonaws.com/{1}/{2}".format(
s3_location,
custombucket,
emp_image_file_name_in_s3)
except Exception as e:
return str(e)
finally:
cursor.close()
print("All modification done...")
return render_template('UpdateEmp.html', name=emp_name)
# delete employee code
# TODO: HTML page for delete employee
@app.route("/deletemp", methods=['POST'])
def DeleteEmp():
emp_id = request.form['emp_id']
delete_sql = "DELETE FROM employee WHERE emp_id = %s"
cursor = db_conn.cursor()
try:
        cursor.execute(delete_sql, (emp_id,))
db_conn.commit()
print("Data deleted from MySQL RDS... deleting image from S3...")
emp_image_file_name_in_s3 = "emp-id-" + str(emp_id) + "_image_file"
s3 = boto3.resource('s3')
s3.Object(custombucket, emp_image_file_name_in_s3).delete()
finally:
cursor.close()
print("all modification done...")
return "Deleted employee with id: " + emp_id
@app.route("/attendance", methods=['GET'])
def takeattendance():
today = date.today()
date_time = today.strftime("%d/%m/%Y")
return render_template('Attendance.html',Title="Attendance", date=date_time)
@app.route("/attendance", methods=['POST'])
def attendance():
cursor = db_conn.cursor()
emp_id = request.form['emp_id']
today = date.today()
date_time = today.strftime("%d/%m/%Y")
select_sql = "SELECT emp_id, first_name, last_name FROM employee WHERE emp_id = %s"
insert_sql = "INSERT INTO attandance VALUES (%s, %s, %s, %s)"
try:
        cursor.execute(select_sql, (emp_id,))
(emp_id, first_name, last_name) = cursor.fetchone()
cursor.execute(insert_sql, (emp_id, first_name, last_name, date_time))
db_conn.commit()
message = "Attendance marked for " + emp_id + " " + first_name + " " + last_name
except Exception as e:
emp_id = "Employee not found"
message = "Employee not found"
finally:
cursor.close()
return render_template('Attendance.html', Title="Attendance", date=date_time, message=message)
@app.route("/viewatt", methods=['GET'])
def viewatt():
cursor = db_conn.cursor()
select_sql = "SELECT * FROM attandance"
try:
cursor.execute(select_sql)
data = cursor.fetchall()
finally:
cursor.close()
return render_template('ViewAttandance.html', Title="Attendance", data=data)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=True)
|
Darkless123/aws-live
|
EmpApp.py
|
EmpApp.py
|
py
| 10,617 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21138667122
|
#!/usr/bin/python3
# -*-coding:utf-8 -*-
# Reference:**********************************************
# @Time : 2019/11/1 23:30
# @Author : Raymond Luo
# @File : train_emb.py
# @User : luoli
# @Software: PyCharm
# Reference:**********************************************
import pickle
from gensim.models import Word2Vec, KeyedVectors
import pandas as pd
import torch.nn as nn
import torch
def train_motif_wordemb(path):
data = pd.read_csv(path)
walk_a = data['user_neighbor'].values.tolist()
walk_b = data['target_neighbor'].values.tolist()
walk_a.extend(walk_b)
walk = []
for line in walk_a:
new_line = line[1:-1].split(", ")
walk.append(new_line)
model = Word2Vec(walk, size=128, window=3, min_count=0, sg=1, workers=12, iter=2, compute_loss=True)
print("Node2vec loss:", model.get_latest_training_loss())
model.wv.save_word2vec_format("../model/motif_walk.emb")
def change_emb_index(emb_path, uid2idx_path):
with open(uid2idx_path, "rb") as f:
uid2idx = pickle.load(f)
with open(emb_path, "r") as f:
emb_file = f.readlines()
head = 1
new_file = []
for line in emb_file:
if head:
head = 0
new_file.append(line)
            continue  # skip the first (header) line
line_list = line.split(" ")
        idx = uid2idx[int(line_list[0])]  # map uid to idx
        line_list[0] = str(idx)  # write it back as a string
new_line = " ".join(line_list)
new_file.append(new_line)
with open("../model/motif_walk_idx.emb", "w", encoding="utf-8") as f:
for line in new_file:
f.write(line)
if __name__ == "__main__":
# train_motif_wordemb("../data/train_data.csv")
# change_emb_index("../model/motif_walk.emb", "../data/uid_2_idx.pkl")
# test
    # build the embedding layer from the saved node vectors
    word_vectors = KeyedVectors.load_word2vec_format("../model/motif_walk_idx.emb", binary=False)  # node vectors
    weight = torch.FloatTensor(word_vectors.syn0)  # get the 2D numpy matrix of vectors
emb = nn.Embedding.from_pretrained(weight, freeze=False)
print(emb(torch.LongTensor([47066])))
|
RManLuo/MotifGNN
|
src_sjjy/train_emb.py
|
train_emb.py
|
py
| 2,114 |
python
|
en
|
code
| 7 |
github-code
|
6
|
40205545759
|
# encoding: utf-8
"""
CalculationModule.py
Author: Dario Marroquin 18269 (dariomarroquin)
Author: Pablo Ruiz 18259 (PingMaster99)
Version 1.0
Updated March 4, 2021
Required functions for the op amp calculator
"""
from sympy import *
import DatabaseConnection as Db
x = symbols('x')
y = symbols('y')
database = Db.Data()
def quadratic_least_square(points):
"""
Calculates the quadratic least square regression of a set of points
:param points: points
:return: a0 and a1 values of the regression
"""
n = len(points)
x_summation = ls_summation(points, "x")
y_summation = ls_summation(points, "y")
xy_summation = ls_summation(points, "x * y")
x_squared_summation = ls_summation(points, "x ** 2")
a0 = (x_squared_summation * y_summation - x_summation * xy_summation) / (n * x_squared_summation - x_summation ** 2)
a1 = (n * xy_summation - x_summation * y_summation) / (n * x_squared_summation - x_summation ** 2)
return a0, a1
def ls_summation(points, function):
"""
Calculates the least square regression needed summations
:param points: point list
:param function: function for the summation
:return: summation result
"""
ls_result = 0
function = parse_expr(function)
for i in range(0, len(points)):
ls_result += function.evalf(subs={x: points[i][0], y: points[i][1]})
return ls_result
def populate_calculations(filename=None):
"""
Loads the database for it to be operated
:param filename: name of the file
:return: True if the operation was successful
"""
if filename is not None:
try:
database.set_data(filename)
except FileNotFoundError:
return None
return True
def calculate_opamp_function(point=None, inverter=False):
"""
Calculates the op amp function using quadratic least square regression
and evaluates it at a point
:param point: point to be evaluated
:param inverter: if the circuit is an inverter or not
:return: function, evaluation, and theoretical value
"""
data = database.get_data()
function_values = quadratic_least_square(data[0])
if point is not None:
evaluation = parse_expr(f"{function_values[1]} * x + {function_values[0]}").evalf(subs={x: point})
else:
evaluation = "N.A."
resistors = data[1]
if inverter:
real_value = - resistors[1] / resistors[0]
else:
real_value = resistors[1] / resistors[0] + 1
return function_values, evaluation, real_value
def calculate_opamp_spline(point=None):
    """
    Calculates the spline result of the op amp points
    :param point: point to be evaluated
    :return: point, spline result
    """
    data = database.get_data()[0]
    result = spline(data, quadratic=False)
    print_spline = print_spline_result(result, False)
    if point is None:
        return "N.A.", print_spline
    return evaluate_spline(point, result, False), print_spline
def print_spline_result(equations, quadratic=True):
"""
Prints the equations that represent a spline
:param equations: equations
:param quadratic: if the spline is quadratic or cubic
"""
cutoff = 3 if quadratic else 4
points = equations[1]
equations = equations[0]
print_result = ""
point_index = 0
element_number = 0
for element in equations:
element = round(element, 4)
if element == 0:
point_index = (point_index + 1) % cutoff
if point_index > 2:
point_index = 0
continue
elif element > 0:
e_sign = "+" if point_index != 0 else ""
else:
element = str(element)[1:]
e_sign = "-"
print_result += f"{e_sign} {element}"
if quadratic:
if point_index == 0:
print_result += "x^2 "
elif point_index == 1:
print_result += "x "
else:
print_result += f" [{points[element_number][0]}, {points[element_number + 1][0]}]\n"
element_number += 1
else:
if point_index == 0:
print_result += "x^3 "
elif point_index == 1:
print_result += f"x^2 "
elif point_index == 2:
print_result += "x "
else:
print_result += f" [{points[element_number][0]}, {points[element_number + 1][0]}]\n"
element_number += 1
point_index = (point_index + 1) % cutoff
return print_result[0:len(print_result) - 1]
def calculate_error(point, result, inverter=False):
"""
Calculates the error of a point
:param point: point
:param result: result
:param inverter: if the circuit is an inverter
:return: percentage error
"""
if point is None or result is None:
return "N.A."
data = database.get_data()
resistors = data[1]
if inverter:
theoretical_value = - resistors[1] / resistors[0]
else:
theoretical_value = resistors[1] / resistors[0] + 1
real_value = theoretical_value * point
error = abs((real_value - result) / result) * 100
return error
def spline(points, quadratic=True):
"""
Calculates a quadratic or cubic spline of a set of points
:param points: point list
:param quadratic: if the spline is quadratic. For cubic, set this to False
:return: spline equations and intervals
"""
s_degree = 2 if quadratic else 3
equations = Matrix([])
constant_vector = Matrix([])
zeros_to_add = 0
# Continuity and extremes
for i in range(len(points)):
if len(points) > i > 1:
zeros_to_add = (i - 1) * (s_degree + 1)
point_list = []
if i != 0 and i < len(points) - 1:
double_insert = True
else:
double_insert = False
# Initial zeros
for k in range(zeros_to_add):
point_list.append(0)
# Coefficients
for j in range(s_degree, -1, -1):
point_list.append(points[i][0] ** j)
# Final zeros
for k in range((len(points) - 1) * (s_degree + 1) - (s_degree + 1) - zeros_to_add):
point_list.append(0)
equations = equations.row_insert(len(equations), Matrix([point_list]))
constant_vector = constant_vector.row_insert(len(constant_vector), Matrix([points[i][1]]))
if double_insert:
for j in range(s_degree + 1):
point_list.insert(0, 0)
point_list.pop()
equations = equations.row_insert(len(equations), Matrix([point_list]))
constant_vector = constant_vector.row_insert(len(constant_vector), Matrix([points[i][1]]))
# First derivative
for i in range(1, len(points) - 1):
point_list = []
coefficient = s_degree
# Initial zeros
for k in range((i - 1) * (s_degree + 1)):
point_list.append(0)
# Coefficients
for j in range(s_degree - 1, -1, -1):
point_list.append(coefficient * points[i][0] ** j)
coefficient -= 1
point_list.append(0)
coefficient = s_degree
for j in range(s_degree - 1, -1, -1):
point_list.append(-1 * coefficient * points[i][0] ** j)
coefficient -= 1
point_list.append(0)
# Final zeros
for k in range((len(points) - 1) * (s_degree + 1) - (s_degree + 1) * 2 - (i - 1) * (s_degree + 1)):
point_list.append(0)
equations = equations.row_insert(len(equations), Matrix([point_list]))
constant_vector = constant_vector.row_insert(len(constant_vector), Matrix([[0]]))
if quadratic:
point_list = [1]
for i in range((len(points) - 1) * (s_degree + 1) - 1):
point_list.append(0)
equations = equations.row_insert(len(equations), Matrix([point_list]))
constant_vector = constant_vector.row_insert(len(constant_vector), Matrix([[0]]))
if equations.det() == 0:
return None
return (equations ** -1) * constant_vector, points
else:
coefficient = s_degree * 2
# Second derivative
# Initial point
point_list = [6 * points[0][0], 2, 0, 0]
for i in range((len(points) - 1) * (s_degree + 1) - 4):
point_list.append(0)
equations = equations.row_insert(len(equations), Matrix([point_list]))
constant_vector = constant_vector.row_insert(len(constant_vector), Matrix([[0]]))
for i in range(1, len(points) - 1):
point_list = []
# Initial zeros
for k in range((i - 1) * (s_degree + 1)):
point_list.append(0)
# Coefficients
for j in range(s_degree - 2, -1, -1):
point_list.append(coefficient * points[i][0] ** j)
coefficient -= 4
point_list.append(0)
point_list.append(0)
coefficient = s_degree * 2
for j in range(s_degree - 2, -1, -1):
point_list.append(-1 * coefficient * points[i][0] ** j)
coefficient -= 4
point_list.append(0)
point_list.append(0)
# Final zeros
for k in range((len(points) - 1) * (s_degree + 1) - (s_degree + 1) * 2 - (i - 1) * (s_degree + 1)):
point_list.append(0)
equations = equations.row_insert(len(equations), Matrix([point_list]))
constant_vector = constant_vector.row_insert(len(constant_vector), Matrix([[0]]))
# Final point
point_list = []
for i in range((len(points) - 1) * (s_degree + 1) - 4):
point_list.append(0)
point_list.append(6 * points[len(points) - 1][0])
point_list.append(2)
for i in range(2):
point_list.append(0)
equations = equations.row_insert(len(equations), Matrix([point_list]))
constant_vector = constant_vector.row_insert(len(constant_vector), Matrix([[0]]))
if equations.det() != 0:
return (equations ** -1) * constant_vector, points
else:
return None
def evaluate_spline(point, e_spline, quadratic=True):
"""
Evaluates a selected point in a spline
:param point: point to evaluate
:param e_spline: spline equations and intervals
:param quadratic: if the spline is quadratic or cubic. Set to False for
a cubic spline
:return: The result of the evaluation
"""
points = e_spline[1]
equations = e_spline[0]
equation_index = 0
equation_offset = 3 if quadratic else 4
for i in range(len(points) - 1):
if points[i + 1][0] >= point >= points[i][0]:
equation_index = i * equation_offset
break
if i == (len(points) - 2):
return None
if quadratic:
return parse_expr(f"{equations[equation_index]} * x ** 2 + {equations[equation_index + 1]} * x + "
f"{equations[equation_index + 2]}").evalf(subs={x: point})
else:
return parse_expr(f"{equations[equation_index]} * x ** 3 + {equations[equation_index + 1]} * x ** 2 + "
f"{equations[equation_index + 2]} * x + "
f"{equations[equation_index + 3]}").evalf(subs={x: point})
|
PingMaster99/MNOpampCalculator
|
CalculationsModule.py
|
CalculationsModule.py
|
py
| 11,929 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29643231166
|
import math
import os
import random
import re
import sys
def breakingRecords(scores):
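    # Count how many times the running best (max) and worst (min) scores are broken.
    # Starting both counters at -1 cancels out the first score, which always
    # updates both the running max and the running min.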
lowestScore = sys.maxsize
highestScore = -1
countMin = -1
countMax = -1
for i in scores:
if i > highestScore:
highestScore = i
countMax += 1
if i < lowestScore:
lowestScore = i
countMin += 1
return [countMax, countMin]
print(breakingRecords([10, 5, 20, 20, 4, 5, 2, 25, 1]))
|
Paradiddle131/Hackerrank
|
Python/ProblemSolving/Easy/BreakingTheRecords.py
|
BreakingTheRecords.py
|
py
| 451 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18480731961
|
#!/usr/bin/env python
# coding=utf-8
import datetime
import hashlib
import json
class LastUpdated():
def __init__(self, file='last-updated.json'):
self.file = file
def read(self):
with open(self.file, 'r') as f:
data = json.load(f)
return {
'amiibo_sha1': data['amiibo_sha1'],
'game_info_sha1': data['game_info_sha1'],
'timestamp': datetime.datetime.strptime(data['timestamp'], '%Y-%m-%dT%H:%M:%S.%f'),
}
def read_timestamp(self):
return self.read()['timestamp']
def write(self, amiibo_sha1, game_info_sha1, timestamp):
with open(self.file, 'w') as f:
json.dump({
'amiibo_sha1': amiibo_sha1,
'game_info_sha1': game_info_sha1,
'timestamp': timestamp.isoformat(),
}, f, sort_keys=True)
def hash(self, data):
return hashlib.sha1(data).hexdigest()
def update(self, data, data1):
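        # Hash both payloads, compare against the previously stored digests, and
        # rewrite the JSON file (with a fresh UTC timestamp) only when either changed.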
amiibo_sha1 = self.hash(data)
game_info_sha1 = self.hash(data1)
try:
last_update = self.read()
except Exception as e:
print(e)
last_update = None
updated = False
if last_update is None or last_update['amiibo_sha1'] != amiibo_sha1 or last_update['game_info_sha1'] != game_info_sha1:
last_update = {
'amiibo_sha1': amiibo_sha1,
'game_info_sha1': game_info_sha1,
'timestamp': datetime.datetime.utcnow(),
}
self.write(**last_update)
updated = True
return last_update, updated
if __name__ == '__main__':
last_updater = LastUpdated()
with open('database/amiibo.json', 'rb') as f:
with open('database/games_info.json', 'rb') as g:
last_update, updated = last_updater.update(f.read(), g.read())
if updated:
print('Updated: {}'.format(last_updater.file))
print('amiibo_sha1: {}'.format(last_update['amiibo_sha1']))
print('game_info_sha1: {}'.format(last_update['game_info_sha1']))
print('timestamp: {}'.format(last_update['timestamp'].isoformat()))
|
N3evin/AmiiboAPI
|
last_updated.py
|
last_updated.py
|
py
| 2,178 |
python
|
en
|
code
| 459 |
github-code
|
6
|
31569881800
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from corai_util.tools.src.function_file import is_empty_file
from data_input.json.parameter_loader import fetch_param_json_loader_simulation, fetch_param_json_loader_itideep
from root_dir import linker_path_to_result_file
from src.estim_hawkes.estim_hawkes import Estim_hawkes
sns.set()
STR_CONFIG = "MSE"
(STR_CONFIG, NB_SIMUL, SEED, UNDERLYING_FUNCTION_NUMBER, _, KERNEL_DIVIDER,
NB_DIFF_TIME_ESTIM, DIM, STYL, NB_POINTS_TT, id_hp, parameters, t0, time_batch,
fct_parameters, true_breakpoints, _, _, _) = fetch_param_json_loader_simulation(False, STR_CONFIG)
(L, R, h, l, CONSIDERED_PARAM, ALL_KERNELS_DRAWN,
TYPE_ANALYSIS, NUMBER_OF_BREAKPOINTS, MODEL,
MIN_SIZE, WIDTH) = fetch_param_json_loader_itideep(flagprint=True, str_config=STR_CONFIG)
# should match the data given in the script.sh
NB_T_MAX = 10 # from 1 to 10.
NB_TH_OF_CURRENT_ESTIMATION = 2 # any int > 0. Represents the refinement of the ITiDeEP.
# 1 is the first naive estimation.
# The number given is the number of lines on the plot / nb of repetition of the estimation process undergone.
# Only possible to plot all the lines (1, 2...) together and not a subset of it not including the lower part.
#########
LIST_T_MAX = np.linspace(6000, 33000, NB_T_MAX)
#######################################################
# TODO explain gather result in readme + explain MSE pipeline.
# We use this file to gather the estimation together (gather function) and then plot the curve of the MSE.
matrix_err_tmax_APE = np.zeros((NB_TH_OF_CURRENT_ESTIMATION, len(LIST_T_MAX)))
matrix_err_tmax_SPE = np.zeros((NB_TH_OF_CURRENT_ESTIMATION, len(LIST_T_MAX)))
iter_refinement = NB_TH_OF_CURRENT_ESTIMATION
while iter_refinement > 0: # we collect the data from
# NB_TH_OF_CURRENT_ESTIMATION to 1 by reducing by 1 at every iteration.
for i_tmax in range(len(LIST_T_MAX)):
######################
# gather results of previous estimation for a given T max
######################
path_result_directory = linker_path_to_result_file(["MSE",
f"{STR_CONFIG}_res_{iter_refinement}",
f"data_{i_tmax}", ""])
assert not is_empty_file(path_result_directory), \
f"file must contain some data. Directory {path_result_directory} is empty."
list_estim_hp = Estim_hawkes.folder_csv2list_estim(path_result_directory)
estim_hp = Estim_hawkes.merge(list_estim_hp) # new estim gathered result
path_super_result = linker_path_to_result_file(
["MSE",
f"{STR_CONFIG}_res_{iter_refinement}",
f"data_together_{i_tmax}",
f"results_together.csv"])
estim_hp.to_csv(path_super_result) # saved gather result
######################
# compute error:
######################
path_result_res = linker_path_to_result_file(
["MSE", f"{STR_CONFIG}_res_{iter_refinement}", f"data_together_{i_tmax}", "results_together.csv"])
print("Reading: ", path_result_res)
estim_hp = Estim_hawkes.from_csv(path_result_res)
estim_hp.add_SPE_APE_col() # computed the SRE per parameter
groupby_param, keys = estim_hp.groupby(['parameter', 'm', 'n'])
total_SPE_APE = (groupby_param.get_group(('alpha', 0, 0))[["time estimation", 'SPE', 'APE']]
.sort_values(by="time estimation").reset_index(drop=True)) # a copy is made
# : we create a container where the error is aggregated.
total_SPE_APE['SPE'] = 0 # we empty the values inside the column
total_SPE_APE['APE'] = 0 # we empty the values inside the column
for key in keys:
ordered_SPE_APE = (groupby_param.get_group(key)[["time estimation", 'SPE', 'APE']]
.sort_values(by="time estimation").reset_index(drop=True))
# sort to be sure we add the correct values together, drop index for prettiness.
total_SPE_APE['SPE'] += ordered_SPE_APE['SPE']
total_SPE_APE['APE'] += ordered_SPE_APE['APE']
# MISRE = total_SRE.mean()["RSE"] # this is wrong. We need to compute it by hand.
# It does not account for non converging estimations.
total_SPE_APE_grouped = total_SPE_APE.groupby("time estimation") # we groupby so we compute the integral
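        # (added note) the "integral" is approximated by a plain average over the
        # estimation-time grid: MISPE = (1/|T|) * sum_t mean_over_runs(SPE(t)),
        # and MIAPE is defined the same way with APE.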
MISPE = 0
MIAPE = 0
        # average the squared and absolute percentage errors over runs and over the time grid
for time in total_SPE_APE_grouped.groups:
average_per_time = total_SPE_APE_grouped.get_group(time).mean()
MISPE += average_per_time['SPE'] / len(total_SPE_APE_grouped.groups)
MIAPE += average_per_time['APE'] / len(total_SPE_APE_grouped.groups)
matrix_err_tmax_SPE[iter_refinement - 1, i_tmax] = MISPE # store result
matrix_err_tmax_APE[iter_refinement - 1, i_tmax] = MIAPE # store result
iter_refinement -= 1
dict_result = {"MISPE": matrix_err_tmax_SPE.flatten(),
"MIAPE": matrix_err_tmax_APE.flatten(),
"nb application ITiDeEP": np.repeat(range(NB_TH_OF_CURRENT_ESTIMATION), NB_T_MAX),
"T max": np.tile(LIST_T_MAX, NB_TH_OF_CURRENT_ESTIMATION)}
data_err = pd.DataFrame(dict_result)
fig, ax = plt.subplots(2, 1)
sns.lineplot(x="T max", y="MISPE",
hue="nb application ITiDeEP", marker='o',
legend='full', ci=None, err_style="band",
palette='Dark2', ax=ax[0],
data=data_err)
sns.lineplot(x="T max", y="MIAPE",
hue="nb application ITiDeEP", marker='o',
legend='full', ci=None, err_style="band",
palette='Dark2', ax=ax[1],
data=data_err)
path_save_plot = linker_path_to_result_file(["MSE", f"MSE_result_{NB_TH_OF_CURRENT_ESTIMATION}" + '.png'])
fig.savefig(path_save_plot, dpi=500)
plt.show()
|
Code-Cornelius/ITiDeEP
|
mse/estimation_MSE_plot.py
|
estimation_MSE_plot.py
|
py
| 6,145 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31988903167
|
import numpy as np
from util import *
import sys
def dunn(X: np.array, labels: np.array):
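    # Dunn index: the smallest between-cluster distance divided by the largest
    # within-cluster diameter (delta / big_delta are imported from util); a
    # higher value indicates better-separated, more compact clusters.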
ks = np.unique(labels)
k_list = [X[labels == k] for k in ks]
deltas = np.ones([len(k_list), len(k_list)]) * 1000000
big_deltas = np.zeros([len(k_list), 1])
l_range = list(range(0, len(k_list)))
for k in l_range:
for l in (l_range[0:k] + l_range[k + 1:]):
deltas[k, l] = delta(k_list[k], k_list[l])
big_deltas[k] = big_delta(k_list[k])
di = np.min(deltas) / np.max(big_deltas)
return di
def gd41(X, labels):
n_clusters = len(np.unique(labels))
centroids = cluster_centroid(X, labels, n_clusters)
rows, colums = X.shape
minimum_dif_c = sys.float_info.max
maximum_same_c = sys.float_info.min
centres_l = [[0.0] * n_clusters] * n_clusters
centers = np.array(centres_l)
for i in range(0, n_clusters - 1):
for j in range(i + 1, n_clusters):
centers[i][j] = euclidian_dist(centroids[i], centroids[j])
centers[j][i] = euclidian_dist(centroids[i], centroids[j])
for i in range(0, int(math.ceil(float(rows) / 2.0))):
for j in range(0, rows):
if (labels[i] != labels[j]):
dist = centers[labels[i]][labels[j]]
minimum_dif_c = min(dist, minimum_dif_c)
else:
dist = euclidian_dist(X[i], X[j])
maximum_same_c = max(dist, maximum_same_c)
return minimum_dif_c / maximum_same_c
def os_score(X, labels):
n_clusters = len(np.unique(labels))
centroids = cluster_centroid(X, labels, n_clusters)
cluster_sizes = count_cluster_sizes(labels, n_clusters)
numerator = 0.0
for k in range(0, n_clusters):
for i in range(0, len(labels)):
if labels[i] != k: continue
numerator += ov(X, labels, X[i], k, cluster_sizes[k])
denominator = 0.0
for k in range(0, n_clusters):
l = []
for i in range(0, len(labels)):
if labels[i] != k:
continue
l.append(euclidian_dist(X[i], centroids[k]))
# get sum of 0.1*|Ck| largest elements
acc = 0.0
max_n = heapq.nlargest(int(math.ceil(0.1 * cluster_sizes[k])), l)
for i in range(0, len(max_n)):
acc += max_n[i]
denominator += acc * 10.0 / cluster_sizes[k]
return numerator / denominator
|
fedix/ensemble_clustering
|
metrics.py
|
metrics.py
|
py
| 2,396 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31209257710
|
import uuid
from random import randint
from src.infratructure.json_parser import JsonParser
from src.infratructure.serializable_object import SerializableObject
class PersonModel(SerializableObject):
def __init__(self, id: int, nick: str, photo: str, name: str = None):
self.id = id
self.nick = nick
self.photo = photo
self.name = name
@classmethod
def random(cls):
id = randint(0, 10)
nick = str(uuid.uuid4())
photo = str(uuid.uuid4())
name = str(uuid.uuid4())
return cls(id=id, nick=nick, photo=photo, name=name)
@classmethod
def from_json(cls, json):
id = JsonParser.try_get_parameter_with_sub_name(json, "member", "id")
nick = JsonParser.try_get_parameter_with_sub_name(json, "member", "name")
photo = JsonParser.try_get_parameter_with_two_sub_name(json, "member", "photo", "highres_link")
return cls(id=id, nick=nick, photo=photo, name=None)
|
GDGPetropolis/backend-event-checkin
|
src/application/models/person_model.py
|
person_model.py
|
py
| 978 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6966794859
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from kazoo.client import KazooClient
__name__ = "weichigong"
__version__ = '1.0.3'
__author__ = 'dashixiong'
__author_email__ = '[email protected]'
class zconfig:
def __init__(self, zkHosts, app, env):
self.app = app
self.env = env
self.client = KazooClient(hosts=zkHosts)
self.client.start()
def getPath(self, path):
return os.path.join('/', self.app, self.env, path)
def set(self, path, value):
fullPath = self.getPath(path)
self.client.ensure_path(fullPath)
self.client.set(fullPath, value)
def get(self, path):
fullPath = self.getPath(path)
return self.client.get(fullPath)[0].decode('utf-8')
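

# Hypothetical usage sketch (added; the host, app and env values below are made up,
# and a reachable ZooKeeper ensemble is required):
# cfg = zconfig("127.0.0.1:2181", app="demo", env="dev")
# cfg.set("db/host", b"10.0.0.5")   # stored under the znode /demo/dev/db/host
# print(cfg.get("db/host"))         # -> "10.0.0.5"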
|
perfeelab/weichigong
|
weichigong/__init__.py
|
__init__.py
|
py
| 764 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6942571337
|
from otree.api import *
from settings import SESSION_CONFIGS
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'Intro'
players_per_group = None
num_rounds = 1
max_payoff = "£2.20"
money = "£3.00"
total_balls = "five"
no_task_balls = "three"
# create a vector to randomise treatment
num_participants = 350 # note this should be substantially larger than the number of participants I actually intend to hire, because some Prolificers will join the session but not complete
num_blocks = -1*( -num_participants // 14) # I'm gonna create blocks within which the treatment is exactly balanced (2 in LC, 2 in LN, 5 in HC, 5 in HN). Then add the blocks together to get to the desired number of participants.
import random
treatment_block = list(range(1,15))
treatment_assignment = []
for i in range(num_blocks):
treatment_assignment = treatment_assignment + treatment_block
random.shuffle(treatment_assignment)
for i in range(len(treatment_assignment)):
if treatment_assignment[i] <= 2:
treatment_assignment[i] = "LC"
elif treatment_assignment[i] > 2 and treatment_assignment[i] <= 4:
treatment_assignment[i] = "LN"
elif treatment_assignment[i] > 4 and treatment_assignment[i] <= 9:
treatment_assignment[i] = "HC"
elif treatment_assignment[i] >9:
treatment_assignment[i] = "HN"
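    # Hypothetical sanity check (added illustration): every block of 14 contributes
    # 2 LC, 2 LN, 5 HC and 5 HN, so the shuffled vector keeps a 2:2:5:5 ratio.
    assert treatment_assignment.count("LC") * 5 == treatment_assignment.count("HC") * 2
    assert treatment_assignment.count("LN") * 5 == treatment_assignment.count("HN") * 2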
class Subsession(BaseSubsession):
pass
def creating_session(subsession):
import itertools, random
treatment_assignment = itertools.cycle(Constants.treatment_assignment)
for player in subsession.get_players():
# determine treatment
player.participant.treatment = next(treatment_assignment)
player.treatment = player.participant.treatment
# practice maths questions - randomly select two to show in instructions
practice_maths_qs_index = list(range(4))
random.shuffle(practice_maths_qs_index)
player.participant.mathspractice_q1 = practice_maths_qs_index[0]
player.participant.mathspractice_q2 = practice_maths_qs_index[1]
class Group(BaseGroup):
pass
class Player(BasePlayer):
ProlificID = models.StringField()
treatment = models.StringField()
start_epochtime = models.IntegerField()
start_clocktime = models.StringField()
# maths practice questions
q1 = models.StringField(
label = "A shop has an offer: buy 8 kiwis, and every extra kiwi after that is half price. A man goes to the shop and pays £4.50 for some kiwis. The full price of a kiwi is £0.50. How many does he buy?",
choices = [
"9",
"12",
"10",
"15"
],
widget = widgets.RadioSelectHorizontal,
blank=True)
q2 = models.StringField(
label = "A hairdresser has an offer: every third visit is free. They charge £48 for a haircut. Last year Sarah paid £144 for a haaircut. How many times did she go?",
choices = [
"Two times",
"Three times",
"Four times",
"Five times"
],
widget = widgets.RadioSelectHorizontal,
blank=True)
q3 = models.StringField(
label = "A woman walks from the bottom to the top of a hill. She starts at 9.40am and arrives at the top at 10.20 am. She takes a rest for ten minutes. Then she walks back down. On the way down she walks twice as fast as she did on the way up. What time is it when she reaches the bottom of the hill?",
choices = [
"11.20",
"10.40",
"10.50",
"11.10"
],
widget = widgets.RadioSelectHorizontal,
blank=True)
q4 = models.StringField(
label = "A trader buys a painting for £120 and sells it for £170. They pay a £10 transaction fee. Their profit expressed as a percentage of total cost is:",
choices = [
"50%",
"60%",
"80%",
"33%"
],
widget = widgets.RadioSelectHorizontal,
blank=True)
# PAGES
class Consent(Page):
def is_displayed(player):
# record time player entered application
import time
time_in = round(time.time())
player.start_epochtime = time_in
player.participant.start_epochtime = time_in
player.start_clocktime = time.strftime('%H:%M:%S', time.localtime(time_in))
return 1
class ProlificID(Page):
form_model = 'player'
form_fields = ['ProlificID']
class Introduction(Page):
form_model = 'player'
def get_form_fields(player: Player):
questions = ['q1','q2','q3','q4']
form_fields = [
questions[player.participant.mathspractice_q1]
]
return form_fields
page_sequence = [Consent, ProlificID, Introduction]
|
LiamOFoghlu/Receiver
|
Intro/__init__.py
|
__init__.py
|
py
| 5,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31969871422
|
from django.contrib.auth import get_user_model
from django.db import transaction
from django.db.models import Q
from rest_framework import serializers
from rest_framework.exceptions import ValidationError, NotFound
from rest_framework.generics import get_object_or_404
from versatileimagefield.serializers import VersatileImageFieldSerializer
User = get_user_model()
class PrivateMeSerializer(serializers.ModelSerializer):
image = VersatileImageFieldSerializer(
required=False,
sizes=[
("original", "url"),
("at256", "crop__256x256"),
("at512", "crop__512x512"),
],
)
class Meta:
model = User
fields = [
"first_name",
"last_name",
"username",
"slug",
"phone",
"image",
"email",
]
read_only_fields = ["slug", "phone","username",]
|
seefat/harvest_hub_apis
|
core/rest/serializers/me.py
|
me.py
|
py
| 928 |
python
|
en
|
code
| 0 |
github-code
|
6
|
51224431
|
from typing import *
class Solution:
def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:
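        # Normalize a word to its structural signature: each character maps to the
        # index of its first occurrence, e.g. "mee" -> "011" and "abb" -> "011".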
def w2i(w):
return ''.join(str(i) for i in map(lambda x: w.index(x), w))
p = w2i(pattern)
res = []
for w in words:
w_i = w2i(w)
if w_i == p:
res.append(w)
return res
if __name__ == "__main__":
s = Solution()
words = ["abc","deq","mee","aqq","dkd","ccc"]
pattern = "abb"
assert s.findAndReplacePattern(words, pattern) == ["mee","aqq"]
|
code-cp/leetcode
|
solutions/890/main.py
|
main.py
|
py
| 576 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30818901121
|
# Created by Andrew Davison
# Instructions to run unittest: Run main conditional at end of file
import unittest
from incident_app import calculations as calc
class TestCalculations(unittest.TestCase):
def test_calculate_average_force(self):
measurements = [30.2, 30.5, 30.4, 30.2, 30.3]
assert calc.calculate_average_force(measurements) == 30.32
measurements = [130.2, 130.5, 130.4, 130.2, 130.3]
assert calc.calculate_average_force(measurements) == 130.32
measurements = [210, 202.2, 215, 205, 204.3]
assert calc.calculate_average_force(measurements) == 207.3
with self.assertRaises(TypeError):
measurements = ['130.2', '130.5', '130.4', '130.2', '130.3']
calc.calculate_average_force(measurements)
def test_calculate_drag_factor(self):
force, sled_weight = 30.32, 230
assert round(calc.calculate_drag_factor(force, sled_weight), 2) == 0.13
force, sled_weight = 130.32, 230
assert round(calc.calculate_drag_factor(force, sled_weight), 2) == 0.57
with self.assertRaises(TypeError):
force, sled_weight = '130.32', 230
round(calc.calculate_drag_factor(force, sled_weight), 2)
force, sled_weight = 130.32, '230'
round(calc.calculate_drag_factor(force, sled_weight), 2)
with self.assertRaises(ZeroDivisionError):
force, sled_weight = 130.32, 0
round(calc.calculate_drag_factor(force, sled_weight), 2)
def test_calculate_velocity(self):
factor, distance = 0.13, 45
assert round(calc.calculate_velocity(factor, distance), 2) == 19.41
distance = 15
assert round(calc.calculate_velocity(factor, distance), 2) == 11.21
distance = 30
assert round(calc.calculate_velocity(factor, distance), 2) == 15.85
distance = 0
assert round(calc.calculate_velocity(factor, distance), 2) == 0
factor, distance = calc.calculate_drag_factor(30.32, 230), 45
assert round(calc.calculate_velocity(factor, distance), 2) == 19.55
distance = 15
assert round(calc.calculate_velocity(factor, distance), 2) == 11.28
distance = 30
assert round(calc.calculate_velocity(factor, distance), 2) == 15.96
with self.assertRaises(TypeError):
factor, distance = 0.13, '0'
round(calc.calculate_velocity(factor, distance), 2)
factor, distance = '0.13', 0
round(calc.calculate_velocity(factor, distance), 2)
def test_calculate_time_of_skid(self):
factor = calc.calculate_drag_factor(30.32, 230)
velocity = calc.calculate_velocity(factor, 45)
assert round(calc.calculate_time_of_skid(velocity, factor), 2) == 4.60
velocity = calc.calculate_velocity(factor, 30)
assert round(calc.calculate_time_of_skid(velocity, factor), 2) == 3.76
velocity = calc.calculate_velocity(factor, 15)
assert round(calc.calculate_time_of_skid(velocity, factor), 2) == 2.66
with self.assertRaises(TypeError):
velocity = calc.calculate_velocity(factor, 15)
round(calc.calculate_time_of_skid(str(velocity), factor), 2)
velocity = calc.calculate_velocity(factor, 15)
round(calc.calculate_time_of_skid(velocity, str(factor)), 2)
with self.assertRaises(ZeroDivisionError):
round(calc.calculate_time_of_skid(velocity, 0), 2)
def test_calculate_kinetic_energy(self):
factor = calc.calculate_drag_factor(30.32, 230)
weight, velocity = 3674, calc.calculate_velocity(factor, 30)
assert round(calc.calculate_kinetic_energy(weight, velocity), 2) == 14529.87
assert round(calc.calculate_kinetic_energy(0, velocity), 2) == 0
assert round(calc.calculate_kinetic_energy(weight, 0), 2) == 0
with self.assertRaises(TypeError):
round(calc.calculate_kinetic_energy(str(weight), velocity), 2)
round(calc.calculate_kinetic_energy(weight, str(velocity)), 2)
def test_calculate_speed(self):
factor, distance = calc.calculate_drag_factor(30.32, 230), 45
velocity = calc.calculate_velocity(factor, distance)
assert round(calc.calculate_speed(velocity), 2) == 13.33
distance = 15
velocity = calc.calculate_velocity(factor, distance)
assert round(calc.calculate_speed(velocity), 2) == 7.70
distance = 30
velocity = calc.calculate_velocity(factor, distance)
assert round(calc.calculate_speed(velocity), 2) == 10.89
assert round(calc.calculate_speed(0), 2) == 0
with self.assertRaises(TypeError):
round(calc.calculate_speed(str(velocity)), 2)
if __name__ == "__main__":
unittest.main()
|
wrosoff4/software_engineering_capstone
|
tests/unit/calculations_test.py
|
calculations_test.py
|
py
| 4,892 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10721383879
|
import os, time, ctypes, sys, winreg
os.system("title wineditor ^| www.milu.cf")
os.system('mode con lines=17 cols=78')
def is_admin():
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except:
return False
if is_admin():
# ---------- defender options ----------
def defenderoptions():
os.system("cls")
print(" Windows Defender Options")
print("=====================================")
print("1. Enable Defender")
print("2. Disable Defender")
print("3. Go Back")
print("=====================================")
defenderchoice = input("Choice> ")
if defenderchoice == '1':
enabledefender()
elif defenderchoice == '2':
disabledefender()
elif defenderchoice == '3':
main()
else:
print("invalid choice option")
time.sleep(2)
os.system("cls")
defenderoptions()
# - enable defender -
def enabledefender():
        winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows Defender\Features")
os.system(r'reg add "HKLM\SOFTWARE\Microsoft\Windows Defender\Features" /v "TamperProtection" /t "REG_DWORD" /d "5" /f')
os.system(r'reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows Defender" /v "DisableAntiSpyware" /t "REG_DWORD" /d "0" /f')
os.system(r'reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows Defender\Real-Time Protection" /v "DisableRealtimeMonitoring" /t "REG_DWORD" /d "0" /f')
rebootdefender()
# - disable defender -
def disabledefender():
print("due to a windows update, doing this can no longer be done 100% automated\nyou have to manually go into windows security and turn off \"Tamper Protection\"")
print("after you have turned it off, come back here and type \"continue\"")
ans = input("> ")
if ans == "continue":
            winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows Defender\Features")
#os.system(r'reg add "HKLM\SOFTWARE\Microsoft\Windows Defender\Features" /v "TamperProtection" /t "REG_DWORD" /d "0" /f')
os.system(r'reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows Defender" /v "DisableAntiSpyware" /t "REG_DWORD" /d "1" /f')
os.system(r'reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows Defender\Real-Time Protection" /v "DisableRealtimeMonitoring" /t "REG_DWORD" /d "1" /f')
rebootdefender()
else:
print("returning to main screen...")
time.sleep(3)
main()
# - reboot defender -
def rebootdefender():
os.system("cls")
restart = input("Windows Defender has been modified. Do you want to re-log your PC to apply\nthe new setting? (Y/N): ")
if restart == 'N':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'n':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'Y':
os.system("shutdown -l")
elif restart == 'y':
os.system("shutdown -l")
else:
print("invalid choice option")
time.sleep(1)
os.system("cls")
rebootdefender()
# ---------- cortana options ----------
def cortanaoptions():
os.system("cls")
print(" Cortana Options")
print("=====================================")
print("1. Enable Cortana")
print("2. Disable Cortana")
print("3. Go Back")
print("=====================================")
cortanachoice = input("Choice> ")
if cortanachoice == '1':
enablecortana()
elif cortanachoice == '2':
disablecortana()
elif cortanachoice == '3':
main()
else:
print("invalid choice option")
time.sleep(2)
os.system("cls")
defenderoptions()
# - enable cortana -
def enablecortana():
os.system(r'reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows\Windows Search" /v "AllowCortana" /t "REG_DWORD" /d "1" /f')
rebootcortana()
# - disable cortana -
def disablecortana():
os.system(r'reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows\Windows Search" /v "AllowCortana" /t "REG_DWORD" /d "0" /f')
rebootcortana()
# - reboot cortana -
def rebootcortana():
os.system("cls")
restart = input("Cortana has been modified. Do you want to re-log your PC to apply the new\nsetting? (Y/N): ")
if restart == 'N':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'n':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'Y':
os.system("shutdown -l /t 1")
elif restart == 'y':
os.system("shutdown -l /t 1")
else:
print("invalid choice option")
time.sleep(1)
os.system("cls")
rebootcortana()
# ---------- windows feedback options ----------
def feedbackoptions():
os.system("cls")
print(" Windows Feedback Options")
print("=====================================")
print("1. Enable Feedback Notifs")
print("2. Disable Feedback Notifs")
print("3. Go Back")
print("=====================================")
feedbackchoice = input("Choice> ")
if feedbackchoice == '1':
enablefeedback()
elif feedbackchoice == '2':
disablefeedback()
elif feedbackchoice == '3':
main()
else:
print("invalid choice option")
time.sleep(2)
os.system("cls")
defenderoptions()
# - enable feedback -
def enablefeedback():
os.system(r'reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows\DataCollection" /v "DoNotShowFeedbackNotifications" /t "REG_DWORD" /d "0" /f')
rebootfeedback()
# - disable feedback -
def disablefeedback():
os.system(r'reg add "HKLM\SOFTWARE\Policies\Microsoft\Windows\DataCollection" /v "DoNotShowFeedbackNotifications" /t "REG_DWORD" /d "1" /f')
rebootfeedback()
# - reboot feedback -
def rebootfeedback():
os.system("cls")
restart = input("Windows Feedback has been modified. Do you want to re-log your PC to apply\nthe new setting? (Y/N): ")
if restart == 'N':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'n':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'Y':
os.system("shutdown -l /t 1")
elif restart == 'y':
os.system("shutdown -l /t 1")
else:
print("invalid choice option")
time.sleep(1)
os.system("cls")
rebootfeedback()
# ---------- optimize shutdown options ----------
def shutdownoptions():
os.system("cls")
print(" Optimize Shutdown Options")
print("=====================================")
print("1. Optimize Shutdown")
print("2. What Does This Do?")
print("3. Go Back")
print("=====================================")
shutdownchoice = input("Choice> ")
if shutdownchoice == '1':
optimizeshutdown()
elif shutdownchoice == '2':
aboutshutdown()
elif shutdownchoice == '3':
main()
else:
print("invalid choice option")
time.sleep(2)
os.system("cls")
shutdownoptions()
# - optimize shutdown -
def optimizeshutdown():
os.system(r'reg add "HKLM\SYSTEM\CurrentControlSet\Control" /v "WaitToKillServiceTimeout" /t "REG_DWORD" /d "1000" /f')
rebootshutdown()
# - about shutdown -
def aboutshutdown():
print("Optimize Shutdown will speed up the time it takes for your PC to shut down.\nIt removes the \"waiting for all apps to close\" feature.")
shutdownchoice = input("Choice> ")
if shutdownchoice == '1':
optimizeshutdown()
elif shutdownchoice == '2':
aboutshutdown()
elif shutdownchoice == '3':
main()
else:
print("invalid choice option")
time.sleep(2)
os.system("cls")
shutdownoptions()
# - reboot shutdown -
def rebootshutdown():
os.system("cls")
restart = input("Shutdown speed has been optimized. Do you want to re-log your PC to apply\nthe new setting? (Y/N): ")
if restart == 'N':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'n':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'Y':
os.system("shutdown -l /t 1")
elif restart == 'y':
os.system("shutdown -l /t 1")
else:
print("invalid choice option")
time.sleep(1)
os.system("cls")
rebootshutdown()
# ---------- optimize start-up options ----------
def startupoptions():
os.system("cls")
print(" Optimize Start-Up Options")
print("=====================================")
print("1. Optimize Start-Up")
print("2. What Does This Do?")
print("3. Go Back")
print("=====================================")
print("NOTE: This may not have a visible effect.")
startupchoice = input("Choice> ")
if startupchoice == '1':
optimizestartup()
elif startupchoice == '2':
aboutstartup()
elif startupchoice == '3':
main()
else:
print("invalid choice option")
time.sleep(2)
os.system("cls")
startupoptions()
# - optimize startup -
def optimizestartup():
        winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Serialize")
        os.system(r'reg add "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Serialize" /v "StartupDelayInMSec" /t "REG_DWORD" /d "0" /f')
rebootstartup()
# - about start-up -
def aboutstartup():
print("Optimize Start-Up will speed up the time it takes for your PC to start-up\nwhen you turn it on.\nIt removes the delay that windows defaults before your apps open at start-up.")
startupchoice = input("Choice> ")
if startupchoice == '1':
optimizestartup()
elif startupchoice == '2':
aboutstartup()
elif startupchoice == '3':
main()
else:
print("invalid choice option")
time.sleep(2)
os.system("cls")
startupoptions()
# - reboot start-up -
def rebootstartup():
os.system("cls")
restart = input("Start-Up speed has been optimized. Do you want to re-log your PC to apply\nthe new setting? (Y/N): ")
if restart == 'N':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'n':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'Y':
os.system("shutdown -l /t 1")
elif restart == 'y':
os.system("shutdown -l /t 1")
else:
print("invalid choice option")
time.sleep(1)
os.system("cls")
rebootstartup()
# ---------- clear TEMP options ----------
def cleartempoptions():
os.system("cls")
print(" Clear TEMP Folders Options")
print("=====================================")
print("1. Clear TEMP Folders")
print("2. What Does This Do?")
print("3. Go Back")
print("=====================================")
cleartempchoice = input("Choice> ")
if cleartempchoice == '1':
cleartemp()
elif cleartempchoice == '2':
aboutcleartemp()
elif cleartempchoice == '3':
main()
else:
print("invalid choice option")
time.sleep(2)
os.system("cls")
cleartempoptions()
# - clear TEMP -
def cleartemp():
os.system(r'del /F /S /Q "%TEMP%\*.*" >nul 2>nul')
os.system(r'rd /S /Q "%TEMP%" >nul 2>nul')
os.system(r'md "%TEMP%" >nul 2>nul')
os.system(r'rd /S /Q "%SystemDrive%\temp" >nul 2>nul')
print("successfully cleared TEMP folders, returning to main screen...")
time.sleep(3)
main()
# - about clear TEMP -
def aboutcleartemp():
print("Clear TEMP Folders will delete most files in your %temp% folders.\nIt won't delete anything needed, they're temporary files that take up space.")
cleartempchoice = input("Choice> ")
if cleartempchoice == '1':
cleartemp()
elif cleartempchoice == '2':
aboutcleartemp()
elif cleartempchoice == '3':
main()
else:
print("invalid choice option")
time.sleep(2)
os.system("cls")
cleartempoptions()
# ---------- windows security options ----------
def securityoptions():
os.system("cls")
print(" Windows Security Options")
print("=====================================")
print("1. Enable Security Notifs")
print("2. Disable Security Notifs")
print("3. Go Back")
print("=====================================")
print("NOTE: This may or may not work for you.")
securitychoice = input("Choice> ")
if securitychoice == '1':
enablesecnotifs()
elif securitychoice == '2':
disablesecnotifs()
elif securitychoice == '3':
main()
else:
print("invalid choice option")
time.sleep(2)
os.system("cls")
securityoptions()
# - enable security notifs -
def enablesecnotifs():
os.system(r'reg add "HKLM\SOFTWARE\Microsoft\Windows Defender Security Center\Notifications" /v "DisableAllNotifications" /t "REG_DWORD" /d "0" /f')
rebootsecnotifs()
# - disable security notifs -
def disablesecnotifs():
os.system(r'reg add "HKLM\SOFTWARE\Microsoft\Windows Defender Security Center\Notifications" /v "DisableAllNotifications" /t "REG_DWORD" /d "1" /f')
rebootsecnotifs()
# - reboot start-up -
def rebootsecnotifs():
os.system("cls")
restart = input("Windows Security Notifs have been changed. Do you want to re-log your PC to\napply the new setting? (Y/N): ")
if restart == 'N':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'n':
print("returning to main screen...")
time.sleep(3)
main()
elif restart == 'Y':
os.system("shutdown -l /t 1")
elif restart == 'y':
os.system("shutdown -l /t 1")
else:
print("invalid choice option")
time.sleep(1)
os.system("cls")
rebootsecnotifs()
# ---------- info screen ----------
def info():
os.system("cls")
print(" info")
print("=====================================")
print(" wineditor vers | 1.1")
print(" creator | www.milu.cf")
print("=====================================")
choice = input("Type 1 to go back> ")
if choice == '1':
main()
else:
print("invalid choice option")
time.sleep(2)
info()
def defendercheck(): # unfinished
try:
path = winreg.HKEY_LOCAL_MACHINE
            key = winreg.OpenKeyEx(path, r"SOFTWARE\Policies\Microsoft\Windows Defender")
value = "DisableAntiSpyware"
data = winreg.QueryValueEx(key,value)
if key:
winreg.CloseKey(key)
print(data[1:2])
except Exception as e:
print(e)
return None
# ---------- main screen ----------
def main():
os.system("cls")
print(" Welcome to wineditor v1.1 by milu")
print("=====================================")
print("1. Windows Defender Options")
print("2. Cortana Options")
print("3. Windows Feedback Options")
print("4. Optimize Shutdown")
print("5. Optimize Start-Up")
print("6. Clear TEMP Folders")
print("7. Windows Security Options")
print("8. Info")
print("=====================================")
choice = input("Choice> ")
if choice == '1':
defenderoptions()
elif choice == '2':
cortanaoptions()
elif choice == '3':
feedbackoptions()
elif choice == '4':
shutdownoptions()
elif choice == '5':
startupoptions()
elif choice == '6':
cleartempoptions()
elif choice == '7':
securityoptions()
elif choice == '8':
info()
else:
print("invalid choice option")
time.sleep(2)
main()
main()
else:
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, " ".join(sys.argv), None, 1)
|
milu-zzz/wineditor
|
wineditor.py
|
wineditor.py
|
py
| 18,286 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3977236501
|
#!/usr/bin/env python3
from ddpg import Agent
import numpy as np
from ts_forecasting_env import ts_forecasting_env
import time
import matplotlib.pyplot as plt
import csv
import pandas as pd
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import argparse
from ray import tune
from ray.tune.schedulers import ASHAScheduler
# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("--traj", type=int, default=1, help="choose trajectory")
args = parser.parse_args()
# Load and prepare data
############################## Define variables #########################################
TRAJECTORY = args.traj
SPLIT_RATE = 0.80 # split data into train and test data
#########################################################################################
# Open csv
file = open('allData/traj' + str(TRAJECTORY) + '_allData.csv')
# Read csv
csvreader = csv.reader(file)
# Store csv data in numpy ndarray
rows = []
for row in csvreader:
rows.append(row)
file.close()
data_ = np.array(rows, dtype=np.float64)
data_ = np.concatenate(data_)
# Data split
split_index = round(len(data_) * SPLIT_RATE)
train_data, test_data = data_[:split_index], data_[split_index:]
# Normalize data
max = np.max(data_)
min = np.min(data_)
TRAIN_DATA = (train_data - min) / (max - min)
TEST_DATA = (test_data - min) / (max - min)
# Run one DDPG training/evaluation trial for a given tuning configuration
def tune_lstm(config):
# Training setup
############################## Define hyper parameters ##################################
LR_ACTOR = config["a_lr"]
LR_CRITIC = config["c_lr"]
TAU = 0.1
GAMMA = 0.9
BATCH_SIZE = config["bs"]
ACTOR_LAYER = config["layer"]
CRITIC_LAYER = config["layer"]
REPLAY_BUFFER_SIZE = 100000
HISTORICAL_DP = config["hdp"] # historical data points (length of state)
#########################################################################################
# Call environment
env = ts_forecasting_env(historical_dp=HISTORICAL_DP, data=TRAIN_DATA)
# Call agent
agent = Agent(alpha=LR_ACTOR, beta=LR_CRITIC, input_dims=[HISTORICAL_DP], tau=TAU,
gamma=GAMMA,batch_size=BATCH_SIZE, layer1_size=ACTOR_LAYER, n_actions=1,
layer2_size=CRITIC_LAYER, max_size=REPLAY_BUFFER_SIZE)
############################## Define training parameters ###############################
EPISODES = 15
MAX_STEPS = 1000
#########################################################################################
np.random.seed(0)
# Train the agent
for i in range(1, EPISODES + 1):
obs = env.reset()
done = False
reward = 0
for step in range(MAX_STEPS):
act = agent.choose_action(obs)
new_state, step_reward, done, _ = env.step(act)
agent.remember(obs, act, step_reward, new_state, int(done))
agent.learn()
reward += step_reward
obs = new_state
if done:
break
# Test the agent
pred = []
for i in range(len(TEST_DATA)):
state = np.array(TEST_DATA[0 + i:HISTORICAL_DP + i], dtype=np.float64)
action = agent.choose_action(state)
pred.append(action)
if HISTORICAL_DP + i == len(TEST_DATA):
break
pred = np.concatenate(pred)
pred = pd.Series(pred)
pred = pred * (max - min) + min
real = pd.Series(test_data[HISTORICAL_DP:])
# Report result to tuner
# MAE
tune.report(mean_accuracy=mean_absolute_error(real, pred))
# # MSE
# tune.report(mean_accuracy=mean_squared_error(real, pred, squared=False))
# Tuner configurations
config = {
"a_lr": tune.grid_search([0.001, 0.002, 0.003, 0.004, 0.005]),
"c_lr": tune.grid_search([0.001, 0.002, 0.003, 0.004, 0.005]),
"bs": tune.grid_search([2 ** i for i in range(5,8)]),
"layer": tune.grid_search([2 ** i for i in range(5,8)]),
"hdp": tune.grid_search([10, 15, 25]),
}
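# The grid above yields 5 * 5 * 3 * 3 * 3 = 675 trial configurations in total.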
# Run tuner
analysis = tune.run(
tune_lstm,
resources_per_trial={"cpu": 12, "gpu": 1},
config=config,
mode="min"
)
print("Best config: ", analysis.get_best_config(metric="mean_accuracy"))
df = analysis.dataframe()
|
tiagomateus25/time-series-forecasting-ddpg
|
bvg_optimization.py
|
bvg_optimization.py
|
py
| 4,277 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33380975525
|
from oregami.reg_utils import *
from oregami.reg_type import rf_settype, get_type_from_user
from oregami.reg_frame import RegFrame
import ida_offset
class OffRegPlugin(idaapi.plugin_t):
flags = idaapi.PLUGIN_PROC
comment = "OffReg"
help = "Set offset for regs in their usage frame - only when " \
"used as a specific variable"
wanted_name = "OffReg"
wanted_hotkey = "Shift+R"
@staticmethod
def init():
return idaapi.PLUGIN_OK
@staticmethod
def term():
pass
@staticmethod
def run(arg):
start, _ = sark.get_selection()
offreg_plugin_starter(start)
def PLUGIN_ENTRY():
return OffRegPlugin()
def rf_setoff(rf, off_ea):
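    # Walk every non-initialising instruction in the register's usage frame and turn
    # operands that only read the register (explicitly) into offsets based at off_ea;
    # memory-displacement operands are converted directly, otherwise an immediate
    # operand on the same instruction is used as a fallback.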
for insn in rf.get_noinit_instructions():
done_offset = False
need_offset = False
for opnd in insn.operands:
if opnd.uf_is_read and (not opnd.uf_is_write) and \
(not opnd.uf_is_implicit):
need_offset = True
print('Setting offset {:x} for {:x} operand #{}'.format(off_ea, insn.ea, opnd.n))
if opnd.type.name == 'Memory_Displacement':
ida_offset.op_offset(insn.ea, opnd.n, idc.REFINFO_NOBASE | idc.REF_OFF32, idc.BADADDR, off_ea)
done_offset = True
if need_offset and (not done_offset):
# probably another operand is an immediate value which needs this to be applied to it. May have false positives
for opnd in insn.operands:
if opnd.type.name == 'Immediate_Value':
ida_offset.op_offset(insn.ea, opnd.n, idc.REFINFO_NOBASE | idc.REF_OFF32, idc.BADADDR, off_ea)
break
def offreg_plugin_starter(orig_ea):
canon_list = conf.proc.get_reg_list()
# print canon_list
reg = get_reg_from_cursor(orig_ea, canon_list)
if reg is None:
# Ask for user input - may be used to look for a reg influencing
# the line - even if it doesn't exist on the line
reg_idx = RegChoose(orig_ea, canon_list).Show(True)
if reg_idx >= 0:
reg = canon_list[reg_idx]
else:
return
reg = RegName(orig_ea, canon_list).canon(reg)
if reg is None:
return
# Get type name
off_ea = ida_kernwin.ask_addr(0, 'Choose offset')
if off_ea is None:
return
# global conf
rf = RegFrame(orig_ea, reg, force=(not conf.cache_bool))
rf_setoff(rf, off_ea)
|
shemesh999/oregami
|
offreg_plugin.py
|
offreg_plugin.py
|
py
| 2,515 |
python
|
en
|
code
| 183 |
github-code
|
6
|
72340854587
|
import os
import csv
import json
import tweepy
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from tweepy_auth import tweepy_auth
'''
today = datetime.today()
week_ago = today - timedelta(days=7)
week_ago_str = week_ago.strftime('%Y-%m-%d')
'''
auth = tweepy_auth()
api = tweepy.API(auth,
wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
tweets = tweepy.Cursor(api.search,
q=['#blacklivesmatter OR #blm'],
lang='en',
result_type='recent',
tweet_mode='extended',
count=100).items()
df = pd.DataFrame(columns=['id', 'created_at', 'full_text', 'favorite_count',
'retweet_count', 'hashtags'])
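# Collect each tweet's fields into the dataframe, then write one CSV per day below.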
for tweet in tweets:
hashtags = []
for hashtag in tweet.entities['hashtags']:
hashtags.append(hashtag['text'])
print(tweet.created_at)
df = df.append({'id': tweet.id,
'created_at': tweet.created_at,
'full_text': tweet.full_text.encode('utf-8','ignore'),
'favorite_count': tweet.favorite_count,
'retweet_count': tweet.retweet_count,
'hashtags': hashtags},
ignore_index=True)
df['created_at'] = pd.to_datetime(df['created_at'])
print(df.head())
for name, group in df.groupby(pd.Grouper(key='created_at',freq='D')):
parsed_name = str(name).split(' ')[0].replace('-', '_')
print(parsed_name)
group.to_csv('./data/blm_'+ parsed_name +'.csv', index=False)
|
ConwayHsieh/BLM_tweets
|
tweepy_pandastry.py
|
tweepy_pandastry.py
|
py
| 1,444 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31286775508
|
import os
import sys
from datetime import datetime
from argparse import ArgumentParser, ArgumentTypeError
from subprocess import check_output, CalledProcessError, Popen, PIPE, DEVNULL
from contextlib import contextmanager
class FileExistsException(Exception):
def __init__(self, path):
self.path = path
def main():
args = parse_args(sys.argv[1:])
try:
path = jekyll_post(args)
except FileExistsException as ex:
print('A file already exists at \'{}\'.'.format(ex.path),
file=sys.stderr)
return 1
if path != '-':
print(path)
return 0
def parse_args(raw_args):
args = make_parser().parse_args(raw_args)
args.date = args.date or now()
args.attributes = args.attributes or []
return args
def make_parser():
p = ArgumentParser(description='Creates a new Jekyll post, and prints its '
'path to standard out.')
p.add_argument('title', type=escape_str,
help='The title for the new post.')
g = p.add_mutually_exclusive_group(required=True)
g.add_argument('-c', '--category',
help='The path of the category directory for the new post, '
'such that it will be written into '
'\'$JEKYLL_SITE_PATH/$category/_posts\'. ')
g.add_argument('-d', '--directory', type=directory_exists,
help='The path of the directory to write the new post '
'into.')
g.add_argument('-o', '--output', metavar='PATH',
help='The path to write the new post to. Provide \'-\' to '
'write to standard out.')
p.add_argument('-t', '--date', type=parse_datetime,
help='The date and time for the new post, in a format '
'accepted by the `date` utility. Default: now.')
p.add_argument('-x', '--extension', default='md',
help='The file extension for the new post. '
'Default: \'md\'.')
p.add_argument('-a', '--attributes', nargs="*", metavar='ATTR',
help='Extra attributes to put in the header, provided in a '
'format according to \'jekyll-post-header\'. The '
'\'layout\' attribute defaults to \'default\'.')
p.add_argument('-p', '--padding', type=int, default=10, metavar='NSPACES',
help='The number of spaces to left-align the attributes '
'by. Default: 10.')
return p
def escape_str(s):
return s.replace('\'', '\\\'')
def directory_exists(s):
if not os.path.isdir(s):
raise ArgumentTypeError('\'{}\' is not a directory.'.format(s))
return s
def parse_datetime(s):
try:
ds = check_output(['date', '--date={}'.format(s),
'--iso-8601=seconds'],
stderr=DEVNULL).decode().strip()
except CalledProcessError:
raise ArgumentTypeError(('\'{}\' is an invalid date. It must be in a '
'format accepted by the `date` utility\'s '
'`--date` argument.').format(s))
return datetime.strptime(ds, '%Y-%m-%dT%H:%M:%S%z')
def now():
return parse_datetime(datetime.now().isoformat())
def jekyll_post(args):
with header_proc(args) as proc:
path = get_post_path(args)
with open_post_file(path) as file:
for bline in proc.stdout:
line = bline.decode()[:-1]
print(line, file=file)
return path
def get_post_path(args):
if args.output:
return args.output
else:
filename = check_output(['jekyll-post-filename', args.title,
'--date', args.date.strftime('%Y-%m-%d'),
'--extension', args.extension],
stderr=DEVNULL).decode()[:-1]
dirname = (args.directory
or os.path.join(os.environ.get('JEKYLL_SITE_PATH', ''),
args.category,
'_posts'))
return os.path.join(dirname, filename)
@contextmanager
def open_post_file(path):
if path == '-':
yield sys.stdout
else:
if os.path.exists(path):
raise FileExistsException(path)
with open(path, 'w') as f:
yield f
def header_proc(args):
# TODO: this won't raise an exception if the script fails. Is there a way to
# check for errors, while still streaming the output?
return Popen(['jekyll-post-header', '--padding', str(args.padding),
'layout:"default"',
'date:"{}"'.format(args.date),
'title:"{}"'.format(args.title)]
+ args.attributes,
stdout=PIPE, stderr=DEVNULL)
if __name__ == '__main__':
rv = main()
sys.exit(rv)
|
Rainymood/rainymood.github.io
|
main.py
|
main.py
|
py
| 4,987 |
python
|
en
|
code
| 8 |
github-code
|
6
|
73823074747
|
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def isValidBST(root):
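    # Pass down an open (mini, maxi) interval: every node in the left subtree
    # must stay below its parent's value, every node in the right subtree above it.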
def check(root, mini, maxi):
if not root:
return True
if root.val <= mini or root.val >= maxi:
return False
return check(root.left, mini, root.val) and check(root.right, root.val, maxi)
return check(root, float("-inf"), float("inf"))
n = TreeNode(5)
n.left = TreeNode(1)
n.right = TreeNode(4)
n.right.left = TreeNode(3)
n.right.right = TreeNode(6)
print(isValidBST(n))
# time complexity: o(n)
# space complexity: o(n)
|
jateen67/leetcode
|
trees/medium/98_validate_binary_tree.py
|
98_validate_binary_tree.py
|
py
| 650 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73535540349
|
from django.urls import path
from . import views
app_name = 'party'
urlpatterns = [
#party
# Party URLs
path('create/<int:tournament_pk>/', views.PartyCreateView.as_view(), name='party_create'),
path('update/<int:pk>/', views.PartyUpdateView.as_view(), name='party_update'),
path('details/<int:pk>/', views.PartyDetailView.as_view(), name='party_details'),
path('parties/', views.PartyListView.as_view(), name='party_list'),
path('<int:pk>/', views.PartyDetailView.as_view(), name='party_detail'),
path('join/<int:party_pk>/', views.JoinPartyView.as_view(), name='join_party'),
path('leave/<int:party_pk>/', views.LeavePartyView.as_view(), name='leave_party'),
# URL pattern for closing a party
path('close/<int:pk>/', views.ClosePartyView.as_view(), name='close_party'),
# Delete an existing party
path('delete/<int:pk>/', views.PartyDeleteView.as_view(), name='party_delete'),
]
|
theAcer/wejprod
|
apps/party/urls.py
|
urls.py
|
py
| 942 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36248300326
|
class Node:
def __init__(self,val=None):
self.val = val
self.next = None
self.prev = None
def printl(head):
while head!=None:
print(head.val,end="--->")
head=head.next
print("NULL")
def reverse(head):
if head==None or head.next==None:
return head
else:
cur=head
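        # Swap next/prev on every node; following cur.prev (the old next) keeps
        # walking the original order. When the loop ends, `previous` is the old
        # predecessor of the tail, so previous.prev is the new head.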
while cur!=None:
previous=cur.prev
nex=cur.next
cur.next=previous
cur.prev=nex
cur=cur.prev
return previous.prev
head=Node(10)
n2=Node(20)
n3=Node(30)
n2.prev=head
head.next=n2
n3.prev=n2
n2.next=n3
printl(head)
head=reverse(head)
printl(head)
|
Si2-9harth/DSA-Practice-Problems
|
linked_list/reverse_dll.py
|
reverse_dll.py
|
py
| 660 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26043166506
|
from __future__ import annotations
import logging
import os
from dataclasses import dataclass
from pathlib import PurePath
from typing import Iterable
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.fs import (
AddPrefix,
CreateDigest,
Digest,
Directory,
FileContent,
MergeDigests,
RemovePrefix,
)
from pants.engine.process import ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.jvm.jdk_rules import InternalJdk, JvmProcess
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.jvm.resolve.jvm_tool import GenerateJvmLockfileFromTool
from pants.jvm.shading import jarjar
from pants.jvm.shading.jarjar import JarJar, JarJarGeneratorLockfileSentinel, MisplacedClassStrategy
from pants.jvm.target_types import JvmShadingRule, _shading_validate_rules
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class ShadeJarRequest(EngineAwareParameter):
path: PurePath
digest: Digest
rules: tuple[JvmShadingRule, ...]
# JarJar configuration options
skip_manifest: bool | None
misplaced_class_strategy: MisplacedClassStrategy | None
def __init__(
self,
*,
path: str | PurePath,
digest: Digest,
rules: Iterable[JvmShadingRule] | None = None,
skip_manifest: bool | None = None,
misplaced_class_strategy: MisplacedClassStrategy | None = None,
) -> None:
object.__setattr__(self, "path", path if isinstance(path, PurePath) else PurePath(path))
object.__setattr__(self, "digest", digest)
object.__setattr__(self, "rules", tuple(rules or ()))
object.__setattr__(self, "skip_manifest", skip_manifest)
object.__setattr__(self, "misplaced_class_strategy", misplaced_class_strategy)
self.__post_init__()
def __post_init__(self):
validation_errors = _shading_validate_rules(self.rules)
if validation_errors:
raise ValueError("\n".join(["Invalid rules provided:\n", *validation_errors]))
def debug_hint(self) -> str | None:
return str(self.path)
@dataclass(frozen=True)
class ShadedJar:
path: str
digest: Digest
_JARJAR_MAIN_CLASS = "com.eed3si9n.jarjar.Main"
_JARJAR_RULE_CONFIG_FILENAME = "rules"
@rule(desc="Applies shading rules to a JAR file")
async def shade_jar(request: ShadeJarRequest, jdk: InternalJdk, jarjar: JarJar) -> ShadedJar:
if not request.rules:
return ShadedJar(path=str(request.path), digest=request.digest)
output_prefix = "__out"
output_filename = os.path.join(output_prefix, request.path.name)
rule_config_content = "\n".join([rule.encode() for rule in request.rules]) + "\n"
logger.debug(f"Using JarJar rule file with following contents:\n{rule_config_content}")
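    # Materialise the JarJar rule file and an empty output directory as digests
    # before invoking the tool; the output prefix is stripped again further down.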
lockfile_request, conf_digest, output_digest = await MultiGet(
Get(GenerateJvmLockfileFromTool, JarJarGeneratorLockfileSentinel()),
Get(
Digest,
CreateDigest(
[
FileContent(
path=_JARJAR_RULE_CONFIG_FILENAME,
content=rule_config_content.encode("utf-8"),
),
]
),
),
Get(Digest, CreateDigest([Directory(output_prefix)])),
)
tool_classpath, input_digest = await MultiGet(
Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)),
Get(Digest, MergeDigests([request.digest, output_digest])),
)
toolcp_prefix = "__toolcp"
conf_prefix = "__conf"
immutable_input_digests = {
toolcp_prefix: tool_classpath.digest,
conf_prefix: conf_digest,
}
def should_skip_manifest() -> bool:
if request.skip_manifest is not None:
return request.skip_manifest
return jarjar.skip_manifest
system_properties: dict[str, str] = {
"verbose": str(logger.isEnabledFor(LogLevel.DEBUG.level)).lower(),
"skipManifest": str(should_skip_manifest()).lower(),
}
misplaced_class_strategy = request.misplaced_class_strategy or jarjar.misplaced_class_strategy
if misplaced_class_strategy:
system_properties["misplacedClassStrategy"] = misplaced_class_strategy.value
result = await Get(
ProcessResult,
JvmProcess(
jdk=jdk,
argv=[
_JARJAR_MAIN_CLASS,
"process",
os.path.join(conf_prefix, _JARJAR_RULE_CONFIG_FILENAME),
str(request.path),
output_filename,
],
classpath_entries=tool_classpath.classpath_entries(toolcp_prefix),
input_digest=input_digest,
extra_immutable_input_digests=immutable_input_digests,
extra_jvm_options=[
*jarjar.jvm_options,
*[f"-D{prop}={value}" for prop, value in system_properties.items()],
],
description=f"Shading JAR {request.path}",
output_directories=(output_prefix,),
level=LogLevel.DEBUG,
),
)
shaded_jar_digest = await Get(Digest, RemovePrefix(result.output_digest, output_prefix))
if request.path.parents:
# Restore the folder structure of the original path in the output digest
shaded_jar_digest = await Get(
Digest, AddPrefix(shaded_jar_digest, str(request.path.parent))
)
return ShadedJar(path=str(request.path), digest=shaded_jar_digest)
def rules():
return [*collect_rules(), *jarjar.rules()]
|
pantsbuild/pants
|
src/python/pants/jvm/shading/rules.py
|
rules.py
|
py
| 5,649 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
13448002846
|
#!/usr/bin/env python3
import rospy
from std_msgs.msg import Int32
from study_pkg.msg import Control
msg = Control()
msg.steer = 40
msg.speed = 10
rospy.init_node('talker2')
pub = rospy.Publisher('my_chat_topic2', Control, queue_size=10)
rate = rospy.Rate(1)
def topic_cb(msg):
rospy.loginfo('Speed: %d / Steer: %d' % (msg.speed, msg.steer))
try:
topic_cb(msg)
except (rospy.ROSInterruptException, KeyboardInterrupt):
rospy.logerr('Exception catched')
|
ashenone23/study_pkg
|
scripts/talker2.py
|
talker2.py
|
py
| 465 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30396062752
|
#https://www.codingame.com/training/medium/gravity-tumbler
#GRAVITY TUMBLER
import re
import numpy as np
w,h=map(int,input().split())
count=int(input())
m=[]
for i in range(h):
r=''.join(re.findall(r"#+",input()))
m+=[list(r+(w-len(r))*".")]
#Use numpy to rotate the 2D matrix
arr=np.array(m)
for i in range(count):
arr=np.rot90(arr)
if i!=0: arr=arr[::-1]
for j in range(len(arr)):print(''.join(arr[j]))
|
AllanccWang/CodingGame
|
classic puzzle-medium/gravity-tumbler.py
|
gravity-tumbler.py
|
py
| 421 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71477059068
|
import sys
input = sys.stdin.readline
# first line: the number of cities n
n = int(input())
# number of cities in the travel plan, m
m = int(input())
data = [list(map(int, input().split())) for _ in range(n)]
plans = list(map(int, input().split()))
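# Floyd-Warshall-style transitive closure: data[j][k] becomes 1 whenever
# city k is reachable from city j through intermediate city i.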
for i in range(n):
for j in range(n):
for k in range(n):
if data[j][i] and data[i][k]:
data[j][k] = 1
data[i][i] = 1
# for i in data:
# print(i)
for i in range(m-1):
plan1 = plans[i] - 1
plan2 = plans[i+1] - 1
if not data[plan1][plan2]:
print('NO')
break
else:
print('YES')
|
YOONJAHYUN/Python
|
BOJ/1976.py
|
1976.py
|
py
| 601 |
python
|
ko
|
code
| 2 |
github-code
|
6
|
7789722347
|
from tqdm import tqdm
import numpy as np
import torch
import torchvision.transforms as ttr
from torch.utils.data import DataLoader
import argparse
from aermanager import AERFolderDataset
from test_spiking import test_spiking
# Parameters
BATCH_SIZE = 256
parser = argparse.ArgumentParser()
parser.add_argument('--quantize_testing', action='store_true', default=False)
parser.add_argument('--max_batches', type=int, default=1000000)
opt = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# prepare dataset and dataloader
test_dataset = AERFolderDataset(
root='data/test/',
from_spiketrain=False,
transform=ttr.ToTensor(),
)
print("Number of testing frames:", len(test_dataset))
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True)
def detach(activity):
for activations in activity:
for (i, activation) in enumerate(activations):
activations[i] = activation.item()
return np.array(activity)
# def compute_accuracy(output, target):
# _, predicted = torch.max(output, 1)
# acc = (predicted == target).sum().float() / len(target)
# return acc.cpu().numpy()
# def test(path, w_rescale=1.0):
# # Define model and learning parameters
# classifier = MNISTClassifier(quantize=opt.quantize_testing).to(device)
# # Load appropriate model
# state_dict = torch.load(path)
# # Do rescaling
# if w_rescale != 1.0:
# state_dict['seq.0.weight'] *= w_rescale
# classifier.load_state_dict(state_dict)
# # Set hooks
# activity_tracker = SynOpCounter(classifier.modules(), sum_activations=False)
# # Test network accuracy
# with torch.no_grad():
# classifier.eval()
# activity = []
# accuracy = []
# for batch_id, sample in enumerate(tqdm(test_dataloader)):
# if batch_id > opt.max_batches:
# break
# test_data, test_labels = sample
# test_data = test_data.to(device)
# output = classifier(test_data)
# accuracy.append(compute_accuracy(output, test_labels.to(device)))
# activity.append(activity_tracker())
# return np.mean(detach(activity), axis=0), np.mean(accuracy)
if __name__ == '__main__':
# test non-optimized model
baseline_activity, baseline_accuracy = test_spiking(
'models/nopenalty_0.0.pth', return_all_synops=True
)
# test optimized model
optimized_activity, optimized_accuracy = test_spiking(
'models/l1-fanout-qtrain_321289.514081772.pth',
return_all_synops=True
)
baseline_activity = baseline_activity[baseline_activity > 0]
optimized_activity = optimized_activity[optimized_activity > 0]
np.savez(
'opt_benchmark.npz',
baseline_activity=baseline_activity,
optimized_activity=optimized_activity,
baseline_accuracy=baseline_accuracy,
optimized_accuracy=optimized_accuracy
)
|
fgr1986/synoploss
|
mnist_dvs/optimization_benchmarking.py
|
optimization_benchmarking.py
|
py
| 2,996 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26213379014
|
import time
import numpy as np
from scipy.sparse import csr_matrix
from scipy.special import expit
from tqdm import tqdm
from hw1.base import FactorizationModel
from hw1.utils import log_iter
class BPRModel(FactorizationModel):
def __init__(self, factors: int, lr: float, iterations: int, lambd: float = 0.,
verbose: bool = False, verbose_every: int = 1):
super().__init__(factors, iterations, verbose, verbose_every)
self._lr = lr
self._lambd = lambd
self._correct_cnt = 0
self._triplet_acc = 0.
@staticmethod
def _sample_negative(user_item: csr_matrix, user: int) -> int:
neg_item = np.random.choice(user_item.shape[1])
while user_item[user, neg_item] != 0:
neg_item = np.random.choice(user_item.shape[1])
return neg_item
def _grad_step(self, user: int, pos_item: int, neg_item: int):
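        # BPR-style SGD step on one (user, positive item, negative item) triplet:
        # `score` is sigma(u . (i_neg - i_pos)), the model's probability of ranking
        # the pair the wrong way round; its gradient plus L2 regularisation is applied below.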
score = expit(self._U[user] @ (self._I[neg_item] - self._I[pos_item]))
self._correct_cnt += score < 0.5
grad_user = score * (self._I[neg_item] - self._I[pos_item]) + self._lambd * self._U[user]
grad_pos = score * -self._U[user] + self._lambd * self._I[pos_item]
grad_neg = score * self._U[user] + self._lambd * self._I[neg_item]
self._U[user] -= self._lr * grad_user
self._I[pos_item] -= self._lr * grad_pos
self._I[neg_item] -= self._lr * grad_neg
def _grad_steps(self, user_item: csr_matrix):
self._triplet_acc = self._correct_cnt = 0
n_samples = user_item.count_nonzero()
order = np.random.permutation(n_samples)
users, items = user_item.nonzero()
for user, pos_item in zip(users[order], items[order]):
neg_item = self._sample_negative(user_item, user)
self._grad_step(user, pos_item, neg_item)
self._triplet_acc = self._correct_cnt / n_samples
def fit(self, user_item: csr_matrix) -> "BPRModel":
self._start_time = time.time()
self.init_matrices(*user_item.shape)
for iteration in tqdm(range(self._iterations), disable=not self._verbose):
self._grad_steps(user_item)
if self._verbose and (iteration + 1) % self._verbose_every == 0:
log_iter(iteration + 1, {"Triplet acc": self._triplet_acc}, time.time() - self._start_time)
return self
|
Sushentsev/recommendation-systems
|
hw1/models/bpr_model.py
|
bpr_model.py
|
py
| 2,367 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37564490314
|
import pdb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from scipy.stats import entropy, gaussian_kde, normaltest
import nflows
from nflows import distributions, transforms, utils, flows
from nflows.transforms.normalization import BatchNorm
from nflows.nn import nets
from nflows.transforms.base import (
CompositeTransform,
InputOutsideDomain,
InverseTransform,
Transform,
)
from nflows.utils import torchutils
def build_nflows(num_layers=2, hids=20, dims=2, context_dims=2,
batch_norm=False, activation=torch.nn.functional.relu, bins = 15, tail=8.0,
device = 'cuda', rqs=True, bimodal=False):
context_net = Linear_2L(context_dims, 2*dims, hids, 0.5, 0,
mc_drop = False, fixed_masks = False,
different_heads = False, device = device)
base_dist = nflows.distributions.ConditionalDiagonalNormal(
shape=[dims], context_encoder= context_net)
transforms = []
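    # Stack num_layers spline transforms: coupling layers with random permutations
    # when dims > 1, a masked autoregressive transform when dims == 1.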
def create_net(in_features, out_features):
return Linear_2L(in_features, out_features, hids, 0.5,
context_dims, fixed_masks = False,
different_heads = False, device=device)
for _ in range(num_layers):
if dims > 1:
transforms.append(nflows.transforms.RandomPermutation(features=dims))
mask = nflows.utils.torchutils.create_mid_split_binary_mask(dims)
transforms.append(
nflows.transforms.PiecewiseCubicCouplingTransform(mask, create_net,
tails='linear', num_bins=bins, tail_bound=tail,
))
if dims == 1:
transforms.append(
nflows.transforms.MaskedPiecewiseQuadraticAutoregressiveTransform(
features=dims,
hidden_features=hids,
context_features=context_dims,
num_blocks = 2,
use_batch_norm=batch_norm,
num_bins=bins,
tails='linear',
tail_bound = tail,
activation = activation,
use_residual_blocks = False,))
transform = nflows.transforms.CompositeTransform(transforms)
flow = nflows.flows.Flow(transform, base_dist)
return flow
def build_nflows_ensemble(num_layers=2, hids=20, dims=2, context_dims=2,
batch_norm=False, activation=torch.nn.functional.relu, bins = 15, tail=8.0,
device = 'cuda', rqs=True, base = True, flows = True, multihead=False,
fixed_masks=False, ensemble_size=15, bimodal=False):
if base:
context_net = Linear_2L(context_dims, 2*dims, hids*2, 0.5, 0,
fixed_masks = fixed_masks, num_masks = ensemble_size,
different_heads = multihead, device = device)
else:
context_net = Linear_2L(context_dims, 2*dims, hids*2, 0.5, 0,
fixed_masks = False, num_masks = ensemble_size,
different_heads = False, device = device)
base_dist = nflows.distributions.ConditionalDiagonalNormal(
shape=[dims], context_encoder= context_net)
transforms = []
if flows:
def create_net(in_features, out_features):
return Linear_2L(in_features, out_features, hids, 0.5,
context_dims, fixed_masks=fixed_masks,
different_heads = multihead, num_masks=ensemble_size, device=device)
else:
def create_net(in_features, out_features):
return Linear_2L(in_features, out_features, hids, 0.5,
context_dims, fixed_masks = False,
different_heads = False, device=device)
for _ in range(num_layers):
if dims > 1:
transforms.append(nflows.transforms.RandomPermutation(features=dims))
mask = nflows.utils.torchutils.create_mid_split_binary_mask(dims)
transforms.append(
nflows.transforms.PiecewiseCubicCouplingTransform(mask, create_net,
tails='linear', num_bins=bins, tail_bound=tail,
))
if dims == 1:
transforms.append(
nflows.transforms.MaskedPiecewiseQuadraticAutoregressiveTransform(
features=dims,
hidden_features=hids,
context_features=context_dims,
num_blocks = 1,
use_batch_norm=batch_norm,
num_bins=bins,
tails='linear',
tail_bound = tail,
activation = activation,
use_residual_blocks = False,
ensemble = flows))
#create_context_net = create_net))
transform = nflows.transforms.CompositeTransform(transforms)
flow = nflows.flows.Flow(transform, base_dist)
return flow
class Linear_2L(nn.Module):
def __init__(self, input_dim, output_dim, n_hid, pdrop, context_dim,
fixed_masks = False, num_masks = 10, different_heads = False,
device='cpu'):
super(Linear_2L, self).__init__()
self.pdrop = pdrop
self.input_dim = input_dim
self.output_dim = output_dim
self.n_hid = n_hid
self.fc1 = nn.Linear(input_dim+context_dim, n_hid)
self.fc2 = nn.Linear(n_hid, n_hid)
if different_heads:
self.heads = []
for i in range(num_masks):
exec(f'self.head{i} = nn.Linear(n_hid, output_dim)')
exec(f'self.heads.append(self.head{i})')
else:
self.fc3 = nn.Linear(n_hid, output_dim)
self.different_heads = different_heads
# choose your non linearity
# self.act = nn.Tanh()
# self.act = nn.Sigmoid()
self.act = nn.ReLU(inplace=True)
# self.act = nn.ELU(inplace=True)
# self.act = nn.SELU(inplace=True)
self.fixed_masks = fixed_masks
if fixed_masks:
self.create_masks(num_masks, device)
self.num_masks = num_masks
def forward(self, x, context=None, rand_mask=True, mask_index = 0):
if self.fixed_masks:
if rand_mask:
mask = self.masks[np.random.choice(self.num_masks)]
else:
mask = self.masks[mask_index]
if self.different_heads:
if rand_mask:
head_idx = np.random.choice(self.num_masks)
else:
head_idx = mask_index
x = x.view(-1, self.input_dim) # view(batch_size, input_dim)
if context is None:
pass
else:
x = torch.cat((x, context), dim=1)
# -----------------
x = self.fc1(x)
if self.fixed_masks:
x = mask[0].repeat(x.shape[0],1)*x
# -----------------
x = self.act(x)
# -----------------
x = self.fc2(x)
if self.fixed_masks:
x = mask[1].repeat(x.shape[0],1)*x
# -----------------
x = self.act(x)
# -----------------
if self.different_heads:
y = self.heads[head_idx](x)
else:
y = self.fc3(x)
return y
def create_masks(self, num_masks, device):
masks = []
for i in range(num_masks):
mask_l1 = torch.bernoulli(torch.full_like(torch.ones(self.n_hid), self.pdrop))\
.to(device)
mask_l2 = torch.bernoulli(torch.full_like(torch.ones(self.n_hid), self.pdrop))\
.to(device)
masks.append([mask_l1, mask_l2])
self.masks = masks
|
nwaftp23/nflows_epistemic
|
nflows_utils.py
|
nflows_utils.py
|
py
| 7,615 |
python
|
en
|
code
| 1 |
github-code
|
6
|
37076072504
|
import subprocess
import time
import os
import stat
import threading
import uuid
class Iperf3(object):
def __init__(self, _ssh_machine1,
_ssh_key1,
_ssh_machine2,
_ssh_key2):
self.ssh_machine1 = _ssh_machine1
self.ssh_machine2 = _ssh_machine2
self.ssh_key1 = _ssh_key1
self.ssh_key2 = _ssh_key2
def generate_test_file(self,
command_list,
filename):
with open(filename, 'w') as f:
f.write("#!/bin/bash\n")
for command in command_list:
f.write(" ".join(command) + "\n")
f.write("sleep 5\n")
os.chmod(filename, os.stat(filename).st_mode | stat.S_IEXEC)
def get_result_value_from_client_iperf_file(self,client_file):
print(client_file)
proc = subprocess.Popen(['./get_value.sh',client_file],stdout=subprocess.PIPE)
proc.wait()
value_bytes = proc.communicate()[0].decode('utf-8')
value=''.join(str(v) for v in value_bytes)
# May return \n only
if not value or ('\n' in value and len(value)==1):
return None
print(value)
proc = subprocess.Popen(['./get_metric.sh',client_file],stdout=subprocess.PIPE)
proc.wait()
metric_bytes = proc.communicate()[0].decode('utf-8')
metric=''.join(str(v) for v in metric_bytes)
if 'M' in metric:
return float(value)
if 'G' in metric:
return (float(value) * 1000.0)
return(float(value) * 0.001)
def get_results(self,
client_key,
client_addr,
flow_num=20):
sum = 0.0
filepath='./' + client_addr + '_'
filepath += str(uuid.uuid4())
filepath += '/'
os.mkdir(filepath)
scp = subprocess.Popen(['scp','-i',client_key,client_addr + ':~/iperf3_output.*',filepath])
scp.wait()
failed_flows = 0
for i in range(0,flow_num):
outfile = filepath + 'iperf3_output.' + str(i)
res = self.get_result_value_from_client_iperf_file(outfile)
            if res is None:
failed_flows += 1
else:
sum += res
print('Total is: {} Mbps'.format(sum))
print('Mean is: {} Mbps'.format(sum/float(flow_num)))
def run_performance_tests(self,
use_udp=False, # protocol to be used
bw='500M', # bandwidth
duration='300',
flow_num=20,
server_addr=None,
server_port=5201,
server_file='server_file.sh',
client_file='client_file.sh'):
sleep_between_serv_clients = 30
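        # Build one iperf3 command per flow, bundle them into a shell script,
        # scp the script to the remote host and launch it over ssh
        # (servers first, then clients after a short delay).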
s_cmd_base = 'iperf3 -s -1'
c_cmd_base = 'iperf3 -c ' + self.ssh_machine2 + ' -b ' + bw + ' -t ' + duration
if use_udp:
c_cmd_base += ' -u'
port=server_port
s_cmd_list = []
for i in range(0,flow_num):
outfile = 'iperf3_output.' + str(i)
#s_cmd = ['ssh','-i',self.ssh_key2,self.ssh_machine2,
# 'nohup',s_cmd_base,'-p',str(port+i),'&>',outfile]
s_cmd = ['nohup',s_cmd_base,'-p',str(port+i),'&>',outfile,'&']
s_cmd_list.append(s_cmd)
self.generate_test_file(s_cmd_list,server_file)
s_scp = subprocess.Popen(['scp','-i',self.ssh_key2,server_file,self.ssh_machine2 + ':~/']);
s_scp.wait()
#print("Running: {} as server".format(s_cmd))
subprocess.Popen(['ssh','-i',self.ssh_key2,self.ssh_machine2,'./' + server_file])
time.sleep(sleep_between_serv_clients)
c_cmd_list = []
for i in range(0,flow_num):
outfile = 'iperf3_output.' + str(i)
#c_cmd = ['ssh','-i',self.ssh_key1,self.ssh_machine1,
# 'nohup',c_cmd_base,'-p',str(port+i),'&>',outfile]
c_cmd = ['nohup',c_cmd_base,'-p',str(port+i),'&>',outfile,'&']
c_cmd_list.append(c_cmd)
self.generate_test_file(c_cmd_list,client_file)
c_scp = subprocess.Popen(['scp','-i',self.ssh_key1,client_file,self.ssh_machine1 + ':~/']);
c_scp.wait()
#print("Running: {} as server".format(c_cmd))
subprocess.Popen(['ssh','-i',self.ssh_key1,self.ssh_machine1,'./' + client_file])
print("Waiting for test to finish........")
time.sleep(int(duration) + sleep_between_serv_clients)
print("DONE")
#subprocess.Popen(['ssh','-i',self.ssh_key2,self.ssh_machine2,
# "kill -9 $(ps aux | grep iperf | awk \'{print $2}\')"])
self.get_results(client_key=self.ssh_key1,
client_addr=self.ssh_machine1,
flow_num=flow_num)
if __name__=="__main__":
print("*************************************")
print("** Make sure SSH keys for servers **")
print("** SSH address should of form: **")
print("** name@IP **")
print("** or **")
print("** name@hostname **")
print("** Key should be a filepath **")
print("** **")
print("** Make sure iperf3 is installed **")
print("*************************************")
##### test STARTUP parameters:
use_udp=False
bw='500M'
duration='300'
flow_num=20
server_addr=None
server_port=5201
####
# test_list syntax:
# ( IP MACHINE 1, KEY MACHINE 1, IP MACHINE 2, KEY MACHINE 2)
test_list = [('10.5.0.3','./id_iperf_test','10.5.0.30','./id_iperf_test')]
#('10.5.0.3','./id_iperf_test','10.5.0.30','./id_iperf_test')]
thread_list = []
for tup in test_list:
test = Iperf3(tup[0],tup[1],tup[2],tup[3])
        # Pass the bound method and its kwargs to Thread instead of calling it
        # here, otherwise the test runs sequentially in the main thread.
        thread = threading.Thread(target=test.run_performance_tests,
                                  kwargs={'use_udp': use_udp,
                                          'bw': bw,
                                          'duration': duration,
                                          'flow_num': flow_num,
                                          'server_port': server_port})
thread_list.append(thread)
thread.start()
#waiting threads to finish:
for t in thread_list:
t.join()
|
phvalguima/iperf-testing
|
iperf.py
|
iperf.py
|
py
| 6,642 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26079699510
|
#For loop
#for value in sequence:
#    code block
#Example 1
for i in "mizard":
    print(i)
#Example 2: using the range() function
for i in range(10): #start (begins at 0 and stops just before the last number)
    print(i)
for i in range(2,11): #stop (stops one number before the final value)
    print(i)
for i in range(5,20,2): #step (increments by the given amount)
    print(i)
#Example 3: loop using continue
for i in range(10):
    print(i)
    if i == 5:
        print(i)
        continue
#Example 4: loop using break
for i in range(20):
    if i == 15:
        print("This is iteration number",i)
        break
#Example 5: loop over a list
data = ["mizard","Jamal","Udin","Ngab"]
for i in data:
    if i == "Udin":
        print(i)
        break
#While loop
#while value operator sequence:
#    code block
#Example 1
i = 2
while i < 11:
    print(i)
    i += 1
#Example 2
data = [10,20,30,40]
for i in data:
    if i == 30:
        print(i)
        break
i = 1
while i < len(data):
    if data[i] == 30:
        print(data[i])
        break
    i += 1
#Challenge
for i in range(2,41):
    if i == 10:
        print("this is the value",i)
        continue
    elif i == 20:
        print("this is the value",i)
        continue
    elif i == 30:
        print("this is the value",i)
        break
|
zantblue/Algoritma-Pemograman-Praktek
|
Python4.py
|
Python4.py
|
py
| 1,362 |
python
|
id
|
code
| 0 |
github-code
|
6
|
14490773282
|
"""
create model
Creator: Xiaoshui Huang
Date: 2020-06-19
"""
from se_math.so3 import inverse, transform
import torch
import numpy as np
from random import sample
import se_math.se3 as se3
import se_math.invmat as invmat
import igl
import os
import sys
sys.path.append('./../')
sys.path.append('./../../')
from loss import cal_loss_intersection_batch_whole_median_pts_lines, Reconstruction_point, Random_uniform_distribution_lines_batch_efficient_resample, chamfer_dist, Sample_neighs
from utils import npmat2euler
# we also make chamfer_loss for data!
def dict_all_to_device(tensor_dict, device):
"""Sends everything into a certain device """
for k in tensor_dict:
if isinstance(tensor_dict[k], torch.Tensor):
tensor_dict[k] = tensor_dict[k].to(device)
def save_pred_gt_obj(V_src, V_pred, V_gt, V_tgt_trans, paths_src, paths_pred,
paths_gt, paths_gt_pred):
Face = np.zeros(3).reshape(1, 3).astype(np.int32)
for i in range(V_pred.shape[0]):
igl.write_triangle_mesh(paths_src[i], V_src[i].numpy(), Face)
igl.write_triangle_mesh(paths_pred[i], V_pred[i].numpy(), Face)
igl.write_triangle_mesh(paths_gt[i], V_gt[i].numpy(), Face)
igl.write_triangle_mesh(paths_gt_pred[i], V_tgt_trans[i].numpy(), Face)
# a global function to flatten a feature
def flatten(x):
return x.view(x.size(0), -1)
# a global function to calculate max-pooling
def symfn_max(x):
# [B, K, N] -> [B, K, 1]
a = torch.nn.functional.max_pool1d(x, x.size(-1))
return a
# a global function to generate mlp layers
def _mlp_layers(nch_input,
nch_layers,
b_shared=True,
bn_momentum=0.1,
dropout=0.0):
""" [B, Cin, N] -> [B, Cout, N] or
[B, Cin] -> [B, Cout]
"""
layers = []
last = nch_input
for i, outp in enumerate(nch_layers):
if b_shared:
weights = torch.nn.Conv1d(last, outp, 1)
else:
weights = torch.nn.Linear(last, outp)
layers.append(weights)
# layers.append(torch.nn.BatchNorm1d(outp, momentum=bn_momentum))
layers.append(torch.nn.GroupNorm(8, outp))
layers.append(torch.nn.ReLU())
if b_shared == False and dropout > 0.0:
layers.append(torch.nn.Dropout(dropout))
last = outp
return layers
# a class to generate MLP network
class MLPNet(torch.nn.Module):
""" Multi-layer perception.
[B, Cin, N] -> [B, Cout, N] or
[B, Cin] -> [B, Cout]
"""
def __init__(self,
nch_input,
nch_layers,
b_shared=True,
bn_momentum=0.1,
dropout=0.0):
super().__init__()
list_layers = _mlp_layers(nch_input, nch_layers, b_shared, bn_momentum,
dropout)
self.layers = torch.nn.Sequential(*list_layers)
def forward(self, inp):
out = self.layers(inp)
return out
# encoder network
class PointNet(torch.nn.Module):
def __init__(self, dim_k=1024):
super().__init__()
scale = 1
mlp_h1 = [int(64 / scale), int(64 / scale)]
mlp_h2 = [int(64 / scale), int(128 / scale), int(dim_k / scale)]
self.h1 = MLPNet(3, mlp_h1, b_shared=True).layers
self.h2 = MLPNet(mlp_h1[-1], mlp_h2, b_shared=True).layers
self.sy = symfn_max
def forward(self, points):
""" points -> features
[B, N, 3] -> [B, K]
"""
# for pointnet feature extraction
x = points.transpose(1, 2) # [B, 3, N]
x = self.h1(x)
x = self.h2(x) # [B, K, N]
x = flatten(self.sy(x))
return x
# decoder network
class Decoder(torch.nn.Module):
def __init__(self, num_points=2048, bottleneck_size=1024):
super(Decoder, self).__init__()
self.num_points = num_points
self.bottleneck_size = bottleneck_size
# self.bn1 = torch.nn.BatchNorm1d(bottleneck_size)
# self.bn2 = torch.nn.BatchNorm1d(bottleneck_size // 2)
# self.bn3 = torch.nn.BatchNorm1d(bottleneck_size // 4)
self.bn1 = torch.nn.GroupNorm(8, bottleneck_size)
self.bn2 = torch.nn.GroupNorm(8, bottleneck_size // 2)
self.bn3 = torch.nn.GroupNorm(8, bottleneck_size // 4)
self.fc1 = torch.nn.Linear(self.bottleneck_size, bottleneck_size)
self.fc2 = torch.nn.Linear(self.bottleneck_size, bottleneck_size // 2)
self.fc3 = torch.nn.Linear(bottleneck_size // 2, bottleneck_size // 4)
self.fc4 = torch.nn.Linear(bottleneck_size // 4, self.num_points * 3)
self.th = torch.nn.Tanh()
def forward(self, x):
batchsize = x.size()[0]
x = torch.nn.functional.relu(self.bn1(self.fc1(x)))
x = torch.nn.functional.relu(self.bn2(self.fc2(x)))
x = torch.nn.functional.relu(self.bn3(self.fc3(x)))
x = self.th(self.fc4(x)) * 10
x = x.view(batchsize, 3, self.num_points).transpose(1, 2).contiguous()
return x
# the neural network of feature-metric registration
class SolveRegistration(torch.nn.Module):
def __init__(self, ptnet, decoder=None):
super().__init__()
# network
self.encoder = ptnet
self.decoder = decoder
# functions
self.inverse = invmat.InvMatrix.apply
self.exp = se3.Exp # [B, 6] -> [B, 4, 4]
self.transform = se3.transform # [B, 1, 4, 4] x [B, N, 3] -> [B, N, 3]
# initialization for dt: [w1, w2, w3, v1, v2, v3], 3 rotation angles and 3 translation
delta = 1.0e-2 # step size for approx. Jacobian (default: 1.0e-2)
dt_initial = torch.autograd.Variable(
torch.Tensor([delta, delta, delta, delta, delta, delta]))
self.dt = torch.nn.Parameter(dt_initial.view(1, 6), requires_grad=True)
# results
self.last_err = None
self.g_series = None # for debug purpose
self.prev_r = None
self.g = None # estimated transformation T
self.device = None
self.g_series_gpu = None
# estimate T
    # return not only the encoder loss but also the intersection loss
def estimate_t(self,
data,
maxiter=5,
xtol=1.0e-7,
p0_zero_mean=True,
p1_zero_mean=True,
mode='train'):
"""
give two point clouds, estimate the T by using IC algorithm
:param p0: point cloud
:param p1: point cloud
:param maxiter: maximum iteration
:param xtol: a threshold for early stop of transformation estimation
        :param p0_zero_mean: True: normalize p0 before the IC algorithm
        :param p1_zero_mean: True: normalize p1 before the IC algorithm
:return: feature-metric projection error (r), encoder-decoder loss (loss_ende) and intersection loss!
"""
p1 = data['points_src_sample']
p0 = data['points_tar_sample']
a0 = torch.eye(4).view(1, 4, 4).expand(p0.size(0), 4,
4).to(p0) # [B, 4, 4]
a1 = torch.eye(4).view(1, 4, 4).expand(p1.size(0), 4,
4).to(p1) # [B, 4, 4]
self.device = p1.device
batch_size = p1.shape[0]
# normalization
if p0_zero_mean:
p0_m = p0.mean(dim=1) # [B, N, 3] -> [B, 3]
a0 = a0.clone()
a0[:, 0:3, 3] = p0_m
q0 = p0 - p0_m.unsqueeze(1)
else:
q0 = p0
if p1_zero_mean:
p1_m = p1.mean(dim=1) # [B, N, 3] -> [B, 3]
a1 = a1.clone()
a1[:, 0:3, 3] = -p1_m
q1 = p1 - p1_m.unsqueeze(1)
else:
q1 = p1
# use IC algorithm to estimate the transformation
# generate the transform!
g0 = torch.eye(4).to(q0).view(1, 4, 4).expand(q0.size(0), 4,
4).contiguous()
r, g, loss_ende = self.ic_algo(g0, q0, q1, maxiter, xtol)
        # note: does g backpropagate the gradient here?
self.g = g
# re-normalization
if p0_zero_mean or p1_zero_mean:
est_g = self.g
if p0_zero_mean:
est_g = a0.to(est_g).bmm(est_g)
if p1_zero_mean:
est_g = est_g.bmm(a1.to(est_g))
self.g = est_g
est_gs = self.g_series # [M, B, 4, 4]
if p0_zero_mean:
est_gs = a0.unsqueeze(0).contiguous().to(est_gs).matmul(est_gs)
if p1_zero_mean:
est_gs = est_gs.matmul(a1.unsqueeze(0).contiguous().to(est_gs))
self.g_series = est_gs
est_gs_gpu = self.g_series_gpu # [M, B, 4, 4]
if p0_zero_mean:
est_gs_gpu = a0.unsqueeze(0).contiguous().to(
est_gs_gpu).matmul(est_gs_gpu)
if p1_zero_mean:
est_gs_gpu = est_gs_gpu.matmul(
a1.unsqueeze(0).contiguous().to(est_gs_gpu))
self.g_series_gpu = est_gs_gpu
loss_pp_wise = (torch.mean(
torch.abs(
self.transform(self.g.unsqueeze(1), data['points_src_sample'])
- self.transform(
torch.inverse(data['igt']).unsqueeze(1),
data['points_src_sample']))))
        if mode == 'train':
R = (torch.norm(
data['tar_box'][:, 0, :] - data['tar_box'][:, -1, :],
dim=-1,
p=2) * 0.5).reshape(-1, 1)
lines = None
points_ref = data['points_tar_sample'].contiguous()
tar_faces_tensor = data['points_based_neighs_tar'].reshape(
points_ref.shape[0], -1, 9)
# if we used the transformed, we may generate better results!
temp_g = self.g_series_gpu[-1]
pred_src_transformed_final_sample = self.transform(
temp_g.unsqueeze(1),
data['points_src_sample'].contiguous()).detach()
# pred_src_transformed_final_sample = data['points_src_sample']
if lines is None:
lines = Random_uniform_distribution_lines_batch_efficient_resample(
R, data['centers'], 15000,
pred_src_transformed_final_sample.contiguous(),
data['points_tar_sample'].contiguous(), self.device)
# set our loss;
loss_intersection = torch.FloatTensor([0]).to(self.device)
for i in range(maxiter - 3, maxiter):
temp_g = self.g_series_gpu[i]
pred_src_transformed_final_sample = self.transform(
temp_g.unsqueeze(1), data['points_src_sample'])
pred_src_faces_tensor = self.transform(
temp_g.unsqueeze(1),
data['points_based_neighs_src']).reshape(
pred_src_transformed_final_sample.shape[0], -1, 9)
tp_loss_intersection = torch.FloatTensor([0]).to(self.device)
for j in range(pred_src_faces_tensor.shape[0]):
tp_loss_intersection += cal_loss_intersection_batch_whole_median_pts_lines(
1, 1, 5, 5, pred_src_faces_tensor[j:j + 1, :, :],
tar_faces_tensor[j:j + 1, :, :], lines[j:j + 1, :, :],
self.device) / 5.0
loss_intersection = loss_intersection + \
tp_loss_intersection*0.5**(maxiter-i-1)
loss_chamfer = chamfer_dist(pred_src_transformed_final_sample,
data['points_tar_sample'])
return r, loss_ende, loss_intersection / batch_size, loss_pp_wise, loss_chamfer
return r, loss_ende, loss_pp_wise,
# IC algorithm
# encoder, we just use the chamfer!
def ic_algo(self, g0, p0, p1, maxiter, xtol):
"""
use IC algorithm to estimate the increment of transformation parameters
:param g0: initial transformation
:param p0: point cloud
:param p1: point cloud
        :param maxiter: maximum iteration
        :param xtol: a threshold to check the increment of transformation for early stop
:return: feature-metric projection error (r), updated transformation (g), encoder-decoder loss
"""
training = self.encoder.training
# training = self.decoder.training
batch_size = p0.size(0)
self.last_err = None
g = g0
self.g_series = torch.zeros(maxiter + 1, *g0.size(), dtype=g0.dtype)
self.g_series[0] = g0.clone()
self.g_series_gpu = torch.zeros(maxiter, *g0.size(),
dtype=g0.dtype).to(self.device)
# generate the features
f0 = self.encoder(p0)
f1 = self.encoder(p1)
# task 1
loss_enco_deco = 0.0
if self.decoder is not None:
# we generate the decoder f0?
# make an encoder decoder!
decoder_out_f0 = self.decoder(f0)
decoder_out_f1 = self.decoder(f1)
# the decoder meets AE!
p0_dist1, p0_dist2 = self.chamfer_loss(
p0.contiguous(), decoder_out_f0) # loss function
loss_net0 = (torch.mean(p0_dist1)) + (torch.mean(p0_dist2))
p1_dist1, p1_dist2 = self.chamfer_loss(
p1.contiguous(), decoder_out_f1) # loss function
loss_net1 = (torch.mean(p1_dist1)) + (torch.mean(p1_dist2))
loss_enco_deco = loss_net0 + loss_net1
        # self.encoder.eval() # and fix the BN layers.
        # if fixed, how would the gradients be backpropagated?
# task 2
f0 = self.encoder(p0) # [B, N, 3] -> [B, K]
# approx. J by finite difference
dt = self.dt.to(p0).expand(batch_size,
6) # convert to the type of p0. [B, 6]
J = self.approx_Jac(p0, f0, dt)
# compute pinv(J) to solve J*x = -r
try:
Jt = J.transpose(1, 2) # [B, 6, K]
H = Jt.bmm(J) # [B, 6, 6]
# H = H + u_lamda * iDentity
B = self.inverse(H)
pinv = B.bmm(Jt) # [B, 6, K]
except RuntimeError as err:
self.last_err = err
f1 = self.encoder(p1) # [B, N, 3] -> [B, K]
r = f1 - f0
            self.encoder.train(training)
return r, g, -1
itr = 0
r = None
# we
for itr in range(maxiter):
p = self.transform(g.unsqueeze(1),
p1) # [B, 1, 4, 4] x [B, N, 3] -> [B, N, 3]
f1 = self.encoder(p) # [B, N, 3] -> [B, K]
r = f1 - f0 # [B,K]
# generate the r!
dx = -pinv.bmm(r.unsqueeze(-1)).view(batch_size, 6)
check = dx.norm(p=2, dim=1, keepdim=True).max()
if float(check) < xtol:
if itr == 0:
self.last_err = 0 # no update.
break
g = self.update(g, dx)
self.g_series_gpu[itr] = g
self.g_series[itr + 1] = g.clone()
self.prev_r = r
self.encoder.train(training)
return r, g, loss_enco_deco
# estimate Jacobian matrix
def approx_Jac(self, p0, f0, dt):
# p0: [B, N, 3], Variable
# f0: [B, K], corresponding feature vector
# dt: [B, 6], Variable
# Jk = (ptnet(p(-delta[k], p0)) - f0) / delta[k]
batch_size = p0.size(0)
num_points = p0.size(1)
# compute transforms
transf = torch.zeros(batch_size, 6, 4, 4).to(p0)
for b in range(p0.size(0)):
d = torch.diag(dt[b, :]) # [6, 6]
D = self.exp(-d) # [6, 4, 4]
transf[b, :, :, :] = D[:, :, :]
transf = transf.unsqueeze(2).contiguous() # [B, 6, 1, 4, 4]
p = self.transform(transf,
p0.unsqueeze(1)) # x [B, 1, N, 3] -> [B, 6, N, 3]
f0 = f0.unsqueeze(-1) # [B, K, 1]
f1 = self.encoder(p.view(-1, num_points, 3))
f = f1.view(batch_size, 6, -1).transpose(1, 2) # [B, K, 6]
df = f0 - f # [B, K, 6]
J = df / dt.unsqueeze(1) # [B, K, 6]
return J
# update the transformation
def update(self, g, dx):
# [B, 4, 4] x [B, 6] -> [B, 4, 4]
dg = self.exp(dx)
return dg.matmul(g)
# calculate the chamfer loss
def chamfer_loss(self, a, b):
x, y = a, b
bs, num_points, points_dim = x.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
# diag_ind = torch.arange(0, num_points).type(torch.cuda.LongTensor)
diag_ind = torch.arange(0, num_points)
rx = xx[:, diag_ind, diag_ind].unsqueeze(1).expand_as(xx)
ry = yy[:, diag_ind, diag_ind].unsqueeze(1).expand_as(yy)
P = (rx.transpose(2, 1) + ry - 2 * zz)
return torch.min(P, 1)[0], torch.min(P, 2)[0]
@staticmethod
def rsq(r):
# |r| should be 0
z = torch.zeros_like(r)
return torch.nn.functional.mse_loss(r, z, reduction='sum')
@staticmethod
def comp(g, igt):
""" |g*igt - I| (should be 0) """
assert g.size(0) == igt.size(0)
assert g.size(1) == igt.size(1) and g.size(1) == 4
assert g.size(2) == igt.size(2) and g.size(2) == 4
A = g.matmul(igt)
I = torch.eye(4).to(A).view(1, 4, 4).expand(A.size(0), 4, 4)
return torch.nn.functional.mse_loss(A, I, reduction='mean') * 16
@staticmethod
def comp_inv(g, igt):
""" |g*igt - I| (should be 0) """
assert g.size(0) == igt.size(0)
assert g.size(1) == igt.size(1) and g.size(1) == 4
assert g.size(2) == igt.size(2) and g.size(2) == 4
# A = g.matmul(igt)
gt = torch.inverse(igt)
# I = torch.eye(4).to(A).view(1, 4, 4).expand(A.size(0), 4, 4)
return torch.nn.functional.mse_loss(g, gt, reduction='mean')
# main algorithm class
class FMRTrain:
def __init__(self, dim_k, num_points, train_type):
self.dim_k = dim_k
self.num_points = num_points
self.max_iter = 5 # max iteration time for IC algorithm
# 0: unsupervised, 1: semi-supervised see. self.compute_loss()
self._loss_type = train_type
self.transform = se3.transform # [B, 1, 4, 4] x [B, N, 3] -> [B, N, 3]
def create_model(self):
# Encoder network: extract feature for every point. Nx1024
ptnet = PointNet(dim_k=self.dim_k)
# Decoder network: decode the feature into points
decoder = Decoder(num_points=self.num_points)
        # feature-metric registration (fmr) algorithm: estimate the transformation T
fmr_solver = SolveRegistration(ptnet, decoder)
return fmr_solver
def compute_loss(self, solver, data, device, mode='train', maxiter=5):
# p0, p1, igt = data
# p0 = p0.to(device) # template
# p1 = p1.to(device) # source
# igt = igt.to(device) # igt: p0 -> p1
dict_all_to_device(data, device)
p1 = data['points_src_sample']
p0 = data['points_tar_sample']
igt = data['igt']
        if mode == 'train':
r, loss_ende, loss_intersection, loss_pp_wise, loss_chamfer = solver.estimate_t(
data, self.max_iter, mode=mode)
else:
# test model!
r, loss_ende, loss_pp_wise = solver.estimate_t(data,
maxiter,
mode=mode)
loss_r = solver.rsq(r)
est_g = solver.g
# generate the difference between the pred and gt!
loss_g = solver.comp_inv(est_g, igt)
# unsupervised learning, set max_iter=0
if self.max_iter == 0:
return loss_ende
# semi-supervised learning, set max_iter>0
if self._loss_type == 0:
loss = loss_ende
elif self._loss_type == 1:
loss = loss_ende + loss_g
elif self._loss_type == 2:
loss = loss_r + loss_g
else:
loss = loss_g
# we need use the multiple indicators to measure the quality!
np_pred_rotation = est_g[:, :3, :3].transpose(
2, 1).detach().cpu().numpy()
np_pred_euler = npmat2euler(np_pred_rotation, 'xyz')
np_gt_rotation = data['R'].detach().cpu().numpy()
np_gt_euler = npmat2euler(np_gt_rotation, 'xyz')
loss_rotation_euler_mae = np.mean(np.abs(np_pred_euler - np_gt_euler))
loss_rotation_euler_rmse = np.sqrt(
np.mean((np_pred_euler - np_gt_euler)**2))
np_loss = {
'loss_rot_euler_mae': loss_rotation_euler_mae,
'loss_rot_euler_rmse': loss_rotation_euler_rmse
}
# set the weights
        if mode == 'train':
return 0.01 * loss_ende + 1.0 * loss_intersection + .0 * loss_g + 0.0 * loss_chamfer, loss_g.detach(
), loss_intersection.detach(), loss_pp_wise.detach(
), loss_ende.detach(), np_loss
return loss_g, loss_g.detach(), loss_pp_wise.detach(
), loss_ende.detach(), np_loss
def train(self,
model,
trainloader,
optimizer,
device,
epoch,
train_writer=None):
model.train()
Debug = True
total_loss = 0
total_loss_gt = 0
total_loss_intersection = 0
total_loss_pp_wise = 0
total_loss_encoder = 0
total_loss_rot_euler_mae = 0
total_loss_rot_euler_rmse = 0
if Debug:
epe = 0
count = 0
count_mid = 9
for i, data in enumerate(trainloader):
loss, loss_gt, loss_intersection, loss_pp_wise, loss_ende, np_loss = self.compute_loss(
model, data, device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_item = loss.item()
total_loss += loss_item
total_loss_gt += loss_gt.item()
total_loss_pp_wise += loss_pp_wise.item()
total_loss_intersection += loss_intersection.item()
total_loss_encoder += loss_ende.item()
total_loss_rot_euler_mae += np_loss['loss_rot_euler_mae']
total_loss_rot_euler_rmse += np_loss['loss_rot_euler_rmse']
if Debug:
epe += loss_item
if count % 10 == 0:
print('i=%d, fmr_loss=%f ' % (i, float(epe) /
(count_mid + 1)))
epe = 0.0
count += 1
print(
"ba/ep{:0d}/{:0d},l_insec:{:4f}, l_gt{:4f},l_pp_w{:4f}, l_en{:4f}, l_rot_eu_mae{:4f}, l_rot_eu_rmse{:4f}"
.format(i, epoch, loss_intersection.item(), loss_gt.item(),
loss_pp_wise.item(), loss_ende.item(),
np_loss['loss_rot_euler_mae'],
np_loss['loss_rot_euler_rmse']))
ave_loss = float(total_loss) / count
ave_loss_gt = float(total_loss_gt) / count
ave_loss_intersection = float(total_loss_intersection) / count
ave_loss_wise = float(total_loss_pp_wise) / count
ave_loss_encoder = float(total_loss_encoder) / count
        ave_loss_rot_euler_mae = float(total_loss_rot_euler_mae) / count
        ave_loss_rot_euler_rmse = float(total_loss_rot_euler_rmse) / count
if train_writer is not None:
train_writer.add_scalar('./loss/loss_sum', ave_loss, epoch)
train_writer.add_scalar('./loss/loss_gt', ave_loss_gt, epoch)
train_writer.add_scalar('./loss/loss_intersec',
ave_loss_intersection, epoch)
train_writer.add_scalar('./loss/loss_wise_mse', ave_loss_wise,
epoch)
train_writer.add_scalar('./loss/loss_ende', ave_loss_encoder,
epoch)
train_writer.add_scalar('./lr', optimizer.param_groups[0]['lr'],
epoch)
train_writer.add_scalar('./loss/loss_rot_euler_mae',
ave_loss_rot_euler_mae, epoch)
train_writer.add_scalar('./loss/loss_rot_euler_rmse',
ave_loss_rot_euler_rmse, epoch)
# \033[36m,test gt:{:4f}, pp_wise:{:4f}, rot_mae{:4f}, rot_rmse{:4f}\033[0m
print(
" \033[36m,train:l_gt:{:4f}, l_intersec:{:4f}, l_pp_wise{:4f}, l_encoder{:4f}, l_rot_eu_mae{:4f}, l_rot_eu_rmse{:4f} \033[0m, "
.format(ave_loss_gt, ave_loss_intersection, ave_loss_wise,
ave_loss_encoder, ave_loss_rot_euler_mae,
ave_loss_rot_euler_rmse))
return ave_loss
def validate(self, model, testloader, device, epoch, save_results=None):
# model.eval()
vloss = 0.0
vloss_gt = 0.0
vloss_pp_wise = 0.0
vloss_rot_euler_mae = 0.0
vloss_rot_euler_rmse = 0.0
count = 0
count_i = 0
with torch.no_grad():
for i, data in enumerate(testloader):
loss_net, loss_gt, loss_pp_wise, loss_ende, np_loss = self.compute_loss(
model, data, device, mode='test')
vloss += loss_net.item()
vloss_gt += loss_gt.item()
vloss_pp_wise += loss_pp_wise.item()
vloss_rot_euler_mae += np_loss['loss_rot_euler_mae']
vloss_rot_euler_rmse += np_loss['loss_rot_euler_rmse']
count += 1
print("Test:sample{:0d},loss_pp_wise:{:4f}".format(
i, loss_pp_wise.item()))
if epoch % 10 == 0:
est_g = model.g # (1, 4, 4)
igt = data['igt']
ig_gt = igt.cpu().contiguous().view(-1, 4,
4) # --> [1, 4, 4]
g_hat = est_g.cpu().contiguous().view(-1, 4,
4) # --> [1, 4, 4]
p1 = data['points_src_sample']
p0 = data['points_tar_sample']
if save_results is not None:
paths_pred = []
paths_gt = []
paths_src = []
paths_gt_pred = []
src_transform = self.transform(est_g.unsqueeze(1), p1)
src_transform_sample = self.transform(
est_g.unsqueeze(1), data['points_src_sample'])
tgt_transform = self.transform(igt.unsqueeze(1), p0)
V_src = p0.cpu().detach()
V_pred = src_transform.cpu().detach()
V_gt = p1.cpu().detach()
V_tgt_trans = tgt_transform.cpu().detach()
for j in range(p0.shape[0]):
paths_pred.append(
os.path.join(
save_results,
str(epoch) + "pred_src" + str(count_i) +
".obj"))
paths_gt.append(
os.path.join(
save_results,
str(epoch) + "gt" + str(count_i) + ".obj"))
paths_src.append(
os.path.join(
save_results,
str(epoch) + "src" + str(count_i) +
".obj"))
paths_gt_pred.append(
os.path.join(
save_results,
str(epoch) + "pred_gt" + str(count_i) +
".obj"))
F = np.zeros([1, 3]).astype(np.int32)
igl.write_obj(
paths_gt_pred[j].replace(
'pred_gt', 'transformed_sample', 1),
src_transform_sample.cpu().detach().numpy().
reshape(-1, 3), F)
igl.write_obj(
paths_gt_pred[j].replace(
'pred_gt', 'src_sample', 1),
data['points_src_sample'].cpu().detach().numpy(
).reshape(-1, 3), F)
igl.write_obj(
paths_gt_pred[j].replace(
'pred_gt', 'tar_sample', 1),
data['points_tar_sample'].cpu().detach().numpy(
).reshape(-1, 3), F)
count_i += 1
save_pred_gt_obj(V_src, V_pred, V_gt, V_tgt_trans,
paths_src, paths_pred, paths_gt,
paths_gt_pred)
ave_vloss = float(vloss) / count
ave_vloss_gt = float(vloss_gt) / count
ave_vloss_pp_wise = float(vloss_pp_wise) / count
ave_vloss_rot_euler_mae = float(vloss_rot_euler_mae) / count
ave_vloss_rot_euler_rmse = float(vloss_rot_euler_rmse) / count
print(
"\033[36m,test gt:{:4f}, pp_wise:{:4f}, rot_mae{:4f}, rot_rmse{:4f}\033[0m, "
.format(ave_vloss_gt, ave_vloss_pp_wise, ave_vloss_rot_euler_mae,
ave_vloss_rot_euler_rmse))
return ave_vloss
class FMRTest:
def __init__(self, args):
self.filename = args.outfile
self.dim_k = args.dim_k
self.max_iter = 10 # max iteration time for IC algorithm
self._loss_type = 3 # see. self.compute_loss()
self.transform = se3.transform # [B, 1, 4, 4] x [B, N, 3] -> [B, N, 3]
def create_model(self):
# Encoder network: extract feature for every point. Nx1024
ptnet = PointNet(dim_k=self.dim_k)
        # feature-metric registration (fmr) algorithm: estimate the transformation T
fmr_solver = SolveRegistration(ptnet)
return fmr_solver
# we save the results!
# pay attention to final results!
def evaluate(self,
solver,
testloader,
device,
save_results=None,
writer=None):
solver.eval()
with open(self.filename, 'w') as fout:
self.eval_1__header(fout)
count_i = 0
total_loss_pp_wise = 0
total_loss_gt = 0
with torch.no_grad():
for i, data in enumerate(testloader):
# p0, p1, igt = data # igt: p0->p1
dict_all_to_device(data, device)
p1 = data['points_src_sample']
p0 = data['points_tar_sample']
igt = data['igt']
# igt =
# # compute trans from p1->p0
# g = se3.log(igt) # --> [-1, 6]
# igt = se3.exp(-g) # [-1, 4, 4]
# p0, p1 = self.ablation_study(p0, p1)
p0 = p0.to(device) # template (1, N, 3)
p1 = p1.to(device) # source (1, M, 3)
                # When we evaluate, we ignore the chamfer and any other loss function.
r, loss_ende, loss_pp_wise = solver.estimate_t(
data, self.max_iter, mode='test')
total_loss_pp_wise += loss_pp_wise
est_g = solver.g # (1, 4, 4)
ig_gt = igt.cpu().contiguous().view(-1, 4,
4) # --> [1, 4, 4]
g_hat = est_g.cpu().contiguous().view(-1, 4,
4) # --> [1, 4, 4]
dg = g_hat.bmm(ig_gt) # if correct, dg == identity matrix.
dx = se3.log(
                    dg) # --> [1, 6] (if correct, dx == zero vector)
dn = dx.norm(p=2, dim=1) # --> [1]
dm = dn.mean()
self.eval_1__write(fout, ig_gt, g_hat)
print('test, %d/%d, %f, %f' %
(i, len(testloader), dm, loss_pp_wise))
if writer is not None:
writer.add_scalar('./loss/test', dm, i)
# p = self.transform(g.unsqueeze(1),
# p1) # [B, 1, 4, 4] x [B, N, 3] -> [B, N, 3]
# est_g:p1--->p0
# igt: p0-->p1
if save_results is not None:
paths_pred = []
paths_gt = []
paths_src = []
paths_gt_pred = []
src_transform = self.transform(est_g.unsqueeze(1), p1)
tgt_transform = self.transform(igt.unsqueeze(1), p0)
V_src = p0.cpu().detach()
V_pred = src_transform.cpu().detach()
V_gt = p1.cpu().detach()
V_tgt_trans = tgt_transform.cpu().detach()
                        for _ in range(p0.shape[0]):
paths_pred.append(
os.path.join(save_results,
str(count_i) + "pred_src.obj"))
paths_gt.append(
os.path.join(save_results,
str(count_i) + "gt.obj"))
paths_src.append(
os.path.join(save_results,
str(count_i) + "src.obj"))
paths_gt_pred.append(
os.path.join(save_results,
str(count_i) + "pred_gt.obj"))
count_i += 1
save_pred_gt_obj(V_src, V_pred, V_gt, V_tgt_trans,
paths_src, paths_pred, paths_gt,
paths_gt_pred)
def ablation_study(self, p0, p1, add_noise=False, add_density=False):
# ablation study
# mesh = self.plyread("./box1Kinect1.ply")
# p0 = torch.tensor(mesh).to(device).unsqueeze(0)
# mesh = self.plyread("./box11.ply")
# p1 = torch.tensor(mesh).to(device).unsqueeze(0)
# add noise
if add_noise:
p1 = torch.tensor(np.float32(np.random.normal(p1, 0.01)))
        # reduce density: randomly keep only a fraction of the source points
if add_density:
density_ratio = 0.5
pts_num = p1.shape[0]
sampleNum = int(pts_num *
density_ratio) # the number of remaining points
if pts_num > sampleNum:
num = sample(range(1, pts_num), sampleNum)
elif pts_num > 0:
num = range(0, pts_num)
else:
print("No points in this point cloud!")
return
p1 = p1[num, :]
return p0, p1
def eval_1__header(self, fout):
cols = [
'h_w1', 'h_w2', 'h_w3', 'h_v1', 'h_v2', 'h_v3', 'g_w1', 'g_w2',
'g_w3', 'g_v1', 'g_v2', 'g_v3'
] # h: estimated, g: ground-truth twist vectors
print(','.join(map(str, cols)), file=fout)
fout.flush()
def eval_1__write(self, fout, ig_gt, g_hat):
x_hat = se3.log(g_hat) # --> [-1, 6]
mx_gt = se3.log(ig_gt) # --> [-1, 6]
for i in range(x_hat.size(0)):
x_hat1 = x_hat[i] # [6]
mx_gt1 = mx_gt[i] # [6]
vals = torch.cat((x_hat1, -mx_gt1)) # [12]
valn = vals.cpu().numpy().tolist()
print(','.join(map(str, valn)), file=fout)
fout.flush()
|
Dengzhi-USTC/A-robust-registration-loss
|
code/exps_deep_learning/fmr/model.py
|
model.py
|
py
| 36,481 |
python
|
en
|
code
| 25 |
github-code
|
6
|
39267295276
|
import sys
import multiprocessing
from controls import ManualControl
from cam import Camera
from server import get_command_keyboard, stream_frame, get_command
import threading
# mode = 1: drive with the keyboard
# mode = 2: drive with voice commands
# mode = 3: drive with the keyboard while streaming frames from the Raspberry Pi to the PC
# mode = 4: drive with voice commands while streaming frames from the Raspberry Pi to the PC
# default mode = 1
mode = 1
def cam(targets, isRead, phase, frm):
# set camera object with Camera class
camera = Camera(show=False, captureIndex=-1, camRes=(640, 480))
camera.set_camera_settings(966.9541358947754)
camera.set_aruco_settings(markerSize=4, totalMarkers=50, arucoWidth=6)
while True:
camera.set_frame()
isRead.value = camera.isRead
camera.detect_aruco()
if camera.target is not None:
camera.target.set_instant_phase_angle(phase.value)
targets.append(camera.target)
frm["data"] = camera.frame
camera.break_and_release()
if camera.out:
break
if __name__ == '__main__':
manager = multiprocessing.Manager()
targets = manager.list()
isRead = multiprocessing.Value('i', 0)
phase = multiprocessing.Value('i', 0)
frm = manager.dict()
frm["command"] = "dur"
    # Set mode = 1 to control the Raspberry Pi from the PC with the keyboard.
if mode == 1:
t1 = threading.Thread(target=get_command_keyboard, args=(frm,))
t2 = threading.Thread(target=ManualControl.get_command_keyboard_from_pc, args=(frm,))
try:
t1.start()
t2.start()
except (KeyboardInterrupt, SystemExit):
sys.exit()
    # Set mode = 2 to control the Raspberry Pi from the PC with voice commands.
elif mode == 2:
t1 = threading.Thread(target=get_command, args=(frm,))
t2 = threading.Thread(target=ManualControl.speech_move, args=(frm,))
try:
t1.start()
t2.start()
except (KeyboardInterrupt, SystemExit):
sys.exit()
    # mode = 3: keyboard control while streaming frames from the Raspberry Pi to the PC
elif mode == 3:
p1 = multiprocessing.Process(target=cam, args=(targets, isRead, phase, frm))
t1 = threading.Thread(target=stream_frame, args=(frm,))
t2 = threading.Thread(target=get_command_keyboard, args=(frm,))
t3 = threading.Thread(target=ManualControl.get_command_keyboard_from_pc, args=(frm,))
try:
p1.start()
t1.start()
t2.start()
t3.start()
except (KeyboardInterrupt, SystemExit):
sys.exit()
    # mode = 4: voice-command control while streaming frames from the Raspberry Pi to the PC
elif mode == 4:
p1 = multiprocessing.Process(target=cam, args=(targets, isRead, phase, frm))
t1 = threading.Thread(target=stream_frame, args=(frm,))
t2 = threading.Thread(target=get_command, args=(frm,))
t3 = threading.Thread(target=ManualControl.speech_move, args=(frm,))
try:
p1.start()
t1.start()
t2.start()
t3.start()
except (KeyboardInterrupt, SystemExit):
p1.kill()
sys.exit()
|
AbdullahTas123/pi-robot-car
|
raspberrypi/main.py
|
main.py
|
py
| 3,394 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41550373604
|
from . animation import Animation
class Off(Animation):
"""A trivial animation that turns all pixels in a layout off."""
def __init__(self, layout, timeout=1, **kwds):
super().__init__(layout, **kwds)
self.internal_delay = timeout
def step(self, amt=1):
self.layout.all_off()
from .. util import deprecated
if deprecated.allowed(): # pragma: no cover
OffAnim = Off
|
ManiacalLabs/BiblioPixel
|
bibliopixel/animation/off.py
|
off.py
|
py
| 412 |
python
|
en
|
code
| 263 |
github-code
|
6
|
70096824829
|
k = int(input())
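# Builds the Thue-Morse-style 0/1 string by repeatedly appending the bitwise
# complement of the current prefix, returning its k-th character (1-indexed).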
def mos(n):
for i in range(len(n)):
if n[i] == "0":
n += "1"
elif n[i] == "1":
n += "0"
if len(n) == k:
return n[k - 1]
return mos(n)
print(mos("0"))
|
YooGunWook/coding_test
|
백준/백준_18222번.py
|
백준_18222번.py
|
py
| 239 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22386235362
|
from sports.nba.nba_team import NBA_Team
class PortlandTrailBlazers(NBA_Team):
"""
NBA's Portland TrailBlazers Static Information
"""
full_name = "Portland TrailBlazers"
name = "TrailBlazers"
team_id = 1610612757
def __init__(self):
"""
"""
super().__init__()
|
FBB-David/sportsdata
|
src/sportsdata/nba/teams/portland_trail_blazers.py
|
portland_trail_blazers.py
|
py
| 317 |
python
|
en
|
code
| 0 |
github-code
|
6
|