file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
utils.py | import io
import os
import torch
import logging
import json
import pickle
import argparse
from pprint import pprint
# 3rd party libraries
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils import data
from sklearn import decomposition
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve, f1_score, roc_auc_score, auc, average_precision_score, precision_score, recall_score
from transformers import *
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import torchtext
# custom
from AbstractDataset import AbstractDataset
from AbstractBert import AbstractBert
def load_data(config, vocab, proportion: float=0.7, max_len: int=256, partition: dict=None, labels: dict=None):
"""Load data using PyTorch DataLoader.
Keyword Arguments:
config {string} -- config file containing data paths and tokenizer information
metadata {bool} -- whether the data contains metadata for augmented embeddings
proportion {float} -- proportion for splitting up train and test. (default: {0.7})
max_len {int} -- maximum token length for a text. (default: {128})
partition {dict} -- maps lists of training and validation data IDs (default: {None})
labels {dict} -- (default: {None})
Returns:
partition {dict} -- list of ids in train and valid datasets
torch.utils.data.Dataset -- dataset
"""
# columns if meta: [0] unique ID, [1] text, [2] metadata, [3] label
# columns if no meta: [0] unique ID, [1] text, [2] label
if config["metadata"]:
unique_id_col = 0
text_col = 1
metadata_col = 2
label_col = 3
else:
unique_id_col = 0
text_col = 1
label_col = 3
dataset = pd.read_csv(config['train_file'], header=None, sep='\t')
print(dataset)
# below fix null values wrecking encode_plus
# convert labels to integer and drop nas
dataset.iloc[:, label_col] = pd.to_numeric(dataset.iloc[:, label_col], errors = 'coerce' )
dataset = dataset[~ dataset[text_col].isnull()]
# recreate the first column with the reset index.
dataset = dataset[(dataset.iloc[:, label_col] == 1) | (dataset.iloc[:, label_col] == 0)] \
.reset_index().reset_index().drop(columns = ['index', 0]).rename(columns = {'level_0': 0})
print(dataset)
# create list of train/valid IDs if not provided
if not partition and not labels:
ids = list(dataset.iloc[:,unique_id_col])
total_len = len(ids)
np.random.shuffle(ids)
labels = {}
# metadata = {}
partition = {'train': ids[ :int(total_len * 0.7)], | labels[i] = dataset.iloc[i][label_col]
# set parameters for DataLoader -- num_workers = cores
params = {'batch_size': 32,
'shuffle': True,
'num_workers': 0
}
tokenizer = AutoTokenizer.from_pretrained(vocab)
dataset[text_col] = dataset[text_col].apply(lambda x: tokenizer.encode_plus(str(x), \
max_length=max_len, \
add_special_tokens=True, \
pad_to_max_length=True, \
truncation=True))
if config['metadata']: # glove for metadata preprocessing
glove = torchtext.vocab.GloVe(name="6B", dim=50)
dataset[metadata_col] = dataset[metadata_col].apply(lambda y: __pad__(str(y).split(" "), 30))
dataset[metadata_col] = dataset[metadata_col].apply(lambda z: __glove_embed__(z, glove))
train_data = dataset[dataset[unique_id_col].isin(partition['train'])]
valid_data = dataset[dataset[unique_id_col].isin(partition['valid'])]
# create train/valid generators
training_set = AbstractDataset(data=train_data, labels=labels, metadata=config['metadata'], list_IDs=partition['train'], max_len = max_len)
training_generator = DataLoader(training_set, **params)
validation_set = AbstractDataset(data=valid_data, labels=labels, metadata=config['metadata'], list_IDs=partition['valid'],max_len = max_len)
validation_generator = DataLoader(validation_set, **params)
return partition, training_generator, validation_generator
def __pad__(sequence, max_l):
""" Padding function for 1D sequences """
if max_l - len(sequence) < 0:
sequence = sequence[:max_l]
else:
sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))
return sequence
def __glove_embed__(sequence, model):
""" Embed words in a sequence using GLoVE model """
embedded = []
for word in sequence:
embedded.append(model[word])
return embedded
def load_embeddings(config, name, vocab, training_generator, validation_generator):
"""Load embeddings either from cache or from scratch
Args:
config (json) -- file configurations.
name --
vocab --
training_generator --
validation_generator --
Returns:
embedding_shape, train_embeddings, valid_embeddings
"""
# Pickle embeddings should be AGNOSTIC to the name. This is because each pickled embedding is specific to the dataset and transformer.
# Applies down the road when/if we attempt active learning
data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension
train_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')
valid_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')
if os.path.exists(train_embed_pkl_f):
with open( train_embed_pkl_f, 'rb') as cache:
train_embeddings = pickle.load(cache)
with open(valid_embed_pkl_f, 'rb') as cache:
valid_embeddings = pickle.load(cache)
else:
# get embeddings from scratch
tokenizer = AutoTokenizer.from_pretrained(vocab)
embedding_model = AbstractBert(vocab)
if torch.cuda.device_count() > 1:
print("GPUs Available: ", torch.cuda.device_count())
embedding_model = torch.nn.DataParallel(embedding_model, device_ids=[0, 1, 2])
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
embedding_model.eval().to(device)
logger.info(' Getting BERT/ROBERTA embeddings...')
train_embeddings = _get_bert_embeddings(training_generator, embedding_model, config["metadata"])
valid_embeddings = _get_bert_embeddings(validation_generator, embedding_model, config["metadata"])
# save embeddings
pickle.dump(train_embeddings, open(train_embed_pkl_f, 'wb'))
pickle.dump(valid_embeddings, open(valid_embed_pkl_f, 'wb'))
logger.info(' Saved full BERT/ROBERTA embeddings.')
embedding_shape = train_embeddings['embeddings'][1].shape[0]
return embedding_shape, train_embeddings, valid_embeddings
def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: False):
"""Get BERT embeddings from a dataloader generator.
Arguments:
data_generator {data.Dataset} -- dataloader generator (AbstractDataset).
embedding_model {torch.nn.Module} -- embedding model.
Returns:
embeddings {dict} -- dictionary containing ids, augmented embeddings, and labels.
"""
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
with torch.set_grad_enabled(False):
embeddings = {'ids': [],
'embeddings': [],
'labels': []
}
# get BERT training embeddings
if metadata:
for local_ids, local_data, local_meta, local_labels in data_generator:
local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \
local_meta, \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data, local_meta)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
else:
for local_ids, local_data, local_labels in data_generator:
local_data, local_labels = local_data.to(device).long().squeeze(1), \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
return embeddings
def get_pca_embeddings(config, name, training_embedding: dict, validation_embedding: dict):
"""Reduced embeddings using PCA.
Args:
training_embedding (dict) -- dictionary containing training embeddings
validation_embedding (dict) -- dictionary containing validation embeddings
Returns:
generator -- Torch Dataloader
tuple -- shape of embedding
"""
data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension
train_pca_pkl_f = os.path.join(config['pca_cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')
valid_pca_pkl_f = os.path.join(config['pca_cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')
if os.path.exists(train_pca_pkl_f):
logger.info(" Loading PCA-embeddings from cache ")
with open(train_pca_pkl_f , 'rb') as cache:
train_embeddings = pickle.load(cache)
with open(valid_pca_pkl_f, 'rb') as cache:
valid_embeddings = pickle.load(cache)
else:
logger.info(' Standardizing ')
ss = StandardScaler()
train_embed_ss = ss.fit_transform(training_embedding['embeddings'])
valid_embed_ss = ss.transform(validation_embedding['embeddings'])
# Dimension reduction: PCA or UMAP (?)
logger.info(' Doing PCA...')
pca_model = decomposition.PCA(n_components = 0.90) # this can be a parameter down the road, but for debugging it's fine
train_reduc = pca_model.fit_transform(train_embed_ss)
val_reduc = pca_model.transform(valid_embed_ss)
training_embedding['embeddings'] = train_reduc
validation_embedding['embeddings'] = val_reduc
train_embeddings = training_embedding.copy()
valid_embeddings = validation_embedding.copy()
# save embeddings
pickle.dump(train_embeddings, open(train_pca_pkl_f, 'wb'))
pickle.dump(valid_embeddings, open(valid_pca_pkl_f, 'wb'))
embedding_shape = len(train_embeddings['embeddings'][0])
return embedding_shape, train_embeddings, valid_embeddings
def metrics(metric_type: str, preds: list, labels: list):
""" Provides various metrics between predictions and labels.
Arguments:
metric_type {str} -- type of metric to use ['flat_accuracy', 'f1', 'roc_auc', 'precision', 'recall']
preds {list} -- predictions.
labels {list} -- labels.
Returns:
int -- prediction accuracy
"""
assert metric_type in ['flat_accuracy', 'f1', 'roc_auc', 'ap'], 'Metrics must be one of the following: \
[\'flat_accuracy\', \'f1\', \'roc_auc\'] \
\'precision\', \'recall\', \'ap\']'
labels = np.array(labels)
# preds = np.concatenate(np.asarray(preds))
if metric_type == 'flat_accuracy':
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
elif metric_type == 'f1':
return f1_score(labels, preds)
elif metric_type == 'roc_auc':
return roc_auc_score(labels, preds)
elif metric_type == 'precision':
return precision_score(labels, preds)
elif metric_type == 'recall':
return recall_score(labels, preds)
elif metric_type == 'ap':
return average_precision_score(labels, preds) | 'valid': ids[int(total_len * 0.7): ]
}
for i in dataset.iloc[:, unique_id_col]: | random_line_split |
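Each row in this table is one fill-in-the-middle (FIM) sample: the `prefix` column runs up to the first in-code `|` separator, the `suffix` column follows it, and the held-out `middle` plus the `fim_type` label close the row. As a reading aid, here is a minimal sketch of how a row recombines into the original source; the column names come from the header above, while the helper function and the abridged literals are illustrative assumptions, not part of the dataset itself.

```python
# Hedged sketch: recombine one FIM row into contiguous source text.
# Column names (file_name, prefix, suffix, middle, fim_type) are taken from the
# table header; the literals below are abridged from the utils.py row above.
def reassemble(row: dict) -> str:
    # For the fim_type values shown in this preview (random_line_split,
    # conditional_block, identifier_name, identifier_body), the original file
    # text is prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "file_name": "utils.py",
    "prefix": "... partition = {'train': ids[:int(total_len * 0.7)],",  # abridged
    "middle": "'valid': ids[int(total_len * 0.7):]}\nfor i in dataset.iloc[:, unique_id_col]:\n",
    "suffix": "labels[i] = dataset.iloc[i][label_col] ...",  # abridged
    "fim_type": "random_line_split",
}

print(reassemble(example_row))  # prints the re-joined snippet
```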
utils.py | import io
import os
import torch
import logging
import json
import pickle
import argparse
from pprint import pprint
# 3rd party libraries
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils import data
from sklearn import decomposition
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve, f1_score, roc_auc_score, auc, average_precision_score, precision_score, recall_score
from transformers import *
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import torchtext
# custom
from AbstractDataset import AbstractDataset
from AbstractBert import AbstractBert
def load_data(config, vocab, proportion: float=0.7, max_len: int=256, partition: dict=None, labels: dict=None):
"""Load data using PyTorch DataLoader.
Keyword Arguments:
config {string} -- config file containing data paths and tokenizer information
metadata {bool} -- whether the data contains metadata for augmented embeddings
proportion {float} -- proportion for splitting up train and test. (default: {0.7})
max_len {int} -- maximum token length for a text. (default: {128})
partition {dict} -- maps lists of training and validation data IDs (default: {None})
labels {dict} -- (default: {None})
Returns:
partition {dict} -- list of ids in train and valid datasets
torch.utils.data.Dataset -- dataset
"""
# columns if meta: [0] unique ID, [1] text, [2] metadata, [3] label
# columns if no meta: [0] unique ID, [1] text, [2] label
if config["metadata"]:
unique_id_col = 0
text_col = 1
metadata_col = 2
label_col = 3
else:
unique_id_col = 0
text_col = 1
label_col = 3
dataset = pd.read_csv(config['train_file'], header=None, sep='\t')
print(dataset)
# below fix null values wrecking encode_plus
# convert labels to integer and drop nas
dataset.iloc[:, label_col] = pd.to_numeric(dataset.iloc[:, label_col], errors = 'coerce' )
dataset = dataset[~ dataset[text_col].isnull()]
# recreate the first column with the reset index.
dataset = dataset[(dataset.iloc[:, label_col] == 1) | (dataset.iloc[:, label_col] == 0)] \
.reset_index().reset_index().drop(columns = ['index', 0]).rename(columns = {'level_0': 0})
print(dataset)
# create list of train/valid IDs if not provided
if not partition and not labels:
ids = list(dataset.iloc[:,unique_id_col])
total_len = len(ids)
np.random.shuffle(ids)
labels = {}
# metadata = {}
partition = {'train': ids[ :int(total_len * 0.7)],
'valid': ids[int(total_len * 0.7): ]
}
for i in dataset.iloc[:, unique_id_col]:
labels[i] = dataset.iloc[i][label_col]
# set parameters for DataLoader -- num_workers = cores
params = {'batch_size': 32,
'shuffle': True,
'num_workers': 0
}
tokenizer = AutoTokenizer.from_pretrained(vocab)
dataset[text_col] = dataset[text_col].apply(lambda x: tokenizer.encode_plus(str(x), \
max_length=max_len, \
add_special_tokens=True, \
pad_to_max_length=True, \
truncation=True))
if config['metadata']: # glove for metadata preprocessing
glove = torchtext.vocab.GloVe(name="6B", dim=50)
dataset[metadata_col] = dataset[metadata_col].apply(lambda y: __pad__(str(y).split(" "), 30))
dataset[metadata_col] = dataset[metadata_col].apply(lambda z: __glove_embed__(z, glove))
train_data = dataset[dataset[unique_id_col].isin(partition['train'])]
valid_data = dataset[dataset[unique_id_col].isin(partition['valid'])]
# create train/valid generators
training_set = AbstractDataset(data=train_data, labels=labels, metadata=config['metadata'], list_IDs=partition['train'], max_len = max_len)
training_generator = DataLoader(training_set, **params)
validation_set = AbstractDataset(data=valid_data, labels=labels, metadata=config['metadata'], list_IDs=partition['valid'],max_len = max_len)
validation_generator = DataLoader(validation_set, **params)
return partition, training_generator, validation_generator
def __pad__(sequence, max_l):
""" Padding function for 1D sequences """
if max_l - len(sequence) < 0:
|
else:
sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))
return sequence
def __glove_embed__(sequence, model):
""" Embed words in a sequence using GLoVE model """
embedded = []
for word in sequence:
embedded.append(model[word])
return embedded
def load_embeddings(config, name, vocab, training_generator, validation_generator):
"""Load embeddings either from cache or from scratch
Args:
config (json) -- file configurations.
name --
vocab --
training_generator --
validation_generator --
Returns:
embedding_shape, train_embeddings, valid_embeddings
"""
# Pickle embeddings should be AGNOSTIC to the name. This is because each pickled embedding is specific to the dataset and transformer.
# Applies down the road when/if we attempt active learning
data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension
train_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')
valid_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')
if os.path.exists(train_embed_pkl_f):
with open( train_embed_pkl_f, 'rb') as cache:
train_embeddings = pickle.load(cache)
with open(valid_embed_pkl_f, 'rb') as cache:
valid_embeddings = pickle.load(cache)
else:
# get embeddings from scratch
tokenizer = AutoTokenizer.from_pretrained(vocab)
embedding_model = AbstractBert(vocab)
if torch.cuda.device_count() > 1:
print("GPUs Available: ", torch.cuda.device_count())
embedding_model = torch.nn.DataParallel(embedding_model, device_ids=[0, 1, 2])
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
embedding_model.eval().to(device)
logger.info(' Getting BERT/ROBERTA embeddings...')
train_embeddings = _get_bert_embeddings(training_generator, embedding_model, config["metadata"])
valid_embeddings = _get_bert_embeddings(validation_generator, embedding_model, config["metadata"])
# save embeddings
pickle.dump(train_embeddings, open(train_embed_pkl_f, 'wb'))
pickle.dump(valid_embeddings, open(valid_embed_pkl_f, 'wb'))
logger.info(' Saved full BERT/ROBERTA embeddings.')
embedding_shape = train_embeddings['embeddings'][1].shape[0]
return embedding_shape, train_embeddings, valid_embeddings
def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: False):
"""Get BERT embeddings from a dataloader generator.
Arguments:
data_generator {data.Dataset} -- dataloader generator (AbstractDataset).
embedding_model {torch.nn.Module} -- embedding model.
Returns:
embeddings {dict} -- dictionary containing ids, augmented embeddings, and labels.
"""
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
with torch.set_grad_enabled(False):
embeddings = {'ids': [],
'embeddings': [],
'labels': []
}
# get BERT training embeddings
if metadata:
for local_ids, local_data, local_meta, local_labels in data_generator:
local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \
local_meta, \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data, local_meta)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
else:
for local_ids, local_data, local_labels in data_generator:
local_data, local_labels = local_data.to(device).long().squeeze(1), \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
return embeddings
def get_pca_embeddings(config, name, training_embedding: dict, validation_embedding: dict):
"""Reduced embeddings using PCA.
Args:
training_embedding (dict) -- dictionary containing training embeddings
validation_embedding (dict) -- dictionary containing validation embeddings
Returns:
generator -- Torch Dataloader
tuple -- shape of embedding
"""
data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension
train_pca_pkl_f = os.path.join(config['pca_cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')
valid_pca_pkl_f = os.path.join(config['pca_cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')
if os.path.exists(train_pca_pkl_f):
logger.info(" Loading PCA-embeddings from cache ")
with open(train_pca_pkl_f , 'rb') as cache:
train_embeddings = pickle.load(cache)
with open(valid_pca_pkl_f, 'rb') as cache:
valid_embeddings = pickle.load(cache)
else:
logger.info(' Standardizing ')
ss = StandardScaler()
train_embed_ss = ss.fit_transform(training_embedding['embeddings'])
valid_embed_ss = ss.transform(validation_embedding['embeddings'])
# Dimension reduction: PCA or UMAP (?)
logger.info(' Doing PCA...')
pca_model = decomposition.PCA(n_components = 0.90) # this can be a parameter down the road, but for debugging it's fine
train_reduc = pca_model.fit_transform(train_embed_ss)
val_reduc = pca_model.transform(valid_embed_ss)
training_embedding['embeddings'] = train_reduc
validation_embedding['embeddings'] = val_reduc
train_embeddings = training_embedding.copy()
valid_embeddings = validation_embedding.copy()
# save embeddings
pickle.dump(train_embeddings, open(train_pca_pkl_f, 'wb'))
pickle.dump(valid_embeddings, open(valid_pca_pkl_f, 'wb'))
embedding_shape = len(train_embeddings['embeddings'][0])
return embedding_shape, train_embeddings, valid_embeddings
def metrics(metric_type: str, preds: list, labels: list):
""" Provides various metrics between predictions and labels.
Arguments:
metric_type {str} -- type of metric to use ['flat_accuracy', 'f1', 'roc_auc', 'precision', 'recall']
preds {list} -- predictions.
labels {list} -- labels.
Returns:
int -- prediction accuracy
"""
assert metric_type in ['flat_accuracy', 'f1', 'roc_auc', 'ap'], 'Metrics must be one of the following: \
[\'flat_accuracy\', \'f1\', \'roc_auc\'] \
\'precision\', \'recall\', \'ap\']'
labels = np.array(labels)
# preds = np.concatenate(np.asarray(preds))
if metric_type == 'flat_accuracy':
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
elif metric_type == 'f1':
return f1_score(labels, preds)
elif metric_type == 'roc_auc':
return roc_auc_score(labels, preds)
elif metric_type == 'precision':
return precision_score(labels, preds)
elif metric_type == 'recall':
return recall_score(labels, preds)
elif metric_type == 'ap':
return average_precision_score(labels, preds)
| sequence = sequence[:max_l] | conditional_block |
utils.py | import io
import os
import torch
import logging
import json
import pickle
import argparse
from pprint import pprint
# 3rd party libraries
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils import data
from sklearn import decomposition
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve, f1_score, roc_auc_score, auc, average_precision_score, precision_score, recall_score
from transformers import *
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import torchtext
# custom
from AbstractDataset import AbstractDataset
from AbstractBert import AbstractBert
def load_data(config, vocab, proportion: float=0.7, max_len: int=256, partition: dict=None, labels: dict=None):
"""Load data using PyTorch DataLoader.
Keyword Arguments:
config {string} -- config file containing data paths and tokenizer information
metadata {bool} -- whether the data contains metadata for augmented embeddings
proportion {float} -- proportion for splitting up train and test. (default: {0.7})
max_len {int} -- maximum token length for a text. (default: {128})
partition {dict} -- maps lists of training and validation data IDs (default: {None})
labels {dict} -- (default: {None})
Returns:
partition {dict} -- list of ids in train and valid datasets
torch.utils.data.Dataset -- dataset
"""
# columns if meta: [0] unique ID, [1] text, [2] metadata, [3] label
# columns if no meta: [0] unique ID, [1] text, [2] label
if config["metadata"]:
unique_id_col = 0
text_col = 1
metadata_col = 2
label_col = 3
else:
unique_id_col = 0
text_col = 1
label_col = 3
dataset = pd.read_csv(config['train_file'], header=None, sep='\t')
print(dataset)
# below fix null values wrecking encode_plus
# convert labels to integer and drop nas
dataset.iloc[:, label_col] = pd.to_numeric(dataset.iloc[:, label_col], errors = 'coerce' )
dataset = dataset[~ dataset[text_col].isnull()]
# recreate the first column with the reset index.
dataset = dataset[(dataset.iloc[:, label_col] == 1) | (dataset.iloc[:, label_col] == 0)] \
.reset_index().reset_index().drop(columns = ['index', 0]).rename(columns = {'level_0': 0})
print(dataset)
# create list of train/valid IDs if not provided
if not partition and not labels:
ids = list(dataset.iloc[:,unique_id_col])
total_len = len(ids)
np.random.shuffle(ids)
labels = {}
# metadata = {}
partition = {'train': ids[ :int(total_len * 0.7)],
'valid': ids[int(total_len * 0.7): ]
}
for i in dataset.iloc[:, unique_id_col]:
labels[i] = dataset.iloc[i][label_col]
# set parameters for DataLoader -- num_workers = cores
params = {'batch_size': 32,
'shuffle': True,
'num_workers': 0
}
tokenizer = AutoTokenizer.from_pretrained(vocab)
dataset[text_col] = dataset[text_col].apply(lambda x: tokenizer.encode_plus(str(x), \
max_length=max_len, \
add_special_tokens=True, \
pad_to_max_length=True, \
truncation=True))
if config['metadata']: # glove for metadata preprocessing
glove = torchtext.vocab.GloVe(name="6B", dim=50)
dataset[metadata_col] = dataset[metadata_col].apply(lambda y: __pad__(str(y).split(" "), 30))
dataset[metadata_col] = dataset[metadata_col].apply(lambda z: __glove_embed__(z, glove))
train_data = dataset[dataset[unique_id_col].isin(partition['train'])]
valid_data = dataset[dataset[unique_id_col].isin(partition['valid'])]
# create train/valid generators
training_set = AbstractDataset(data=train_data, labels=labels, metadata=config['metadata'], list_IDs=partition['train'], max_len = max_len)
training_generator = DataLoader(training_set, **params)
validation_set = AbstractDataset(data=valid_data, labels=labels, metadata=config['metadata'], list_IDs=partition['valid'],max_len = max_len)
validation_generator = DataLoader(validation_set, **params)
return partition, training_generator, validation_generator
def __pad__(sequence, max_l):
""" Padding function for 1D sequences """
if max_l - len(sequence) < 0:
sequence = sequence[:max_l]
else:
sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))
return sequence
def __glove_embed__(sequence, model):
""" Embed words in a sequence using GLoVE model """
embedded = []
for word in sequence:
embedded.append(model[word])
return embedded
def load_embeddings(config, name, vocab, training_generator, validation_generator):
"""Load embeddings either from cache or from scratch
Args:
config (json) -- file configurations.
name --
vocab --
training_generator --
validation_generator --
Returns:
embedding_shape, train_embeddings, valid_embeddings
"""
# Pickle embeddings should be AGNOSTIC to the name. This is because each pickled embedding is specific to the dataset and transformer.
# Applies down the road when/if we attempt active learning
data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension
train_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')
valid_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')
if os.path.exists(train_embed_pkl_f):
with open( train_embed_pkl_f, 'rb') as cache:
train_embeddings = pickle.load(cache)
with open(valid_embed_pkl_f, 'rb') as cache:
valid_embeddings = pickle.load(cache)
else:
# get embeddings from scratch
tokenizer = AutoTokenizer.from_pretrained(vocab)
embedding_model = AbstractBert(vocab)
if torch.cuda.device_count() > 1:
print("GPUs Available: ", torch.cuda.device_count())
embedding_model = torch.nn.DataParallel(embedding_model, device_ids=[0, 1, 2])
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
embedding_model.eval().to(device)
logger.info(' Getting BERT/ROBERTA embeddings...')
train_embeddings = _get_bert_embeddings(training_generator, embedding_model, config["metadata"])
valid_embeddings = _get_bert_embeddings(validation_generator, embedding_model, config["metadata"])
# save embeddings
pickle.dump(train_embeddings, open(train_embed_pkl_f, 'wb'))
pickle.dump(valid_embeddings, open(valid_embed_pkl_f, 'wb'))
logger.info(' Saved full BERT/ROBERTA embeddings.')
embedding_shape = train_embeddings['embeddings'][1].shape[0]
return embedding_shape, train_embeddings, valid_embeddings
def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: False):
"""Get BERT embeddings from a dataloader generator.
Arguments:
data_generator {data.Dataset} -- dataloader generator (AbstractDataset).
embedding_model {torch.nn.Module} -- embedding model.
Returns:
embeddings {dict} -- dictionary containing ids, augmented embeddings, and labels.
"""
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
with torch.set_grad_enabled(False):
embeddings = {'ids': [],
'embeddings': [],
'labels': []
}
# get BERT training embeddings
if metadata:
for local_ids, local_data, local_meta, local_labels in data_generator:
local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \
local_meta, \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data, local_meta)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
else:
for local_ids, local_data, local_labels in data_generator:
local_data, local_labels = local_data.to(device).long().squeeze(1), \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
return embeddings
def get_pca_embeddings(config, name, training_embedding: dict, validation_embedding: dict):
"""Reduced embeddings using PCA.
Args:
training_embedding (dict) -- dictionary containing training embeddings
validation_embedding (dict) -- dictionary containing validation embeddings
Returns:
generator -- Torch Dataloader
tuple -- shape of embedding
"""
data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension
train_pca_pkl_f = os.path.join(config['pca_cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')
valid_pca_pkl_f = os.path.join(config['pca_cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')
if os.path.exists(train_pca_pkl_f):
logger.info(" Loading PCA-embeddings from cache ")
with open(train_pca_pkl_f , 'rb') as cache:
train_embeddings = pickle.load(cache)
with open(valid_pca_pkl_f, 'rb') as cache:
valid_embeddings = pickle.load(cache)
else:
logger.info(' Standardizing ')
ss = StandardScaler()
train_embed_ss = ss.fit_transform(training_embedding['embeddings'])
valid_embed_ss = ss.transform(validation_embedding['embeddings'])
# Dimension reduction: PCA or UMAP (?)
logger.info(' Doing PCA...')
pca_model = decomposition.PCA(n_components = 0.90) # this can be a parameter down the road, but for debugging it's fine
train_reduc = pca_model.fit_transform(train_embed_ss)
val_reduc = pca_model.transform(valid_embed_ss)
training_embedding['embeddings'] = train_reduc
validation_embedding['embeddings'] = val_reduc
train_embeddings = training_embedding.copy()
valid_embeddings = validation_embedding.copy()
# save embeddings
pickle.dump(train_embeddings, open(train_pca_pkl_f, 'wb'))
pickle.dump(valid_embeddings, open(valid_pca_pkl_f, 'wb'))
embedding_shape = len(train_embeddings['embeddings'][0])
return embedding_shape, train_embeddings, valid_embeddings
def | (metric_type: str, preds: list, labels: list):
""" Provides various metrics between predictions and labels.
Arguments:
metric_type {str} -- type of metric to use ['flat_accuracy', 'f1', 'roc_auc', 'precision', 'recall']
preds {list} -- predictions.
labels {list} -- labels.
Returns:
int -- prediction accuracy
"""
assert metric_type in ['flat_accuracy', 'f1', 'roc_auc', 'ap'], 'Metrics must be one of the following: \
[\'flat_accuracy\', \'f1\', \'roc_auc\'] \
\'precision\', \'recall\', \'ap\']'
labels = np.array(labels)
# preds = np.concatenate(np.asarray(preds))
if metric_type == 'flat_accuracy':
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
elif metric_type == 'f1':
return f1_score(labels, preds)
elif metric_type == 'roc_auc':
return roc_auc_score(labels, preds)
elif metric_type == 'precision':
return precision_score(labels, preds)
elif metric_type == 'recall':
return recall_score(labels, preds)
elif metric_type == 'ap':
return average_precision_score(labels, preds)
| metrics | identifier_name |
utils.py | import io
import os
import torch
import logging
import json
import pickle
import argparse
from pprint import pprint
# 3rd party libraries
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils import data
from sklearn import decomposition
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve, f1_score, roc_auc_score, auc, average_precision_score, precision_score, recall_score
from transformers import *
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import torchtext
# custom
from AbstractDataset import AbstractDataset
from AbstractBert import AbstractBert
def load_data(config, vocab, proportion: float=0.7, max_len: int=256, partition: dict=None, labels: dict=None):
"""Load data using PyTorch DataLoader.
Keyword Arguments:
config {string} -- config file containing data paths and tokenizer information
metadata {bool} -- whether the data contains metadata for augmented embeddings
proportion {float} -- proportion for splitting up train and test. (default: {0.7})
max_len {int} -- maximum token length for a text. (default: {128})
partition {dict} -- maps lists of training and validation data IDs (default: {None})
labels {dict} -- (default: {None})
Returns:
partition {dict} -- list of ids in train and valid datasets
torch.utils.data.Dataset -- dataset
"""
# columns if meta: [0] unique ID, [1] text, [2] metadata, [3] label
# columns if no meta: [0] unique ID, [1] text, [2] label
if config["metadata"]:
unique_id_col = 0
text_col = 1
metadata_col = 2
label_col = 3
else:
unique_id_col = 0
text_col = 1
label_col = 3
dataset = pd.read_csv(config['train_file'], header=None, sep='\t')
print(dataset)
# below fix null values wrecking encode_plus
# convert labels to integer and drop nas
dataset.iloc[:, label_col] = pd.to_numeric(dataset.iloc[:, label_col], errors = 'coerce' )
dataset = dataset[~ dataset[text_col].isnull()]
# recreate the first column with the reset index.
dataset = dataset[(dataset.iloc[:, label_col] == 1) | (dataset.iloc[:, label_col] == 0)] \
.reset_index().reset_index().drop(columns = ['index', 0]).rename(columns = {'level_0': 0})
print(dataset)
# create list of train/valid IDs if not provided
if not partition and not labels:
ids = list(dataset.iloc[:,unique_id_col])
total_len = len(ids)
np.random.shuffle(ids)
labels = {}
# metadata = {}
partition = {'train': ids[ :int(total_len * 0.7)],
'valid': ids[int(total_len * 0.7): ]
}
for i in dataset.iloc[:, unique_id_col]:
labels[i] = dataset.iloc[i][label_col]
# set parameters for DataLoader -- num_workers = cores
params = {'batch_size': 32,
'shuffle': True,
'num_workers': 0
}
tokenizer = AutoTokenizer.from_pretrained(vocab)
dataset[text_col] = dataset[text_col].apply(lambda x: tokenizer.encode_plus(str(x), \
max_length=max_len, \
add_special_tokens=True, \
pad_to_max_length=True, \
truncation=True))
if config['metadata']: # glove for metadata preprocessing
glove = torchtext.vocab.GloVe(name="6B", dim=50)
dataset[metadata_col] = dataset[metadata_col].apply(lambda y: __pad__(str(y).split(" "), 30))
dataset[metadata_col] = dataset[metadata_col].apply(lambda z: __glove_embed__(z, glove))
train_data = dataset[dataset[unique_id_col].isin(partition['train'])]
valid_data = dataset[dataset[unique_id_col].isin(partition['valid'])]
# create train/valid generators
training_set = AbstractDataset(data=train_data, labels=labels, metadata=config['metadata'], list_IDs=partition['train'], max_len = max_len)
training_generator = DataLoader(training_set, **params)
validation_set = AbstractDataset(data=valid_data, labels=labels, metadata=config['metadata'], list_IDs=partition['valid'],max_len = max_len)
validation_generator = DataLoader(validation_set, **params)
return partition, training_generator, validation_generator
def __pad__(sequence, max_l):
|
def __glove_embed__(sequence, model):
""" Embed words in a sequence using GLoVE model """
embedded = []
for word in sequence:
embedded.append(model[word])
return embedded
def load_embeddings(config, name, vocab, training_generator, validation_generator):
"""Load embeddings either from cache or from scratch
Args:
config (json) -- file configurations.
name --
vocab --
training_generator --
validation_generator --
Returns:
embedding_shape, train_embeddings, valid_embeddings
"""
# Pickle embeddings should be AGNOSTIC to the name. This is because each pickled embedding is specific to the dataset and transformer.
# Applies down the road when/if we attempt active learning
data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension
train_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')
valid_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')
if os.path.exists(train_embed_pkl_f):
with open( train_embed_pkl_f, 'rb') as cache:
train_embeddings = pickle.load(cache)
with open(valid_embed_pkl_f, 'rb') as cache:
valid_embeddings = pickle.load(cache)
else:
# get embeddings from scratch
tokenizer = AutoTokenizer.from_pretrained(vocab)
embedding_model = AbstractBert(vocab)
if torch.cuda.device_count() > 1:
print("GPUs Available: ", torch.cuda.device_count())
embedding_model = torch.nn.DataParallel(embedding_model, device_ids=[0, 1, 2])
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
embedding_model.eval().to(device)
logger.info(' Getting BERT/ROBERTA embeddings...')
train_embeddings = _get_bert_embeddings(training_generator, embedding_model, config["metadata"])
valid_embeddings = _get_bert_embeddings(validation_generator, embedding_model, config["metadata"])
# save embeddings
pickle.dump(train_embeddings, open(train_embed_pkl_f, 'wb'))
pickle.dump(valid_embeddings, open(valid_embed_pkl_f, 'wb'))
logger.info(' Saved full BERT/ROBERTA embeddings.')
embedding_shape = train_embeddings['embeddings'][1].shape[0]
return embedding_shape, train_embeddings, valid_embeddings
def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: False):
"""Get BERT embeddings from a dataloader generator.
Arguments:
data_generator {data.Dataset} -- dataloader generator (AbstractDataset).
embedding_model {torch.nn.Module} -- embedding model.
Returns:
embeddings {dict} -- dictionary containing ids, augmented embeddings, and labels.
"""
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
with torch.set_grad_enabled(False):
embeddings = {'ids': [],
'embeddings': [],
'labels': []
}
# get BERT training embeddings
if metadata:
for local_ids, local_data, local_meta, local_labels in data_generator:
local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \
local_meta, \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data, local_meta)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
else:
for local_ids, local_data, local_labels in data_generator:
local_data, local_labels = local_data.to(device).long().squeeze(1), \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
return embeddings
def get_pca_embeddings(config, name, training_embedding: dict, validation_embedding: dict):
"""Reduced embeddings using PCA.
Args:
training_embedding (dict) -- dictionary containing training embeddings
validation_embedding (dict) -- dictionary containing validation embeddings
Returns:
generator -- Torch Dataloader
tuple -- shape of embedding
"""
data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension
train_pca_pkl_f = os.path.join(config['pca_cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')
valid_pca_pkl_f = os.path.join(config['pca_cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')
if os.path.exists(train_pca_pkl_f):
logger.info(" Loading PCA-embeddings from cache ")
with open(train_pca_pkl_f , 'rb') as cache:
train_embeddings = pickle.load(cache)
with open(valid_pca_pkl_f, 'rb') as cache:
valid_embeddings = pickle.load(cache)
else:
logger.info(' Standardizing ')
ss = StandardScaler()
train_embed_ss = ss.fit_transform(training_embedding['embeddings'])
valid_embed_ss = ss.transform(validation_embedding['embeddings'])
# Dimension reduction: PCA or UMAP (?)
logger.info(' Doing PCA...')
pca_model = decomposition.PCA(n_components = 0.90) # this can be a parameter down the road, but for debugging it's fine
train_reduc = pca_model.fit_transform(train_embed_ss)
val_reduc = pca_model.transform(valid_embed_ss)
training_embedding['embeddings'] = train_reduc
validation_embedding['embeddings'] = val_reduc
train_embeddings = training_embedding.copy()
valid_embeddings = validation_embedding.copy()
# save embeddings
pickle.dump(train_embeddings, open(train_pca_pkl_f, 'wb'))
pickle.dump(valid_embeddings, open(valid_pca_pkl_f, 'wb'))
embedding_shape = len(train_embeddings['embeddings'][0])
return embedding_shape, train_embeddings, valid_embeddings
def metrics(metric_type: str, preds: list, labels: list):
""" Provides various metrics between predictions and labels.
Arguments:
metric_type {str} -- type of metric to use ['flat_accuracy', 'f1', 'roc_auc', 'precision', 'recall']
preds {list} -- predictions.
labels {list} -- labels.
Returns:
int -- prediction accuracy
"""
assert metric_type in ['flat_accuracy', 'f1', 'roc_auc', 'ap'], 'Metrics must be one of the following: \
[\'flat_accuracy\', \'f1\', \'roc_auc\'] \
\'precision\', \'recall\', \'ap\']'
labels = np.array(labels)
# preds = np.concatenate(np.asarray(preds))
if metric_type == 'flat_accuracy':
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
elif metric_type == 'f1':
return f1_score(labels, preds)
elif metric_type == 'roc_auc':
return roc_auc_score(labels, preds)
elif metric_type == 'precision':
return precision_score(labels, preds)
elif metric_type == 'recall':
return recall_score(labels, preds)
elif metric_type == 'ap':
return average_precision_score(labels, preds)
| """ Padding function for 1D sequences """
if max_l - len(sequence) < 0:
sequence = sequence[:max_l]
else:
sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))
return sequence | identifier_body |
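The four `fim_type` classes visible in this preview (random_line_split, conditional_block, identifier_name, identifier_body) differ only in how the held-out `middle` was chosen; the row above, for instance, holds out an entire function body. Below is a hedged sketch of how such rows could be rendered into prefix-suffix-middle (PSM) training strings; the sentinel tokens are placeholders chosen for illustration, not values defined by this dataset.

```python
# Hedged sketch: format a row as a PSM-style FIM training string.
# The sentinel strings are assumptions for illustration; real FIM tokenizers
# (and this dataset's intended consumers) may define different ones.
FIM_PREFIX = "<fim_prefix>"
FIM_SUFFIX = "<fim_suffix>"
FIM_MIDDLE = "<fim_middle>"

def to_psm_string(row: dict) -> str:
    # The model is shown prefix and suffix, and must generate the held-out middle.
    return (
        f"{FIM_PREFIX}{row['prefix']}"
        f"{FIM_SUFFIX}{row['suffix']}"
        f"{FIM_MIDDLE}{row['middle']}"
    )
```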
ioapic.rs | use core::{fmt, ptr};
use alloc::vec::Vec;
use spin::Mutex;
#[cfg(feature = "acpi")]
use crate::acpi::madt::{self, Madt, MadtEntry, MadtIoApic, MadtIntSrcOverride};
use crate::arch::interrupt::irq;
use crate::memory::Frame;
use crate::paging::{KernelMapper, Page, PageFlags, PhysicalAddress, RmmA, RmmArch, VirtualAddress};
use crate::paging::entry::EntryFlags;
use super::super::cpuid::cpuid;
use super::pic;
pub struct IoApicRegs {
pointer: *const u32,
}
impl IoApicRegs {
fn ioregsel(&self) -> *const u32 {
self.pointer
}
fn iowin(&self) -> *const u32 {
// offset 0x10
unsafe { self.pointer.offset(4) }
}
fn write_ioregsel(&mut self, value: u32) {
unsafe { ptr::write_volatile::<u32>(self.ioregsel() as *mut u32, value) }
}
fn read_iowin(&self) -> u32 {
unsafe { ptr::read_volatile::<u32>(self.iowin()) }
}
fn write_iowin(&mut self, value: u32) {
unsafe { ptr::write_volatile::<u32>(self.iowin() as *mut u32, value) }
}
fn read_reg(&mut self, reg: u8) -> u32 {
self.write_ioregsel(reg.into());
self.read_iowin()
}
fn write_reg(&mut self, reg: u8, value: u32) {
self.write_ioregsel(reg.into());
self.write_iowin(value);
}
pub fn read_ioapicid(&mut self) -> u32 {
self.read_reg(0x00)
}
pub fn write_ioapicid(&mut self, value: u32) {
self.write_reg(0x00, value);
}
pub fn read_ioapicver(&mut self) -> u32 {
self.read_reg(0x01)
}
pub fn read_ioapicarb(&mut self) -> u32 {
self.read_reg(0x02)
}
pub fn read_ioredtbl(&mut self, idx: u8) -> u64 {
assert!(idx < 24);
let lo = self.read_reg(0x10 + idx * 2);
let hi = self.read_reg(0x10 + idx * 2 + 1);
u64::from(lo) | (u64::from(hi) << 32)
}
pub fn write_ioredtbl(&mut self, idx: u8, value: u64) {
assert!(idx < 24);
let lo = value as u32;
let hi = (value >> 32) as u32;
self.write_reg(0x10 + idx * 2, lo);
self.write_reg(0x10 + idx * 2 + 1, hi);
}
pub fn max_redirection_table_entries(&mut self) -> u8 {
let ver = self.read_ioapicver();
((ver & 0x00FF_0000) >> 16) as u8
}
pub fn id(&mut self) -> u8 {
let id_reg = self.read_ioapicid();
((id_reg & 0x0F00_0000) >> 24) as u8
}
}
pub struct IoApic {
regs: Mutex<IoApicRegs>,
gsi_start: u32,
count: u8,
}
impl IoApic {
pub fn new(regs_base: *const u32, gsi_start: u32) -> Self {
let mut regs = IoApicRegs { pointer: regs_base };
let count = regs.max_redirection_table_entries();
Self {
regs: Mutex::new(regs),
gsi_start,
count,
}
}
/// Map an interrupt vector to a physical local APIC ID of a processor (thus physical mode).
pub fn map(&self, idx: u8, info: MapInfo) {
self.regs.lock().write_ioredtbl(idx, info.as_raw())
}
pub fn set_mask(&self, gsi: u32, mask: bool) {
let idx = (gsi - self.gsi_start) as u8;
let mut guard = self.regs.lock();
let mut reg = guard.read_ioredtbl(idx);
reg &= !(1 << 16);
reg |= u64::from(mask) << 16;
guard.write_ioredtbl(idx, reg);
}
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum ApicTriggerMode {
Edge = 0,
Level = 1,
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum ApicPolarity {
ActiveHigh = 0,
ActiveLow = 1,
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum DestinationMode {
Physical = 0,
Logical = 1,
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum DeliveryMode {
Fixed = 0b000,
LowestPriority = 0b001,
Smi = 0b010,
Nmi = 0b100,
Init = 0b101,
ExtInt = 0b111,
}
#[derive(Clone, Copy, Debug)]
pub struct MapInfo {
pub dest: u8,
pub mask: bool,
pub trigger_mode: ApicTriggerMode,
pub polarity: ApicPolarity,
pub dest_mode: DestinationMode,
pub delivery_mode: DeliveryMode,
pub vector: u8,
}
impl MapInfo {
pub fn as_raw(&self) -> u64 {
assert!(self.vector >= 0x20);
assert!(self.vector <= 0xFE);
// TODO: Check for reserved fields.
(u64::from(self.dest) << 56)
| (u64::from(self.mask) << 16)
| ((self.trigger_mode as u64) << 15)
| ((self.polarity as u64) << 13)
| ((self.dest_mode as u64) << 11)
| ((self.delivery_mode as u64) << 8)
| u64::from(self.vector)
} | }
impl fmt::Debug for IoApic {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
struct RedirTable<'a>(&'a Mutex<IoApicRegs>);
impl<'a> fmt::Debug for RedirTable<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut guard = self.0.lock();
let count = guard.max_redirection_table_entries();
f.debug_list().entries((0..count).map(|i| guard.read_ioredtbl(i))).finish()
}
}
f.debug_struct("IoApic")
.field("redir_table", &RedirTable(&self.regs))
.field("gsi_start", &self.gsi_start)
.field("count", &self.count)
.finish()
}
}
#[derive(Clone, Copy, Debug)]
pub enum TriggerMode {
ConformsToSpecs,
Edge,
Level,
}
#[derive(Clone, Copy, Debug)]
pub enum Polarity {
ConformsToSpecs,
ActiveHigh,
ActiveLow,
}
#[derive(Clone, Copy, Debug)]
pub struct Override {
bus_irq: u8,
gsi: u32,
trigger_mode: TriggerMode,
polarity: Polarity,
}
// static mut because only the AP initializes the I/O Apic, and when that is done, it's solely
// accessed immutably.
static mut IOAPICS: Option<Vec<IoApic>> = None;
// static mut for the same reason as above
static mut SRC_OVERRIDES: Option<Vec<Override>> = None;
pub fn ioapics() -> &'static [IoApic] {
unsafe {
IOAPICS.as_ref().map_or(&[], |vector| &vector[..])
}
}
pub fn src_overrides() -> &'static [Override] {
unsafe {
SRC_OVERRIDES.as_ref().map_or(&[], |vector| &vector[..])
}
}
#[cfg(feature = "acpi")]
pub unsafe fn handle_ioapic(mapper: &mut KernelMapper, madt_ioapic: &'static MadtIoApic) {
// map the I/O APIC registers
let frame = Frame::containing_address(PhysicalAddress::new(madt_ioapic.address as usize));
let page = Page::containing_address(VirtualAddress::new(crate::IOAPIC_OFFSET));
assert!(mapper.translate(page.start_address()).is_none());
mapper
.get_mut()
.expect("expected KernelMapper not to be locked re-entrant while mapping I/O APIC memory")
.map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::NO_CACHE.bits(), true))
.expect("failed to map I/O APIC")
.flush();
let ioapic_registers = page.start_address().data() as *const u32;
let ioapic = IoApic::new(ioapic_registers, madt_ioapic.gsi_base);
assert_eq!(ioapic.regs.lock().id(), madt_ioapic.id, "mismatched ACPI MADT I/O APIC ID, and the ID reported by the I/O APIC");
IOAPICS.get_or_insert_with(Vec::new).push(ioapic);
}
#[cfg(feature = "acpi")]
pub unsafe fn handle_src_override(src_override: &'static MadtIntSrcOverride) {
let flags = src_override.flags;
let polarity_raw = (flags & 0x0003) as u8;
let trigger_mode_raw = ((flags & 0x000C) >> 2) as u8;
let polarity = match polarity_raw {
0b00 => Polarity::ConformsToSpecs,
0b01 => Polarity::ActiveHigh,
0b10 => return, // reserved
0b11 => Polarity::ActiveLow,
_ => unreachable!(),
};
let trigger_mode = match trigger_mode_raw {
0b00 => TriggerMode::ConformsToSpecs,
0b01 => TriggerMode::Edge,
0b10 => return, // reserved
0b11 => TriggerMode::Level,
_ => unreachable!(),
};
let over = Override {
bus_irq: src_override.irq_source,
gsi: src_override.gsi_base,
polarity,
trigger_mode,
};
SRC_OVERRIDES.get_or_insert_with(Vec::new).push(over);
}
pub unsafe fn init(active_table: &mut KernelMapper) {
let bsp_apic_id = cpuid().unwrap().get_feature_info().unwrap().initial_local_apic_id(); // TODO: remove unwraps
// search the madt for all IOAPICs.
#[cfg(feature = "acpi")]
{
let madt: &'static Madt = match madt::MADT.as_ref() {
Some(m) => m,
// TODO: Parse MP tables too.
None => return,
};
if madt.flags & madt::FLAG_PCAT != 0 {
pic::disable();
}
// find all I/O APICs (usually one).
for entry in madt.iter() {
match entry {
MadtEntry::IoApic(ioapic) => handle_ioapic(active_table, ioapic),
MadtEntry::IntSrcOverride(src_override) => handle_src_override(src_override),
_ => (),
}
}
}
println!("I/O APICs: {:?}, overrides: {:?}", ioapics(), src_overrides());
// map the legacy PC-compatible IRQs (0-15) to 32-47, just like we did with 8259 PIC (if it
// wouldn't have been disabled due to this I/O APIC)
for legacy_irq in 0..=15 {
let (gsi, trigger_mode, polarity) = match get_override(legacy_irq) {
Some(over) => (over.gsi, over.trigger_mode, over.polarity),
None => {
if src_overrides().iter().any(|over| over.gsi == u32::from(legacy_irq) && over.bus_irq != legacy_irq) && !src_overrides().iter().any(|over| over.bus_irq == legacy_irq) {
// there's an IRQ conflict, making this legacy IRQ inaccessible.
continue;
}
(legacy_irq.into(), TriggerMode::ConformsToSpecs, Polarity::ConformsToSpecs)
}
};
let apic = match find_ioapic(gsi) {
Some(ioapic) => ioapic,
None => {
println!("Unable to find a suitable APIC for legacy IRQ {} (GSI {}). It will not be mapped.", legacy_irq, gsi);
continue;
}
};
let redir_tbl_index = (gsi - apic.gsi_start) as u8;
let map_info = MapInfo {
// only send to the BSP
dest: bsp_apic_id,
dest_mode: DestinationMode::Physical,
delivery_mode: DeliveryMode::Fixed,
mask: false,
polarity: match polarity {
Polarity::ActiveHigh => ApicPolarity::ActiveHigh,
Polarity::ActiveLow => ApicPolarity::ActiveLow,
Polarity::ConformsToSpecs => ApicPolarity::ActiveHigh,
},
trigger_mode: match trigger_mode {
TriggerMode::Edge => ApicTriggerMode::Edge,
TriggerMode::Level => ApicTriggerMode::Level,
TriggerMode::ConformsToSpecs => ApicTriggerMode::Edge,
},
vector: 32 + legacy_irq,
};
apic.map(redir_tbl_index, map_info);
}
println!("I/O APICs: {:?}, overrides: {:?}", ioapics(), src_overrides());
irq::set_irq_method(irq::IrqMethod::Apic);
// tell the firmware that we're using APIC rather than the default 8259 PIC.
// FIXME: With ACPI moved to userspace, we should instead allow userspace to check whether the
// IOAPIC has been initialized, and then subsequently let some ACPI driver call the AML from
// userspace.
/*#[cfg(feature = "acpi")]
{
let method = {
let namespace_guard = crate::acpi::ACPI_TABLE.namespace.read();
if let Some(value) = namespace_guard.as_ref().unwrap().get("\\_PIC") {
value.get_as_method().ok()
} else {
None
}
};
if let Some(m) = method {
m.execute("\\_PIC".into(), vec!(crate::acpi::aml::AmlValue::Integer(1)));
}
}*/
}
fn get_override(irq: u8) -> Option<&'static Override> {
src_overrides().iter().find(|over| over.bus_irq == irq)
}
fn resolve(irq: u8) -> u32 {
get_override(irq).map_or(u32::from(irq), |over| over.gsi)
}
fn find_ioapic(gsi: u32) -> Option<&'static IoApic> {
ioapics().iter().find(|apic| gsi >= apic.gsi_start && gsi < apic.gsi_start + u32::from(apic.count))
}
pub unsafe fn mask(irq: u8) {
let gsi = resolve(irq);
let apic = match find_ioapic(gsi) {
Some(a) => a,
None => return,
};
apic.set_mask(gsi, true);
}
pub unsafe fn unmask(irq: u8) {
let gsi = resolve(irq);
let apic = match find_ioapic(gsi) {
Some(a) => a,
None => return,
};
apic.set_mask(gsi, false);
} | random_line_split |
|
ioapic.rs | use core::{fmt, ptr};
use alloc::vec::Vec;
use spin::Mutex;
#[cfg(feature = "acpi")]
use crate::acpi::madt::{self, Madt, MadtEntry, MadtIoApic, MadtIntSrcOverride};
use crate::arch::interrupt::irq;
use crate::memory::Frame;
use crate::paging::{KernelMapper, Page, PageFlags, PhysicalAddress, RmmA, RmmArch, VirtualAddress};
use crate::paging::entry::EntryFlags;
use super::super::cpuid::cpuid;
use super::pic;
pub struct IoApicRegs {
pointer: *const u32,
}
impl IoApicRegs {
fn ioregsel(&self) -> *const u32 {
self.pointer
}
fn iowin(&self) -> *const u32 {
// offset 0x10
unsafe { self.pointer.offset(4) }
}
fn write_ioregsel(&mut self, value: u32) {
unsafe { ptr::write_volatile::<u32>(self.ioregsel() as *mut u32, value) }
}
fn read_iowin(&self) -> u32 {
unsafe { ptr::read_volatile::<u32>(self.iowin()) }
}
fn write_iowin(&mut self, value: u32) {
unsafe { ptr::write_volatile::<u32>(self.iowin() as *mut u32, value) }
}
fn read_reg(&mut self, reg: u8) -> u32 {
self.write_ioregsel(reg.into());
self.read_iowin()
}
fn write_reg(&mut self, reg: u8, value: u32) {
self.write_ioregsel(reg.into());
self.write_iowin(value);
}
pub fn read_ioapicid(&mut self) -> u32 {
self.read_reg(0x00)
}
pub fn write_ioapicid(&mut self, value: u32) {
self.write_reg(0x00, value);
}
pub fn read_ioapicver(&mut self) -> u32 {
self.read_reg(0x01)
}
pub fn read_ioapicarb(&mut self) -> u32 {
self.read_reg(0x02)
}
pub fn read_ioredtbl(&mut self, idx: u8) -> u64 {
assert!(idx < 24);
let lo = self.read_reg(0x10 + idx * 2);
let hi = self.read_reg(0x10 + idx * 2 + 1);
u64::from(lo) | (u64::from(hi) << 32)
}
pub fn write_ioredtbl(&mut self, idx: u8, value: u64) {
assert!(idx < 24);
let lo = value as u32;
let hi = (value >> 32) as u32;
self.write_reg(0x10 + idx * 2, lo);
self.write_reg(0x10 + idx * 2 + 1, hi);
}
pub fn max_redirection_table_entries(&mut self) -> u8 {
let ver = self.read_ioapicver();
((ver & 0x00FF_0000) >> 16) as u8
}
pub fn id(&mut self) -> u8 {
let id_reg = self.read_ioapicid();
((id_reg & 0x0F00_0000) >> 24) as u8
}
}
pub struct IoApic {
regs: Mutex<IoApicRegs>,
gsi_start: u32,
count: u8,
}
impl IoApic {
pub fn new(regs_base: *const u32, gsi_start: u32) -> Self {
let mut regs = IoApicRegs { pointer: regs_base };
let count = regs.max_redirection_table_entries();
Self {
regs: Mutex::new(regs),
gsi_start,
count,
}
}
/// Map an interrupt vector to a physical local APIC ID of a processor (thus physical mode).
pub fn map(&self, idx: u8, info: MapInfo) {
self.regs.lock().write_ioredtbl(idx, info.as_raw())
}
pub fn set_mask(&self, gsi: u32, mask: bool) {
let idx = (gsi - self.gsi_start) as u8;
let mut guard = self.regs.lock();
let mut reg = guard.read_ioredtbl(idx);
reg &= !(1 << 16);
reg |= u64::from(mask) << 16;
guard.write_ioredtbl(idx, reg);
}
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum ApicTriggerMode {
Edge = 0,
Level = 1,
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum ApicPolarity {
ActiveHigh = 0,
ActiveLow = 1,
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum DestinationMode {
Physical = 0,
Logical = 1,
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum DeliveryMode {
Fixed = 0b000,
LowestPriority = 0b001,
Smi = 0b010,
Nmi = 0b100,
Init = 0b101,
ExtInt = 0b111,
}
#[derive(Clone, Copy, Debug)]
pub struct MapInfo {
pub dest: u8,
pub mask: bool,
pub trigger_mode: ApicTriggerMode,
pub polarity: ApicPolarity,
pub dest_mode: DestinationMode,
pub delivery_mode: DeliveryMode,
pub vector: u8,
}
impl MapInfo {
pub fn as_raw(&self) -> u64 {
assert!(self.vector >= 0x20);
assert!(self.vector <= 0xFE);
// TODO: Check for reserved fields.
(u64::from(self.dest) << 56)
| (u64::from(self.mask) << 16)
| ((self.trigger_mode as u64) << 15)
| ((self.polarity as u64) << 13)
| ((self.dest_mode as u64) << 11)
| ((self.delivery_mode as u64) << 8)
| u64::from(self.vector)
}
}
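// A sketch of the 64-bit IOREDTBL layout that `as_raw` assumes (per the I/O APIC datasheet):
// bits 63:56 destination, bit 16 mask, bit 15 trigger mode, bit 13 polarity,
// bit 11 destination mode, bits 10:8 delivery mode, bits 7:0 vector.
// With invented values, an entry routing vector 0x21 to local APIC ID 0 in physical mode
// collapses to just the vector byte:
// MapInfo {
//     dest: 0, mask: false, trigger_mode: ApicTriggerMode::Edge,
//     polarity: ApicPolarity::ActiveHigh, dest_mode: DestinationMode::Physical,
//     delivery_mode: DeliveryMode::Fixed, vector: 0x21,
// }.as_raw() == 0x21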
impl fmt::Debug for IoApic {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
struct RedirTable<'a>(&'a Mutex<IoApicRegs>);
impl<'a> fmt::Debug for RedirTable<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut guard = self.0.lock();
let count = guard.max_redirection_table_entries();
f.debug_list().entries((0..count).map(|i| guard.read_ioredtbl(i))).finish()
}
}
f.debug_struct("IoApic")
.field("redir_table", &RedirTable(&self.regs))
.field("gsi_start", &self.gsi_start)
.field("count", &self.count)
.finish()
}
}
#[derive(Clone, Copy, Debug)]
pub enum TriggerMode {
ConformsToSpecs,
Edge,
Level,
}
#[derive(Clone, Copy, Debug)]
pub enum Polarity {
ConformsToSpecs,
ActiveHigh,
ActiveLow,
}
#[derive(Clone, Copy, Debug)]
pub struct Override {
bus_irq: u8,
gsi: u32,
trigger_mode: TriggerMode,
polarity: Polarity,
}
// static mut because only the AP initializes the I/O APIC, and when that is done, it's solely
// accessed immutably.
static mut IOAPICS: Option<Vec<IoApic>> = None;
// static mut for the same reason as above
static mut SRC_OVERRIDES: Option<Vec<Override>> = None;
pub fn ioapics() -> &'static [IoApic] {
unsafe {
IOAPICS.as_ref().map_or(&[], |vector| &vector[..])
}
}
pub fn src_overrides() -> &'static [Override] {
unsafe {
SRC_OVERRIDES.as_ref().map_or(&[], |vector| &vector[..])
}
}
#[cfg(feature = "acpi")]
pub unsafe fn handle_ioapic(mapper: &mut KernelMapper, madt_ioapic: &'static MadtIoApic) {
// map the I/O APIC registers
let frame = Frame::containing_address(PhysicalAddress::new(madt_ioapic.address as usize));
let page = Page::containing_address(VirtualAddress::new(crate::IOAPIC_OFFSET));
assert!(mapper.translate(page.start_address()).is_none());
mapper
.get_mut()
.expect("expected KernelMapper not to be locked re-entrant while mapping I/O APIC memory")
.map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::NO_CACHE.bits(), true))
.expect("failed to map I/O APIC")
.flush();
let ioapic_registers = page.start_address().data() as *const u32;
let ioapic = IoApic::new(ioapic_registers, madt_ioapic.gsi_base);
assert_eq!(ioapic.regs.lock().id(), madt_ioapic.id, "mismatched ACPI MADT I/O APIC ID, and the ID reported by the I/O APIC");
IOAPICS.get_or_insert_with(Vec::new).push(ioapic);
}
#[cfg(feature = "acpi")]
pub unsafe fn handle_src_override(src_override: &'static MadtIntSrcOverride) {
let flags = src_override.flags;
let polarity_raw = (flags & 0x0003) as u8;
let trigger_mode_raw = ((flags & 0x000C) >> 2) as u8;
let polarity = match polarity_raw {
0b00 => Polarity::ConformsToSpecs,
0b01 => Polarity::ActiveHigh,
0b10 => return, // reserved
0b11 => Polarity::ActiveLow,
_ => unreachable!(),
};
let trigger_mode = match trigger_mode_raw {
0b00 => TriggerMode::ConformsToSpecs,
0b01 => TriggerMode::Edge,
0b10 => return, // reserved
0b11 => TriggerMode::Level,
_ => unreachable!(),
};
let over = Override {
bus_irq: src_override.irq_source,
gsi: src_override.gsi_base,
polarity,
trigger_mode,
};
SRC_OVERRIDES.get_or_insert_with(Vec::new).push(over);
}
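// A worked example of the decoding above, using an assumed flags value rather than one read
// from real firmware: flags == 0x000D gives polarity bits 0b01 (ActiveHigh) and trigger-mode
// bits 0b11 (Level), i.e. the override records a level-triggered, active-high interrupt line.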
pub unsafe fn init(active_table: &mut KernelMapper) {
let bsp_apic_id = cpuid().unwrap().get_feature_info().unwrap().initial_local_apic_id(); // TODO: remove unwraps
// search the madt for all IOAPICs.
#[cfg(feature = "acpi")]
{
let madt: &'static Madt = match madt::MADT.as_ref() {
Some(m) => m,
// TODO: Parse MP tables too.
None => return,
};
if madt.flags & madt::FLAG_PCAT != 0 {
pic::disable();
}
// find all I/O APICs (usually one).
for entry in madt.iter() {
match entry {
MadtEntry::IoApic(ioapic) => handle_ioapic(active_table, ioapic),
MadtEntry::IntSrcOverride(src_override) => handle_src_override(src_override),
_ => (),
}
}
}
println!("I/O APICs: {:?}, overrides: {:?}", ioapics(), src_overrides());
// map the legacy PC-compatible IRQs (0-15) to vectors 32-47, just as the 8259 PIC would have
// (had it not been disabled in favor of this I/O APIC)
for legacy_irq in 0..=15 {
let (gsi, trigger_mode, polarity) = match get_override(legacy_irq) {
Some(over) => (over.gsi, over.trigger_mode, over.polarity),
None => {
if src_overrides().iter().any(|over| over.gsi == u32::from(legacy_irq) && over.bus_irq != legacy_irq) && !src_overrides().iter().any(|over| over.bus_irq == legacy_irq) {
// there's an IRQ conflict, making this legacy IRQ inaccessible.
continue;
}
(legacy_irq.into(), TriggerMode::ConformsToSpecs, Polarity::ConformsToSpecs)
}
};
let apic = match find_ioapic(gsi) {
Some(ioapic) => ioapic,
None => {
println!("Unable to find a suitable APIC for legacy IRQ {} (GSI {}). It will not be mapped.", legacy_irq, gsi);
continue;
}
};
let redir_tbl_index = (gsi - apic.gsi_start) as u8;
let map_info = MapInfo {
// only send to the BSP
dest: bsp_apic_id,
dest_mode: DestinationMode::Physical,
delivery_mode: DeliveryMode::Fixed,
mask: false,
polarity: match polarity {
Polarity::ActiveHigh => ApicPolarity::ActiveHigh,
Polarity::ActiveLow => ApicPolarity::ActiveLow,
Polarity::ConformsToSpecs => ApicPolarity::ActiveHigh,
},
trigger_mode: match trigger_mode {
TriggerMode::Edge => ApicTriggerMode::Edge,
TriggerMode::Level => ApicTriggerMode::Level,
TriggerMode::ConformsToSpecs => ApicTriggerMode::Edge,
},
vector: 32 + legacy_irq,
};
apic.map(redir_tbl_index, map_info);
}
println!("I/O APICs: {:?}, overrides: {:?}", ioapics(), src_overrides());
irq::set_irq_method(irq::IrqMethod::Apic);
// tell the firmware that we're using APIC rather than the default 8259 PIC.
// FIXME: With ACPI moved to userspace, we should instead allow userspace to check whether the
// IOAPIC has been initialized, and then subsequently let some ACPI driver call the AML from
// userspace.
/*#[cfg(feature = "acpi")]
{
let method = {
let namespace_guard = crate::acpi::ACPI_TABLE.namespace.read();
if let Some(value) = namespace_guard.as_ref().unwrap().get("\\_PIC") {
value.get_as_method().ok()
} else {
None
}
};
if let Some(m) = method {
m.execute("\\_PIC".into(), vec!(crate::acpi::aml::AmlValue::Integer(1)));
}
}*/
}
fn get_override(irq: u8) -> Option<&'static Override> |
fn resolve(irq: u8) -> u32 {
get_override(irq).map_or(u32::from(irq), |over| over.gsi)
}
fn find_ioapic(gsi: u32) -> Option<&'static IoApic> {
ioapics().iter().find(|apic| gsi >= apic.gsi_start && gsi < apic.gsi_start + u32::from(apic.count))
}
pub unsafe fn mask(irq: u8) {
let gsi = resolve(irq);
let apic = match find_ioapic(gsi) {
Some(a) => a,
None => return,
};
apic.set_mask(gsi, true);
}
pub unsafe fn unmask(irq: u8) {
let gsi = resolve(irq);
let apic = match find_ioapic(gsi) {
Some(a) => a,
None => return,
};
apic.set_mask(gsi, false);
}
| {
src_overrides().iter().find(|over| over.bus_irq == irq)
} | identifier_body |
ioapic.rs | use core::{fmt, ptr};
use alloc::vec::Vec;
use spin::Mutex;
#[cfg(feature = "acpi")]
use crate::acpi::madt::{self, Madt, MadtEntry, MadtIoApic, MadtIntSrcOverride};
use crate::arch::interrupt::irq;
use crate::memory::Frame;
use crate::paging::{KernelMapper, Page, PageFlags, PhysicalAddress, RmmA, RmmArch, VirtualAddress};
use crate::paging::entry::EntryFlags;
use super::super::cpuid::cpuid;
use super::pic;
pub struct IoApicRegs {
pointer: *const u32,
}
impl IoApicRegs {
fn ioregsel(&self) -> *const u32 {
self.pointer
}
fn iowin(&self) -> *const u32 {
// offset 0x10
unsafe { self.pointer.offset(4) }
}
fn write_ioregsel(&mut self, value: u32) {
unsafe { ptr::write_volatile::<u32>(self.ioregsel() as *mut u32, value) }
}
fn read_iowin(&self) -> u32 {
unsafe { ptr::read_volatile::<u32>(self.iowin()) }
}
fn write_iowin(&mut self, value: u32) {
unsafe { ptr::write_volatile::<u32>(self.iowin() as *mut u32, value) }
}
fn read_reg(&mut self, reg: u8) -> u32 {
self.write_ioregsel(reg.into());
self.read_iowin()
}
fn write_reg(&mut self, reg: u8, value: u32) {
self.write_ioregsel(reg.into());
self.write_iowin(value);
}
pub fn read_ioapicid(&mut self) -> u32 {
self.read_reg(0x00)
}
pub fn write_ioapicid(&mut self, value: u32) {
self.write_reg(0x00, value);
}
pub fn read_ioapicver(&mut self) -> u32 {
self.read_reg(0x01)
}
pub fn read_ioapicarb(&mut self) -> u32 {
self.read_reg(0x02)
}
pub fn read_ioredtbl(&mut self, idx: u8) -> u64 {
assert!(idx < 24);
let lo = self.read_reg(0x10 + idx * 2);
let hi = self.read_reg(0x10 + idx * 2 + 1);
u64::from(lo) | (u64::from(hi) << 32)
}
pub fn write_ioredtbl(&mut self, idx: u8, value: u64) {
assert!(idx < 24);
let lo = value as u32;
let hi = (value >> 32) as u32;
self.write_reg(0x10 + idx * 2, lo);
self.write_reg(0x10 + idx * 2 + 1, hi);
}
pub fn max_redirection_table_entries(&mut self) -> u8 {
let ver = self.read_ioapicver();
((ver & 0x00FF_0000) >> 16) as u8
}
pub fn id(&mut self) -> u8 {
let id_reg = self.read_ioapicid();
((id_reg & 0x0F00_0000) >> 24) as u8
}
}
pub struct IoApic {
regs: Mutex<IoApicRegs>,
gsi_start: u32,
count: u8,
}
impl IoApic {
pub fn new(regs_base: *const u32, gsi_start: u32) -> Self {
let mut regs = IoApicRegs { pointer: regs_base };
let count = regs.max_redirection_table_entries();
Self {
regs: Mutex::new(regs),
gsi_start,
count,
}
}
/// Map an interrupt vector to a physical local APIC ID of a processor (thus physical mode).
pub fn map(&self, idx: u8, info: MapInfo) {
self.regs.lock().write_ioredtbl(idx, info.as_raw())
}
pub fn set_mask(&self, gsi: u32, mask: bool) {
let idx = (gsi - self.gsi_start) as u8;
let mut guard = self.regs.lock();
let mut reg = guard.read_ioredtbl(idx);
reg &= !(1 << 16);
reg |= u64::from(mask) << 16;
guard.write_ioredtbl(idx, reg);
}
}
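// Usage sketch (assumed, mirroring the `mask`/`unmask` helpers at the end of this file):
// `set_mask` rewrites only bit 16 (the mask bit) of the redirection entry for the given GSI, e.g.
// if let Some(apic) = find_ioapic(gsi) { apic.set_mask(gsi, true); }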
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum ApicTriggerMode {
Edge = 0,
Level = 1,
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum ApicPolarity {
ActiveHigh = 0,
ActiveLow = 1,
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum DestinationMode {
Physical = 0,
Logical = 1,
}
#[repr(u8)]
#[derive(Clone, Copy, Debug)]
pub enum DeliveryMode {
Fixed = 0b000,
LowestPriority = 0b001,
Smi = 0b010,
Nmi = 0b100,
Init = 0b101,
ExtInt = 0b111,
}
#[derive(Clone, Copy, Debug)]
pub struct MapInfo {
pub dest: u8,
pub mask: bool,
pub trigger_mode: ApicTriggerMode,
pub polarity: ApicPolarity,
pub dest_mode: DestinationMode,
pub delivery_mode: DeliveryMode,
pub vector: u8,
}
impl MapInfo {
pub fn as_raw(&self) -> u64 {
assert!(self.vector >= 0x20);
assert!(self.vector <= 0xFE);
// TODO: Check for reserved fields.
(u64::from(self.dest) << 56)
| (u64::from(self.mask) << 16)
| ((self.trigger_mode as u64) << 15)
| ((self.polarity as u64) << 13)
| ((self.dest_mode as u64) << 11)
| ((self.delivery_mode as u64) << 8)
| u64::from(self.vector)
}
}
impl fmt::Debug for IoApic {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
struct | <'a>(&'a Mutex<IoApicRegs>);
impl<'a> fmt::Debug for RedirTable<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut guard = self.0.lock();
let count = guard.max_redirection_table_entries();
f.debug_list().entries((0..count).map(|i| guard.read_ioredtbl(i))).finish()
}
}
f.debug_struct("IoApic")
.field("redir_table", &RedirTable(&self.regs))
.field("gsi_start", &self.gsi_start)
.field("count", &self.count)
.finish()
}
}
#[derive(Clone, Copy, Debug)]
pub enum TriggerMode {
ConformsToSpecs,
Edge,
Level,
}
#[derive(Clone, Copy, Debug)]
pub enum Polarity {
ConformsToSpecs,
ActiveHigh,
ActiveLow,
}
#[derive(Clone, Copy, Debug)]
pub struct Override {
bus_irq: u8,
gsi: u32,
trigger_mode: TriggerMode,
polarity: Polarity,
}
// static mut because only the AP initializes the I/O APIC, and when that is done, it's solely
// accessed immutably.
static mut IOAPICS: Option<Vec<IoApic>> = None;
// static mut for the same reason as above
static mut SRC_OVERRIDES: Option<Vec<Override>> = None;
pub fn ioapics() -> &'static [IoApic] {
unsafe {
IOAPICS.as_ref().map_or(&[], |vector| &vector[..])
}
}
pub fn src_overrides() -> &'static [Override] {
unsafe {
SRC_OVERRIDES.as_ref().map_or(&[], |vector| &vector[..])
}
}
#[cfg(feature = "acpi")]
pub unsafe fn handle_ioapic(mapper: &mut KernelMapper, madt_ioapic: &'static MadtIoApic) {
// map the I/O APIC registers
let frame = Frame::containing_address(PhysicalAddress::new(madt_ioapic.address as usize));
let page = Page::containing_address(VirtualAddress::new(crate::IOAPIC_OFFSET));
assert!(mapper.translate(page.start_address()).is_none());
mapper
.get_mut()
.expect("expected KernelMapper not to be locked re-entrant while mapping I/O APIC memory")
.map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::NO_CACHE.bits(), true))
.expect("failed to map I/O APIC")
.flush();
let ioapic_registers = page.start_address().data() as *const u32;
let ioapic = IoApic::new(ioapic_registers, madt_ioapic.gsi_base);
assert_eq!(ioapic.regs.lock().id(), madt_ioapic.id, "mismatched ACPI MADT I/O APIC ID, and the ID reported by the I/O APIC");
IOAPICS.get_or_insert_with(Vec::new).push(ioapic);
}
#[cfg(feature = "acpi")]
pub unsafe fn handle_src_override(src_override: &'static MadtIntSrcOverride) {
let flags = src_override.flags;
let polarity_raw = (flags & 0x0003) as u8;
let trigger_mode_raw = ((flags & 0x000C) >> 2) as u8;
let polarity = match polarity_raw {
0b00 => Polarity::ConformsToSpecs,
0b01 => Polarity::ActiveHigh,
0b10 => return, // reserved
0b11 => Polarity::ActiveLow,
_ => unreachable!(),
};
let trigger_mode = match trigger_mode_raw {
0b00 => TriggerMode::ConformsToSpecs,
0b01 => TriggerMode::Edge,
0b10 => return, // reserved
0b11 => TriggerMode::Level,
_ => unreachable!(),
};
let over = Override {
bus_irq: src_override.irq_source,
gsi: src_override.gsi_base,
polarity,
trigger_mode,
};
SRC_OVERRIDES.get_or_insert_with(Vec::new).push(over);
}
pub unsafe fn init(active_table: &mut KernelMapper) {
let bsp_apic_id = cpuid().unwrap().get_feature_info().unwrap().initial_local_apic_id(); // TODO: remove unwraps
// search the madt for all IOAPICs.
#[cfg(feature = "acpi")]
{
let madt: &'static Madt = match madt::MADT.as_ref() {
Some(m) => m,
// TODO: Parse MP tables too.
None => return,
};
if madt.flags & madt::FLAG_PCAT != 0 {
pic::disable();
}
// find all I/O APICs (usually one).
for entry in madt.iter() {
match entry {
MadtEntry::IoApic(ioapic) => handle_ioapic(active_table, ioapic),
MadtEntry::IntSrcOverride(src_override) => handle_src_override(src_override),
_ => (),
}
}
}
println!("I/O APICs: {:?}, overrides: {:?}", ioapics(), src_overrides());
// map the legacy PC-compatible IRQs (0-15) to vectors 32-47, just as the 8259 PIC would have
// (had it not been disabled in favor of this I/O APIC)
for legacy_irq in 0..=15 {
let (gsi, trigger_mode, polarity) = match get_override(legacy_irq) {
Some(over) => (over.gsi, over.trigger_mode, over.polarity),
None => {
if src_overrides().iter().any(|over| over.gsi == u32::from(legacy_irq) && over.bus_irq != legacy_irq) && !src_overrides().iter().any(|over| over.bus_irq == legacy_irq) {
// there's an IRQ conflict, making this legacy IRQ inaccessible.
continue;
}
(legacy_irq.into(), TriggerMode::ConformsToSpecs, Polarity::ConformsToSpecs)
}
};
let apic = match find_ioapic(gsi) {
Some(ioapic) => ioapic,
None => {
println!("Unable to find a suitable APIC for legacy IRQ {} (GSI {}). It will not be mapped.", legacy_irq, gsi);
continue;
}
};
let redir_tbl_index = (gsi - apic.gsi_start) as u8;
let map_info = MapInfo {
// only send to the BSP
dest: bsp_apic_id,
dest_mode: DestinationMode::Physical,
delivery_mode: DeliveryMode::Fixed,
mask: false,
polarity: match polarity {
Polarity::ActiveHigh => ApicPolarity::ActiveHigh,
Polarity::ActiveLow => ApicPolarity::ActiveLow,
Polarity::ConformsToSpecs => ApicPolarity::ActiveHigh,
},
trigger_mode: match trigger_mode {
TriggerMode::Edge => ApicTriggerMode::Edge,
TriggerMode::Level => ApicTriggerMode::Level,
TriggerMode::ConformsToSpecs => ApicTriggerMode::Edge,
},
vector: 32 + legacy_irq,
};
apic.map(redir_tbl_index, map_info);
}
println!("I/O APICs: {:?}, overrides: {:?}", ioapics(), src_overrides());
irq::set_irq_method(irq::IrqMethod::Apic);
// tell the firmware that we're using APIC rather than the default 8259 PIC.
// FIXME: With ACPI moved to userspace, we should instead allow userspace to check whether the
// IOAPIC has been initialized, and then subsequently let some ACPI driver call the AML from
// userspace.
/*#[cfg(feature = "acpi")]
{
let method = {
let namespace_guard = crate::acpi::ACPI_TABLE.namespace.read();
if let Some(value) = namespace_guard.as_ref().unwrap().get("\\_PIC") {
value.get_as_method().ok()
} else {
None
}
};
if let Some(m) = method {
m.execute("\\_PIC".into(), vec!(crate::acpi::aml::AmlValue::Integer(1)));
}
}*/
}
fn get_override(irq: u8) -> Option<&'static Override> {
src_overrides().iter().find(|over| over.bus_irq == irq)
}
fn resolve(irq: u8) -> u32 {
get_override(irq).map_or(u32::from(irq), |over| over.gsi)
}
fn find_ioapic(gsi: u32) -> Option<&'static IoApic> {
ioapics().iter().find(|apic| gsi >= apic.gsi_start && gsi < apic.gsi_start + u32::from(apic.count))
}
pub unsafe fn mask(irq: u8) {
let gsi = resolve(irq);
let apic = match find_ioapic(gsi) {
Some(a) => a,
None => return,
};
apic.set_mask(gsi, true);
}
pub unsafe fn unmask(irq: u8) {
let gsi = resolve(irq);
let apic = match find_ioapic(gsi) {
Some(a) => a,
None => return,
};
apic.set_mask(gsi, false);
}
| RedirTable | identifier_name |
handler.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"github.com/ghodss/yaml"
"github.com/golang/glog"
"github.com/kubernetes-incubator/service-catalog/pkg/apis/servicecatalog"
"github.com/kubernetes-incubator/service-catalog/pkg/brokerapi"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/apiclient"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/apiclient/util"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/injector"
"github.com/satori/go.uuid"
"k8s.io/client-go/1.5/kubernetes"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)
const (
catalogURLFormatString = "%s/v2/catalog"
serviceInstanceFormatString = "%s/v2/service_instances/%s"
bindingFormatString = "%s/v2/service_instances/%s/service_bindings/%s"
defaultNamespace = "default"
)
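// For example, with an illustrative brokerURL = "http://broker.example.com":
//   fmt.Sprintf(bindingFormatString, brokerURL, "inst-1234", "bind-5678")
// yields "http://broker.example.com/v2/service_instances/inst-1234/service_bindings/bind-5678".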
// Handler defines an interface used as a facade for data access operations.
// The controller uses the functions of this interface as callbacks for various
// events.
type Handler interface {
// CreateServiceInstance takes in a (possibly incomplete)
// ServiceInstance and will either create or update an
// existing one.
CreateServiceInstance(*servicecatalog.Instance) (*servicecatalog.Instance, error)
// CreateServiceBinding takes in a (possibly incomplete)
// ServiceBinding and will either create or update an
// existing one.
CreateServiceBinding(*servicecatalog.Binding) (*servicecatalog.Binding, error)
DeleteServiceBinding(*servicecatalog.Binding) error
// CreateServiceBroker takes in a (possibly incomplete)
// ServiceBroker and will either create or update an
// existing one.
CreateServiceBroker(*servicecatalog.Broker) (*servicecatalog.Broker, error)
}
type handler struct {
k8sClient kubernetes.Interface
apiClient apiclient.APIClient
injector injector.BindingInjector
newClientFunc func(name, url, username, password string) brokerapi.BrokerClient
}
func createHandler(k8sClient kubernetes.Interface, client apiclient.APIClient, injector injector.BindingInjector, newClientFn brokerapi.CreateFunc) *handler {
return &handler{
k8sClient: k8sClient,
apiClient: client,
injector: injector,
newClientFunc: newClientFn,
}
}
func (h *handler) updateServiceInstance(in *servicecatalog.Instance) error {
// Currently there's no difference between create / update,
// but to prepare for the future, these are split into two
// different methods for now.
return h.createServiceInstance(in)
}
func (h *handler) createServiceInstance(in *servicecatalog.Instance) error {
broker, err := util.GetBrokerByServiceClassName(h.apiClient.Brokers(), h.apiClient.ServiceClasses(), in.Spec.ServiceClassName)
if err != nil {
return err
}
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, broker)
if err != nil {
return err
}
client := h.newClientFunc(broker.Name, broker.Spec.URL, authUsername, authPassword)
parameters := make(map[string]interface{})
if in.Spec.Parameters != nil && len(in.Spec.Parameters.Raw) > 0 {
err = yaml.Unmarshal([]byte(in.Spec.Parameters.Raw), ¶meters)
if err != nil {
glog.Errorf("Failed to unmarshal Instance parameters\n%s\n %v", in.Spec.Parameters, err)
return err
}
}
createReq := &brokerapi.CreateServiceInstanceRequest{
ServiceID: in.Spec.OSBServiceID,
PlanID: in.Spec.OSBPlanID,
Parameters: parameters,
}
_, err = client.CreateServiceInstance(in.Spec.OSBGUID, createReq)
return err
}
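// Roughly, the request assembled above corresponds to an OSB provision call of the form
// (a sketch only; the exact wire format is owned by the brokerapi client):
//
//   PUT {broker URL}/v2/service_instances/{in.Spec.OSBGUID}
//   {"service_id": "...", "plan_id": "...", "parameters": {...}}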
// GetAuthCredentialsFromBroker returns the auth credentials, if any,
// contained in the secret referenced in the Broker's AuthSecret field, or
// returns an error. If the AuthSecret field is nil, empty values are
// returned.
func GetAuthCredentialsFromBroker(client kubernetes.Interface, broker *servicecatalog.Broker) (username, password string, err error) {
if broker.Spec.AuthSecret == nil {
return "", "", nil
}
authSecret, err := client.Core().Secrets(broker.Spec.AuthSecret.Namespace).Get(broker.Spec.AuthSecret.Name)
if err != nil {
return "", "", err
}
usernameBytes, ok := authSecret.Data["username"]
if !ok {
return "", "", fmt.Errorf("auth secret didn't contain username")
}
passwordBytes, ok := authSecret.Data["password"]
if !ok {
return "", "", fmt.Errorf("auth secret didn't contain password")
}
return string(usernameBytes), string(passwordBytes), nil
}
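// For reference, a minimal Secret of the shape this function expects (all names here are
// illustrative, not taken from a real cluster):
//
//   apiVersion: v1
//   kind: Secret
//   metadata:
//     name: broker-auth
//     namespace: default
//   data:
//     username: <base64>
//     password: <base64>
//
// The Broker's Spec.AuthSecret reference then carries that namespace and name.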
///////////////////////////////////////////////////////////////////////////////
// All the methods implementing the Handler interface go here for clarity's sake.
///////////////////////////////////////////////////////////////////////////////
func (h *handler) CreateServiceInstance(in *servicecatalog.Instance) (*servicecatalog.Instance, error) {
serviceID, planID, planName, err := util.GetServicePlanInfo(
h.apiClient.ServiceClasses(),
in.Spec.ServiceClassName,
in.Spec.PlanName,
)
if err != nil {
glog.Errorf("Error fetching service ID: %v", err)
return nil, err
}
in.Spec.OSBServiceID = serviceID
in.Spec.OSBPlanID = planID
in.Spec.PlanName = planName
if in.Spec.OSBGUID == "" {
in.Spec.OSBGUID = uuid.NewV4().String()
}
glog.Infof("Instantiating service %s using service/plan %s : %s", in.Name, serviceID, planID)
err = h.createServiceInstance(in)
in.Status = servicecatalog.InstanceStatus{}
if err != nil {
in.Status.Conditions = []servicecatalog.InstanceCondition{
{
Type: servicecatalog.InstanceConditionProvisionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
in.Status.Conditions = []servicecatalog.InstanceCondition{
{
Type: servicecatalog.InstanceConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
}
glog.Infof("Updating Service %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Instances(in.ObjectMeta.Namespace).Update(in)
}
// DeleteServiceBinding executes all the actions needed before a binding resource can be
// safely deleted. These actions include but are not necessarily limited to deleting the
// kubernetes resources associated with the binding and calling the unbind REST operation
// on the backing OSB API
func (h *handler) DeleteServiceBinding(sb *servicecatalog.Binding) error {
// This logic to set and update the timestamp is TPR-specific; it is to be moved to the API server.
dts := metav1.Now()
sb.DeletionTimestamp = &dts
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// uninject
if err := h.injector.Uninject(sb); err != nil {
// if 0 conditions, uninject and drop condition for uninject
// TODO: add failure condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
// TODO: add success condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// TODO: unbind && add conditions (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if err := h.unbind(sb); err != nil {
// TODO: add failure condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
// TODO: add success condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// This is where the binding is _actually_ deleted after all necessary actions have been taken
if err := h.apiClient.Bindings(sb.Namespace).Delete(sb.Name); err != nil {
// TODO: add deletion error condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
return nil
}
func (h *handler) CreateServiceBinding(in *servicecatalog.Binding) (*servicecatalog.Binding, error) |
func (h *handler) CreateServiceBroker(in *servicecatalog.Broker) (*servicecatalog.Broker, error) {
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, in)
if err != nil {
return nil, err
}
client := h.newClientFunc(in.Name, in.Spec.URL, authUsername, authPassword)
sbcat, err := client.GetCatalog()
if err != nil {
return nil, err
}
catalog, err := convertCatalog(sbcat)
if err != nil {
return nil, err
}
glog.Infof("Adding a broker %s catalog:\n%v\n", in.Name, catalog)
for _, sc := range catalog {
sc.BrokerName = in.Name
if _, err := h.apiClient.ServiceClasses().Create(sc); err != nil {
return nil, err
}
}
in.Status.Conditions = []servicecatalog.BrokerCondition{
{
Type: servicecatalog.BrokerConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
glog.Infof("Updating Service Broker %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Brokers().Update(in)
}
// unbind walks the reference graph from b to its ancestral broker resource and uses the broker's
// credentials to issue the requisite DELETE call (against the OSB API to which the broker
// resource points), i.e. the unbind call on the OSB API server
func (h *handler) unbind(b *servicecatalog.Binding) error {
inst, err := instanceForBinding(h.apiClient, b)
if err != nil {
return err
}
sc, err := serviceClassForInstance(h.apiClient, inst)
if err != nil {
return err
}
broker, err := brokerForServiceClass(h.apiClient, sc)
if err != nil {
return err
}
authSecret, err := h.k8sClient.Core().Secrets(broker.Spec.AuthSecret.Namespace).Get(broker.Spec.AuthSecret.Name)
if err != nil {
return err
}
// TODO: username / password
client := h.newClientFunc(broker.Name, broker.Spec.URL, string(authSecret.Data["username"]), string(authSecret.Data["password"]))
return client.DeleteServiceBinding(inst.Spec.OSBGUID, b.Spec.OSBGUID)
}
// convertCatalog converts a service broker catalog into an array of ServiceClasses
func convertCatalog(in *brokerapi.Catalog) ([]*servicecatalog.ServiceClass, error) {
ret := make([]*servicecatalog.ServiceClass, len(in.Services))
for i, svc := range in.Services {
plans := convertServicePlans(svc.Plans)
ret[i] = &servicecatalog.ServiceClass{
Bindable: svc.Bindable,
Plans: plans,
PlanUpdatable: svc.PlanUpdateable,
OSBGUID: svc.ID,
OSBTags: svc.Tags,
OSBRequires: svc.Requires,
// OSBMetadata: svc.Metadata,
}
ret[i].SetName(svc.Name)
}
return ret, nil
}
func convertServicePlans(plans []brokerapi.ServicePlan) []servicecatalog.ServicePlan {
ret := make([]servicecatalog.ServicePlan, len(plans))
for i, plan := range plans {
ret[i] = servicecatalog.ServicePlan{
Name: plan.Name,
OSBGUID: plan.ID,
// OSBMetadata: plan.Metadata,
OSBFree: plan.Free,
}
}
return ret
}
| {
glog.Infof("Creating Service Binding: %v", in)
inst, err := instanceForBinding(h.apiClient, in)
if err != nil {
return nil, err
}
sc, err := serviceClassForInstance(h.apiClient, inst)
if err != nil {
return nil, err
}
broker, err := brokerForServiceClass(h.apiClient, sc)
if err != nil {
return nil, err
}
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, broker)
if err != nil {
return nil, err
}
client := h.newClientFunc(broker.Name, broker.Spec.URL, authUsername, authPassword)
// Assign UUID to binding.
if in.Spec.OSBGUID == "" {
in.Spec.OSBGUID = uuid.NewV4().String()
}
parameters := make(map[string]interface{})
if in.Spec.Parameters != nil && len(in.Spec.Parameters.Raw) > 0 {
err = yaml.Unmarshal([]byte(in.Spec.Parameters.Raw), ¶meters)
if err != nil {
glog.Errorf("Failed to unmarshal Binding parameters\n%s\n %v", in.Spec.Parameters, err)
return nil, err
}
}
createReq := &brokerapi.BindingRequest{
ServiceID: inst.Spec.OSBServiceID,
PlanID: inst.Spec.OSBPlanID,
Parameters: parameters,
}
sbr, err := client.CreateServiceBinding(inst.Spec.OSBGUID, in.Spec.OSBGUID, createReq)
in.Status = servicecatalog.BindingStatus{}
if err != nil {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
// Now try injection
err := h.injector.Inject(in, &sbr.Credentials)
if err != nil {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
}
}
glog.Infof("Updating Service Binding %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Bindings(in.ObjectMeta.Namespace).Update(in)
} | identifier_body |
handler.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"github.com/ghodss/yaml"
"github.com/golang/glog"
"github.com/kubernetes-incubator/service-catalog/pkg/apis/servicecatalog"
"github.com/kubernetes-incubator/service-catalog/pkg/brokerapi"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/apiclient"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/apiclient/util"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/injector"
"github.com/satori/go.uuid"
"k8s.io/client-go/1.5/kubernetes"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)
const (
catalogURLFormatString = "%s/v2/catalog"
serviceInstanceFormatString = "%s/v2/service_instances/%s"
bindingFormatString = "%s/v2/service_instances/%s/service_bindings/%s"
defaultNamespace = "default"
)
// Handler defines an interface used as a facade for data access operations.
// The controller uses the functions of this interface as callbacks for various
// events.
type Handler interface {
// CreateServiceInstance takes in a (possibly incomplete)
// ServiceInstance and will either create or update an
// existing one.
CreateServiceInstance(*servicecatalog.Instance) (*servicecatalog.Instance, error)
// CreateServiceBinding takes in a (possibly incomplete)
// ServiceBinding and will either create or update an
// existing one.
CreateServiceBinding(*servicecatalog.Binding) (*servicecatalog.Binding, error)
DeleteServiceBinding(*servicecatalog.Binding) error
// CreateServiceBroker takes in a (possibly incomplete)
// ServiceBroker and will either create or update an
// existing one.
CreateServiceBroker(*servicecatalog.Broker) (*servicecatalog.Broker, error)
}
type handler struct {
k8sClient kubernetes.Interface
apiClient apiclient.APIClient
injector injector.BindingInjector
newClientFunc func(name, url, username, password string) brokerapi.BrokerClient
}
func createHandler(k8sClient kubernetes.Interface, client apiclient.APIClient, injector injector.BindingInjector, newClientFn brokerapi.CreateFunc) *handler {
return &handler{
k8sClient: k8sClient,
apiClient: client,
injector: injector,
newClientFunc: newClientFn,
}
}
func (h *handler) updateServiceInstance(in *servicecatalog.Instance) error {
// Currently there's no difference between create / update,
// but to prepare for the future, these are split into two
// different methods for now.
return h.createServiceInstance(in)
}
func (h *handler) createServiceInstance(in *servicecatalog.Instance) error {
broker, err := util.GetBrokerByServiceClassName(h.apiClient.Brokers(), h.apiClient.ServiceClasses(), in.Spec.ServiceClassName)
if err != nil {
return err
}
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, broker)
if err != nil {
return err
}
client := h.newClientFunc(broker.Name, broker.Spec.URL, authUsername, authPassword)
parameters := make(map[string]interface{})
if in.Spec.Parameters != nil && len(in.Spec.Parameters.Raw) > 0 {
err = yaml.Unmarshal([]byte(in.Spec.Parameters.Raw), ¶meters)
if err != nil {
glog.Errorf("Failed to unmarshal Instance parameters\n%s\n %v", in.Spec.Parameters, err)
return err
}
}
createReq := &brokerapi.CreateServiceInstanceRequest{
ServiceID: in.Spec.OSBServiceID,
PlanID: in.Spec.OSBPlanID,
Parameters: parameters,
}
_, err = client.CreateServiceInstance(in.Spec.OSBGUID, createReq)
return err
}
// GetAuthCredentialsFromBroker returns the auth credentials, if any,
// contained in the secret referenced in the Broker's AuthSecret field, or
// returns an error. If the AuthSecret field is nil, empty values are
// returned.
func GetAuthCredentialsFromBroker(client kubernetes.Interface, broker *servicecatalog.Broker) (username, password string, err error) {
if broker.Spec.AuthSecret == nil {
return "", "", nil
}
authSecret, err := client.Core().Secrets(broker.Spec.AuthSecret.Namespace).Get(broker.Spec.AuthSecret.Name)
if err != nil {
return "", "", err
}
usernameBytes, ok := authSecret.Data["username"]
if !ok {
return "", "", fmt.Errorf("auth secret didn't contain username")
}
passwordBytes, ok := authSecret.Data["password"]
if !ok {
return "", "", fmt.Errorf("auth secret didn't contain password")
}
return string(usernameBytes), string(passwordBytes), nil
}
///////////////////////////////////////////////////////////////////////////////
// All the methods implementing the Handler interface go here for clarity's sake.
///////////////////////////////////////////////////////////////////////////////
func (h *handler) CreateServiceInstance(in *servicecatalog.Instance) (*servicecatalog.Instance, error) {
serviceID, planID, planName, err := util.GetServicePlanInfo(
h.apiClient.ServiceClasses(),
in.Spec.ServiceClassName,
in.Spec.PlanName,
)
if err != nil {
glog.Errorf("Error fetching service ID: %v", err)
return nil, err
}
in.Spec.OSBServiceID = serviceID
in.Spec.OSBPlanID = planID
in.Spec.PlanName = planName
if in.Spec.OSBGUID == "" {
in.Spec.OSBGUID = uuid.NewV4().String()
}
glog.Infof("Instantiating service %s using service/plan %s : %s", in.Name, serviceID, planID)
err = h.createServiceInstance(in)
in.Status = servicecatalog.InstanceStatus{}
if err != nil {
in.Status.Conditions = []servicecatalog.InstanceCondition{
{
Type: servicecatalog.InstanceConditionProvisionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
in.Status.Conditions = []servicecatalog.InstanceCondition{
{
Type: servicecatalog.InstanceConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
}
glog.Infof("Updating Service %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Instances(in.ObjectMeta.Namespace).Update(in)
}
// DeleteServiceBinding executes all the actions needed before a binding resource can be
// safely deleted. These actions include but are not necessarily limited to deleting the
// kubernetes resources associated with the binding and calling the unbind REST operation
// on the backing OSB API
func (h *handler) DeleteServiceBinding(sb *servicecatalog.Binding) error {
// This logic to set and update the timestamp is TPR-specific; it is to be moved to the API server.
dts := metav1.Now()
sb.DeletionTimestamp = &dts
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// uninject
if err := h.injector.Uninject(sb); err != nil {
// if 0 conditions, uninject and drop condition for uninject
// TODO: add failure condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
// TODO: add success condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// TODO: unbind && add conditions (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if err := h.unbind(sb); err != nil {
// TODO: add failure condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
// TODO: add success condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// This is where the binding is _actually_ deleted after all necessary actions have been taken
if err := h.apiClient.Bindings(sb.Namespace).Delete(sb.Name); err != nil {
// TODO: add deletion error condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
return nil
}
func (h *handler) CreateServiceBinding(in *servicecatalog.Binding) (*servicecatalog.Binding, error) {
glog.Infof("Creating Service Binding: %v", in)
inst, err := instanceForBinding(h.apiClient, in)
if err != nil {
return nil, err
}
sc, err := serviceClassForInstance(h.apiClient, inst)
if err != nil {
return nil, err
}
broker, err := brokerForServiceClass(h.apiClient, sc)
if err != nil {
return nil, err
}
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, broker)
if err != nil {
return nil, err
}
client := h.newClientFunc(broker.Name, broker.Spec.URL, authUsername, authPassword)
// Assign UUID to binding.
if in.Spec.OSBGUID == "" |
parameters := make(map[string]interface{})
if in.Spec.Parameters != nil && len(in.Spec.Parameters.Raw) > 0 {
err = yaml.Unmarshal([]byte(in.Spec.Parameters.Raw), ¶meters)
if err != nil {
glog.Errorf("Failed to unmarshal Binding parameters\n%s\n %v", in.Spec.Parameters, err)
return nil, err
}
}
createReq := &brokerapi.BindingRequest{
ServiceID: inst.Spec.OSBServiceID,
PlanID: inst.Spec.OSBPlanID,
Parameters: parameters,
}
sbr, err := client.CreateServiceBinding(inst.Spec.OSBGUID, in.Spec.OSBGUID, createReq)
in.Status = servicecatalog.BindingStatus{}
if err != nil {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
// Now try injection
err := h.injector.Inject(in, &sbr.Credentials)
if err != nil {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
}
}
glog.Infof("Updating Service Binding %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Bindings(in.ObjectMeta.Namespace).Update(in)
}
func (h *handler) CreateServiceBroker(in *servicecatalog.Broker) (*servicecatalog.Broker, error) {
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, in)
if err != nil {
return nil, err
}
client := h.newClientFunc(in.Name, in.Spec.URL, authUsername, authPassword)
sbcat, err := client.GetCatalog()
if err != nil {
return nil, err
}
catalog, err := convertCatalog(sbcat)
if err != nil {
return nil, err
}
glog.Infof("Adding a broker %s catalog:\n%v\n", in.Name, catalog)
for _, sc := range catalog {
sc.BrokerName = in.Name
if _, err := h.apiClient.ServiceClasses().Create(sc); err != nil {
return nil, err
}
}
in.Status.Conditions = []servicecatalog.BrokerCondition{
{
Type: servicecatalog.BrokerConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
glog.Infof("Updating Service Broker %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Brokers().Update(in)
}
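// To illustrate convertCatalog below with invented values: a catalog entry such as
//   {"name": "mysql", "id": "svc-guid", "bindable": true,
//    "plans": [{"name": "small", "id": "plan-guid", "free": true}]}
// becomes a ServiceClass named "mysql" with OSBGUID "svc-guid" and a single ServicePlan
// {Name: "small", OSBGUID: "plan-guid", OSBFree: true}, which CreateServiceBroker above then
// persists via h.apiClient.ServiceClasses().Create.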
// unbind walks the reference graph from b to its ancestral broker resource and uses the broker's
// credentials to issue the requisite DELETE call (against the OSB API to which the broker
// resource points), i.e. the unbind call on the OSB API server
func (h *handler) unbind(b *servicecatalog.Binding) error {
inst, err := instanceForBinding(h.apiClient, b)
if err != nil {
return err
}
sc, err := serviceClassForInstance(h.apiClient, inst)
if err != nil {
return err
}
broker, err := brokerForServiceClass(h.apiClient, sc)
if err != nil {
return err
}
authSecret, err := h.k8sClient.Core().Secrets(broker.Spec.AuthSecret.Namespace).Get(broker.Spec.AuthSecret.Name)
if err != nil {
return err
}
// TODO: username / password
client := h.newClientFunc(broker.Name, broker.Spec.URL, string(authSecret.Data["username"]), string(authSecret.Data["password"]))
return client.DeleteServiceBinding(inst.Spec.OSBGUID, b.Spec.OSBGUID)
}
// convertCatalog converts a service broker catalog into an array of ServiceClasses
func convertCatalog(in *brokerapi.Catalog) ([]*servicecatalog.ServiceClass, error) {
ret := make([]*servicecatalog.ServiceClass, len(in.Services))
for i, svc := range in.Services {
plans := convertServicePlans(svc.Plans)
ret[i] = &servicecatalog.ServiceClass{
Bindable: svc.Bindable,
Plans: plans,
PlanUpdatable: svc.PlanUpdateable,
OSBGUID: svc.ID,
OSBTags: svc.Tags,
OSBRequires: svc.Requires,
// OSBMetadata: svc.Metadata,
}
ret[i].SetName(svc.Name)
}
return ret, nil
}
func convertServicePlans(plans []brokerapi.ServicePlan) []servicecatalog.ServicePlan {
ret := make([]servicecatalog.ServicePlan, len(plans))
for i, plan := range plans {
ret[i] = servicecatalog.ServicePlan{
Name: plan.Name,
OSBGUID: plan.ID,
// OSBMetadata: plan.Metadata,
OSBFree: plan.Free,
}
}
return ret
}
| {
in.Spec.OSBGUID = uuid.NewV4().String()
} | conditional_block |
handler.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"github.com/ghodss/yaml"
"github.com/golang/glog"
"github.com/kubernetes-incubator/service-catalog/pkg/apis/servicecatalog"
"github.com/kubernetes-incubator/service-catalog/pkg/brokerapi"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/apiclient"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/apiclient/util"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/injector"
"github.com/satori/go.uuid"
"k8s.io/client-go/1.5/kubernetes"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)
const ( | catalogURLFormatString = "%s/v2/catalog"
serviceInstanceFormatString = "%s/v2/service_instances/%s"
bindingFormatString = "%s/v2/service_instances/%s/service_bindings/%s"
defaultNamespace = "default"
)
// Handler defines an interface used as a facade for data access operations.
// The controller uses the functions of this interface as callbacks for various
// events.
type Handler interface {
// CreateServiceInstance takes in a (possibly incomplete)
// ServiceInstance and will either create or update an
// existing one.
CreateServiceInstance(*servicecatalog.Instance) (*servicecatalog.Instance, error)
// CreateServiceBinding takes in a (possibly incomplete)
// ServiceBinding and will either create or update an
// existing one.
CreateServiceBinding(*servicecatalog.Binding) (*servicecatalog.Binding, error)
DeleteServiceBinding(*servicecatalog.Binding) error
// CreateServiceBroker takes in a (possibly incomplete)
// ServiceBroker and will either create or update an
// existing one.
CreateServiceBroker(*servicecatalog.Broker) (*servicecatalog.Broker, error)
}
type handler struct {
k8sClient kubernetes.Interface
apiClient apiclient.APIClient
injector injector.BindingInjector
newClientFunc func(name, url, username, password string) brokerapi.BrokerClient
}
func createHandler(k8sClient kubernetes.Interface, client apiclient.APIClient, injector injector.BindingInjector, newClientFn brokerapi.CreateFunc) *handler {
return &handler{
k8sClient: k8sClient,
apiClient: client,
injector: injector,
newClientFunc: newClientFn,
}
}
func (h *handler) updateServiceInstance(in *servicecatalog.Instance) error {
// Currently there's no difference between create / update,
// but to prepare for the future, these are split into two
// different methods for now.
return h.createServiceInstance(in)
}
func (h *handler) createServiceInstance(in *servicecatalog.Instance) error {
broker, err := util.GetBrokerByServiceClassName(h.apiClient.Brokers(), h.apiClient.ServiceClasses(), in.Spec.ServiceClassName)
if err != nil {
return err
}
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, broker)
if err != nil {
return err
}
client := h.newClientFunc(broker.Name, broker.Spec.URL, authUsername, authPassword)
parameters := make(map[string]interface{})
if in.Spec.Parameters != nil && len(in.Spec.Parameters.Raw) > 0 {
err = yaml.Unmarshal([]byte(in.Spec.Parameters.Raw), ¶meters)
if err != nil {
glog.Errorf("Failed to unmarshal Instance parameters\n%s\n %v", in.Spec.Parameters, err)
return err
}
}
createReq := &brokerapi.CreateServiceInstanceRequest{
ServiceID: in.Spec.OSBServiceID,
PlanID: in.Spec.OSBPlanID,
Parameters: parameters,
}
_, err = client.CreateServiceInstance(in.Spec.OSBGUID, createReq)
return err
}
// GetAuthCredentialsFromBroker returns the auth credentials, if any,
// contained in the secret referenced in the Broker's AuthSecret field, or
// returns an error. If the AuthSecret field is nil, empty values are
// returned.
func GetAuthCredentialsFromBroker(client kubernetes.Interface, broker *servicecatalog.Broker) (username, password string, err error) {
if broker.Spec.AuthSecret == nil {
return "", "", nil
}
authSecret, err := client.Core().Secrets(broker.Spec.AuthSecret.Namespace).Get(broker.Spec.AuthSecret.Name)
if err != nil {
return "", "", err
}
usernameBytes, ok := authSecret.Data["username"]
if !ok {
return "", "", fmt.Errorf("auth secret didn't contain username")
}
passwordBytes, ok := authSecret.Data["password"]
if !ok {
return "", "", fmt.Errorf("auth secret didn't contain password")
}
return string(usernameBytes), string(passwordBytes), nil
}
///////////////////////////////////////////////////////////////////////////////
// All the methods implementing the Handler interface go here for clarity's sake.
///////////////////////////////////////////////////////////////////////////////
func (h *handler) CreateServiceInstance(in *servicecatalog.Instance) (*servicecatalog.Instance, error) {
serviceID, planID, planName, err := util.GetServicePlanInfo(
h.apiClient.ServiceClasses(),
in.Spec.ServiceClassName,
in.Spec.PlanName,
)
if err != nil {
glog.Errorf("Error fetching service ID: %v", err)
return nil, err
}
in.Spec.OSBServiceID = serviceID
in.Spec.OSBPlanID = planID
in.Spec.PlanName = planName
if in.Spec.OSBGUID == "" {
in.Spec.OSBGUID = uuid.NewV4().String()
}
glog.Infof("Instantiating service %s using service/plan %s : %s", in.Name, serviceID, planID)
err = h.createServiceInstance(in)
in.Status = servicecatalog.InstanceStatus{}
if err != nil {
in.Status.Conditions = []servicecatalog.InstanceCondition{
{
Type: servicecatalog.InstanceConditionProvisionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
in.Status.Conditions = []servicecatalog.InstanceCondition{
{
Type: servicecatalog.InstanceConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
}
glog.Infof("Updating Service %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Instances(in.ObjectMeta.Namespace).Update(in)
}
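// A sketch of the Instance fields this method consumes and fills in (field names taken from
// the Spec accesses above; the concrete values are invented for illustration):
//   Spec.ServiceClassName "mysql"  -- resolved to serviceID/planID via GetServicePlanInfo
//   Spec.PlanName         "small"  -- normalized plan name written back to the Spec
//   Spec.OSBGUID          ""       -- a fresh UUID is generated when left empty
//   Status.Conditions              -- set to Ready or ProvisionFailed before Update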
// DeleteServiceBinding executes all the actions needed before a binding resource can be
// safely deleted. These actions include but are not necessarily limited to deleting the
// kubernetes resources associated with the binding and calling the unbind REST operation
// on the backing OSB API
func (h *handler) DeleteServiceBinding(sb *servicecatalog.Binding) error {
// This logic to set and update the timestamp is TPR-specific; it is to be moved to the API server.
dts := metav1.Now()
sb.DeletionTimestamp = &dts
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// uninject
if err := h.injector.Uninject(sb); err != nil {
// if 0 conditions, uninject and drop condition for uninject
// TODO: add failure condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
// TODO: add success condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// TODO: unbind && add conditions (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if err := h.unbind(sb); err != nil {
// TODO: add failure condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
// TODO: add success condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// This is where the binding is _actually_ deleted after all necessary actions have been taken
if err := h.apiClient.Bindings(sb.Namespace).Delete(sb.Name); err != nil {
// TODO: add deletion error condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
return nil
}
func (h *handler) CreateServiceBinding(in *servicecatalog.Binding) (*servicecatalog.Binding, error) {
glog.Infof("Creating Service Binding: %v", in)
inst, err := instanceForBinding(h.apiClient, in)
if err != nil {
return nil, err
}
sc, err := serviceClassForInstance(h.apiClient, inst)
if err != nil {
return nil, err
}
broker, err := brokerForServiceClass(h.apiClient, sc)
if err != nil {
return nil, err
}
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, broker)
if err != nil {
return nil, err
}
client := h.newClientFunc(broker.Name, broker.Spec.URL, authUsername, authPassword)
// Assign UUID to binding.
if in.Spec.OSBGUID == "" {
in.Spec.OSBGUID = uuid.NewV4().String()
}
parameters := make(map[string]interface{})
if in.Spec.Parameters != nil && len(in.Spec.Parameters.Raw) > 0 {
err = yaml.Unmarshal([]byte(in.Spec.Parameters.Raw), ¶meters)
if err != nil {
glog.Errorf("Failed to unmarshal Binding parameters\n%s\n %v", in.Spec.Parameters, err)
return nil, err
}
}
createReq := &brokerapi.BindingRequest{
ServiceID: inst.Spec.OSBServiceID,
PlanID: inst.Spec.OSBPlanID,
Parameters: parameters,
}
sbr, err := client.CreateServiceBinding(inst.Spec.OSBGUID, in.Spec.OSBGUID, createReq)
in.Status = servicecatalog.BindingStatus{}
if err != nil {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
// Now try injection
err := h.injector.Inject(in, &sbr.Credentials)
if err != nil {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
}
}
glog.Infof("Updating Service Binding %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Bindings(in.ObjectMeta.Namespace).Update(in)
}
func (h *handler) CreateServiceBroker(in *servicecatalog.Broker) (*servicecatalog.Broker, error) {
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, in)
if err != nil {
return nil, err
}
client := h.newClientFunc(in.Name, in.Spec.URL, authUsername, authPassword)
sbcat, err := client.GetCatalog()
if err != nil {
return nil, err
}
catalog, err := convertCatalog(sbcat)
if err != nil {
return nil, err
}
glog.Infof("Adding a broker %s catalog:\n%v\n", in.Name, catalog)
for _, sc := range catalog {
sc.BrokerName = in.Name
if _, err := h.apiClient.ServiceClasses().Create(sc); err != nil {
return nil, err
}
}
in.Status.Conditions = []servicecatalog.BrokerCondition{
{
Type: servicecatalog.BrokerConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
glog.Infof("Updating Service Broker %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Brokers().Update(in)
}
// unbind walks the reference graph from b to its ancestral broker resource and uses the broker's
// credentials to issue the requisite DELETE call (against the OSB API to which the broker
// resource points), i.e. the unbind call on the OSB API server
func (h *handler) unbind(b *servicecatalog.Binding) error {
inst, err := instanceForBinding(h.apiClient, b)
if err != nil {
return err
}
sc, err := serviceClassForInstance(h.apiClient, inst)
if err != nil {
return err
}
broker, err := brokerForServiceClass(h.apiClient, sc)
if err != nil {
return err
}
authSecret, err := h.k8sClient.Core().Secrets(broker.Spec.AuthSecret.Namespace).Get(broker.Spec.AuthSecret.Name)
if err != nil {
return err
}
// TODO: username / password
client := h.newClientFunc(broker.Name, broker.Spec.URL, string(authSecret.Data["username"]), string(authSecret.Data["password"]))
return client.DeleteServiceBinding(inst.Spec.OSBGUID, b.Spec.OSBGUID)
}
// convertCatalog converts a service broker catalog into an array of ServiceClasses
func convertCatalog(in *brokerapi.Catalog) ([]*servicecatalog.ServiceClass, error) {
ret := make([]*servicecatalog.ServiceClass, len(in.Services))
for i, svc := range in.Services {
plans := convertServicePlans(svc.Plans)
ret[i] = &servicecatalog.ServiceClass{
Bindable: svc.Bindable,
Plans: plans,
PlanUpdatable: svc.PlanUpdateable,
OSBGUID: svc.ID,
OSBTags: svc.Tags,
OSBRequires: svc.Requires,
// OSBMetadata: svc.Metadata,
}
ret[i].SetName(svc.Name)
}
return ret, nil
}
func convertServicePlans(plans []brokerapi.ServicePlan) []servicecatalog.ServicePlan {
ret := make([]servicecatalog.ServicePlan, len(plans))
for i, plan := range plans {
ret[i] = servicecatalog.ServicePlan{
Name: plan.Name,
OSBGUID: plan.ID,
// OSBMetadata: plan.Metadata,
OSBFree: plan.Free,
}
}
return ret
} | random_line_split |
|
handler.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"github.com/ghodss/yaml"
"github.com/golang/glog"
"github.com/kubernetes-incubator/service-catalog/pkg/apis/servicecatalog"
"github.com/kubernetes-incubator/service-catalog/pkg/brokerapi"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/apiclient"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/apiclient/util"
"github.com/kubernetes-incubator/service-catalog/pkg/controller/injector"
"github.com/satori/go.uuid"
"k8s.io/client-go/1.5/kubernetes"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)
const (
catalogURLFormatString = "%s/v2/catalog"
serviceInstanceFormatString = "%s/v2/service_instances/%s"
bindingFormatString = "%s/v2/service_instances/%s/service_bindings/%s"
defaultNamespace = "default"
)
// Handler defines an interface used as a facade for data access operations.
// The controller uses the functions of this interface as callbacks for various
// events.
type Handler interface {
// CreateServiceInstance takes in a (possibly incomplete)
// ServiceInstance and will either create or update an
// existing one.
CreateServiceInstance(*servicecatalog.Instance) (*servicecatalog.Instance, error)
// CreateServiceBinding takes in a (possibly incomplete)
// ServiceBinding and will either create or update an
// existing one.
CreateServiceBinding(*servicecatalog.Binding) (*servicecatalog.Binding, error)
DeleteServiceBinding(*servicecatalog.Binding) error
// CreateServiceBroker takes in a (possibly incomplete)
// ServiceBroker and will either create or update an
// existing one.
CreateServiceBroker(*servicecatalog.Broker) (*servicecatalog.Broker, error)
}
type handler struct {
k8sClient kubernetes.Interface
apiClient apiclient.APIClient
injector injector.BindingInjector
newClientFunc func(name, url, username, password string) brokerapi.BrokerClient
}
func createHandler(k8sClient kubernetes.Interface, client apiclient.APIClient, injector injector.BindingInjector, newClientFn brokerapi.CreateFunc) *handler {
return &handler{
k8sClient: k8sClient,
apiClient: client,
injector: injector,
newClientFunc: newClientFn,
}
}
func (h *handler) updateServiceInstance(in *servicecatalog.Instance) error {
// Currently there's no difference between create / update,
// but for prepping for future, split these into two different
// methods for now.
return h.createServiceInstance(in)
}
func (h *handler) createServiceInstance(in *servicecatalog.Instance) error {
broker, err := util.GetBrokerByServiceClassName(h.apiClient.Brokers(), h.apiClient.ServiceClasses(), in.Spec.ServiceClassName)
if err != nil {
return err
}
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, broker)
if err != nil {
return err
}
client := h.newClientFunc(broker.Name, broker.Spec.URL, authUsername, authPassword)
parameters := make(map[string]interface{})
if in.Spec.Parameters != nil && len(in.Spec.Parameters.Raw) > 0 {
err = yaml.Unmarshal([]byte(in.Spec.Parameters.Raw), &parameters)
if err != nil {
glog.Errorf("Failed to unmarshal Instance parameters\n%s\n %v", in.Spec.Parameters, err)
return err
}
}
createReq := &brokerapi.CreateServiceInstanceRequest{
ServiceID: in.Spec.OSBServiceID,
PlanID: in.Spec.OSBPlanID,
Parameters: parameters,
}
_, err = client.CreateServiceInstance(in.Spec.OSBGUID, createReq)
return err
}
// GetAuthCredentialsFromBroker returns the auth credentials, if any,
// contained in the secret referenced in the Broker's AuthSecret field, or
// returns an error. If the AuthSecret field is nil, empty values are
// returned.
func GetAuthCredentialsFromBroker(client kubernetes.Interface, broker *servicecatalog.Broker) (username, password string, err error) {
if broker.Spec.AuthSecret == nil {
return "", "", nil
}
authSecret, err := client.Core().Secrets(broker.Spec.AuthSecret.Namespace).Get(broker.Spec.AuthSecret.Name)
if err != nil {
return "", "", err
}
usernameBytes, ok := authSecret.Data["username"]
if !ok {
return "", "", fmt.Errorf("auth secret didn't contain username")
}
passwordBytes, ok := authSecret.Data["password"]
if !ok {
return "", "", fmt.Errorf("auth secret didn't contain password")
}
return string(usernameBytes), string(passwordBytes), nil
}
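// Illustrative usage (editor sketch, not part of the original controller):
//
//	username, password, err := GetAuthCredentialsFromBroker(h.k8sClient, broker)
//	if err != nil {
//		return err
//	}
//	client := h.newClientFunc(broker.Name, broker.Spec.URL, username, password)
//
// Empty credentials simply mean the broker requires no basic auth.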
///////////////////////////////////////////////////////////////////////////////
// All the methods implementing the Handler interface go here for clarity sake.
///////////////////////////////////////////////////////////////////////////////
func (h *handler) CreateServiceInstance(in *servicecatalog.Instance) (*servicecatalog.Instance, error) {
serviceID, planID, planName, err := util.GetServicePlanInfo(
h.apiClient.ServiceClasses(),
in.Spec.ServiceClassName,
in.Spec.PlanName,
)
if err != nil {
glog.Errorf("Error fetching service ID: %v", err)
return nil, err
}
in.Spec.OSBServiceID = serviceID
in.Spec.OSBPlanID = planID
in.Spec.PlanName = planName
if in.Spec.OSBGUID == "" {
in.Spec.OSBGUID = uuid.NewV4().String()
}
glog.Infof("Instantiating service %s using service/plan %s : %s", in.Name, serviceID, planID)
err = h.createServiceInstance(in)
in.Status = servicecatalog.InstanceStatus{}
if err != nil {
in.Status.Conditions = []servicecatalog.InstanceCondition{
{
Type: servicecatalog.InstanceConditionProvisionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
in.Status.Conditions = []servicecatalog.InstanceCondition{
{
Type: servicecatalog.InstanceConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
}
glog.Infof("Updating Service %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Instances(in.ObjectMeta.Namespace).Update(in)
}
// DeleteServiceBinding executes all the actions needed before a binding resource can be
// safely deleted. These actions include but are not necessarily limited to deleting the
// kubernetes resources associated with the binding and calling the unbind REST operation
// on the backing OSB API
func (h *handler) DeleteServiceBinding(sb *servicecatalog.Binding) error {
// this logic to set and update the timestamp is TPR specific. to be moved to the API server
dts := metav1.Now()
sb.DeletionTimestamp = &dts
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// uninject
if err := h.injector.Uninject(sb); err != nil {
// if 0 conditions, uninject and drop condition for uninject
// TODO: add failure condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
// TODO: add success condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// TODO: unbind && add conditions (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if err := h.unbind(sb); err != nil {
// TODO: add failure condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
// TODO: add success condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
if _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {
return err
}
// This is where the binding is _actually_ deleted after all necessary actions have been taken
if err := h.apiClient.Bindings(sb.Namespace).Delete(sb.Name); err != nil {
// TODO: add deletion error condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)
return err
}
return nil
}
func (h *handler) CreateServiceBinding(in *servicecatalog.Binding) (*servicecatalog.Binding, error) {
glog.Infof("Creating Service Binding: %v", in)
inst, err := instanceForBinding(h.apiClient, in)
if err != nil {
return nil, err
}
sc, err := serviceClassForInstance(h.apiClient, inst)
if err != nil {
return nil, err
}
broker, err := brokerForServiceClass(h.apiClient, sc)
if err != nil {
return nil, err
}
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, broker)
if err != nil {
return nil, err
}
client := h.newClientFunc(broker.Name, broker.Spec.URL, authUsername, authPassword)
// Assign UUID to binding.
if in.Spec.OSBGUID == "" {
in.Spec.OSBGUID = uuid.NewV4().String()
}
parameters := make(map[string]interface{})
if in.Spec.Parameters != nil && len(in.Spec.Parameters.Raw) > 0 {
err = yaml.Unmarshal([]byte(in.Spec.Parameters.Raw), &parameters)
if err != nil {
glog.Errorf("Failed to unmarshal Binding parameters\n%s\n %v", in.Spec.Parameters, err)
return nil, err
}
}
createReq := &brokerapi.BindingRequest{
ServiceID: inst.Spec.OSBServiceID,
PlanID: inst.Spec.OSBPlanID,
Parameters: parameters,
}
sbr, err := client.CreateServiceBinding(inst.Spec.OSBGUID, in.Spec.OSBGUID, createReq)
in.Status = servicecatalog.BindingStatus{}
if err != nil {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
// Now try injection
err := h.injector.Inject(in, &sbr.Credentials)
if err != nil {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionFailed,
Status: servicecatalog.ConditionTrue,
Reason: err.Error(),
},
}
glog.Errorf("Failed to create service instance: %v", err)
} else {
in.Status.Conditions = []servicecatalog.BindingCondition{
{
Type: servicecatalog.BindingConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
}
}
glog.Infof("Updating Service Binding %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Bindings(in.ObjectMeta.Namespace).Update(in)
}
func (h *handler) | (in *servicecatalog.Broker) (*servicecatalog.Broker, error) {
authUsername, authPassword, err := GetAuthCredentialsFromBroker(h.k8sClient, in)
if err != nil {
return nil, err
}
client := h.newClientFunc(in.Name, in.Spec.URL, authUsername, authPassword)
sbcat, err := client.GetCatalog()
if err != nil {
return nil, err
}
catalog, err := convertCatalog(sbcat)
if err != nil {
return nil, err
}
glog.Infof("Adding a broker %s catalog:\n%v\n", in.Name, catalog)
for _, sc := range catalog {
sc.BrokerName = in.Name
if _, err := h.apiClient.ServiceClasses().Create(sc); err != nil {
return nil, err
}
}
in.Status.Conditions = []servicecatalog.BrokerCondition{
{
Type: servicecatalog.BrokerConditionReady,
Status: servicecatalog.ConditionTrue,
},
}
glog.Infof("Updating Service Broker %s with State\n%v", in.Name, in.Status.Conditions[0].Type)
return h.apiClient.Brokers().Update(in)
}
// unbind walks the reference graph from b to its ancestral broker resource and uses the broker's
// credentials to issue the requisite DELETE call (against the OSB API to which the broker
// resource points) that performs the unbind on the OSB API server
func (h *handler) unbind(b *servicecatalog.Binding) error {
inst, err := instanceForBinding(h.apiClient, b)
if err != nil {
return err
}
sc, err := serviceClassForInstance(h.apiClient, inst)
if err != nil {
return err
}
broker, err := brokerForServiceClass(h.apiClient, sc)
if err != nil {
return err
}
authSecret, err := h.k8sClient.Core().Secrets(broker.Spec.AuthSecret.Namespace).Get(broker.Spec.AuthSecret.Name)
if err != nil {
return err
}
// TODO: username / password
client := h.newClientFunc(broker.Name, broker.Spec.URL, string(authSecret.Data["username"]), string(authSecret.Data["password"]))
return client.DeleteServiceBinding(inst.Spec.OSBGUID, b.Spec.OSBGUID)
}
// convertCatalog converts a service broker catalog into an array of ServiceClasses
func convertCatalog(in *brokerapi.Catalog) ([]*servicecatalog.ServiceClass, error) {
ret := make([]*servicecatalog.ServiceClass, len(in.Services))
for i, svc := range in.Services {
plans := convertServicePlans(svc.Plans)
ret[i] = &servicecatalog.ServiceClass{
Bindable: svc.Bindable,
Plans: plans,
PlanUpdatable: svc.PlanUpdateable,
OSBGUID: svc.ID,
OSBTags: svc.Tags,
OSBRequires: svc.Requires,
// OSBMetadata: svc.Metadata,
}
ret[i].SetName(svc.Name)
}
return ret, nil
}
func convertServicePlans(plans []brokerapi.ServicePlan) []servicecatalog.ServicePlan {
ret := make([]servicecatalog.ServicePlan, len(plans))
for i, plan := range plans {
ret[i] = servicecatalog.ServicePlan{
Name: plan.Name,
OSBGUID: plan.ID,
// OSBMetadata: plan.Metadata,
OSBFree: plan.Free,
}
}
return ret
}
| CreateServiceBroker | identifier_name |
Quirk.ts |
import { renderHTML } from "../Templates/QuirkField";
import { setCookieBool } from "../CookieManager";
import { Category } from "../Categories/Category";
import { OptionalCheckbox } from "./OptionalCheckbox";
import { selectAllAndCopy } from "../Copy2Clipboard";
export abstract class Quirk {
static inputField: HTMLTextAreaElement;
static textFields: HTMLFieldSetElement;
private readonly name: string;
private shortName: string;
private id: string;
private readonly colorClass: string;
input: string;
private row: HTMLDivElement;
private textArea: HTMLTextAreaElement;
public activeCheckbox: HTMLInputElement;
optionalCheckboxes: Array<OptionalCheckbox>;
protected constructor(name: string, colorClass: string = "") {
this.name = name;
let spaceIndex = this.name.indexOf(" ");
this.shortName = spaceIndex > 0 ? this.name.substr(0, spaceIndex) : name;
this.id = this.shortName.toLocaleLowerCase();
this.optionalCheckboxes = new Array<OptionalCheckbox>();
this.colorClass = colorClass.length < 1 ? this.id : colorClass;
}
public render(category: Category): void {
Quirk.textFields.insertAdjacentHTML('beforeend', renderHTML(this.name, this.id, this.colorClass));
this.row = <HTMLTableRowElement>document.getElementById(this.id + "-row");
this.textArea = this.row.getElementsByTagName("textarea")[0];
this.textArea.onclick = selectAllAndCopy;
// Create toggle checkbox.
this.activeCheckbox = document.createElement("input");
this.activeCheckbox.classList.add("filled-in");
this.activeCheckbox.classList.add("checkbox-" + this.getColorClass());
this.activeCheckbox.type = "checkbox";
this.activeCheckbox.checked = true;
this.activeCheckbox.onchange = () => this.updateVisibility(category);
let td: HTMLTableCellElement = document.createElement("td");
td.insertAdjacentElement('beforeend', this.activeCheckbox);
// Checkbox requires a span element adjacent to it for Materialize's theme to work.
let span = document.createElement("span");
span.insertAdjacentText('beforeend', this.name);
td.insertAdjacentElement('beforeend', span);
let tr: HTMLTableRowElement = document.createElement("tr");
tr.classList.add("waves-effect");
tr.classList.add("waves-" + this.getColorClass());
tr.onclick = () => this.activeCheckbox.click();
tr.insertAdjacentElement('beforeend', td);
let toggleCheckboxSet = category.getMainCheckboxSetElement();
toggleCheckboxSet.insertAdjacentElement('beforeend', tr);
for (let i = 0; i < this.optionalCheckboxes.length; i++) {
this.optionalCheckboxes[i].render(category, this.getID(), this);
}
}
public getID(): string {
return this.id;
}
public | (bruh: string): void {
this.shortName = bruh;
this.id = bruh.toLocaleLowerCase();
}
public getShortName(): string {
return this.shortName;
}
public getColorClass(): string {
return this.colorClass;
}
public getTextAreaElement(): HTMLTextAreaElement {
return this.textArea;
}
updateVisibility(category: Category): void {
this.row.hidden = !this.activeCheckbox.checked;
let optionals = <HTMLCollectionOf<HTMLElement>>document.getElementsByClassName(this.id + "-optional");
for (let i = 0; i < optionals.length; i++) {
optionals[i].hidden = !this.activeCheckbox.checked;
}
let visible = !this.row.hidden;
// Save setting to cookies.
setCookieBool(this.id, visible, 31);
let optionalCheckboxSet: HTMLDivElement = <HTMLDivElement>document.getElementById(category.tabName.toLocaleLowerCase() + "-optional-checkboxes");
if (visible) {
this.update(Quirk.inputField.value);
if (optionalCheckboxSet.hidden && optionals.length > 0) {
optionalCheckboxSet.hidden = false;
}
} else {
// Check if any other optional checkboxes are visible.
for (let i = 0; i < category.optionalCheckboxes.length; i++) {
if (!category.optionalCheckboxes[i].hidden) {
return;
}
}
// Hide the table.
optionalCheckboxSet.hidden = true;
}
}
update(str: string): void {
if (!this.activeCheckbox.checked || str.length < 1) { return; }
this.input = str;
this.quirkify();
this.updateTextField();
}
updateTextField(): void {
this.textArea.value = this.input;
// Auto resize.
Quirk.autoSize(this.textArea);
}
// Dynamically increase the height of a textarea.
static autoSize(element: HTMLTextAreaElement): void {
let minHeight: number = parseInt(window.getComputedStyle(element).getPropertyValue("min-height"));
element.style.height = "auto"; // Lets the element shrink size.
element.style.height = `${Math.max(minHeight, element.scrollHeight)}px`;
}
addCheckbox(label: string, title: string, defaultValue: boolean = false): OptionalCheckbox {
let checkbox: OptionalCheckbox = new OptionalCheckbox(label, title, defaultValue)
this.optionalCheckboxes.push(checkbox);
return checkbox;
}
abstract quirkify(): void;
protected lowerCase(pattern: string = ""): void {
if (pattern.length < 1) {
this.input = this.input.toLocaleLowerCase();
} else {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return match.toLocaleLowerCase();
});
}
}
protected upperCase(pattern: string = ""): void {
if (pattern.length < 1) {
this.input = this.input.toLocaleUpperCase();
} else {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return match.toLocaleUpperCase();
});
}
}
protected prefix(str: string): void {
this.input = str + this.input;
}
protected suffix(str: string): void {
this.input += str;
}
protected replaceString(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "g");
this.input = this.input.replace(reg, replace);
}
protected replaceCaseInsensitive(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, replace);
}
protected replaceMatchCase(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return Quirk.matchCase(replace, match);
});
}
protected replaceWord(pattern: string, replace: string) {
this.replaceString("\\b" + pattern + "\\b", replace);
}
protected replaceWordMatchCase(pattern: string, replace: string) {
this.replaceMatchCase("\\b" + pattern + "\\b", replace);
}
// Function taken from https://stackoverflow.com/a/17265031/6446221.
private static matchCase(text: string, pattern: string): string {
// If the entire text is uppercase then uppercase the whole pattern regardless of lengths.
if (pattern.toUpperCase() === pattern) {
return text.toUpperCase();
}
let result = '';
for (let i = 0; i < text.length; i++) {
let c = text.charAt(i);
let p = pattern.charCodeAt(i);
if (p >= 65 && p < 65 + 26) {
result += c.toUpperCase();
} else {
result += c.toLowerCase();
}
}
return result;
}
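// Example (illustrative, not from the original file):
//   matchCase("fur", "For")    returns "Fur"
//   matchCase("krill", "KILL") returns "KRILL"
// This is what lets replaceMatchCase("kill", "krill") map "Kill" to "Krill",
// "KILL" to "KRILL", and "kill" to "krill".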
randomReplace(pattern: string, replace: string, prob: number): void {
let reg: RegExp = new RegExp(pattern, "g");
this.input = this.input.replace(reg, function(match) {
if (Math.random() <= prob) {
return replace;
}
return match;
});
}
// Troll-specific stuff below.
// $1 - capture group for eyes.
// $2 - capture group for mouth.
replaceEmotes(replace: string): void {
let eyes = "[:;]";
let mouth = "[\\)\\(Dd]";
this.upperCase(`(${eyes})(${mouth})`);
let reg: RegExp = new RegExp(`(${eyes})(${mouth})`, "gi");
this.input = this.input.replace(reg, replace);
}
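// Example (illustrative): applyTiaraEmotes() calls replaceEmotes("38$2"), so an
// input like ":) :d" is first upper-cased to ":) :D" and then becomes "38) 38D";
// the eyes are replaced by "38" while the captured mouth ($2) is preserved.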
applyCatPuns(): void {
this.replaceMatchCase("mother", "meowther");
this.replaceMatchCase("for", "fur");
this.replaceMatchCase("pause", "paws");
this.replaceMatchCase("cause", "claws");
this.replaceMatchCase("now", "meow");
this.replaceMatchCase("(per|pre)", "pur");
}
applyFishPuns(): void {
this.replaceMatchCase("kill", "krill");
this.replaceMatchCase("well", "whale");
this.replaceMatchCase("fine", "fin");
this.replaceMatchCase("see", "sea");
this.replaceMatchCase("should", "shoald");
this.replaceMatchCase("kid", "squid");
this.replaceMatchCase("sure", "shore");
this.replaceMatchCase("crap", "carp");
this.replaceMatchCase("(what are|what do)", "water");
}
applyTiaraEmotes(): void {
this.replaceEmotes("38$2");
}
censorSwears(extreme: boolean = false): void {
this.replaceWordMatchCase("fuck", "f*ck");
this.replaceWordMatchCase("bitch", "b*tch");
this.replaceWordMatchCase("shit", "sh*t");
this.replaceWordMatchCase("damn", "d*mn");
this.replaceWordMatchCase("crap", "cr*p");
if (extreme) {
this.replaceMatchCase("whoops", "wh**ps");
this.replaceMatchCase("silly", "s*lly");
this.replaceMatchCase("shoot", "sh**t");
this.replaceMatchCase("fidging", "f*dging");
}
}
}
| setShortName | identifier_name |
Quirk.ts |
import { renderHTML } from "../Templates/QuirkField";
import { setCookieBool } from "../CookieManager";
import { Category } from "../Categories/Category";
import { OptionalCheckbox } from "./OptionalCheckbox";
import { selectAllAndCopy } from "../Copy2Clipboard";
export abstract class Quirk {
static inputField: HTMLTextAreaElement;
static textFields: HTMLFieldSetElement;
private readonly name: string;
private shortName: string;
private id: string;
private readonly colorClass: string;
input: string;
private row: HTMLDivElement;
private textArea: HTMLTextAreaElement;
public activeCheckbox: HTMLInputElement;
optionalCheckboxes: Array<OptionalCheckbox>;
protected constructor(name: string, colorClass: string = "") {
this.name = name;
let spaceIndex = this.name.indexOf(" ");
this.shortName = spaceIndex > 0 ? this.name.substr(0, spaceIndex) : name;
this.id = this.shortName.toLocaleLowerCase();
this.optionalCheckboxes = new Array<OptionalCheckbox>();
this.colorClass = colorClass.length < 1 ? this.id : colorClass;
}
public render(category: Category): void {
Quirk.textFields.insertAdjacentHTML('beforeend', renderHTML(this.name, this.id, this.colorClass));
this.row = <HTMLTableRowElement>document.getElementById(this.id + "-row");
this.textArea = this.row.getElementsByTagName("textarea")[0];
this.textArea.onclick = selectAllAndCopy;
// Create toggle checkbox.
this.activeCheckbox = document.createElement("input");
this.activeCheckbox.classList.add("filled-in");
this.activeCheckbox.classList.add("checkbox-" + this.getColorClass());
this.activeCheckbox.type = "checkbox";
this.activeCheckbox.checked = true;
this.activeCheckbox.onchange = () => this.updateVisibility(category);
let td: HTMLTableCellElement = document.createElement("td");
td.insertAdjacentElement('beforeend', this.activeCheckbox);
// Checkbox requires a span element adjacent to it for Materialize's theme to work.
let span = document.createElement("span");
span.insertAdjacentText('beforeend', this.name);
td.insertAdjacentElement('beforeend', span);
let tr: HTMLTableRowElement = document.createElement("tr");
tr.classList.add("waves-effect");
tr.classList.add("waves-" + this.getColorClass());
tr.onclick = () => this.activeCheckbox.click();
tr.insertAdjacentElement('beforeend', td);
let toggleCheckboxSet = category.getMainCheckboxSetElement();
toggleCheckboxSet.insertAdjacentElement('beforeend', tr);
for (let i = 0; i < this.optionalCheckboxes.length; i++) {
this.optionalCheckboxes[i].render(category, this.getID(), this);
}
}
public getID(): string {
return this.id;
}
public setShortName(bruh: string): void {
this.shortName = bruh;
this.id = bruh.toLocaleLowerCase();
}
public getShortName(): string {
return this.shortName;
}
public getColorClass(): string {
return this.colorClass;
}
public getTextAreaElement(): HTMLTextAreaElement {
return this.textArea;
}
updateVisibility(category: Category): void {
this.row.hidden = !this.activeCheckbox.checked;
let optionals = <HTMLCollectionOf<HTMLElement>>document.getElementsByClassName(this.id + "-optional");
for (let i = 0; i < optionals.length; i++) {
optionals[i].hidden = !this.activeCheckbox.checked;
}
let visible = !this.row.hidden;
// Save setting to cookies.
setCookieBool(this.id, visible, 31);
let optionalCheckboxSet: HTMLDivElement = <HTMLDivElement>document.getElementById(category.tabName.toLocaleLowerCase() + "-optional-checkboxes");
if (visible) {
this.update(Quirk.inputField.value);
if (optionalCheckboxSet.hidden && optionals.length > 0) {
optionalCheckboxSet.hidden = false;
}
} else {
// Check if any other optional checkboxes are visible.
for (let i = 0; i < category.optionalCheckboxes.length; i++) {
if (!category.optionalCheckboxes[i].hidden) {
return;
}
}
// Hide the table.
optionalCheckboxSet.hidden = true;
}
}
update(str: string): void {
if (!this.activeCheckbox.checked || str.length < 1) { return; }
this.input = str;
this.quirkify();
this.updateTextField();
}
updateTextField(): void |
// Dynamically increase the height of a textarea.
static autoSize(element: HTMLTextAreaElement): void {
let minHeight: number = parseInt(window.getComputedStyle(element).getPropertyValue("min-height"));
element.style.height = "auto"; // Lets the element shrink size.
element.style.height = `${Math.max(minHeight, element.scrollHeight)}px`;
}
addCheckbox(label: string, title: string, defaultValue: boolean = false): OptionalCheckbox {
let checkbox: OptionalCheckbox = new OptionalCheckbox(label, title, defaultValue)
this.optionalCheckboxes.push(checkbox);
return checkbox;
}
abstract quirkify(): void;
protected lowerCase(pattern: string = ""): void {
if (pattern.length < 1) {
this.input = this.input.toLocaleLowerCase();
} else {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return match.toLocaleLowerCase();
});
}
}
protected upperCase(pattern: string = ""): void {
if (pattern.length < 1) {
this.input = this.input.toLocaleUpperCase();
} else {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return match.toLocaleUpperCase();
});
}
}
protected prefix(str: string): void {
this.input = str + this.input;
}
protected suffix(str: string): void {
this.input += str;
}
protected replaceString(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "g");
this.input = this.input.replace(reg, replace);
}
protected replaceCaseInsensitive(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, replace);
}
protected replaceMatchCase(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return Quirk.matchCase(replace, match);
});
}
protected replaceWord(pattern: string, replace: string) {
this.replaceString("\\b" + pattern + "\\b", replace);
}
protected replaceWordMatchCase(pattern: string, replace: string) {
this.replaceMatchCase("\\b" + pattern + "\\b", replace);
}
// Function taken from https://stackoverflow.com/a/17265031/6446221.
private static matchCase(text: string, pattern: string): string {
// If the entire text is uppercase then uppercase the whole pattern regardless of lengths.
if (pattern.toUpperCase() === pattern) {
return text.toUpperCase();
}
let result = '';
for (let i = 0; i < text.length; i++) {
let c = text.charAt(i);
let p = pattern.charCodeAt(i);
if (p >= 65 && p < 65 + 26) {
result += c.toUpperCase();
} else {
result += c.toLowerCase();
}
}
return result;
}
randomReplace(pattern: string, replace: string, prob: number): void {
let reg: RegExp = new RegExp(pattern, "g");
this.input = this.input.replace(reg, function(match) {
if (Math.random() <= prob) {
return replace;
}
return match;
});
}
// Troll-specific stuff below.
// $1 - capture group for eyes.
// $2 - capture group for mouth.
replaceEmotes(replace: string): void {
let eyes = "[:;]";
let mouth = "[\\)\\(Dd]";
this.upperCase(`(${eyes})(${mouth})`);
let reg: RegExp = new RegExp(`(${eyes})(${mouth})`, "gi");
this.input = this.input.replace(reg, replace);
}
applyCatPuns(): void {
this.replaceMatchCase("mother", "meowther");
this.replaceMatchCase("for", "fur");
this.replaceMatchCase("pause", "paws");
this.replaceMatchCase("cause", "claws");
this.replaceMatchCase("now", "meow");
this.replaceMatchCase("(per|pre)", "pur");
}
applyFishPuns(): void {
this.replaceMatchCase("kill", "krill");
this.replaceMatchCase("well", "whale");
this.replaceMatchCase("fine", "fin");
this.replaceMatchCase("see", "sea");
this.replaceMatchCase("should", "shoald");
this.replaceMatchCase("kid", "squid");
this.replaceMatchCase("sure", "shore");
this.replaceMatchCase("crap", "carp");
this.replaceMatchCase("(what are|what do)", "water");
}
applyTiaraEmotes(): void {
this.replaceEmotes("38$2");
}
censorSwears(extreme: boolean = false): void {
this.replaceWordMatchCase("fuck", "f*ck");
this.replaceWordMatchCase("bitch", "b*tch");
this.replaceWordMatchCase("shit", "sh*t");
this.replaceWordMatchCase("damn", "d*mn");
this.replaceWordMatchCase("crap", "cr*p");
if (extreme) {
this.replaceMatchCase("whoops", "wh**ps");
this.replaceMatchCase("silly", "s*lly");
this.replaceMatchCase("shoot", "sh**t");
this.replaceMatchCase("fidging", "f*dging");
}
}
}
| {
this.textArea.value = this.input;
// Auto resize.
Quirk.autoSize(this.textArea);
} | identifier_body |
Quirk.ts | import { renderHTML } from "../Templates/QuirkField";
import { setCookieBool } from "../CookieManager";
import { Category } from "../Categories/Category";
import { OptionalCheckbox } from "./OptionalCheckbox";
import { selectAllAndCopy } from "../Copy2Clipboard";
export abstract class Quirk {
static inputField: HTMLTextAreaElement;
static textFields: HTMLFieldSetElement;
private readonly name: string;
private shortName: string;
private id: string;
private readonly colorClass: string;
input: string;
private row: HTMLDivElement;
private textArea: HTMLTextAreaElement;
public activeCheckbox: HTMLInputElement;
optionalCheckboxes: Array<OptionalCheckbox>;
protected constructor(name: string, colorClass: string = "") {
this.name = name;
let spaceIndex = this.name.indexOf(" ");
this.shortName = spaceIndex > 0 ? this.name.substr(0, spaceIndex) : name;
this.id = this.shortName.toLocaleLowerCase();
this.optionalCheckboxes = new Array<OptionalCheckbox>();
this.colorClass = colorClass.length < 1 ? this.id : colorClass;
}
public render(category: Category): void {
Quirk.textFields.insertAdjacentHTML('beforeend', renderHTML(this.name, this.id, this.colorClass));
this.row = <HTMLTableRowElement>document.getElementById(this.id + "-row");
this.textArea = this.row.getElementsByTagName("textarea")[0];
this.textArea.onclick = selectAllAndCopy;
// Create toggle checkbox.
this.activeCheckbox = document.createElement("input");
this.activeCheckbox.classList.add("filled-in");
this.activeCheckbox.classList.add("checkbox-" + this.getColorClass());
this.activeCheckbox.type = "checkbox";
this.activeCheckbox.checked = true;
this.activeCheckbox.onchange = () => this.updateVisibility(category);
let td: HTMLTableCellElement = document.createElement("td");
td.insertAdjacentElement('beforeend', this.activeCheckbox);
// Checkbox requires a span element adjacent to it for Materialize's theme to work.
let span = document.createElement("span");
span.insertAdjacentText('beforeend', this.name);
td.insertAdjacentElement('beforeend', span);
let tr: HTMLTableRowElement = document.createElement("tr");
tr.classList.add("waves-effect");
tr.classList.add("waves-" + this.getColorClass());
tr.onclick = () => this.activeCheckbox.click();
tr.insertAdjacentElement('beforeend', td);
let toggleCheckboxSet = category.getMainCheckboxSetElement();
toggleCheckboxSet.insertAdjacentElement('beforeend', tr);
for (let i = 0; i < this.optionalCheckboxes.length; i++) {
this.optionalCheckboxes[i].render(category, this.getID(), this);
}
}
public getID(): string {
return this.id;
}
public setShortName(bruh: string): void {
this.shortName = bruh;
this.id = bruh.toLocaleLowerCase();
}
public getShortName(): string {
return this.shortName;
}
public getColorClass(): string {
return this.colorClass;
}
public getTextAreaElement(): HTMLTextAreaElement {
return this.textArea;
}
updateVisibility(category: Category): void {
this.row.hidden = !this.activeCheckbox.checked;
let optionals = <HTMLCollectionOf<HTMLElement>>document.getElementsByClassName(this.id + "-optional");
for (let i = 0; i < optionals.length; i++) {
optionals[i].hidden = !this.activeCheckbox.checked;
}
let visible = !this.row.hidden;
// Save setting to cookies.
setCookieBool(this.id, visible, 31);
let optionalCheckboxSet: HTMLDivElement = <HTMLDivElement>document.getElementById(category.tabName.toLocaleLowerCase() + "-optional-checkboxes");
if (visible) {
this.update(Quirk.inputField.value);
if (optionalCheckboxSet.hidden && optionals.length > 0) {
optionalCheckboxSet.hidden = false;
}
} else {
// Check if any other optional checkboxes are visible.
for (let i = 0; i < category.optionalCheckboxes.length; i++) {
if (!category.optionalCheckboxes[i].hidden) {
return;
}
}
// Hide the table.
optionalCheckboxSet.hidden = true;
}
}
update(str: string): void {
if (!this.activeCheckbox.checked || str.length < 1) { return; }
this.input = str;
this.quirkify();
this.updateTextField();
}
updateTextField(): void {
this.textArea.value = this.input;
// Auto resize.
Quirk.autoSize(this.textArea);
}
// Dynamically increase the height of a textarea.
static autoSize(element: HTMLTextAreaElement): void {
let minHeight: number = parseInt(window.getComputedStyle(element).getPropertyValue("min-height"));
element.style.height = "auto"; // Lets the element shrink size.
element.style.height = `${Math.max(minHeight, element.scrollHeight)}px`;
}
addCheckbox(label: string, title: string, defaultValue: boolean = false): OptionalCheckbox {
let checkbox: OptionalCheckbox = new OptionalCheckbox(label, title, defaultValue)
this.optionalCheckboxes.push(checkbox);
return checkbox;
}
abstract quirkify(): void;
protected lowerCase(pattern: string = ""): void {
if (pattern.length < 1) {
this.input = this.input.toLocaleLowerCase();
} else {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return match.toLocaleLowerCase();
});
}
}
protected upperCase(pattern: string = ""): void {
if (pattern.length < 1) {
this.input = this.input.toLocaleUpperCase();
} else {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return match.toLocaleUpperCase();
});
}
}
protected prefix(str: string): void {
this.input = str + this.input;
}
protected suffix(str: string): void {
this.input += str;
}
protected replaceString(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "g");
this.input = this.input.replace(reg, replace);
}
protected replaceCaseInsensitive(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, replace);
}
protected replaceMatchCase(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return Quirk.matchCase(replace, match);
});
}
protected replaceWord(pattern: string, replace: string) {
this.replaceString("\\b" + pattern + "\\b", replace);
}
protected replaceWordMatchCase(pattern: string, replace: string) {
this.replaceMatchCase("\\b" + pattern + "\\b", replace);
}
// Function taken from https://stackoverflow.com/a/17265031/6446221.
private static matchCase(text: string, pattern: string): string {
// If the entire text is uppercase then uppercase the whole pattern regardless of lengths.
if (pattern.toUpperCase() === pattern) {
return text.toUpperCase();
}
let result = '';
for (let i = 0; i < text.length; i++) {
let c = text.charAt(i);
let p = pattern.charCodeAt(i);
if (p >= 65 && p < 65 + 26) {
result += c.toUpperCase();
} else {
result += c.toLowerCase();
}
}
return result;
}
randomReplace(pattern: string, replace: string, prob: number): void {
let reg: RegExp = new RegExp(pattern, "g");
this.input = this.input.replace(reg, function(match) {
if (Math.random() <= prob) {
return replace;
}
return match;
});
}
// Troll-specific stuff below.
// $1 - capture group for eyes.
// $2 - capture group for mouth.
replaceEmotes(replace: string): void {
let eyes = "[:;]";
let mouth = "[\\)\\(Dd]";
this.upperCase(`(${eyes})(${mouth})`);
let reg: RegExp = new RegExp(`(${eyes})(${mouth})`, "gi");
this.input = this.input.replace(reg, replace);
}
applyCatPuns(): void {
this.replaceMatchCase("mother", "meowther");
this.replaceMatchCase("for", "fur");
this.replaceMatchCase("pause", "paws"); | this.replaceMatchCase("now", "meow");
this.replaceMatchCase("(per|pre)", "pur");
}
applyFishPuns(): void {
this.replaceMatchCase("kill", "krill");
this.replaceMatchCase("well", "whale");
this.replaceMatchCase("fine", "fin");
this.replaceMatchCase("see", "sea");
this.replaceMatchCase("should", "shoald");
this.replaceMatchCase("kid", "squid");
this.replaceMatchCase("sure", "shore");
this.replaceMatchCase("crap", "carp");
this.replaceMatchCase("(what are|what do)", "water");
}
applyTiaraEmotes(): void {
this.replaceEmotes("38$2");
}
censorSwears(extreme: boolean = false): void {
this.replaceWordMatchCase("fuck", "f*ck");
this.replaceWordMatchCase("bitch", "b*tch");
this.replaceWordMatchCase("shit", "sh*t");
this.replaceWordMatchCase("damn", "d*mn");
this.replaceWordMatchCase("crap", "cr*p");
if (extreme) {
this.replaceMatchCase("whoops", "wh**ps");
this.replaceMatchCase("silly", "s*lly");
this.replaceMatchCase("shoot", "sh**t");
this.replaceMatchCase("fidging", "f*dging");
}
}
} | this.replaceMatchCase("cause", "claws"); | random_line_split |
Quirk.ts |
import { renderHTML } from "../Templates/QuirkField";
import { setCookieBool } from "../CookieManager";
import { Category } from "../Categories/Category";
import { OptionalCheckbox } from "./OptionalCheckbox";
import { selectAllAndCopy } from "../Copy2Clipboard";
export abstract class Quirk {
static inputField: HTMLTextAreaElement;
static textFields: HTMLFieldSetElement;
private readonly name: string;
private shortName: string;
private id: string;
private readonly colorClass: string;
input: string;
private row: HTMLDivElement;
private textArea: HTMLTextAreaElement;
public activeCheckbox: HTMLInputElement;
optionalCheckboxes: Array<OptionalCheckbox>;
protected constructor(name: string, colorClass: string = "") {
this.name = name;
let spaceIndex = this.name.indexOf(" ");
this.shortName = spaceIndex > 0 ? this.name.substr(0, spaceIndex) : name;
this.id = this.shortName.toLocaleLowerCase();
this.optionalCheckboxes = new Array<OptionalCheckbox>();
this.colorClass = colorClass.length < 1 ? this.id : colorClass;
}
public render(category: Category): void {
Quirk.textFields.insertAdjacentHTML('beforeend', renderHTML(this.name, this.id, this.colorClass));
this.row = <HTMLTableRowElement>document.getElementById(this.id + "-row");
this.textArea = this.row.getElementsByTagName("textarea")[0];
this.textArea.onclick = selectAllAndCopy;
// Create toggle checkbox.
this.activeCheckbox = document.createElement("input");
this.activeCheckbox.classList.add("filled-in");
this.activeCheckbox.classList.add("checkbox-" + this.getColorClass());
this.activeCheckbox.type = "checkbox";
this.activeCheckbox.checked = true;
this.activeCheckbox.onchange = () => this.updateVisibility(category);
let td: HTMLTableCellElement = document.createElement("td");
td.insertAdjacentElement('beforeend', this.activeCheckbox);
// Checkbox requires a span element adjacent to it for Materialize's theme to work.
let span = document.createElement("span");
span.insertAdjacentText('beforeend', this.name);
td.insertAdjacentElement('beforeend', span);
let tr: HTMLTableRowElement = document.createElement("tr");
tr.classList.add("waves-effect");
tr.classList.add("waves-" + this.getColorClass());
tr.onclick = () => this.activeCheckbox.click();
tr.insertAdjacentElement('beforeend', td);
let toggleCheckboxSet = category.getMainCheckboxSetElement();
toggleCheckboxSet.insertAdjacentElement('beforeend', tr);
for (let i = 0; i < this.optionalCheckboxes.length; i++) {
this.optionalCheckboxes[i].render(category, this.getID(), this);
}
}
public getID(): string {
return this.id;
}
public setShortName(bruh: string): void {
this.shortName = bruh;
this.id = bruh.toLocaleLowerCase();
}
public getShortName(): string {
return this.shortName;
}
public getColorClass(): string {
return this.colorClass;
}
public getTextAreaElement(): HTMLTextAreaElement {
return this.textArea;
}
updateVisibility(category: Category): void {
this.row.hidden = !this.activeCheckbox.checked;
let optionals = <HTMLCollectionOf<HTMLElement>>document.getElementsByClassName(this.id + "-optional");
for (let i = 0; i < optionals.length; i++) |
let visible = !this.row.hidden;
// Save setting to cookies.
setCookieBool(this.id, visible, 31);
let optionalCheckboxSet: HTMLDivElement = <HTMLDivElement>document.getElementById(category.tabName.toLocaleLowerCase() + "-optional-checkboxes");
if (visible) {
this.update(Quirk.inputField.value);
if (optionalCheckboxSet.hidden && optionals.length > 0) {
optionalCheckboxSet.hidden = false;
}
} else {
// Check if any other optional checkboxes are visible.
for (let i = 0; i < category.optionalCheckboxes.length; i++) {
if (!category.optionalCheckboxes[i].hidden) {
return;
}
}
// Hide the table.
optionalCheckboxSet.hidden = true;
}
}
update(str: string): void {
if (!this.activeCheckbox.checked || str.length < 1) { return; }
this.input = str;
this.quirkify();
this.updateTextField();
}
updateTextField(): void {
this.textArea.value = this.input;
// Auto resize.
Quirk.autoSize(this.textArea);
}
// Dynamically increase the height of a textarea.
static autoSize(element: HTMLTextAreaElement): void {
let minHeight: number = parseInt(window.getComputedStyle(element).getPropertyValue("min-height"));
element.style.height = "auto"; // Lets the element shrink size.
element.style.height = `${Math.max(minHeight, element.scrollHeight)}px`;
}
addCheckbox(label: string, title: string, defaultValue: boolean = false): OptionalCheckbox {
let checkbox: OptionalCheckbox = new OptionalCheckbox(label, title, defaultValue)
this.optionalCheckboxes.push(checkbox);
return checkbox;
}
abstract quirkify(): void;
protected lowerCase(pattern: string = ""): void {
if (pattern.length < 1) {
this.input = this.input.toLocaleLowerCase();
} else {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return match.toLocaleLowerCase();
});
}
}
protected upperCase(pattern: string = ""): void {
if (pattern.length < 1) {
this.input = this.input.toLocaleUpperCase();
} else {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return match.toLocaleUpperCase();
});
}
}
protected prefix(str: string): void {
this.input = str + this.input;
}
protected suffix(str: string): void {
this.input += str;
}
protected replaceString(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "g");
this.input = this.input.replace(reg, replace);
}
protected replaceCaseInsensitive(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, replace);
}
protected replaceMatchCase(pattern: string, replace: string): void {
let reg: RegExp = new RegExp(pattern, "gi");
this.input = this.input.replace(reg, function(match) {
return Quirk.matchCase(replace, match);
});
}
protected replaceWord(pattern: string, replace: string) {
this.replaceString("\\b" + pattern + "\\b", replace);
}
protected replaceWordMatchCase(pattern: string, replace: string) {
this.replaceMatchCase("\\b" + pattern + "\\b", replace);
}
// Function taken from https://stackoverflow.com/a/17265031/6446221.
private static matchCase(text: string, pattern: string): string {
// If the entire text is uppercase then uppercase the whole pattern regardless of lengths.
if (pattern.toUpperCase() === pattern) {
return text.toUpperCase();
}
let result = '';
for (let i = 0; i < text.length; i++) {
let c = text.charAt(i);
let p = pattern.charCodeAt(i);
if (p >= 65 && p < 65 + 26) {
result += c.toUpperCase();
} else {
result += c.toLowerCase();
}
}
return result;
}
randomReplace(pattern: string, replace: string, prob: number): void {
let reg: RegExp = new RegExp(pattern, "g");
this.input = this.input.replace(reg, function(match) {
if (Math.random() <= prob) {
return replace;
}
return match;
});
}
// Troll-specific stuff below.
// $1 - capture group for eyes.
// $2 - capture group for mouth.
replaceEmotes(replace: string): void {
let eyes = "[:;]";
let mouth = "[\\)\\(Dd]";
this.upperCase(`(${eyes})(${mouth})`);
let reg: RegExp = new RegExp(`(${eyes})(${mouth})`, "gi");
this.input = this.input.replace(reg, replace);
}
applyCatPuns(): void {
this.replaceMatchCase("mother", "meowther");
this.replaceMatchCase("for", "fur");
this.replaceMatchCase("pause", "paws");
this.replaceMatchCase("cause", "claws");
this.replaceMatchCase("now", "meow");
this.replaceMatchCase("(per|pre)", "pur");
}
applyFishPuns(): void {
this.replaceMatchCase("kill", "krill");
this.replaceMatchCase("well", "whale");
this.replaceMatchCase("fine", "fin");
this.replaceMatchCase("see", "sea");
this.replaceMatchCase("should", "shoald");
this.replaceMatchCase("kid", "squid");
this.replaceMatchCase("sure", "shore");
this.replaceMatchCase("crap", "carp");
this.replaceMatchCase("(what are|what do)", "water");
}
applyTiaraEmotes(): void {
this.replaceEmotes("38$2");
}
censorSwears(extreme: boolean = false): void {
this.replaceWordMatchCase("fuck", "f*ck");
this.replaceWordMatchCase("bitch", "b*tch");
this.replaceWordMatchCase("shit", "sh*t");
this.replaceWordMatchCase("damn", "d*mn");
this.replaceWordMatchCase("crap", "cr*p");
if (extreme) {
this.replaceMatchCase("whoops", "wh**ps");
this.replaceMatchCase("silly", "s*lly");
this.replaceMatchCase("shoot", "sh**t");
this.replaceMatchCase("fidging", "f*dging");
}
}
}
| {
optionals[i].hidden = !this.activeCheckbox.checked;
} | conditional_block |
main.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# reference links:
# From https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb
# https://github.com/google-research/google-research/blob/master/mutual_information_representation_learning/mirl.ipynb
# https://github.com/yaohungt/Pointwise_Dependency_Neural_Estimation/tree/master/RepreLearn_Shallow
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import itertools
import os
import pickle
import argparse
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
import pandas as pd
from scipy.ndimage import gaussian_filter1d
import seaborn as sns
import tensorflow as tf
from tensorflow.python.ops.parallel_for import gradients
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
import sklearn.linear_model as sk_linear
parser = argparse.ArgumentParser(description='Representation Learning Experiments')
parser.add_argument('--dataset', default='mnist', type=str,
help='cifar10 or mnist')
parser.add_argument('--lr', default=1e-4, type=float,
help='learning rate')
parser.add_argument('--batch_size', default=100, type=int,
help='mini batch size')
parser.add_argument('--smoothing', default=0.01, type=float,
help='label smoothing parameter')
parser.add_argument('--output_dir', type=str, default='./runs',
help='directory where the results will be stored')
args = parser.parse_args()
a, b, c = 0.005, 0.1, 0.9  # adaptive-smoothing schedule constants used by acti_func: peak alpha a, flat (no-smoothing) band [b, c]
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
# learning rate for ours CIFAR10 is 1e-4, otherwise follows below
TFDS_NAME = args.dataset #"cifar10" # mnist or cifar10
NRUNS = 10 #@param { type: "slider", min: 1, max: 20, step: 1}
# parameters for training
if TFDS_NAME == "mnist":
DIMS = 784
elif TFDS_NAME == "cifar10":
DIMS = 3072
LEARNING_RATE = args.lr #1e-5
N_CLASSES = 10
TRAIN_BATCH_SIZE = args.batch_size #64 #@param { type: "slider", min: 64, max: 128, step: 64}
# save results
# RESULT_DIR = '{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)
RESULT_DIR = './runs/{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)
RUN_EXPERIMENTS = True
FEATURE_INPUT = "image"
FEATURE_LABEL = "label"
#slim = tf.contrib.slim
tfb = tfp.bijectors
tfd = tfp.distributions
tfkl = tf.keras.layers
tf.keras.backend.clear_session()
ResultsConfig = collections.namedtuple(
"ResultsConfig", ["nets", "critic", "loss"])
Results = collections.namedtuple(
'Results',
['iterations', 'training_losses', 'testing_losses',
'classification_accuracies', 'singular_values'])
ResultsAdversarial = collections.namedtuple(
"ResultsAdversarial",
["losses_e", "losses_c", "classification_accuracies", "iters"]
)
ResultsSamplingIssues = collections.namedtuple(
"ResultsSamplingIssues", ["mi_true", "nce_estimates_noniid",
"nce_estimates_iid", "nwj_estimates_noniid",
"nwj_estimates_iid"])
def acti_func(x, a, b, c):
# Piecewise-linear schedule for the smoothing weight alpha as a function of the
# predicted probability x: alpha falls from a to 0 over [0, b], stays 0 on [b, c],
# and rises back to a over [c, 1].
x = tf.stop_gradient(x)
alpha = tf.zeros_like(x)
alpha = tf.where(x<=b, -a*x/b+a, alpha)
alpha = tf.where((x>b) & (x<c), 0., alpha)
alpha = tf.where(x>=c, a*x/(1-c)+a*c/(c-1), alpha)
return alpha
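# Example of the schedule with the module-level defaults a, b, c = 0.005, 0.1, 0.9
# (values worked out from the formula above, shown for illustration only):
#   acti_func(0.0, a, b, c)  -> 0.005   (most smoothing for very confident "negative" predictions)
#   acti_func(0.05, a, b, c) -> 0.0025
#   acti_func(0.5, a, b, c)  -> 0.0     (no smoothing inside the [b, c] band)
#   acti_func(1.0, a, b, c)  -> 0.005   (most smoothing for very confident "positive" predictions)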
def convert_to_data_frame(result, exp_name, nets, critic, loss, seed):
|
def apply_default_style(ax):
ax.set_xlim([0, 20001])
ax.get_xaxis().set_major_formatter(
FuncFormatter(lambda x, p: format(int(x/1000), ',')))
ax.set_xlabel("Training steps (in thousands)")
plt.tick_params(top=False, right=False, bottom=False, left=False)
handles, labels = ax.get_legend_handles_labels()
plt.legend(loc="lower right", handles=handles[1:], labels=labels[1:])
def get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):
total_loss = 0
for i in range(0, x_array.shape[0], batch_size):
x_slice = x_array[i:i+batch_size, :dims]
total_loss += x_slice.shape[0] * session.run(loss,
feed_dict={data_ph: x_slice})
return total_loss / x_array.shape[0]
def get_classification_accuracy(session, codes, data_ph, dims):
x_train_mapped = map_data(x_train, session, codes, data_ph, dims)
x_test_mapped = map_data(x_test, session, codes, data_ph, dims)
accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)
return accuracy
def map_data(x_array, session, codes, data_ph, dims, batch_size=512):
x_mapped = []
for i in range(0, x_array.shape[0], batch_size):
x_mapped.append(
session.run(codes,
feed_dict={data_ph: x_array[i:i+batch_size, :dims]}))
return np.concatenate(x_mapped, axis=0)
def reduce_logmeanexp_nodiag(x, axis=None):
batch_size = x.shape[0]
logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(np.inf * tf.ones(batch_size)), axis=axis)
if axis:
num_elem = batch_size - 1.
else:
num_elem = batch_size * (batch_size - 1.)
return logsumexp - tf.math.log(num_elem)
def tuba_lower_bound(scores, log_baseline=None):
if log_baseline is not None:
scores -= log_baseline[:, None]
batch_size = tf.cast(scores.shape[0], tf.float32)
# First term is an expectation over samples from the joint,
# which are the diagonal elmements of the scores matrix.
joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))
# Second term is an expectation over samples from the marginal,
# which are the off-diagonal elements of the scores matrix.
marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))
return 1. + joint_term - marg_term
def nwj_lower_bound(scores):
# equivalent to: tuba_lower_bound(scores, log_baseline=1.)
return tuba_lower_bound(scores - 1.)
@tf.function
def js_fgan_lower_bound(f):
"""Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016)."""
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
n = tf.cast(f.shape[0], tf.float32)
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return first_term - second_term
@tf.function
def infonce_lower_bound(scores):
"""InfoNCE lower bound from van den Oord et al. (2018)."""
nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.reduce_logsumexp(input_tensor=scores, axis=1))
mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll
return mi
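# Editor sketch (assumed usage, names are placeholders): with a separable critic the
# scores matrix is built so that scores[i, j] compares x_i with y_j, e.g.
#   f_x = critic_f(x_batch)                         # [batch, d]
#   g_y = critic_g(y_batch)                         # [batch, d]
#   scores = tf.matmul(f_x, g_y, transpose_b=True)  # [batch, batch]
#   mi_est = infonce_lower_bound(scores)
# The diagonal holds the positive (paired) samples and the off-diagonal entries act
# as negatives; the InfoNCE estimate is capped at log(batch_size).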
@tf.function
def our_lower_bound(scores):
# scores: 128, 128
"""Our lower bound"""
batch_size = tf.cast(scores.shape[0], tf.float32)
joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))
# expectation
scores_sq = scores**2
marg_num = batch_size * (batch_size - 1.)
marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(input_tensor=tf.linalg.diag_part(scores_sq))
marg_term = marg_term / marg_num
# tf.print(joint_term - 0.5*marg_term)
return joint_term - 0.5*marg_term
# nll = tf.reduce_mean(tf.linalg.diag_part(scores) - 0.5 * tf.math.reduce_euclidean_norm(scores, axis=1))
# tf.print(nll)
# mi = tf.math.log(tf.cast(scores.shape[0].value, tf.float32)) + nll
# return mi
@tf.function
def skew_js_fgan_lower_bound(f):
"""skewed js lower bound (true cross entropy)"""
n = tf.cast(f.shape[0], tf.float32)
alpha = 1/n
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return alpha*first_term - (1-alpha)*second_term
@tf.function
def label_smooth_pcc(f):
""" pcc with label smoothing trick"""
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
@tf.function
def predict_smooth_pcc(f):
""" pcc with predictor smoothing trick"""
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
@tf.function
def adap_label_smooth_pcc(f):
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
alpha = acti_func(pre_prob, a, b, c)
new_labels = (1.0 - alpha) * labels + alpha / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(new_labels, pre_prob)
@tf.function
def adap_pred_smooth_pcc(f):
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
# print('pre_prob:',pre_prob)
alpha = acti_func(pre_prob, a, b, c)
pre_prob = (1.0 - alpha) * pre_prob + alpha / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
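# Editor summary of the *_pcc objectives above: the n x n scores matrix f is treated
# as logits of a binary "is this a paired sample?" classifier with tf.eye(n) as the
# target. label_smooth_pcc smooths the targets (1 -> 0.995, 0 -> 0.005 for
# smoothing = 0.01), predict_smooth_pcc smooths the predicted probabilities instead,
# and the adap_* variants pick a per-entry alpha from acti_func based on the
# predicted probability.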
# @title Define the linear evaluation protocol { display-mode: "form" }
def logistic_fit(x_train, y_train, x_test, y_test):
logistic_regressor = sk_linear.LogisticRegression(
solver='saga', multi_class='multinomial', tol=.1, C=10.)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
logistic_regressor.fit(x_train, y_train.ravel())
return logistic_regressor.score(x_test, y_test.ravel())
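# Linear evaluation protocol (sketch): representations are frozen and a multinomial
# logistic regression is fit on top of them, e.g.
#   accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)
# where the *_mapped arrays are encoder outputs (see get_classification_accuracy above).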
# @title Define and load the dataset, check baseline in pixel space { display-mode: "form" }
tf.compat.v1.reset_default_graph()
def map_fn(example):
image = example[FEATURE_INPUT]
image = tf.cast(image, tf.float32) / 255.0
image = tf.reshape(image, [-1]) # Flatten.
label = example[FEATURE_LABEL]
return {FEATURE_INPUT: image, FEATURE_LABEL: label}
def load_data(split):
return (tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split)
.cache()
.map(map_func=map_fn)
.shuffle(1000))
def tfds_to_np(dataset):
features = list(tfds.as_numpy(dataset))
images = np.stack([f[FEATURE_INPUT].ravel() for f in features])
labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])
return images, labels
dataset_train = load_data("train")
dataset_test = load_data("test")
x_train, y_train = tfds_to_np(dataset_train)
x_test, y_test = tfds_to_np(dataset_test)
tf.compat.v1.reset_default_graph()
x_train_noisy = x_train + 0.05 * np.random.randn(*x_train.shape)
x_test_noisy = x_test + 0.05 * np.random.randn(*x_test.shape)
print("Fit on half the pixels: {}. It should be around 0.835.".format(
logistic_fit(x_train_noisy[:, :DIMS//2], y_train,
x_test_noisy[:, :DIMS//2], y_test)))
def processed_train_data(dims, batch_size):
dataset = load_data("train")
dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)
get_next = tf.compat.v1.data.make_one_shot_iterator(dataset_batched).get_next()
features = get_next[FEATURE_INPUT]
labels = get_next[FEATURE_LABEL]
# Martin: where the problem occurs
x_1, x_2 = tf.split(features, [dims, DIMS-dims], axis=-1)
return x_1, x_2, labels
class MLP(tf.keras.Model):
def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):
super(MLP, self).__init__()
self._layers = [tfkl.Dense(dimensions, **dense_kwargs)
for dimensions in layer_dimensions[:-1]]
dense_kwargs_copy = copy.deepcopy(dense_kwargs)
dense_kwargs_copy["activation"] = None
self._layers.append(tfkl.Dense(layer_dimensions[-1], **dense_kwargs_copy))
self._shortcuts = shortcuts
@property
def layers(self):
return self._layers
def __call__(self, inputs):
x = inputs
for layer in self.layers:
x = layer(x) + x if self._shortcuts else layer(x)
return x
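# Note: with shortcuts=True each layer output is added to its input, so the
# residual connection assumes every hidden layer keeps the same width as its
# input.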
# LayerNorm implementation copied from
# https://stackoverflow.com/questions/39095252/fail-to-implement-layer-normalization-with-keras
class LayerNorm(tfkl.Layer):
""" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 """
def __init__(self, scale_initializer='ones', bias_initializer='zeros',
axes=[1,2,3], epsilon=1e-6, **kwargs):
super(LayerNorm, self).__init__(**kwargs)
self.epsilon = epsilon
self.scale_initializer = tf.keras.initializers.get(scale_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.axes = axes
def build(self, input_shape):
self.scale = self.add_weight(shape=(input_shape[-1],),
initializer=self.scale_initializer,
trainable=True,
name='{}_scale'.format(self.name))
self.bias = self.add_weight(shape=(input_shape[-1],),
initializer=self.bias_initializer,
trainable=True,
name='{}_bias'.format(self.name))
self.built = True
def call(self, x, mask=None):
mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)
std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)
norm = (x - mean) * (1/(std + self.epsilon))
return norm * self.scale + self.bias
def compute_output_shape(self, input_shape):
return input_shape
class ConvNet(tf.keras.Sequential):
def __init__(self, channels=64, kernel_size=5, input_dim=DIMS//2, output_dim=100,
activation=tf.nn.relu):
# Note: This works only for the specific data set considered here.
super(ConvNet, self).__init__([
tfkl.Reshape((14, 28, 1), input_shape=(input_dim,)),
tfkl.Conv2D(channels, kernel_size, strides=2,
padding="same", activation=activation),
tfkl.Conv2D(2*channels, kernel_size, strides=2,
padding="same", activation=activation),
LayerNorm(),
tfkl.GlobalAveragePooling2D(),
tfkl.Dense(output_dim),
])
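# The (14, 28, 1) reshape matches DIMS//2 = 392 for flattened 28x28 MNIST
# images split in half; for other datasets (e.g. cifar10) this reshape would
# not apply, which is what the note above refers to.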
from tensorflow_probability.python.internal import tensorshape_util
import tensorflow.compat.v1 as tf1
from tensorflow_probability.python.bijectors import affine_scalar
from tensorflow_probability.python.bijectors import bijector as bijector_lib
# Modified from tensorflow_probability/python/bijectors/real_nvp.py
class RealNVP(bijector_lib.Bijector):
def __init__(self,
num_masked,
shift_and_log_scale_fn=None,
bijector_fn=None,
is_constant_jacobian=False,
validate_args=False,
name=None):
name = name or 'real_nvp'
if num_masked < 0:
raise ValueError('num_masked must be a non-negative integer.')
self._num_masked = num_masked
# At construction time, we don't know input_depth.
self._input_depth = None
if bool(shift_and_log_scale_fn) == bool(bijector_fn):
raise ValueError('Exactly one of `shift_and_log_scale_fn` and '
'`bijector_fn` should be specified.')
if shift_and_log_scale_fn:
def _bijector_fn(x0, input_depth, **condition_kwargs):
shift, log_scale = shift_and_log_scale_fn(x0, input_depth,
**condition_kwargs)
# ** First modification is here.
return affine_scalar.AffineScalar(shift=shift, scale=log_scale)
bijector_fn = _bijector_fn
if validate_args:
bijector_fn = _validate_bijector_fn(bijector_fn)
# Still do this assignment for variable tracking.
self._shift_and_log_scale_fn = shift_and_log_scale_fn
self._bijector_fn = bijector_fn
super(RealNVP, self).__init__(
forward_min_event_ndims=1,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
def _cache_input_depth(self, x):
if self._input_depth is None:
self._input_depth = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
if self._input_depth is None:
raise NotImplementedError(
'Rightmost dimension must be known prior to graph execution.')
if self._num_masked >= self._input_depth:
raise ValueError(
'Number of masked units must be smaller than the event size.')
def _forward(self, x, **condition_kwargs):
self._cache_input_depth(x)
x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]
y1 = self._bijector_fn(x0, self._input_depth - self._num_masked,
**condition_kwargs).forward(x1)
y = tf.concat([x0, y1], axis=-1)
return y
def _inverse(self, y, **condition_kwargs):
self._cache_input_depth(y)
y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]
x1 = self._bijector_fn(y0, self._input_depth - self._num_masked,
**condition_kwargs).inverse(y1)
x = tf.concat([y0, x1], axis=-1)
return x
def _forward_log_det_jacobian(self, x, **condition_kwargs):
self._cache_input_depth(x)
x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]
return self._bijector_fn(x0, self._input_depth - self._num_masked,
**condition_kwargs).forward_log_det_jacobian(
x1, event_ndims=1)
def _inverse_log_det_jacobian(self, y, **condition_kwargs):
self._cache_input_depth(y)
y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]
return self._bijector_fn(y0, self._input_depth - self._num_masked,
**condition_kwargs).inverse_log_det_jacobian(
y1, event_ndims=1)
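# Coupling structure: the first num_masked features x0 pass through unchanged
# and parameterize an affine transform of the remaining features, so the
# Jacobian is triangular and its log-determinant reduces to the sum of the
# log-scales, which is what the two methods above return.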
def real_nvp_default_template(hidden_layers,
shift_only=False,
activation=tf.nn.relu,
name=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs):
with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):
def _fn(x, output_units, **condition_kwargs):
"""Fully connected MLP parameterized via `real_nvp_template`."""
if condition_kwargs:
raise NotImplementedError(
'Conditioning not implemented in the default template.')
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, ...]
reshape_output = lambda x: x[0]
else:
reshape_output = lambda x: x
for units in hidden_layers:
x = tf1.layers.dense(
inputs=x,
units=units,
activation=activation,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
x = tf1.layers.dense(
inputs=x,
units=(1 if shift_only else 2) * output_units,
activation=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
if shift_only:
return reshape_output(x), None
shift, log_scale = tf.split(x, 2, axis=-1)
# ** Here is the second modification.
return reshape_output(shift), 1e-7 + tf.nn.softplus(reshape_output(log_scale))
return tf1.make_template('real_nvp_default_template', _fn)
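# The "second modification" replaces the usual exp(log_scale) with
# 1e-7 + softplus(log_scale), keeping the coupling scale strictly positive and
# bounded away from zero for numerical stability.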
class RealNVPBijector(tf.keras.Model):
def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):
super(RealNVPBijector, self).__init__()
permutations = [np.random.permutation(dimensions)
for _ in range(n_couplings)]
bijectors = []
for permutation in permutations:
bijectors.append(RealNVP(
dimensions // 2,
real_nvp_default_template(hidden_layers, **dense_kwargs)))
bijectors.append(tfb.Permute(permutation))
self._bijector = tfb.Chain(bijectors)
def call(self, inputs):
return self._bijector.forward(inputs)
class InnerProdCritic(tf.keras.Model):
def call(self, x, y):
return tf.matmul(x, y, transpose_b=True)
class BilinearCritic(tf.keras.Model):
def __init__(self, feature_dim=100, **kwargs):
super(BilinearCritic, self).__init__(**kwargs)
self._W = tfkl.Dense(feature_dim, use_bias=False)
def call(self, x, y):
return tf.matmul(x, self._W(y), transpose_b=True)
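# Both critics produce a [batch, batch] score matrix: entry (i, j) scores the
# pair (x_i, y_j), so the diagonal corresponds to positive (jointly drawn)
# pairs and the off-diagonal entries act as negatives for the bounds above.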
# https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb
class ConcatCritic(tf.keras.Model):
def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):
super(ConcatCritic, self).__init__(**kwargs)
# output is scalar score
self._f = MLP([hidden_dim for _ in range(layers)]+[1], False, {"activation": activation})  # honor the activation argument instead of hard-coding "relu"
def call(self, x, y):
batch_size = tf.shape(input=x)[0]
# Tile all possible combinations of x and y
x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))
y_tiled = tf.tile(y[:, None], (1, batch_size, 1))
# xy_pairs is [batch_size * batch_size, x_dim + y_dim]
xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2),
[batch_size * batch_size, -1])
# Compute scores for each x_i, y_j pair.
scores = self._f(xy_pairs)
return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))
class SeparableCritic(tf.keras.Model):
def __init__(self, hidden_dim=100, output_dim=100, layers=1,
activation='relu', **kwargs):
super(SeparableCritic, self).__init__(**kwargs)
self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {"activation": activation})
self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {"activation": activation})
def call(self, x, y):
x_mapped = self._f_x(x)
y_mapped = self._f_y(y)
return tf.matmul(x_mapped, y_mapped, transpose_b=True)
def train(g1,
g2,
critic,
loss_fn,
learning_rate,
batch_size=TRAIN_BATCH_SIZE,
n_iters=15000,
n_evals=15,
compute_jacobian=False,
noise_std=0.0,
data_dimensions=DIMS//2,
n_iter=1,
loss_name='InfoNCE',
):
"""Runs the training loop for a fixed model.
Args:
g1: Function, maps input1 to representation.
g2: Function, maps input2 to representation.
critic: Function, maps two representations to scalar.
loss_fn: Function, mutual information estimator.
learning_rate: Learning rate.
batch_size: Training batch size.
n_iters: Number of optimization iterations.
n_evals: Number of model evaluations.
compute_jacobian: Whether to estimate the singular values of the Jacobian.
noise_std: Standard deviation for the Gaussian noise. Default is 0.0.
data_dimensions: The dimension of the data. By default it's half of the
original data dimension.
Returns:
Returns an instance of the `Results` tuple.
"""
x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)
if noise_std > 0.0:
assert x_1.shape == x_2.shape, "X1 and X2 shapes must agree to add noise!"
noise = noise_std * tf.random.normal(x_1.shape)
x_1 += noise
x_2 += noise
# Compute the representations.
code_1, code_2 = g1(x_1), g2(x_2)
critic_matrix = critic(code_1, code_2)
# Compute the Jacobian of g1 if needed.
if compute_jacobian:
jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)
singular_values = tf.linalg.svd(jacobian, compute_uv=False)
# Optimizer setup.
loss = loss_fn(critic_matrix)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
if loss_name != 'wpc':
optimizer_op = optimizer.minimize(loss)
else:
gvs = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
optimizer_op = optimizer.apply_gradients(capped_gvs)
with tf.compat.v1.Session() as session:
session.run(tf.compat.v1.global_variables_initializer())
# Subgraph for eval (add noise to input if necessary)
data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None, data_dimensions])
data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(input=data_ph))
codes = g1(data_ph_noisy)
training_losses, testing_losses, classification_accuracies, iters, sigmas \
= [], [], [], [], []
# Main training loop.
for iter_n in range(n_iters):
# Evaluate the model performance.
if iter_n % (n_iters // n_evals) == 0:
iters.append(iter_n)
accuracy = get_classification_accuracy(session, codes, data_ph, data_dimensions)
classification_accuracies.append(accuracy)
testing_losses.append(
get_testing_loss(x_test, session, loss, data_ph, data_dimensions))
if compute_jacobian:
sigmas.append(session.run(singular_values))
print("{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}".format(\
n_iter, loss_name, iter_n, accuracy, args.dataset, args.batch_size, args.lr))
# Run one optimization step.
loss_np, _ = session.run([loss, optimizer_op])
training_losses.append(loss_np)
return Results(iterations=iters,
training_losses=training_losses,
testing_losses=testing_losses,
classification_accuracies=classification_accuracies,
singular_values=sigmas)
def run_sweep(nets, critics, loss_fns, exp_name, **kwargs):
"""Runs the sweep across encoder networks, critics, and the estimators."""
grid = itertools.product(nets, critics, loss_fns)
data_frames = []
results_with_singular_values = []
for nets_name, critic_name, loss_name in grid:
print("[New experiment] encoder: {}, critic: {}, loss: {}".format(
nets_name, critic_name, loss_name))
with tf.Graph().as_default():
g1, g2 = nets[nets_name]()
critic = critics[critic_name]()
loss_fn = loss_fns[loss_name]
results_per_run = []
for n in range(NRUNS):
try:
print("{:d}th run, loss: {}".format(n, loss_name))
if loss_name == "drfc" and TFDS_NAME == "cifar10":
results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)
#results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=1e-4, n_iter=n, loss_name=loss_name)
else:
results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)
results_per_run.append(results)
except Exception as ex:
print("Run {} failed! Error: {}".format(n, ex))
for i, result in enumerate(results_per_run):
data_frames.append(convert_to_data_frame(
result, exp_name, nets_name, critic_name, loss_name, i))
if kwargs.get('compute_jacobian', False):
results_with_singular_values.append((
ResultsConfig(nets_name, critic_name, loss_name), results_per_run
))
return {
"df": pd.concat(data_frames),
"singular_values": results_with_singular_values
}
#@title Run experiment or load precomputed results { display-mode: "form" }
def run_all_experiments():
tf.compat.v1.reset_default_graph()
wpc_loss = lambda x: -infonce_lower_bound(x)
cpc_loss = lambda x: -infonce_lower_bound(x)
#nwj_loss = lambda x: -nwj_lower_bound(x)
drfc_loss = lambda x: -our_lower_bound(x)
pcc_loss = lambda x: -js_fgan_lower_bound(x)
skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)
ls_pcc_loss = lambda x: -label_smooth_pcc(x)
pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)
adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)
adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)
loss_fcts = {
# "wpc": wpc_loss,
"pcc": pcc_loss,
# "drfc": drfc_loss,
#"nwj": nwj_loss,
"cpc": cpc_loss,
# "skew_pcc": skew_pcc_loss,
"ls_pcc": ls_pcc_loss,
"prels_pcc": pre_ls_pcc_loss,
"adap_pred_pcc": adap_pred_smooth_pcc_loss,
"adap_label_pcc": adap_label_smooth_pcc_loss
}
kwargs = dict(
shift_only=True,
activation=lambda x: tf.nn.relu(x),
kernel_initializer=tf.compat.v1.initializers.truncated_normal(stddev=0.0001),
bias_initializer='zeros')
nets = {
"realnvp": lambda: (
RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs),
RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs)
)
}
critics = {
"bilinear": lambda: BilinearCritic(feature_dim=DIMS//2),
}
return run_sweep(nets, critics, loss_fcts, "invertible", n_iters=21000, n_evals=21)
if RUN_EXPERIMENTS:
data_invertible = run_all_experiments()["df"]
data_invertible.to_pickle(RESULT_DIR)
else:
os.system("wget -q -N https://storage.googleapis.com/mi_for_rl_files/mi_results.pkl")
data_invertible = pd.read_pickle('mi_results.pkl')
data_invertible = data_invertible[data_invertible.exp_name == "invertible"]
| """Convert results class to a data frame."""
label = "{}, {}, {}".format(nets, critic, loss)
rows = list(
zip(
itertools.repeat(exp_name),
itertools.repeat(nets),
itertools.repeat(critic),
itertools.repeat(loss),
itertools.repeat(seed),
result.iterations,
[-loss for loss in result.testing_losses], # Loss -> bound.
result.classification_accuracies,
itertools.repeat(label)))
df_eval = pd.DataFrame(
rows,
columns=("exp_name", "nets", "Critic", "Estimator",
"run", "iteration", "bound_value", "accuracy", "label"))
df_eval["Estimator"] = df_eval["Estimator"].replace(
to_replace={
"cpc": "$CPC$",
"pcc": "$PCC$",
"drfc": "$D-RFC$",
"wpc": "$WPC$"
})
df_eval["Critic"] = df_eval["Critic"].replace(
to_replace={
"concat": "MLP",
"separable": "Separable",
"innerprod": "Inner product",
"bilinear": "Bilinear"
})
return df_eval | identifier_body |
main.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# reference links:
# From https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb
# https://github.com/google-research/google-research/blob/master/mutual_information_representation_learning/mirl.ipynb
# https://github.com/yaohungt/Pointwise_Dependency_Neural_Estimation/tree/master/RepreLearn_Shallow
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import itertools
import os
import pickle
import argparse
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
import pandas as pd
from scipy.ndimage import gaussian_filter1d
import seaborn as sns
import tensorflow as tf
from tensorflow.python.ops.parallel_for import gradients
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
import sklearn.linear_model as sk_linear
parser = argparse.ArgumentParser(description='Representation Learning Experiments')
parser.add_argument('--dataset', default='mnist', type=str,
help='cifar10 or mnist')
parser.add_argument('--lr', default=1e-4, type=float,
help='learning rate')
parser.add_argument('--batch_size', default=100, type=int,
help='mini batch size')
parser.add_argument('--smoothing', default=0.01, type=float,
help='label smoothing parameter')
parser.add_argument('--output_dir', type=str, default='./runs',
help='directory where the results will be stored')
args = parser.parse_args()
a, b, c = 0.005, 0.1, 0.9
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
# learning rate for ours CIFAR10 is 1e-4, otherwise follows below
TFDS_NAME = args.dataset #"cifar10" # mnist or cifar10
NRUNS = 10 #@param { type: "slider", min: 1, max: 20, step: 1}
# parameters for training
if TFDS_NAME == "mnist":
DIMS = 784
elif TFDS_NAME == "cifar10":
DIMS = 3072
LEARNING_RATE = args.lr #1e-5
N_CLASSES = 10
TRAIN_BATCH_SIZE = args.batch_size #64 #@param { type: "slider", min: 64, max: 128, step: 64}
# save results
# RESULT_DIR = '{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)
RESULT_DIR = './runs/{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)
RUN_EXPERIMENTS = True
FEATURE_INPUT = "image"
FEATURE_LABEL = "label"
#slim = tf.contrib.slim
tfb = tfp.bijectors
tfd = tfp.distributions
tfkl = tf.keras.layers
tf.keras.backend.clear_session()
ResultsConfig = collections.namedtuple(
"ResultsConfig", ["nets", "critic", "loss"])
Results = collections.namedtuple(
'Results',
['iterations', 'training_losses', 'testing_losses',
'classification_accuracies', 'singular_values'])
ResultsAdversarial = collections.namedtuple(
"ResultsAdversarial",
["losses_e", "losses_c", "classification_accuracies", "iters"]
)
ResultsSamplingIssues = collections.namedtuple(
"ResultsSamplingIssues", ["mi_true", "nce_estimates_noniid",
"nce_estimates_iid", "nwj_estimates_noniid",
"nwj_estimates_iid"])
def acti_func(x, a, b, c):
# Piecewise-linear smoothing schedule: alpha = a at x = 0, decreasing to 0 at
# x = b, equal to 0 on [b, c], then increasing back to a at x = 1.
x = tf.stop_gradient(x)
alpha = tf.zeros_like(x)
alpha = tf.where(x<=b, -a*x/b+a, alpha)
alpha = tf.where((x>b) & (x<c), 0., alpha)
alpha = tf.where(x>=c, a*x/(1-c)+a*c/(c-1), alpha)
return alpha
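# With a, b, c = 0.005, 0.1, 0.9 as set above, alpha is 0.005 for predictions
# of exactly 0 or 1, decays linearly to 0 at 0.1 (resp. 0.9), and is 0 in
# between, so only near-saturated predictions receive any smoothing.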
def convert_to_data_frame(result, exp_name, nets, critic, loss, seed):
"""Convert results class to a data frame."""
label = "{}, {}, {}".format(nets, critic, loss)
rows = list(
zip(
itertools.repeat(exp_name),
itertools.repeat(nets),
itertools.repeat(critic),
itertools.repeat(loss),
itertools.repeat(seed),
result.iterations,
[-loss for loss in result.testing_losses], # Loss -> bound.
result.classification_accuracies,
itertools.repeat(label)))
df_eval = pd.DataFrame(
rows,
columns=("exp_name", "nets", "Critic", "Estimator",
"run", "iteration", "bound_value", "accuracy", "label"))
df_eval["Estimator"] = df_eval["Estimator"].replace(
to_replace={
"cpc": "$CPC$",
"pcc": "$PCC$",
"drfc": "$D-RFC$",
"wpc": "$WPC$"
})
df_eval["Critic"] = df_eval["Critic"].replace(
to_replace={
"concat": "MLP",
"separable": "Separable",
"innerprod": "Inner product",
"bilinear": "Bilinear"
})
return df_eval
def apply_default_style(ax):
ax.set_xlim([0, 20001])
ax.get_xaxis().set_major_formatter(
FuncFormatter(lambda x, p: format(int(x/1000), ',')))
ax.set_xlabel("Training steps (in thousands)")
plt.tick_params(top=False, right=False, bottom=False, left=False)
handles, labels = ax.get_legend_handles_labels()
plt.legend(loc="lower right", handles=handles[1:], labels=labels[1:])
def get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):
total_loss = 0
for i in range(0, x_array.shape[0], batch_size):
x_slice = x_array[i:i+batch_size, :dims]
total_loss += x_slice.shape[0] * session.run(loss,
feed_dict={data_ph: x_slice})
return total_loss / x_array.shape[0]
def get_classification_accuracy(session, codes, data_ph, dims):
x_train_mapped = map_data(x_train, session, codes, data_ph, dims)
x_test_mapped = map_data(x_test, session, codes, data_ph, dims)
accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)
return accuracy
def map_data(x_array, session, codes, data_ph, dims, batch_size=512):
x_mapped = []
for i in range(0, x_array.shape[0], batch_size):
x_mapped.append(
session.run(codes,
feed_dict={data_ph: x_array[i:i+batch_size, :dims]}))
return np.concatenate(x_mapped, axis=0)
def reduce_logmeanexp_nodiag(x, axis=None):
batch_size = x.shape[0]
logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(np.inf * tf.ones(batch_size)), axis=axis)
if axis:
num_elem = batch_size - 1.
else:
num_elem = batch_size * (batch_size - 1.)
return logsumexp - tf.math.log(num_elem)
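# The diagonal is masked out with -inf before the logsumexp, so this returns
# the log of the mean of exp(x) over the off-diagonal (negative-pair) entries
# only.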
def tuba_lower_bound(scores, log_baseline=None):
if log_baseline is not None:
scores -= log_baseline[:, None]
batch_size = tf.cast(scores.shape[0], tf.float32)
# First term is an expectation over samples from the joint,
# which are the diagonal elements of the scores matrix.
joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))
# Second term is an expectation over samples from the marginal,
# which are the off-diagonal elements of the scores matrix.
marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))
return 1. + joint_term - marg_term
def nwj_lower_bound(scores):
# equivalent to: tuba_lower_bound(scores, log_baseline=1.)
return tuba_lower_bound(scores - 1.)
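# For reference: TUBA here is 1 + E_p[f] - E_q[exp(f)] (diagonal vs.
# off-diagonal averages), and shifting the scores by 1 gives the NWJ form
# E_p[f] - E_q[exp(f - 1)].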
@tf.function
def js_fgan_lower_bound(f):
"""Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016)."""
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
n = tf.cast(f.shape[0], tf.float32)
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return first_term - second_term
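# This is E_p[-softplus(-f)] - E_q[softplus(f)], i.e. (up to sign) the binary
# cross-entropy of a classifier separating joint samples (diagonal) from
# product-of-marginals samples (off-diagonal); the *_pcc losses below reuse
# this classifier view with an explicit sigmoid + BCE.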
@tf.function
def infonce_lower_bound(scores):
"""InfoNCE lower bound from van den Oord et al. (2018)."""
nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.reduce_logsumexp(input_tensor=scores, axis=1))
mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll
return mi
@tf.function
def our_lower_bound(scores):
"""Our lower bound: E_p[f] - 0.5 * E_q[f^2] over the [batch_size, batch_size] (e.g. 128 x 128) score matrix."""
batch_size = tf.cast(scores.shape[0], tf.float32)
joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))
# expectation
scores_sq = scores**2
marg_num = batch_size * (batch_size - 1.)
marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(input_tensor=tf.linalg.diag_part(scores_sq))
marg_term = marg_term / marg_num
# tf.print(joint_term - 0.5*marg_term)
return joint_term - 0.5*marg_term
# nll = tf.reduce_mean(tf.linalg.diag_part(scores) - 0.5 * tf.math.reduce_euclidean_norm(scores, axis=1))
# tf.print(nll)
# mi = tf.math.log(tf.cast(scores.shape[0].value, tf.float32)) + nll
# return mi
@tf.function
def skew_js_fgan_lower_bound(f):
"""skewed js lower bound (true cross entropy)"""
n = tf.cast(f.shape[0], tf.float32)
alpha = 1/n
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return alpha*first_term - (1-alpha)*second_term
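# With alpha = 1/n the two terms are reweighted so that every one of the n^2
# pairs in the batch contributes equally, which is why the docstring calls
# this the "true cross entropy" variant of the JS/PCC bound.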
@tf.function
def label_smooth_pcc(f):
""" pcc with label smoothing trick"""
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
@tf.function
def predict_smooth_pcc(f):
""" pcc with predictor smoothing trick"""
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
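# Here the smoothing is applied to the predicted probabilities rather than the
# labels, which keeps the sigmoid outputs away from exactly 0 or 1 so the log
# terms in the BCE never saturate.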
@tf.function
def adap_label_smooth_pcc(f):
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
alpha = acti_func(pre_prob, a, b, c)
new_labels = (1.0 - alpha) * labels + alpha / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(new_labels, pre_prob)
@tf.function
def adap_pred_smooth_pcc(f):
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
# print('pre_prob:',pre_prob)
alpha = acti_func(pre_prob, a, b, c)
pre_prob = (1.0 - alpha) * pre_prob + alpha / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
# @title Define the linear evaluation protocol { display-mode: "form" }
def logistic_fit(x_train, y_train, x_test, y_test):
logistic_regressor = sk_linear.LogisticRegression(
solver='saga', multi_class='multinomial', tol=.1, C=10.)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
logistic_regressor.fit(x_train, y_train.ravel())
return logistic_regressor.score(x_test, y_test.ravel())
# @title Define and load the dataset, check baseline in pixel space { display-mode: "form" }
tf.compat.v1.reset_default_graph()
def map_fn(example):
image = example[FEATURE_INPUT]
image = tf.cast(image, tf.float32) / 255.0
image = tf.reshape(image, [-1]) # Flatten.
label = example[FEATURE_LABEL]
return {FEATURE_INPUT: image, FEATURE_LABEL: label}
def load_data(split):
return (tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split)
.cache()
.map(map_func=map_fn)
.shuffle(1000))
def tfds_to_np(dataset):
features = list(tfds.as_numpy(dataset))
images = np.stack([f[FEATURE_INPUT].ravel() for f in features])
labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])
return images, labels
dataset_train = load_data("train")
dataset_test = load_data("test")
x_train, y_train = tfds_to_np(dataset_train)
x_test, y_test = tfds_to_np(dataset_test)
tf.compat.v1.reset_default_graph()
x_train_noisy = x_train + 0.05 * np.random.randn(*x_train.shape)
x_test_noisy = x_test + 0.05 * np.random.randn(*x_test.shape)
print("Fit on half the pixels: {}. It should be around 0.835.".format(
logistic_fit(x_train_noisy[:, :DIMS//2], y_train,
x_test_noisy[:, :DIMS//2], y_test)))
def processed_train_data(dims, batch_size):
dataset = load_data("train")
dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)
get_next = tf.compat.v1.data.make_one_shot_iterator(dataset_batched).get_next()
features = get_next[FEATURE_INPUT]
labels = get_next[FEATURE_LABEL]
# Martin: where the problem occurs
x_1, x_2 = tf.split(features, [dims, DIMS-dims], axis=-1)
return x_1, x_2, labels
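# Each flattened image is split into two "views": x_1 holds the first `dims`
# pixels and x_2 the remaining DIMS - dims pixels (for MNIST with dims =
# DIMS//2 this is roughly the top and bottom half); the bounds above are then
# used to maximize mutual information between the two views.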
class MLP(tf.keras.Model):
def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):
super(MLP, self).__init__()
self._layers = [tfkl.Dense(dimensions, **dense_kwargs)
for dimensions in layer_dimensions[:-1]]
dense_kwargs_copy = copy.deepcopy(dense_kwargs)
dense_kwargs_copy["activation"] = None
self._layers.append(tfkl.Dense(layer_dimensions[-1], **dense_kwargs_copy))
self._shortcuts = shortcuts
@property
def layers(self):
return self._layers
def __call__(self, inputs):
x = inputs
for layer in self.layers:
x = layer(x) + x if self._shortcuts else layer(x)
return x
# LayerNorm implementation copied from
# https://stackoverflow.com/questions/39095252/fail-to-implement-layer-normalization-with-keras
class LayerNorm(tfkl.Layer):
""" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 """
def __init__(self, scale_initializer='ones', bias_initializer='zeros',
axes=[1,2,3], epsilon=1e-6, **kwargs):
super(LayerNorm, self).__init__(**kwargs)
self.epsilon = epsilon
self.scale_initializer = tf.keras.initializers.get(scale_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.axes = axes
def build(self, input_shape):
self.scale = self.add_weight(shape=(input_shape[-1],),
initializer=self.scale_initializer,
trainable=True,
name='{}_scale'.format(self.name))
self.bias = self.add_weight(shape=(input_shape[-1],),
initializer=self.bias_initializer,
trainable=True,
name='{}_bias'.format(self.name))
self.built = True
def call(self, x, mask=None):
mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)
std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)
norm = (x - mean) * (1/(std + self.epsilon))
return norm * self.scale + self.bias
def compute_output_shape(self, input_shape):
return input_shape
class ConvNet(tf.keras.Sequential):
def __init__(self, channels=64, kernel_size=5, input_dim=DIMS//2, output_dim=100,
activation=tf.nn.relu):
# Note: This works only for the specific data set considered here.
super(ConvNet, self).__init__([
tfkl.Reshape((14, 28, 1), input_shape=(input_dim,)),
tfkl.Conv2D(channels, kernel_size, strides=2,
padding="same", activation=activation),
tfkl.Conv2D(2*channels, kernel_size, strides=2,
padding="same", activation=activation),
LayerNorm(),
tfkl.GlobalAveragePooling2D(),
tfkl.Dense(output_dim),
])
from tensorflow_probability.python.internal import tensorshape_util
import tensorflow.compat.v1 as tf1
from tensorflow_probability.python.bijectors import affine_scalar
from tensorflow_probability.python.bijectors import bijector as bijector_lib
# Modified from tensorflow_probability/python/bijectors/real_nvp.py
class RealNVP(bijector_lib.Bijector):
def __init__(self,
num_masked,
shift_and_log_scale_fn=None,
bijector_fn=None,
is_constant_jacobian=False,
validate_args=False,
name=None):
name = name or 'real_nvp'
if num_masked < 0:
|
self._num_masked = num_masked
# At construction time, we don't know input_depth.
self._input_depth = None
if bool(shift_and_log_scale_fn) == bool(bijector_fn):
raise ValueError('Exactly one of `shift_and_log_scale_fn` and '
'`bijector_fn` should be specified.')
if shift_and_log_scale_fn:
def _bijector_fn(x0, input_depth, **condition_kwargs):
shift, log_scale = shift_and_log_scale_fn(x0, input_depth,
**condition_kwargs)
# ** First modification is here.
return affine_scalar.AffineScalar(shift=shift, scale=log_scale)
bijector_fn = _bijector_fn
if validate_args:
bijector_fn = _validate_bijector_fn(bijector_fn)
# Still do this assignment for variable tracking.
self._shift_and_log_scale_fn = shift_and_log_scale_fn
self._bijector_fn = bijector_fn
super(RealNVP, self).__init__(
forward_min_event_ndims=1,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
def _cache_input_depth(self, x):
if self._input_depth is None:
self._input_depth = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
if self._input_depth is None:
raise NotImplementedError(
'Rightmost dimension must be known prior to graph execution.')
if self._num_masked >= self._input_depth:
raise ValueError(
'Number of masked units must be smaller than the event size.')
def _forward(self, x, **condition_kwargs):
self._cache_input_depth(x)
x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]
y1 = self._bijector_fn(x0, self._input_depth - self._num_masked,
**condition_kwargs).forward(x1)
y = tf.concat([x0, y1], axis=-1)
return y
def _inverse(self, y, **condition_kwargs):
self._cache_input_depth(y)
y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]
x1 = self._bijector_fn(y0, self._input_depth - self._num_masked,
**condition_kwargs).inverse(y1)
x = tf.concat([y0, x1], axis=-1)
return x
def _forward_log_det_jacobian(self, x, **condition_kwargs):
self._cache_input_depth(x)
x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]
return self._bijector_fn(x0, self._input_depth - self._num_masked,
**condition_kwargs).forward_log_det_jacobian(
x1, event_ndims=1)
def _inverse_log_det_jacobian(self, y, **condition_kwargs):
self._cache_input_depth(y)
y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]
return self._bijector_fn(y0, self._input_depth - self._num_masked,
**condition_kwargs).inverse_log_det_jacobian(
y1, event_ndims=1)
def real_nvp_default_template(hidden_layers,
shift_only=False,
activation=tf.nn.relu,
name=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs):
with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):
def _fn(x, output_units, **condition_kwargs):
"""Fully connected MLP parameterized via `real_nvp_template`."""
if condition_kwargs:
raise NotImplementedError(
'Conditioning not implemented in the default template.')
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, ...]
reshape_output = lambda x: x[0]
else:
reshape_output = lambda x: x
for units in hidden_layers:
x = tf1.layers.dense(
inputs=x,
units=units,
activation=activation,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
x = tf1.layers.dense(
inputs=x,
units=(1 if shift_only else 2) * output_units,
activation=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
if shift_only:
return reshape_output(x), None
shift, log_scale = tf.split(x, 2, axis=-1)
# ** Here is the second modification.
return reshape_output(shift), 1e-7 + tf.nn.softplus(reshape_output(log_scale))
return tf1.make_template('real_nvp_default_template', _fn)
class RealNVPBijector(tf.keras.Model):
def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):
super(RealNVPBijector, self).__init__()
permutations = [np.random.permutation(dimensions)
for _ in range(n_couplings)]
bijectors = []
for permutation in permutations:
bijectors.append(RealNVP(
dimensions // 2,
real_nvp_default_template(hidden_layers, **dense_kwargs)))
bijectors.append(tfb.Permute(permutation))
self._bijector = tfb.Chain(bijectors)
def call(self, inputs):
return self._bijector.forward(inputs)
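# The encoder stacks `n_couplings` RealNVP coupling layers with a random
# feature permutation after each one; because the whole map is invertible, the
# representation cannot discard input information, which is what the
# "invertible" experiment name below refers to.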
class InnerProdCritic(tf.keras.Model):
def call(self, x, y):
return tf.matmul(x, y, transpose_b=True)
class BilinearCritic(tf.keras.Model):
def __init__(self, feature_dim=100, **kwargs):
super(BilinearCritic, self).__init__(**kwargs)
self._W = tfkl.Dense(feature_dim, use_bias=False)
def call(self, x, y):
return tf.matmul(x, self._W(y), transpose_b=True)
# https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb
class ConcatCritic(tf.keras.Model):
def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):
super(ConcatCritic, self).__init__(**kwargs)
# output is scalar score
self._f = MLP([hidden_dim for _ in range(layers)]+[1], False, {"activation": activation})  # honor the activation argument instead of hard-coding "relu"
def call(self, x, y):
batch_size = tf.shape(input=x)[0]
# Tile all possible combinations of x and y
x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))
y_tiled = tf.tile(y[:, None], (1, batch_size, 1))
# xy_pairs is [batch_size * batch_size, x_dim + y_dim]
xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2),
[batch_size * batch_size, -1])
# Compute scores for each x_i, y_j pair.
scores = self._f(xy_pairs)
return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))
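# This critic scores all batch_size^2 (x_i, y_j) combinations with a single
# MLP on concatenated pairs, so memory and compute grow quadratically with the
# batch size, in contrast to the inner-product, bilinear and separable critics
# that score pairs via matrix products.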
class SeparableCritic(tf.keras.Model):
def __init__(self, hidden_dim=100, output_dim=100, layers=1,
activation='relu', **kwargs):
super(SeparableCritic, self).__init__(**kwargs)
self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {"activation": activation})
self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {"activation": activation})
def call(self, x, y):
x_mapped = self._f_x(x)
y_mapped = self._f_y(y)
return tf.matmul(x_mapped, y_mapped, transpose_b=True)
def train(g1,
g2,
critic,
loss_fn,
learning_rate,
batch_size=TRAIN_BATCH_SIZE,
n_iters=15000,
n_evals=15,
compute_jacobian=False,
noise_std=0.0,
data_dimensions=DIMS//2,
n_iter=1,
loss_name='InfoNCE',
):
"""Runs the training loop for a fixed model.
Args:
g1: Function, maps input1 to representation.
g2: Function, maps input2 to representation.
critic: Function, maps two representations to scalar.
loss_fn: Function, mutual information estimator.
learning_rate: Learning rate.
batch_size: Training batch size.
n_iters: Number of optimization iterations.
n_evals: Number of model evaluations.
compute_jacobian: Whether to estimate the singular values of the Jacobian.
noise_std: Standard deviation for the Gaussian noise. Default is 0.0.
data_dimensions: The dimension of the data. By default it's half of the
original data dimension.
Returns:
Returns an instance of the `Results` tuple.
"""
x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)
if noise_std > 0.0:
assert x_1.shape == x_2.shape, "X1 and X2 shapes must agree to add noise!"
noise = noise_std * tf.random.normal(x_1.shape)
x_1 += noise
x_2 += noise
# Compute the representations.
code_1, code_2 = g1(x_1), g2(x_2)
critic_matrix = critic(code_1, code_2)
# Compute the Jacobian of g1 if needed.
if compute_jacobian:
jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)
singular_values = tf.linalg.svd(jacobian, compute_uv=False)
# Optimizer setup.
loss = loss_fn(critic_matrix)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
if loss_name != 'wpc':
optimizer_op = optimizer.minimize(loss)
else:
gvs = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
optimizer_op = optimizer.apply_gradients(capped_gvs)
with tf.compat.v1.Session() as session:
session.run(tf.compat.v1.global_variables_initializer())
# Subgraph for eval (add noise to input if necessary)
data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None, data_dimensions])
data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(input=data_ph))
codes = g1(data_ph_noisy)
training_losses, testing_losses, classification_accuracies, iters, sigmas \
= [], [], [], [], []
# Main training loop.
for iter_n in range(n_iters):
# Evaluate the model performance.
if iter_n % (n_iters // n_evals) == 0:
iters.append(iter_n)
accuracy = get_classification_accuracy(session, codes, data_ph, data_dimensions)
classification_accuracies.append(accuracy)
testing_losses.append(
get_testing_loss(x_test, session, loss, data_ph, data_dimensions))
if compute_jacobian:
sigmas.append(session.run(singular_values))
print("{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}".format(\
n_iter, loss_name, iter_n, accuracy, args.dataset, args.batch_size, args.lr))
# Run one optimization step.
loss_np, _ = session.run([loss, optimizer_op])
training_losses.append(loss_np)
return Results(iterations=iters,
training_losses=training_losses,
testing_losses=testing_losses,
classification_accuracies=classification_accuracies,
singular_values=sigmas)
def run_sweep(nets, critics, loss_fns, exp_name, **kwargs):
"""Runs the sweep across encoder networks, critics, and the estimators."""
grid = itertools.product(nets, critics, loss_fns)
data_frames = []
results_with_singular_values = []
for nets_name, critic_name, loss_name in grid:
print("[New experiment] encoder: {}, critic: {}, loss: {}".format(
nets_name, critic_name, loss_name))
with tf.Graph().as_default():
g1, g2 = nets[nets_name]()
critic = critics[critic_name]()
loss_fn = loss_fns[loss_name]
results_per_run = []
for n in range(NRUNS):
try:
print("{:d}th run, loss: {}".format(n, loss_name))
if loss_name == "drfc" and TFDS_NAME == "cifar10":
results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)
#results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=1e-4, n_iter=n, loss_name=loss_name)
else:
results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)
results_per_run.append(results)
except Exception as ex:
print("Run {} failed! Error: {}".format(n, ex))
for i, result in enumerate(results_per_run):
data_frames.append(convert_to_data_frame(
result, exp_name, nets_name, critic_name, loss_name, i))
if kwargs.get('compute_jacobian', False):
results_with_singular_values.append((
ResultsConfig(nets_name, critic_name, loss_name), results_per_run
))
return {
"df": pd.concat(data_frames),
"singular_values": results_with_singular_values
}
#@title Run experiment or load precomputed results { display-mode: "form" }
def run_all_experiments():
tf.compat.v1.reset_default_graph()
wpc_loss = lambda x: -infonce_lower_bound(x)
cpc_loss = lambda x: -infonce_lower_bound(x)
#nwj_loss = lambda x: -nwj_lower_bound(x)
drfc_loss = lambda x: -our_lower_bound(x)
pcc_loss = lambda x: -js_fgan_lower_bound(x)
skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)
ls_pcc_loss = lambda x: -label_smooth_pcc(x)
pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)
adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)
adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)
loss_fcts = {
# "wpc": wpc_loss,
"pcc": pcc_loss,
# "drfc": drfc_loss,
#"nwj": nwj_loss,
"cpc": cpc_loss,
# "skew_pcc": skew_pcc_loss,
"ls_pcc": ls_pcc_loss,
"prels_pcc": pre_ls_pcc_loss,
"adap_pred_pcc": adap_pred_smooth_pcc_loss,
"adap_label_pcc": adap_label_smooth_pcc_loss
}
kwargs = dict(
shift_only=True,
activation=lambda x: tf.nn.relu(x),
kernel_initializer=tf.compat.v1.initializers.truncated_normal(stddev=0.0001),
bias_initializer='zeros')
nets = {
"realnvp": lambda: (
RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs),
RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs)
)
}
critics = {
"bilinear": lambda: BilinearCritic(feature_dim=DIMS//2),
}
return run_sweep(nets, critics, loss_fcts, "invertible", n_iters=21000, n_evals=21)
if RUN_EXPERIMENTS:
data_invertible = run_all_experiments()["df"]
data_invertible.to_pickle(RESULT_DIR)
else:
os.system("wget -q -N https://storage.googleapis.com/mi_for_rl_files/mi_results.pkl")
data_invertible = pd.read_pickle('mi_results.pkl')
data_invertible = data_invertible[data_invertible.exp_name == "invertible"]
| raise ValueError('num_masked must be a non-negative integer.') | conditional_block |
main.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# reference links:
# From https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb
# https://github.com/google-research/google-research/blob/master/mutual_information_representation_learning/mirl.ipynb
# https://github.com/yaohungt/Pointwise_Dependency_Neural_Estimation/tree/master/RepreLearn_Shallow
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import itertools
import os
import pickle
import argparse
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
import pandas as pd
from scipy.ndimage import gaussian_filter1d
import seaborn as sns
import tensorflow as tf
from tensorflow.python.ops.parallel_for import gradients
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
import sklearn.linear_model as sk_linear
parser = argparse.ArgumentParser(description='Representation Learning Experiments')
parser.add_argument('--dataset', default='mnist', type=str,
help='cifar10 or mnist')
parser.add_argument('--lr', default=1e-4, type=float,
help='learning rate')
parser.add_argument('--batch_size', default=100, type=int,
help='mini batch size')
parser.add_argument('--smoothing', default=0.01, type=float,
help='label smoothing parameter')
parser.add_argument('--output_dir', type=str, default='./runs',
help='directory where the results will be stored')
args = parser.parse_args()
a, b, c = 0.005, 0.1, 0.9
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
# learning rate for ours CIFAR10 is 1e-4, otherwise follows below
TFDS_NAME = args.dataset #"cifar10" # mnist or cifar10
NRUNS = 10 #@param { type: "slider", min: 1, max: 20, step: 1}
# parameters for training
if TFDS_NAME == "mnist":
DIMS = 784
elif TFDS_NAME == "cifar10":
DIMS = 3072
LEARNING_RATE = args.lr #1e-5
N_CLASSES = 10
TRAIN_BATCH_SIZE = args.batch_size #64 #@param { type: "slider", min: 64, max: 128, step: 64}
# save results
# RESULT_DIR = '{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)
RESULT_DIR = './runs/{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)
RUN_EXPERIMENTS = True
FEATURE_INPUT = "image"
FEATURE_LABEL = "label"
#slim = tf.contrib.slim
tfb = tfp.bijectors
tfd = tfp.distributions
tfkl = tf.keras.layers
tf.keras.backend.clear_session()
ResultsConfig = collections.namedtuple(
"ResultsConfig", ["nets", "critic", "loss"])
Results = collections.namedtuple(
'Results',
['iterations', 'training_losses', 'testing_losses',
'classification_accuracies', 'singular_values'])
ResultsAdversarial = collections.namedtuple(
"ResultsAdversarial",
["losses_e", "losses_c", "classification_accuracies", "iters"]
)
ResultsSamplingIssues = collections.namedtuple(
"ResultsSamplingIssues", ["mi_true", "nce_estimates_noniid",
"nce_estimates_iid", "nwj_estimates_noniid",
"nwj_estimates_iid"])
def acti_func(x, a, b, c):
# Piecewise-linear smoothing schedule: alpha = a at x = 0, decreasing to 0 at
# x = b, equal to 0 on [b, c], then increasing back to a at x = 1.
x = tf.stop_gradient(x)
alpha = tf.zeros_like(x)
alpha = tf.where(x<=b, -a*x/b+a, alpha)
alpha = tf.where((x>b) & (x<c), 0., alpha)
alpha = tf.where(x>=c, a*x/(1-c)+a*c/(c-1), alpha)
return alpha
def convert_to_data_frame(result, exp_name, nets, critic, loss, seed):
"""Convert results class to a data frame."""
label = "{}, {}, {}".format(nets, critic, loss)
rows = list(
zip(
itertools.repeat(exp_name),
itertools.repeat(nets),
itertools.repeat(critic),
itertools.repeat(loss),
itertools.repeat(seed),
result.iterations,
[-loss for loss in result.testing_losses], # Loss -> bound.
result.classification_accuracies,
itertools.repeat(label)))
df_eval = pd.DataFrame(
rows,
columns=("exp_name", "nets", "Critic", "Estimator",
"run", "iteration", "bound_value", "accuracy", "label"))
df_eval["Estimator"] = df_eval["Estimator"].replace(
to_replace={
"cpc": "$CPC$",
"pcc": "$PCC$",
"drfc": "$D-RFC$",
"wpc": "$WPC$"
})
df_eval["Critic"] = df_eval["Critic"].replace(
to_replace={
"concat": "MLP",
"separable": "Separable",
"innerprod": "Inner product",
"bilinear": "Bilinear"
})
return df_eval
def apply_default_style(ax):
ax.set_xlim([0, 20001])
ax.get_xaxis().set_major_formatter(
FuncFormatter(lambda x, p: format(int(x/1000), ',')))
ax.set_xlabel("Training steps (in thousands)")
plt.tick_params(top=False, right=False, bottom=False, left=False)
handles, labels = ax.get_legend_handles_labels()
plt.legend(loc="lower right", handles=handles[1:], labels=labels[1:])
def get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):
total_loss = 0
for i in range(0, x_array.shape[0], batch_size):
x_slice = x_array[i:i+batch_size, :dims]
total_loss += x_slice.shape[0] * session.run(loss,
feed_dict={data_ph: x_slice})
return total_loss / x_array.shape[0]
def get_classification_accuracy(session, codes, data_ph, dims):
x_train_mapped = map_data(x_train, session, codes, data_ph, dims)
x_test_mapped = map_data(x_test, session, codes, data_ph, dims)
accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)
return accuracy
def map_data(x_array, session, codes, data_ph, dims, batch_size=512):
x_mapped = []
for i in range(0, x_array.shape[0], batch_size):
x_mapped.append(
session.run(codes,
feed_dict={data_ph: x_array[i:i+batch_size, :dims]}))
return np.concatenate(x_mapped, axis=0)
def reduce_logmeanexp_nodiag(x, axis=None):
batch_size = x.shape[0]
logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(np.inf * tf.ones(batch_size)), axis=axis)
if axis:
num_elem = batch_size - 1.
else:
num_elem = batch_size * (batch_size - 1.)
return logsumexp - tf.math.log(num_elem)
def tuba_lower_bound(scores, log_baseline=None):
if log_baseline is not None:
scores -= log_baseline[:, None]
batch_size = tf.cast(scores.shape[0], tf.float32)
# First term is an expectation over samples from the joint,
# which are the diagonal elements of the scores matrix.
joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))
# Second term is an expectation over samples from the marginal,
# which are the off-diagonal elements of the scores matrix.
marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))
return 1. + joint_term - marg_term
def nwj_lower_bound(scores):
# equivalent to: tuba_lower_bound(scores, log_baseline=1.)
return tuba_lower_bound(scores - 1.)
@tf.function
def js_fgan_lower_bound(f):
"""Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016)."""
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
n = tf.cast(f.shape[0], tf.float32)
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return first_term - second_term
@tf.function
def infonce_lower_bound(scores):
"""InfoNCE lower bound from van den Oord et al. (2018)."""
nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.reduce_logsumexp(input_tensor=scores, axis=1))
mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll
return mi
@tf.function
def our_lower_bound(scores):
"""Our lower bound: E_p[f] - 0.5 * E_q[f^2] over the [batch_size, batch_size] (e.g. 128 x 128) score matrix."""
batch_size = tf.cast(scores.shape[0], tf.float32)
joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))
# expectation
scores_sq = scores**2
marg_num = batch_size * (batch_size - 1.)
marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(input_tensor=tf.linalg.diag_part(scores_sq))
marg_term = marg_term / marg_num
# tf.print(joint_term - 0.5*marg_term)
return joint_term - 0.5*marg_term
# nll = tf.reduce_mean(tf.linalg.diag_part(scores) - 0.5 * tf.math.reduce_euclidean_norm(scores, axis=1))
# tf.print(nll)
# mi = tf.math.log(tf.cast(scores.shape[0].value, tf.float32)) + nll
# return mi
@tf.function
def skew_js_fgan_lower_bound(f):
"""skewed js lower bound (true cross entropy)"""
n = tf.cast(f.shape[0], tf.float32)
alpha = 1/n
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return alpha*first_term - (1-alpha)*second_term
@tf.function
def label_smooth_pcc(f):
""" pcc with label smoothing trick"""
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
@tf.function
def predict_smooth_pcc(f):
""" pcc with predictor smoothing trick"""
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
@tf.function
def adap_label_smooth_pcc(f):
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
alpha = acti_func(pre_prob, a, b, c)
new_labels = (1.0 - alpha) * labels + alpha / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(new_labels, pre_prob)
@tf.function
def adap_pred_smooth_pcc(f):
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
# print('pre_prob:',pre_prob)
alpha = acti_func(pre_prob, a, b, c)
pre_prob = (1.0 - alpha) * pre_prob + alpha / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
# @title Define the linear evaluation protocol { display-mode: "form" }
def logistic_fit(x_train, y_train, x_test, y_test):
logistic_regressor = sk_linear.LogisticRegression(
solver='saga', multi_class='multinomial', tol=.1, C=10.)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
logistic_regressor.fit(x_train, y_train.ravel())
return logistic_regressor.score(x_test, y_test.ravel())
# @title Define and load the dataset, check baseline in pixel space { display-mode: "form" }
tf.compat.v1.reset_default_graph()
def map_fn(example):
image = example[FEATURE_INPUT]
image = tf.cast(image, tf.float32) / 255.0
image = tf.reshape(image, [-1]) # Flatten.
label = example[FEATURE_LABEL]
return {FEATURE_INPUT: image, FEATURE_LABEL: label}
def load_data(split):
return (tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split)
.cache()
.map(map_func=map_fn)
.shuffle(1000))
def tfds_to_np(dataset):
features = list(tfds.as_numpy(dataset))
images = np.stack([f[FEATURE_INPUT].ravel() for f in features])
labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])
return images, labels
dataset_train = load_data("train")
dataset_test = load_data("test")
x_train, y_train = tfds_to_np(dataset_train)
x_test, y_test = tfds_to_np(dataset_test)
tf.compat.v1.reset_default_graph()
x_train_noisy = x_train + 0.05 * np.random.randn(*x_train.shape)
x_test_noisy = x_test + 0.05 * np.random.randn(*x_test.shape)
print("Fit on half the pixels: {}. It should be around 0.835.".format(
logistic_fit(x_train_noisy[:, :DIMS//2], y_train,
x_test_noisy[:, :DIMS//2], y_test)))
def processed_train_data(dims, batch_size):
dataset = load_data("train")
dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)
get_next = tf.compat.v1.data.make_one_shot_iterator(dataset_batched).get_next()
features = get_next[FEATURE_INPUT]
labels = get_next[FEATURE_LABEL]
# Martin: where the problem occurs
x_1, x_2 = tf.split(features, [dims, DIMS-dims], axis=-1)
return x_1, x_2, labels
class MLP(tf.keras.Model):
def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):
super(MLP, self).__init__()
self._layers = [tfkl.Dense(dimensions, **dense_kwargs)
for dimensions in layer_dimensions[:-1]]
dense_kwargs_copy = copy.deepcopy(dense_kwargs)
dense_kwargs_copy["activation"] = None
self._layers.append(tfkl.Dense(layer_dimensions[-1], **dense_kwargs_copy))
self._shortcuts = shortcuts
@property
def layers(self):
return self._layers
def __call__(self, inputs):
x = inputs
for layer in self.layers:
x = layer(x) + x if self._shortcuts else layer(x)
return x
# LayerNorm implementation copied from
# https://stackoverflow.com/questions/39095252/fail-to-implement-layer-normalization-with-keras
class LayerNorm(tfkl.Layer):
""" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 """
def __init__(self, scale_initializer='ones', bias_initializer='zeros',
axes=[1,2,3], epsilon=1e-6, **kwargs):
super(LayerNorm, self).__init__(**kwargs)
self.epsilon = epsilon
self.scale_initializer = tf.keras.initializers.get(scale_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.axes = axes
def build(self, input_shape):
self.scale = self.add_weight(shape=(input_shape[-1],),
initializer=self.scale_initializer,
trainable=True,
name='{}_scale'.format(self.name))
self.bias = self.add_weight(shape=(input_shape[-1],),
initializer=self.bias_initializer,
trainable=True,
name='{}_bias'.format(self.name))
self.built = True
def call(self, x, mask=None):
mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)
std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)
norm = (x - mean) * (1/(std + self.epsilon))
return norm * self.scale + self.bias
def | (self, input_shape):
return input_shape
class ConvNet(tf.keras.Sequential):
def __init__(self, channels=64, kernel_size=5, input_dim=DIMS//2, output_dim=100,
activation=tf.nn.relu):
# Note: This works only for the specific data set considered here.
super(ConvNet, self).__init__([
tfkl.Reshape((14, 28, 1), input_shape=(input_dim,)),
tfkl.Conv2D(channels, kernel_size, strides=2,
padding="same", activation=activation),
tfkl.Conv2D(2*channels, kernel_size, strides=2,
padding="same", activation=activation),
LayerNorm(),
tfkl.GlobalAveragePooling2D(),
tfkl.Dense(output_dim),
])
from tensorflow_probability.python.internal import tensorshape_util
import tensorflow.compat.v1 as tf1
from tensorflow_probability.python.bijectors import affine_scalar
from tensorflow_probability.python.bijectors import bijector as bijector_lib
# Modified from tensorflow_probability/python/bijectors/real_nvp.py
class RealNVP(bijector_lib.Bijector):
def __init__(self,
num_masked,
shift_and_log_scale_fn=None,
bijector_fn=None,
is_constant_jacobian=False,
validate_args=False,
name=None):
name = name or 'real_nvp'
if num_masked < 0:
raise ValueError('num_masked must be a non-negative integer.')
self._num_masked = num_masked
# At construction time, we don't know input_depth.
self._input_depth = None
if bool(shift_and_log_scale_fn) == bool(bijector_fn):
raise ValueError('Exactly one of `shift_and_log_scale_fn` and '
'`bijector_fn` should be specified.')
if shift_and_log_scale_fn:
def _bijector_fn(x0, input_depth, **condition_kwargs):
shift, log_scale = shift_and_log_scale_fn(x0, input_depth,
**condition_kwargs)
# ** First modification is here.
return affine_scalar.AffineScalar(shift=shift, scale=log_scale)
bijector_fn = _bijector_fn
if validate_args:
bijector_fn = _validate_bijector_fn(bijector_fn)
# Still do this assignment for variable tracking.
self._shift_and_log_scale_fn = shift_and_log_scale_fn
self._bijector_fn = bijector_fn
super(RealNVP, self).__init__(
forward_min_event_ndims=1,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
def _cache_input_depth(self, x):
if self._input_depth is None:
self._input_depth = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
if self._input_depth is None:
raise NotImplementedError(
'Rightmost dimension must be known prior to graph execution.')
if self._num_masked >= self._input_depth:
raise ValueError(
'Number of masked units must be smaller than the event size.')
def _forward(self, x, **condition_kwargs):
self._cache_input_depth(x)
x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]
y1 = self._bijector_fn(x0, self._input_depth - self._num_masked,
**condition_kwargs).forward(x1)
y = tf.concat([x0, y1], axis=-1)
return y
def _inverse(self, y, **condition_kwargs):
self._cache_input_depth(y)
y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]
x1 = self._bijector_fn(y0, self._input_depth - self._num_masked,
**condition_kwargs).inverse(y1)
x = tf.concat([y0, x1], axis=-1)
return x
def _forward_log_det_jacobian(self, x, **condition_kwargs):
self._cache_input_depth(x)
x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]
return self._bijector_fn(x0, self._input_depth - self._num_masked,
**condition_kwargs).forward_log_det_jacobian(
x1, event_ndims=1)
def _inverse_log_det_jacobian(self, y, **condition_kwargs):
self._cache_input_depth(y)
y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]
return self._bijector_fn(y0, self._input_depth - self._num_masked,
**condition_kwargs).inverse_log_det_jacobian(
y1, event_ndims=1)
def real_nvp_default_template(hidden_layers,
shift_only=False,
activation=tf.nn.relu,
name=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs):
with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):
def _fn(x, output_units, **condition_kwargs):
"""Fully connected MLP parameterized via `real_nvp_template`."""
if condition_kwargs:
raise NotImplementedError(
'Conditioning not implemented in the default template.')
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, ...]
reshape_output = lambda x: x[0]
else:
reshape_output = lambda x: x
for units in hidden_layers:
x = tf1.layers.dense(
inputs=x,
units=units,
activation=activation,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
x = tf1.layers.dense(
inputs=x,
units=(1 if shift_only else 2) * output_units,
activation=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
if shift_only:
return reshape_output(x), None
shift, log_scale = tf.split(x, 2, axis=-1)
# ** Here is the second modification.
return reshape_output(shift), 1e-7 + tf.nn.softplus(reshape_output(log_scale))
return tf1.make_template('real_nvp_default_template', _fn)
class RealNVPBijector(tf.keras.Model):
def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):
super(RealNVPBijector, self).__init__()
permutations = [np.random.permutation(dimensions)
for _ in range(n_couplings)]
bijectors = []
for permutation in permutations:
bijectors.append(RealNVP(
dimensions // 2,
real_nvp_default_template(hidden_layers, **dense_kwargs)))
bijectors.append(tfb.Permute(permutation))
self._bijector = tfb.Chain(bijectors)
def call(self, inputs):
return self._bijector.forward(inputs)
class InnerProdCritic(tf.keras.Model):
def call(self, x, y):
return tf.matmul(x, y, transpose_b=True)
class BilinearCritic(tf.keras.Model):
def __init__(self, feature_dim=100, **kwargs):
super(BilinearCritic, self).__init__(**kwargs)
self._W = tfkl.Dense(feature_dim, use_bias=False)
def call(self, x, y):
return tf.matmul(x, self._W(y), transpose_b=True)
# https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb
class ConcatCritic(tf.keras.Model):
def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):
super(ConcatCritic, self).__init__(**kwargs)
# output is scalar score
self._f = MLP([hidden_dim for _ in range(layers)]+[1], False, {"activation": "relu"})
def call(self, x, y):
batch_size = tf.shape(input=x)[0]
# Tile all possible combinations of x and y
x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))
y_tiled = tf.tile(y[:, None], (1, batch_size, 1))
# xy is [batch_size * batch_size, x_dim + y_dim]
xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2),
[batch_size * batch_size, -1])
# Compute scores for each x_i, y_j pair.
scores = self._f(xy_pairs)
return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))
class SeparableCritic(tf.keras.Model):
def __init__(self, hidden_dim=100, output_dim=100, layers=1,
activation='relu', **kwargs):
super(SeparableCritic, self).__init__(**kwargs)
self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {"activation": activation})
self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {"activation": activation})
def call(self, x, y):
x_mapped = self._f_x(x)
y_mapped = self._f_y(y)
return tf.matmul(x_mapped, y_mapped, transpose_b=True)
def train(g1,
g2,
critic,
loss_fn,
learning_rate,
batch_size=TRAIN_BATCH_SIZE,
n_iters=15000,
n_evals=15,
compute_jacobian=False,
noise_std=0.0,
data_dimensions=DIMS//2,
n_iter=1,
loss_name='InfoNCE',
):
"""Runs the training loop for a fixed model.
Args:
g1: Function, maps input1 to representation.
g2: Function, maps input2 to representation.
critic: Function, maps two representations to scalar.
loss_fn: Function, mutual information estimator.
learning_rate: Learning rate.
batch_size: Training batch size.
n_iters: Number of optimization iterations.
n_evals: Number of model evaluations.
compute_jacobian: Whether to estimate the singular values of the Jacobian.
noise_std: Standard deviation for the Gaussian noise. Default is 0.0.
data_dimensions: The dimension of the data. By default it's half of the
original data dimension.
Returns:
    Returns an instance of the `Results` tuple.
"""
x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)
if noise_std > 0.0:
assert x_1.shape == x_2.shape, "X1 and X2 shapes must agree to add noise!"
noise = noise_std * tf.random.normal(x_1.shape)
x_1 += noise
x_2 += noise
# Compute the representations.
code_1, code_2 = g1(x_1), g2(x_2)
critic_matrix = critic(code_1, code_2)
# Compute the Jacobian of g1 if needed.
if compute_jacobian:
jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)
singular_values = tf.linalg.svd(jacobian, compute_uv=False)
# Optimizer setup.
loss = loss_fn(critic_matrix)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
if not loss_name == 'wpc':
optimizer_op = optimizer.minimize(loss)
else:
gvs = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
optimizer_op = optimizer.apply_gradients(capped_gvs)
with tf.compat.v1.Session() as session:
session.run(tf.compat.v1.global_variables_initializer())
# Subgraph for eval (add noise to input if necessary)
data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None, data_dimensions])
data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(input=data_ph))
codes = g1(data_ph_noisy)
training_losses, testing_losses, classification_accuracies, iters, sigmas \
= [], [], [], [], []
# Main training loop.
for iter_n in range(n_iters):
# Evaluate the model performance.
if iter_n % (n_iters // n_evals) == 0:
iters.append(iter_n)
accuracy = get_classification_accuracy(session, codes, data_ph, data_dimensions)
classification_accuracies.append(accuracy)
testing_losses.append(
get_testing_loss(x_test, session, loss, data_ph, data_dimensions))
if compute_jacobian:
sigmas.append(session.run(singular_values))
print("{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}".format(\
n_iter, loss_name, iter_n, accuracy, args.dataset, args.batch_size, args.lr))
# Run one optimization step.
loss_np, _ = session.run([loss, optimizer_op])
training_losses.append(loss_np)
return Results(iterations=iters,
training_losses=training_losses,
testing_losses=testing_losses,
classification_accuracies=classification_accuracies,
singular_values=sigmas)
def run_sweep(nets, critics, loss_fns, exp_name, **kwargs):
"""Runs the sweep across encoder networks, critics, and the estimators."""
grid = itertools.product(nets, critics, loss_fns)
data_frames = []
results_with_singular_values = []
for nets_name, critic_name, loss_name in grid:
print("[New experiment] encoder: {}, critic: {}, loss: {}".format(
nets_name, critic_name, loss_name))
with tf.Graph().as_default():
g1, g2 = nets[nets_name]()
critic = critics[critic_name]()
loss_fn = loss_fns[loss_name]
results_per_run = []
for n in range(NRUNS):
try:
print("{:d}th run, loss: {}".format(n, loss_name))
if loss_name == "drfc" and TFDS_NAME == "cifar10":
results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)
#results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=1e-4, n_iter=n, loss_name=loss_name)
else:
results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)
results_per_run.append(results)
except Exception as ex:
print("Run {} failed! Error: {}".format(n, ex))
for i, result in enumerate(results_per_run):
data_frames.append(convert_to_data_frame(
result, exp_name, nets_name, critic_name, loss_name, i))
if kwargs.get('compute_jacobian', False):
results_with_singular_values.append((
ResultsConfig(nets_name, critic_name, loss_name), results_per_run
))
return {
"df": pd.concat(data_frames),
"singular_values": results_with_singular_values
}
#@title Run experiment or load precomputed results { display-mode: "form" }
def run_all_experiments():
tf.compat.v1.reset_default_graph()
wpc_loss = lambda x: -infonce_lower_bound(x)
cpc_loss = lambda x: -infonce_lower_bound(x)
#nwj_loss = lambda x: -nwj_lower_bound(x)
drfc_loss = lambda x: -our_lower_bound(x)
pcc_loss = lambda x: -js_fgan_lower_bound(x)
skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)
ls_pcc_loss = lambda x: -label_smooth_pcc(x)
pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)
adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)
adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)
loss_fcts = {
# "wpc": wpc_loss,
"pcc": pcc_loss,
# "drfc": drfc_loss,
#"nwj": nwj_loss,
"cpc": cpc_loss,
# "skew_pcc": skew_pcc_loss,
"ls_pcc": ls_pcc_loss,
"prels_pcc": pre_ls_pcc_loss,
"adap_pred_pcc": adap_pred_smooth_pcc_loss,
"adap_label_pcc": adap_label_smooth_pcc_loss
}
kwargs = dict(
shift_only=True,
activation=lambda x: tf.nn.relu(x),
kernel_initializer=tf.compat.v1.initializers.truncated_normal(stddev=0.0001),
bias_initializer='zeros')
nets = {
"realnvp": lambda: (
RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs),
RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs)
)
}
critics = {
"bilinear": lambda: BilinearCritic(feature_dim=DIMS//2),
}
return run_sweep(nets, critics, loss_fcts, "invertible", n_iters=21000, n_evals=21)
if RUN_EXPERIMENTS:
data_invertible = run_all_experiments()["df"]
data_invertible.to_pickle(RESULT_DIR)
else:
os.system("wget -q -N https://storage.googleapis.com/mi_for_rl_files/mi_results.pkl")
data_invertible = pd.read_pickle('mi_results.pkl')
data_invertible = data_invertible[data_invertible.exp_name == "invertible"]
| compute_output_shape | identifier_name |
main.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# reference links:
# From https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb
# https://github.com/google-research/google-research/blob/master/mutual_information_representation_learning/mirl.ipynb
# https://github.com/yaohungt/Pointwise_Dependency_Neural_Estimation/tree/master/RepreLearn_Shallow
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import itertools
import os
import pickle
import argparse
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
import pandas as pd
from scipy.ndimage import gaussian_filter1d
import seaborn as sns
import tensorflow as tf
from tensorflow.python.ops.parallel_for import gradients
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
import sklearn.linear_model as sk_linear
parser = argparse.ArgumentParser(description='Representation Learning Experiments')
parser.add_argument('--dataset', default='mnist', type=str,
help='cifar10 or mnist')
parser.add_argument('--lr', default=1e-4, type=float,
help='learning rate')
parser.add_argument('--batch_size', default=100, type=int,
help='mini batch size')
parser.add_argument('--smoothing', default=0.01, type=float,
help='label smoothing parameter')
parser.add_argument('--output_dir', type=str, default='./runs',
help='directory where the results will be stored')
args = parser.parse_args()
a, b, c = 0.005, 0.1, 0.9
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
# learning rate for ours CIFAR10 is 1e-4, otherwise follows below
TFDS_NAME = args.dataset #"cifar10" # mnist or cifar10
NRUNS = 10 #@param { type: "slider", min: 1, max: 20, step: 1}
# parameters for training
if TFDS_NAME == "mnist":
DIMS = 784
elif TFDS_NAME == "cifar10":
DIMS = 3072
LEARNING_RATE = args.lr #1e-5
N_CLASSES = 10
TRAIN_BATCH_SIZE = args.batch_size #64 #@param { type: "slider", min: 64, max: 128, step: 64}
# save results
# RESULT_DIR = '{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)
RESULT_DIR = './runs/{}_nrun={}_lr={}_batch={}.pkl'.format(TFDS_NAME, NRUNS, LEARNING_RATE, TRAIN_BATCH_SIZE)
RUN_EXPERIMENTS = True
FEATURE_INPUT = "image"
FEATURE_LABEL = "label"
#slim = tf.contrib.slim
tfb = tfp.bijectors
tfd = tfp.distributions
tfkl = tf.keras.layers
tf.keras.backend.clear_session()
ResultsConfig = collections.namedtuple(
"ResultsConfig", ["nets", "critic", "loss"])
Results = collections.namedtuple(
'Results',
['iterations', 'training_losses', 'testing_losses',
'classification_accuracies', 'singular_values'])
ResultsAdversarial = collections.namedtuple(
"ResultsAdversarial",
["losses_e", "losses_c", "classification_accuracies", "iters"]
)
ResultsSamplingIssues = collections.namedtuple(
"ResultsSamplingIssues", ["mi_true", "nce_estimates_noniid",
"nce_estimates_iid", "nwj_estimates_noniid",
"nwj_estimates_iid"])
def acti_func(x, a, b, c):
  # Piecewise-linear schedule: alpha = a at x = 0, falls to 0 at x = b,
  # stays 0 on (b, c), and rises back to a at x = 1.
x = tf.stop_gradient(x)
alpha = tf.zeros_like(x)
alpha = tf.where(x<=b, -a*x/b+a, alpha)
alpha = tf.where((x>b) & (x<c), 0., alpha)
alpha = tf.where(x>=c, a*x/(1-c)+a*c/(c-1), alpha)
return alpha
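# Note (added comment): with the defaults a, b, c = 0.005, 0.1, 0.9 defined
# below, acti_func returns alpha = 0.005 for predictions at 0 or 1, 0 for
# predictions between 0.1 and 0.9, and a linear ramp in between; the adaptive
# losses defined later therefore only smooth the most confident predictions.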
def convert_to_data_frame(result, exp_name, nets, critic, loss, seed):
"""Convert results class to a data frame."""
label = "{}, {}, {}".format(nets, critic, loss)
rows = list(
zip(
itertools.repeat(exp_name),
itertools.repeat(nets),
itertools.repeat(critic),
itertools.repeat(loss),
itertools.repeat(seed),
result.iterations,
[-loss for loss in result.testing_losses], # Loss -> bound.
result.classification_accuracies,
itertools.repeat(label)))
df_eval = pd.DataFrame(
rows,
columns=("exp_name", "nets", "Critic", "Estimator",
"run", "iteration", "bound_value", "accuracy", "label"))
df_eval["Estimator"] = df_eval["Estimator"].replace(
to_replace={
"cpc": "$CPC$",
"pcc": "$PCC$",
"drfc": "$D-RFC$",
"wpc": "$WPC$"
})
df_eval["Critic"] = df_eval["Critic"].replace(
to_replace={
"concat": "MLP",
"separable": "Separable",
"innerprod": "Inner product",
"bilinear": "Bilinear"
})
return df_eval
def apply_default_style(ax):
ax.set_xlim([0, 20001])
ax.get_xaxis().set_major_formatter(
FuncFormatter(lambda x, p: format(int(x/1000), ',')))
ax.set_xlabel("Training steps (in thousands)")
plt.tick_params(top=False, right=False, bottom=False, left=False)
handles, labels = ax.get_legend_handles_labels()
plt.legend(loc="lower right", handles=handles[1:], labels=labels[1:])
def get_testing_loss(x_array, session, loss, data_ph, dims, batch_size=512):
total_loss = 0
for i in range(0, x_array.shape[0], batch_size):
x_slice = x_array[i:i+batch_size, :dims]
total_loss += x_slice.shape[0] * session.run(loss,
feed_dict={data_ph: x_slice})
return total_loss / x_array.shape[0]
def get_classification_accuracy(session, codes, data_ph, dims):
x_train_mapped = map_data(x_train, session, codes, data_ph, dims)
x_test_mapped = map_data(x_test, session, codes, data_ph, dims)
accuracy = logistic_fit(x_train_mapped, y_train, x_test_mapped, y_test)
return accuracy
def map_data(x_array, session, codes, data_ph, dims, batch_size=512):
x_mapped = []
for i in range(0, x_array.shape[0], batch_size):
x_mapped.append(
session.run(codes,
feed_dict={data_ph: x_array[i:i+batch_size, :dims]}))
return np.concatenate(x_mapped, axis=0)
def reduce_logmeanexp_nodiag(x, axis=None):
batch_size = x.shape[0]
logsumexp = tf.reduce_logsumexp(input_tensor=x - tf.linalg.tensor_diag(np.inf * tf.ones(batch_size)), axis=axis)
if axis:
num_elem = batch_size - 1.
else:
num_elem = batch_size * (batch_size - 1.)
return logsumexp - tf.math.log(num_elem)
def tuba_lower_bound(scores, log_baseline=None):
if log_baseline is not None:
scores -= log_baseline[:, None]
batch_size = tf.cast(scores.shape[0], tf.float32)
# First term is an expectation over samples from the joint,
  # which are the diagonal elements of the scores matrix.
joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))
# Second term is an expectation over samples from the marginal,
# which are the off-diagonal elements of the scores matrix.
marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))
return 1. + joint_term - marg_term
def nwj_lower_bound(scores):
# equivalent to: tuba_lower_bound(scores, log_baseline=1.)
return tuba_lower_bound(scores - 1.)
@tf.function
def js_fgan_lower_bound(f):
"""Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016)."""
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
n = tf.cast(f.shape[0], tf.float32)
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return first_term - second_term
@tf.function
def infonce_lower_bound(scores):
"""InfoNCE lower bound from van den Oord et al. (2018)."""
nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.reduce_logsumexp(input_tensor=scores, axis=1))
mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll
return mi
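# Note (added comment): the InfoNCE estimate above is capped at log(batch
# size); e.g. a batch of 100 positive pairs can never report more than
# log(100) ~ 4.6 nats, regardless of the true mutual information.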
@tf.function
def our_lower_bound(scores):
# scores: 128, 128
"""Our lower bound"""
batch_size = tf.cast(scores.shape[0], tf.float32)
joint_term = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores))
# expectation
scores_sq = scores**2
marg_num = batch_size * (batch_size - 1.)
marg_term = tf.reduce_sum(input_tensor=scores_sq) - tf.reduce_sum(input_tensor=tf.linalg.diag_part(scores_sq))
marg_term = marg_term / marg_num
# tf.print(joint_term - 0.5*marg_term)
return joint_term - 0.5*marg_term
# nll = tf.reduce_mean(tf.linalg.diag_part(scores) - 0.5 * tf.math.reduce_euclidean_norm(scores, axis=1))
# tf.print(nll)
# mi = tf.math.log(tf.cast(scores.shape[0].value, tf.float32)) + nll
# return mi
@tf.function
def skew_js_fgan_lower_bound(f):
"""skewed js lower bound (true cross entropy)"""
n = tf.cast(f.shape[0], tf.float32)
alpha = 1/n
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return alpha*first_term - (1-alpha)*second_term
@tf.function
def label_smooth_pcc(f):
""" pcc with label smoothing trick"""
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
@tf.function
def predict_smooth_pcc(f):
""" pcc with predictor smoothing trick"""
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
pre_prob = (1.0 - args.smoothing) * pre_prob + args.smoothing / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
@tf.function
def adap_label_smooth_pcc(f):
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
alpha = acti_func(pre_prob, a, b, c)
new_labels = (1.0 - alpha) * labels + alpha / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(new_labels, pre_prob)
@tf.function
def adap_pred_smooth_pcc(f):
n = f.shape[0]
labels = tf.eye(n)
labels = tf.reshape(labels,[-1,1])
# labels = (1.0 - args.smoothing) * labels + args.smoothing / 2
pre_prob = tf.reshape(tf.sigmoid(f), [-1,1])
# print('pre_prob:',pre_prob)
alpha = acti_func(pre_prob, a, b, c)
pre_prob = (1.0 - alpha) * pre_prob + alpha / 2
bce = tf.keras.losses.BinaryCrossentropy()
return -bce(labels, pre_prob)
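# Summary of the four smoothed PCC objectives above (comment only):
# - label_smooth_pcc:   targets y -> (1 - eps) * y + eps / 2, with eps = args.smoothing
# - predict_smooth_pcc: predictions p -> (1 - eps) * p + eps / 2
# - adap_label_smooth_pcc / adap_pred_smooth_pcc: the same transforms, but with
#   a per-pixel eps = acti_func(p, a, b, c) instead of a fixed constant.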
# @title Define the linear evaluation protocol { display-mode: "form" }
def logistic_fit(x_train, y_train, x_test, y_test):
logistic_regressor = sk_linear.LogisticRegression(
solver='saga', multi_class='multinomial', tol=.1, C=10.)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
logistic_regressor.fit(x_train, y_train.ravel())
return logistic_regressor.score(x_test, y_test.ravel())
# @title Define and load the dataset, check baseline in pixel space { display-mode: "form" }
tf.compat.v1.reset_default_graph()
def map_fn(example):
image = example[FEATURE_INPUT]
image = tf.cast(image, tf.float32) / 255.0
image = tf.reshape(image, [-1]) # Flatten.
label = example[FEATURE_LABEL]
return {FEATURE_INPUT: image, FEATURE_LABEL: label}
def load_data(split):
return (tfds.load(TFDS_NAME, data_dir='/public/wangxu/data/', split=split)
.cache()
.map(map_func=map_fn)
.shuffle(1000))
def tfds_to_np(dataset):
features = list(tfds.as_numpy(dataset))
images = np.stack([f[FEATURE_INPUT].ravel() for f in features])
labels = np.stack([f[FEATURE_LABEL].ravel() for f in features])
return images, labels
dataset_train = load_data("train")
dataset_test = load_data("test")
x_train, y_train = tfds_to_np(dataset_train)
x_test, y_test = tfds_to_np(dataset_test)
tf.compat.v1.reset_default_graph()
x_train_noisy = x_train + 0.05 * np.random.randn(*x_train.shape)
x_test_noisy = x_test + 0.05 * np.random.randn(*x_test.shape)
print("Fit on half the pixels: {}. It should be around 0.835.".format(
logistic_fit(x_train_noisy[:, :DIMS//2], y_train,
x_test_noisy[:, :DIMS//2], y_test)))
def processed_train_data(dims, batch_size):
dataset = load_data("train")
dataset_batched = dataset.repeat().batch(batch_size, drop_remainder=True)
get_next = tf.compat.v1.data.make_one_shot_iterator(dataset_batched).get_next()
features = get_next[FEATURE_INPUT]
labels = get_next[FEATURE_LABEL]
# Martin: where the problem occurs
x_1, x_2 = tf.split(features, [dims, DIMS-dims], axis=-1)
return x_1, x_2, labels
class MLP(tf.keras.Model):
def __init__(self, layer_dimensions, shortcuts, dense_kwargs={}):
super(MLP, self).__init__()
self._layers = [tfkl.Dense(dimensions, **dense_kwargs)
for dimensions in layer_dimensions[:-1]]
dense_kwargs_copy = copy.deepcopy(dense_kwargs)
dense_kwargs_copy["activation"] = None
self._layers.append(tfkl.Dense(layer_dimensions[-1], **dense_kwargs_copy))
self._shortcuts = shortcuts
@property
def layers(self):
return self._layers
def __call__(self, inputs):
x = inputs
for layer in self.layers:
x = layer(x) + x if self._shortcuts else layer(x)
return x
# LayerNorm implementation copied from
# https://stackoverflow.com/questions/39095252/fail-to-implement-layer-normalization-with-keras
class LayerNorm(tfkl.Layer):
""" Layer Normalization in the style of https://arxiv.org/abs/1607.06450 """
def __init__(self, scale_initializer='ones', bias_initializer='zeros',
axes=[1,2,3], epsilon=1e-6, **kwargs):
super(LayerNorm, self).__init__(**kwargs)
self.epsilon = epsilon
self.scale_initializer = tf.keras.initializers.get(scale_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.axes = axes
def build(self, input_shape):
self.scale = self.add_weight(shape=(input_shape[-1],),
initializer=self.scale_initializer,
trainable=True,
name='{}_scale'.format(self.name))
self.bias = self.add_weight(shape=(input_shape[-1],),
initializer=self.bias_initializer,
trainable=True,
name='{}_bias'.format(self.name))
self.built = True
def call(self, x, mask=None):
mean = tf.keras.backend.mean(x, axis=self.axes, keepdims=True)
std = tf.keras.backend.std(x, axis=self.axes, keepdims=True)
norm = (x - mean) * (1/(std + self.epsilon))
return norm * self.scale + self.bias
def compute_output_shape(self, input_shape):
return input_shape
class ConvNet(tf.keras.Sequential):
def __init__(self, channels=64, kernel_size=5, input_dim=DIMS//2, output_dim=100,
activation=tf.nn.relu):
# Note: This works only for the specific data set considered here.
super(ConvNet, self).__init__([
tfkl.Reshape((14, 28, 1), input_shape=(input_dim,)),
tfkl.Conv2D(channels, kernel_size, strides=2,
padding="same", activation=activation),
tfkl.Conv2D(2*channels, kernel_size, strides=2,
padding="same", activation=activation),
LayerNorm(),
tfkl.GlobalAveragePooling2D(),
tfkl.Dense(output_dim),
])
from tensorflow_probability.python.internal import tensorshape_util
import tensorflow.compat.v1 as tf1
from tensorflow_probability.python.bijectors import affine_scalar
from tensorflow_probability.python.bijectors import bijector as bijector_lib
# Modified from tensorflow_probability/python/bijectors/real_nvp.py
class RealNVP(bijector_lib.Bijector):
def __init__(self,
num_masked,
shift_and_log_scale_fn=None,
bijector_fn=None,
is_constant_jacobian=False,
validate_args=False,
name=None):
name = name or 'real_nvp'
if num_masked < 0:
raise ValueError('num_masked must be a non-negative integer.')
self._num_masked = num_masked
# At construction time, we don't know input_depth.
self._input_depth = None
if bool(shift_and_log_scale_fn) == bool(bijector_fn):
raise ValueError('Exactly one of `shift_and_log_scale_fn` and '
'`bijector_fn` should be specified.')
if shift_and_log_scale_fn:
def _bijector_fn(x0, input_depth, **condition_kwargs):
shift, log_scale = shift_and_log_scale_fn(x0, input_depth,
**condition_kwargs)
# ** First modification is here.
return affine_scalar.AffineScalar(shift=shift, scale=log_scale)
bijector_fn = _bijector_fn
if validate_args:
bijector_fn = _validate_bijector_fn(bijector_fn)
# Still do this assignment for variable tracking.
self._shift_and_log_scale_fn = shift_and_log_scale_fn
self._bijector_fn = bijector_fn
super(RealNVP, self).__init__(
forward_min_event_ndims=1,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
def _cache_input_depth(self, x):
if self._input_depth is None:
self._input_depth = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
if self._input_depth is None:
raise NotImplementedError(
'Rightmost dimension must be known prior to graph execution.')
if self._num_masked >= self._input_depth:
raise ValueError(
'Number of masked units must be smaller than the event size.')
def _forward(self, x, **condition_kwargs):
self._cache_input_depth(x)
x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]
y1 = self._bijector_fn(x0, self._input_depth - self._num_masked,
**condition_kwargs).forward(x1)
y = tf.concat([x0, y1], axis=-1)
return y
def _inverse(self, y, **condition_kwargs):
self._cache_input_depth(y)
y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]
x1 = self._bijector_fn(y0, self._input_depth - self._num_masked,
**condition_kwargs).inverse(y1)
x = tf.concat([y0, x1], axis=-1)
return x
def _forward_log_det_jacobian(self, x, **condition_kwargs):
self._cache_input_depth(x)
x0, x1 = x[..., :self._num_masked], x[..., self._num_masked:]
return self._bijector_fn(x0, self._input_depth - self._num_masked,
**condition_kwargs).forward_log_det_jacobian(
x1, event_ndims=1)
def _inverse_log_det_jacobian(self, y, **condition_kwargs):
self._cache_input_depth(y)
y0, y1 = y[..., :self._num_masked], y[..., self._num_masked:]
return self._bijector_fn(y0, self._input_depth - self._num_masked,
**condition_kwargs).inverse_log_det_jacobian(
y1, event_ndims=1)
def real_nvp_default_template(hidden_layers,
shift_only=False,
activation=tf.nn.relu,
name=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs):
with tf.compat.v1.name_scope(name or 'real_nvp_default_template'):
def _fn(x, output_units, **condition_kwargs):
"""Fully connected MLP parameterized via `real_nvp_template`."""
if condition_kwargs:
raise NotImplementedError(
'Conditioning not implemented in the default template.')
if tensorshape_util.rank(x.shape) == 1:
x = x[tf.newaxis, ...]
reshape_output = lambda x: x[0]
else:
reshape_output = lambda x: x
for units in hidden_layers:
x = tf1.layers.dense(
inputs=x,
units=units,
activation=activation,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
x = tf1.layers.dense(
inputs=x,
units=(1 if shift_only else 2) * output_units,
activation=None,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs)
if shift_only:
return reshape_output(x), None
shift, log_scale = tf.split(x, 2, axis=-1)
# ** Here is the second modification.
return reshape_output(shift), 1e-7 + tf.nn.softplus(reshape_output(log_scale))
return tf1.make_template('real_nvp_default_template', _fn)
class RealNVPBijector(tf.keras.Model):
def __init__(self, dimensions, n_couplings, hidden_layers, dense_kwargs):
super(RealNVPBijector, self).__init__()
permutations = [np.random.permutation(dimensions)
for _ in range(n_couplings)]
bijectors = []
for permutation in permutations:
bijectors.append(RealNVP(
dimensions // 2,
real_nvp_default_template(hidden_layers, **dense_kwargs)))
bijectors.append(tfb.Permute(permutation))
self._bijector = tfb.Chain(bijectors)
def call(self, inputs):
return self._bijector.forward(inputs)
class InnerProdCritic(tf.keras.Model):
def call(self, x, y):
return tf.matmul(x, y, transpose_b=True)
class BilinearCritic(tf.keras.Model):
def __init__(self, feature_dim=100, **kwargs):
super(BilinearCritic, self).__init__(**kwargs)
self._W = tfkl.Dense(feature_dim, use_bias=False)
def call(self, x, y):
return tf.matmul(x, self._W(y), transpose_b=True)
# https://colab.research.google.com/github/google-research/google-research/blob/master/vbmi/vbmi_demo.ipynb
class ConcatCritic(tf.keras.Model):
def __init__(self, hidden_dim=200, layers=1, activation='relu', **kwargs):
super(ConcatCritic, self).__init__(**kwargs)
# output is scalar score
self._f = MLP([hidden_dim for _ in range(layers)]+[1], False, {"activation": "relu"})
def call(self, x, y):
batch_size = tf.shape(input=x)[0]
# Tile all possible combinations of x and y
x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))
y_tiled = tf.tile(y[:, None], (1, batch_size, 1))
# xy is [batch_size * batch_size, x_dim + y_dim]
xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2),
[batch_size * batch_size, -1])
# Compute scores for each x_i, y_j pair.
scores = self._f(xy_pairs)
return tf.transpose(a=tf.reshape(scores, [batch_size, batch_size]))
class SeparableCritic(tf.keras.Model):
def __init__(self, hidden_dim=100, output_dim=100, layers=1,
activation='relu', **kwargs):
super(SeparableCritic, self).__init__(**kwargs)
self._f_x = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {"activation": activation})
self._f_y = MLP([hidden_dim for _ in range(layers)] + [output_dim], False, {"activation": activation})
def call(self, x, y):
x_mapped = self._f_x(x)
y_mapped = self._f_y(y)
return tf.matmul(x_mapped, y_mapped, transpose_b=True)
def train(g1,
g2,
critic,
loss_fn,
learning_rate,
batch_size=TRAIN_BATCH_SIZE,
n_iters=15000,
n_evals=15,
compute_jacobian=False,
noise_std=0.0,
data_dimensions=DIMS//2,
n_iter=1,
loss_name='InfoNCE',
):
"""Runs the training loop for a fixed model.
Args:
g1: Function, maps input1 to representation.
g2: Function, maps input2 to representation.
critic: Function, maps two representations to scalar.
loss_fn: Function, mutual information estimator.
learning_rate: Learning rate.
batch_size: Training batch size.
n_iters: Number of optimization iterations.
n_evals: Number of model evaluations.
compute_jacobian: Whether to estimate the singular values of the Jacobian.
noise_std: Standard deviation for the Gaussian noise. Default is 0.0.
data_dimensions: The dimension of the data. By default it's half of the
original data dimension.
Returns:
    Returns an instance of the `Results` tuple.
"""
x_1, x_2, _ = processed_train_data(data_dimensions, batch_size)
if noise_std > 0.0:
assert x_1.shape == x_2.shape, "X1 and X2 shapes must agree to add noise!"
noise = noise_std * tf.random.normal(x_1.shape)
x_1 += noise
x_2 += noise
# Compute the representations.
code_1, code_2 = g1(x_1), g2(x_2)
critic_matrix = critic(code_1, code_2)
# Compute the Jacobian of g1 if needed.
if compute_jacobian:
jacobian = gradients.batch_jacobian(code_1, x_1, use_pfor=False)
singular_values = tf.linalg.svd(jacobian, compute_uv=False)
# Optimizer setup.
loss = loss_fn(critic_matrix)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
if not loss_name == 'wpc':
optimizer_op = optimizer.minimize(loss)
else:
gvs = optimizer.compute_gradients(loss)
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
optimizer_op = optimizer.apply_gradients(capped_gvs)
with tf.compat.v1.Session() as session:
session.run(tf.compat.v1.global_variables_initializer())
# Subgraph for eval (add noise to input if necessary)
data_ph = tf.compat.v1.placeholder(tf.float32, shape=[None, data_dimensions])
data_ph_noisy = data_ph + noise_std * tf.random.normal(tf.shape(input=data_ph))
codes = g1(data_ph_noisy)
training_losses, testing_losses, classification_accuracies, iters, sigmas \
= [], [], [], [], []
# Main training loop.
for iter_n in range(n_iters):
# Evaluate the model performance.
if iter_n % (n_iters // n_evals) == 0:
iters.append(iter_n)
accuracy = get_classification_accuracy(session, codes, data_ph, data_dimensions)
classification_accuracies.append(accuracy)
testing_losses.append(
get_testing_loss(x_test, session, loss, data_ph, data_dimensions))
if compute_jacobian:
sigmas.append(session.run(singular_values))
print("{:d}th iter Loss_name {} Step {:>10d} fit {:>.5f} DS {} B {:d} lr {:f}".format(\
n_iter, loss_name, iter_n, accuracy, args.dataset, args.batch_size, args.lr))
# Run one optimization step.
loss_np, _ = session.run([loss, optimizer_op])
training_losses.append(loss_np)
return Results(iterations=iters,
training_losses=training_losses,
testing_losses=testing_losses,
classification_accuracies=classification_accuracies,
singular_values=sigmas)
def run_sweep(nets, critics, loss_fns, exp_name, **kwargs):
"""Runs the sweep across encoder networks, critics, and the estimators."""
grid = itertools.product(nets, critics, loss_fns)
data_frames = []
results_with_singular_values = [] | with tf.Graph().as_default():
g1, g2 = nets[nets_name]()
critic = critics[critic_name]()
loss_fn = loss_fns[loss_name]
results_per_run = []
for n in range(NRUNS):
try:
print("{:d}th run, loss: {}".format(n, loss_name))
if loss_name == "drfc" and TFDS_NAME == "cifar10":
results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)
#results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=1e-4, n_iter=n, loss_name=loss_name)
else:
results = train(g1, g2, critic, loss_fn, **kwargs, learning_rate=LEARNING_RATE, n_iter=n, loss_name=loss_name)
results_per_run.append(results)
except Exception as ex:
print("Run {} failed! Error: {}".format(n, ex))
for i, result in enumerate(results_per_run):
data_frames.append(convert_to_data_frame(
result, exp_name, nets_name, critic_name, loss_name, i))
if kwargs.get('compute_jacobian', False):
results_with_singular_values.append((
ResultsConfig(nets_name, critic_name, loss_name), results_per_run
))
return {
"df": pd.concat(data_frames),
"singular_values": results_with_singular_values
}
#@title Run experiment or load precomputed results { display-mode: "form" }
def run_all_experiments():
tf.compat.v1.reset_default_graph()
wpc_loss = lambda x: -infonce_lower_bound(x)
cpc_loss = lambda x: -infonce_lower_bound(x)
#nwj_loss = lambda x: -nwj_lower_bound(x)
drfc_loss = lambda x: -our_lower_bound(x)
pcc_loss = lambda x: -js_fgan_lower_bound(x)
skew_pcc_loss = lambda x: -skew_js_fgan_lower_bound(x)
ls_pcc_loss = lambda x: -label_smooth_pcc(x)
pre_ls_pcc_loss = lambda x: -predict_smooth_pcc(x)
adap_pred_smooth_pcc_loss = lambda x: -adap_pred_smooth_pcc(x)
adap_label_smooth_pcc_loss = lambda x: -adap_label_smooth_pcc(x)
loss_fcts = {
# "wpc": wpc_loss,
"pcc": pcc_loss,
# "drfc": drfc_loss,
#"nwj": nwj_loss,
"cpc": cpc_loss,
# "skew_pcc": skew_pcc_loss,
"ls_pcc": ls_pcc_loss,
"prels_pcc": pre_ls_pcc_loss,
"adap_pred_pcc": adap_pred_smooth_pcc_loss,
"adap_label_pcc": adap_label_smooth_pcc_loss
}
kwargs = dict(
shift_only=True,
activation=lambda x: tf.nn.relu(x),
kernel_initializer=tf.compat.v1.initializers.truncated_normal(stddev=0.0001),
bias_initializer='zeros')
nets = {
"realnvp": lambda: (
RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs),
RealNVPBijector(DIMS // 2, n_couplings=30, hidden_layers=[512, 512], dense_kwargs=kwargs)
)
}
critics = {
"bilinear": lambda: BilinearCritic(feature_dim=DIMS//2),
}
return run_sweep(nets, critics, loss_fcts, "invertible", n_iters=21000, n_evals=21)
if RUN_EXPERIMENTS:
data_invertible = run_all_experiments()["df"]
data_invertible.to_pickle(RESULT_DIR)
else:
os.system("wget -q -N https://storage.googleapis.com/mi_for_rl_files/mi_results.pkl")
data_invertible = pd.read_pickle('mi_results.pkl')
data_invertible = data_invertible[data_invertible.exp_name == "invertible"] | for nets_name, critic_name, loss_name in grid:
print("[New experiment] encoder: {}, critic: {}, loss: {}".format(
nets_name, critic_name, loss_name)) | random_line_split |
preprocessing.py | #!/usr/bin/env python3
# coding: utf-8
"""Chromosight's preprocessing submodule implements a number of functions to
operate on Hi-C contact maps before detection. These functions can be used to
improve the signal or filter unneeded signal. There are also functions to edit
(zoom, crop, factorize) kernel matrices.
"""
import sys
import numpy as np
import numpy.linalg as la
import scipy.stats as ss
import scipy.sparse as sp
import scipy.ndimage as ndi
from sklearn.isotonic import IsotonicRegression
def erase_missing(signal, valid_rows, valid_cols, sym_upper=True):
"""
Given a sparse matrix, set all pixels in missing (invalid) bins to 0.
Parameters
----------
signal : scipy.sparse.csr_matrix of floats
Input signal on which to erase values.
valid_rows : numpy.ndarray of ints
Indices of rows considered valid (not missing).
valid_cols : numpy.ndarray of ints
Indices of columns considered valid (not missing).
sym_upper : bool
Define if the input signal is upper symmetric.
Returns
-------
scipy.sparse.csr_matrix
The input signal with all values in missing bins set to 0
"""
if sym_upper and sp.issparse(signal):
if np.any(valid_rows != valid_cols):
raise ValueError(
"Valid rows and columns must be identical with sym_upper=True"
)
if signal.shape[0] != signal.shape[1]:
raise ValueError(
"Input matrix must be square when using sym_upper=True"
)
# Make a boolean mask from good bins
good_mask = np.isin(range(signal.shape[0]), valid_rows)
# Set all pixels in a nondetectable bin to 0
# For faster masking of bins, mask bins using dot product with an
# identify matrix where bad bins have been masked on the diagonal
# E.g. if removing the second bin (row and column):
# 1 0 0 9 6 5 1 0 0 9 0 5
# 0 0 0 X 6 8 7 X 0 0 0 = 0 0 0
# 0 0 1 6 7 8 0 0 1 6 0 8
mask_mat = sp.eye(signal.shape[0])
mask_mat.data[0][~good_mask] = 0
erased = mask_mat.dot(signal).dot(mask_mat)
else:
# Get a boolean array of missing (1) and valid (0) rows
missing_rows = valid_to_missing(valid_rows, signal.shape[0])
missing_cols = valid_to_missing(valid_cols, signal.shape[1])
erased = signal.copy()
erased[missing_rows, :] = 0
erased[:, missing_cols] = 0
return erased
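# Illustrative sketch (toy data, not part of the original module): erasing the
# middle bin of a 3x3 matrix of ones zeroes out its row and column.
# >>> m = sp.csr_matrix(np.ones((3, 3)))
# >>> erase_missing(m, np.array([0, 2]), np.array([0, 2])).toarray()
# array([[1., 0., 1.],
#        [0., 0., 0.],
#        [1., 0., 1.]])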
def set_mat_diag(mat, diag=0, val=0):
"""
Set the nth diagonal of a symmetric 2D numpy array to a fixed value.
Operates in place.
Parameters
----------
mat : numpy.ndarray
Symmetric 2D array of floats.
diag : int
0-based index of the diagonal to modify. Use negative values for the
lower half.
val : float
Value to use for filling the diagonal
"""
m = mat.shape[0]
step = m + 1
start = diag
end = m ** 2 - diag * m
mat.flat[start:end:step] = val
def diag_trim(mat, n):
"""
Trim an upper triangle sparse matrix so that only the first n diagonals
are kept.
Parameters
----------
mat : scipy.sparse.csr_matrix or numpy.ndarray
The sparse matrix to be trimmed
n : int
The number of diagonals from the center to keep (0-based).
Returns
-------
scipy.sparse.dia_matrix or numpy.ndarray:
The diagonally trimmed upper triangle matrix with only the first
        n diagonals kept.
"""
if sp.issparse(mat):
if mat.format != "csr":
raise ValueError("input type must be scipy.sparse.csr_matrix")
# Trim diagonals by removing all elements further than n in the
# upper triangle
trimmed = sp.tril(mat, n, format="csr")
trimmed = sp.triu(trimmed, format="csr")
else:
trimmed = mat.copy()
n_diags = trimmed.shape[0]
for diag in range(n, n_diags):
set_mat_diag(trimmed, diag, 0)
return trimmed
return trimmed
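# Illustrative sketch (toy data): with n=2, only the main diagonal and the
# first superdiagonal survive.
# >>> dense = np.triu(np.ones((4, 4)))
# >>> diag_trim(dense, 2)[0]
# array([1., 1., 0., 0.])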
def distance_law(
matrix, detectable_bins=None, max_dist=None, smooth=True, fun=np.nanmean
):
"""
Computes genomic distance law by averaging over each diagonal in the upper
triangle matrix. If a list of detectable bins is provided, pixels in
missing bins will be excluded from the averages. A maximum distance can be
specified to define how many diagonals should be computed.
    Parameters
----------
matrix: scipy.sparse.csr_matrix
the input matrix to compute distance law from.
detectable_bins : numpy.ndarray of ints
An array of detectable bins indices to consider when computing
distance law.
max_dist : int
Maximum distance from diagonal, in number of bins in which to compute
distance law
smooth : bool
Whether to use isotonic regression to smooth the distance law.
fun : callable
A function to apply on each diagonal. Defaults to mean.
Returns
-------
dist: np.ndarray
        The output genomic distance law.
    Example
-------
>>> m = np.ones((3,3))
>>> m += np.array([1,2,3])
>>> m
array([[2., 3., 4.],
[2., 3., 4.],
[2., 3., 4.]])
>>> distance_law(csr_matrix(m))
array([3. , 3.5, 4. ])
"""
mat_n = matrix.shape[0]
if max_dist is None:
max_dist = mat_n
n_diags = min(mat_n, max_dist + 1)
dist = np.zeros(mat_n)
if detectable_bins is None:
detectable_bins = np.array(range(mat_n))
for diag in range(n_diags):
# Find detectable which fall in diagonal
detect_mask = np.zeros(mat_n, dtype=bool)
detect_mask[detectable_bins] = 1
# Find bins which are detectable in the diagonal (intersect of
# hori and verti)
detect_mask_h = detect_mask[: (mat_n - diag)]
detect_mask_v = detect_mask[mat_n - (mat_n - diag) :]
detect_mask_diag = detect_mask_h & detect_mask_v
detect_diag = matrix.diagonal(diag)[detect_mask_diag]
dist[diag] = fun(detect_diag[detect_diag > 0])
# Smooth the curve using isotonic regression: Find closest approximation
# with the condition that point n+1 cannot be higher than point n.
# (i.e. contacts can only decrease when increasing distance)
if smooth and mat_n > 2:
ir = IsotonicRegression(increasing=False)
dist[~np.isfinite(dist)] = 0
dist = ir.fit_transform(range(len(dist)), dist)
return dist
def get_detectable_bins(mat, n_mads=3, inter=False):
"""
Returns lists of detectable indices after excluding low interacting bins
based on the proportion of zero pixel values in the matrix bins.
Parameters
----------
mat : scipy.sparse.coo_matrix
        A Hi-C matrix in the form of a 2D numpy array or coo matrix
n_mads : int
Number of median absolute deviation below the median required to
consider bins non-detectable.
inter : bool
        Whether the matrix is interchromosomal. By default the matrix is
        assumed to be intrachromosomal (i.e. upper symmetric).
Returns
-------
    tuple of numpy.ndarray :
        Tuple of 2 1D arrays containing indices of detectable rows and
        columns, respectively.
"""
matrix = mat.copy()
matrix.eliminate_zeros()
def | (x): return ss.median_abs_deviation(x, nan_policy="omit")
if not inter:
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("Intrachromosomal matrices must be symmetric.")
# Replace nonzero pixels by ones to work on prop. of nonzero pixels
matrix.data = np.ones(matrix.data.shape)
# Compute number of nonzero values in each bin
sum_bins = sum_mat_bins(matrix)
# Compute variation in the number of nonzero pixels
sum_mad = mad(sum_bins)
# Find poor interacting rows and columns
sum_med = np.median(sum_bins)
detect_threshold = max(1, sum_med - sum_mad * n_mads)
# Removal of poor interacting rows and columns
good_bins = np.flatnonzero(sum_bins >= detect_threshold)
good_bins = (good_bins, good_bins)
else:
# Adapted for asymetric matrices (need to compute rows and columns)
sum_rows, sum_cols = matrix.sum(axis=1).A1, matrix.sum(axis=0).A1
mad_rows, mad_cols = mad(sum_rows), mad(sum_cols)
med_rows, med_cols = np.median(sum_rows), np.median(sum_cols)
detect_threshold_rows = max(1, med_rows - mad_rows * n_mads)
detect_threshold_cols = max(1, med_cols - mad_cols * n_mads)
good_rows = np.flatnonzero(sum_rows > detect_threshold_rows)
good_cols = np.flatnonzero(sum_cols > detect_threshold_cols)
good_bins = (good_rows, good_cols)
return good_bins
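# Illustrative sketch (hypothetical data): bins whose number of nonzero pixels
# falls more than n_mads median absolute deviations below the median are
# flagged as non-detectable and excluded from the returned indices.
# >>> m = sp.coo_matrix(np.triu(np.random.poisson(5, (100, 100))))
# >>> valid_rows, valid_cols = get_detectable_bins(m, n_mads=3)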
def detrend(
matrix,
detectable_bins=None,
max_dist=None,
smooth=False,
fun=np.nanmean,
max_val=10,
):
"""
Detrends a Hi-C matrix by the distance law.
    The input matrix should have been normalised beforehand.
Parameters
----------
matrix : scipy.sparse.csr_matrix
The normalised intrachromosomal Hi-C matrix to detrend.
detectable_bins : tuple
Tuple containing a list of detectable rows and a list of columns on
which to perform detrending. Poorly interacting indices have been
excluded.
max_dist : int
Maximum number of bins from the diagonal at which to compute trend.
smooth : bool
Whether to use isotonic regression to smooth the trend.
fun : callable
Function to use on each diagonal to compute the trend.
max_val : float or None
Maximum value in the detrended matrix. Set to None to disable
Returns
-------
    scipy.sparse.csr_matrix :
The detrended Hi-C matrix.
"""
matrix = matrix.tocsr()
y = distance_law(
matrix,
detectable_bins=detectable_bins,
max_dist=max_dist,
smooth=smooth,
fun=fun,
)
y[np.isnan(y)] = 0.0
# Detrending by the distance law
clean_mat = matrix.tocoo()
# clean_mat.data /= y_savgol[abs(clean_mat.row - clean_mat.col)]
try:
clean_mat.data = clean_mat.data / y[abs(clean_mat.row - clean_mat.col)]
# If no nonzero value in matrix, do nothing
except TypeError:
pass
clean_mat = clean_mat.tocsr()
if max_val is not None:
clean_mat[clean_mat >= max_val] = 1
return clean_mat
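# Illustrative sketch (hypothetical sparse matrix `mat`, assumed already
# balanced): divide each pixel by the expected contact count at its distance.
# >>> detrended = detrend(mat.tocsr(), smooth=True)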
def ztransform(matrix):
"""
Z transformation for Hi-C matrices.
Parameters
----------
matrix : scipy.sparse.coo_matrix
A Hi-C matrix in sparse format.
Returns
-------
scipy.sparse.coo_matrix:
        The z-transformed Hi-C matrix
"""
mat = matrix.copy()
mu = np.mean(mat.data)
sd = np.std(mat.data)
mat.data -= mu
mat.data /= sd
return mat
def sum_mat_bins(mat):
"""
Compute the sum of matrices bins (i.e. rows or columns) using
only the upper triangle, assuming symmetrical matrices.
Parameters
----------
mat : scipy.sparse.coo_matrix
Contact map in sparse format, either in upper triangle or
full matrix.
Returns
-------
numpy.ndarray :
1D array of bin sums.
"""
    # Equivalent to a row or column sum on the full symmetric matrix
# Note: mat.sum returns a 'matrix' object. A1 extracts the 1D flat array
# from the matrix
return mat.sum(axis=0).A1 + mat.sum(axis=1).A1 - mat.diagonal(0)
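# Note (added comment): for a symmetric map stored as an upper triangle,
# adding row sums and column sums and subtracting the diagonal once yields the
# same per-bin totals as summing the corresponding full matrix along one axis.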
def subsample_contacts(M, n_contacts):
"""Bootstrap sampling of contacts in a sparse Hi-C map.
Parameters
----------
M : scipy.sparse.coo_matrix
The input Hi-C contact map in sparse format.
n_contacts : int
The number of contacts to sample.
Returns
-------
scipy.sparse.coo_matrix
A new matrix with a fraction of the original contacts.
"""
S = M.data.copy()
# Match cell idx to cumulative number of contacts
cum_counts = np.cumsum(S)
    # Total number of contacts in the input map
tot_contacts = int(cum_counts[-1])
    # Sample the desired number of contacts from the range(0, tot_contacts) array
sampled_contacts = np.random.choice(
int(tot_contacts), size=(n_contacts), replace=False
)
# Get indices of sampled contacts in the cum_counts array
idx = np.searchsorted(cum_counts, sampled_contacts, side="right")
# Bin those indices to the same dimensions as matrix data to get counts
sampled_counts = np.bincount(idx, minlength=S.shape[0])
# Get nonzero values to build new sparse matrix
nnz_mask = sampled_counts > 0
sampled_counts = sampled_counts[nnz_mask].astype(np.float64)
sampled_rows = M.row[nnz_mask]
sampled_cols = M.col[nnz_mask]
return sp.coo_matrix(
(sampled_counts, (sampled_rows, sampled_cols)),
shape=(M.shape[0], M.shape[1]),
)
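# Illustrative sketch (hypothetical map `mat` in COO format): downsample to
# 80% of the observed contacts, e.g. to match a shallower library.
# >>> total_contacts = int(mat.sum())
# >>> sub = subsample_contacts(mat, int(0.8 * total_contacts))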
def frame_missing_mask(mask, kernel_shape, sym_upper=False, max_dist=None):
"""
Adds a frame around input mask, given a kernel. The goal of this
    frame is to define margins around the matrix where the kernel will not perform
convolution (denoted by 1). If the matrix is upper symmetric, a margin of
half the kernel's width is added below the diagonal and a maximum distance
from the diagonal above which margins need not be drawn can be considered.
    Otherwise, margins are simply added on all 4 sides of the matrix.
::
signal kernel _________
______ ____ |#######|
| | | | ==> |# #|
| | |___| |# #|
| | |# #|
|_____| |# #|
|#######|
--------
Parameters
----------
mask : scipy.sparse.csr_matrix of bool
The mask around which to add margins.
    kernel_shape : tuple of ints
        The number of rows and columns in the input kernel. Margins will be half
these values.
sym_upper : bool
Whether the signal is a symmetric upper triangle matrix. If so, values
on a margin below the diagonal will be masked.
max_dist : int or None
Number of diagonals to keep
Returns
-------
framed_mask : scipy.sparse.csr_matrix of bool
The input mask with a padding of True around the edges. If sym_upper
is True, a padding is also added below the diagonal.
"""
if mask.dtype != bool:
raise ValueError("Mask must contain boolean values")
if not sp.issparse(mask):
raise ValueError("Mask must be a sparse matrix")
framed_mask = mask.copy()
ms, ns = mask.shape
mk, nk = kernel_shape
if sym_upper and (max_dist is not None):
# Remove diagonals further than scan distance in the input mask
framed_mask = diag_trim(framed_mask, max_dist + max(nk, mk)).tocsr()
max_m = max_dist + mk
max_n = max_dist + nk
else:
max_m, max_n = ms, ns
# Up and down margins initialized with zeros and filled as needed
margin_1 = sp.csr_matrix((mk - 1, ns), dtype=bool)
margin_2 = sp.csr_matrix((mk - 1, ns), dtype=bool)
if sym_upper and (max_dist is not None):
# Margin 1 (top) is in upper triangle -> fill missing up to scan dist
margin_1[:, :max_n] = 1
else:
margin_1[:, :] = 1
margin_2[:, :] = 1
framed_mask = sp.vstack([margin_1, framed_mask, margin_2], format="csr")
# Left and right
margin_1 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool)
margin_2 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool)
if sym_upper and (max_dist is not None):
# Margin 2 (right) is in upper triangle-> fill missing up to scan dist
margin_2[-(max_m + 1) :, :] = 1
# Fill only the start of left margin for the top-left corner
margin_1[: mk - 1, :] = 1
else:
margin_1[:, :] = 1
margin_2[:, :] = 1
framed_mask = sp.hstack([margin_1, framed_mask, margin_2], format="csr")
if sym_upper:
# LIL format is much faster when changing sparsity
framed_mask = framed_mask.tolil()
# Add margin below diagonal
big_k = max(nk, mk)
dia_margins = np.ones(big_k)
dia_offsets = np.arange(-1, -big_k-1, -1)
framed_mask += sp.diags(
dia_margins,
dia_offsets,
shape=framed_mask.shape,
format="lil",
dtype=bool,
)
framed_mask = framed_mask.tocsr()
return framed_mask
def check_missing_mask(signal, mask):
"""
Ensure all elements defined as missing by the mask are set to zero in the
signal. If this is not the case, raises an error.
Parameters
----------
signal : numpy.ndarray of floats or scipy.sparse.csr_matrix of floats
The signal to be checked.
mask : numpy.ndarray of bools or scipy.sparse.csr_matrix of bools
The mask defining missing values as True and valid values as False.
"""
if sp.issparse(mask):
# Check if there are nonzero values in the signal reported as missing
# by the mask
missing_with_signal = np.nonzero(
abs(signal[mask.nonzero()[0], mask.nonzero()[1]]) > 0
)[0]
if len(missing_with_signal) > 0:
            raise ValueError(
                f"There are {len(missing_with_signal)} non-zero elements "
                "reported as missing."
            )
else:
if np.sum(abs(signal[mask > 0])) > 1e-10:
            raise ValueError(
                f"There are {np.sum(abs(signal[mask > 0]))} non-zero elements "
                "reported as missing."
            )
def make_missing_mask(
shape, valid_rows, valid_cols, max_dist=None, sym_upper=False
):
"""
Given lists of valid rows and columns, generate a sparse matrix mask with
missing pixels denoted as 1 and valid pixels as 0. If a max_dist is
provided, upper symmetric matrices will only be flagged up to max_dist
pixels from the diagonal.
Parameters
----------
shape : tuple of ints
Shape of the mask to generate.
valid_rows : numpy.ndarray of ints
Array with the indices of valid rows that should be set to 0 in the
mask.
    valid_cols : numpy.ndarray of ints
        Array with the indices of valid columns that should be set to 0 in
        the mask.
max_dist : int or None
The maximum diagonal distance at which masking should take place.
sym_upper : bool
Whether the matrix is symmetric upper. If so, max_dist is ignored
Returns
-------
scipy.sparse.csr_matrix of bool
        The mask containing False values where pixels are valid and True
        values where pixels are missing.
"""
    # Error if the matrix is upper symmetric but the shape is rectangular or
    # the missing rows and cols are different
sm, sn = shape
if sym_upper and (sm != sn or len(valid_rows) != len(valid_cols)):
raise ValueError("Rectangular matrices cannot be upper symmetric")
# Get a boolean array of missing (1) and valid (0) rows
missing_rows = valid_to_missing(valid_rows, sm)
# When matrix is sym., rows and cols are synonym, no need to compute 2x
if sym_upper:
missing_cols = missing_rows
else:
missing_cols = valid_to_missing(valid_cols, sn)
# If upper sym., fill only upper diag up to max_dist.
# E. g. with bins 1 and 3 missing
# and a max_dist of 1:
# 0 1 0 0 0
# 0 1 1 0 0
# 0 0 0 1 0
# 0 0 0 1 1
# 0 0 0 0 0
    # For each missing bin, the mask is applied 1 pixel upwards and 1 to the right
# to fill only the upper triangle up to max_dist
if sym_upper:
# If no max dist has been specified, fill the whole upper triangle
if max_dist is None:
max_dist = min(shape)
# Generate matrix of distance shifts by row.
# Shape is len(missing_rows) x (max_dist + 1)
# e.g.: 2 missing rows and max dist of 1
# 0 0
# 1 1
row_shifts = np.tile(
np.array(range(max_dist + 1)), (len(missing_rows), 1)
).T
# Compute row positions upwards to diagonal by subtracting missing rows
# to the shifts. Following the previous example, if missing rows are
# bins 1 and 3:
# 1 3
# 0 2
rows_before = (missing_rows - row_shifts).flatten("F")
# looking at pixels up from the bins, cols remain the same:
# 1 3
# 1 3
cols_before = np.repeat(missing_rows, max_dist+1)
# Compute col position to the right until diagonal by adding the shift
# Note: upper symmetric, so row_shifts = col_shift_
# 1 3
# 2 4
cols_after = (missing_cols + row_shifts).flatten("F")
# This time, rows remain constant since we are computing positions to
# the right
rows_after = np.repeat(missing_cols, max_dist+1)
# Combine positions to the right and upwards
rows = np.concatenate([rows_before, rows_after])
cols = np.concatenate([cols_before, cols_after])
data = np.ones(rows.shape, dtype=bool)
# Remove entries where rows or cols are negative or larger than shape
valid = (cols < sm) & (cols >= 0) & (rows < sm) & (rows >= 0)
# Build mask mat with miss bins up to max scan dist in upper triangle
mask = sp.coo_matrix(
(data[valid], (rows[valid], cols[valid])), shape=shape, dtype=bool
).tocsr()
else:
mask = sp.csr_matrix(shape, dtype=bool)
mask[missing_rows, :] = 1
mask[:, missing_cols] = 1
return mask
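def _demo_make_missing_mask():
    # Minimal usage sketch (illustrative only): reproduce the commented
    # example above, where bins 1 and 3 are missing and max_dist is 1.
    valid = np.array([0, 2, 4])
    mask = make_missing_mask((5, 5), valid, valid, max_dist=1, sym_upper=True)
    # Only the upper triangle around the missing bins is flagged
    print(mask.toarray().astype(int))
    return mask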
def zero_pad_sparse(mat, margin_h, margin_v, fmt="coo"):
"""
Adds margin of zeros around an input sparse matrix.
Parameters
----------
mat : scipy.sparse.csr_matrix
The matrix to be padded.
margin_h : int
The width of the horizontal margin to add on the left and right of the
matrix.
margin_v : int
The width of the vertical margin to add on the top and bottom of the
matrix.
fmt : string
The desired scipy sparse format of the output matrix
Returns
-------
scipy.sparse.csr_matrix :
        The padded matrix of dimensions (m + 2 * margin_v, n + 2 * margin_h).
Examples
--------
>>> m = sp.csr_matrix(np.array([[1, 2], [10, 20]]))
>>> zero_pad_sparse(m, 2, 1).toarray()
array([[ 0, 0, 0, 0, 0, 0],
[ 0, 0, 1, 2, 0, 0],
[ 0, 0, 10, 20, 0, 0],
[ 0, 0, 0, 0, 0, 0]])
"""
sm, sn = mat.shape
padded_mat = mat.copy()
# Up and down margins initialized with zeros and filled as needed
margin_h_0 = sp.csr_matrix((sm, margin_h), dtype=mat.dtype)
margin_v_0 = sp.csr_matrix((margin_v, sn + 2 * margin_h), dtype=mat.dtype)
padded_mat = sp.hstack([margin_h_0, padded_mat, margin_h_0], format="csr")
padded_mat = sp.vstack([margin_v_0, padded_mat, margin_v_0], format="csr")
return padded_mat
def crop_kernel(kernel, target_size):
"""
Crop a kernel matrix to target size horizontally and vertically.
If the target size is even, the target size is adjusted to the
next integer up.
Parameters
----------
kernel : numpy.ndarray of floats
Image to crop.
target_size : tuple of ints
Tuple defining the target shape of the kernel, takes the
form (rows, cols) where rows and cols are odd numbers.
Returns
-------
cropped : numpy.ndarray of floats
New image no larger than target dimensions
"""
# Use list for mutability
target = [d for d in target_size]
adjusted = False
for dim in range(len(target)):
if not target[dim] % 2:
target[dim] += 1
adjusted = True
if adjusted:
sys.stderr.write(
"WARNING: Cropped kernel size adjusted to "
f"{target[0]}x{target[1]} to keep odd dimensions.\n"
)
source_m, source_n = kernel.shape
target_m, target_n = target
# Define horizontal and vertical margins to trim
if source_m > target_m:
margin_rows = (source_m - target_m) // 2
else:
margin_rows = 0
if source_n > target_n:
margin_cols = (source_n - target_n) // 2
else:
margin_cols = 0
cropped = kernel[
margin_rows : (source_m - margin_rows),
margin_cols : (source_n - margin_cols),
]
return cropped
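def _demo_crop_kernel():
    # Minimal usage sketch (illustrative only): crop a 9x9 kernel down to
    # 5x5; margins are trimmed symmetrically on both sides of each axis.
    kernel = np.ones((9, 9))
    cropped = crop_kernel(kernel, (5, 5))
    assert cropped.shape == (5, 5)
    return cropped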
def resize_kernel(
kernel,
kernel_res=None,
signal_res=None,
factor=None,
min_size=7,
quiet=False,
):
"""
Resize a kernel matrix based on the resolution at which it was defined and
the signal resolution. E.g. if a kernel matrix was generated for 10kb and
the input signal is 20kb, kernel size will be divided by two. If the kernel
is enlarged, pixels are interpolated with a spline of degree 1.
Alternatively, a resize factor can be provided. In the example above, the
factor would be 0.5.
Parameters
----------
kernel : numpy.ndarray
Kernel matrix.
kernel_res : int
Resolution for which the kernel was designed. Mutually exclusive with
factor.
signal_res : int
Resolution of the signal matrix in basepair per matrix bin. Mutually
exclusive with factor.
factor : float
Resize factor. Can be provided as an alternative to kernel_res and
signal_res. Values above 1 will enlarge the kernel, values below 1 will
shrink it.
min_size : int
Lower bound, in number of rows/column allowed when resizing the kernel.
quiet : bool
Suppress warnings if resize factor was adjusted.
Returns
-------
resized_kernel : numpy.ndarray
The resized input kernel.
"""
km, kn = kernel.shape
if km != kn:
raise ValueError("kernel must be square.")
if not (km % 2) or not (kn % 2):
raise ValueError("kernel size must be odd.")
if factor is not None:
if kernel_res is not None or signal_res is not None:
raise ValueError(
"factor is mutually exclusive with resolution "
"parameters (kernel_res and signal_res)."
)
resize_factor = factor
else:
if kernel_res is None or signal_res is None:
raise ValueError(
"You must provide either a resize factor or the signal and "
"kernel resolutions."
)
# Define by how many times kernel must be enlarged for its pixels to
# match the signal's pixels
resize_factor = kernel_res / signal_res
if km * resize_factor < min_size:
resize_factor = min_size / km
resized_kernel = ndi.zoom(kernel, resize_factor, order=1)
if not resized_kernel.shape[0] % 2:
# Compute the factor required to yield a dimension smaller by one
adj_resize_factor = (resized_kernel.shape[0] - 1) / km
if not quiet:
sys.stderr.write(
f"Adjusting resize factor from {resize_factor} to {adj_resize_factor}.\n"
)
resized_kernel = ndi.zoom(kernel, adj_resize_factor, order=1)
return resized_kernel
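def _demo_resize_kernel():
    # Minimal usage sketch (illustrative only): a kernel designed at 10 kb is
    # resized for a 20 kb signal (factor 0.5). The min_size floor is lowered
    # so that the 7x7 kernel is actually allowed to shrink.
    kernel = np.random.random((7, 7))
    resized = resize_kernel(
        kernel, kernel_res=10000, signal_res=20000, min_size=3, quiet=True
    )
    # Output dimensions remain odd so the kernel keeps a central pixel
    assert resized.shape[0] % 2 == 1
    return resized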
def factorise_kernel(kernel, prop_info=0.999):
"""
Performs truncated SVD on an input kernel, returning the singular vectors
necessary to retain a given proportion of information contained in the
kernel.
Parameters
----------
kernel : numpy.ndarray of floats
The input 2D kernel to factorise.
prop_info : float
Proportion of information to retain.
Returns
-------
tuple of numpy.ndarrays of floats
A tuple containing the truncated left and right singular matrices,
where each singular vector has been multiplied by the square root of
their respective singular values.
"""
u, sigma, v = la.svd(kernel)
total_info = np.sum(sigma ** 2)
# Compute min. number of singular vectors to retain enough info
keep_k = np.flatnonzero(np.cumsum(sigma ** 2) > prop_info * total_info)[0] + 1
if keep_k > np.floor(min(kernel.shape) / 2):
sys.stderr.write(
f"Warning: Kernel factorisation required {keep_k} singular,"
"vectors this may result in slow operations.\n",
)
    # Truncate singular matrices to keep only the required vectors
u = u[:, :keep_k]
v = v[:keep_k, :]
# Multiply each singular vector by the sqrt of its singular value
for i in range(keep_k):
u[:, i] *= np.sqrt(sigma[i])
v[i, :] *= np.sqrt(sigma[i])
return (u, v)
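def _demo_factorise_kernel():
    # Minimal usage sketch (illustrative only): a rank-1 (separable) kernel is
    # captured by a single singular vector pair, so the product of the two
    # factors reconstructs it.
    kernel = np.outer(np.ones(5), np.arange(1.0, 6.0))
    u, v = factorise_kernel(kernel, prop_info=0.999)
    assert np.allclose(u @ v, kernel)
    return u, v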
def valid_to_missing(valid, size):
"""
    Given an array of valid indices, return the corresponding array of missing
indices.
Parameters
    ----------
valid : numpy.ndarray of ints
The valid indices.
size : int
The size of the matrix (maximum possible index + 1).
    Returns
    -------
missing : numpy.ndarray of ints
The missing indices.
"""
missing = np.ones(size, dtype=bool)
try:
missing[valid] = False
# In case there is no valid index
except IndexError:
pass
missing = np.flatnonzero(missing)
return missing
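def _demo_valid_to_missing():
    # Minimal usage sketch (illustrative only): with bins 0 and 2 valid in a
    # matrix of size 4, bins 1 and 3 are reported as missing.
    missing = valid_to_missing(np.array([0, 2]), 4)
    assert np.array_equal(missing, np.array([1, 3]))
    return missing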
| mad | identifier_name |
preprocessing.py | #!/usr/bin/env python3
# coding: utf-8
"""Chromosight's preprocessing submodule implements a number of functions to
operate on Hi-C contact maps before detection. These functions can be used to
improve the signal or filter unneeded signal. There are also functions to edit
(zoom, crop, factorize) kernel matrices.
"""
import sys
import numpy as np
import numpy.linalg as la
import scipy.stats as ss
import scipy.sparse as sp
import scipy.ndimage as ndi
from sklearn.isotonic import IsotonicRegression
def erase_missing(signal, valid_rows, valid_cols, sym_upper=True):
"""
Given a sparse matrix, set all pixels in missing (invalid) bins to 0.
Parameters
----------
signal : scipy.sparse.csr_matrix of floats
Input signal on which to erase values.
valid_rows : numpy.ndarray of ints
Indices of rows considered valid (not missing).
valid_cols : numpy.ndarray of ints
Indices of columns considered valid (not missing).
sym_upper : bool
Define if the input signal is upper symmetric.
Returns
-------
scipy.sparse.csr_matrix
The input signal with all values in missing bins set to 0
"""
if sym_upper and sp.issparse(signal):
if np.any(valid_rows != valid_cols):
raise ValueError(
"Valid rows and columns must be identical with sym_upper=True"
)
if signal.shape[0] != signal.shape[1]:
raise ValueError(
"Input matrix must be square when using sym_upper=True"
)
# Make a boolean mask from good bins
good_mask = np.isin(range(signal.shape[0]), valid_rows)
# Set all pixels in a nondetectable bin to 0
# For faster masking of bins, mask bins using dot product with an
        # identity matrix where bad bins have been masked on the diagonal
# E.g. if removing the second bin (row and column):
# 1 0 0 9 6 5 1 0 0 9 0 5
# 0 0 0 X 6 8 7 X 0 0 0 = 0 0 0
# 0 0 1 6 7 8 0 0 1 6 0 8
mask_mat = sp.eye(signal.shape[0])
mask_mat.data[0][~good_mask] = 0
erased = mask_mat.dot(signal).dot(mask_mat)
else:
# Get a boolean array of missing (1) and valid (0) rows
missing_rows = valid_to_missing(valid_rows, signal.shape[0])
missing_cols = valid_to_missing(valid_cols, signal.shape[1])
erased = signal.copy()
erased[missing_rows, :] = 0
erased[:, missing_cols] = 0
return erased
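def _demo_erase_missing():
    # Minimal usage sketch (illustrative only): erase bin 1 (its row and its
    # column) from a small upper-symmetric contact map.
    mat = sp.csr_matrix(np.triu(np.ones((3, 3))))
    valid = np.array([0, 2])
    erased = erase_missing(mat, valid, valid, sym_upper=True).toarray()
    # Row 1 and column 1 are now entirely zero
    assert erased[1, :].sum() == 0 and erased[:, 1].sum() == 0
    return erased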
def set_mat_diag(mat, diag=0, val=0):
"""
Set the nth diagonal of a symmetric 2D numpy array to a fixed value.
Operates in place.
Parameters
----------
mat : numpy.ndarray
Symmetric 2D array of floats.
diag : int
0-based index of the diagonal to modify. Use negative values for the
lower half.
val : float
Value to use for filling the diagonal
"""
m = mat.shape[0]
step = m + 1
start = diag
end = m ** 2 - diag * m
mat.flat[start:end:step] = val
def diag_trim(mat, n):
"""
Trim an upper triangle sparse matrix so that only the first n diagonals
are kept.
Parameters
----------
mat : scipy.sparse.csr_matrix or numpy.ndarray
The sparse matrix to be trimmed
n : int
The number of diagonals from the center to keep (0-based).
Returns
-------
    scipy.sparse.csr_matrix or numpy.ndarray:
        The diagonally trimmed upper triangle matrix with only the first
        n diagonals.
"""
if sp.issparse(mat):
if mat.format != "csr":
raise ValueError("input type must be scipy.sparse.csr_matrix")
# Trim diagonals by removing all elements further than n in the
# upper triangle
trimmed = sp.tril(mat, n, format="csr")
trimmed = sp.triu(trimmed, format="csr")
else:
trimmed = mat.copy()
n_diags = trimmed.shape[0]
for diag in range(n, n_diags):
set_mat_diag(trimmed, diag, 0)
    return trimmed
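def _demo_diag_trim():
    # Minimal usage sketch (illustrative only): keep only the main diagonal
    # and the first diagonal above it in an upper-triangle matrix.
    mat = sp.csr_matrix(np.triu(np.ones((4, 4))))
    trimmed = diag_trim(mat, 1).toarray()
    # Entries more than one bin away from the diagonal are dropped
    assert trimmed[0, 1] == 1 and trimmed[0, 2] == 0
    return trimmed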
def distance_law(
matrix, detectable_bins=None, max_dist=None, smooth=True, fun=np.nanmean
):
"""
Computes genomic distance law by averaging over each diagonal in the upper
triangle matrix. If a list of detectable bins is provided, pixels in
missing bins will be excluded from the averages. A maximum distance can be
specified to define how many diagonals should be computed.
    Parameters
    ----------
    matrix : scipy.sparse.csr_matrix
        The input matrix to compute the distance law from.
detectable_bins : numpy.ndarray of ints
An array of detectable bins indices to consider when computing
distance law.
max_dist : int
Maximum distance from diagonal, in number of bins in which to compute
distance law
smooth : bool
Whether to use isotonic regression to smooth the distance law.
fun : callable
A function to apply on each diagonal. Defaults to mean.
Returns
-------
    dist : np.ndarray
        The output genomic distance law.
    Examples
    --------
>>> m = np.ones((3,3))
>>> m += np.array([1,2,3])
>>> m
array([[2., 3., 4.],
[2., 3., 4.],
[2., 3., 4.]])
>>> distance_law(csr_matrix(m))
array([3. , 3.5, 4. ])
"""
mat_n = matrix.shape[0]
if max_dist is None:
max_dist = mat_n
n_diags = min(mat_n, max_dist + 1)
dist = np.zeros(mat_n)
if detectable_bins is None:
detectable_bins = np.array(range(mat_n))
for diag in range(n_diags):
# Find detectable which fall in diagonal
detect_mask = np.zeros(mat_n, dtype=bool)
detect_mask[detectable_bins] = 1
# Find bins which are detectable in the diagonal (intersect of
# hori and verti)
detect_mask_h = detect_mask[: (mat_n - diag)]
detect_mask_v = detect_mask[mat_n - (mat_n - diag) :]
detect_mask_diag = detect_mask_h & detect_mask_v
detect_diag = matrix.diagonal(diag)[detect_mask_diag]
dist[diag] = fun(detect_diag[detect_diag > 0])
# Smooth the curve using isotonic regression: Find closest approximation
# with the condition that point n+1 cannot be higher than point n.
# (i.e. contacts can only decrease when increasing distance)
if smooth and mat_n > 2:
ir = IsotonicRegression(increasing=False)
dist[~np.isfinite(dist)] = 0
dist = ir.fit_transform(range(len(dist)), dist)
return dist
def get_detectable_bins(mat, n_mads=3, inter=False):
"""
Returns lists of detectable indices after excluding low interacting bins
based on the proportion of zero pixel values in the matrix bins.
Parameters
----------
mat : scipy.sparse.coo_matrix
        A Hi-C matrix in the form of a 2D numpy array or coo matrix
n_mads : int
Number of median absolute deviation below the median required to
consider bins non-detectable.
inter : bool
Whether the matrix is interchromosomal. Default is to consider the
matrix is intrachromosomal (i.e. upper symmetric).
Returns
-------
    tuple of numpy.ndarrays :
        Tuple of 2 1D arrays containing indices of detectable rows and
        columns, respectively.
"""
matrix = mat.copy()
matrix.eliminate_zeros()
def mad(x): return ss.median_abs_deviation(x, nan_policy="omit")
if not inter:
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("Intrachromosomal matrices must be symmetric.")
# Replace nonzero pixels by ones to work on prop. of nonzero pixels
matrix.data = np.ones(matrix.data.shape)
# Compute number of nonzero values in each bin
sum_bins = sum_mat_bins(matrix)
# Compute variation in the number of nonzero pixels
sum_mad = mad(sum_bins)
# Find poor interacting rows and columns
sum_med = np.median(sum_bins)
detect_threshold = max(1, sum_med - sum_mad * n_mads)
# Removal of poor interacting rows and columns
good_bins = np.flatnonzero(sum_bins >= detect_threshold)
good_bins = (good_bins, good_bins)
else:
        # Adapted for asymmetric matrices (need to compute rows and columns)
|
return good_bins
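def _demo_get_detectable_bins():
    # Minimal usage sketch (illustrative only): bin 2 has no contacts at all
    # and is therefore excluded from the detectable bins.
    dense = np.ones((5, 5))
    dense[2, :] = 0
    dense[:, 2] = 0
    mat = sp.csr_matrix(np.triu(dense))
    good_rows, good_cols = get_detectable_bins(mat, n_mads=1)
    assert 2 not in good_rows and 2 not in good_cols
    return good_rows, good_cols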
def detrend(
matrix,
detectable_bins=None,
max_dist=None,
smooth=False,
fun=np.nanmean,
max_val=10,
):
"""
Detrends a Hi-C matrix by the distance law.
    The input matrix should have been normalised beforehand.
Parameters
----------
matrix : scipy.sparse.csr_matrix
The normalised intrachromosomal Hi-C matrix to detrend.
detectable_bins : tuple
Tuple containing a list of detectable rows and a list of columns on
which to perform detrending. Poorly interacting indices have been
excluded.
max_dist : int
Maximum number of bins from the diagonal at which to compute trend.
smooth : bool
Whether to use isotonic regression to smooth the trend.
fun : callable
Function to use on each diagonal to compute the trend.
max_val : float or None
Maximum value in the detrended matrix. Set to None to disable
Returns
-------
numpy.ndarray :
The detrended Hi-C matrix.
"""
matrix = matrix.tocsr()
y = distance_law(
matrix,
detectable_bins=detectable_bins,
max_dist=max_dist,
smooth=smooth,
fun=fun,
)
y[np.isnan(y)] = 0.0
# Detrending by the distance law
clean_mat = matrix.tocoo()
# clean_mat.data /= y_savgol[abs(clean_mat.row - clean_mat.col)]
try:
clean_mat.data = clean_mat.data / y[abs(clean_mat.row - clean_mat.col)]
# If no nonzero value in matrix, do nothing
except TypeError:
pass
clean_mat = clean_mat.tocsr()
if max_val is not None:
clean_mat[clean_mat >= max_val] = 1
return clean_mat
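def _demo_detrend():
    # Minimal usage sketch (illustrative only): a map whose contacts decay
    # exactly like its own distance law detrends to a flat value of 1.
    dense = np.array([[4.0, 2.0, 1.0], [0.0, 4.0, 2.0], [0.0, 0.0, 4.0]])
    clean = detrend(sp.csr_matrix(dense))
    assert np.allclose(clean.toarray()[np.triu_indices(3)], 1.0)
    return clean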
def ztransform(matrix):
"""
Z transformation for Hi-C matrices.
Parameters
----------
matrix : scipy.sparse.coo_matrix
A Hi-C matrix in sparse format.
Returns
-------
scipy.sparse.coo_matrix:
        The z-transformed Hi-C matrix.
"""
mat = matrix.copy()
mu = np.mean(mat.data)
sd = np.std(mat.data)
mat.data -= mu
mat.data /= sd
return mat
def sum_mat_bins(mat):
"""
    Compute the sum of matrix bins (i.e. rows or columns) using
only the upper triangle, assuming symmetrical matrices.
Parameters
----------
mat : scipy.sparse.coo_matrix
Contact map in sparse format, either in upper triangle or
full matrix.
Returns
-------
numpy.ndarray :
1D array of bin sums.
"""
    # Equivalent to row or col sum on a full matrix
# Note: mat.sum returns a 'matrix' object. A1 extracts the 1D flat array
# from the matrix
return mat.sum(axis=0).A1 + mat.sum(axis=1).A1 - mat.diagonal(0)
def subsample_contacts(M, n_contacts):
"""Bootstrap sampling of contacts in a sparse Hi-C map.
Parameters
----------
M : scipy.sparse.coo_matrix
The input Hi-C contact map in sparse format.
n_contacts : int
The number of contacts to sample.
Returns
-------
scipy.sparse.coo_matrix
A new matrix with a fraction of the original contacts.
"""
S = M.data.copy()
# Match cell idx to cumulative number of contacts
cum_counts = np.cumsum(S)
# Total number of contacts to sample
tot_contacts = int(cum_counts[-1])
    # Sample the desired number of contacts from the range(0, tot_contacts) array
sampled_contacts = np.random.choice(
int(tot_contacts), size=(n_contacts), replace=False
)
# Get indices of sampled contacts in the cum_counts array
idx = np.searchsorted(cum_counts, sampled_contacts, side="right")
# Bin those indices to the same dimensions as matrix data to get counts
sampled_counts = np.bincount(idx, minlength=S.shape[0])
# Get nonzero values to build new sparse matrix
nnz_mask = sampled_counts > 0
sampled_counts = sampled_counts[nnz_mask].astype(np.float64)
sampled_rows = M.row[nnz_mask]
sampled_cols = M.col[nnz_mask]
return sp.coo_matrix(
(sampled_counts, (sampled_rows, sampled_cols)),
shape=(M.shape[0], M.shape[1]),
)
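def _demo_subsample_contacts():
    # Minimal usage sketch (illustrative only): downsample a map holding 10
    # contacts to exactly 5 contacts.
    mat = sp.coo_matrix(np.array([[0, 4], [0, 6]]))
    sub = subsample_contacts(mat, 5)
    assert sub.sum() == 5
    return sub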
def frame_missing_mask(mask, kernel_shape, sym_upper=False, max_dist=None):
"""
    Adds a frame around the input mask, given a kernel. The goal of this
    frame is to define margins around the matrix where the kernel will not
    perform convolution (denoted by 1). If the matrix is upper symmetric, a
    margin of half the kernel's width is added below the diagonal, and a
    maximum distance from the diagonal above which margins need not be drawn
    can be considered. Otherwise, margins are simply added on all 4 sides of
    the matrix.
::
signal kernel _________
______ ____ |#######|
| | | | ==> |# #|
| | |___| |# #|
| | |# #|
|_____| |# #|
|#######|
--------
Parameters
----------
mask : scipy.sparse.csr_matrix of bool
The mask around which to add margins.
    kernel_shape : tuple of ints
        The number of rows and columns in the input kernel. Margins will be
        half these values.
sym_upper : bool
Whether the signal is a symmetric upper triangle matrix. If so, values
on a margin below the diagonal will be masked.
max_dist : int or None
Number of diagonals to keep
Returns
-------
framed_mask : scipy.sparse.csr_matrix of bool
The input mask with a padding of True around the edges. If sym_upper
is True, a padding is also added below the diagonal.
"""
if mask.dtype != bool:
raise ValueError("Mask must contain boolean values")
if not sp.issparse(mask):
raise ValueError("Mask must be a sparse matrix")
framed_mask = mask.copy()
ms, ns = mask.shape
mk, nk = kernel_shape
if sym_upper and (max_dist is not None):
# Remove diagonals further than scan distance in the input mask
framed_mask = diag_trim(framed_mask, max_dist + max(nk, mk)).tocsr()
max_m = max_dist + mk
max_n = max_dist + nk
else:
max_m, max_n = ms, ns
# Up and down margins initialized with zeros and filled as needed
margin_1 = sp.csr_matrix((mk - 1, ns), dtype=bool)
margin_2 = sp.csr_matrix((mk - 1, ns), dtype=bool)
if sym_upper and (max_dist is not None):
# Margin 1 (top) is in upper triangle -> fill missing up to scan dist
margin_1[:, :max_n] = 1
else:
margin_1[:, :] = 1
margin_2[:, :] = 1
framed_mask = sp.vstack([margin_1, framed_mask, margin_2], format="csr")
# Left and right
margin_1 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool)
margin_2 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool)
if sym_upper and (max_dist is not None):
# Margin 2 (right) is in upper triangle-> fill missing up to scan dist
margin_2[-(max_m + 1) :, :] = 1
# Fill only the start of left margin for the top-left corner
margin_1[: mk - 1, :] = 1
else:
margin_1[:, :] = 1
margin_2[:, :] = 1
framed_mask = sp.hstack([margin_1, framed_mask, margin_2], format="csr")
if sym_upper:
# LIL format is much faster when changing sparsity
framed_mask = framed_mask.tolil()
# Add margin below diagonal
big_k = max(nk, mk)
dia_margins = np.ones(big_k)
dia_offsets = np.arange(-1, -big_k-1, -1)
framed_mask += sp.diags(
dia_margins,
dia_offsets,
shape=framed_mask.shape,
format="lil",
dtype=bool,
)
framed_mask = framed_mask.tocsr()
return framed_mask
def check_missing_mask(signal, mask):
"""
Ensure all elements defined as missing by the mask are set to zero in the
signal. If this is not the case, raises an error.
Parameters
----------
signal : numpy.ndarray of floats or scipy.sparse.csr_matrix of floats
The signal to be checked.
mask : numpy.ndarray of bools or scipy.sparse.csr_matrix of bools
The mask defining missing values as True and valid values as False.
"""
if sp.issparse(mask):
# Check if there are nonzero values in the signal reported as missing
# by the mask
missing_with_signal = np.nonzero(
abs(signal[mask.nonzero()[0], mask.nonzero()[1]]) > 0
)[0]
if len(missing_with_signal) > 0:
            raise ValueError(
                f"There are {len(missing_with_signal)} non-zero elements "
                "reported as missing."
            )
else:
if np.sum(abs(signal[mask > 0])) > 1e-10:
            raise ValueError(
                f"There are {np.sum(abs(signal[mask > 0]))} non-zero elements "
                "reported as missing."
            )
def make_missing_mask(
shape, valid_rows, valid_cols, max_dist=None, sym_upper=False
):
"""
Given lists of valid rows and columns, generate a sparse matrix mask with
missing pixels denoted as 1 and valid pixels as 0. If a max_dist is
provided, upper symmetric matrices will only be flagged up to max_dist
pixels from the diagonal.
Parameters
----------
shape : tuple of ints
Shape of the mask to generate.
valid_rows : numpy.ndarray of ints
Array with the indices of valid rows that should be set to 0 in the
mask.
    valid_cols : numpy.ndarray of ints
        Array with the indices of valid columns that should be set to 0 in
        the mask.
max_dist : int or None
The maximum diagonal distance at which masking should take place.
sym_upper : bool
Whether the matrix is symmetric upper. If so, max_dist is ignored
Returns
-------
scipy.sparse.csr_matrix of bool
        The mask containing False values where pixels are valid and True
        values where pixels are missing.
"""
    # Error if the matrix is upper symmetric but the shape is rectangular or
    # the missing rows and cols are different
sm, sn = shape
if sym_upper and (sm != sn or len(valid_rows) != len(valid_cols)):
raise ValueError("Rectangular matrices cannot be upper symmetric")
# Get a boolean array of missing (1) and valid (0) rows
missing_rows = valid_to_missing(valid_rows, sm)
# When matrix is sym., rows and cols are synonym, no need to compute 2x
if sym_upper:
missing_cols = missing_rows
else:
missing_cols = valid_to_missing(valid_cols, sn)
# If upper sym., fill only upper diag up to max_dist.
# E. g. with bins 1 and 3 missing
# and a max_dist of 1:
# 0 1 0 0 0
# 0 1 1 0 0
# 0 0 0 1 0
# 0 0 0 1 1
# 0 0 0 0 0
    # For each missing bin, the mask is applied 1 pixel upwards and 1 to the right
# to fill only the upper triangle up to max_dist
if sym_upper:
# If no max dist has been specified, fill the whole upper triangle
if max_dist is None:
max_dist = min(shape)
# Generate matrix of distance shifts by row.
# Shape is len(missing_rows) x (max_dist + 1)
# e.g.: 2 missing rows and max dist of 1
# 0 0
# 1 1
row_shifts = np.tile(
np.array(range(max_dist + 1)), (len(missing_rows), 1)
).T
# Compute row positions upwards to diagonal by subtracting missing rows
# to the shifts. Following the previous example, if missing rows are
# bins 1 and 3:
# 1 3
# 0 2
rows_before = (missing_rows - row_shifts).flatten("F")
# looking at pixels up from the bins, cols remain the same:
# 1 3
# 1 3
cols_before = np.repeat(missing_rows, max_dist+1)
# Compute col position to the right until diagonal by adding the shift
# Note: upper symmetric, so row_shifts = col_shift_
# 1 3
# 2 4
cols_after = (missing_cols + row_shifts).flatten("F")
# This time, rows remain constant since we are computing positions to
# the right
rows_after = np.repeat(missing_cols, max_dist+1)
# Combine positions to the right and upwards
rows = np.concatenate([rows_before, rows_after])
cols = np.concatenate([cols_before, cols_after])
data = np.ones(rows.shape, dtype=bool)
# Remove entries where rows or cols are negative or larger than shape
valid = (cols < sm) & (cols >= 0) & (rows < sm) & (rows >= 0)
# Build mask mat with miss bins up to max scan dist in upper triangle
mask = sp.coo_matrix(
(data[valid], (rows[valid], cols[valid])), shape=shape, dtype=bool
).tocsr()
else:
mask = sp.csr_matrix(shape, dtype=bool)
mask[missing_rows, :] = 1
mask[:, missing_cols] = 1
return mask
def zero_pad_sparse(mat, margin_h, margin_v, fmt="coo"):
"""
Adds margin of zeros around an input sparse matrix.
Parameters
----------
mat : scipy.sparse.csr_matrix
The matrix to be padded.
margin_h : int
The width of the horizontal margin to add on the left and right of the
matrix.
margin_v : int
The width of the vertical margin to add on the top and bottom of the
matrix.
fmt : string
The desired scipy sparse format of the output matrix
Returns
-------
scipy.sparse.csr_matrix :
        The padded matrix of dimensions (m + 2 * margin_v, n + 2 * margin_h).
Examples
--------
>>> m = sp.csr_matrix(np.array([[1, 2], [10, 20]]))
>>> zero_pad_sparse(m, 2, 1).toarray()
array([[ 0, 0, 0, 0, 0, 0],
[ 0, 0, 1, 2, 0, 0],
[ 0, 0, 10, 20, 0, 0],
[ 0, 0, 0, 0, 0, 0]])
"""
sm, sn = mat.shape
padded_mat = mat.copy()
# Up and down margins initialized with zeros and filled as needed
margin_h_0 = sp.csr_matrix((sm, margin_h), dtype=mat.dtype)
margin_v_0 = sp.csr_matrix((margin_v, sn + 2 * margin_h), dtype=mat.dtype)
padded_mat = sp.hstack([margin_h_0, padded_mat, margin_h_0], format="csr")
padded_mat = sp.vstack([margin_v_0, padded_mat, margin_v_0], format="csr")
return padded_mat
def crop_kernel(kernel, target_size):
"""
Crop a kernel matrix to target size horizontally and vertically.
If the target size is even, the target size is adjusted to the
next integer up.
Parameters
----------
kernel : numpy.ndarray of floats
Image to crop.
target_size : tuple of ints
Tuple defining the target shape of the kernel, takes the
form (rows, cols) where rows and cols are odd numbers.
Returns
-------
cropped : numpy.ndarray of floats
New image no larger than target dimensions
"""
# Use list for mutability
target = [d for d in target_size]
adjusted = False
for dim in range(len(target)):
if not target[dim] % 2:
target[dim] += 1
adjusted = True
if adjusted:
sys.stderr.write(
"WARNING: Cropped kernel size adjusted to "
f"{target[0]}x{target[1]} to keep odd dimensions.\n"
)
source_m, source_n = kernel.shape
target_m, target_n = target
# Define horizontal and vertical margins to trim
if source_m > target_m:
margin_rows = (source_m - target_m) // 2
else:
margin_rows = 0
if source_n > target_n:
margin_cols = (source_n - target_n) // 2
else:
margin_cols = 0
cropped = kernel[
margin_rows : (source_m - margin_rows),
margin_cols : (source_n - margin_cols),
]
return cropped
def resize_kernel(
kernel,
kernel_res=None,
signal_res=None,
factor=None,
min_size=7,
quiet=False,
):
"""
Resize a kernel matrix based on the resolution at which it was defined and
the signal resolution. E.g. if a kernel matrix was generated for 10kb and
the input signal is 20kb, kernel size will be divided by two. If the kernel
is enlarged, pixels are interpolated with a spline of degree 1.
Alternatively, a resize factor can be provided. In the example above, the
factor would be 0.5.
Parameters
----------
kernel : numpy.ndarray
Kernel matrix.
kernel_res : int
Resolution for which the kernel was designed. Mutually exclusive with
factor.
signal_res : int
Resolution of the signal matrix in basepair per matrix bin. Mutually
exclusive with factor.
factor : float
Resize factor. Can be provided as an alternative to kernel_res and
signal_res. Values above 1 will enlarge the kernel, values below 1 will
shrink it.
min_size : int
Lower bound, in number of rows/column allowed when resizing the kernel.
quiet : bool
Suppress warnings if resize factor was adjusted.
Returns
-------
resized_kernel : numpy.ndarray
The resized input kernel.
"""
km, kn = kernel.shape
if km != kn:
raise ValueError("kernel must be square.")
if not (km % 2) or not (kn % 2):
raise ValueError("kernel size must be odd.")
if factor is not None:
if kernel_res is not None or signal_res is not None:
raise ValueError(
"factor is mutually exclusive with resolution "
"parameters (kernel_res and signal_res)."
)
resize_factor = factor
else:
if kernel_res is None or signal_res is None:
raise ValueError(
"You must provide either a resize factor or the signal and "
"kernel resolutions."
)
# Define by how many times kernel must be enlarged for its pixels to
# match the signal's pixels
resize_factor = kernel_res / signal_res
if km * resize_factor < min_size:
resize_factor = min_size / km
resized_kernel = ndi.zoom(kernel, resize_factor, order=1)
if not resized_kernel.shape[0] % 2:
# Compute the factor required to yield a dimension smaller by one
adj_resize_factor = (resized_kernel.shape[0] - 1) / km
if not quiet:
sys.stderr.write(
f"Adjusting resize factor from {resize_factor} to {adj_resize_factor}.\n"
)
resized_kernel = ndi.zoom(kernel, adj_resize_factor, order=1)
return resized_kernel
def factorise_kernel(kernel, prop_info=0.999):
"""
Performs truncated SVD on an input kernel, returning the singular vectors
necessary to retain a given proportion of information contained in the
kernel.
Parameters
----------
kernel : numpy.ndarray of floats
The input 2D kernel to factorise.
prop_info : float
Proportion of information to retain.
Returns
-------
tuple of numpy.ndarrays of floats
A tuple containing the truncated left and right singular matrices,
where each singular vector has been multiplied by the square root of
their respective singular values.
"""
u, sigma, v = la.svd(kernel)
total_info = np.sum(sigma ** 2)
# Compute min. number of singular vectors to retain enough info
keep_k = np.flatnonzero(np.cumsum(sigma ** 2) > prop_info * total_info)[0] + 1
if keep_k > np.floor(min(kernel.shape) / 2):
sys.stderr.write(
f"Warning: Kernel factorisation required {keep_k} singular,"
"vectors this may result in slow operations.\n",
)
    # Truncate singular matrices to keep only the required vectors
u = u[:, :keep_k]
v = v[:keep_k, :]
# Multiply each singular vector by the sqrt of its singular value
for i in range(keep_k):
u[:, i] *= np.sqrt(sigma[i])
v[i, :] *= np.sqrt(sigma[i])
return (u, v)
def valid_to_missing(valid, size):
"""
    Given an array of valid indices, return the corresponding array of missing
indices.
Parameters
    ----------
valid : numpy.ndarray of ints
The valid indices.
size : int
The size of the matrix (maximum possible index + 1).
    Returns
    -------
missing : numpy.ndarray of ints
The missing indices.
"""
missing = np.ones(size, dtype=bool)
try:
missing[valid] = False
# In case there is no valid index
except IndexError:
pass
missing = np.flatnonzero(missing)
return missing
| sum_rows, sum_cols = matrix.sum(axis=1).A1, matrix.sum(axis=0).A1
mad_rows, mad_cols = mad(sum_rows), mad(sum_cols)
med_rows, med_cols = np.median(sum_rows), np.median(sum_cols)
detect_threshold_rows = max(1, med_rows - mad_rows * n_mads)
detect_threshold_cols = max(1, med_cols - mad_cols * n_mads)
good_rows = np.flatnonzero(sum_rows > detect_threshold_rows)
good_cols = np.flatnonzero(sum_cols > detect_threshold_cols)
good_bins = (good_rows, good_cols) | conditional_block |
preprocessing.py | #!/usr/bin/env python3
# coding: utf-8
"""Chromosight's preprocessing submodule implements a number of functions to
operate on Hi-C contact maps before detection. These functions can be used to
improve the signal or filter unneeded signal. There are also functions to edit
(zoom, crop, factorize) kernel matrices.
"""
import sys
import numpy as np
import numpy.linalg as la
import scipy.stats as ss
import scipy.sparse as sp
import scipy.ndimage as ndi
from sklearn.isotonic import IsotonicRegression
def erase_missing(signal, valid_rows, valid_cols, sym_upper=True):
"""
Given a sparse matrix, set all pixels in missing (invalid) bins to 0.
Parameters
----------
signal : scipy.sparse.csr_matrix of floats
Input signal on which to erase values.
valid_rows : numpy.ndarray of ints
Indices of rows considered valid (not missing).
valid_cols : numpy.ndarray of ints
Indices of columns considered valid (not missing).
sym_upper : bool
Define if the input signal is upper symmetric.
Returns
-------
scipy.sparse.csr_matrix
The input signal with all values in missing bins set to 0
"""
if sym_upper and sp.issparse(signal):
if np.any(valid_rows != valid_cols):
raise ValueError(
"Valid rows and columns must be identical with sym_upper=True"
)
if signal.shape[0] != signal.shape[1]:
raise ValueError(
"Input matrix must be square when using sym_upper=True"
)
# Make a boolean mask from good bins
good_mask = np.isin(range(signal.shape[0]), valid_rows)
# Set all pixels in a nondetectable bin to 0
# For faster masking of bins, mask bins using dot product with an
        # identity matrix where bad bins have been masked on the diagonal
# E.g. if removing the second bin (row and column):
# 1 0 0 9 6 5 1 0 0 9 0 5
# 0 0 0 X 6 8 7 X 0 0 0 = 0 0 0
# 0 0 1 6 7 8 0 0 1 6 0 8
mask_mat = sp.eye(signal.shape[0])
mask_mat.data[0][~good_mask] = 0
erased = mask_mat.dot(signal).dot(mask_mat)
else:
# Get a boolean array of missing (1) and valid (0) rows
missing_rows = valid_to_missing(valid_rows, signal.shape[0])
missing_cols = valid_to_missing(valid_cols, signal.shape[1])
erased = signal.copy()
erased[missing_rows, :] = 0
erased[:, missing_cols] = 0
return erased
def set_mat_diag(mat, diag=0, val=0):
"""
Set the nth diagonal of a symmetric 2D numpy array to a fixed value.
Operates in place.
Parameters
----------
mat : numpy.ndarray
Symmetric 2D array of floats.
diag : int
0-based index of the diagonal to modify. Use negative values for the
lower half.
val : float
Value to use for filling the diagonal
"""
m = mat.shape[0]
step = m + 1
start = diag
end = m ** 2 - diag * m
mat.flat[start:end:step] = val
def diag_trim(mat, n):
"""
Trim an upper triangle sparse matrix so that only the first n diagonals
are kept.
Parameters
----------
mat : scipy.sparse.csr_matrix or numpy.ndarray
The sparse matrix to be trimmed
n : int
The number of diagonals from the center to keep (0-based).
Returns
-------
    scipy.sparse.csr_matrix or numpy.ndarray:
        The diagonally trimmed upper triangle matrix with only the first
        n diagonals.
"""
if sp.issparse(mat):
if mat.format != "csr":
raise ValueError("input type must be scipy.sparse.csr_matrix")
# Trim diagonals by removing all elements further than n in the
# upper triangle
trimmed = sp.tril(mat, n, format="csr")
trimmed = sp.triu(trimmed, format="csr")
else:
trimmed = mat.copy()
n_diags = trimmed.shape[0]
for diag in range(n, n_diags):
set_mat_diag(trimmed, diag, 0)
    return trimmed
def distance_law(
matrix, detectable_bins=None, max_dist=None, smooth=True, fun=np.nanmean
):
"""
Computes genomic distance law by averaging over each diagonal in the upper
triangle matrix. If a list of detectable bins is provided, pixels in
missing bins will be excluded from the averages. A maximum distance can be
specified to define how many diagonals should be computed.
    Parameters
    ----------
    matrix : scipy.sparse.csr_matrix
        The input matrix to compute the distance law from.
detectable_bins : numpy.ndarray of ints
An array of detectable bins indices to consider when computing
distance law.
max_dist : int
Maximum distance from diagonal, in number of bins in which to compute
distance law
smooth : bool
Whether to use isotonic regression to smooth the distance law.
fun : callable
A function to apply on each diagonal. Defaults to mean.
Returns
-------
    dist : np.ndarray
        The output genomic distance law.
    Examples
    --------
>>> m = np.ones((3,3))
>>> m += np.array([1,2,3])
>>> m
array([[2., 3., 4.],
[2., 3., 4.],
[2., 3., 4.]])
>>> distance_law(csr_matrix(m))
array([3. , 3.5, 4. ])
"""
mat_n = matrix.shape[0]
if max_dist is None:
max_dist = mat_n
n_diags = min(mat_n, max_dist + 1)
dist = np.zeros(mat_n)
if detectable_bins is None:
detectable_bins = np.array(range(mat_n))
for diag in range(n_diags):
# Find detectable which fall in diagonal
detect_mask = np.zeros(mat_n, dtype=bool)
detect_mask[detectable_bins] = 1
# Find bins which are detectable in the diagonal (intersect of
# hori and verti)
detect_mask_h = detect_mask[: (mat_n - diag)]
detect_mask_v = detect_mask[mat_n - (mat_n - diag) :]
detect_mask_diag = detect_mask_h & detect_mask_v
detect_diag = matrix.diagonal(diag)[detect_mask_diag]
dist[diag] = fun(detect_diag[detect_diag > 0])
# Smooth the curve using isotonic regression: Find closest approximation
# with the condition that point n+1 cannot be higher than point n.
# (i.e. contacts can only decrease when increasing distance)
if smooth and mat_n > 2:
ir = IsotonicRegression(increasing=False)
dist[~np.isfinite(dist)] = 0
dist = ir.fit_transform(range(len(dist)), dist)
return dist
def get_detectable_bins(mat, n_mads=3, inter=False):
"""
Returns lists of detectable indices after excluding low interacting bins
based on the proportion of zero pixel values in the matrix bins.
Parameters
----------
mat : scipy.sparse.coo_matrix
        A Hi-C matrix in the form of a 2D numpy array or coo matrix
n_mads : int
Number of median absolute deviation below the median required to
consider bins non-detectable.
inter : bool
Whether the matrix is interchromosomal. Default is to consider the
matrix is intrachromosomal (i.e. upper symmetric).
Returns
-------
    tuple of numpy.ndarrays :
        Tuple of 2 1D arrays containing indices of detectable rows and
        columns, respectively.
"""
matrix = mat.copy()
matrix.eliminate_zeros()
def mad(x): return ss.median_abs_deviation(x, nan_policy="omit")
if not inter:
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("Intrachromosomal matrices must be symmetric.")
# Replace nonzero pixels by ones to work on prop. of nonzero pixels
matrix.data = np.ones(matrix.data.shape)
# Compute number of nonzero values in each bin
sum_bins = sum_mat_bins(matrix)
# Compute variation in the number of nonzero pixels
sum_mad = mad(sum_bins)
# Find poor interacting rows and columns
sum_med = np.median(sum_bins)
detect_threshold = max(1, sum_med - sum_mad * n_mads)
# Removal of poor interacting rows and columns
good_bins = np.flatnonzero(sum_bins >= detect_threshold)
good_bins = (good_bins, good_bins)
else:
        # Adapted for asymmetric matrices (need to compute rows and columns)
sum_rows, sum_cols = matrix.sum(axis=1).A1, matrix.sum(axis=0).A1
mad_rows, mad_cols = mad(sum_rows), mad(sum_cols)
med_rows, med_cols = np.median(sum_rows), np.median(sum_cols)
detect_threshold_rows = max(1, med_rows - mad_rows * n_mads)
detect_threshold_cols = max(1, med_cols - mad_cols * n_mads)
good_rows = np.flatnonzero(sum_rows > detect_threshold_rows)
good_cols = np.flatnonzero(sum_cols > detect_threshold_cols)
good_bins = (good_rows, good_cols)
return good_bins
def detrend(
matrix,
detectable_bins=None,
max_dist=None,
smooth=False,
fun=np.nanmean,
max_val=10,
):
"""
Detrends a Hi-C matrix by the distance law.
    The input matrix should have been normalised beforehand.
Parameters
----------
matrix : scipy.sparse.csr_matrix
The normalised intrachromosomal Hi-C matrix to detrend.
detectable_bins : tuple
Tuple containing a list of detectable rows and a list of columns on
which to perform detrending. Poorly interacting indices have been
excluded.
max_dist : int
Maximum number of bins from the diagonal at which to compute trend.
smooth : bool
Whether to use isotonic regression to smooth the trend.
fun : callable
Function to use on each diagonal to compute the trend.
max_val : float or None
Maximum value in the detrended matrix. Set to None to disable
Returns
-------
numpy.ndarray :
The detrended Hi-C matrix.
"""
matrix = matrix.tocsr()
y = distance_law(
matrix,
detectable_bins=detectable_bins,
max_dist=max_dist,
smooth=smooth,
fun=fun,
)
y[np.isnan(y)] = 0.0
# Detrending by the distance law
clean_mat = matrix.tocoo()
# clean_mat.data /= y_savgol[abs(clean_mat.row - clean_mat.col)]
try:
clean_mat.data = clean_mat.data / y[abs(clean_mat.row - clean_mat.col)]
# If no nonzero value in matrix, do nothing
except TypeError:
pass
clean_mat = clean_mat.tocsr()
if max_val is not None:
clean_mat[clean_mat >= max_val] = 1
return clean_mat
def ztransform(matrix):
"""
Z transformation for Hi-C matrices.
Parameters
----------
matrix : scipy.sparse.coo_matrix
A Hi-C matrix in sparse format. |
Returns
-------
scipy.sparse.coo_matrix:
        The z-transformed Hi-C matrix.
"""
mat = matrix.copy()
mu = np.mean(mat.data)
sd = np.std(mat.data)
mat.data -= mu
mat.data /= sd
return mat
def sum_mat_bins(mat):
"""
    Compute the sum of matrix bins (i.e. rows or columns) using
only the upper triangle, assuming symmetrical matrices.
Parameters
----------
mat : scipy.sparse.coo_matrix
Contact map in sparse format, either in upper triangle or
full matrix.
Returns
-------
numpy.ndarray :
1D array of bin sums.
"""
    # Equivalent to row or col sum on a full matrix
# Note: mat.sum returns a 'matrix' object. A1 extracts the 1D flat array
# from the matrix
return mat.sum(axis=0).A1 + mat.sum(axis=1).A1 - mat.diagonal(0)
def subsample_contacts(M, n_contacts):
"""Bootstrap sampling of contacts in a sparse Hi-C map.
Parameters
----------
M : scipy.sparse.coo_matrix
The input Hi-C contact map in sparse format.
n_contacts : int
The number of contacts to sample.
Returns
-------
scipy.sparse.coo_matrix
A new matrix with a fraction of the original contacts.
"""
S = M.data.copy()
# Match cell idx to cumulative number of contacts
cum_counts = np.cumsum(S)
# Total number of contacts to sample
tot_contacts = int(cum_counts[-1])
    # Sample the desired number of contacts from the range(0, tot_contacts) array
sampled_contacts = np.random.choice(
int(tot_contacts), size=(n_contacts), replace=False
)
# Get indices of sampled contacts in the cum_counts array
idx = np.searchsorted(cum_counts, sampled_contacts, side="right")
# Bin those indices to the same dimensions as matrix data to get counts
sampled_counts = np.bincount(idx, minlength=S.shape[0])
# Get nonzero values to build new sparse matrix
nnz_mask = sampled_counts > 0
sampled_counts = sampled_counts[nnz_mask].astype(np.float64)
sampled_rows = M.row[nnz_mask]
sampled_cols = M.col[nnz_mask]
return sp.coo_matrix(
(sampled_counts, (sampled_rows, sampled_cols)),
shape=(M.shape[0], M.shape[1]),
)
def frame_missing_mask(mask, kernel_shape, sym_upper=False, max_dist=None):
"""
    Adds a frame around the input mask, given a kernel. The goal of this
    frame is to define margins around the matrix where the kernel will not
    perform convolution (denoted by 1). If the matrix is upper symmetric, a
    margin of half the kernel's width is added below the diagonal, and a
    maximum distance from the diagonal above which margins need not be drawn
    can be considered. Otherwise, margins are simply added on all 4 sides of
    the matrix.
::
signal kernel _________
______ ____ |#######|
| | | | ==> |# #|
| | |___| |# #|
| | |# #|
|_____| |# #|
|#######|
--------
Parameters
----------
mask : scipy.sparse.csr_matrix of bool
The mask around which to add margins.
    kernel_shape : tuple of ints
        The number of rows and columns in the input kernel. Margins will be
        half these values.
sym_upper : bool
Whether the signal is a symmetric upper triangle matrix. If so, values
on a margin below the diagonal will be masked.
max_dist : int or None
Number of diagonals to keep
Returns
-------
framed_mask : scipy.sparse.csr_matrix of bool
The input mask with a padding of True around the edges. If sym_upper
is True, a padding is also added below the diagonal.
"""
if mask.dtype != bool:
raise ValueError("Mask must contain boolean values")
if not sp.issparse(mask):
raise ValueError("Mask must be a sparse matrix")
framed_mask = mask.copy()
ms, ns = mask.shape
mk, nk = kernel_shape
if sym_upper and (max_dist is not None):
# Remove diagonals further than scan distance in the input mask
framed_mask = diag_trim(framed_mask, max_dist + max(nk, mk)).tocsr()
max_m = max_dist + mk
max_n = max_dist + nk
else:
max_m, max_n = ms, ns
# Up and down margins initialized with zeros and filled as needed
margin_1 = sp.csr_matrix((mk - 1, ns), dtype=bool)
margin_2 = sp.csr_matrix((mk - 1, ns), dtype=bool)
if sym_upper and (max_dist is not None):
# Margin 1 (top) is in upper triangle -> fill missing up to scan dist
margin_1[:, :max_n] = 1
else:
margin_1[:, :] = 1
margin_2[:, :] = 1
framed_mask = sp.vstack([margin_1, framed_mask, margin_2], format="csr")
# Left and right
margin_1 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool)
margin_2 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool)
if sym_upper and (max_dist is not None):
# Margin 2 (right) is in upper triangle-> fill missing up to scan dist
margin_2[-(max_m + 1) :, :] = 1
# Fill only the start of left margin for the top-left corner
margin_1[: mk - 1, :] = 1
else:
margin_1[:, :] = 1
margin_2[:, :] = 1
framed_mask = sp.hstack([margin_1, framed_mask, margin_2], format="csr")
if sym_upper:
# LIL format is much faster when changing sparsity
framed_mask = framed_mask.tolil()
# Add margin below diagonal
big_k = max(nk, mk)
dia_margins = np.ones(big_k)
dia_offsets = np.arange(-1, -big_k-1, -1)
framed_mask += sp.diags(
dia_margins,
dia_offsets,
shape=framed_mask.shape,
format="lil",
dtype=bool,
)
framed_mask = framed_mask.tocsr()
return framed_mask
def check_missing_mask(signal, mask):
"""
Ensure all elements defined as missing by the mask are set to zero in the
signal. If this is not the case, raises an error.
Parameters
----------
signal : numpy.ndarray of floats or scipy.sparse.csr_matrix of floats
The signal to be checked.
mask : numpy.ndarray of bools or scipy.sparse.csr_matrix of bools
The mask defining missing values as True and valid values as False.
"""
if sp.issparse(mask):
# Check if there are nonzero values in the signal reported as missing
# by the mask
missing_with_signal = np.nonzero(
abs(signal[mask.nonzero()[0], mask.nonzero()[1]]) > 0
)[0]
if len(missing_with_signal) > 0:
            raise ValueError(
                f"There are {len(missing_with_signal)} non-zero elements "
                "reported as missing."
            )
else:
if np.sum(abs(signal[mask > 0])) > 1e-10:
            raise ValueError(
                f"There are {np.sum(abs(signal[mask > 0]))} non-zero elements "
                "reported as missing."
            )
def make_missing_mask(
shape, valid_rows, valid_cols, max_dist=None, sym_upper=False
):
"""
Given lists of valid rows and columns, generate a sparse matrix mask with
missing pixels denoted as 1 and valid pixels as 0. If a max_dist is
provided, upper symmetric matrices will only be flagged up to max_dist
pixels from the diagonal.
Parameters
----------
shape : tuple of ints
Shape of the mask to generate.
valid_rows : numpy.ndarray of ints
Array with the indices of valid rows that should be set to 0 in the
mask.
    valid_cols : numpy.ndarray of ints
        Array with the indices of valid columns that should be set to 0 in
        the mask.
max_dist : int or None
The maximum diagonal distance at which masking should take place.
sym_upper : bool
Whether the matrix is symmetric upper. If so, max_dist is ignored
Returns
-------
scipy.sparse.csr_matrix of bool
        The mask containing False values where pixels are valid and True
        values where pixels are missing.
"""
    # Error if the matrix is upper symmetric but the shape is rectangular or
    # the missing rows and cols are different
sm, sn = shape
if sym_upper and (sm != sn or len(valid_rows) != len(valid_cols)):
raise ValueError("Rectangular matrices cannot be upper symmetric")
# Get a boolean array of missing (1) and valid (0) rows
missing_rows = valid_to_missing(valid_rows, sm)
# When matrix is sym., rows and cols are synonym, no need to compute 2x
if sym_upper:
missing_cols = missing_rows
else:
missing_cols = valid_to_missing(valid_cols, sn)
# If upper sym., fill only upper diag up to max_dist.
# E. g. with bins 1 and 3 missing
# and a max_dist of 1:
# 0 1 0 0 0
# 0 1 1 0 0
# 0 0 0 1 0
# 0 0 0 1 1
# 0 0 0 0 0
    # For each missing bin, the mask is applied 1 pixel upwards and 1 to the right
# to fill only the upper triangle up to max_dist
if sym_upper:
# If no max dist has been specified, fill the whole upper triangle
if max_dist is None:
max_dist = min(shape)
# Generate matrix of distance shifts by row.
# Shape is len(missing_rows) x (max_dist + 1)
# e.g.: 2 missing rows and max dist of 1
# 0 0
# 1 1
row_shifts = np.tile(
np.array(range(max_dist + 1)), (len(missing_rows), 1)
).T
# Compute row positions upwards to diagonal by subtracting missing rows
# to the shifts. Following the previous example, if missing rows are
# bins 1 and 3:
# 1 3
# 0 2
rows_before = (missing_rows - row_shifts).flatten("F")
# looking at pixels up from the bins, cols remain the same:
# 1 3
# 1 3
cols_before = np.repeat(missing_rows, max_dist+1)
# Compute col position to the right until diagonal by adding the shift
# Note: upper symmetric, so row_shifts = col_shift_
# 1 3
# 2 4
cols_after = (missing_cols + row_shifts).flatten("F")
# This time, rows remain constant since we are computing positions to
# the right
rows_after = np.repeat(missing_cols, max_dist+1)
# Combine positions to the right and upwards
rows = np.concatenate([rows_before, rows_after])
cols = np.concatenate([cols_before, cols_after])
data = np.ones(rows.shape, dtype=bool)
# Remove entries where rows or cols are negative or larger than shape
valid = (cols < sm) & (cols >= 0) & (rows < sm) & (rows >= 0)
# Build mask mat with miss bins up to max scan dist in upper triangle
mask = sp.coo_matrix(
(data[valid], (rows[valid], cols[valid])), shape=shape, dtype=bool
).tocsr()
else:
mask = sp.csr_matrix(shape, dtype=bool)
mask[missing_rows, :] = 1
mask[:, missing_cols] = 1
return mask
def zero_pad_sparse(mat, margin_h, margin_v, fmt="coo"):
"""
Adds margin of zeros around an input sparse matrix.
Parameters
----------
mat : scipy.sparse.csr_matrix
The matrix to be padded.
margin_h : int
The width of the horizontal margin to add on the left and right of the
matrix.
margin_v : int
The width of the vertical margin to add on the top and bottom of the
matrix.
fmt : string
The desired scipy sparse format of the output matrix
Returns
-------
scipy.sparse.csr_matrix :
        The padded matrix of dimensions (m + 2 * margin_v, n + 2 * margin_h).
Examples
--------
>>> m = sp.csr_matrix(np.array([[1, 2], [10, 20]]))
>>> zero_pad_sparse(m, 2, 1).toarray()
array([[ 0, 0, 0, 0, 0, 0],
[ 0, 0, 1, 2, 0, 0],
[ 0, 0, 10, 20, 0, 0],
[ 0, 0, 0, 0, 0, 0]])
"""
sm, sn = mat.shape
padded_mat = mat.copy()
# Up and down margins initialized with zeros and filled as needed
margin_h_0 = sp.csr_matrix((sm, margin_h), dtype=mat.dtype)
margin_v_0 = sp.csr_matrix((margin_v, sn + 2 * margin_h), dtype=mat.dtype)
padded_mat = sp.hstack([margin_h_0, padded_mat, margin_h_0], format="csr")
padded_mat = sp.vstack([margin_v_0, padded_mat, margin_v_0], format="csr")
return padded_mat
def crop_kernel(kernel, target_size):
"""
Crop a kernel matrix to target size horizontally and vertically.
If the target size is even, the target size is adjusted to the
next integer up.
Parameters
----------
kernel : numpy.ndarray of floats
Image to crop.
target_size : tuple of ints
Tuple defining the target shape of the kernel, takes the
form (rows, cols) where rows and cols are odd numbers.
Returns
-------
cropped : numpy.ndarray of floats
New image no larger than target dimensions
"""
# Use list for mutability
target = [d for d in target_size]
adjusted = False
for dim in range(len(target)):
if not target[dim] % 2:
target[dim] += 1
adjusted = True
if adjusted:
sys.stderr.write(
"WARNING: Cropped kernel size adjusted to "
f"{target[0]}x{target[1]} to keep odd dimensions.\n"
)
source_m, source_n = kernel.shape
target_m, target_n = target
# Define horizontal and vertical margins to trim
if source_m > target_m:
margin_rows = (source_m - target_m) // 2
else:
margin_rows = 0
if source_n > target_n:
margin_cols = (source_n - target_n) // 2
else:
margin_cols = 0
cropped = kernel[
margin_rows : (source_m - margin_rows),
margin_cols : (source_n - margin_cols),
]
return cropped
def resize_kernel(
kernel,
kernel_res=None,
signal_res=None,
factor=None,
min_size=7,
quiet=False,
):
"""
Resize a kernel matrix based on the resolution at which it was defined and
the signal resolution. E.g. if a kernel matrix was generated for 10kb and
the input signal is 20kb, kernel size will be divided by two. If the kernel
is enlarged, pixels are interpolated with a spline of degree 1.
Alternatively, a resize factor can be provided. In the example above, the
factor would be 0.5.
Parameters
----------
kernel : numpy.ndarray
Kernel matrix.
kernel_res : int
Resolution for which the kernel was designed. Mutually exclusive with
factor.
signal_res : int
Resolution of the signal matrix in basepair per matrix bin. Mutually
exclusive with factor.
factor : float
Resize factor. Can be provided as an alternative to kernel_res and
signal_res. Values above 1 will enlarge the kernel, values below 1 will
shrink it.
min_size : int
Lower bound, in number of rows/column allowed when resizing the kernel.
quiet : bool
Suppress warnings if resize factor was adjusted.
Returns
-------
resized_kernel : numpy.ndarray
The resized input kernel.
"""
km, kn = kernel.shape
if km != kn:
raise ValueError("kernel must be square.")
if not (km % 2) or not (kn % 2):
raise ValueError("kernel size must be odd.")
if factor is not None:
if kernel_res is not None or signal_res is not None:
raise ValueError(
"factor is mutually exclusive with resolution "
"parameters (kernel_res and signal_res)."
)
resize_factor = factor
else:
if kernel_res is None or signal_res is None:
raise ValueError(
"You must provide either a resize factor or the signal and "
"kernel resolutions."
)
# Define by how many times kernel must be enlarged for its pixels to
# match the signal's pixels
resize_factor = kernel_res / signal_res
if km * resize_factor < min_size:
resize_factor = min_size / km
resized_kernel = ndi.zoom(kernel, resize_factor, order=1)
if not resized_kernel.shape[0] % 2:
# Compute the factor required to yield a dimension smaller by one
adj_resize_factor = (resized_kernel.shape[0] - 1) / km
if not quiet:
sys.stderr.write(
f"Adjusting resize factor from {resize_factor} to {adj_resize_factor}.\n"
)
resized_kernel = ndi.zoom(kernel, adj_resize_factor, order=1)
return resized_kernel
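# Illustrative sketch, not part of the original module (helper name is
# hypothetical; assumes numpy is imported as np): a 7x7 kernel designed for
# 10 kb bins and applied to a 5 kb matrix is first doubled to 14x14, then
# nudged to 13x13 to keep odd dimensions.
def _example_resize_kernel():
    kernel = np.ones((7, 7))
    resized = resize_kernel(kernel, kernel_res=10000, signal_res=5000, quiet=True)
    return resized.shape  # (13, 13)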
def factorise_kernel(kernel, prop_info=0.999):
"""
Performs truncated SVD on an input kernel, returning the singular vectors
necessary to retain a given proportion of information contained in the
kernel.
Parameters
----------
kernel : numpy.ndarray of floats
The input 2D kernel to factorise.
prop_info : float
Proportion of information to retain.
Returns
-------
tuple of numpy.ndarrays of floats
A tuple containing the truncated left and right singular matrices,
where each singular vector has been multiplied by the square root of
their respective singular values.
"""
u, sigma, v = la.svd(kernel)
total_info = np.sum(sigma ** 2)
# Compute min. number of singular vectors to retain enough info
keep_k = np.flatnonzero(np.cumsum(sigma ** 2) > prop_info * total_info)[0] + 1
if keep_k > np.floor(min(kernel.shape) / 2):
sys.stderr.write(
f"Warning: Kernel factorisation required {keep_k} singular,"
"vectors this may result in slow operations.\n",
)
# Truncate singular matrices to keep only the required vectors
u = u[:, :keep_k]
v = v[:keep_k, :]
# Multiply each singular vector by the sqrt of its singular value
for i in range(keep_k):
u[:, i] *= np.sqrt(sigma[i])
v[i, :] *= np.sqrt(sigma[i])
return (u, v)
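# Illustrative sketch, not part of the original module (helper name is
# hypothetical; assumes numpy is imported as np): a rank-1 kernel is fully
# captured by one singular vector pair, so the factorised matrices have a
# single column/row and their product rebuilds the kernel.
def _example_factorise_kernel():
    vec = np.array([1.0, 2.0, 3.0])
    kernel = np.outer(vec, vec)
    u, v = factorise_kernel(kernel)
    return np.allclose(u @ v, kernel)  # True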
def valid_to_missing(valid, size):
"""
Given an array of valid indices, return the corresponding array of missing
indices.
Parameters
----------
valid : numpy.ndarray of ints
The valid indices.
size : int
The size of the matrix (maximum possible index + 1).
Returns
-------
missing : numpy.ndarray of ints
The missing indices.
"""
missing = np.ones(size, dtype=bool)
try:
missing[valid] = False
# In case there is no valid index
except IndexError:
pass
missing = np.flatnonzero(missing)
return missing | random_line_split |
|
preprocessing.py | #!/usr/bin/env python3
# coding: utf-8
"""Chromosight's preprocessing submodule implements a number of functions to
operate on Hi-C contact maps before detection. These functions can be used to
improve the signal or filter unneeded signal. There are also functions to edit
(zoom, crop, factorize) kernel matrices.
"""
import sys
import numpy as np
import numpy.linalg as la
import scipy.stats as ss
import scipy.sparse as sp
import scipy.ndimage as ndi
from sklearn.isotonic import IsotonicRegression
def erase_missing(signal, valid_rows, valid_cols, sym_upper=True):
"""
Given a sparse matrix, set all pixels in missing (invalid) bins to 0.
Parameters
----------
signal : scipy.sparse.csr_matrix of floats
Input signal on which to erase values.
valid_rows : numpy.ndarray of ints
Indices of rows considered valid (not missing).
valid_cols : numpy.ndarray of ints
Indices of columns considered valid (not missing).
sym_upper : bool
Define if the input signal is upper symmetric.
Returns
-------
scipy.sparse.csr_matrix
The input signal with all values in missing bins set to 0
"""
if sym_upper and sp.issparse(signal):
if np.any(valid_rows != valid_cols):
raise ValueError(
"Valid rows and columns must be identical with sym_upper=True"
)
if signal.shape[0] != signal.shape[1]:
raise ValueError(
"Input matrix must be square when using sym_upper=True"
)
# Make a boolean mask from good bins
good_mask = np.isin(range(signal.shape[0]), valid_rows)
# Set all pixels in a nondetectable bin to 0
# For faster masking of bins, mask bins using dot product with an
# identity matrix where bad bins have been masked on the diagonal
# E.g. if removing the second bin (row and column):
# 1 0 0 9 6 5 1 0 0 9 0 5
# 0 0 0 X 6 8 7 X 0 0 0 = 0 0 0
# 0 0 1 6 7 8 0 0 1 6 0 8
mask_mat = sp.eye(signal.shape[0])
mask_mat.data[0][~good_mask] = 0
erased = mask_mat.dot(signal).dot(mask_mat)
else:
# Get a boolean array of missing (1) and valid (0) rows
missing_rows = valid_to_missing(valid_rows, signal.shape[0])
missing_cols = valid_to_missing(valid_cols, signal.shape[1])
erased = signal.copy()
erased[missing_rows, :] = 0
erased[:, missing_cols] = 0
return erased
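# Illustrative sketch, not part of the original module (helper name is
# hypothetical): erasing bin 1 of a 3x3 symmetric map of ones zeroes out its
# row and column while leaving the other pixels untouched.
def _example_erase_missing():
    mat = sp.csr_matrix(np.ones((3, 3)))
    valid = np.array([0, 2])
    return erase_missing(mat, valid, valid, sym_upper=True).toarray()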
def set_mat_diag(mat, diag=0, val=0):
"""
Set the nth diagonal of a symmetric 2D numpy array to a fixed value.
Operates in place.
Parameters
----------
mat : numpy.ndarray
Symmetric 2D array of floats.
diag : int
0-based index of the diagonal to modify. Use negative values for the
lower half.
val : float
Value to use for filling the diagonal
"""
m = mat.shape[0]
step = m + 1
start = diag
end = m ** 2 - diag * m
mat.flat[start:end:step] = val
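# Illustrative sketch, not part of the original module (helper name is
# hypothetical): set_mat_diag works in place, so the array is modified rather
# than returned by the function.
def _example_set_mat_diag():
    arr = np.ones((3, 3))
    set_mat_diag(arr, diag=1, val=0)
    return arr  # arr[0, 1] and arr[1, 2] are now 0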
def diag_trim(mat, n):
|
def distance_law(
matrix, detectable_bins=None, max_dist=None, smooth=True, fun=np.nanmean
):
"""
Computes genomic distance law by averaging over each diagonal in the upper
triangle matrix. If a list of detectable bins is provided, pixels in
missing bins will be excluded from the averages. A maximum distance can be
specified to define how many diagonals should be computed.
Parameters
----------
matrix: scipy.sparse.csr_matrix
the input matrix to compute distance law from.
detectable_bins : numpy.ndarray of ints
An array of detectable bins indices to consider when computing
distance law.
max_dist : int
Maximum distance from diagonal, in number of bins in which to compute
distance law
smooth : bool
Whether to use isotonic regression to smooth the distance law.
fun : callable
A function to apply on each diagonal. Defaults to mean.
Returns
-------
dist: np.ndarray
the output genomic distance law.
Example
-------
>>> m = np.ones((3,3))
>>> m += np.array([1,2,3])
>>> m
array([[2., 3., 4.],
[2., 3., 4.],
[2., 3., 4.]])
>>> distance_law(csr_matrix(m))
array([3. , 3.5, 4. ])
"""
mat_n = matrix.shape[0]
if max_dist is None:
max_dist = mat_n
n_diags = min(mat_n, max_dist + 1)
dist = np.zeros(mat_n)
if detectable_bins is None:
detectable_bins = np.array(range(mat_n))
for diag in range(n_diags):
# Find detectable which fall in diagonal
detect_mask = np.zeros(mat_n, dtype=bool)
detect_mask[detectable_bins] = 1
# Find bins which are detectable in the diagonal (intersect of
# hori and verti)
detect_mask_h = detect_mask[: (mat_n - diag)]
detect_mask_v = detect_mask[mat_n - (mat_n - diag) :]
detect_mask_diag = detect_mask_h & detect_mask_v
detect_diag = matrix.diagonal(diag)[detect_mask_diag]
dist[diag] = fun(detect_diag[detect_diag > 0])
# Smooth the curve using isotonic regression: Find closest approximation
# with the condition that point n+1 cannot be higher than point n.
# (i.e. contacts can only decrease when increasing distance)
if smooth and mat_n > 2:
ir = IsotonicRegression(increasing=False)
dist[~np.isfinite(dist)] = 0
dist = ir.fit_transform(range(len(dist)), dist)
return dist
def get_detectable_bins(mat, n_mads=3, inter=False):
"""
Returns lists of detectable indices after excluding low interacting bins
based on the proportion of zero pixel values in the matrix bins.
Parameters
----------
mat : scipy.sparse.coo_matrix
A Hi-C matrix in the form of a 2D numpy array or coo matrix
n_mads : int
Number of median absolute deviation below the median required to
consider bins non-detectable.
inter : bool
Whether the matrix is interchromosomal. Default is to consider the
matrix is intrachromosomal (i.e. upper symmetric).
Returns
-------
numpy ndarray :
tuple of 2 1D arrays containing indices of low interacting rows and
columns, respectively.
"""
matrix = mat.copy()
matrix.eliminate_zeros()
def mad(x): return ss.median_abs_deviation(x, nan_policy="omit")
if not inter:
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("Intrachromosomal matrices must be symmetric.")
# Replace nonzero pixels by ones to work on prop. of nonzero pixels
matrix.data = np.ones(matrix.data.shape)
# Compute number of nonzero values in each bin
sum_bins = sum_mat_bins(matrix)
# Compute variation in the number of nonzero pixels
sum_mad = mad(sum_bins)
# Find poor interacting rows and columns
sum_med = np.median(sum_bins)
detect_threshold = max(1, sum_med - sum_mad * n_mads)
# Removal of poor interacting rows and columns
good_bins = np.flatnonzero(sum_bins >= detect_threshold)
good_bins = (good_bins, good_bins)
else:
# Adapted for asymmetric matrices (need to compute rows and columns)
sum_rows, sum_cols = matrix.sum(axis=1).A1, matrix.sum(axis=0).A1
mad_rows, mad_cols = mad(sum_rows), mad(sum_cols)
med_rows, med_cols = np.median(sum_rows), np.median(sum_cols)
detect_threshold_rows = max(1, med_rows - mad_rows * n_mads)
detect_threshold_cols = max(1, med_cols - mad_cols * n_mads)
good_rows = np.flatnonzero(sum_rows > detect_threshold_rows)
good_cols = np.flatnonzero(sum_cols > detect_threshold_cols)
good_bins = (good_rows, good_cols)
return good_bins
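# Illustrative sketch, not part of the original module (helper name is
# hypothetical): on a toy map where every bin holds the same number of
# nonzero pixels, no bin falls below the median-based threshold and all
# indices are reported as detectable.
def _example_get_detectable_bins():
    mat = sp.coo_matrix(np.triu(np.ones((5, 5))))
    good_rows, good_cols = get_detectable_bins(mat, n_mads=3)
    return good_rows, good_cols  # both arrays contain 0..4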
def detrend(
matrix,
detectable_bins=None,
max_dist=None,
smooth=False,
fun=np.nanmean,
max_val=10,
):
"""
Detrends a Hi-C matrix by the distance law.
The input matrix should have been normalised beforehand.
Parameters
----------
matrix : scipy.sparse.csr_matrix
The normalised intrachromosomal Hi-C matrix to detrend.
detectable_bins : tuple
Tuple containing a list of detectable rows and a list of columns on
which to perform detrending. Poorly interacting indices have been
excluded.
max_dist : int
Maximum number of bins from the diagonal at which to compute trend.
smooth : bool
Whether to use isotonic regression to smooth the trend.
fun : callable
Function to use on each diagonal to compute the trend.
max_val : float or None
Maximum value in the detrended matrix. Set to None to disable
Returns
-------
numpy.ndarray :
The detrended Hi-C matrix.
"""
matrix = matrix.tocsr()
y = distance_law(
matrix,
detectable_bins=detectable_bins,
max_dist=max_dist,
smooth=smooth,
fun=fun,
)
y[np.isnan(y)] = 0.0
# Detrending by the distance law
clean_mat = matrix.tocoo()
# clean_mat.data /= y_savgol[abs(clean_mat.row - clean_mat.col)]
try:
clean_mat.data = clean_mat.data / y[abs(clean_mat.row - clean_mat.col)]
# If no nonzero value in matrix, do nothing
except TypeError:
pass
clean_mat = clean_mat.tocsr()
if max_val is not None:
clean_mat[clean_mat >= max_val] = 1
return clean_mat
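# Illustrative sketch, not part of the original module (helper name is
# hypothetical): in this toy map every pixel equals the mean of its diagonal,
# so detrending by the distance law brings all stored values to 1.
def _example_detrend():
    raw = sp.csr_matrix(np.array(
        [[4.0, 2.0, 1.0],
         [0.0, 4.0, 2.0],
         [0.0, 0.0, 4.0]]
    ))
    return detrend(raw).toarray()  # upper triangle of ones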
def ztransform(matrix):
"""
Z transformation for Hi-C matrices.
Parameters
----------
matrix : scipy.sparse.coo_matrix
A Hi-C matrix in sparse format.
Returns
-------
scipy.sparse.coo_matrix:
The detrended Hi-C matrix
"""
mat = matrix.copy()
mu = np.mean(mat.data)
sd = np.std(mat.data)
mat.data -= mu
mat.data /= sd
return mat
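# Illustrative sketch, not part of the original module (helper name is
# hypothetical): only the stored (nonzero) values are standardised; implicit
# zeros are untouched.
def _example_ztransform():
    mat = sp.coo_matrix(np.array([[1.0, 2.0], [0.0, 3.0]]))
    return ztransform(mat).data  # roughly [-1.22, 0.0, 1.22]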
def sum_mat_bins(mat):
"""
Compute the sum of matrices bins (i.e. rows or columns) using
only the upper triangle, assuming symmetrical matrices.
Parameters
----------
mat : scipy.sparse.coo_matrix
Contact map in sparse format, either in upper triangle or
full matrix.
Returns
-------
numpy.ndarray :
1D array of bin sums.
"""
# Equivalent to row or col sum on a full matrix
# Note: mat.sum returns a 'matrix' object. A1 extracts the 1D flat array
# from the matrix
return mat.sum(axis=0).A1 + mat.sum(axis=1).A1 - mat.diagonal(0)
def subsample_contacts(M, n_contacts):
"""Bootstrap sampling of contacts in a sparse Hi-C map.
Parameters
----------
M : scipy.sparse.coo_matrix
The input Hi-C contact map in sparse format.
n_contacts : int
The number of contacts to sample.
Returns
-------
scipy.sparse.coo_matrix
A new matrix with a fraction of the original contacts.
"""
S = M.data.copy()
# Match cell idx to cumulative number of contacts
cum_counts = np.cumsum(S)
# Total number of contacts to sample
tot_contacts = int(cum_counts[-1])
# Sample desired number of contacts from the range(0, n_contacts) array
sampled_contacts = np.random.choice(
int(tot_contacts), size=(n_contacts), replace=False
)
# Get indices of sampled contacts in the cum_counts array
idx = np.searchsorted(cum_counts, sampled_contacts, side="right")
# Bin those indices to the same dimensions as matrix data to get counts
sampled_counts = np.bincount(idx, minlength=S.shape[0])
# Get nonzero values to build new sparse matrix
nnz_mask = sampled_counts > 0
sampled_counts = sampled_counts[nnz_mask].astype(np.float64)
sampled_rows = M.row[nnz_mask]
sampled_cols = M.col[nnz_mask]
return sp.coo_matrix(
(sampled_counts, (sampled_rows, sampled_cols)),
shape=(M.shape[0], M.shape[1]),
)
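# Illustrative sketch, not part of the original module (helper name is
# hypothetical): subsampling is random, but the total number of contacts in
# the returned map is exactly n_contacts.
def _example_subsample_contacts():
    mat = sp.coo_matrix(np.array([[4.0, 0.0], [1.0, 5.0]]))
    sub = subsample_contacts(mat, 5)
    return int(sub.data.sum())  # always 5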
def frame_missing_mask(mask, kernel_shape, sym_upper=False, max_dist=None):
"""
Adds a frame around input mask, given a kernel. The goal of this
frame is to define margins around the matrix where the kernel will not perform
convolution (denoted by 1). If the matrix is upper symmetric, a margin of
half the kernel's width is added below the diagonal and a maximum distance
from the diagonal above which margins need not be drawn can be considered.
Otherwise, margins are simply added on all 4 sides of the matrix.
::
signal kernel _________
______ ____ |#######|
| | | | ==> |# #|
| | |___| |# #|
| | |# #|
|_____| |# #|
|#######|
--------
Parameters
----------
mask : scipy.sparse.csr_matrix of bool
The mask around which to add margins.
kernel_shape : tuple of ints
The number of rows and columns in the input kernel. Margins will be one
less than these values.
sym_upper : bool
Whether the signal is a symmetric upper triangle matrix. If so, values
on a margin below the diagonal will be masked.
max_dist : int or None
Number of diagonals to keep
Returns
-------
framed_mask : scipy.sparse.csr_matrix of bool
The input mask with a padding of True around the edges. If sym_upper
is True, a padding is also added below the diagonal.
"""
if mask.dtype != bool:
raise ValueError("Mask must contain boolean values")
if not sp.issparse(mask):
raise ValueError("Mask must be a sparse matrix")
framed_mask = mask.copy()
ms, ns = mask.shape
mk, nk = kernel_shape
if sym_upper and (max_dist is not None):
# Remove diagonals further than scan distance in the input mask
framed_mask = diag_trim(framed_mask, max_dist + max(nk, mk)).tocsr()
max_m = max_dist + mk
max_n = max_dist + nk
else:
max_m, max_n = ms, ns
# Up and down margins initialized with zeros and filled as needed
margin_1 = sp.csr_matrix((mk - 1, ns), dtype=bool)
margin_2 = sp.csr_matrix((mk - 1, ns), dtype=bool)
if sym_upper and (max_dist is not None):
# Margin 1 (top) is in upper triangle -> fill missing up to scan dist
margin_1[:, :max_n] = 1
else:
margin_1[:, :] = 1
margin_2[:, :] = 1
framed_mask = sp.vstack([margin_1, framed_mask, margin_2], format="csr")
# Left and right
margin_1 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool)
margin_2 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool)
if sym_upper and (max_dist is not None):
# Margin 2 (right) is in upper triangle-> fill missing up to scan dist
margin_2[-(max_m + 1) :, :] = 1
# Fill only the start of left margin for the top-left corner
margin_1[: mk - 1, :] = 1
else:
margin_1[:, :] = 1
margin_2[:, :] = 1
framed_mask = sp.hstack([margin_1, framed_mask, margin_2], format="csr")
if sym_upper:
# LIL format is much faster when changing sparsity
framed_mask = framed_mask.tolil()
# Add margin below diagonal
big_k = max(nk, mk)
dia_margins = np.ones(big_k)
dia_offsets = np.arange(-1, -big_k-1, -1)
framed_mask += sp.diags(
dia_margins,
dia_offsets,
shape=framed_mask.shape,
format="lil",
dtype=bool,
)
framed_mask = framed_mask.tocsr()
return framed_mask
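# Illustrative sketch, not part of the original module (helper name is
# hypothetical): framing an all-False 4x4 mask for a 3x3 kernel grows it by
# kernel_size - 1 = 2 rows/columns on each side, and the added frame is
# entirely True.
def _example_frame_missing_mask():
    mask = sp.csr_matrix((4, 4), dtype=bool)
    framed = frame_missing_mask(mask, (3, 3), sym_upper=False)
    return framed.shape  # (8, 8)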
def check_missing_mask(signal, mask):
"""
Ensure all elements defined as missing by the mask are set to zero in the
signal. If this is not the case, raises an error.
Parameters
----------
signal : numpy.ndarray of floats or scipy.sparse.csr_matrix of floats
The signal to be checked.
mask : numpy.ndarray of bools or scipy.sparse.csr_matrix of bools
The mask defining missing values as True and valid values as False.
"""
if sp.issparse(mask):
# Check if there are nonzero values in the signal reported as missing
# by the mask
missing_with_signal = np.nonzero(
abs(signal[mask.nonzero()[0], mask.nonzero()[1]]) > 0
)[0]
if len(missing_with_signal) > 0:
raise ValueError(
"There are",
len(missing_with_signal),
"non-zero elements reported as missing.",
)
else:
if np.sum(abs(signal[mask > 0])) > 1e-10:
raise ValueError(
"There are",
str(np.sum(abs(signal[mask > 0]))),
"non-zero elements reported as missing.",
)
def make_missing_mask(
shape, valid_rows, valid_cols, max_dist=None, sym_upper=False
):
"""
Given lists of valid rows and columns, generate a sparse matrix mask with
missing pixels denoted as 1 and valid pixels as 0. If a max_dist is
provided, upper symmetric matrices will only be flagged up to max_dist
pixels from the diagonal.
Parameters
----------
shape : tuple of ints
Shape of the mask to generate.
valid_rows : numpy.ndarray of ints
Array with the indices of valid rows that should be set to 0 in the
mask.
valid_cols : numpy.ndarray of ints
Array with the indices of valid rows that should be set to 0 in the
mask.
max_dist : int or None
The maximum diagonal distance at which masking should take place.
sym_upper : bool
Whether the matrix is symmetric upper. If so, max_dist is ignored
Returns
-------
scipy.sparse.csr_matrix of bool
The mask containing False values where pixels are valid and True values
where pixels are missing.
"""
# Error if the matrix upper symmetric but shape is rectangle or missing
# rows and cols are different
sm, sn = shape
if sym_upper and (sm != sn or len(valid_rows) != len(valid_cols)):
raise ValueError("Rectangular matrices cannot be upper symmetric")
# Get a boolean array of missing (1) and valid (0) rows
missing_rows = valid_to_missing(valid_rows, sm)
# When matrix is sym., rows and cols are synonym, no need to compute 2x
if sym_upper:
missing_cols = missing_rows
else:
missing_cols = valid_to_missing(valid_cols, sn)
# If upper sym., fill only upper diag up to max_dist.
# E. g. with bins 1 and 3 missing
# and a max_dist of 1:
# 0 1 0 0 0
# 0 1 1 0 0
# 0 0 0 1 0
# 0 0 0 1 1
# 0 0 0 0 0
# For each missing bin, the mask is applied 1 pixel upwards and 1 to the right
# to fill only the upper triangle up to max_dist
if sym_upper:
# If no max dist has been specified, fill the whole upper triangle
if max_dist is None:
max_dist = min(shape)
# Generate matrix of distance shifts by row.
# Shape is len(missing_rows) x (max_dist + 1)
# e.g.: 2 missing rows and max dist of 1
# 0 0
# 1 1
row_shifts = np.tile(
np.array(range(max_dist + 1)), (len(missing_rows), 1)
).T
# Compute row positions upwards to diagonal by subtracting missing rows
# to the shifts. Following the previous example, if missing rows are
# bins 1 and 3:
# 1 3
# 0 2
rows_before = (missing_rows - row_shifts).flatten("F")
# looking at pixels up from the bins, cols remain the same:
# 1 3
# 1 3
cols_before = np.repeat(missing_rows, max_dist+1)
# Compute col position to the right until diagonal by adding the shift
# Note: upper symmetric, so row_shifts = col_shift_
# 1 3
# 2 4
cols_after = (missing_cols + row_shifts).flatten("F")
# This time, rows remain constant since we are computing positions to
# the right
rows_after = np.repeat(missing_cols, max_dist+1)
# Combine positions to the right and upwards
rows = np.concatenate([rows_before, rows_after])
cols = np.concatenate([cols_before, cols_after])
data = np.ones(rows.shape, dtype=bool)
# Remove entries where rows or cols are negative or larger than shape
valid = (cols < sm) & (cols >= 0) & (rows < sm) & (rows >= 0)
# Build mask mat with miss bins up to max scan dist in upper triangle
mask = sp.coo_matrix(
(data[valid], (rows[valid], cols[valid])), shape=shape, dtype=bool
).tocsr()
else:
mask = sp.csr_matrix(shape, dtype=bool)
mask[missing_rows, :] = 1
mask[:, missing_cols] = 1
return mask
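# Illustrative sketch, not part of the original module (helper name is
# hypothetical): with bin 2 missing in a 5x5 upper-symmetric matrix and
# max_dist=1, only the pixels of bin 2 lying at most one diagonal above the
# main diagonal are flagged.
def _example_make_missing_mask():
    valid = np.array([0, 1, 3, 4])
    mask = make_missing_mask((5, 5), valid, valid, max_dist=1, sym_upper=True)
    return sorted(zip(*mask.nonzero()))  # [(1, 2), (2, 2), (2, 3)]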
def zero_pad_sparse(mat, margin_h, margin_v, fmt="coo"):
"""
Adds margin of zeros around an input sparse matrix.
Parameters
----------
mat : scipy.sparse.csr_matrix
The matrix to be padded.
margin_h : int
The width of the horizontal margin to add on the left and right of the
matrix.
margin_v : int
The width of the vertical margin to add on the top and bottom of the
matrix.
fmt : string
The desired scipy sparse format of the output matrix
Returns
-------
scipy.sparse.csr_matrix :
The padded matrix of dimensions (m + 2 * margin_v, n + 2 * margin_h).
Examples
--------
>>> m = sp.csr_matrix(np.array([[1, 2], [10, 20]]))
>>> zero_pad_sparse(m, 2, 1).toarray()
array([[ 0, 0, 0, 0, 0, 0],
[ 0, 0, 1, 2, 0, 0],
[ 0, 0, 10, 20, 0, 0],
[ 0, 0, 0, 0, 0, 0]])
"""
sm, sn = mat.shape
padded_mat = mat.copy()
# Horizontal and vertical margins of zeros stacked around the matrix
margin_h_0 = sp.csr_matrix((sm, margin_h), dtype=mat.dtype)
margin_v_0 = sp.csr_matrix((margin_v, sn + 2 * margin_h), dtype=mat.dtype)
padded_mat = sp.hstack([margin_h_0, padded_mat, margin_h_0], format="csr")
padded_mat = sp.vstack([margin_v_0, padded_mat, margin_v_0], format="csr")
return padded_mat
def crop_kernel(kernel, target_size):
"""
Crop a kernel matrix to target size horizontally and vertically.
If the target size is even, the target size is adjusted to the
next integer up.
Parameters
----------
kernel : numpy.ndarray of floats
Image to crop.
target_size : tuple of ints
Tuple defining the target shape of the kernel, takes the
form (rows, cols) where rows and cols are odd numbers.
Returns
-------
cropped : numpy.ndarray of floats
New image no larger than target dimensions
"""
# Use list for mutability
target = [d for d in target_size]
adjusted = False
for dim in range(len(target)):
if not target[dim] % 2:
target[dim] += 1
adjusted = True
if adjusted:
sys.stderr.write(
"WARNING: Cropped kernel size adjusted to "
f"{target[0]}x{target[1]} to keep odd dimensions.\n"
)
source_m, source_n = kernel.shape
target_m, target_n = target
# Define horizontal and vertical margins to trim
if source_m > target_m:
margin_rows = (source_m - target_m) // 2
else:
margin_rows = 0
if source_n > target_n:
margin_cols = (source_n - target_n) // 2
else:
margin_cols = 0
cropped = kernel[
margin_rows : (source_m - margin_rows),
margin_cols : (source_n - margin_cols),
]
return cropped
def resize_kernel(
kernel,
kernel_res=None,
signal_res=None,
factor=None,
min_size=7,
quiet=False,
):
"""
Resize a kernel matrix based on the resolution at which it was defined and
the signal resolution. E.g. if a kernel matrix was generated for 10kb and
the input signal is 20kb, kernel size will be divided by two. If the kernel
is enlarged, pixels are interpolated with a spline of degree 1.
Alternatively, a resize factor can be provided. In the example above, the
factor would be 0.5.
Parameters
----------
kernel : numpy.ndarray
Kernel matrix.
kernel_res : int
Resolution for which the kernel was designed. Mutually exclusive with
factor.
signal_res : int
Resolution of the signal matrix in basepair per matrix bin. Mutually
exclusive with factor.
factor : float
Resize factor. Can be provided as an alternative to kernel_res and
signal_res. Values above 1 will enlarge the kernel, values below 1 will
shrink it.
min_size : int
Lower bound, in number of rows/column allowed when resizing the kernel.
quiet : bool
Suppress warnings if resize factor was adjusted.
Returns
-------
resized_kernel : numpy.ndarray
The resized input kernel.
"""
km, kn = kernel.shape
if km != kn:
raise ValueError("kernel must be square.")
if not (km % 2) or not (kn % 2):
raise ValueError("kernel size must be odd.")
if factor is not None:
if kernel_res is not None or signal_res is not None:
raise ValueError(
"factor is mutually exclusive with resolution "
"parameters (kernel_res and signal_res)."
)
resize_factor = factor
else:
if kernel_res is None or signal_res is None:
raise ValueError(
"You must provide either a resize factor or the signal and "
"kernel resolutions."
)
# Define by how many times kernel must be enlarged for its pixels to
# match the signal's pixels
resize_factor = kernel_res / signal_res
if km * resize_factor < min_size:
resize_factor = min_size / km
resized_kernel = ndi.zoom(kernel, resize_factor, order=1)
if not resized_kernel.shape[0] % 2:
# Compute the factor required to yield a dimension smaller by one
adj_resize_factor = (resized_kernel.shape[0] - 1) / km
if not quiet:
sys.stderr.write(
f"Adjusting resize factor from {resize_factor} to {adj_resize_factor}.\n"
)
resized_kernel = ndi.zoom(kernel, adj_resize_factor, order=1)
return resized_kernel
def factorise_kernel(kernel, prop_info=0.999):
"""
Performs truncated SVD on an input kernel, returning the singular vectors
necessary to retain a given proportion of information contained in the
kernel.
Parameters
----------
kernel : numpy.ndarray of floats
The input 2D kernel to factorise.
prop_info : float
Proportion of information to retain.
Returns
-------
tuple of numpy.ndarrays of floats
A tuple containing the truncated left and right singular matrices,
where each singular vector has been multiplied by the square root of
their respective singular values.
"""
u, sigma, v = la.svd(kernel)
total_info = np.sum(sigma ** 2)
# Compute min. number of singular vectors to retain enough info
keep_k = np.flatnonzero(np.cumsum(sigma ** 2) > prop_info * total_info)[0] + 1
if keep_k > np.floor(min(kernel.shape) / 2):
sys.stderr.write(
f"Warning: Kernel factorisation required {keep_k} singular,"
"vectors this may result in slow operations.\n",
)
# Truncate singular matrices to keep only the required vectors
u = u[:, :keep_k]
v = v[:keep_k, :]
# Multiply each singular vector by the sqrt of its singular value
for i in range(keep_k):
u[:, i] *= np.sqrt(sigma[i])
v[i, :] *= np.sqrt(sigma[i])
return (u, v)
def valid_to_missing(valid, size):
"""
Given an array of valid indices, return the corresponding array of missing
indices.
Parameters
----------
valid : numpy.ndarray of ints
The valid indices.
size : int
The size of the matrix (maximum possible index + 1).
Returns
-------
missing : numpy.ndarray of ints
The missing indices.
"""
missing = np.ones(size, dtype=bool)
try:
missing[valid] = False
# In case there is no valid index
except IndexError:
pass
missing = np.flatnonzero(missing)
return missing
| """
Trim an upper triangle sparse matrix so that only the first n diagonals
are kept.
Parameters
----------
mat : scipy.sparse.csr_matrix or numpy.ndarray
The sparse matrix to be trimmed
n : int
The number of diagonals from the center to keep (0-based).
Returns
-------
scipy.sparse.csr_matrix or numpy.ndarray:
The diagonally trimmed upper triangle matrix with only the first
n diagonals.
"""
if sp.issparse(mat):
if mat.format != "csr":
raise ValueError("input type must be scipy.sparse.csr_matrix")
# Trim diagonals by removing all elements further than n in the
# upper triangle
trimmed = sp.tril(mat, n, format="csr")
trimmed = sp.triu(trimmed, format="csr")
else:
trimmed = mat.copy()
n_diags = trimmed.shape[0]
for diag in range(n, n_diags):
set_mat_diag(trimmed, diag, 0)
return trimmed
return trimmed | identifier_body |
graph.service.ts | import { Injectable } from '@angular/core';
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { Observable, of, forkJoin } from 'rxjs';
import * as Rx from 'rxjs';
import { catchError, map, tap } from 'rxjs/operators';
import { FullDocNode, DocNode2, Doc2, Link, Change, Db } from './standard-map';
import { MessageService } from './message.service';
import * as d3Sankey from 'd3-sankey';
import { TreeNode, IActionMapping } from 'angular-tree-component';
import { GraphTab } from './GraphTab';
import { GraphFilter } from './GraphFilter';
export interface ICategory {
id: string;
title: string;
active?: boolean;
}
export type CategoryList = ICategory[];
export class FilterCriteria {
constructor(
public categoryIds: string[] = null,
public categoryOrder: string[] = null) {
}
}
// -- Dag Node --
export interface SNodeExtra {
nodeId: number;
name: string;
data?: any;
}
export interface SLinkExtra {
source: number;
target: number;
value: number;
uom: string;
sourceNode: any;
targetNode: any;
}
export type SNode = d3Sankey.SankeyNode<SNodeExtra, SLinkExtra>;
export type SLink = d3Sankey.SankeyLink<SNodeExtra, SLinkExtra>;
export interface DAG {
nodes: SNode[];
links: SLink[];
}
// -- Dag Node --
@Injectable({ providedIn: 'root' })
export class GraphService {
private docGuids = {};
private docDb: Db = null;
private docs = {};
private nextDocGuid = 0;
private filterOrder = [];
private runningFilters = false;
public updateSubject = new Rx.BehaviorSubject(0);
public updateViewSubject = new Rx.BehaviorSubject(0);
public visualStyle = true;
public visualZoom = 1;
constructor(
private messageService: MessageService,
private http: HttpClient) {
this.addTab("ISO");
}
public runFilters(changedTab: GraphTab, parentChanged: boolean)
{
if (this.runningFilters)
return; // prevent re-entry
this.runningFilters = true;
var tabs = this.graphTabs;
var anyChanged = false;
for (var i of this.filterOrder)
{ | if (anyChanged || t.column.autoFilterSrc == changedTab.column || (parentChanged && t == changedTab))
{
anyChanged = true;
// filter child tree
GraphFilter.runFilter(t.column);
}
}
//if (anyChanged)
this.updateSubject.next(0);
this.runningFilters = false;
}
getGuid(id: string, type: string, rev: string, createMissing: boolean = true): number {
var key = `${type}-${id}-${rev}`;
if (key in this.docGuids)
return this.docGuids[key];
if (!createMissing)
return null;
var value = this.nextDocGuid++;
this.docGuids[key] = value;
return value;
}
getDbIndex() : Observable<Db> {
if (this.docDb)
return of(this.docDb);
return this.http.get<Db>('assets/output/docs-index.json', {responseType: 'json'})
.pipe(
tap(
data => {
this.docDb = data;
},
error => this.handleError("getDbIndex", [])
)
);
}
getDoc(id: string) : Observable<Doc2> {
if (this.docs[id])
return of(this.docs[id]);
return this.http.get<Doc2>('assets/output/docs-' + id + '.json', {responseType: 'json'})
.pipe(
tap(
data => {
this.docs[id] = data;
},
error => this.handleError("getDoc", [])
)
);
}
getDocTypes() : Observable<CategoryList> {
return this.getDbIndex().pipe(
map(
data => {
return data.docs.map(v => { return { id: v.id, title: v.type }; });
}
)
);
}
private addToDoc(parent: FullDocNode, input: DocNode2) {
var child = new FullDocNode(input);
if (parent)
{
parent.children.push(child);
}
// Recurse
for (var c of input.children)
{
this.addToDoc(child, c);
}
return child;
}
getFullDocByType(id: string) : Observable<FullDocNode> {
return this.getDoc(id).pipe(
map(
data => {
return this.addToDoc(null, data);
}
)
);
}
getChangeLog(): Observable<Change[]> {
return this.getDbIndex().pipe(
map(
data => {
return data.changelog;
}
)
);
}
// Live state management: maybe move this to a different service.
public graphTabs: GraphTab[] = [ ];
public selectedTab: number = 0;
public get canAdd(): boolean {
return this.graphTabs.length < 3;
}
public addTab(id: string, filterTopLevelKeys: string[] = null, customName: string = null) {
if (!this.canAdd)
return;
this.getFullDocByType(id)
.subscribe(doc => {
var newTab = new GraphTab(this, null, doc);
newTab.nodes = doc.children;
// Filter the top level nodes if desired
if (filterTopLevelKeys) {
newTab.nodes = doc.children.filter(c => {
var rootKey = c.id.replace(/_/g, ''); // The 'all' structure has _, where the menu choice has those stripped. Remove them for comparison.
return filterTopLevelKeys.includes(rootKey);
});
}
// Override the tab name if desired
if (customName)
newTab.title = customName;
// Reference this data from the column view.
newTab.column.nodes = newTab.nodes;
this.graphTabs.push(newTab);
this.ensureISOIsInMiddle();
// Coverage calculation is disabled to save time.
//if (id != "ISO")
//{
// // compare with iso.
// newTab.coverage = this.compareDocs(newTab.column, this.graphTabs[1]);
//}
var selectTab = newTab;
if (newTab.isAll) {
selectTab = this.graphTabs.find(t => t.isIso);
}
// The current request is to NOT activate the newly added tab. So only activate index 0
if (this.graphTabs.length != 1) {
selectTab = this.graphTabs[this.selectedTab]; // reselect current selection
}
// even if we don't change tabs, we still have to reactivate it to configure filters
this.activateTab(selectTab);
});
}
private ensureISOIsInMiddle() {
var isoTab = this.graphTabs.find(t => t.isIso);
if (this.graphTabs.length > 1)
{
this.graphTabs = this.graphTabs.filter(t => t != isoTab);
this.graphTabs.splice(1, 0, isoTab);
}
}
public configureFilterStack() {
switch (this.selectedTab)
{
case 0: this.filterOrder = [0, 1, 2]; break;
case 1: this.filterOrder = [1, 0, 2]; break;
case 2: this.filterOrder = [2, 1, 0]; break;
}
// setup filters
var isoTab = this.graphTabs.find(t => t.isIso);
var primary = this.graphTabs[this.filterOrder[0]];
if (!primary)
return;
// clear auto filter of left tab
primary.column.autoFilterSrc = null;
primary.column.autoFilterParent = null;
primary.column.autoFilterSelf = false;
var secondary = this.graphTabs[this.filterOrder[1]];
if (secondary)
{
if (secondary == isoTab)
{
// assure iso filters from the primary: "auto filter"
isoTab.column.autoFilterSrc = primary.column;
isoTab.column.autoFilterParent = primary.column.parent;
isoTab.column.autoFilterSelf = false;
}
else
{
// auto filter with this tabs connections to iso
secondary.column.autoFilterSrc = isoTab.column;
secondary.column.autoFilterParent = primary.column.parent; // the primary tab always drives the selection
secondary.column.autoFilterSelf = true;
}
}
var third = this.graphTabs[this.filterOrder[2]];
if (third)
{
// auto filter with this tabs connections to iso
third.column.autoFilterSrc = isoTab.column;
third.column.autoFilterParent = primary.column.parent; // the primary tab always drives the selection
third.column.autoFilterSelf = true;
}
}
public tabChanged() {
this.configureFilterStack();
if (this.selectedTab >= 0 && this.selectedTab < this.graphTabs.length) {
this.graphTabs[this.selectedTab].parentTabTreeChanged();
}
}
public removeTab(tab) {
this.graphTabs = this.graphTabs.filter(t => t!=tab);
this.ensureISOIsInMiddle();
this.activateTab(this.graphTabs[0]);
}
public activateTab(tab: GraphTab): Promise<boolean> {
return new Promise<boolean>((resolve, reject) => {
var newIndex = this.graphTabs.indexOf(tab);
var finalize = () => {
this.selectedTab = newIndex;
this.tabChanged();
setTimeout(() => {
resolve(true);
}, 1000);
};
// if the index is the same
this.selectedTab = -1; // set it to non-value so change is detected
setTimeout(finalize, 1000); // give dom time to stabilize
});
}
public getNodesWithLinks(children: FullDocNode[], result: FullDocNode[])
{
for (var c of children)
{
if (c.node.links && c.node.links.length > 0)
result.push(c);
this.getNodesWithLinks(c.children, result);
}
return result;
}
public flattenSections(children: FullDocNode[], result: string[])
{
for (var c of children)
{
if (c.getBody())
result.push(c.id);
this.flattenSections(c.children, result);
}
return result;
}
public flattenLinks(children: FullDocNode[], result: Link[], linkData: any)
{
for (var c of children)
{
if (c.shouldBeMapped)
{
linkData.total++;
if (!c.isUnmapped)
{
linkData.linked++;
result = result.concat(c.node.links);
}
}
result = this.flattenLinks(c.children, result, linkData);
}
return result;
}
public compareDocs(aTab: GraphTab, bTab: GraphTab): any {
var bSections = [];
this.flattenSections(bTab.nodes, bSections);
var bCopy = bSections.slice();
var linkData = { total: 0, linked: 0 };
var aLinks = this.flattenLinks(aTab.nodes, [], linkData);
var found = 0;
var checked = 0;
for (var a of aLinks)
{
++checked;
var b = bCopy.find(x => x == a.id)
if (b)
{
bCopy = bCopy.filter(x => x != b);
++found;
}
}
return {
coverage: found + "/" + bSections.length,
mapped: linkData.linked + "/" + linkData.total,
uniqueconnections: found + "/" + checked,
uncoveredIds: bCopy
//"coverage": (found / bSections.length * 100).toFixed(1) + "% (" + found + "/" + bSections.length + ")",
//"mapped": (linkData.linked / linkData.total * 100).toFixed(1) + "% (" + linkData.linked + "/" + linkData.total + ")",
//"uniqueconnections": (found / checked * 100).toFixed(1) + "% (" + found + "/" + checked + ")"
};
}
/**
* Handle Http operation that failed.
* Let the app continue.
* @param operation - name of the operation that failed
* @param result - optional value to return as the observable result
*/
private handleError<T> (operation = 'operation', result?: T) {
return (error: any): Observable<T> => {
// TODO: send the error to remote logging infrastructure
console.error(error); // log to console instead
// TODO: better job of transforming error for user consumption
this.log(`${operation} failed: ${error.message}`);
// Let the app keep running by returning an empty result.
return of(result as T);
};
}
/** Log a GraphService message with the MessageService */
private log(message: string) {
this.messageService.add(`GraphService: ${message}`);
}
public get errorStrings(): string[] {
return this.graphTabs.reduce((a: string[], v: GraphTab) => a.concat(v.errors()), []);
}
public get anyErrors(): boolean {
for (var t of this.graphTabs)
if (t.anyErrors)
return true;
return false;
}
} | var t = tabs[i];
if (!t)
continue;
| random_line_split |
graph.service.ts | import { Injectable } from '@angular/core';
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { Observable, of, forkJoin } from 'rxjs';
import * as Rx from 'rxjs';
import { catchError, map, tap } from 'rxjs/operators';
import { FullDocNode, DocNode2, Doc2, Link, Change, Db } from './standard-map';
import { MessageService } from './message.service';
import * as d3Sankey from 'd3-sankey';
import { TreeNode, IActionMapping } from 'angular-tree-component';
import { GraphTab } from './GraphTab';
import { GraphFilter } from './GraphFilter';
export interface ICategory {
id: string;
title: string;
active?: boolean;
}
export type CategoryList = ICategory[];
export class FilterCriteria {
constructor(
public categoryIds: string[] = null,
public categoryOrder: string[] = null) {
}
}
// -- Dag Node --
export interface SNodeExtra {
nodeId: number;
name: string;
data?: any;
}
export interface SLinkExtra {
source: number;
target: number;
value: number;
uom: string;
sourceNode: any;
targetNode: any;
}
export type SNode = d3Sankey.SankeyNode<SNodeExtra, SLinkExtra>;
export type SLink = d3Sankey.SankeyLink<SNodeExtra, SLinkExtra>;
export interface DAG {
nodes: SNode[];
links: SLink[];
}
// -- Dag Node --
@Injectable({ providedIn: 'root' })
export class GraphService {
private docGuids = {};
private docDb: Db = null;
private docs = {};
private nextDocGuid = 0;
private filterOrder = [];
private runningFilters = false;
public updateSubject = new Rx.BehaviorSubject(0);
public updateViewSubject = new Rx.BehaviorSubject(0);
public visualStyle = true;
public visualZoom = 1;
| (
private messageService: MessageService,
private http: HttpClient) {
this.addTab("ISO");
}
public runFilters(changedTab: GraphTab, parentChanged: boolean)
{
if (this.runningFilters)
return; // prevent re-entry
this.runningFilters = true;
var tabs = this.graphTabs;
var anyChanged = false;
for (var i of this.filterOrder)
{
var t = tabs[i];
if (!t)
continue;
if (anyChanged || t.column.autoFilterSrc == changedTab.column || (parentChanged && t == changedTab))
{
anyChanged = true;
// filter child tree
GraphFilter.runFilter(t.column);
}
}
//if (anyChanged)
this.updateSubject.next(0);
this.runningFilters = false;
}
getGuid(id: string, type: string, rev: string, createMissing: boolean = true): number {
var key = `${type}-${id}-${rev}`;
if (key in this.docGuids)
return this.docGuids[key];
if (!createMissing)
return null;
var value = this.nextDocGuid++;
this.docGuids[key] = value;
return value;
}
getDbIndex() : Observable<Db> {
if (this.docDb)
return of(this.docDb);
return this.http.get<Db>('assets/output/docs-index.json', {responseType: 'json'})
.pipe(
tap(
data => {
this.docDb = data;
},
error => this.handleError("getDbIndex", [])
)
);
}
getDoc(id: string) : Observable<Doc2> {
if (this.docs[id])
return of(this.docs[id]);
return this.http.get<Doc2>('assets/output/docs-' + id + '.json', {responseType: 'json'})
.pipe(
tap(
data => {
this.docs[id] = data;
},
error => this.handleError("getDoc", [])
)
);
}
getDocTypes() : Observable<CategoryList> {
return this.getDbIndex().pipe(
map(
data => {
return data.docs.map(v => { return { id: v.id, title: v.type }; });
}
)
);
}
private addToDoc(parent: FullDocNode, input: DocNode2) {
var child = new FullDocNode(input);
if (parent)
{
parent.children.push(child);
}
// Recurse
for (var c of input.children)
{
this.addToDoc(child, c);
}
return child;
}
getFullDocByType(id: string) : Observable<FullDocNode> {
return this.getDoc(id).pipe(
map(
data => {
return this.addToDoc(null, data);
}
)
);
}
getChangeLog(): Observable<Change[]> {
return this.getDbIndex().pipe(
map(
data => {
return data.changelog;
}
)
);
}
// Live state management: maybe move this to a different service.
public graphTabs: GraphTab[] = [ ];
public selectedTab: number = 0;
public get canAdd(): boolean {
return this.graphTabs.length < 3;
}
public addTab(id: string, filterTopLevelKeys: string[] = null, customName: string = null) {
if (!this.canAdd)
return;
this.getFullDocByType(id)
.subscribe(doc => {
var newTab = new GraphTab(this, null, doc);
newTab.nodes = doc.children;
// Filter the top level nodes if desired
if (filterTopLevelKeys) {
newTab.nodes = doc.children.filter(c => {
var rootKey = c.id.replace(/_/g, ''); // The 'all' structure has _, where the menu choice has those stripped. Remove them for comparison.
return filterTopLevelKeys.includes(rootKey);
});
}
// Override the tab name if desired
if (customName)
newTab.title = customName;
// Reference this data from the column view.
newTab.column.nodes = newTab.nodes;
this.graphTabs.push(newTab);
this.ensureISOIsInMiddle();
// Coverage calculation is disabled to save time.
//if (id != "ISO")
//{
// // compare with iso.
// newTab.coverage = this.compareDocs(newTab.column, this.graphTabs[1]);
//}
var selectTab = newTab;
if (newTab.isAll) {
selectTab = this.graphTabs.find(t => t.isIso);
}
// The current request is to NOT activate the newly added tab. So only activate index 0
if (this.graphTabs.length != 1) {
selectTab = this.graphTabs[this.selectedTab]; // reselect current selection
}
// even if we don't change tabs, we still have to reactivate it to configure filters
this.activateTab(selectTab);
});
}
private ensureISOIsInMiddle() {
var isoTab = this.graphTabs.find(t => t.isIso);
if (this.graphTabs.length > 1)
{
this.graphTabs = this.graphTabs.filter(t => t != isoTab);
this.graphTabs.splice(1, 0, isoTab);
}
}
public configureFilterStack() {
switch (this.selectedTab)
{
case 0: this.filterOrder = [0, 1, 2]; break;
case 1: this.filterOrder = [1, 0, 2]; break;
case 2: this.filterOrder = [2, 1, 0]; break;
}
// setup filters
var isoTab = this.graphTabs.find(t => t.isIso);
var primary = this.graphTabs[this.filterOrder[0]];
if (!primary)
return;
// clear auto filter of left tab
primary.column.autoFilterSrc = null;
primary.column.autoFilterParent = null;
primary.column.autoFilterSelf = false;
var secondary = this.graphTabs[this.filterOrder[1]];
if (secondary)
{
if (secondary == isoTab)
{
// assure iso filters from the primary: "auto filter"
isoTab.column.autoFilterSrc = primary.column;
isoTab.column.autoFilterParent = primary.column.parent;
isoTab.column.autoFilterSelf = false;
}
else
{
// auto filter with this tabs connections to iso
secondary.column.autoFilterSrc = isoTab.column;
secondary.column.autoFilterParent = primary.column.parent; // the primary tab always drives the selection
secondary.column.autoFilterSelf = true;
}
}
var third = this.graphTabs[this.filterOrder[2]];
if (third)
{
// auto filter with this tabs connections to iso
third.column.autoFilterSrc = isoTab.column;
third.column.autoFilterParent = primary.column.parent; // the primary tab always drives the selection
third.column.autoFilterSelf = true;
}
}
public tabChanged() {
this.configureFilterStack();
if (this.selectedTab >= 0 && this.selectedTab < this.graphTabs.length) {
this.graphTabs[this.selectedTab].parentTabTreeChanged();
}
}
public removeTab(tab) {
this.graphTabs = this.graphTabs.filter(t => t!=tab);
this.ensureISOIsInMiddle();
this.activateTab(this.graphTabs[0]);
}
public activateTab(tab: GraphTab): Promise<boolean> {
return new Promise<boolean>((resolve, reject) => {
var newIndex = this.graphTabs.indexOf(tab);
var finalize = () => {
this.selectedTab = newIndex;
this.tabChanged();
setTimeout(() => {
resolve(true);
}, 1000);
};
// if the index is the same
this.selectedTab = -1; // set it to non-value so change is detected
setTimeout(finalize, 1000); // give dom time to stabilize
});
}
public getNodesWithLinks(children: FullDocNode[], result: FullDocNode[])
{
for (var c of children)
{
if (c.node.links && c.node.links.length > 0)
result.push(c);
this.getNodesWithLinks(c.children, result);
}
return result;
}
public flattenSections(children: FullDocNode[], result: string[])
{
for (var c of children)
{
if (c.getBody())
result.push(c.id);
this.flattenSections(c.children, result);
}
return result;
}
public flattenLinks(children: FullDocNode[], result: Link[], linkData: any)
{
for (var c of children)
{
if (c.shouldBeMapped)
{
linkData.total++;
if (!c.isUnmapped)
{
linkData.linked++;
result = result.concat(c.node.links);
}
}
result = this.flattenLinks(c.children, result, linkData);
}
return result;
}
public compareDocs(aTab: GraphTab, bTab: GraphTab): any {
var bSections = [];
this.flattenSections(bTab.nodes, bSections);
var bCopy = bSections.slice();
var linkData = { total: 0, linked: 0 };
var aLinks = this.flattenLinks(aTab.nodes, [], linkData);
var found = 0;
var checked = 0;
for (var a of aLinks)
{
++checked;
var b = bCopy.find(x => x == a.id)
if (b)
{
bCopy = bCopy.filter(x => x != b);
++found;
}
}
return {
coverage: found + "/" + bSections.length,
mapped: linkData.linked + "/" + linkData.total,
uniqueconnections: found + "/" + checked,
uncoveredIds: bCopy
//"coverage": (found / bSections.length * 100).toFixed(1) + "% (" + found + "/" + bSections.length + ")",
//"mapped": (linkData.linked / linkData.total * 100).toFixed(1) + "% (" + linkData.linked + "/" + linkData.total + ")",
//"uniqueconnections": (found / checked * 100).toFixed(1) + "% (" + found + "/" + checked + ")"
};
}
/**
* Handle Http operation that failed.
* Let the app continue.
* @param operation - name of the operation that failed
* @param result - optional value to return as the observable result
*/
private handleError<T> (operation = 'operation', result?: T) {
return (error: any): Observable<T> => {
// TODO: send the error to remote logging infrastructure
console.error(error); // log to console instead
// TODO: better job of transforming error for user consumption
this.log(`${operation} failed: ${error.message}`);
// Let the app keep running by returning an empty result.
return of(result as T);
};
}
/** Log a GraphService message with the MessageService */
private log(message: string) {
this.messageService.add(`GraphService: ${message}`);
}
public get errorStrings(): string[] {
return this.graphTabs.reduce((a: string[], v: GraphTab) => a.concat(v.errors()), []);
}
public get anyErrors(): boolean {
for (var t of this.graphTabs)
if (t.anyErrors)
return true;
return false;
}
}
| constructor | identifier_name |
graph.service.ts | import { Injectable } from '@angular/core';
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { Observable, of, forkJoin } from 'rxjs';
import * as Rx from 'rxjs';
import { catchError, map, tap } from 'rxjs/operators';
import { FullDocNode, DocNode2, Doc2, Link, Change, Db } from './standard-map';
import { MessageService } from './message.service';
import * as d3Sankey from 'd3-sankey';
import { TreeNode, IActionMapping } from 'angular-tree-component';
import { GraphTab } from './GraphTab';
import { GraphFilter } from './GraphFilter';
export interface ICategory {
id: string;
title: string;
active?: boolean;
}
export type CategoryList = ICategory[];
export class FilterCriteria {
constructor(
public categoryIds: string[] = null,
public categoryOrder: string[] = null) {
}
}
// -- Dag Node --
export interface SNodeExtra {
nodeId: number;
name: string;
data?: any;
}
export interface SLinkExtra {
source: number;
target: number;
value: number;
uom: string;
sourceNode: any;
targetNode: any;
}
export type SNode = d3Sankey.SankeyNode<SNodeExtra, SLinkExtra>;
export type SLink = d3Sankey.SankeyLink<SNodeExtra, SLinkExtra>;
export interface DAG {
nodes: SNode[];
links: SLink[];
}
// -- Dag Node --
@Injectable({ providedIn: 'root' })
export class GraphService {
private docGuids = {};
private docDb: Db = null;
private docs = {};
private nextDocGuid = 0;
private filterOrder = [];
private runningFilters = false;
public updateSubject = new Rx.BehaviorSubject(0);
public updateViewSubject = new Rx.BehaviorSubject(0);
public visualStyle = true;
public visualZoom = 1;
constructor(
private messageService: MessageService,
private http: HttpClient) {
this.addTab("ISO");
}
public runFilters(changedTab: GraphTab, parentChanged: boolean)
{
if (this.runningFilters)
return; // prevent re-entry
this.runningFilters = true;
var tabs = this.graphTabs;
var anyChanged = false;
for (var i of this.filterOrder)
{
var t = tabs[i];
if (!t)
continue;
if (anyChanged || t.column.autoFilterSrc == changedTab.column || (parentChanged && t == changedTab))
{
anyChanged = true;
// filter child tree
GraphFilter.runFilter(t.column);
}
}
//if (anyChanged)
this.updateSubject.next(0);
this.runningFilters = false;
}
getGuid(id: string, type: string, rev: string, createMissing: boolean = true): number {
var key = `${type}-${id}-${rev}`;
if (key in this.docGuids)
return this.docGuids[key];
if (!createMissing)
return null;
var value = this.nextDocGuid++;
this.docGuids[key] = value;
return value;
}
getDbIndex() : Observable<Db> {
if (this.docDb)
return of(this.docDb);
return this.http.get<Db>('assets/output/docs-index.json', {responseType: 'json'})
.pipe(
tap(
data => {
this.docDb = data;
},
error => this.handleError("getDbIndex", [])
)
);
}
getDoc(id: string) : Observable<Doc2> {
if (this.docs[id])
return of(this.docs[id]);
return this.http.get<Doc2>('assets/output/docs-' + id + '.json', {responseType: 'json'})
.pipe(
tap(
data => {
this.docs[id] = data;
},
error => this.handleError("getDoc", [])
)
);
}
getDocTypes() : Observable<CategoryList> {
return this.getDbIndex().pipe(
map(
data => {
return data.docs.map(v => { return { id: v.id, title: v.type }; });
}
)
);
}
private addToDoc(parent: FullDocNode, input: DocNode2) {
var child = new FullDocNode(input);
if (parent)
{
parent.children.push(child);
}
// Recurse
for (var c of input.children)
{
this.addToDoc(child, c);
}
return child;
}
getFullDocByType(id: string) : Observable<FullDocNode> {
return this.getDoc(id).pipe(
map(
data => {
return this.addToDoc(null, data);
}
)
);
}
getChangeLog(): Observable<Change[]> {
return this.getDbIndex().pipe(
map(
data => {
return data.changelog;
}
)
);
}
// Live state management: maybe move this to a different service.
public graphTabs: GraphTab[] = [ ];
public selectedTab: number = 0;
public get canAdd(): boolean {
return this.graphTabs.length < 3;
}
public addTab(id: string, filterTopLevelKeys: string[] = null, customName: string = null) {
if (!this.canAdd)
return;
this.getFullDocByType(id)
.subscribe(doc => {
var newTab = new GraphTab(this, null, doc);
newTab.nodes = doc.children;
// Filter the top level nodes if desired
if (filterTopLevelKeys) {
newTab.nodes = doc.children.filter(c => {
var rootKey = c.id.replace(/_/g, ''); // The 'all' structure has _, where the menu choice has those stripped. Remove them for comparison.
return filterTopLevelKeys.includes(rootKey);
});
}
// Override the tab name if desired
if (customName)
newTab.title = customName;
// Reference this data from the column view.
newTab.column.nodes = newTab.nodes;
this.graphTabs.push(newTab);
this.ensureISOIsInMiddle();
// Coverage calculation is disabled to save time.
//if (id != "ISO")
//{
// // compare with iso.
// newTab.coverage = this.compareDocs(newTab.column, this.graphTabs[1]);
//}
var selectTab = newTab;
if (newTab.isAll) {
selectTab = this.graphTabs.find(t => t.isIso);
}
// The current request is to NOT activate the newly added tab. So only activate index 0
if (this.graphTabs.length != 1) {
selectTab = this.graphTabs[this.selectedTab]; // reselect current selection
}
// even if we don't change tabs, we still have to reactivate it to configure filters
this.activateTab(selectTab);
});
}
private ensureISOIsInMiddle() {
var isoTab = this.graphTabs.find(t => t.isIso);
if (this.graphTabs.length > 1)
{
this.graphTabs = this.graphTabs.filter(t => t != isoTab);
this.graphTabs.splice(1, 0, isoTab);
}
}
public configureFilterStack() {
switch (this.selectedTab)
{
case 0: this.filterOrder = [0, 1, 2]; break;
case 1: this.filterOrder = [1, 0, 2]; break;
case 2: this.filterOrder = [2, 1, 0]; break;
}
// setup filters
var isoTab = this.graphTabs.find(t => t.isIso);
var primary = this.graphTabs[this.filterOrder[0]];
if (!primary)
return;
// clear auto filter of left tab
primary.column.autoFilterSrc = null;
primary.column.autoFilterParent = null;
primary.column.autoFilterSelf = false;
var secondary = this.graphTabs[this.filterOrder[1]];
if (secondary)
{
if (secondary == isoTab)
{
// assure iso filters from the primary: "auto filter"
isoTab.column.autoFilterSrc = primary.column;
isoTab.column.autoFilterParent = primary.column.parent;
isoTab.column.autoFilterSelf = false;
}
else
{
// auto filter with this tabs connections to iso
secondary.column.autoFilterSrc = isoTab.column;
secondary.column.autoFilterParent = primary.column.parent; // the primary tab always drives the selection
secondary.column.autoFilterSelf = true;
}
}
var third = this.graphTabs[this.filterOrder[2]];
if (third)
{
// auto filter with this tabs connections to iso
third.column.autoFilterSrc = isoTab.column;
third.column.autoFilterParent = primary.column.parent; // the primary tab always drives the selection
third.column.autoFilterSelf = true;
}
}
public tabChanged() {
this.configureFilterStack();
if (this.selectedTab >= 0 && this.selectedTab < this.graphTabs.length) {
this.graphTabs[this.selectedTab].parentTabTreeChanged();
}
}
public removeTab(tab) {
this.graphTabs = this.graphTabs.filter(t => t!=tab);
this.ensureISOIsInMiddle();
this.activateTab(this.graphTabs[0]);
}
public activateTab(tab: GraphTab): Promise<boolean> {
return new Promise<boolean>((resolve, reject) => {
var newIndex = this.graphTabs.indexOf(tab);
var finalize = () => {
this.selectedTab = newIndex;
this.tabChanged();
setTimeout(() => {
resolve(true);
}, 1000);
};
// if the index is the same
this.selectedTab = -1; // set it to non-value so change is detected
setTimeout(finalize, 1000); // give dom time to stabilize
});
}
public getNodesWithLinks(children: FullDocNode[], result: FullDocNode[])
{
for (var c of children)
{
if (c.node.links && c.node.links.length > 0)
result.push(c);
this.getNodesWithLinks(c.children, result);
}
return result;
}
public flattenSections(children: FullDocNode[], result: string[])
{
for (var c of children)
{
if (c.getBody())
result.push(c.id);
this.flattenSections(c.children, result);
}
return result;
}
public flattenLinks(children: FullDocNode[], result: Link[], linkData: any)
{
for (var c of children)
{
if (c.shouldBeMapped)
{
linkData.total++;
if (!c.isUnmapped)
{
linkData.linked++;
result = result.concat(c.node.links);
}
}
result = this.flattenLinks(c.children, result, linkData);
}
return result;
}
public compareDocs(aTab: GraphTab, bTab: GraphTab): any {
var bSections = [];
this.flattenSections(bTab.nodes, bSections);
var bCopy = bSections.slice();
var linkData = { total: 0, linked: 0 };
var aLinks = this.flattenLinks(aTab.nodes, [], linkData);
var found = 0;
var checked = 0;
for (var a of aLinks)
{
++checked;
var b = bCopy.find(x => x == a.id)
if (b)
{
bCopy = bCopy.filter(x => x != b);
++found;
}
}
return {
coverage: found + "/" + bSections.length,
mapped: linkData.linked + "/" + linkData.total,
uniqueconnections: found + "/" + checked,
uncoveredIds: bCopy
//"coverage": (found / bSections.length * 100).toFixed(1) + "% (" + found + "/" + bSections.length + ")",
//"mapped": (linkData.linked / linkData.total * 100).toFixed(1) + "% (" + linkData.linked + "/" + linkData.total + ")",
//"uniqueconnections": (found / checked * 100).toFixed(1) + "% (" + found + "/" + checked + ")"
};
}
/**
* Handle Http operation that failed.
* Let the app continue.
* @param operation - name of the operation that failed
* @param result - optional value to return as the observable result
*/
private handleError<T> (operation = 'operation', result?: T) {
return (error: any): Observable<T> => {
// TODO: send the error to remote logging infrastructure
console.error(error); // log to console instead
// TODO: better job of transforming error for user consumption
this.log(`${operation} failed: ${error.message}`);
// Let the app keep running by returning an empty result.
return of(result as T);
};
}
/** Log a GraphService message with the MessageService */
private log(message: string) {
this.messageService.add(`GraphService: ${message}`);
}
public get errorStrings(): string[] {
return this.graphTabs.reduce((a: string[], v: GraphTab) => a.concat(v.errors()), []);
}
public get anyErrors(): boolean |
}
| {
for (var t of this.graphTabs)
if (t.anyErrors)
return true;
return false;
} | identifier_body |
binomial.rs | // Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The binomial distribution.
#![allow(deprecated)]
#![allow(clippy::all)]
use crate::distributions::{Distribution, Uniform};
use crate::Rng;
/// The binomial distribution `Binomial(n, p)`.
///
/// This distribution has density function:
/// `f(k) = n!/(k! (n-k)!) p^k (1-p)^(n-k)` for `k >= 0`.
#[deprecated(since = "0.7.0", note = "moved to rand_distr crate")]
#[derive(Clone, Copy, Debug)]
pub struct Binomial {
/// Number of trials.
n: u64,
/// Probability of success.
p: f64,
}
impl Binomial {
/// Construct a new `Binomial` with the given shape parameters `n` (number
/// of trials) and `p` (probability of success).
///
/// Panics if `p < 0` or `p > 1`.
pub fn new(n: u64, p: f64) -> Binomial {
assert!(p >= 0.0, "Binomial::new called with p < 0");
assert!(p <= 1.0, "Binomial::new called with p > 1");
Binomial { n, p }
}
}
/// Convert a `f64` to an `i64`, panicking on overflow.
// In the future (Rust 1.34), this might be replaced with `TryFrom`.
fn f64_to_i64(x: f64) -> i64 {
assert!(x < (::std::i64::MAX as f64));
x as i64
}
impl Distribution<u64> for Binomial {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
// Handle these values directly.
if self.p == 0.0 {
return 0;
} else if self.p == 1.0 {
return self.n;
}
// The binomial distribution is symmetric under (p, k) -> (1 - p, n - k), so
// switch p so that it is at most 0.5 - this keeps the expected value lower -
// and simply invert the result at the end.
let p = if self.p <= 0.5 { self.p } else { 1.0 - self.p };
let result;
let q = 1. - p;
// For small n * min(p, 1 - p), the BINV algorithm based on the inverse
// transformation of the binomial distribution is efficient. Otherwise,
// the BTPE algorithm is used.
//
// Voratas Kachitvichyanukul and Bruce W. Schmeiser. 1988. Binomial
// random variate generation. Commun. ACM 31, 2 (February 1988),
// 216-222. http://dx.doi.org/10.1145/42372.42381
// Threshold for preferring the BINV algorithm. The paper suggests 10,
// Ranlib uses 30, and GSL uses 14.
const BINV_THRESHOLD: f64 = 10.;
if (self.n as f64) * p < BINV_THRESHOLD && self.n <= (::std::i32::MAX as u64) {
// Use the BINV algorithm.
let s = p / q;
let a = ((self.n + 1) as f64) * s;
let mut r = q.powi(self.n as i32);
let mut u: f64 = rng.gen();
let mut x = 0;
while u > r as f64 {
u -= r;
x += 1;
r *= a / (x as f64) - s;
}
result = x;
} else {
// Use the BTPE algorithm.
// Threshold for using the squeeze algorithm. This can be freely
// chosen based on performance. Ranlib and GSL use 20.
const SQUEEZE_THRESHOLD: i64 = 20;
// Step 0: Calculate constants as functions of `n` and `p`.
let n = self.n as f64;
let np = n * p;
let npq = np * q;
let f_m = np + p;
let m = f64_to_i64(f_m);
// radius of triangle region, since height=1 also area of region
let p1 = (2.195 * npq.sqrt() - 4.6 * q).floor() + 0.5;
// tip of triangle
let x_m = (m as f64) + 0.5;
// left edge of triangle
let x_l = x_m - p1;
// right edge of triangle
let x_r = x_m + p1;
let c = 0.134 + 20.5 / (15.3 + (m as f64));
// p1 + area of parallelogram region
let p2 = p1 * (1. + 2. * c);
fn lambda(a: f64) -> f64 {
a * (1. + 0.5 * a)
}
let lambda_l = lambda((f_m - x_l) / (f_m - x_l * p));
let lambda_r = lambda((x_r - f_m) / (x_r * q));
// p1 + area of left tail
let p3 = p2 + c / lambda_l;
// p1 + area of right tail
let p4 = p3 + c / lambda_r;
// return value
let mut y: i64;
let gen_u = Uniform::new(0., p4);
let gen_v = Uniform::new(0., 1.);
loop {
// Step 1: Generate `u` for selecting the region. If region 1 is
// selected, generate a triangularly distributed variate.
let u = gen_u.sample(rng);
let mut v = gen_v.sample(rng);
if !(u > p1) {
y = f64_to_i64(x_m - p1 * v + u);
break;
}
if !(u > p2) {
// Step 2: Region 2, parallelograms. Check if region 2 is
// used. If so, generate `y`.
let x = x_l + (u - p1) / c;
v = v * c + 1.0 - (x - x_m).abs() / p1;
if v > 1. {
continue;
} else {
y = f64_to_i64(x);
}
} else if !(u > p3) {
// Step 3: Region 3, left exponential tail.
y = f64_to_i64(x_l + v.ln() / lambda_l);
if y < 0 {
continue;
} else {
v *= (u - p2) * lambda_l;
}
} else {
// Step 4: Region 4, right exponential tail.
y = f64_to_i64(x_r - v.ln() / lambda_r);
if y > 0 && (y as u64) > self.n {
continue;
} else {
v *= (u - p3) * lambda_r;
}
}
// Step 5: Acceptance/rejection comparison.
// Step 5.0: Test for appropriate method of evaluating f(y).
let k = (y - m).abs();
if !(k > SQUEEZE_THRESHOLD && (k as f64) < 0.5 * npq - 1.) {
// Step 5.1: Evaluate f(y) via the recursive relationship. Start the
// search from the mode.
let s = p / q;
let a = s * (n + 1.);
let mut f = 1.0;
if m < y {
let mut i = m;
loop {
i += 1;
f *= a / (i as f64) - s;
if i == y {
break;
}
}
} else if m > y {
let mut i = y;
loop {
i += 1;
f /= a / (i as f64) - s;
if i == m {
break;
}
}
}
if v > f {
continue;
} else {
break;
}
}
// Step 5.2: Squeezing. Check the value of ln(v) against the upper and
// lower bound of ln(f(y)).
let k = k as f64;
let rho = (k / npq) * ((k * (k / 3. + 0.625) + 1. / 6.) / npq + 0.5);
let t = -0.5 * k * k / npq;
let alpha = v.ln();
if alpha < t - rho {
break;
}
if alpha > t + rho {
continue;
}
// Step 5.3: Final acceptance/rejection test.
let x1 = (y + 1) as f64;
let f1 = (m + 1) as f64;
let z = (f64_to_i64(n) + 1 - m) as f64;
let w = (f64_to_i64(n) - y + 1) as f64;
fn stirling(a: f64) -> f64 {
let a2 = a * a;
(13860. - (462. - (132. - (99. - 140. / a2) / a2) / a2) / a2) / a / 166320.
}
if alpha
> x_m * (f1 / x1).ln()
+ (n - (m as f64) + 0.5) * (z / w).ln()
+ ((y - m) as f64) * (w * p / (x1 * q)).ln()
// We use the signs from the GSL implementation, which are
// different than the ones in the reference. According to
// the GSL authors, the new signs were verified to be
// correct by one of the original designers of the
// algorithm.
+ stirling(f1)
+ stirling(z)
- stirling(x1)
- stirling(w)
{
continue;
}
break;
}
assert!(y >= 0);
result = y as u64;
}
// Invert the result for p < 0.5.
if p != self.p {
self.n - result
} else {
result
}
}
}
#[cfg(test)]
mod test {
use super::Binomial;
use crate::distributions::Distribution;
use crate::Rng;
fn test_binomial_mean_and_variance<R: Rng>(n: u64, p: f64, rng: &mut R) {
let binomial = Binomial::new(n, p);
let expected_mean = n as f64 * p;
let expected_variance = n as f64 * p * (1.0 - p);
| for i in results.iter_mut() {
*i = binomial.sample(rng) as f64;
}
let mean = results.iter().sum::<f64>() / results.len() as f64;
assert!(
(mean as f64 - expected_mean).abs() < expected_mean / 50.0,
"mean: {}, expected_mean: {}",
mean,
expected_mean
);
let variance =
results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>() / results.len() as f64;
assert!(
(variance - expected_variance).abs() < expected_variance / 10.0,
"variance: {}, expected_variance: {}",
variance,
expected_variance
);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_binomial() {
let mut rng = crate::test::rng(351);
test_binomial_mean_and_variance(150, 0.1, &mut rng);
test_binomial_mean_and_variance(70, 0.6, &mut rng);
test_binomial_mean_and_variance(40, 0.5, &mut rng);
test_binomial_mean_and_variance(20, 0.7, &mut rng);
test_binomial_mean_and_variance(20, 0.5, &mut rng);
}
#[test]
fn test_binomial_end_points() {
let mut rng = crate::test::rng(352);
assert_eq!(rng.sample(Binomial::new(20, 0.0)), 0);
assert_eq!(rng.sample(Binomial::new(20, 1.0)), 20);
}
#[test]
#[should_panic]
fn test_binomial_invalid_lambda_neg() {
Binomial::new(20, -10.0);
}
} | let mut results = [0.0; 1000]; | random_line_split |
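// A minimal usage sketch for the `Binomial` type defined above. It assumes the
// rand 0.7-era paths `rand::distributions::Binomial` and `rand::thread_rng()`;
// adjust the imports if this module is exposed differently.
use rand::distributions::{Binomial, Distribution};

fn main() {
    // 20 independent trials, each succeeding with probability 0.3.
    // n * p = 6 is below BINV_THRESHOLD, so sampling takes the BINV branch.
    let bin = Binomial::new(20, 0.3);
    let successes = bin.sample(&mut rand::thread_rng());
    println!("observed {} successes out of 20", successes);
}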
binomial.rs | // Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The binomial distribution.
#![allow(deprecated)]
#![allow(clippy::all)]
use crate::distributions::{Distribution, Uniform};
use crate::Rng;
/// The binomial distribution `Binomial(n, p)`.
///
/// This distribution has density function:
/// `f(k) = n!/(k! (n-k)!) p^k (1-p)^(n-k)` for `k >= 0`.
#[deprecated(since = "0.7.0", note = "moved to rand_distr crate")]
#[derive(Clone, Copy, Debug)]
pub struct Binomial {
/// Number of trials.
n: u64,
/// Probability of success.
p: f64,
}
impl Binomial {
/// Construct a new `Binomial` with the given shape parameters `n` (number
/// of trials) and `p` (probability of success).
///
/// Panics if `p < 0` or `p > 1`.
pub fn new(n: u64, p: f64) -> Binomial {
assert!(p >= 0.0, "Binomial::new called with p < 0");
assert!(p <= 1.0, "Binomial::new called with p > 1");
Binomial { n, p }
}
}
/// Convert a `f64` to an `i64`, panicking on overflow.
// In the future (Rust 1.34), this might be replaced with `TryFrom`.
fn f64_to_i64(x: f64) -> i64 {
assert!(x < (::std::i64::MAX as f64));
x as i64
}
impl Distribution<u64> for Binomial {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
// Handle these values directly.
if self.p == 0.0 {
return 0;
} else if self.p == 1.0 {
return self.n;
}
// The binomial distribution is symmetric under (p, k) -> (1 - p, n - k), so
// switch p so that it is at most 0.5 - this keeps the expected value lower -
// and simply invert the result at the end.
let p = if self.p <= 0.5 { self.p } else { 1.0 - self.p };
let result;
let q = 1. - p;
// For small n * min(p, 1 - p), the BINV algorithm based on the inverse
// transformation of the binomial distribution is efficient. Otherwise,
// the BTPE algorithm is used.
//
// Voratas Kachitvichyanukul and Bruce W. Schmeiser. 1988. Binomial
// random variate generation. Commun. ACM 31, 2 (February 1988),
// 216-222. http://dx.doi.org/10.1145/42372.42381
// Threshold for preferring the BINV algorithm. The paper suggests 10,
// Ranlib uses 30, and GSL uses 14.
const BINV_THRESHOLD: f64 = 10.;
if (self.n as f64) * p < BINV_THRESHOLD && self.n <= (::std::i32::MAX as u64) {
// Use the BINV algorithm.
let s = p / q;
let a = ((self.n + 1) as f64) * s;
let mut r = q.powi(self.n as i32);
let mut u: f64 = rng.gen();
let mut x = 0;
while u > r as f64 {
u -= r;
x += 1;
r *= a / (x as f64) - s;
}
result = x;
} else {
// Use the BTPE algorithm.
// Threshold for using the squeeze algorithm. This can be freely
// chosen based on performance. Ranlib and GSL use 20.
const SQUEEZE_THRESHOLD: i64 = 20;
// Step 0: Calculate constants as functions of `n` and `p`.
let n = self.n as f64;
let np = n * p;
let npq = np * q;
let f_m = np + p;
let m = f64_to_i64(f_m);
// radius of triangle region, since height=1 also area of region
let p1 = (2.195 * npq.sqrt() - 4.6 * q).floor() + 0.5;
// tip of triangle
let x_m = (m as f64) + 0.5;
// left edge of triangle
let x_l = x_m - p1;
// right edge of triangle
let x_r = x_m + p1;
let c = 0.134 + 20.5 / (15.3 + (m as f64));
// p1 + area of parallelogram region
let p2 = p1 * (1. + 2. * c);
fn lambda(a: f64) -> f64 {
a * (1. + 0.5 * a)
}
let lambda_l = lambda((f_m - x_l) / (f_m - x_l * p));
let lambda_r = lambda((x_r - f_m) / (x_r * q));
// p1 + area of left tail
let p3 = p2 + c / lambda_l;
// p1 + area of right tail
let p4 = p3 + c / lambda_r;
// return value
let mut y: i64;
let gen_u = Uniform::new(0., p4);
let gen_v = Uniform::new(0., 1.);
loop {
// Step 1: Generate `u` for selecting the region. If region 1 is
// selected, generate a triangularly distributed variate.
let u = gen_u.sample(rng);
let mut v = gen_v.sample(rng);
if !(u > p1) {
y = f64_to_i64(x_m - p1 * v + u);
break;
}
if !(u > p2) {
// Step 2: Region 2, parallelograms. Check if region 2 is
// used. If so, generate `y`.
let x = x_l + (u - p1) / c;
v = v * c + 1.0 - (x - x_m).abs() / p1;
if v > 1. {
continue;
} else {
y = f64_to_i64(x);
}
} else if !(u > p3) {
// Step 3: Region 3, left exponential tail.
y = f64_to_i64(x_l + v.ln() / lambda_l);
if y < 0 {
continue;
} else {
v *= (u - p2) * lambda_l;
}
} else {
// Step 4: Region 4, right exponential tail.
y = f64_to_i64(x_r - v.ln() / lambda_r);
if y > 0 && (y as u64) > self.n {
continue;
} else {
v *= (u - p3) * lambda_r;
}
}
// Step 5: Acceptance/rejection comparison.
// Step 5.0: Test for appropriate method of evaluating f(y).
let k = (y - m).abs();
if !(k > SQUEEZE_THRESHOLD && (k as f64) < 0.5 * npq - 1.) {
// Step 5.1: Evaluate f(y) via the recursive relationship. Start the
// search from the mode.
let s = p / q;
let a = s * (n + 1.);
let mut f = 1.0;
if m < y {
let mut i = m;
loop {
i += 1;
f *= a / (i as f64) - s;
if i == y {
break;
}
}
} else if m > y {
let mut i = y;
loop {
i += 1;
f /= a / (i as f64) - s;
if i == m {
break;
}
}
}
if v > f {
continue;
} else {
break;
}
}
// Step 5.2: Squeezing. Check the value of ln(v) against the upper and
// lower bound of ln(f(y)).
let k = k as f64;
let rho = (k / npq) * ((k * (k / 3. + 0.625) + 1. / 6.) / npq + 0.5);
let t = -0.5 * k * k / npq;
let alpha = v.ln();
if alpha < t - rho {
break;
}
if alpha > t + rho {
continue;
}
// Step 5.3: Final acceptance/rejection test.
let x1 = (y + 1) as f64;
let f1 = (m + 1) as f64;
let z = (f64_to_i64(n) + 1 - m) as f64;
let w = (f64_to_i64(n) - y + 1) as f64;
fn stirling(a: f64) -> f64 {
let a2 = a * a;
(13860. - (462. - (132. - (99. - 140. / a2) / a2) / a2) / a2) / a / 166320.
}
if alpha
> x_m * (f1 / x1).ln()
+ (n - (m as f64) + 0.5) * (z / w).ln()
+ ((y - m) as f64) * (w * p / (x1 * q)).ln()
// We use the signs from the GSL implementation, which are
// different than the ones in the reference. According to
// the GSL authors, the new signs were verified to be
// correct by one of the original designers of the
// algorithm.
+ stirling(f1)
+ stirling(z)
- stirling(x1)
- stirling(w)
{
continue;
}
break;
}
assert!(y >= 0);
result = y as u64;
}
// Invert the result for p < 0.5.
if p != self.p | else {
result
}
}
}
#[cfg(test)]
mod test {
use super::Binomial;
use crate::distributions::Distribution;
use crate::Rng;
fn test_binomial_mean_and_variance<R: Rng>(n: u64, p: f64, rng: &mut R) {
let binomial = Binomial::new(n, p);
let expected_mean = n as f64 * p;
let expected_variance = n as f64 * p * (1.0 - p);
let mut results = [0.0; 1000];
for i in results.iter_mut() {
*i = binomial.sample(rng) as f64;
}
let mean = results.iter().sum::<f64>() / results.len() as f64;
assert!(
(mean as f64 - expected_mean).abs() < expected_mean / 50.0,
"mean: {}, expected_mean: {}",
mean,
expected_mean
);
let variance =
results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>() / results.len() as f64;
assert!(
(variance - expected_variance).abs() < expected_variance / 10.0,
"variance: {}, expected_variance: {}",
variance,
expected_variance
);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_binomial() {
let mut rng = crate::test::rng(351);
test_binomial_mean_and_variance(150, 0.1, &mut rng);
test_binomial_mean_and_variance(70, 0.6, &mut rng);
test_binomial_mean_and_variance(40, 0.5, &mut rng);
test_binomial_mean_and_variance(20, 0.7, &mut rng);
test_binomial_mean_and_variance(20, 0.5, &mut rng);
}
#[test]
fn test_binomial_end_points() {
let mut rng = crate::test::rng(352);
assert_eq!(rng.sample(Binomial::new(20, 0.0)), 0);
assert_eq!(rng.sample(Binomial::new(20, 1.0)), 20);
}
#[test]
#[should_panic]
fn test_binomial_invalid_lambda_neg() {
Binomial::new(20, -10.0);
}
}
| {
self.n - result
} | conditional_block |
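// For reference, the density in the doc comment above and the moments checked
// by `test_binomial_mean_and_variance` are the standard Binomial(n, p)
// results; in LaTeX:
//
//   \Pr[X = k] = \binom{n}{k} p^k (1-p)^{n-k},
//   \mathbb{E}[X] = np, \qquad \operatorname{Var}[X] = np(1-p).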
binomial.rs | // Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The binomial distribution.
#![allow(deprecated)]
#![allow(clippy::all)]
use crate::distributions::{Distribution, Uniform};
use crate::Rng;
/// The binomial distribution `Binomial(n, p)`.
///
/// This distribution has density function:
/// `f(k) = n!/(k! (n-k)!) p^k (1-p)^(n-k)` for `k >= 0`.
#[deprecated(since = "0.7.0", note = "moved to rand_distr crate")]
#[derive(Clone, Copy, Debug)]
pub struct Binomial {
/// Number of trials.
n: u64,
/// Probability of success.
p: f64,
}
impl Binomial {
/// Construct a new `Binomial` with the given shape parameters `n` (number
/// of trials) and `p` (probability of success).
///
/// Panics if `p < 0` or `p > 1`.
pub fn new(n: u64, p: f64) -> Binomial {
assert!(p >= 0.0, "Binomial::new called with p < 0");
assert!(p <= 1.0, "Binomial::new called with p > 1");
Binomial { n, p }
}
}
/// Convert a `f64` to an `i64`, panicking on overflow.
// In the future (Rust 1.34), this might be replaced with `TryFrom`.
fn f64_to_i64(x: f64) -> i64 {
assert!(x < (::std::i64::MAX as f64));
x as i64
}
impl Distribution<u64> for Binomial {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
// Handle these values directly.
if self.p == 0.0 {
return 0;
} else if self.p == 1.0 {
return self.n;
}
// The binomial distribution is symmetric under (p, k) -> (1 - p, n - k), so
// switch p so that it is at most 0.5 - this keeps the expected value lower -
// and simply invert the result at the end.
let p = if self.p <= 0.5 { self.p } else { 1.0 - self.p };
let result;
let q = 1. - p;
// For small n * min(p, 1 - p), the BINV algorithm based on the inverse
// transformation of the binomial distribution is efficient. Otherwise,
// the BTPE algorithm is used.
//
// Voratas Kachitvichyanukul and Bruce W. Schmeiser. 1988. Binomial
// random variate generation. Commun. ACM 31, 2 (February 1988),
// 216-222. http://dx.doi.org/10.1145/42372.42381
// Threshold for preferring the BINV algorithm. The paper suggests 10,
// Ranlib uses 30, and GSL uses 14.
const BINV_THRESHOLD: f64 = 10.;
if (self.n as f64) * p < BINV_THRESHOLD && self.n <= (::std::i32::MAX as u64) {
// Use the BINV algorithm.
let s = p / q;
let a = ((self.n + 1) as f64) * s;
let mut r = q.powi(self.n as i32);
let mut u: f64 = rng.gen();
let mut x = 0;
while u > r as f64 {
u -= r;
x += 1;
r *= a / (x as f64) - s;
}
result = x;
} else {
// Use the BTPE algorithm.
// Threshold for using the squeeze algorithm. This can be freely
// chosen based on performance. Ranlib and GSL use 20.
const SQUEEZE_THRESHOLD: i64 = 20;
// Step 0: Calculate constants as functions of `n` and `p`.
let n = self.n as f64;
let np = n * p;
let npq = np * q;
let f_m = np + p;
let m = f64_to_i64(f_m);
// radius of triangle region, since height=1 also area of region
let p1 = (2.195 * npq.sqrt() - 4.6 * q).floor() + 0.5;
// tip of triangle
let x_m = (m as f64) + 0.5;
// left edge of triangle
let x_l = x_m - p1;
// right edge of triangle
let x_r = x_m + p1;
let c = 0.134 + 20.5 / (15.3 + (m as f64));
// p1 + area of parallelogram region
let p2 = p1 * (1. + 2. * c);
fn lambda(a: f64) -> f64 {
a * (1. + 0.5 * a)
}
let lambda_l = lambda((f_m - x_l) / (f_m - x_l * p));
let lambda_r = lambda((x_r - f_m) / (x_r * q));
// p1 + area of left tail
let p3 = p2 + c / lambda_l;
// p1 + area of right tail
let p4 = p3 + c / lambda_r;
// return value
let mut y: i64;
let gen_u = Uniform::new(0., p4);
let gen_v = Uniform::new(0., 1.);
loop {
// Step 1: Generate `u` for selecting the region. If region 1 is
// selected, generate a triangularly distributed variate.
let u = gen_u.sample(rng);
let mut v = gen_v.sample(rng);
if !(u > p1) {
y = f64_to_i64(x_m - p1 * v + u);
break;
}
if !(u > p2) {
// Step 2: Region 2, parallelograms. Check if region 2 is
// used. If so, generate `y`.
let x = x_l + (u - p1) / c;
v = v * c + 1.0 - (x - x_m).abs() / p1;
if v > 1. {
continue;
} else {
y = f64_to_i64(x);
}
} else if !(u > p3) {
// Step 3: Region 3, left exponential tail.
y = f64_to_i64(x_l + v.ln() / lambda_l);
if y < 0 {
continue;
} else {
v *= (u - p2) * lambda_l;
}
} else {
// Step 4: Region 4, right exponential tail.
y = f64_to_i64(x_r - v.ln() / lambda_r);
if y > 0 && (y as u64) > self.n {
continue;
} else {
v *= (u - p3) * lambda_r;
}
}
// Step 5: Acceptance/rejection comparison.
// Step 5.0: Test for appropriate method of evaluating f(y).
let k = (y - m).abs();
if !(k > SQUEEZE_THRESHOLD && (k as f64) < 0.5 * npq - 1.) {
// Step 5.1: Evaluate f(y) via the recursive relationship. Start the
// search from the mode.
let s = p / q;
let a = s * (n + 1.);
let mut f = 1.0;
if m < y {
let mut i = m;
loop {
i += 1;
f *= a / (i as f64) - s;
if i == y {
break;
}
}
} else if m > y {
let mut i = y;
loop {
i += 1;
f /= a / (i as f64) - s;
if i == m {
break;
}
}
}
if v > f {
continue;
} else {
break;
}
}
// Step 5.2: Squeezing. Check the value of ln(v) against the upper and
// lower bound of ln(f(y)).
let k = k as f64;
let rho = (k / npq) * ((k * (k / 3. + 0.625) + 1. / 6.) / npq + 0.5);
let t = -0.5 * k * k / npq;
let alpha = v.ln();
if alpha < t - rho {
break;
}
if alpha > t + rho {
continue;
}
// Step 5.3: Final acceptance/rejection test.
let x1 = (y + 1) as f64;
let f1 = (m + 1) as f64;
let z = (f64_to_i64(n) + 1 - m) as f64;
let w = (f64_to_i64(n) - y + 1) as f64;
fn stirling(a: f64) -> f64 {
let a2 = a * a;
(13860. - (462. - (132. - (99. - 140. / a2) / a2) / a2) / a2) / a / 166320.
}
if alpha
> x_m * (f1 / x1).ln()
+ (n - (m as f64) + 0.5) * (z / w).ln()
+ ((y - m) as f64) * (w * p / (x1 * q)).ln()
// We use the signs from the GSL implementation, which are
// different than the ones in the reference. According to
// the GSL authors, the new signs were verified to be
// correct by one of the original designers of the
// algorithm.
+ stirling(f1)
+ stirling(z)
- stirling(x1)
- stirling(w)
{
continue;
}
break;
}
assert!(y >= 0);
result = y as u64;
}
// Invert the result for p < 0.5.
if p != self.p {
self.n - result
} else {
result
}
}
}
#[cfg(test)]
mod test {
use super::Binomial;
use crate::distributions::Distribution;
use crate::Rng;
fn test_binomial_mean_and_variance<R: Rng>(n: u64, p: f64, rng: &mut R) {
let binomial = Binomial::new(n, p);
let expected_mean = n as f64 * p;
let expected_variance = n as f64 * p * (1.0 - p);
let mut results = [0.0; 1000];
for i in results.iter_mut() {
*i = binomial.sample(rng) as f64;
}
let mean = results.iter().sum::<f64>() / results.len() as f64;
assert!(
(mean as f64 - expected_mean).abs() < expected_mean / 50.0,
"mean: {}, expected_mean: {}",
mean,
expected_mean
);
let variance =
results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>() / results.len() as f64;
assert!(
(variance - expected_variance).abs() < expected_variance / 10.0,
"variance: {}, expected_variance: {}",
variance,
expected_variance
);
}
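// Rough sanity check on the tolerance above (own arithmetic): with 1000
// samples the standard error of the sample mean is sqrt(n p (1-p) / 1000).
// For n = 150, p = 0.1 that is sqrt(13.5 / 1000) ~= 0.116, and the allowed
// deviation expected_mean / 50 = 0.3 is about 2.6 standard errors, so the
// bound passes comfortably for the fixed seeds used below but is not a
// strict statistical guarantee.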
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn | () {
let mut rng = crate::test::rng(351);
test_binomial_mean_and_variance(150, 0.1, &mut rng);
test_binomial_mean_and_variance(70, 0.6, &mut rng);
test_binomial_mean_and_variance(40, 0.5, &mut rng);
test_binomial_mean_and_variance(20, 0.7, &mut rng);
test_binomial_mean_and_variance(20, 0.5, &mut rng);
}
#[test]
fn test_binomial_end_points() {
let mut rng = crate::test::rng(352);
assert_eq!(rng.sample(Binomial::new(20, 0.0)), 0);
assert_eq!(rng.sample(Binomial::new(20, 1.0)), 20);
}
#[test]
#[should_panic]
fn test_binomial_invalid_lambda_neg() {
Binomial::new(20, -10.0);
}
}
| test_binomial | identifier_name |
binomial.rs | // Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The binomial distribution.
#![allow(deprecated)]
#![allow(clippy::all)]
use crate::distributions::{Distribution, Uniform};
use crate::Rng;
/// The binomial distribution `Binomial(n, p)`.
///
/// This distribution has density function:
/// `f(k) = n!/(k! (n-k)!) p^k (1-p)^(n-k)` for `k >= 0`.
#[deprecated(since = "0.7.0", note = "moved to rand_distr crate")]
#[derive(Clone, Copy, Debug)]
pub struct Binomial {
/// Number of trials.
n: u64,
/// Probability of success.
p: f64,
}
impl Binomial {
/// Construct a new `Binomial` with the given shape parameters `n` (number
/// of trials) and `p` (probability of success).
///
/// Panics if `p < 0` or `p > 1`.
pub fn new(n: u64, p: f64) -> Binomial {
assert!(p >= 0.0, "Binomial::new called with p < 0");
assert!(p <= 1.0, "Binomial::new called with p > 1");
Binomial { n, p }
}
}
/// Convert a `f64` to an `i64`, panicking on overflow.
// In the future (Rust 1.34), this might be replaced with `TryFrom`.
fn f64_to_i64(x: f64) -> i64 {
assert!(x < (::std::i64::MAX as f64));
x as i64
}
impl Distribution<u64> for Binomial {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
// Handle these values directly.
if self.p == 0.0 {
return 0;
} else if self.p == 1.0 {
return self.n;
}
// The binomial distribution is symmetric under (p, k) -> (1 - p, n - k), so
// switch p so that it is at most 0.5 - this keeps the expected value lower -
// and simply invert the result at the end.
let p = if self.p <= 0.5 { self.p } else { 1.0 - self.p };
let result;
let q = 1. - p;
// For small n * min(p, 1 - p), the BINV algorithm based on the inverse
// transformation of the binomial distribution is efficient. Otherwise,
// the BTPE algorithm is used.
//
// Voratas Kachitvichyanukul and Bruce W. Schmeiser. 1988. Binomial
// random variate generation. Commun. ACM 31, 2 (February 1988),
// 216-222. http://dx.doi.org/10.1145/42372.42381
// Threshold for preferring the BINV algorithm. The paper suggests 10,
// Ranlib uses 30, and GSL uses 14.
const BINV_THRESHOLD: f64 = 10.;
if (self.n as f64) * p < BINV_THRESHOLD && self.n <= (::std::i32::MAX as u64) {
// Use the BINV algorithm.
let s = p / q;
let a = ((self.n + 1) as f64) * s;
let mut r = q.powi(self.n as i32);
let mut u: f64 = rng.gen();
let mut x = 0;
while u > r as f64 {
u -= r;
x += 1;
r *= a / (x as f64) - s;
}
result = x;
} else {
// Use the BTPE algorithm.
// Threshold for using the squeeze algorithm. This can be freely
// chosen based on performance. Ranlib and GSL use 20.
const SQUEEZE_THRESHOLD: i64 = 20;
// Step 0: Calculate constants as functions of `n` and `p`.
let n = self.n as f64;
let np = n * p;
let npq = np * q;
let f_m = np + p;
let m = f64_to_i64(f_m);
// radius of triangle region, since height=1 also area of region
let p1 = (2.195 * npq.sqrt() - 4.6 * q).floor() + 0.5;
// tip of triangle
let x_m = (m as f64) + 0.5;
// left edge of triangle
let x_l = x_m - p1;
// right edge of triangle
let x_r = x_m + p1;
let c = 0.134 + 20.5 / (15.3 + (m as f64));
// p1 + area of parallelogram region
let p2 = p1 * (1. + 2. * c);
fn lambda(a: f64) -> f64 |
let lambda_l = lambda((f_m - x_l) / (f_m - x_l * p));
let lambda_r = lambda((x_r - f_m) / (x_r * q));
// p1 + area of left tail
let p3 = p2 + c / lambda_l;
// p1 + area of right tail
let p4 = p3 + c / lambda_r;
// return value
let mut y: i64;
let gen_u = Uniform::new(0., p4);
let gen_v = Uniform::new(0., 1.);
loop {
// Step 1: Generate `u` for selecting the region. If region 1 is
// selected, generate a triangularly distributed variate.
let u = gen_u.sample(rng);
let mut v = gen_v.sample(rng);
if !(u > p1) {
y = f64_to_i64(x_m - p1 * v + u);
break;
}
if !(u > p2) {
// Step 2: Region 2, parallelograms. Check if region 2 is
// used. If so, generate `y`.
let x = x_l + (u - p1) / c;
v = v * c + 1.0 - (x - x_m).abs() / p1;
if v > 1. {
continue;
} else {
y = f64_to_i64(x);
}
} else if !(u > p3) {
// Step 3: Region 3, left exponential tail.
y = f64_to_i64(x_l + v.ln() / lambda_l);
if y < 0 {
continue;
} else {
v *= (u - p2) * lambda_l;
}
} else {
// Step 4: Region 4, right exponential tail.
y = f64_to_i64(x_r - v.ln() / lambda_r);
if y > 0 && (y as u64) > self.n {
continue;
} else {
v *= (u - p3) * lambda_r;
}
}
// Step 5: Acceptance/rejection comparison.
// Step 5.0: Test for appropriate method of evaluating f(y).
let k = (y - m).abs();
if !(k > SQUEEZE_THRESHOLD && (k as f64) < 0.5 * npq - 1.) {
// Step 5.1: Evaluate f(y) via the recursive relationship. Start the
// search from the mode.
let s = p / q;
let a = s * (n + 1.);
let mut f = 1.0;
if m < y {
let mut i = m;
loop {
i += 1;
f *= a / (i as f64) - s;
if i == y {
break;
}
}
} else if m > y {
let mut i = y;
loop {
i += 1;
f /= a / (i as f64) - s;
if i == m {
break;
}
}
}
if v > f {
continue;
} else {
break;
}
}
// Step 5.2: Squeezing. Check the value of ln(v) against the upper and
// lower bound of ln(f(y)).
let k = k as f64;
let rho = (k / npq) * ((k * (k / 3. + 0.625) + 1. / 6.) / npq + 0.5);
let t = -0.5 * k * k / npq;
let alpha = v.ln();
if alpha < t - rho {
break;
}
if alpha > t + rho {
continue;
}
// Step 5.3: Final acceptance/rejection test.
let x1 = (y + 1) as f64;
let f1 = (m + 1) as f64;
let z = (f64_to_i64(n) + 1 - m) as f64;
let w = (f64_to_i64(n) - y + 1) as f64;
fn stirling(a: f64) -> f64 {
let a2 = a * a;
(13860. - (462. - (132. - (99. - 140. / a2) / a2) / a2) / a2) / a / 166320.
}
if alpha
> x_m * (f1 / x1).ln()
+ (n - (m as f64) + 0.5) * (z / w).ln()
+ ((y - m) as f64) * (w * p / (x1 * q)).ln()
// We use the signs from the GSL implementation, which are
// different than the ones in the reference. According to
// the GSL authors, the new signs were verified to be
// correct by one of the original designers of the
// algorithm.
+ stirling(f1)
+ stirling(z)
- stirling(x1)
- stirling(w)
{
continue;
}
break;
}
assert!(y >= 0);
result = y as u64;
}
// Invert the result for p < 0.5.
if p != self.p {
self.n - result
} else {
result
}
}
}
#[cfg(test)]
mod test {
use super::Binomial;
use crate::distributions::Distribution;
use crate::Rng;
fn test_binomial_mean_and_variance<R: Rng>(n: u64, p: f64, rng: &mut R) {
let binomial = Binomial::new(n, p);
let expected_mean = n as f64 * p;
let expected_variance = n as f64 * p * (1.0 - p);
let mut results = [0.0; 1000];
for i in results.iter_mut() {
*i = binomial.sample(rng) as f64;
}
let mean = results.iter().sum::<f64>() / results.len() as f64;
assert!(
(mean as f64 - expected_mean).abs() < expected_mean / 50.0,
"mean: {}, expected_mean: {}",
mean,
expected_mean
);
let variance =
results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>() / results.len() as f64;
assert!(
(variance - expected_variance).abs() < expected_variance / 10.0,
"variance: {}, expected_variance: {}",
variance,
expected_variance
);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_binomial() {
let mut rng = crate::test::rng(351);
test_binomial_mean_and_variance(150, 0.1, &mut rng);
test_binomial_mean_and_variance(70, 0.6, &mut rng);
test_binomial_mean_and_variance(40, 0.5, &mut rng);
test_binomial_mean_and_variance(20, 0.7, &mut rng);
test_binomial_mean_and_variance(20, 0.5, &mut rng);
}
#[test]
fn test_binomial_end_points() {
let mut rng = crate::test::rng(352);
assert_eq!(rng.sample(Binomial::new(20, 0.0)), 0);
assert_eq!(rng.sample(Binomial::new(20, 1.0)), 20);
}
#[test]
#[should_panic]
fn test_binomial_invalid_lambda_neg() {
Binomial::new(20, -10.0);
}
}
| {
a * (1. + 0.5 * a)
} | identifier_body |
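// The nested polynomial in `stirling(a)` above is the truncated correction
// series from Stirling's approximation to ln(a!); expanding it (own
// arithmetic, worth re-checking) gives
//
//   stirling(a) = 1/(12a) - 1/(360a^3) + 1/(1260a^5) - 1/(1680a^7) + 1/(1188a^9),
//
// the first five correction terms of
//
//   \ln a! \approx \tfrac{1}{2}\ln(2\pi a) + a \ln a - a + \frac{1}{12a}
//                  - \frac{1}{360a^3} + \frac{1}{1260a^5} - \frac{1}{1680a^7} + \frac{1}{1188a^9},
//
// which is why it shows up in the BTPE acceptance test for ln f(y).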
main.rs | use anyhow::{Result, bail};
use futures::{FutureExt, StreamExt};
use warp::Filter;
use warp::ws::{Message, WebSocket};
use tokio::sync::{mpsc, RwLock};
use std::collections::{HashMap, hash_map};
use std::sync::{
Arc,
atomic::{AtomicUsize, Ordering},
};
use rand::seq::IteratorRandom;
use std::convert::Infallible;
use schema::{Message as WsMsg, Role, Prompt, Answer, LoginRejectedReason};
mod util;
mod deck;
use util::expand_underscores;
use deck::Deck;
#[derive(Default)]
struct Game {
prompts: Deck<Prompt>,
answers: Deck<Answer>,
round: Option<Round>,
clients: HashMap<usize, mpsc::UnboundedSender<schema::Message>>,
players: HashMap<usize, Player>,
}
static N_CARDS_IN_HAND: usize = 4;
static MIN_N_PLAYERS: usize = 3;
static MAX_N_PLAYERS: usize = 3;
static N_UNDERSCORES: usize = 5;
impl Game {
fn distribute_cards(&mut self) {
for player in &mut self.players.values_mut() {
if player.hand.len() < N_CARDS_IN_HAND {
player.hand.extend(self.answers.draw(N_CARDS_IN_HAND - player.hand.len()));
}
}
}
fn new_round(&mut self) -> Result<()> {
if self.players.len() == 0 {
bail!("There are no players!");
}
let mut next_czar = 0;
// Discard current round
if let Some(Round{ prompt, answers, czar, .. }) = self.round.take() {
next_czar = czar+1;
self.prompts.discard(&[prompt]);
for cards in answers.values() {
self.answers.discard(cards);
}
}
// Find next czar
let mut player_ids = self.players.keys().collect::<Vec<_>>();
player_ids.sort_unstable();
if let Err(idx) = player_ids.binary_search(&&next_czar) {
// There's no player with ID next_czar
if idx == player_ids.len() {
// There isn't a greater key
next_czar = *player_ids[0];
} else {
// There is a key greater than next_czar
next_czar = *player_ids[idx];
}
}
// Create new round
println!("Players to choose from: {:?}", self.players.keys().map(|u| u.to_string()).collect::<Vec<_>>().join(", "));
let round = Round {
prompt: self.prompts.draw_once(),
// TODO cycle Czars
czar: next_czar,
answers: Default::default(),
state: RoundState::Answering,
};
println!("Next czar is Player #{}", round.czar);
// Distribute cards and notify players
self.distribute_cards();
for (id, player) in &mut self.players {
let role = if *id == round.czar { Role::Czar } else { Role::Player };
self.clients[id].send(WsMsg::NewRound {
role,
prompt: round.prompt.clone(),
hand: player.hand.clone(),
})?;
}
// Set new round
self.round = Some(round);
Ok(())
}
fn broadcast_to_players(&mut self, msg: &WsMsg) -> Result<()> {
for id in self.players.keys() {
self.clients[id].send(msg.clone())?;
}
Ok(())
}
}
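// `deck::Deck` is defined in deck.rs, which is not shown here. Inferred only
// from the calls in this file (`draw`, `draw_once`, `discard`, `reset`,
// `extend`, `Default`), it might look roughly like the sketch below; this is
// an assumption, not the actual implementation, so it is left commented out.
//
// use rand::seq::SliceRandom;
//
// #[derive(Default)]
// pub struct Deck<T> {
//     draw_pile: Vec<T>,
//     discard_pile: Vec<T>,
// }
//
// impl<T: Clone> Deck<T> {
//     pub fn extend(&mut self, cards: impl IntoIterator<Item = T>) {
//         self.draw_pile.extend(cards);
//     }
//     pub fn draw(&mut self, n: usize) -> Vec<T> {
//         (0..n).map(|_| self.draw_once()).collect()
//     }
//     pub fn draw_once(&mut self) -> T {
//         if self.draw_pile.is_empty() {
//             self.reset(); // reshuffle discards back into the draw pile
//         }
//         self.draw_pile.pop().expect("deck is empty")
//     }
//     pub fn discard(&mut self, cards: &[T]) {
//         self.discard_pile.extend_from_slice(cards);
//     }
//     pub fn reset(&mut self) {
//         self.draw_pile.append(&mut self.discard_pile);
//         self.draw_pile.shuffle(&mut rand::thread_rng());
//     }
// }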
#[derive(PartialEq)]
enum RoundState {
Answering,
Judging,
}
struct Round {
prompt: Prompt,
czar: usize,
answers: HashMap<usize, Vec<Answer>>,
state: RoundState,
}
struct Player {
name: String,
hand: Vec<Answer>,
score: u64,
}
static NEXT_USER_ID: AtomicUsize = AtomicUsize::new(1);
async fn process_message(
game: &Arc<RwLock<Game>>,
user_id: usize,
msg: WsMsg,
tx: &mpsc::UnboundedSender<WsMsg>
) -> Result<()> {
match msg {
WsMsg::Login(username) => {
if game.read().await.players.len() >= MAX_N_PLAYERS {
tx.send(WsMsg::LoginRejected(LoginRejectedReason::GameIsFull))?;
return Ok(())
}
if game.read().await.players.values().any(|player| player.name == username) {
tx.send(WsMsg::LoginRejected(LoginRejectedReason::UsernameIsTaken))?;
return Ok(())
}
tx.send(WsMsg::LoginAccepted)?;
let hand = game.write().await.answers.draw(N_CARDS_IN_HAND);
let player = Player {
name: username.clone(),
hand: hand.clone(),
score: 0,
};
game.write().await.players.insert(user_id, player);
// Notify other players
game.write().await.broadcast_to_players(&WsMsg::PlayerJoined { name: username })?;
// Only start new round if there are enough players
if game.read().await.players.len() >= MIN_N_PLAYERS {
let game = &mut game.write().await;
let round = if let Some(round) = &game.round {
round
} else {
// TODO lobby
println!("Starting new round");
game.new_round()?;
game.round.as_ref().unwrap()
};
// If in judgement, don't send NewRound
if round.state == RoundState::Answering {
let role = if round.czar == user_id { Role::Czar } else { Role::Player };
tx.send(WsMsg::NewRound {
role,
prompt: round.prompt.clone(),
hand: hand,
})?;
}
}
Ok(())
},
// WsMsg::Register(name) => todo!(),
// WsMsg::Ready => todo!(),
// WsMsg::NotReady => todo!(),
WsMsg::SubmitAnswer(answers) => {
if let Game {
clients,
players,
round: Some(round),
..
} = &mut *game.write().await {
if round.state != RoundState::Answering {
eprintln!("invalid query SubmitAnswer: round is in judgement phase");
return Ok(())
}
if round.czar == user_id {
eprintln!("invalid query SubmitAnswer: player is Czar");
return Ok(())
}
match round.answers.entry(user_id) {
hash_map::Entry::Occupied(_) => {
eprintln!("invalid query SubmitAnswer: player already submitted answer")
},
hash_map::Entry::Vacant(entry) => {
let hand = &mut players.get_mut(&user_id).unwrap().hand;
if !answers.iter().all(|x| hand.contains(x)) {
eprintln!("invalid query SubmitAnswer: cards are not in player's deck");
return Ok(())
}
println!("SubmitAnswer({})", answers.iter().map(Answer::to_string).collect::<Vec<_>>().join(", "));
// Remove cards from player's hand
hand.retain(|x| !answers.contains(x));
// Insert cards into submitted answers
entry.insert(answers);
tx.send(WsMsg::AnswerAccepted)?;
},
}
// Check whether all players have answered
if round.answers.len() == players.len() - 1 {
round.state = RoundState::Judging;
// If so, notify them that JUDGEMENT HAS BEGUN
// TODO maybe obfuscate the player IDs before sending
for id in players.keys() {
clients[id].send(WsMsg::ReadyToJudge(round.answers.clone()))?;
}
}
} else {
eprintln!("invalid query SubmitAnswer: there is no ongoing round");
}
// TODO send AnswerAccepted/Rejected messages
Ok(())
},
WsMsg::SubmitJudgement(answer_id) => {
let mut new_round = false;
if let Game {
clients,
players,
round: Some(round),
..
} = &mut *game.write().await {
if round.state != RoundState::Judging {
eprintln!("invalid query SubmitAnswer: round is in judgement phase");
return Ok(())
}
if round.czar != user_id {
eprintln!("invalid query SubmitJudgement: player isn't Czar");
return Ok(())
}
match round.answers.get(&answer_id) {
None => {
eprintln!("invalid query SubmitJudgement: user ID does not exist");
},
Some(winning_answers) => {
let winner = {
// Increment winner's scores
let winner = players.get_mut(&answer_id).unwrap();
winner.score += 1;
// Get winner's name
winner.name.clone()
};
let scores = players.values().map(|player| (player.name.clone(), player.score)).collect();
let msg = WsMsg::RoundEnded {
winner,
winning_answers: winning_answers.clone(),
scores,
};
// Notify end of round, provide winner and scores
for id in players.keys() {
clients[id].send(msg.clone())?;
}
new_round = true;
}
}
} else {
eprintln!("invalid query SubmitAnswer: there is no ongoing round");
}
if new_round {
game.write().await.new_round()?;
}
// TODO send JudgementAccepted/Rejected messages
Ok(())
},
_ => unreachable!(),
}
}
async fn user_connected(game: Arc<RwLock<Game>>, socket: WebSocket) {
let my_id = NEXT_USER_ID.fetch_add(1, Ordering::Relaxed);
println!("User connected: #{}", my_id);
let (tx, mut rx) = socket.split();
// Manage outgoing messages to this user
let tx = {
let (tx2, rx) = mpsc::unbounded_channel();
tokio::task::spawn(rx.map(|msg| {
Ok(Message::text(serde_json::to_string(&msg).unwrap()))
}).forward(tx).map(move |result| {
if let Err(e) = result {
eprintln!("websocket send error: {}", e);
}
}));
tx2
};
game.write().await.clients.insert(my_id, tx.clone());
// Manage incoming messages from this user
while let Some(result) = rx.next().await {
let msg = match result {
Ok(msg) => msg,
Err(e) => {
eprintln!("websocket error with user {}: {}", my_id, e);
break;
}
};
if let Ok(text) = msg.to_str() {
if let Ok(response) = serde_json::from_str::<WsMsg>(text) {
if let Err(_) = process_message(&game, my_id, response, &tx).await {
eprintln!("Error while processing message from player #{}", my_id);
break;
}
} else {
eprintln!("cannot read message");
}
}
}
println!("Client #{} disconnected", my_id);
user_disconnected(game, my_id).await;
}
async fn user_disconnected(game: Arc<RwLock<Game>>, user_id: usize) {
let game = &mut *game.write().await;
game.clients.remove(&user_id);
if let Some(player) = game.players.remove(&user_id) {
// Discard player's answers
game.answers.discard(&player.hand);
// Discard player's submitted answers, if any
let mut user_is_czar = false;
if let Game {
answers,
round: Some(Round { answers: submitted_answers, czar, .. }),
..
} = game {
if let Some(cards) = submitted_answers.remove(&user_id) {
answers.discard(&cards);
}
user_is_czar = *czar == user_id;
}
// If player is Czar, return submitted answers to owners and restart round
if user_is_czar {
let mut round = game.round.take().unwrap();
game.prompts.discard(&[round.prompt]);
for (id, player) in game.players.iter_mut() {
player.hand.extend(round.answers.remove(id).into_iter().flatten());
}
if game.players.len() > 0 {
game.new_round().expect("Couldn't start new round");
}
}
// Notify other players
game.broadcast_to_players(&WsMsg::PlayerLeft { name: player.name.clone() });
}
// If not enough players, cancel round
if game.players.len() < MIN_N_PLAYERS {
game.round = None;
game.answers.reset();
game.prompts.reset();
for id in game.players.keys() {
game.clients[id].send(WsMsg::GameEnded);
}
// Clear player hands, to avoid double-discard
for player in game.players.values_mut() {
player.hand.clear();
}
}
}
use ron;
use std::fs::File;
use serde::de::DeserializeOwned;
fn | <Card: DeserializeOwned>(filename: &str) -> Result<Vec<Card>, Box<dyn std::error::Error>> {
let file = File::open(filename)?;
Ok(ron::de::from_reader(file)?)
}
fn load_prompts(filename: &str) -> Result<impl Iterator<Item=Prompt>, Box<dyn std::error::Error>> {
Ok(load_deck::<Prompt>(filename)?
.into_iter()
.map(|prompt| {
Prompt::new(
expand_underscores(&prompt.content, N_UNDERSCORES),
prompt.n_answers
)
}))
}
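// `util::expand_underscores` is defined in util.rs, which is not shown here.
// Judging only by the call above, it presumably widens the blank in a prompt
// to `N_UNDERSCORES` characters; a hypothetical sketch (assumption, not the
// real code):
//
// pub fn expand_underscores(text: &str, width: usize) -> String {
//     let blank = "_".repeat(width);
//     let mut out = String::new();
//     let mut in_run = false;
//     for ch in text.chars() {
//         if ch == '_' {
//             // collapse each run of '_' into one fixed-width blank
//             if !in_run {
//                 out.push_str(&blank);
//                 in_run = true;
//             }
//         } else {
//             in_run = false;
//             out.push(ch);
//         }
//     }
//     out
// }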
// async fn login(username: String, game: Arc<RwLock<Game>>) -> Result<impl warp::Reply, Infallible> {
// Ok(warp::reply::json(&WsMsg::LoginAccepted));
// Ok(warp::reply::json(&WsMsg::LoginRejected(LoginRejectedReason::GameIsFull)))
// }
#[tokio::main]
async fn main() {
let mut game_state = Game::default();
game_state.prompts.extend(load_prompts("assets/prompts.ron").unwrap());
game_state.answers.extend(load_deck("assets/answers.ron").unwrap());
let game_state = Arc::new(RwLock::new(game_state));
let game_state = warp::any().map(move || game_state.clone());
// warp::path!("login")
// .and(warp::post())
// .and(warp::body::json())
// .and(game_state.clone())
// .and_then(login);
let login = warp::path::end()
.map(|| {
"Hello World!"
});
let game = warp::path::end()
.and(warp::ws())
.and(game_state)
.map(|ws: warp::ws::Ws, game| {
ws.on_upgrade(move |socket| user_connected(game, socket))
});
// Combine routes: websocket upgrades go to the game handler, plain requests get the login placeholder
let routes = game.or(login);
warp::serve(routes).run(([0, 0, 0, 0], 8000)).await;
}
| load_deck | identifier_name |
main.rs | use anyhow::{Result, bail};
use futures::{FutureExt, StreamExt};
use warp::Filter;
use warp::ws::{Message, WebSocket};
use tokio::sync::{mpsc, RwLock};
use std::collections::{HashMap, hash_map};
use std::sync::{
Arc,
atomic::{AtomicUsize, Ordering},
};
use rand::seq::IteratorRandom;
use std::convert::Infallible;
use schema::{Message as WsMsg, Role, Prompt, Answer, LoginRejectedReason};
mod util;
mod deck;
use util::expand_underscores;
use deck::Deck;
#[derive(Default)]
struct Game {
prompts: Deck<Prompt>,
answers: Deck<Answer>,
round: Option<Round>,
clients: HashMap<usize, mpsc::UnboundedSender<schema::Message>>,
players: HashMap<usize, Player>,
}
static N_CARDS_IN_HAND: usize = 4;
static MIN_N_PLAYERS: usize = 3;
static MAX_N_PLAYERS: usize = 3;
static N_UNDERSCORES: usize = 5;
impl Game {
fn distribute_cards(&mut self) {
for player in &mut self.players.values_mut() {
if player.hand.len() < N_CARDS_IN_HAND {
player.hand.extend(self.answers.draw(N_CARDS_IN_HAND - player.hand.len()));
}
}
}
fn new_round(&mut self) -> Result<()> {
if self.players.len() == 0 {
bail!("There are no players!");
}
let mut next_czar = 0;
// Discard current round
if let Some(Round{ prompt, answers, czar, .. }) = self.round.take() {
next_czar = czar+1;
self.prompts.discard(&[prompt]);
for cards in answers.values() {
self.answers.discard(cards);
}
}
// Find next czar
let mut player_ids = self.players.keys().collect::<Vec<_>>();
player_ids.sort_unstable();
if let Err(idx) = player_ids.binary_search(&&next_czar) {
// There's no player with ID next_czar
if idx == player_ids.len() {
// There isn't a greater key
next_czar = *player_ids[0];
} else {
// There is a key greater than next_czar
next_czar = *player_ids[idx];
}
}
// Create new round
println!("Players to choose from: {:?}", self.players.keys().map(|u| u.to_string()).collect::<Vec<_>>().join(", "));
let round = Round {
prompt: self.prompts.draw_once(),
// TODO cycle Czars
czar: next_czar,
answers: Default::default(),
state: RoundState::Answering,
};
println!("Next czar is Player #{}", round.czar);
// Distribute cards and notify players
self.distribute_cards();
for (id, player) in &mut self.players {
let role = if *id == round.czar { Role::Czar } else { Role::Player };
self.clients[id].send(WsMsg::NewRound {
role,
prompt: round.prompt.clone(),
hand: player.hand.clone(),
})?;
}
// Set new round
self.round = Some(round);
Ok(())
}
fn broadcast_to_players(&mut self, msg: &WsMsg) -> Result<()> {
for id in self.players.keys() {
self.clients[id].send(msg.clone())?;
}
Ok(())
}
}
#[derive(PartialEq)]
enum RoundState {
Answering,
Judging,
}
struct Round {
prompt: Prompt,
czar: usize,
answers: HashMap<usize, Vec<Answer>>,
state: RoundState,
}
struct Player {
name: String,
hand: Vec<Answer>,
score: u64,
}
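// The `schema::Message` enum (aliased `WsMsg`) lives in a separate crate and
// is not shown here; the variants below are read off from how this file uses
// them, so the exact field names and types are inferred, not authoritative:
//
//   client -> server: Login(String), SubmitAnswer(Vec<Answer>),
//                     SubmitJudgement(usize)
//   server -> client: LoginAccepted, LoginRejected(LoginRejectedReason),
//                     NewRound { role, prompt, hand }, AnswerAccepted,
//                     ReadyToJudge(HashMap<usize, Vec<Answer>>),
//                     RoundEnded { winner, winning_answers, scores },
//                     PlayerJoined { name }, PlayerLeft { name }, GameEnded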
static NEXT_USER_ID: AtomicUsize = AtomicUsize::new(1);
async fn process_message(
game: &Arc<RwLock<Game>>,
user_id: usize,
msg: WsMsg,
tx: &mpsc::UnboundedSender<WsMsg>
) -> Result<()> {
match msg {
WsMsg::Login(username) => {
if game.read().await.players.len() >= MAX_N_PLAYERS {
tx.send(WsMsg::LoginRejected(LoginRejectedReason::GameIsFull))?;
return Ok(())
}
if game.read().await.players.values().any(|player| player.name == username) {
tx.send(WsMsg::LoginRejected(LoginRejectedReason::UsernameIsTaken))?;
return Ok(())
}
tx.send(WsMsg::LoginAccepted)?;
let hand = game.write().await.answers.draw(N_CARDS_IN_HAND);
let player = Player {
name: username.clone(),
hand: hand.clone(),
score: 0,
};
game.write().await.players.insert(user_id, player);
// Notify other players
game.write().await.broadcast_to_players(&WsMsg::PlayerJoined { name: username })?;
// Only start new round if there are enough players
if game.read().await.players.len() >= MIN_N_PLAYERS {
let game = &mut game.write().await;
let round = if let Some(round) = &game.round {
round
} else {
// TODO lobby
println!("Starting new round");
game.new_round()?;
game.round.as_ref().unwrap()
};
// If in judgement, don't send NewRound
if round.state == RoundState::Answering {
let role = if round.czar == user_id { Role::Czar } else { Role::Player };
tx.send(WsMsg::NewRound {
role,
prompt: round.prompt.clone(),
hand: hand,
})?;
}
}
Ok(())
},
// WsMsg::Register(name) => todo!(),
// WsMsg::Ready => todo!(),
// WsMsg::NotReady => todo!(),
WsMsg::SubmitAnswer(answers) => {
if let Game {
clients,
players,
round: Some(round),
..
} = &mut *game.write().await {
if round.state != RoundState::Answering {
eprintln!("invalid query SubmitAnswer: round is in judgement phase");
return Ok(())
}
if round.czar == user_id {
eprintln!("invalid query SubmitAnswer: player is Czar");
return Ok(())
}
match round.answers.entry(user_id) {
hash_map::Entry::Occupied(_) => {
eprintln!("invalid query SubmitAnswer: player already submitted answer")
},
hash_map::Entry::Vacant(entry) => {
let hand = &mut players.get_mut(&user_id).unwrap().hand;
if !answers.iter().all(|x| hand.contains(x)) {
eprintln!("invalid query SubmitAnswer: cards are not in player's deck");
return Ok(())
}
println!("SubmitAnswer({})", answers.iter().map(Answer::to_string).collect::<Vec<_>>().join(", "));
// Remove cards from player's hand
hand.retain(|x| !answers.contains(x));
// Insert cards into submitted answers
entry.insert(answers);
tx.send(WsMsg::AnswerAccepted)?;
},
}
// Check whether all players have answered
if round.answers.len() == players.len() - 1 {
round.state = RoundState::Judging;
// If so, notify them that JUDGEMENT HAS BEGUN
// TODO maybe obfuscate the player IDs before sending
for id in players.keys() {
clients[id].send(WsMsg::ReadyToJudge(round.answers.clone()))?;
}
}
} else {
eprintln!("invalid query SubmitAnswer: there is no ongoing round");
}
// TODO send AnswerAccepted/Rejected messages
Ok(())
},
WsMsg::SubmitJudgement(answer_id) => {
let mut new_round = false;
if let Game {
clients,
players,
round: Some(round),
..
} = &mut *game.write().await {
if round.state != RoundState::Judging {
eprintln!("invalid query SubmitAnswer: round is in judgement phase");
return Ok(())
}
if round.czar != user_id {
eprintln!("invalid query SubmitJudgement: player isn't Czar");
return Ok(())
}
match round.answers.get(&answer_id) {
None => {
eprintln!("invalid query SubmitJudgement: user ID does not exist");
},
Some(winning_answers) => {
let winner = {
// Increment winner's scores
let winner = players.get_mut(&answer_id).unwrap();
winner.score += 1;
// Get winner's name
winner.name.clone()
};
let scores = players.values().map(|player| (player.name.clone(), player.score)).collect();
let msg = WsMsg::RoundEnded {
winner,
winning_answers: winning_answers.clone(),
scores,
};
// Notify end of round, provide winner and scores
for id in players.keys() {
clients[id].send(msg.clone())?;
}
new_round = true;
}
}
} else {
eprintln!("invalid query SubmitAnswer: there is no ongoing round");
}
if new_round {
game.write().await.new_round()?;
}
// TODO send JudgementAccepted/Rejected messages
Ok(())
},
_ => unreachable!(),
}
}
async fn user_connected(game: Arc<RwLock<Game>>, socket: WebSocket) {
let my_id = NEXT_USER_ID.fetch_add(1, Ordering::Relaxed);
println!("User connected: #{}", my_id);
let (tx, mut rx) = socket.split();
// Manage outgoing messages to this user
let tx = {
let (tx2, rx) = mpsc::unbounded_channel();
tokio::task::spawn(rx.map(|msg| {
Ok(Message::text(serde_json::to_string(&msg).unwrap()))
}).forward(tx).map(move |result| {
if let Err(e) = result {
eprintln!("websocket send error: {}", e);
}
}));
tx2
};
game.write().await.clients.insert(my_id, tx.clone());
// Manage incoming messages from this user
while let Some(result) = rx.next().await {
let msg = match result {
Ok(msg) => msg,
Err(e) => {
eprintln!("websocket error with user {}: {}", my_id, e);
break;
}
};
if let Ok(text) = msg.to_str() {
if let Ok(response) = serde_json::from_str::<WsMsg>(text) {
if let Err(_) = process_message(&game, my_id, response, &tx).await {
eprintln!("Error while processing message from player #{}", my_id);
break;
}
} else {
eprintln!("cannot read message");
}
}
}
println!("Client #{} disconnected", my_id);
user_disconnected(game, my_id).await;
}
async fn user_disconnected(game: Arc<RwLock<Game>>, user_id: usize) |
use ron;
use std::fs::File;
use serde::de::DeserializeOwned;
fn load_deck<Card: DeserializeOwned>(filename: &str) -> Result<Vec<Card>, Box<dyn std::error::Error>> {
let file = File::open(filename)?;
Ok(ron::de::from_reader(file)?)
}
fn load_prompts(filename: &str) -> Result<impl Iterator<Item=Prompt>, Box<dyn std::error::Error>> {
Ok(load_deck::<Prompt>(filename)?
.into_iter()
.map(|prompt| {
Prompt::new(
expand_underscores(&prompt.content, N_UNDERSCORES),
prompt.n_answers
)
}))
}
// async fn login(username: String, game: Arc<RwLock<Game>>) -> Result<impl warp::Reply, Infallible> {
// Ok(warp::reply::json(&WsMsg::LoginAccepted));
// Ok(warp::reply::json(&WsMsg::LoginRejected(LoginRejectedReason::GameIsFull)))
// }
#[tokio::main]
async fn main() {
let mut game_state = Game::default();
game_state.prompts.extend(load_prompts("assets/prompts.ron").unwrap());
game_state.answers.extend(load_deck("assets/answers.ron").unwrap());
let game_state = Arc::new(RwLock::new(game_state));
let game_state = warp::any().map(move || game_state.clone());
// warp::path!("login")
// .and(warp::post())
// .and(warp::body::json())
// .and(game_state.clone())
// .and_then(login);
let login = warp::path::end()
.map(|| {
"Hello World!"
});
let game = warp::path::end()
.and(warp::ws())
.and(game_state)
.map(|ws: warp::ws::Ws, game| {
ws.on_upgrade(move |socket| user_connected(game, socket))
});
// Combine routes: websocket upgrades go to the game handler, plain requests get the login placeholder
let routes = game.or(login);
warp::serve(routes).run(([0, 0, 0, 0], 8000)).await;
}
| {
let game = &mut *game.write().await;
game.clients.remove(&user_id);
if let Some(player) = game.players.remove(&user_id) {
// Discard player's answers
game.answers.discard(&player.hand);
// Discard player's submitted answers, if any
let mut user_is_czar = false;
if let Game {
answers,
round: Some(Round { answers: submitted_answers, czar, .. }),
..
} = game {
if let Some(cards) = submitted_answers.remove(&user_id) {
answers.discard(&cards);
}
user_is_czar = *czar == user_id;
}
// If player is Czar, return submitted answers to owners and restart round
if user_is_czar {
let mut round = game.round.take().unwrap();
game.prompts.discard(&[round.prompt]);
for (id, player) in game.players.iter_mut() {
player.hand.extend(round.answers.remove(id).into_iter().flatten());
}
if game.players.len() > 0 {
game.new_round().expect("Couldn't start new round");
}
}
// Notify other players
game.broadcast_to_players(&WsMsg::PlayerLeft { name: player.name.clone() });
}
// If not enough players, cancel round
if game.players.len() < MIN_N_PLAYERS {
game.round = None;
game.answers.reset();
game.prompts.reset();
for id in game.players.keys() {
game.clients[id].send(WsMsg::GameEnded);
}
// Clear player hands, to avoid double-discard
for player in game.players.values_mut() {
player.hand.clear();
}
}
} | identifier_body |
main.rs | use anyhow::{Result, bail};
use futures::{FutureExt, StreamExt};
use warp::Filter;
use warp::ws::{Message, WebSocket};
use tokio::sync::{mpsc, RwLock};
use std::collections::{HashMap, hash_map};
use std::sync::{
Arc,
atomic::{AtomicUsize, Ordering},
};
use rand::seq::IteratorRandom;
use std::convert::Infallible;
use schema::{Message as WsMsg, Role, Prompt, Answer, LoginRejectedReason};
mod util;
mod deck;
use util::expand_underscores;
use deck::Deck;
#[derive(Default)]
struct Game {
prompts: Deck<Prompt>,
answers: Deck<Answer>,
round: Option<Round>,
clients: HashMap<usize, mpsc::UnboundedSender<schema::Message>>,
players: HashMap<usize, Player>,
}
static N_CARDS_IN_HAND: usize = 4;
static MIN_N_PLAYERS: usize = 3;
static MAX_N_PLAYERS: usize = 3;
static N_UNDERSCORES: usize = 5;
impl Game {
fn distribute_cards(&mut self) {
for player in &mut self.players.values_mut() {
if player.hand.len() < N_CARDS_IN_HAND {
player.hand.extend(self.answers.draw(N_CARDS_IN_HAND - player.hand.len()));
}
}
}
fn new_round(&mut self) -> Result<()> {
if self.players.len() == 0 {
bail!("There are no players!");
}
let mut next_czar = 0;
// Discard current round
if let Some(Round{ prompt, answers, czar, .. }) = self.round.take() {
next_czar = czar+1;
self.prompts.discard(&[prompt]);
for cards in answers.values() {
self.answers.discard(cards);
}
}
// Find next czar
let mut player_ids = self.players.keys().collect::<Vec<_>>();
player_ids.sort_unstable();
if let Err(idx) = player_ids.binary_search(&&next_czar) {
// There's no player with ID next_czar
if idx == player_ids.len() {
// There isn't a greater key
next_czar = *player_ids[0];
} else {
// There is a key greater than next_czar
next_czar = *player_ids[idx];
}
}
// Create new round
println!("Players to choose from: {:?}", self.players.keys().map(|u| u.to_string()).collect::<Vec<_>>().join(", "));
let round = Round {
prompt: self.prompts.draw_once(),
// TODO cycle Czars
czar: next_czar,
answers: Default::default(),
state: RoundState::Answering,
};
println!("Next czar is Player #{}", round.czar);
// Distribute cards and notify players
self.distribute_cards();
for (id, player) in &mut self.players {
let role = if *id == round.czar { Role::Czar } else { Role::Player };
self.clients[id].send(WsMsg::NewRound {
role,
prompt: round.prompt.clone(),
hand: player.hand.clone(),
})?;
}
// Set new round
self.round = Some(round);
Ok(())
}
fn broadcast_to_players(&mut self, msg: &WsMsg) -> Result<()> {
for id in self.players.keys() {
self.clients[id].send(msg.clone())?;
}
Ok(())
}
}
#[derive(PartialEq)]
enum RoundState {
Answering,
Judging,
}
struct Round {
prompt: Prompt,
czar: usize,
answers: HashMap<usize, Vec<Answer>>,
state: RoundState,
}
struct Player {
name: String,
hand: Vec<Answer>,
score: u64,
}
static NEXT_USER_ID: AtomicUsize = AtomicUsize::new(1);
async fn process_message(
game: &Arc<RwLock<Game>>,
user_id: usize,
msg: WsMsg,
tx: &mpsc::UnboundedSender<WsMsg>
) -> Result<()> {
match msg {
WsMsg::Login(username) => {
if game.read().await.players.len() >= MAX_N_PLAYERS {
tx.send(WsMsg::LoginRejected(LoginRejectedReason::GameIsFull))?;
return Ok(())
}
if game.read().await.players.values().any(|player| player.name == username) {
tx.send(WsMsg::LoginRejected(LoginRejectedReason::UsernameIsTaken))?;
return Ok(())
}
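            // Note: the capacity/name checks above and the insert further down each take the
            // lock separately, so two simultaneous logins could in principle both pass the
            // checks before either player is inserted.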
tx.send(WsMsg::LoginAccepted)?;
let hand = game.write().await.answers.draw(N_CARDS_IN_HAND);
let player = Player {
name: username.clone(),
hand: hand.clone(),
score: 0,
};
game.write().await.players.insert(user_id, player);
// Notify other players
game.write().await.broadcast_to_players(&WsMsg::PlayerJoined { name: username })?;
// Only start new round if there are enough players
if game.read().await.players.len() >= MIN_N_PLAYERS {
let game = &mut game.write().await;
let round = if let Some(round) = &game.round {
round
} else {
// TODO lobby
println!("Starting new round");
game.new_round()?;
game.round.as_ref().unwrap()
};
// If in judgement, don't send NewRound
if round.state == RoundState::Answering {
let role = if round.czar == user_id { Role::Czar } else { Role::Player };
tx.send(WsMsg::NewRound {
role,
prompt: round.prompt.clone(),
hand: hand,
})?;
}
}
Ok(())
},
// WsMsg::Register(name) => todo!(),
// WsMsg::Ready => todo!(),
// WsMsg::NotReady => todo!(),
WsMsg::SubmitAnswer(answers) => {
if let Game {
clients,
players,
round: Some(round),
..
} = &mut *game.write().await {
if round.state != RoundState::Answering {
eprintln!("invalid query SubmitAnswer: round is in judgement phase");
return Ok(())
}
if round.czar == user_id {
eprintln!("invalid query SubmitAnswer: player is Czar");
return Ok(())
}
match round.answers.entry(user_id) {
hash_map::Entry::Occupied(_) => {
eprintln!("invalid query SubmitAnswer: player already submitted answer")
},
hash_map::Entry::Vacant(entry) => {
let hand = &mut players.get_mut(&user_id).unwrap().hand;
if !answers.iter().all(|x| hand.contains(x)) {
eprintln!("invalid query SubmitAnswer: cards are not in player's deck");
return Ok(())
}
println!("SubmitAnswer({})", answers.iter().map(Answer::to_string).collect::<Vec<_>>().join(", "));
// Remove cards from player's hand
hand.retain(|x| !answers.contains(x));
// Insert cards into submitted answers
entry.insert(answers);
tx.send(WsMsg::AnswerAccepted)?; | },
}
// Check whether all players have answered
if round.answers.len() == players.len() - 1 {
round.state = RoundState::Judging;
// If so, notify them that JUDGEMENT HAS BEGUN
// TODO maybe obfuscate the player IDs before sending
for id in players.keys() {
clients[id].send(WsMsg::ReadyToJudge(round.answers.clone()))?;
}
}
} else {
eprintln!("invalid query SubmitAnswer: there is no ongoing round");
}
            // TODO send AnswerRejected on the error paths above (AnswerAccepted is already sent)
Ok(())
},
WsMsg::SubmitJudgement(answer_id) => {
let mut new_round = false;
if let Game {
clients,
players,
round: Some(round),
..
} = &mut *game.write().await {
if round.state != RoundState::Judging {
eprintln!("invalid query SubmitAnswer: round is in judgement phase");
return Ok(())
}
if round.czar != user_id {
eprintln!("invalid query SubmitJudgement: player isn't Czar");
return Ok(())
}
match round.answers.get(&answer_id) {
None => {
eprintln!("invalid query SubmitJudgement: user ID does not exist");
},
Some(winning_answers) => {
let winner = {
// Increment winner's scores
let winner = players.get_mut(&answer_id).unwrap();
winner.score += 1;
// Get winner's name
winner.name.clone()
};
let scores = players.values().map(|player| (player.name.clone(), player.score)).collect();
let msg = WsMsg::RoundEnded {
winner,
winning_answers: winning_answers.clone(),
scores,
};
// Notify end of round, provide winner and scores
for id in players.keys() {
clients[id].send(msg.clone())?;
}
new_round = true;
}
}
} else {
eprintln!("invalid query SubmitAnswer: there is no ongoing round");
}
if new_round {
game.write().await.new_round()?;
}
// TODO send JudgementAccepted/Rejected messages
Ok(())
},
        // Remaining variants (e.g. Register/Ready) are not handled yet; log and ignore them
        // instead of panicking the connection task.
        _ => {
            eprintln!("unhandled message variant from player #{}", user_id);
            Ok(())
        },
}
}
async fn user_connected(game: Arc<RwLock<Game>>, socket: WebSocket) {
let my_id = NEXT_USER_ID.fetch_add(1, Ordering::Relaxed);
println!("User connected: #{}", my_id);
let (tx, mut rx) = socket.split();
// Manage outgoing messages to this user
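    // Outgoing WsMsg values are pushed into an unbounded channel; the task spawned below
    // serializes each one to JSON and forwards it into the websocket sink, so game code
    // never blocks on socket I/O when it sends a message.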
let tx = {
let (tx2, rx) = mpsc::unbounded_channel();
tokio::task::spawn(rx.map(|msg| {
Ok(Message::text(serde_json::to_string(&msg).unwrap()))
}).forward(tx).map(move |result| {
if let Err(e) = result {
eprintln!("websocket send error: {}", e);
}
}));
tx2
};
game.write().await.clients.insert(my_id, tx.clone());
// Manage incoming messages from this user
while let Some(result) = rx.next().await {
let msg = match result {
Ok(msg) => msg,
Err(e) => {
eprintln!("websocket error with user {}: {}", my_id, e);
break;
}
};
if let Ok(text) = msg.to_str() {
if let Ok(response) = serde_json::from_str::<WsMsg>(text) {
if let Err(_) = process_message(&game, my_id, response, &tx).await {
eprintln!("Error while processing message from player #{}", my_id);
break;
}
} else {
eprintln!("cannot read message");
}
}
}
println!("Client #{} disconnected", my_id);
user_disconnected(game, my_id).await;
}
async fn user_disconnected(game: Arc<RwLock<Game>>, user_id: usize) {
let game = &mut *game.write().await;
game.clients.remove(&user_id);
if let Some(player) = game.players.remove(&user_id) {
// Discard player's answers
game.answers.discard(&player.hand);
// Discard player's submitted answers, if any
let mut user_is_czar = false;
if let Game {
answers,
round: Some(Round { answers: submitted_answers, czar, .. }),
..
} = game {
if let Some(cards) = submitted_answers.remove(&user_id) {
answers.discard(&cards);
}
user_is_czar = *czar == user_id;
}
// If player is Czar, return submitted answers to owners and restart round
if user_is_czar {
let mut round = game.round.take().unwrap();
game.prompts.discard(&[round.prompt]);
for (id, player) in game.players.iter_mut() {
player.hand.extend(round.answers.remove(id).into_iter().flatten());
}
            if !game.players.is_empty() {
game.new_round().expect("Couldn't start new round");
}
}
// Notify other players
        // Best-effort notification; ignore send errors while this player is being removed
        let _ = game.broadcast_to_players(&WsMsg::PlayerLeft { name: player.name.clone() });
}
// If not enough players, cancel round
if game.players.len() < MIN_N_PLAYERS {
game.round = None;
game.answers.reset();
game.prompts.reset();
for id in game.players.keys() {
            let _ = game.clients[id].send(WsMsg::GameEnded); // ignore clients that already dropped
}
// Clear player hands, to avoid double-discard
for player in game.players.values_mut() {
player.hand.clear();
}
}
}
use ron;
use std::fs::File;
use serde::de::DeserializeOwned;
fn load_deck<Card: DeserializeOwned>(filename: &str) -> Result<Vec<Card>, Box<dyn std::error::Error>> {
let file = File::open(filename)?;
Ok(ron::de::from_reader(file)?)
}
fn load_prompts(filename: &str) -> Result<impl Iterator<Item=Prompt>, Box<dyn std::error::Error>> {
Ok(load_deck::<Prompt>(filename)?
.into_iter()
.map(|prompt| {
Prompt::new(
expand_underscores(&prompt.content, N_UNDERSCORES),
prompt.n_answers
)
}))
}
// async fn login(username: String, game: Arc<RwLock<Game>>) -> Result<impl warp::Reply, Infallible> {
// Ok(warp::reply::json(&WsMsg::LoginAccepted));
// Ok(warp::reply::json(&WsMsg::LoginRejected(LoginRejectedReason::GameIsFull)))
// }
#[tokio::main]
async fn main() {
let mut game_state = Game::default();
game_state.prompts.extend(load_prompts("assets/prompts.ron").unwrap());
game_state.answers.extend(load_deck("assets/answers.ron").unwrap());
let game_state = Arc::new(RwLock::new(game_state));
let game_state = warp::any().map(move || game_state.clone());
// warp::path!("login")
// .and(warp::post())
// .and(warp::body::json())
// .and(game_state.clone())
// .and_then(login);
let login = warp::path::end()
.map(|| {
"Hello World!"
});
let game = warp::path::end()
.and(warp::ws())
.and(game_state)
.map(|ws: warp::ws::Ws, game| {
ws.on_upgrade(move |socket| user_connected(game, socket))
});
// Match any request and return hello world!
let routes = game.or(login);
warp::serve(routes).run(([0, 0, 0, 0], 8000)).await;
} | random_line_split |
|
cougballoon_rcv.py | ##################################
## Michael Hamilton
## [email protected]
## #cougballoon
## v1.0 Mar 1, 2015
## v1.1 Mar 13, 2015 - added JSON
## v1.2 Apr 5, 2015 - finalized graphs
## v1.3 Apr 6, 2015 - repaired value errors
##################################
#Axis titles and legends have been created, verify they remain upon running.
#Will return previous value if there is an error with the reading, and
#will output the error to the serial monitor below
#add heading back to graph
import re
import json
import plotly
plotly.__version__
import plotly.plotly as py
import plotly.tools as tls
from plotly.graph_objs import *
import numpy as np
#https://plot.ly/python/streaming-line-tutorial/
import serial
import io
import os
#Set initial values? Why? Didn't work without it....
RMClongitude = 0
RMClatitude = 0
GGAaltitude = 0
RMCspeed = 0
RMCheading = 0
RMCday = 0
RMCmonth = 0
RMCyear = 0
RMChours = 0
RMCminutes = 0
RMCseconds = 0
extTemp = 70.0 #A
intTemp = 0 #C
vidTemp = 40.0 #E
COlevel = 0 #F
CH4level = 0 #G
HackStatus = "000000" #Hack
roll = 0
pitch = 0
heading = 0
pressure = 0
pressureAltitude = 0
temperature10DOF = 0
GGAreceived = False
RMCreceived = False
#Depending on the port we are plugged into
#ser = serial.Serial('/dev/tty.usbmodem1411', 9600)
ser = serial.Serial('/dev/tty.usbmodem1421', 9600)
#Change time to local time zone
def UTCtoPSTDST(hours):
hours = hours.rstrip('\n');
hours = hours.rstrip('\r');
hours = int(hours) + 17
    if (hours >= 24):
        hours = hours - 24
    hours = str(hours).zfill(2) #keep two digits so hours 0-9 stay zero-padded
return hours
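#e.g. UTCtoPSTDST("18\r\n") -> "11": UTC 18:00 is 11:00 PDT (UTC-7, written here as +17 mod 24)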
#Save all incoming data with a current date/time string
def saveData(a):
x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
saveAllIncomingData(x)
saveAllIncomingData(a)
#Convert GPS strings to floats (Couldn't get str.isnumeric() to work correctly)
def StringToFloatGPS(a):
a = a.rstrip('\n');
a = a.rstrip('\r');
a = float(a)
return a
#On a malformed reading this returns the previous good value (second argument) instead of a zero
#Convert data strings to floats
def StringToFloat(a, b):
#print len(a)
if (len(a) < 4):
print "Incomplete data, returning a zero."
return b
a = a[1:len(a)]
a = a.rstrip('\n');
a = a.rstrip('\r');
if (a == "-"):
print "Only a negative sign in string, returning a zero."
return b
a = float(a)
return a
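#e.g. StringToFloat("A72.5\r\n", extTemp) -> 72.5; a malformed line returns the previous value b instead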
#Saves all incoming data to a file on the desktop
def saveAllIncomingData(c):
f = open('/Users/michaelhamilton/Desktop/cougballoonData.txt', 'a')
f.write(c)
    f.close() #close (with parentheses) so the data is flushed to disk
#Convert nmea string to .kml file, send to server
def handleGPSdata(nmeaString):
#Commented out lines are for .docs, we are using .txt files instead.
#f = open('/Users/michaelhamilton/gpsbabel/nmeaRawData.doc', 'a')
f = open('/Users/michaelhamilton/gpsbabel/nmeaRawData.txt', 'a')
f.write(nmeaString)
f.close()
saveAllIncomingData(nmeaString)
#os.system("cd /Users/michaelhamilton/gpsbabel && ./gpsbabel -i nmea -f nmeaRawData.doc -o kml,deficon=http://encs.vancouver.wsu.edu/~mikehmbn/balloon-icon-map.png,line_color=FF321E98,floating=1 -F cougballoon.kml")
os.system("cd /Users/michaelhamilton/gpsbabel && ./gpsbabel -i nmea -f nmeaRawData.txt -o kml,deficon=http://encs.vancouver.wsu.edu/~mikehmbn/balloon-icon-map.png,line_color=FF321E98,floating=1 -F cougballoon.kml")
os.system("cd /Users/michaelhamilton/gpsbabel && scp cougballoon.kml [email protected]:Sites/")
print "Updated KML file was sent to the server"
return
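#Note: gpsbabel and scp run synchronously via os.system, so every GPS sentence blocks the
#serial-reading loop below until the conversion and upload finish.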
#Get JSON data and send it to the server
def parseToJson(RMClongitude, RMClatitude, GGAaltitude, RMCspeed, RMCheading, RMCday, RMCmonth, RMCyear, RMChours, RMCminutes, RMCseconds, extTemp, intTemp, vidTemp, COlevel, CH4level, HackStatus):
JSONdata2 = { 'cougballoon':[ { 'Longitude':RMClongitude, 'Latitude':RMClatitude, 'Altitude':GGAaltitude, 'Speed':RMCspeed, 'Heading':RMCheading, 'Time':{'Day':RMCday, 'Month':RMCmonth, 'Year':RMCyear, 'Hours':RMChours, 'Minutes':RMCminutes, 'Seconds':RMCseconds},'External temperature(deg F)':extTemp, 'Internal temperature(deg F)':intTemp, 'Video Transmitter temperature(deg F)':vidTemp, 'Carbon Monoxide level(ppm)':COlevel, 'Methane level(ppm)':CH4level, 'HackHD':HackStatus } ] }
data_string2 = json.dumps(JSONdata2)
#Now post it to json_data.json for the map legend
f = open('/Users/michaelhamilton/Desktop/json_data.json', 'w')
f.write(data_string2)
f.close()
os.system("scp /Users/michaelhamilton/Desktop/json_data.json [email protected]:Sites/")
#Now to handle it for json_data.html
JSONdata = [ { 'Longitude':RMClongitude, 'Latitude':RMClatitude, 'Altitude':GGAaltitude, 'Speed(mph)':RMCspeed, 'Heading':RMCheading, 'Time':{'Day':RMCday, 'Month':RMCmonth, 'Year':RMCyear, 'Hours':RMChours, 'Minutes':RMCminutes, 'Seconds':RMCseconds},'External temperature(deg F)':extTemp, 'Internal temperature(deg F)':intTemp, 'Video Transmitter temperature(deg F)':vidTemp, 'Carbon Monoxide level(ppm)':COlevel, 'Methane level(ppm)':CH4level, 'HackHD camera statuses':HackStatus } ]
data_string = json.dumps(JSONdata)
JSONdataString = str(data_string)
newJSONdata = re.match(r'\[(.*)', JSONdataString)
newJSONdata2 = "," + newJSONdata.group(1)
f = open('/Users/michaelhamilton/Desktop/json_data.html', 'r+')
jumpBack = -1 #jump back 1 spot from the end
f.seek(jumpBack, 2) #2 is the end of the file
last = f.readline() #read the last line
while (last != "]"): #if it's not a ], then keep jumping back
jumpBack = jumpBack - 1 #decrement
if (last == "]"):
f.seek(-1, 2)
f.write(newJSONdata2)
f.close()
#Send it to the server
os.system("cd /Users/michaelhamilton/Desktop && scp json_data.html [email protected]:Sites/")
print "Updated JSON information was sent to the server."
return
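#For reference, json_data.json ends up shaped roughly like this (values illustrative only):
#{"cougballoon": [{"Longitude": 12244.7, "Latitude": 4543.2, "Altitude": 1200.0, "Speed": 12.3,
# "Heading": 270.0, "Time": {"Day": "06", ...}, "External temperature(deg F)": 54.2, ...}]}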
#Parse out the data from an RMC nmea string
def RegExprNMEAdataRMC(line):
#if it's an RMC string....
print line
newRMCline = re.match( r'\$GPRMC,(\d\d)(\d\d)(\d\d).*,\D,(\d+.\d+),\D,(\d+.\d+),\D,(\d+.\d+),(\d+.\d+),(\d\d)(\d\d)(\d\d),.*,.*', line, re.I)
#All data are strings, not integers
if (newRMCline):
global RMChours
RMChours = newRMCline.group(1)
#Convert UTC hours to PST(Daylight Savings Time)
RMChours = UTCtoPSTDST(RMChours)
global RMCminutes
RMCminutes = newRMCline.group(2)
global RMCseconds
RMCseconds = newRMCline.group(3)
global RMClatitude
RMClatitude = newRMCline.group(4)
RMClatitude = StringToFloatGPS(RMClatitude)
global RMClongitude
RMClongitude = newRMCline.group(5)
RMClongitude = StringToFloatGPS(RMClongitude)
global RMCspeed
RMCspeed = newRMCline.group(6)
RMCspeed = StringToFloatGPS(RMCspeed)
global RMCheading
RMCheading = newRMCline.group(7)
RMCheading = StringToFloatGPS(RMCheading)
global RMCday
RMCday = newRMCline.group(8)
global RMCmonth
RMCmonth = newRMCline.group(9)
global RMCyear
RMCyear = newRMCline.group(10)
return True
else:
return False
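#A typical RMC sentence, for reference: $GPRMC,123519,A,4807.038,N,01131.000,E,022.4,084.4,230394,003.1,W*6A
#The groups above pull out the UTC time, latitude, longitude, speed, heading and date fields.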
#Parse out the data from a GGA nmea string
def RegExprNMEAdataGGA(line):
#if it's a GGA string....
print line
newGGAline = re.match( r'\$GPGGA,(\d\d)(\d\d)(\d\d).*,(.*..*),\D,(.*..*),\D,\d,\d\d\,\d.\d\d,(\d+.\d),\D.*', line, re.I)
#All data are strings, not integers
if (newGGAline):
global GGAhours
GGAhours = newGGAline.group(1)
#Convert UTC hours to PST(Daylight Savings Time)
GGAhours = UTCtoPSTDST(GGAhours)
global GGAminutes
GGAminutes = newGGAline.group(2)
global GGAseconds
GGAseconds = newGGAline.group(3)
global GGAlatitude
GGAlatitude = newGGAline.group(4)
GGAlatitude = StringToFloatGPS(GGAlatitude)
global GGAlongitude
GGAlongitude = newGGAline.group(5)
GGAlongitude = StringToFloatGPS(GGAlongitude)
global GGAaltitude
GGAaltitude = newGGAline.group(6)
GGAaltitude = StringToFloatGPS(GGAaltitude)
s2.write(dict(x=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), y=GGAaltitude))
return True
else:
return False
#Get my login and keys ready to send data to plot.ly
stream_ids = tls.get_credentials_file()['stream_ids']
#Set up the plotly streams
stream_id1 = stream_ids[0]#External temperature #A
stream_id2 = stream_ids[1]#GGAaltitude #B
stream_id3 = stream_ids[2]#Internal temperature #C
#stream_id4 = stream_ids[3]#Internal pressure #D
stream_id4 = stream_ids[3]#pressureAltitude #D
#stream_id5 = stream_ids[4]#Videolynx temperature #E
stream_id5 = stream_ids[4]#10DOF temperature #E
stream_id6 = stream_ids[5]#CO level in ppm #F
stream_id7 = stream_ids[6]#CH4 level in ppm #G
stream_id8 = stream_ids[7]#Humidity #J
stream_id9 = stream_ids[8]#Roll #L
stream_id10 = stream_ids[9]#Pitch #P
#stream_id11 = stream_ids[10]#Heading #Q
#stream_id12 = stream_ids[11]#Pressure #T
stream_id13 = stream_ids[12]#PressureAltitude #U
#Graph 1 data, stream names coincide with stream_ids for simplicity
#External temperature #A
stream1 = Stream(
token=stream_id1,
maxpoints=20
)
#GGAaltitude #A
stream2 = Stream(
token=stream_id2,
maxpoints=4
)
#Internal temperature #C
stream3 = Stream(
token=stream_id3,
maxpoints=20
)
#pressureAltitude #C
stream4 = Stream(
token=stream_id4,
maxpoints=20
)
#10DOF temperature #E
stream5 = Stream(
token=stream_id5,
maxpoints=20
)
#Graph 2 data, stream names coincide with stream_ids for simplicity
#CO level in ppm #G
stream6 = Stream(
token=stream_id6,
maxpoints=20
)
#CH4 level in ppm #G
stream7 = Stream(
token=stream_id7,
maxpoints=20
)
#Roll #L
stream9 = Stream(
token=stream_id9,
maxpoints=20
)
#Pitch #P
stream10 = Stream(
token=stream_id10,
maxpoints=20
)
#Heading #Q
#stream11 = Stream(
# token=stream_id11,
# maxpoints=20
#)
#Pressure #T
#stream12 = Stream(
# token=stream_id12,
# maxpoints=20
#)
#PressureAltitude #U
stream13 = Stream(
token=stream_id13,
maxpoints=20
)
#Trace names coincide with stream names
trace1 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream1
)
trace2 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream2
)
trace3 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream3
)
trace4 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream4
)
trace5 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream5
)
trace6 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream6
)
trace7 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream7
)
trace9 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream9
)
trace10 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream10
) | #)
#trace12 = Scatter(
# x=[],
# y=[],
# mode='lines+markers',
# stream=stream12
#)
trace13 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream13
)
#Set up the plotly graphs
data_graph_a = Data([trace1, trace3, trace5])
data_graph_b = Data([trace6, trace7])
data_graph_c = Data([trace9, trace10])
#data_graph_d = Data([trace2, trace4])#Does not work
data_graph_e = Data([trace2, trace4])
layout_a = Layout(title='#cougballoon temperatures')#This is the name on the graph
layout_b = Layout(title='#cougballoon air quality levels')#This is the name on the graph
layout_c = Layout(title='#cougballoon payload pitch and roll data')#This is the name on the graph
#layout_d = Layout(title='#cougballoon altitude')#This is the name on the graph
layout_e = Layout(title='#cougballoon altitude')#This is the name on the graph
fig_a = Figure(data=data_graph_a, layout=layout_a)
fig_b = Figure(data=data_graph_b, layout=layout_b)
fig_c = Figure(data=data_graph_c, layout=layout_c)
#fig_d = Figure(data=data_graph_d, layout=layout_d)
fig_e = Figure(data=data_graph_e, layout=layout_e)
unique_url_a = py.plot(fig_a, filename='cougballoon1', fileopt='extend')#Name above the graph
unique_url_b = py.plot(fig_b, filename='cougballoon2', fileopt='extend')#Name above the graph
unique_url_c = py.plot(fig_c, filename='cougballoon3', fileopt='extend')#Name above the graph
#unique_url_d = py.plot(fig_d, filename='cougballoon4', fileopt='extend')#Name above the graph
unique_url_e = py.plot(fig_e, filename='cougballoon5', fileopt='extend')#Name above the graph
#Print the plotly urls
print unique_url_a
print unique_url_b
print unique_url_c
#print unique_url_d
print unique_url_e
#Get the plotly streams ready
s1 = py.Stream(stream_id1)
s2 = py.Stream(stream_id2)
s3 = py.Stream(stream_id3)
s4 = py.Stream(stream_id4)
s5 = py.Stream(stream_id5)
s6 = py.Stream(stream_id6)
s7 = py.Stream(stream_id7)
s9 = py.Stream(stream_id9)
s10 = py.Stream(stream_id10)
#s11 = py.Stream(stream_id11)
#s12 = py.Stream(stream_id12)
#s13 = py.Stream(stream_id13)
#Open the plotly streams
s1.open()
s2.open()
s3.open()
s4.open()
s5.open()
s6.open()
s7.open()
s9.open()
s10.open()
#s11.open()
#s12.open()
#s13.open()
import datetime
import time
# Delay start of stream by 5 sec (time to switch tabs)
time.sleep(5)
#Clean out the buffers
line = ser.readline()
time.sleep(2)
line = ser.readline()
time.sleep(2)
line = ser.readline()
time.sleep(2)
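#Each incoming radio line starts with a tag that selects a branch below:
# A=external temp, B=external pressure, C=internal temp, E=video transmitter temp, F=CO ppm,
# G=CH4 ppm, J=humidity, K=spare, L=roll, P=pitch, Q=heading, T=pressure, U=pressure altitude,
# V=10DOF temp, "Hack"=HackHD camera status, "$"=raw NMEA GPS sentence.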
while True:
# Current time on x-axis, values on y-axis
x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
line = ser.readline() #properly captures incoming string
#External temperature #A
if ((line.find("A")) == 0):
print "External temperature:"
print line
y = StringToFloat(line, extTemp)
saveData(line)
extTemp = y
print y
s1.write(dict(x=x, y=y))
#External pressure #B
elif ((line.find("B")) == 0):
print "External Pressure:"
print line
        y = StringToFloat(line, 0) #StringToFloat needs a fallback value; no pressure variable is kept for "B"
saveData(line)
print y
#s2.write(dict(x=x, y=y))
#Internal temperature #C
elif ((line.find("C")) == 0):
print "Internal temperature:"
print line
y = StringToFloat(line, intTemp)
saveData(line)
intTemp = y
print y
s3.write(dict(x=x, y=y))
#Internal pressure #D
#elif ((line.find("D")) == 0):
#print "Internal pressure:"
#print line
#y = StringToFloat(line)
#saveData(line)
#print y
#s4.write(dict(x=x, y=y))
#Videolynx temperature #E
elif ((line.find("E")) == 0):
print "Videolynx temperature:"
print line
        y = StringToFloat(line, vidTemp) #fall back to the previous video transmitter reading
saveData(line)
vidTemp = y
print y
#s5.write(dict(x=x, y=y))
#CO level in ppm #F
elif ((line.find("F")) == 0):
print "CO level (in ppm):"
print line
y = StringToFloat(line, COlevel)
saveData(line)
COlevel = y
print y
s6.write(dict(x=x, y=y))
#CH4 level in ppm #G
elif ((line.find("G")) == 0):
print "CH4 level (in ppm):"
print line
y = StringToFloat(line, CH4level)
saveData(line)
CH4level = y
print y
s7.write(dict(x=x, y=y))
#Humidity #J
elif ((line.find("J")) == 0):
print "Humidity:"
print line
        y = StringToFloat(line, 0) #no previous humidity value is kept, so fall back to 0
saveData(line)
print y
#What data do we want here?
elif ((line.find("K")) == 0):
print "FOUND A K!"
print line
        y = StringToFloat(line, 0) #fallback value; no variable is kept for "K" data
saveData(line)
print y
#What data do we want here?
elif ((line.find("L")) == 0):
print "Roll:"
print line
y = StringToFloat(line, roll)
saveData(line)
roll = y
print y
s9.write(dict(x=x, y=y))
#HACKHD INFO BELOW
elif ((line.find("Hack")) == 0):
print "HackHD information"
print line
saveData(line)
HackStatus = line
HackStatus = HackStatus[6:13]
HackStatus = HackStatus.rstrip('\n');
HackStatus = HackStatus.rstrip('\r');
print HackStatus
#What data do we want here?
elif ((line.find("P")) == 0):
print "Pitch:"
print line
y = StringToFloat(line, pitch)
saveData(line)
pitch = y
print y
s10.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("Q")) == 0):
print "Heading:"
print line
y = StringToFloat(line, heading)
saveData(line)
heading = y
print y
#s11.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("T")) == 0):
print "Pressure"
print line
y = StringToFloat(line, pressure)
saveData(line)
pressure = y
print y
#What data do we want here?
elif ((line.find("U")) == 0):
print "Altitude(from press/temp):"
print line
y = StringToFloat(line, pressureAltitude)
saveData(line)
pressureAltitude = y
print y
s4.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("V")) == 0):
print "Temperature(from 10dof):"
print line
y = StringToFloat(line, temperature10DOF)
saveData(line)
temperature10DOF = y
print y
s5.write(dict(x=x, y=y))
#Take care of the incoming GPS data, send to plotly and post as JSON
elif ((line.find("$")) == 0):
print "Incoming GPS information"
handleGPSdata(line)
if (line.startswith( '$GPGGA' ) == True):
GGAreceived = RegExprNMEAdataGGA(line)
elif (line.startswith( '$GPRMC' ) == True):
RMCreceived = RegExprNMEAdataRMC(line)
#When an RMC and a GGA string have been received, post it!
if ((GGAreceived == True) & (RMCreceived == True)):
parseToJson(RMClongitude, RMClatitude, GGAaltitude, RMCspeed, RMCheading, RMCday, RMCmonth, RMCyear, RMChours, RMCminutes, RMCseconds, extTemp, intTemp, vidTemp, COlevel, CH4level, HackStatus)
GGAreceived = False
RMCreceived = False
#Close the stream when done plotting, but we never really close it...
s.close() | #trace11 = Scatter(
# x=[],
# y=[],
# mode='lines+markers',
# stream=stream11 | random_line_split |
cougballoon_rcv.py |
##################################
## Michael Hamilton
## [email protected]
## #cougballoon
## v1.0 Mar 1, 2015
## v1.1 Mar 13, 2015 - added JSON
## v1.2 Apr 5, 2015 - finalized graphs
## v1.3 Apr 6, 2015 - repaired value errors
##################################
#Axis titles and legends have been created, verify they remain upon running.
#Will return previous value if there is an error with the reading, and
#will output the error to the serial monitor below
#add heading back to graph
import re
import json
import plotly
plotly.__version__
import plotly.plotly as py
import plotly.tools as tls
from plotly.graph_objs import *
import numpy as np
#https://plot.ly/python/streaming-line-tutorial/
import serial
import io
import os
#Set initial values? Why? Didn't work without it....
RMClongitude = 0
RMClatitude = 0
GGAaltitude = 0
RMCspeed = 0
RMCheading = 0
RMCday = 0
RMCmonth = 0
RMCyear = 0
RMChours = 0
RMCminutes = 0
RMCseconds = 0
extTemp = 70.0 #A
intTemp = 0 #C
vidTemp = 40.0 #E
COlevel = 0 #F
CH4level = 0 #G
HackStatus = "000000" #Hack
roll = 0
pitch = 0
heading = 0
pressure = 0
pressureAltitude = 0
temperature10DOF = 0
GGAreceived = False
RMCreceived = False
#Depending on the port we are plugged into
#ser = serial.Serial('/dev/tty.usbmodem1411', 9600)
ser = serial.Serial('/dev/tty.usbmodem1421', 9600)
#Change time to local time zone
def UTCtoPSTDST(hours):
|
#Save all incoming data with a current date/time string
def saveData(a):
x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
saveAllIncomingData(x)
saveAllIncomingData(a)
#Convert GPS strings to floats (Couldn't get str.isnumeric() to work correctly)
def StringToFloatGPS(a):
a = a.rstrip('\n');
a = a.rstrip('\r');
a = float(a)
return a
#FIX SO IT DOES NOT RETURN A ZERO!!!!!!!!
#COnvert data strings to floats
def StringToFloat(a, b):
#print len(a)
if (len(a) < 4):
print "Incomplete data, returning a zero."
return b
a = a[1:len(a)]
a = a.rstrip('\n');
a = a.rstrip('\r');
if (a == "-"):
print "Only a negative sign in string, returning a zero."
return b
a = float(a)
return a
#Saves all incoming data to a file on the desktop
def saveAllIncomingData(c):
f = open('/Users/michaelhamilton/Desktop/cougballoonData.txt', 'a')
f.write(c)
f.close
#Convert nmea string to .kml file, send to server
def handleGPSdata(nmeaString):
#Commented out lines are for .docs, we are using .txt files instead.
#f = open('/Users/michaelhamilton/gpsbabel/nmeaRawData.doc', 'a')
f = open('/Users/michaelhamilton/gpsbabel/nmeaRawData.txt', 'a')
f.write(nmeaString)
f.close()
saveAllIncomingData(nmeaString)
#os.system("cd /Users/michaelhamilton/gpsbabel && ./gpsbabel -i nmea -f nmeaRawData.doc -o kml,deficon=http://encs.vancouver.wsu.edu/~mikehmbn/balloon-icon-map.png,line_color=FF321E98,floating=1 -F cougballoon.kml")
os.system("cd /Users/michaelhamilton/gpsbabel && ./gpsbabel -i nmea -f nmeaRawData.txt -o kml,deficon=http://encs.vancouver.wsu.edu/~mikehmbn/balloon-icon-map.png,line_color=FF321E98,floating=1 -F cougballoon.kml")
os.system("cd /Users/michaelhamilton/gpsbabel && scp cougballoon.kml [email protected]:Sites/")
print "Updated KML file was sent to the server"
return
#Get JSON data and send it to the server
def parseToJson(RMClongitude, RMClatitude, GGAaltitude, RMCspeed, RMCheading, RMCday, RMCmonth, RMCyear, RMChours, RMCminutes, RMCseconds, extTemp, intTemp, vidTemp, COlevel, CH4level, HackStatus):
JSONdata2 = { 'cougballoon':[ { 'Longitude':RMClongitude, 'Latitude':RMClatitude, 'Altitude':GGAaltitude, 'Speed':RMCspeed, 'Heading':RMCheading, 'Time':{'Day':RMCday, 'Month':RMCmonth, 'Year':RMCyear, 'Hours':RMChours, 'Minutes':RMCminutes, 'Seconds':RMCseconds},'External temperature(deg F)':extTemp, 'Internal temperature(deg F)':intTemp, 'Video Transmitter temperature(deg F)':vidTemp, 'Carbon Monoxide level(ppm)':COlevel, 'Methane level(ppm)':CH4level, 'HackHD':HackStatus } ] }
data_string2 = json.dumps(JSONdata2)
#Now post it to json_data.json for the map legend
f = open('/Users/michaelhamilton/Desktop/json_data.json', 'w')
f.write(data_string2)
f.close()
os.system("scp /Users/michaelhamilton/Desktop/json_data.json [email protected]:Sites/")
#Now to handle it for json_data.html
JSONdata = [ { 'Longitude':RMClongitude, 'Latitude':RMClatitude, 'Altitude':GGAaltitude, 'Speed(mph)':RMCspeed, 'Heading':RMCheading, 'Time':{'Day':RMCday, 'Month':RMCmonth, 'Year':RMCyear, 'Hours':RMChours, 'Minutes':RMCminutes, 'Seconds':RMCseconds},'External temperature(deg F)':extTemp, 'Internal temperature(deg F)':intTemp, 'Video Transmitter temperature(deg F)':vidTemp, 'Carbon Monoxide level(ppm)':COlevel, 'Methane level(ppm)':CH4level, 'HackHD camera statuses':HackStatus } ]
data_string = json.dumps(JSONdata)
JSONdataString = str(data_string)
newJSONdata = re.match(r'\[(.*)', JSONdataString)
newJSONdata2 = "," + newJSONdata.group(1)
f = open('/Users/michaelhamilton/Desktop/json_data.html', 'r+')
jumpBack = -1 #jump back 1 spot from the end
f.seek(jumpBack, 2) #2 is the end of the file
last = f.readline() #read the last line
while (last != "]"): #if it's not a ], then keep jumping back
jumpBack = jumpBack - 1 #decrement
if (last == "]"):
f.seek(-1, 2)
f.write(newJSONdata2)
f.close()
#Send it to the server
os.system("cd /Users/michaelhamilton/Desktop && scp json_data.html [email protected]:Sites/")
print "Updated JSON information was sent to the server."
return
#Parse out the data from an RMC nmea string
def RegExprNMEAdataRMC(line):
#if it's an RMC string....
print line
newRMCline = re.match( r'\$GPRMC,(\d\d)(\d\d)(\d\d).*,\D,(\d+.\d+),\D,(\d+.\d+),\D,(\d+.\d+),(\d+.\d+),(\d\d)(\d\d)(\d\d),.*,.*', line, re.I)
#All data are strings, not integers
if (newRMCline):
global RMChours
RMChours = newRMCline.group(1)
#Convert UTC hours to PST(Daylight Savings Time)
RMChours = UTCtoPSTDST(RMChours)
global RMCminutes
RMCminutes = newRMCline.group(2)
global RMCseconds
RMCseconds = newRMCline.group(3)
global RMClatitude
RMClatitude = newRMCline.group(4)
RMClatitude = StringToFloatGPS(RMClatitude)
global RMClongitude
RMClongitude = newRMCline.group(5)
RMClongitude = StringToFloatGPS(RMClongitude)
global RMCspeed
RMCspeed = newRMCline.group(6)
RMCspeed = StringToFloatGPS(RMCspeed)
global RMCheading
RMCheading = newRMCline.group(7)
RMCheading = StringToFloatGPS(RMCheading)
global RMCday
RMCday = newRMCline.group(8)
global RMCmonth
RMCmonth = newRMCline.group(9)
global RMCyear
RMCyear = newRMCline.group(10)
return True
else:
return False
#Parse out the data from an GGA nmea string
def RegExprNMEAdataGGA(line):
#if it's a GGA string....
print line
newGGAline = re.match( r'\$GPGGA,(\d\d)(\d\d)(\d\d).*,(.*..*),\D,(.*..*),\D,\d,\d\d\,\d.\d\d,(\d+.\d),\D.*', line, re.I)
#All data are strings, not integers
if (newGGAline):
global GGAhours
GGAhours = newGGAline.group(1)
#Convert UTC hours to PST(Daylight Savings Time)
GGAhours = UTCtoPSTDST(GGAhours)
global GGAminutes
GGAminutes = newGGAline.group(2)
global GGAseconds
GGAseconds = newGGAline.group(3)
global GGAlatitude
GGAlatitude = newGGAline.group(4)
GGAlatitude = StringToFloatGPS(GGAlatitude)
global GGAlongitude
GGAlongitude = newGGAline.group(5)
GGAlongitude = StringToFloatGPS(GGAlongitude)
global GGAaltitude
GGAaltitude = newGGAline.group(6)
GGAaltitude = StringToFloatGPS(GGAaltitude)
s2.write(dict(x=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), y=GGAaltitude))
return True
else:
return False
#Get my login and keys ready ro send data to plot.ly
stream_ids = tls.get_credentials_file()['stream_ids']
#Set up the plotly streams
stream_id1 = stream_ids[0]#External temperature #A
stream_id2 = stream_ids[1]#GGAaltitude #B
stream_id3 = stream_ids[2]#Internal temperature #C
#stream_id4 = stream_ids[3]#Internal pressure #D
stream_id4 = stream_ids[3]#pressureAltitude #D
#stream_id5 = stream_ids[4]#Videolynx temperature #E
stream_id5 = stream_ids[4]#10DOF temperature #E
stream_id6 = stream_ids[5]#CO level in ppm #F
stream_id7 = stream_ids[6]#CH4 level in ppm #G
stream_id8 = stream_ids[7]#Humidity #J
stream_id9 = stream_ids[8]#Roll #L
stream_id10 = stream_ids[9]#Pitch #P
#stream_id11 = stream_ids[10]#Heading #Q
#stream_id12 = stream_ids[11]#Pressure #T
stream_id13 = stream_ids[12]#PressureAltitude #U
#Graph 1 data, stream names coincide with stream_ids for simplicity
#External temperature #A
stream1 = Stream(
token=stream_id1,
maxpoints=20
)
#GGAaltitude #A
stream2 = Stream(
token=stream_id2,
maxpoints=4
)
#Internal temperature #C
stream3 = Stream(
token=stream_id3,
maxpoints=20
)
#pressureAltitude #C
stream4 = Stream(
token=stream_id4,
maxpoints=20
)
#10DOF temperature #E
stream5 = Stream(
token=stream_id5,
maxpoints=20
)
#Graph 2 data, stream names coincide with stream_ids for simplicity
#CO level in ppm #G
stream6 = Stream(
token=stream_id6,
maxpoints=20
)
#CH4 level in ppm #G
stream7 = Stream(
token=stream_id7,
maxpoints=20
)
#Roll #L
stream9 = Stream(
token=stream_id9,
maxpoints=20
)
#Pitch #P
stream10 = Stream(
token=stream_id10,
maxpoints=20
)
#Heading #Q
#stream11 = Stream(
# token=stream_id11,
# maxpoints=20
#)
#Pressure #T
#stream12 = Stream(
# token=stream_id12,
# maxpoints=20
#)
#PressureAltitude #U
stream13 = Stream(
token=stream_id13,
maxpoints=20
)
#Trace names coincide with stream names
trace1 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream1
)
trace2 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream2
)
trace3 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream3
)
trace4 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream4
)
trace5 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream5
)
trace6 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream6
)
trace7 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream7
)
trace9 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream9
)
trace10 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream10
)
#trace11 = Scatter(
# x=[],
# y=[],
# mode='lines+markers',
# stream=stream11
#)
#trace12 = Scatter(
# x=[],
# y=[],
# mode='lines+markers',
# stream=stream12
#)
trace13 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream13
)
#Set up the plotly graphs
data_graph_a = Data([trace1, trace3, trace5])
data_graph_b = Data([trace6, trace7])
data_graph_c = Data([trace9, trace10])
#data_graph_d = Data([trace2, trace4])#Does not work
data_graph_e = Data([trace2, trace4])
layout_a = Layout(title='#cougballoon temperatures')#This is the name on the graph
layout_b = Layout(title='#cougballoon air quality levels')#This is the name on the graph
layout_c = Layout(title='#cougballoon payload pitch and roll data')#This is the name on the graph
#layout_d = Layout(title='#cougballoon altitude')#This is the name on the graph
layout_e = Layout(title='#cougballoon altitude')#This is the name on the graph
fig_a = Figure(data=data_graph_a, layout=layout_a)
fig_b = Figure(data=data_graph_b, layout=layout_b)
fig_c = Figure(data=data_graph_c, layout=layout_c)
#fig_d = Figure(data=data_graph_d, layout=layout_d)
fig_e = Figure(data=data_graph_e, layout=layout_e)
unique_url_a = py.plot(fig_a, filename='cougballoon1', fileopt='extend')#Name above the graph
unique_url_b = py.plot(fig_b, filename='cougballoon2', fileopt='extend')#Name above the graph
unique_url_c = py.plot(fig_c, filename='cougballoon3', fileopt='extend')#Name above the graph
#unique_url_d = py.plot(fig_d, filename='cougballoon4', fileopt='extend')#Name above the graph
unique_url_e = py.plot(fig_e, filename='cougballoon5', fileopt='extend')#Name above the graph
#Print the plotly urls
print unique_url_a
print unique_url_b
print unique_url_c
#print unique_url_d
print unique_url_e
#Get the plotly streams ready
s1 = py.Stream(stream_id1)
s2 = py.Stream(stream_id2)
s3 = py.Stream(stream_id3)
s4 = py.Stream(stream_id4)
s5 = py.Stream(stream_id5)
s6 = py.Stream(stream_id6)
s7 = py.Stream(stream_id7)
s9 = py.Stream(stream_id9)
s10 = py.Stream(stream_id10)
#s11 = py.Stream(stream_id11)
#s12 = py.Stream(stream_id12)
#s13 = py.Stream(stream_id13)
#Open the plotly streams
s1.open()
s2.open()
s3.open()
s4.open()
s5.open()
s6.open()
s7.open()
s9.open()
s10.open()
#s11.open()
#s12.open()
#s13.open()
import datetime
import time
# Delay start of stream by 5 sec (time to switch tabs)
time.sleep(5)
#Clean out the buffers
line = ser.readline()
time.sleep(2)
line = ser.readline()
time.sleep(2)
line = ser.readline()
time.sleep(2)
while True:
# Current time on x-axis, values on y-axis
x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
line = ser.readline() #properly captures incoming string
#External temperature #A
if ((line.find("A")) == 0):
print "External temperature:"
print line
y = StringToFloat(line, extTemp)
saveData(line)
extTemp = y
print y
s1.write(dict(x=x, y=y))
#External pressure #B
elif ((line.find("B")) == 0):
print "External Pressure:"
print line
y = StringToFloat(line)
saveData(line)
print y
#s2.write(dict(x=x, y=y))
#Internal temperature #C
elif ((line.find("C")) == 0):
print "Internal temperature:"
print line
y = StringToFloat(line, intTemp)
saveData(line)
intTemp = y
print y
s3.write(dict(x=x, y=y))
#Internal pressure #D
#elif ((line.find("D")) == 0):
#print "Internal pressure:"
#print line
#y = StringToFloat(line)
#saveData(line)
#print y
#s4.write(dict(x=x, y=y))
#Videolynx temperature #E
elif ((line.find("E")) == 0):
print "Videolynx temperature:"
print line
y = StringToFloat(line)
saveData(line)
vidTemp = y
print y
#s5.write(dict(x=x, y=y))
#CO level in ppm #F
elif ((line.find("F")) == 0):
print "CO level (in ppm):"
print line
y = StringToFloat(line, COlevel)
saveData(line)
COlevel = y
print y
s6.write(dict(x=x, y=y))
#CH4 level in ppm #G
elif ((line.find("G")) == 0):
print "CH4 level (in ppm):"
print line
y = StringToFloat(line, CH4level)
saveData(line)
CH4level = y
print y
s7.write(dict(x=x, y=y))
#Humidity #J
elif ((line.find("J")) == 0):
print "Humidity:"
print line
y = StringToFloat(line)
saveData(line)
print y
#What data do we want here?
elif ((line.find("K")) == 0):
print "FOUND A K!"
print line
y = StringToFloat(line)
saveData(line)
print y
#What data do we want here?
elif ((line.find("L")) == 0):
print "Roll:"
print line
y = StringToFloat(line, roll)
saveData(line)
roll = y
print y
s9.write(dict(x=x, y=y))
#HACKHD INFO BELOW
elif ((line.find("Hack")) == 0):
print "HackHD information"
print line
saveData(line)
HackStatus = line
HackStatus = HackStatus[6:13]
HackStatus = HackStatus.rstrip('\n');
HackStatus = HackStatus.rstrip('\r');
print HackStatus
#What data do we want here?
elif ((line.find("P")) == 0):
print "Pitch:"
print line
y = StringToFloat(line, pitch)
saveData(line)
pitch = y
print y
s10.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("Q")) == 0):
print "Heading:"
print line
y = StringToFloat(line, heading)
saveData(line)
heading = y
print y
#s11.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("T")) == 0):
print "Pressure"
print line
y = StringToFloat(line, pressure)
saveData(line)
pressure = y
print y
#What data do we want here?
elif ((line.find("U")) == 0):
print "Altitude(from press/temp):"
print line
y = StringToFloat(line, pressureAltitude)
saveData(line)
pressureAltitude = y
print y
s4.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("V")) == 0):
print "Temperature(from 10dof):"
print line
y = StringToFloat(line, temperature10DOF)
saveData(line)
temperature10DOF = y
print y
s5.write(dict(x=x, y=y))
#Take care of the incoming GPS data, send to plotly and post as JSON
elif ((line.find("$")) == 0):
print "Incoming GPS information"
handleGPSdata(line)
if (line.startswith( '$GPGGA' ) == True):
GGAreceived = RegExprNMEAdataGGA(line)
elif (line.startswith( '$GPRMC' ) == True):
RMCreceived = RegExprNMEAdataRMC(line)
#When an RMC and a GGA string have been received, post it!
if ((GGAreceived == True) & (RMCreceived == True)):
parseToJson(RMClongitude, RMClatitude, GGAaltitude, RMCspeed, RMCheading, RMCday, RMCmonth, RMCyear, RMChours, RMCminutes, RMCseconds, extTemp, intTemp, vidTemp, COlevel, CH4level, HackStatus)
GGAreceived = False
RMCreceived = False
#Close the stream when done plotting, but we never really close it...
s.close() | hours = hours.rstrip('\n');
hours = hours.rstrip('\r');
hours = int(hours) + 17
    if (hours >= 24): #24 must wrap to 0 as well
hours = hours - 24
hours = str(hours)
return hours | identifier_body |
cougballoon_rcv.py |
##################################
## Michael Hamilton
## [email protected]
## #cougballoon
## v1.0 Mar 1, 2015
## v1.1 Mar 13, 2015 - added JSON
## v1.2 Apr 5, 2015 - finalized graphs
## v1.3 Apr 6, 2015 - repaired value errors
##################################
#Axis titles and legends have been created, verify they remain upon running.
#Will return previous value if there is an error with the reading, and
#will output the error to the serial monitor below
#add heading back to graph
import re
import json
import plotly
plotly.__version__
import plotly.plotly as py
import plotly.tools as tls
from plotly.graph_objs import *
import numpy as np
#https://plot.ly/python/streaming-line-tutorial/
import serial
import io
import os
#Set initial values? Why? Didn't work without it....
RMClongitude = 0
RMClatitude = 0
GGAaltitude = 0
RMCspeed = 0
RMCheading = 0
RMCday = 0
RMCmonth = 0
RMCyear = 0
RMChours = 0
RMCminutes = 0
RMCseconds = 0
extTemp = 70.0 #A
intTemp = 0 #C
vidTemp = 40.0 #E
COlevel = 0 #F
CH4level = 0 #G
HackStatus = "000000" #Hack
roll = 0
pitch = 0
heading = 0
pressure = 0
pressureAltitude = 0
temperature10DOF = 0
GGAreceived = False
RMCreceived = False
#Depending on the port we are plugged into
#ser = serial.Serial('/dev/tty.usbmodem1411', 9600)
ser = serial.Serial('/dev/tty.usbmodem1421', 9600)
#Change time to local time zone
def UTCtoPSTDST(hours):
hours = hours.rstrip('\n');
hours = hours.rstrip('\r');
hours = int(hours) + 17
    if (hours >= 24): #24 must wrap to 0 as well
hours = hours - 24
hours = str(hours)
return hours
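#Illustrative example (assumes the +17 offset above is the intended UTC -> PDT shift):
#  UTCtoPSTDST("02\r\n") returns "19", UTCtoPSTDST("19\r\n") returns "12"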
#Save all incoming data with a current date/time string
def saveData(a):
x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
saveAllIncomingData(x)
saveAllIncomingData(a)
#Convert GPS strings to floats (Couldn't get str.isnumeric() to work correctly)
def StringToFloatGPS(a):
a = a.rstrip('\n');
a = a.rstrip('\r');
a = float(a)
return a
#FIX SO IT DOES NOT RETURN A ZERO!!!!!!!!
#Convert data strings to floats
def StringToFloat(a, b=0): #b is the fallback returned for malformed readings (previous value where the caller has one)
#print len(a)
if (len(a) < 4):
print "Incomplete data, returning a zero."
return b
a = a[1:len(a)]
a = a.rstrip('\n');
a = a.rstrip('\r');
if (a == "-"):
print "Only a negative sign in string, returning a zero."
return b
a = float(a)
return a
#Saves all incoming data to a file on the desktop
def saveAllIncomingData(c):
f = open('/Users/michaelhamilton/Desktop/cougballoonData.txt', 'a')
f.write(c)
    f.close() #call close(); the bare attribute access never closed the file
#Convert nmea string to .kml file, send to server
def handleGPSdata(nmeaString):
#Commented out lines are for .docs, we are using .txt files instead.
#f = open('/Users/michaelhamilton/gpsbabel/nmeaRawData.doc', 'a')
f = open('/Users/michaelhamilton/gpsbabel/nmeaRawData.txt', 'a')
f.write(nmeaString)
f.close()
saveAllIncomingData(nmeaString)
#os.system("cd /Users/michaelhamilton/gpsbabel && ./gpsbabel -i nmea -f nmeaRawData.doc -o kml,deficon=http://encs.vancouver.wsu.edu/~mikehmbn/balloon-icon-map.png,line_color=FF321E98,floating=1 -F cougballoon.kml")
os.system("cd /Users/michaelhamilton/gpsbabel && ./gpsbabel -i nmea -f nmeaRawData.txt -o kml,deficon=http://encs.vancouver.wsu.edu/~mikehmbn/balloon-icon-map.png,line_color=FF321E98,floating=1 -F cougballoon.kml")
os.system("cd /Users/michaelhamilton/gpsbabel && scp cougballoon.kml [email protected]:Sites/")
print "Updated KML file was sent to the server"
return
#Get JSON data and send it to the server
def parseToJson(RMClongitude, RMClatitude, GGAaltitude, RMCspeed, RMCheading, RMCday, RMCmonth, RMCyear, RMChours, RMCminutes, RMCseconds, extTemp, intTemp, vidTemp, COlevel, CH4level, HackStatus):
JSONdata2 = { 'cougballoon':[ { 'Longitude':RMClongitude, 'Latitude':RMClatitude, 'Altitude':GGAaltitude, 'Speed':RMCspeed, 'Heading':RMCheading, 'Time':{'Day':RMCday, 'Month':RMCmonth, 'Year':RMCyear, 'Hours':RMChours, 'Minutes':RMCminutes, 'Seconds':RMCseconds},'External temperature(deg F)':extTemp, 'Internal temperature(deg F)':intTemp, 'Video Transmitter temperature(deg F)':vidTemp, 'Carbon Monoxide level(ppm)':COlevel, 'Methane level(ppm)':CH4level, 'HackHD':HackStatus } ] }
data_string2 = json.dumps(JSONdata2)
#Now post it to json_data.json for the map legend
f = open('/Users/michaelhamilton/Desktop/json_data.json', 'w')
f.write(data_string2)
f.close()
os.system("scp /Users/michaelhamilton/Desktop/json_data.json [email protected]:Sites/")
#Now to handle it for json_data.html
JSONdata = [ { 'Longitude':RMClongitude, 'Latitude':RMClatitude, 'Altitude':GGAaltitude, 'Speed(mph)':RMCspeed, 'Heading':RMCheading, 'Time':{'Day':RMCday, 'Month':RMCmonth, 'Year':RMCyear, 'Hours':RMChours, 'Minutes':RMCminutes, 'Seconds':RMCseconds},'External temperature(deg F)':extTemp, 'Internal temperature(deg F)':intTemp, 'Video Transmitter temperature(deg F)':vidTemp, 'Carbon Monoxide level(ppm)':COlevel, 'Methane level(ppm)':CH4level, 'HackHD camera statuses':HackStatus } ]
data_string = json.dumps(JSONdata)
JSONdataString = str(data_string)
newJSONdata = re.match(r'\[(.*)', JSONdataString)
newJSONdata2 = "," + newJSONdata.group(1)
f = open('/Users/michaelhamilton/Desktop/json_data.html', 'r+')
jumpBack = -1 #jump back 1 spot from the end
f.seek(jumpBack, 2) #2 is the end of the file
last = f.readline() #read the last line
while (last != "]"): #if it's not a ], then keep jumping back
jumpBack = jumpBack - 1 #decrement
if (last == "]"):
f.seek(-1, 2)
f.write(newJSONdata2)
f.close()
#Send it to the server
os.system("cd /Users/michaelhamilton/Desktop && scp json_data.html [email protected]:Sites/")
print "Updated JSON information was sent to the server."
return
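#For reference (illustrative sketch of the output, not verified against the live files):
#  json_data.json holds {"cougballoon": [{"Longitude": ..., "Latitude": ..., "Time": {...}, ...}]}
#  json_data.html accumulates one additional {...} record per call inside its trailing ] list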
#Parse out the data from an RMC nmea string
def RegExprNMEAdataRMC(line):
#if it's an RMC string....
print line
newRMCline = re.match( r'\$GPRMC,(\d\d)(\d\d)(\d\d).*,\D,(\d+.\d+),\D,(\d+.\d+),\D,(\d+.\d+),(\d+.\d+),(\d\d)(\d\d)(\d\d),.*,.*', line, re.I)
#All data are strings, not integers
if (newRMCline):
global RMChours
RMChours = newRMCline.group(1)
#Convert UTC hours to PST(Daylight Savings Time)
RMChours = UTCtoPSTDST(RMChours)
global RMCminutes
RMCminutes = newRMCline.group(2)
global RMCseconds
RMCseconds = newRMCline.group(3)
global RMClatitude
RMClatitude = newRMCline.group(4)
RMClatitude = StringToFloatGPS(RMClatitude)
global RMClongitude
RMClongitude = newRMCline.group(5)
RMClongitude = StringToFloatGPS(RMClongitude)
global RMCspeed
RMCspeed = newRMCline.group(6)
RMCspeed = StringToFloatGPS(RMCspeed)
global RMCheading
RMCheading = newRMCline.group(7)
RMCheading = StringToFloatGPS(RMCheading)
global RMCday
RMCday = newRMCline.group(8)
global RMCmonth
RMCmonth = newRMCline.group(9)
global RMCyear
RMCyear = newRMCline.group(10)
return True
else:
return False
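#Illustrative sample of what the RMC regex above captures (textbook sentence, not flight data):
#  $GPRMC,123519,A,4807.038,N,01131.000,E,022.4,084.4,230394,003.1,W*6A
#  -> 12/35/19 (hh/mm/ss), 4807.038 (lat, ddmm.mmmm), 01131.000 (lon, dddmm.mmmm),
#     022.4 (speed, knots), 084.4 (course), 23/03/94 (day/month/year)
#Note: the raw ddmm.mmmm values are used as-is; no conversion to decimal degrees happens here.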
#Parse out the data from an GGA nmea string
def RegExprNMEAdataGGA(line):
#if it's a GGA string....
print line
newGGAline = re.match( r'\$GPGGA,(\d\d)(\d\d)(\d\d).*,(.*..*),\D,(.*..*),\D,\d,\d\d\,\d.\d\d,(\d+.\d),\D.*', line, re.I)
#All data are strings, not integers
if (newGGAline):
global GGAhours
GGAhours = newGGAline.group(1)
#Convert UTC hours to PST(Daylight Savings Time)
GGAhours = UTCtoPSTDST(GGAhours)
global GGAminutes
GGAminutes = newGGAline.group(2)
global GGAseconds
GGAseconds = newGGAline.group(3)
global GGAlatitude
GGAlatitude = newGGAline.group(4)
GGAlatitude = StringToFloatGPS(GGAlatitude)
global GGAlongitude
GGAlongitude = newGGAline.group(5)
GGAlongitude = StringToFloatGPS(GGAlongitude)
global GGAaltitude
GGAaltitude = newGGAline.group(6)
GGAaltitude = StringToFloatGPS(GGAaltitude)
s2.write(dict(x=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), y=GGAaltitude))
return True
else:
return False
#Get my login and keys ready to send data to plot.ly
stream_ids = tls.get_credentials_file()['stream_ids']
#Set up the plotly streams
stream_id1 = stream_ids[0]#External temperature #A
stream_id2 = stream_ids[1]#GGAaltitude #B
stream_id3 = stream_ids[2]#Internal temperature #C
#stream_id4 = stream_ids[3]#Internal pressure #D
stream_id4 = stream_ids[3]#pressureAltitude #D
#stream_id5 = stream_ids[4]#Videolynx temperature #E
stream_id5 = stream_ids[4]#10DOF temperature #E
stream_id6 = stream_ids[5]#CO level in ppm #F
stream_id7 = stream_ids[6]#CH4 level in ppm #G
stream_id8 = stream_ids[7]#Humidity #J
stream_id9 = stream_ids[8]#Roll #L
stream_id10 = stream_ids[9]#Pitch #P
#stream_id11 = stream_ids[10]#Heading #Q
#stream_id12 = stream_ids[11]#Pressure #T
stream_id13 = stream_ids[12]#PressureAltitude #U
#Graph 1 data, stream names coincide with stream_ids for simplicity
#External temperature #A
stream1 = Stream(
token=stream_id1,
maxpoints=20
)
#GGAaltitude #A
stream2 = Stream(
token=stream_id2,
maxpoints=4
)
#Internal temperature #C
stream3 = Stream(
token=stream_id3,
maxpoints=20
)
#pressureAltitude #C
stream4 = Stream(
token=stream_id4,
maxpoints=20
)
#10DOF temperature #E
stream5 = Stream(
token=stream_id5,
maxpoints=20
)
#Graph 2 data, stream names coincide with stream_ids for simplicity
#CO level in ppm #G
stream6 = Stream(
token=stream_id6,
maxpoints=20
)
#CH4 level in ppm #G
stream7 = Stream(
token=stream_id7,
maxpoints=20
)
#Roll #L
stream9 = Stream(
token=stream_id9,
maxpoints=20
)
#Pitch #P
stream10 = Stream(
token=stream_id10,
maxpoints=20
)
#Heading #Q
#stream11 = Stream(
# token=stream_id11,
# maxpoints=20
#)
#Pressure #T
#stream12 = Stream(
# token=stream_id12,
# maxpoints=20
#)
#PressureAltitude #U
stream13 = Stream(
token=stream_id13,
maxpoints=20
)
#Trace names coincide with stream names
trace1 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream1
)
trace2 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream2
)
trace3 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream3
)
trace4 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream4
)
trace5 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream5
)
trace6 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream6
)
trace7 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream7
)
trace9 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream9
)
trace10 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream10
)
#trace11 = Scatter(
# x=[],
# y=[],
# mode='lines+markers',
# stream=stream11
#)
#trace12 = Scatter(
# x=[],
# y=[],
# mode='lines+markers',
# stream=stream12
#)
trace13 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream13
)
#Set up the plotly graphs
data_graph_a = Data([trace1, trace3, trace5])
data_graph_b = Data([trace6, trace7])
data_graph_c = Data([trace9, trace10])
#data_graph_d = Data([trace2, trace4])#Does not work
data_graph_e = Data([trace2, trace4])
layout_a = Layout(title='#cougballoon temperatures')#This is the name on the graph
layout_b = Layout(title='#cougballoon air quality levels')#This is the name on the graph
layout_c = Layout(title='#cougballoon payload pitch and roll data')#This is the name on the graph
#layout_d = Layout(title='#cougballoon altitude')#This is the name on the graph
layout_e = Layout(title='#cougballoon altitude')#This is the name on the graph
fig_a = Figure(data=data_graph_a, layout=layout_a)
fig_b = Figure(data=data_graph_b, layout=layout_b)
fig_c = Figure(data=data_graph_c, layout=layout_c)
#fig_d = Figure(data=data_graph_d, layout=layout_d)
fig_e = Figure(data=data_graph_e, layout=layout_e)
unique_url_a = py.plot(fig_a, filename='cougballoon1', fileopt='extend')#Name above the graph
unique_url_b = py.plot(fig_b, filename='cougballoon2', fileopt='extend')#Name above the graph
unique_url_c = py.plot(fig_c, filename='cougballoon3', fileopt='extend')#Name above the graph
#unique_url_d = py.plot(fig_d, filename='cougballoon4', fileopt='extend')#Name above the graph
unique_url_e = py.plot(fig_e, filename='cougballoon5', fileopt='extend')#Name above the graph
#Print the plotly urls
print unique_url_a
print unique_url_b
print unique_url_c
#print unique_url_d
print unique_url_e
#Get the plotly streams ready
s1 = py.Stream(stream_id1)
s2 = py.Stream(stream_id2)
s3 = py.Stream(stream_id3)
s4 = py.Stream(stream_id4)
s5 = py.Stream(stream_id5)
s6 = py.Stream(stream_id6)
s7 = py.Stream(stream_id7)
s9 = py.Stream(stream_id9)
s10 = py.Stream(stream_id10)
#s11 = py.Stream(stream_id11)
#s12 = py.Stream(stream_id12)
#s13 = py.Stream(stream_id13)
#Open the plotly streams
s1.open()
s2.open()
s3.open()
s4.open()
s5.open()
s6.open()
s7.open()
s9.open()
s10.open()
#s11.open()
#s12.open()
#s13.open()
import datetime
import time
# Delay start of stream by 5 sec (time to switch tabs)
time.sleep(5)
#Clean out the buffers
line = ser.readline()
time.sleep(2)
line = ser.readline()
time.sleep(2)
line = ser.readline()
time.sleep(2)
while True:
# Current time on x-axis, values on y-axis
x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
line = ser.readline() #properly captures incoming string
#External temperature #A
if ((line.find("A")) == 0):
print "External temperature:"
print line
y = StringToFloat(line, extTemp)
saveData(line)
extTemp = y
print y
s1.write(dict(x=x, y=y))
#External pressure #B
elif ((line.find("B")) == 0):
print "External Pressure:"
print line
y = StringToFloat(line)
saveData(line)
print y
#s2.write(dict(x=x, y=y))
#Internal temperature #C
elif ((line.find("C")) == 0):
print "Internal temperature:"
print line
y = StringToFloat(line, intTemp)
saveData(line)
intTemp = y
print y
s3.write(dict(x=x, y=y))
#Internal pressure #D
#elif ((line.find("D")) == 0):
#print "Internal pressure:"
#print line
#y = StringToFloat(line)
#saveData(line)
#print y
#s4.write(dict(x=x, y=y))
#Videolynx temperature #E
elif ((line.find("E")) == 0):
print "Videolynx temperature:"
print line
        y = StringToFloat(line, vidTemp) #fall back to the previous reading, matching the other branches
saveData(line)
vidTemp = y
print y
#s5.write(dict(x=x, y=y))
#CO level in ppm #F
elif ((line.find("F")) == 0):
print "CO level (in ppm):"
print line
y = StringToFloat(line, COlevel)
saveData(line)
COlevel = y
print y
s6.write(dict(x=x, y=y))
#CH4 level in ppm #G
elif ((line.find("G")) == 0):
print "CH4 level (in ppm):"
print line
y = StringToFloat(line, CH4level)
saveData(line)
CH4level = y
print y
s7.write(dict(x=x, y=y))
#Humidity #J
elif ((line.find("J")) == 0):
print "Humidity:"
print line
y = StringToFloat(line)
saveData(line)
print y
#What data do we want here?
elif ((line.find("K")) == 0):
print "FOUND A K!"
print line
y = StringToFloat(line)
saveData(line)
print y
#What data do we want here?
elif ((line.find("L")) == 0):
print "Roll:"
print line
y = StringToFloat(line, roll)
saveData(line)
roll = y
print y
s9.write(dict(x=x, y=y))
#HACKHD INFO BELOW
elif ((line.find("Hack")) == 0):
print "HackHD information"
print line
saveData(line)
HackStatus = line
HackStatus = HackStatus[6:13]
HackStatus = HackStatus.rstrip('\n');
HackStatus = HackStatus.rstrip('\r');
print HackStatus
#What data do we want here?
elif ((line.find("P")) == 0):
print "Pitch:"
print line
y = StringToFloat(line, pitch)
saveData(line)
pitch = y
print y
s10.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("Q")) == 0):
print "Heading:"
print line
y = StringToFloat(line, heading)
saveData(line)
heading = y
print y
#s11.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("T")) == 0):
print "Pressure"
print line
y = StringToFloat(line, pressure)
saveData(line)
pressure = y
print y
#What data do we want here?
elif ((line.find("U")) == 0):
|
#What data do we want here?
elif ((line.find("V")) == 0):
print "Temperature(from 10dof):"
print line
y = StringToFloat(line, temperature10DOF)
saveData(line)
temperature10DOF = y
print y
s5.write(dict(x=x, y=y))
#Take care of the incoming GPS data, send to plotly and post as JSON
elif ((line.find("$")) == 0):
print "Incoming GPS information"
handleGPSdata(line)
if (line.startswith( '$GPGGA' ) == True):
GGAreceived = RegExprNMEAdataGGA(line)
elif (line.startswith( '$GPRMC' ) == True):
RMCreceived = RegExprNMEAdataRMC(line)
#When an RMC and a GGA string have been received, post it!
if ((GGAreceived == True) & (RMCreceived == True)):
parseToJson(RMClongitude, RMClatitude, GGAaltitude, RMCspeed, RMCheading, RMCday, RMCmonth, RMCyear, RMChours, RMCminutes, RMCseconds, extTemp, intTemp, vidTemp, COlevel, CH4level, HackStatus)
GGAreceived = False
RMCreceived = False
#Close the stream when done plotting, but we never really close it...
s.close() | print "Altitude(from press/temp):"
print line
y = StringToFloat(line, pressureAltitude)
saveData(line)
pressureAltitude = y
print y
s4.write(dict(x=x, y=y)) | conditional_block |
options.go | // Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp
import (
"fmt"
"reflect"
"runtime"
"strings"
)
// Option configures for specific behavior of Equal and Diff. In particular,
// the fundamental Option functions (Ignore, Transformer, and Comparer),
// configure how equality is determined.
//
// The fundamental options may be composed with filters (FilterPath and
// FilterValues) to control the scope over which they are applied.
//
// The cmp/cmpopts package provides helper functions for creating options that
// may be used with Equal and Diff.
type Option interface {
// Prevent Option from being equivalent to interface{}, which provides
// a small type checking benefit by preventing Equal(opt, x, y).
option()
}
// Options is a list of Option values that also satisfies the Option interface.
// Helper comparison packages may return an Options value when packing multiple
// Option values into a single Option. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
// on all individual options held within.
type Options []Option
func (Options) option() {}
type (
pathFilter func(Path) bool
valueFilter struct {
in reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
)
type option struct {
typeFilter reflect.Type
pathFilters []pathFilter
valueFilters []valueFilter
// op is the operation to perform. If nil, then this acts as an ignore.
op interface{} // nil | *transformer | *comparer
}
func (option) | () {}
func (o option) String() string {
// TODO: Add information about the caller?
// TODO: Maintain the order that filters were added?
var ss []string
switch op := o.op.(type) {
case *transformer:
fn := getFuncName(op.fnc.Pointer())
ss = append(ss, fmt.Sprintf("Transformer(%s, %s)", op.name, fn))
case *comparer:
fn := getFuncName(op.fnc.Pointer())
ss = append(ss, fmt.Sprintf("Comparer(%s)", fn))
default:
ss = append(ss, "Ignore()")
}
for _, f := range o.pathFilters {
fn := getFuncName(reflect.ValueOf(f).Pointer())
ss = append(ss, fmt.Sprintf("FilterPath(%s)", fn))
}
for _, f := range o.valueFilters {
fn := getFuncName(f.fnc.Pointer())
ss = append(ss, fmt.Sprintf("FilterValues(%s)", fn))
}
return strings.Join(ss, "\n\t")
}
// getFuncName returns a short function name from the pointer.
// The string parsing logic works up until Go1.9.
func getFuncName(p uintptr) string {
fnc := runtime.FuncForPC(p)
if fnc == nil {
return "<unknown>"
}
name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
// Strip the package name from method name.
name = strings.TrimSuffix(name, ")-fm")
name = strings.TrimSuffix(name, ")·fm")
if i := strings.LastIndexByte(name, '('); i >= 0 {
methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
methodName = methodName[j+1:] // E.g., "myfunc"
}
name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
}
}
if i := strings.LastIndexByte(name, '/'); i >= 0 {
// Strip the package name.
name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
}
return name
}
// FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterPath(f func(Path) bool, opt Option) Option {
if f == nil {
panic("invalid path filter function")
}
switch opt := opt.(type) {
case Options:
var opts []Option
for _, o := range opt {
opts = append(opts, FilterPath(f, o)) // Append to slice copy
}
return Options(opts)
case option:
n := len(opt.pathFilters)
opt.pathFilters = append(opt.pathFilters[:n:n], f) // Append to copy
return opt
default:
panic(fmt.Sprintf("unknown option type: %T", opt))
}
}
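// Illustrative sketch (not part of the original source): a common FilterPath
// pattern pairs it with Ignore to skip all values of a given type. The use of
// time.Time below is an assumed example type, not something this package requires.
//
//    ignoreTimes := FilterPath(func(p Path) bool {
//        return p.Last().Type() == reflect.TypeOf(time.Time{})
//    }, Ignore())
//    // ignoreTimes can then be passed to Equal or Diff alongside other options.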
// FilterValues returns a new Option where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If the type of the values is not
// assignable to T, then this filter implicitly returns false.
//
// The filter function must be
// symmetric (i.e., agnostic to the order of the inputs) and
// deterministic (i.e., produces the same result when given the same inputs).
// If T is an interface, it is possible that f is called with two values with
// different concrete types that both implement T.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterValues(f interface{}, opt Option) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != valueFilterFunc || v.IsNil() {
panic(fmt.Sprintf("invalid values filter function: %T", f))
}
switch opt := opt.(type) {
case Options:
var opts []Option
for _, o := range opt {
opts = append(opts, FilterValues(f, o)) // Append to slice copy
}
return Options(opts)
case option:
n := len(opt.valueFilters)
vf := valueFilter{v.Type().In(0), v}
opt.valueFilters = append(opt.valueFilters[:n:n], vf) // Append to copy
return opt
default:
panic(fmt.Sprintf("unknown option type: %T", opt))
}
}
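// Illustrative sketch (not part of the original source): FilterValues narrows
// a fundamental option to pairs that satisfy the predicate. Here two NaN
// values compare as equal; the math import is assumed on the caller's side.
//
//    equateNaNs := FilterValues(
//        func(x, y float64) bool { return math.IsNaN(x) && math.IsNaN(y) },
//        Comparer(func(x, y float64) bool { return true }),
//    )
//    Equal([]float64{math.NaN()}, []float64{math.NaN()}, equateNaNs) // reports true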
// Ignore is an Option that causes all comparisons to be ignored.
// This value is intended to be combined with FilterPath or FilterValues.
// It is an error to pass an unfiltered Ignore option to Equal.
func Ignore() Option {
return option{}
}
// Transformer returns an Option that applies a transformation function that
// converts values of a certain type into that of another.
//
// The transformer f must be a function "func(T) R" that converts values of
// type T to those of type R and is implicitly filtered to input values
// assignable to T. The transformer must not mutate T in any way.
// If T and R are the same type, an additional filter must be applied to
// act as the base case to prevent an infinite recursion applying the same
// transform to itself (see the SortedSlice example).
//
// The name is a user provided label that is used as the Transform.Name in the
// transformation PathStep. If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != transformFunc || v.IsNil() {
panic(fmt.Sprintf("invalid transformer function: %T", f))
}
if name == "" {
name = "λ" // Lambda-symbol as place-holder for anonymous transformer
}
if !isValid(name) {
panic(fmt.Sprintf("invalid name: %q", name))
}
opt := option{op: &transformer{name, reflect.ValueOf(f)}}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
opt.typeFilter = ti
}
return opt
}
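// Illustrative sketch (not part of the original source): a Transformer whose
// input and output types differ, so no extra base-case filter is needed.
// Durations are compared through their second counts; time is an assumed import.
//
//    seconds := Transformer("Seconds", func(d time.Duration) float64 {
//        return d.Seconds()
//    })
//    Equal(time.Second, 1000*time.Millisecond, seconds) // reports true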
type transformer struct {
name string
fnc reflect.Value // func(T) R
}
// Comparer returns an Option that determines whether two values are equal
// to each other.
//
// The comparer f must be a function "func(T, T) bool" and is implicitly
// filtered to input values assignable to T. If T is an interface, it is
// possible that f is called with two values of different concrete types that
// both implement T.
//
// The equality function must be:
// • Symmetric: equal(x, y) == equal(y, x)
// • Deterministic: equal(x, y) == equal(x, y)
// • Pure: equal(x, y) does not modify x or y
func Comparer(f interface{}) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != equalFunc || v.IsNil() {
panic(fmt.Sprintf("invalid comparer function: %T", f))
}
opt := option{op: &comparer{v}}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
opt.typeFilter = ti
}
return opt
}
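// Illustrative sketch (not part of the original source): a Comparer that treats
// compiled regular expressions as equal when their source text matches, adding a
// nil-safe guard on top of the pattern shown in the AllowUnexported documentation.
//
//    regexpCmp := Comparer(func(x, y *regexp.Regexp) bool {
//        if x == nil || y == nil {
//            return x == nil && y == nil
//        }
//        return x.String() == y.String()
//    })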
type comparer struct {
fnc reflect.Value // func(T, T) bool
}
// AllowUnexported returns an Option that forcibly allows operations on
// unexported fields in certain structs, which are specified by passing in a
// value of each struct type.
//
// Users of this option must understand that comparing on unexported fields
// from external packages is not safe since changes in the internal
// implementation of some external package may cause the result of Equal
// to unexpectedly change. However, it may be valid to use this option on types
// defined in an internal package where the semantic meaning of an unexported
// field is in the control of the user.
//
// For some cases, a custom Comparer should be used instead that defines
// equality as a function of the public API of a type rather than the underlying
// unexported implementation.
//
// For example, the reflect.Type documentation defines equality to be determined
// by the == operator on the interface (essentially performing a shallow pointer
// comparison) and most attempts to compare *regexp.Regexp types are interested
// in only checking that the regular expression strings are equal.
// Both of these are accomplished using Comparers:
//
// Comparer(func(x, y reflect.Type) bool { return x == y })
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
//
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
// all unexported fields on specified struct types.
func AllowUnexported(types ...interface{}) Option {
if !supportAllowUnexported {
panic("AllowUnexported is not supported on App Engine Classic or GopherJS")
}
m := make(map[reflect.Type]bool)
for _, typ := range types {
t := reflect.TypeOf(typ)
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
m[t] = true
}
return visibleStructs(m)
}
type visibleStructs map[reflect.Type]bool
func (visibleStructs) option() {}
// reporter is an Option that configures how differences are reported.
type reporter interface {
// TODO: Not exported yet.
//
// Perhaps add PushStep and PopStep and change Report to only accept
// a PathStep instead of the full-path? Adding a PushStep and PopStep makes
// it clear that we are traversing the value tree in a depth-first-search
// manner, which has an effect on how values are printed.
Option
// Report is called for every comparison made and will be provided with
// the two values being compared, the equality result, and the
// current path in the value tree. It is possible for x or y to be an
// invalid reflect.Value if one of the values is non-existent;
// which is possible with maps and slices.
Report(x, y reflect.Value, eq bool, p Path)
}
| option | identifier_name |
options.go | // Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp
import (
"fmt"
"reflect"
"runtime"
"strings"
)
// Option configures for specific behavior of Equal and Diff. In particular,
// the fundamental Option functions (Ignore, Transformer, and Comparer),
// configure how equality is determined.
//
// The fundamental options may be composed with filters (FilterPath and
// FilterValues) to control the scope over which they are applied.
//
// The cmp/cmpopts package provides helper functions for creating options that
// may be used with Equal and Diff.
type Option interface {
// Prevent Option from being equivalent to interface{}, which provides
// a small type checking benefit by preventing Equal(opt, x, y).
option()
}
// Options is a list of Option values that also satisfies the Option interface.
// Helper comparison packages may return an Options value when packing multiple
// Option values into a single Option. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
// on all individual options held within.
type Options []Option
func (Options) option() {}
type (
pathFilter func(Path) bool
valueFilter struct {
in reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
)
type option struct {
typeFilter reflect.Type
pathFilters []pathFilter
valueFilters []valueFilter
// op is the operation to perform. If nil, then this acts as an ignore.
op interface{} // nil | *transformer | *comparer
}
func (option) option() {}
func (o option) String() string {
// TODO: Add information about the caller?
// TODO: Maintain the order that filters were added?
var ss []string
switch op := o.op.(type) {
case *transformer:
fn := getFuncName(op.fnc.Pointer())
ss = append(ss, fmt.Sprintf("Transformer(%s, %s)", op.name, fn))
case *comparer:
fn := getFuncName(op.fnc.Pointer())
ss = append(ss, fmt.Sprintf("Comparer(%s)", fn))
default:
ss = append(ss, "Ignore()")
}
for _, f := range o.pathFilters {
fn := getFuncName(reflect.ValueOf(f).Pointer())
ss = append(ss, fmt.Sprintf("FilterPath(%s)", fn))
}
for _, f := range o.valueFilters {
fn := getFuncName(f.fnc.Pointer())
ss = append(ss, fmt.Sprintf("FilterValues(%s)", fn))
}
return strings.Join(ss, "\n\t")
}
// getFuncName returns a short function name from the pointer.
// The string parsing logic works up until Go1.9.
func getFuncName(p uintptr) string | // FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterPath(f func(Path) bool, opt Option) Option {
if f == nil {
panic("invalid path filter function")
}
switch opt := opt.(type) {
case Options:
var opts []Option
for _, o := range opt {
opts = append(opts, FilterPath(f, o)) // Append to slice copy
}
return Options(opts)
case option:
n := len(opt.pathFilters)
opt.pathFilters = append(opt.pathFilters[:n:n], f) // Append to copy
return opt
default:
panic(fmt.Sprintf("unknown option type: %T", opt))
}
}
// FilterValues returns a new Option where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If the type of the values is not
// assignable to T, then this filter implicitly returns false.
//
// The filter function must be
// symmetric (i.e., agnostic to the order of the inputs) and
// deterministic (i.e., produces the same result when given the same inputs).
// If T is an interface, it is possible that f is called with two values with
// different concrete types that both implement T.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterValues(f interface{}, opt Option) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != valueFilterFunc || v.IsNil() {
panic(fmt.Sprintf("invalid values filter function: %T", f))
}
switch opt := opt.(type) {
case Options:
var opts []Option
for _, o := range opt {
opts = append(opts, FilterValues(f, o)) // Append to slice copy
}
return Options(opts)
case option:
n := len(opt.valueFilters)
vf := valueFilter{v.Type().In(0), v}
opt.valueFilters = append(opt.valueFilters[:n:n], vf) // Append to copy
return opt
default:
panic(fmt.Sprintf("unknown option type: %T", opt))
}
}
// Ignore is an Option that causes all comparisons to be ignored.
// This value is intended to be combined with FilterPath or FilterValues.
// It is an error to pass an unfiltered Ignore option to Equal.
func Ignore() Option {
return option{}
}
// Transformer returns an Option that applies a transformation function that
// converts values of a certain type into that of another.
//
// The transformer f must be a function "func(T) R" that converts values of
// type T to those of type R and is implicitly filtered to input values
// assignable to T. The transformer must not mutate T in any way.
// If T and R are the same type, an additional filter must be applied to
// act as the base case to prevent an infinite recursion applying the same
// transform to itself (see the SortedSlice example).
//
// The name is a user provided label that is used as the Transform.Name in the
// transformation PathStep. If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != transformFunc || v.IsNil() {
panic(fmt.Sprintf("invalid transformer function: %T", f))
}
if name == "" {
name = "λ" // Lambda-symbol as place-holder for anonymous transformer
}
if !isValid(name) {
panic(fmt.Sprintf("invalid name: %q", name))
}
opt := option{op: &transformer{name, reflect.ValueOf(f)}}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
opt.typeFilter = ti
}
return opt
}
type transformer struct {
name string
fnc reflect.Value // func(T) R
}
// Comparer returns an Option that determines whether two values are equal
// to each other.
//
// The comparer f must be a function "func(T, T) bool" and is implicitly
// filtered to input values assignable to T. If T is an interface, it is
// possible that f is called with two values of different concrete types that
// both implement T.
//
// The equality function must be:
// • Symmetric: equal(x, y) == equal(y, x)
// • Deterministic: equal(x, y) == equal(x, y)
// • Pure: equal(x, y) does not modify x or y
func Comparer(f interface{}) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != equalFunc || v.IsNil() {
panic(fmt.Sprintf("invalid comparer function: %T", f))
}
opt := option{op: &comparer{v}}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
opt.typeFilter = ti
}
return opt
}
type comparer struct {
fnc reflect.Value // func(T, T) bool
}
// AllowUnexported returns an Option that forcibly allows operations on
// unexported fields in certain structs, which are specified by passing in a
// value of each struct type.
//
// Users of this option must understand that comparing on unexported fields
// from external packages is not safe since changes in the internal
// implementation of some external package may cause the result of Equal
// to unexpectedly change. However, it may be valid to use this option on types
// defined in an internal package where the semantic meaning of an unexported
// field is in the control of the user.
//
// For some cases, a custom Comparer should be used instead that defines
// equality as a function of the public API of a type rather than the underlying
// unexported implementation.
//
// For example, the reflect.Type documentation defines equality to be determined
// by the == operator on the interface (essentially performing a shallow pointer
// comparison) and most attempts to compare *regexp.Regexp types are interested
// in only checking that the regular expression strings are equal.
// Both of these are accomplished using Comparers:
//
// Comparer(func(x, y reflect.Type) bool { return x == y })
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
//
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
// all unexported fields on specified struct types.
func AllowUnexported(types ...interface{}) Option {
if !supportAllowUnexported {
panic("AllowUnexported is not supported on App Engine Classic or GopherJS")
}
m := make(map[reflect.Type]bool)
for _, typ := range types {
t := reflect.TypeOf(typ)
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
m[t] = true
}
return visibleStructs(m)
}
type visibleStructs map[reflect.Type]bool
func (visibleStructs) option() {}
// reporter is an Option that configures how differences are reported.
type reporter interface {
// TODO: Not exported yet.
//
// Perhaps add PushStep and PopStep and change Report to only accept
// a PathStep instead of the full-path? Adding a PushStep and PopStep makes
// it clear that we are traversing the value tree in a depth-first-search
// manner, which has an effect on how values are printed.
Option
// Report is called for every comparison made and will be provided with
// the two values being compared, the equality result, and the
// current path in the value tree. It is possible for x or y to be an
// invalid reflect.Value if one of the values is non-existent;
// which is possible with maps and slices.
Report(x, y reflect.Value, eq bool, p Path)
}
| {
fnc := runtime.FuncForPC(p)
if fnc == nil {
return "<unknown>"
}
name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
// Strip the package name from method name.
name = strings.TrimSuffix(name, ")-fm")
name = strings.TrimSuffix(name, ")·fm")
if i := strings.LastIndexByte(name, '('); i >= 0 {
methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
methodName = methodName[j+1:] // E.g., "myfunc"
}
name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
}
}
if i := strings.LastIndexByte(name, '/'); i >= 0 {
// Strip the package name.
name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
}
return name
}
| identifier_body |
options.go | // Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp
| "runtime"
"strings"
)
// Option configures for specific behavior of Equal and Diff. In particular,
// the fundamental Option functions (Ignore, Transformer, and Comparer),
// configure how equality is determined.
//
// The fundamental options may be composed with filters (FilterPath and
// FilterValues) to control the scope over which they are applied.
//
// The cmp/cmpopts package provides helper functions for creating options that
// may be used with Equal and Diff.
type Option interface {
// Prevent Option from being equivalent to interface{}, which provides
// a small type checking benefit by preventing Equal(opt, x, y).
option()
}
// Options is a list of Option values that also satisfies the Option interface.
// Helper comparison packages may return an Options value when packing multiple
// Option values into a single Option. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
// on all individual options held within.
type Options []Option
func (Options) option() {}
type (
pathFilter func(Path) bool
valueFilter struct {
in reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
)
type option struct {
typeFilter reflect.Type
pathFilters []pathFilter
valueFilters []valueFilter
// op is the operation to perform. If nil, then this acts as an ignore.
op interface{} // nil | *transformer | *comparer
}
func (option) option() {}
func (o option) String() string {
// TODO: Add information about the caller?
// TODO: Maintain the order that filters were added?
var ss []string
switch op := o.op.(type) {
case *transformer:
fn := getFuncName(op.fnc.Pointer())
ss = append(ss, fmt.Sprintf("Transformer(%s, %s)", op.name, fn))
case *comparer:
fn := getFuncName(op.fnc.Pointer())
ss = append(ss, fmt.Sprintf("Comparer(%s)", fn))
default:
ss = append(ss, "Ignore()")
}
for _, f := range o.pathFilters {
fn := getFuncName(reflect.ValueOf(f).Pointer())
ss = append(ss, fmt.Sprintf("FilterPath(%s)", fn))
}
for _, f := range o.valueFilters {
fn := getFuncName(f.fnc.Pointer())
ss = append(ss, fmt.Sprintf("FilterValues(%s)", fn))
}
return strings.Join(ss, "\n\t")
}
// getFuncName returns a short function name from the pointer.
// The string parsing logic works up until Go1.9.
func getFuncName(p uintptr) string {
fnc := runtime.FuncForPC(p)
if fnc == nil {
return "<unknown>"
}
name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
// Strip the package name from method name.
name = strings.TrimSuffix(name, ")-fm")
name = strings.TrimSuffix(name, ")·fm")
if i := strings.LastIndexByte(name, '('); i >= 0 {
methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
methodName = methodName[j+1:] // E.g., "myfunc"
}
name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
}
}
if i := strings.LastIndexByte(name, '/'); i >= 0 {
// Strip the package name.
name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
}
return name
}
// FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterPath(f func(Path) bool, opt Option) Option {
if f == nil {
panic("invalid path filter function")
}
switch opt := opt.(type) {
case Options:
var opts []Option
for _, o := range opt {
opts = append(opts, FilterPath(f, o)) // Append to slice copy
}
return Options(opts)
case option:
n := len(opt.pathFilters)
opt.pathFilters = append(opt.pathFilters[:n:n], f) // Append to copy
return opt
default:
panic(fmt.Sprintf("unknown option type: %T", opt))
}
}
// FilterValues returns a new Option where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If the type of the values is not
// assignable to T, then this filter implicitly returns false.
//
// The filter function must be
// symmetric (i.e., agnostic to the order of the inputs) and
// deterministic (i.e., produces the same result when given the same inputs).
// If T is an interface, it is possible that f is called with two values with
// different concrete types that both implement T.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterValues(f interface{}, opt Option) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != valueFilterFunc || v.IsNil() {
panic(fmt.Sprintf("invalid values filter function: %T", f))
}
switch opt := opt.(type) {
case Options:
var opts []Option
for _, o := range opt {
opts = append(opts, FilterValues(f, o)) // Append to slice copy
}
return Options(opts)
case option:
n := len(opt.valueFilters)
vf := valueFilter{v.Type().In(0), v}
opt.valueFilters = append(opt.valueFilters[:n:n], vf) // Append to copy
return opt
default:
panic(fmt.Sprintf("unknown option type: %T", opt))
}
}
// Ignore is an Option that causes all comparisons to be ignored.
// This value is intended to be combined with FilterPath or FilterValues.
// It is an error to pass an unfiltered Ignore option to Equal.
func Ignore() Option {
return option{}
}
// Transformer returns an Option that applies a transformation function that
// converts values of a certain type into that of another.
//
// The transformer f must be a function "func(T) R" that converts values of
// type T to those of type R and is implicitly filtered to input values
// assignable to T. The transformer must not mutate T in any way.
// If T and R are the same type, an additional filter must be applied to
// act as the base case to prevent an infinite recursion applying the same
// transform to itself (see the SortedSlice example).
//
// The name is a user provided label that is used as the Transform.Name in the
// transformation PathStep. If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != transformFunc || v.IsNil() {
panic(fmt.Sprintf("invalid transformer function: %T", f))
}
if name == "" {
name = "λ" // Lambda-symbol as place-holder for anonymous transformer
}
if !isValid(name) {
panic(fmt.Sprintf("invalid name: %q", name))
}
opt := option{op: &transformer{name, reflect.ValueOf(f)}}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
opt.typeFilter = ti
}
return opt
}
type transformer struct {
name string
fnc reflect.Value // func(T) R
}
// Comparer returns an Option that determines whether two values are equal
// to each other.
//
// The comparer f must be a function "func(T, T) bool" and is implicitly
// filtered to input values assignable to T. If T is an interface, it is
// possible that f is called with two values of different concrete types that
// both implement T.
//
// The equality function must be:
// • Symmetric: equal(x, y) == equal(y, x)
// • Deterministic: equal(x, y) == equal(x, y)
// • Pure: equal(x, y) does not modify x or y
func Comparer(f interface{}) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != equalFunc || v.IsNil() {
panic(fmt.Sprintf("invalid comparer function: %T", f))
}
opt := option{op: &comparer{v}}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
opt.typeFilter = ti
}
return opt
}
type comparer struct {
fnc reflect.Value // func(T, T) bool
}
// AllowUnexported returns an Option that forcibly allows operations on
// unexported fields in certain structs, which are specified by passing in a
// value of each struct type.
//
// Users of this option must understand that comparing on unexported fields
// from external packages is not safe since changes in the internal
// implementation of some external package may cause the result of Equal
// to unexpectedly change. However, it may be valid to use this option on types
// defined in an internal package where the semantic meaning of an unexported
// field is in the control of the user.
//
// For some cases, a custom Comparer should be used instead that defines
// equality as a function of the public API of a type rather than the underlying
// unexported implementation.
//
// For example, the reflect.Type documentation defines equality to be determined
// by the == operator on the interface (essentially performing a shallow pointer
// comparison) and most attempts to compare *regexp.Regexp types are interested
// in only checking that the regular expression strings are equal.
// Both of these are accomplished using Comparers:
//
// Comparer(func(x, y reflect.Type) bool { return x == y })
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
//
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
// all unexported fields on specified struct types.
func AllowUnexported(types ...interface{}) Option {
if !supportAllowUnexported {
panic("AllowUnexported is not supported on App Engine Classic or GopherJS")
}
m := make(map[reflect.Type]bool)
for _, typ := range types {
t := reflect.TypeOf(typ)
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
m[t] = true
}
return visibleStructs(m)
}
type visibleStructs map[reflect.Type]bool
func (visibleStructs) option() {}
// reporter is an Option that configures how differences are reported.
type reporter interface {
// TODO: Not exported yet.
//
// Perhaps add PushStep and PopStep and change Report to only accept
// a PathStep instead of the full-path? Adding a PushStep and PopStep makes
// it clear that we are traversing the value tree in a depth-first-search
// manner, which has an effect on how values are printed.
Option
// Report is called for every comparison made and will be provided with
// the two values being compared, the equality result, and the
// current path in the value tree. It is possible for x or y to be an
// invalid reflect.Value if one of the values is non-existent;
// which is possible with maps and slices.
Report(x, y reflect.Value, eq bool, p Path)
} | import (
"fmt"
"reflect" | random_line_split |
options.go | // Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp
import (
"fmt"
"reflect"
"runtime"
"strings"
)
// Option configures for specific behavior of Equal and Diff. In particular,
// the fundamental Option functions (Ignore, Transformer, and Comparer),
// configure how equality is determined.
//
// The fundamental options may be composed with filters (FilterPath and
// FilterValues) to control the scope over which they are applied.
//
// The cmp/cmpopts package provides helper functions for creating options that
// may be used with Equal and Diff.
type Option interface {
// Prevent Option from being equivalent to interface{}, which provides
// a small type checking benefit by preventing Equal(opt, x, y).
option()
}
// Options is a list of Option values that also satisfies the Option interface.
// Helper comparison packages may return an Options value when packing multiple
// Option values into a single Option. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
// on all individual options held within.
type Options []Option
func (Options) option() {}
type (
pathFilter func(Path) bool
valueFilter struct {
in reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
)
type option struct {
typeFilter reflect.Type
pathFilters []pathFilter
valueFilters []valueFilter
// op is the operation to perform. If nil, then this acts as an ignore.
op interface{} // nil | *transformer | *comparer
}
func (option) option() {}
func (o option) String() string {
// TODO: Add information about the caller?
// TODO: Maintain the order that filters were added?
var ss []string
switch op := o.op.(type) {
case *transformer:
fn := getFuncName(op.fnc.Pointer())
ss = append(ss, fmt.Sprintf("Transformer(%s, %s)", op.name, fn))
case *comparer:
fn := getFuncName(op.fnc.Pointer())
ss = append(ss, fmt.Sprintf("Comparer(%s)", fn))
default:
ss = append(ss, "Ignore()")
}
for _, f := range o.pathFilters {
fn := getFuncName(reflect.ValueOf(f).Pointer())
ss = append(ss, fmt.Sprintf("FilterPath(%s)", fn))
}
for _, f := range o.valueFilters {
fn := getFuncName(f.fnc.Pointer())
ss = append(ss, fmt.Sprintf("FilterValues(%s)", fn))
}
return strings.Join(ss, "\n\t")
}
// getFuncName returns a short function name from the pointer.
// The string parsing logic works up until Go1.9.
func getFuncName(p uintptr) string {
fnc := runtime.FuncForPC(p)
if fnc == nil |
name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
// Strip the package name from method name.
name = strings.TrimSuffix(name, ")-fm")
name = strings.TrimSuffix(name, ")·fm")
if i := strings.LastIndexByte(name, '('); i >= 0 {
methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
methodName = methodName[j+1:] // E.g., "myfunc"
}
name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
}
}
if i := strings.LastIndexByte(name, '/'); i >= 0 {
// Strip the package name.
name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
}
return name
}
// FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterPath(f func(Path) bool, opt Option) Option {
if f == nil {
panic("invalid path filter function")
}
switch opt := opt.(type) {
case Options:
var opts []Option
for _, o := range opt {
opts = append(opts, FilterPath(f, o)) // Append to slice copy
}
return Options(opts)
case option:
n := len(opt.pathFilters)
opt.pathFilters = append(opt.pathFilters[:n:n], f) // Append to copy
return opt
default:
panic(fmt.Sprintf("unknown option type: %T", opt))
}
}
// FilterValues returns a new Option where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If the type of the values is not
// assignable to T, then this filter implicitly returns false.
//
// The filter function must be
// symmetric (i.e., agnostic to the order of the inputs) and
// deterministic (i.e., produces the same result when given the same inputs).
// If T is an interface, it is possible that f is called with two values with
// different concrete types that both implement T.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterValues(f interface{}, opt Option) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != valueFilterFunc || v.IsNil() {
panic(fmt.Sprintf("invalid values filter function: %T", f))
}
switch opt := opt.(type) {
case Options:
var opts []Option
for _, o := range opt {
opts = append(opts, FilterValues(f, o)) // Append to slice copy
}
return Options(opts)
case option:
n := len(opt.valueFilters)
vf := valueFilter{v.Type().In(0), v}
opt.valueFilters = append(opt.valueFilters[:n:n], vf) // Append to copy
return opt
default:
panic(fmt.Sprintf("unknown option type: %T", opt))
}
}
// Ignore is an Option that causes all comparisons to be ignored.
// This value is intended to be combined with FilterPath or FilterValues.
// It is an error to pass an unfiltered Ignore option to Equal.
func Ignore() Option {
return option{}
}
// Transformer returns an Option that applies a transformation function that
// converts values of a certain type into that of another.
//
// The transformer f must be a function "func(T) R" that converts values of
// type T to those of type R and is implicitly filtered to input values
// assignable to T. The transformer must not mutate T in any way.
// If T and R are the same type, an additional filter must be applied to
// act as the base case to prevent an infinite recursion applying the same
// transform to itself (see the SortedSlice example).
//
// The name is a user provided label that is used as the Transform.Name in the
// transformation PathStep. If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != transformFunc || v.IsNil() {
panic(fmt.Sprintf("invalid transformer function: %T", f))
}
if name == "" {
name = "λ" // Lambda-symbol as place-holder for anonymous transformer
}
if !isValid(name) {
panic(fmt.Sprintf("invalid name: %q", name))
}
opt := option{op: &transformer{name, reflect.ValueOf(f)}}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
opt.typeFilter = ti
}
return opt
}
type transformer struct {
name string
fnc reflect.Value // func(T) R
}
// Comparer returns an Option that determines whether two values are equal
// to each other.
//
// The comparer f must be a function "func(T, T) bool" and is implicitly
// filtered to input values assignable to T. If T is an interface, it is
// possible that f is called with two values of different concrete types that
// both implement T.
//
// The equality function must be:
// • Symmetric: equal(x, y) == equal(y, x)
// • Deterministic: equal(x, y) == equal(x, y)
// • Pure: equal(x, y) does not modify x or y
func Comparer(f interface{}) Option {
v := reflect.ValueOf(f)
if functionType(v.Type()) != equalFunc || v.IsNil() {
panic(fmt.Sprintf("invalid comparer function: %T", f))
}
opt := option{op: &comparer{v}}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
opt.typeFilter = ti
}
return opt
}
type comparer struct {
fnc reflect.Value // func(T, T) bool
}
// AllowUnexported returns an Option that forcibly allows operations on
// unexported fields in certain structs, which are specified by passing in a
// value of each struct type.
//
// Users of this option must understand that comparing on unexported fields
// from external packages is not safe since changes in the internal
// implementation of some external package may cause the result of Equal
// to unexpectedly change. However, it may be valid to use this option on types
// defined in an internal package where the semantic meaning of an unexported
// field is in the control of the user.
//
// For some cases, a custom Comparer should be used instead that defines
// equality as a function of the public API of a type rather than the underlying
// unexported implementation.
//
// For example, the reflect.Type documentation defines equality to be determined
// by the == operator on the interface (essentially performing a shallow pointer
// comparison) and most attempts to compare *regexp.Regexp types are interested
// in only checking that the regular expression strings are equal.
// Both of these are accomplished using Comparers:
//
// Comparer(func(x, y reflect.Type) bool { return x == y })
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
//
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
// all unexported fields on specified struct types.
func AllowUnexported(types ...interface{}) Option {
if !supportAllowUnexported {
panic("AllowUnexported is not supported on App Engine Classic or GopherJS")
}
m := make(map[reflect.Type]bool)
for _, typ := range types {
t := reflect.TypeOf(typ)
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
m[t] = true
}
return visibleStructs(m)
}
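// visibleStructs records the struct types whose unexported fields Equal is
// allowed to introspect.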
type visibleStructs map[reflect.Type]bool
func (visibleStructs) option() {}
// reporter is an Option that configures how differences are reported.
type reporter interface {
// TODO: Not exported yet.
//
// Perhaps add PushStep and PopStep and change Report to only accept
// a PathStep instead of the full-path? Adding a PushStep and PopStep makes
// it clear that we are traversing the value tree in a depth-first-search
// manner, which has an effect on how values are printed.
Option
// Report is called for every comparison made and will be provided with
// the two values being compared, the equality result, and the
// current path in the value tree. It is possible for x or y to be an
// invalid reflect.Value if one of the values is non-existent;
// which is possible with maps and slices.
Report(x, y reflect.Value, eq bool, p Path)
}
// combat.rs
use crate::components::*;
use crate::map::*;
use crate::NewState;
use bracket_lib::prelude::*;
use legion::systems::CommandBuffer;
use legion::*;
use std::collections::HashSet;
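/// Fires the player's ranged weapon (power 20) at the currently targeted entity,
/// or falls back to waiting if nothing is targeted.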
pub fn player_open_fire_at_target(ecs: &mut World, map: &mut Map) -> NewState {
let mut player_entity = None;
let mut target = None;
<(Entity, &Player, &Targeting)>::query()
.iter(ecs)
.for_each(|(entity, _, targeting)| {
target = targeting.current_target;
player_entity = Some(*entity);
});
// If there's nothing to fire at, return to waiting
if target.is_none() {
return NewState::Wait;
}
ranged_attack(ecs, map, player_entity.unwrap(), target.unwrap(), 20);
NewState::Player
}
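/// Traces a shot from `attacker` towards `victim`, damaging anything with a
/// Health component along the path, smearing blood onto the map, and finally
/// spawning a Projectile entity so the shot can be rendered.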
pub fn ranged_attack(
ecs: &mut World,
map: &mut Map,
attacker: Entity,
victim: Entity,
ranged_power: i32,
) {
let mut attacker_pos = None;
let mut victim_pos = None;
// Find positions for the start and end
if let Ok(ae) = ecs.entry_ref(attacker) {
if let Ok(pos) = ae.get_component::<Position>() {
attacker_pos = Some(pos.clone());
}
}
if let Ok(ae) = ecs.entry_ref(victim) {
if let Ok(pos) = ae.get_component::<Position>() {
victim_pos = Some(pos.clone());
}
}
if attacker_pos.is_none() || victim_pos.is_none() {
return;
}
let attacker_pos = attacker_pos.unwrap();
let victim_pos = victim_pos.unwrap();
// Set state for the projectile path
let mut power = ranged_power;
let mut range = 0;
let mut projectile_path = Vec::new();
let mut splatter = None;
let mut commands = CommandBuffer::new(ecs);
let current_layer = attacker_pos.layer;
// Map of entity locations. Rebuilt every time because it might change.
let pos_map = <(&Position, &Health)>::query()
.iter(ecs)
.map(|(pos, _)| pos.pt)
.collect::<HashSet<Point>>();
// Plot the initial trajectory
line2d_bresenham(attacker_pos.pt, victim_pos.pt)
.iter()
.skip(1)
.for_each(|pt| {
projectile_path.push(*pt);
if pos_map.contains(&pt) {
power -=
hit_tile_contents(ecs, *pt, current_layer, &mut commands, &mut splatter, power);
if power < 1 {
power = 1;
range += 200;
}
}
if let Some(bsplatter) = &mut splatter {
let idx = map.get_current().point2d_to_index(*pt);
map.get_current_mut().tiles[idx].color.fg = bsplatter.to_rgba(1.0);
bsplatter.r = f32::max(0.0, bsplatter.r - 0.1);
bsplatter.g = f32::max(0.0, bsplatter.g - 0.1);
bsplatter.b = f32::max(0.0, bsplatter.b - 0.1);
if bsplatter.r + bsplatter.g + bsplatter.b < 0.1 {
splatter = None;
}
}
range += 1;
if range > 5 {
power -= 1;
if power < 1 {
power = 1;
}
}
});
// The trajectory can continue if we have power left
use ultraviolet::Vec2;
let mut projectile_pos: Vec2 = Vec2::new(victim_pos.pt.x as f32, victim_pos.pt.y as f32);
let slope = (projectile_pos - Vec2::new(attacker_pos.pt.x as f32, attacker_pos.pt.y as f32))
.normalized();
while range < 25 && power > 0 {
projectile_pos += slope;
let pt = Point::new(projectile_pos.x as i32, projectile_pos.y as i32);
projectile_path.push(pt);
if pos_map.contains(&pt) {
power -= hit_tile_contents(ecs, pt, current_layer, &mut commands, &mut splatter, power);
if power < 1 {
power = 1;
range += 200;
}
}
if let Some(bsplatter) = &mut splatter {
let idx = map.get_current().point2d_to_index(pt);
map.get_current_mut().tiles[idx].color.fg = bsplatter.to_rgba(1.0);
bsplatter.r = f32::max(0.0, bsplatter.r - 0.1);
bsplatter.g = f32::max(0.0, bsplatter.g - 0.1);
bsplatter.b = f32::max(0.0, bsplatter.b - 0.1);
if bsplatter.r + bsplatter.g + bsplatter.b < 0.1 {
splatter = None;
}
}
let idx = map.get_current().point2d_to_index(pt);
if map.get_current().tiles[idx].tile_type == TileType::Wall {
range += 100;
power = 0;
}
range += 1;
if range > 5 {
power -= 1;
if power < 1 {
power = 1;
range += 100;
}
}
}
commands.push((
Projectile {
path: projectile_path,
layer: current_layer as usize,
},
Glyph {
glyph: to_cp437('*'),
color: ColorPair::new(RED, BLACK),
},
));
commands.flush(ecs);
}
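/// Applies damage to every entity with a Health component standing on `pt` in
/// `layer`. Entities that die and carry an Explosive component queue a Boom.
/// Returns the amount of stopping power the projectile loses at this tile.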
pub fn hit_tile_contents(
ecs: &mut World,
pt: Point,
layer: u32,
commands: &mut CommandBuffer,
splatter: &mut Option<RGB>,
power: i32,
) -> i32 {
let mut rng_lock = crate::RNG.lock();
let rng = rng_lock.as_mut().unwrap();
let mut power_loss = 0;
let mut dead_entities = Vec::new();
<(Entity, &Position, &mut Health)>::query()
.iter_mut(ecs)
.filter(|(_, pos, _)| pos.layer == layer && pos.pt == pt)
.for_each(|(entity, _, hp)| {
power_loss += hp.current;
if power_loss < 0 {
power_loss = 0;
}
let damage = i32::max(0, power + rng.roll_dice(1, 4) - 2);
//println!("{}", damage);
hp.current -= damage;
if hp.current < 0 {
hp.current = 0;
dead_entities.push(*entity);
}
});
dead_entities.iter().for_each(|e| {
if let Ok(er) = ecs.entry_ref(*e) {
if let Ok(boom) = er.get_component::<Explosive>() {
if let Ok(pos) = er.get_component::<Position>() {
commands.push((
Position::with_pt(pos.pt, pos.layer),
Boom { range: boom.range },
));
}
}
}
});
kill_things(ecs, commands, dead_entities, splatter);
power_loss
}
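/// Resolves a melee attack: both combatants must have positions on the same
/// layer and be within 1.5 tiles, then `melee_power` is subtracted from the
/// victim's hit points and their blood is splattered on the map.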
pub fn melee(ecs: &mut World, map: &mut Map, attacker: Entity, victim: Entity, melee_power: i32) {
// Check range and validity
let mut attacker_pos = None;
let mut defender_pos = None;
if let Ok(e) = ecs.entry_ref(attacker) {
if let Ok(pos) = e.get_component::<Position>() {
attacker_pos = Some(*pos);
}
}
if let Ok(e) = ecs.entry_ref(victim) {
if let Ok(pos) = e.get_component::<Position>() {
defender_pos = Some(*pos);
}
}
if attacker_pos.is_none() || defender_pos.is_none() {
return; // Bail out - invalid data arrived
}
let apos = attacker_pos.unwrap();
let dpos = defender_pos.unwrap();
if apos.layer != dpos.layer {
return; // Bail out - can't attack across layers
}
let d = DistanceAlg::Pythagoras.distance2d(apos.pt, dpos.pt);
if d > 1.5 {
return; // Too far away, bail
}
// Inflict damage upon the hapless victim
let mut dead_entities = Vec::new();
if let Ok(mut v) = ecs.entry_mut(victim) {
if let Ok(hp) = v.get_component_mut::<Health>() {
hp.current = i32::max(0, hp.current - melee_power);
if hp.current == 0 {
dead_entities.push(victim);
}
}
if let Ok(blood) = v.get_component::<Blood>() {
let idx = map.get_layer(dpos.layer as usize).point2d_to_index(dpos.pt);
map.get_layer_mut(dpos.layer as usize).tiles[idx].color.fg = blood.0.into();
}
}
// If necessary, kill them.
let mut commands = CommandBuffer::new(ecs);
let mut splatter = None;
kill_things(ecs, &mut commands, dead_entities, &mut splatter);
// Splatter blood. It's good for you.
}
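/// Handles the bookkeeping for freshly dead entities: stats, corpse glyph and
/// name, blood splatter, and removal of components they no longer need.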
fn kill_things(
ecs: &mut World,
commands: &mut CommandBuffer,
dead_entities: Vec<Entity>,
splatter: &mut Option<RGB>,
) {
dead_entities.iter().for_each(|entity| {
crate::stats::record_death();
let mut was_decor = false;
let mut was_player = false;
if let Ok(mut er) = ecs.entry_mut(*entity) {
let mut was_colonist = false;
if let Ok(_colonist) = er.get_component_mut::<ColonistStatus>() {
commands.add_component(*entity, ColonistStatus::DiedAfterStart);
was_colonist = true;
}
if let Ok(g) = er.get_component_mut::<Glyph>() {
g.color.bg = DARK_RED.into();
g.color.fg = DARK_GRAY.into();
}
if let Ok(n) = er.get_component_mut::<Name>() {
n.0 = format!("Corpse: {}", n.0);
}
if was_colonist {
if let Ok(d) = er.get_component_mut::<Description>() {
let mut rng = RandomNumberGenerator::new();
if rng.range(0, 10) < 5 {
d.0 = format!(
"{} They left behind a spouse and {} children.",
d.0,
rng.range(1, 8)
);
}
}
}
if er.get_component::<Hostile>().is_ok() {
crate::stats::record_monster_death();
}
if let Ok(b) = er.get_component::<Blood>() {
*splatter = Some(b.0);
}
if let Ok(_) = er.get_component::<SetDecoration>() {
was_decor = true;
}
if let Ok(_) = er.get_component::<Player>() {
was_player = true;
}
}
if !was_player {
commands.remove_component::<Health>(*entity);
commands.remove_component::<Active>(*entity);
commands.remove_component::<CanBeActivated>(*entity);
commands.remove_component::<Blood>(*entity);
commands.remove_component::<Targetable>(*entity);
commands.remove_component::<Explosive>(*entity);
commands.remove_component::<TimedEvent>(*entity);
}
if was_decor {
crate::stats::record_prop_death();
commands.remove_component::<Glyph>(*entity);
commands.remove_component::<Description>(*entity);
}
});
}
// tutor-details.component.ts
import { Component, OnInit, OnDestroy } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { Subscription } from 'rxjs/Subscription';
import { Meteor } from 'meteor/meteor';
import { MeteorObservable } from 'meteor-rxjs';
import { InjectUser } from "angular2-meteor-accounts-ui";
import { Router } from '@angular/router';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
import { CreditCardValidator } from 'ng2-cc-library';
import { Requests } from '../../../../both/collections/requests.collection';
import { ActivatedRoute } from '@angular/router';
import { Request } from '../../../../both/models/request.model';
import 'rxjs/add/operator/map';
import { Users } from '../../../../both/collections/users.collection';
import { Classes } from '../../../../both/collections/classes.collection';
import { Class_ } from '../../../../both/models/class.model';
import { Tutors } from '../../../../both/collections/tutors.collection';
import { Tutor } from '../../../../both/models/tutor.model';
import { Accounts } from 'meteor/accounts-base';
import style from './tutor-details.component.scss';
import template from './tutor-details.component.html';
import {IMyOptions, IMyDateModel} from 'mydatepicker';
Meteor.startup(() => {
Accounts.ui.config({requestPermissions:{google:['https://www.googleapis.com/auth/calendar']}, forceApprovalPrompt: {google: true}, requestOfflineToken: {google: true}});
Stripe.setPublishableKey(Meteor.settings.public.stripe.livePublishableKey);
var handler = StripeCheckout.configure({
key: Meteor.settings.public.stripe.testPublishableKey,
token: function(token) {}
});
});
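// Booking page for a single tutor: shows the tutor's weekly availability,
// lets a logged-in student pick an hour slot, takes payment through Stripe,
// and can add the booked class to the student's Google Calendar.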
@Component({
selector: 'tutor-details',
template,
styles: [ style ]
})
@InjectUser('user')
export class TutorDetailsComponentUser implements OnInit, OnDestroy {
loggedIn:boolean=true;
user_skype_email: string;
today: Date = new Date();
today_show: Date = new Date();
tutorId: string;
slot: number;
color: string[]= new Array(24);
day: number=1;
tutorAsUserId: string;
tutor: Tutor;
paramsSub: Subscription;
imagesSubs: Subscription;
classesSub: Subscription;
reqSub: Subscription;
tutorSub: Subscription;
requests: Observable<Request[]>;
mailtoTutor: string;
tutor_user_email: string;
class: Class_;
tutorClasses: Observable<Class_[]>;
user: Meteor.User;
checkout: boolean=false;
a_day: number[] = new Array(24);
tutorSchedule: number[][] = new Array();
colorsSched: string[][] = new Array();
range_for_disp: number[] = new Array(12);
amount: number=0;
checkDetails: string[]=new Array(3);
payment_form: FormGroup;
payment_form_2: FormGroup;
submitted: boolean = false;
g_calendar: boolean=false;
// cc
cardNumber: string;
expiryMonth: string;
expiryYear: string;
cvc: string;
constructor(
private router: Router,
private formBuilder: FormBuilder,
private route: ActivatedRoute
  ) {}
private myDatePickerOptions: IMyOptions = {
// other options...
dateFormat: 'dd.mm.yyyy',
inline: true,
disableDateRanges: [{ begin: {year: this.today.getFullYear(), month: this.today.getMonth()-2, day: this.today.getDate()}, end: {year: this.today.getFullYear(),
month: this.today.getMonth()+1, day: this.today.getDate()-1} },{ begin: {year: this.today.getFullYear(),month: this.today.getMonth()+1, day: this.today.getDate()+7},
end: {year: this.today.getFullYear(),month: this.today.getMonth()+2, day: this.today.getDate()} }]
};
// Initialized to specific date (09.10.2018).
private model: Object = { date: { year: 2018, month: 10, day: 9 } };
ngOnInit() {
console.log(Meteor.userId());
if(!Meteor.userId()){
this.loggedIn=false;
Bert.alert( 'You need to be logged in to view this page', 'danger', 'fixed-bottom' );
      this.router.navigate(['/login']);
}
this.imagesSubs = MeteorObservable.subscribe('images').subscribe();
this.payment_form_2 = this.formBuilder.group({
creditCard: ['', [<any>CreditCardValidator.validateCCNumber]],
expDate: ['', [<any>CreditCardValidator.validateExpDate]],
cvc: ['', [<any>Validators.required, <any>Validators.minLength(3), <any>Validators.maxLength(4)]]
});
this.payment_form = this.formBuilder.group({
cardNumber: ['', Validators.required],
expiryMonth: ['', Validators.required],
expiryYear: ['', Validators.required],
cvc: ['', Validators.required]
});
for (var i = 0; i < 24; i++) {
this.color[i]='green';
}
    for (var i = 0; i < 7; i++) {
      // Give each day its own copy; sharing one array would make every day's colours change together.
      this.colorsSched[i] = this.color.slice();
    }
this.paramsSub = this.route.params
.map(params => params['tutorId'])
.subscribe(tutorId => {
this.tutorId = tutorId;
if (this.tutorSub) {
this.tutorSub.unsubscribe();
}
});
this.tutorSub = MeteorObservable.subscribe('tutors').subscribe(() => {
this.tutor=Tutors.findOne(this.tutorId);
this.tutorAsUserId=this.tutor.userId;
this.tutorSchedule=this.tutor.times;
this.amount = this.tutor.hourly_rating;
let _MS_PER_DAY = 1000 * 60 * 60 * 24;
let utc1 = Date.UTC(this.today.getFullYear(), this.today.getMonth(), this.today.getDate());
let utc2 = Date.UTC(this.tutor.lastUpdateDate.getFullYear(), this.tutor.lastUpdateDate.getMonth(), this.tutor.lastUpdateDate.getDate());
let last_update_diff = Math.floor((utc2 - utc1) / _MS_PER_DAY);
for (var i = 0; i < last_update_diff; i++) {
for(var j = 0; j < 24; j++) {
this.colorsSched[i][j]='blue';
}
}
for (var i = last_update_diff; i < 7; i++) {
for(var j = 0; j < 24; j++) {
// console.log(this.tutorSchedule[i][j]);
if(this.tutorSchedule[i][j]==1){
this.colorsSched[i][j]='blue';
}else{
this.colorsSched[i][j]='green';
}
}
}
// console.log(this.colorsSched);
});
console.log(this.colorsSched);
this.tutorSub = MeteorObservable.subscribe('users').subscribe(() => {
this.tutor_user_email=Users.findOne(this.tutorAsUserId).emails[0].address;
this.mailtoTutor="mailto:"+ this.tutor_user_email;
});
//TODO only find classes that this tutor do 34064745
this.classesSub = MeteorObservable.subscribe('classes').subscribe(() => {
this.tutorClasses = Classes.find({tutorId: {$eq: this.tutorAsUserId} });
});
}
get isMe(): boolean {
if(this.user)
return this.user._id === this.tutorAsUserId;
return false;
}
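  // Flips the availability state and colour of the clicked hour slot on the
  // currently selected day.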
toggleSlot(i: number): void {
this.today_show.setHours(i,0,0);
this.slot = i;
console.log(this.tutorSchedule[this.day][i]);
if(this.tutorSchedule[this.day][i]==0){
this.tutorSchedule[this.day][i]=1;
this.colorsSched[this.day][i]='green';
}else if(this.tutorSchedule[this.day][i]==1){
this.tutorSchedule[this.day][i]=0;
this.colorsSched[this.day][i]='blue';
}
// else if(this.colorsSched[this.day][i]='blue'){
// this.colorsSched[this.day][i]='green';
// }
}
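  // Works out how many days ahead the picked date is and refreshes that day's
  // slot colours from the tutor's schedule.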
onDateChanged(event: IMyDateModel) {
let _MS_PER_DAY = 1000 * 60 * 60 * 24;
let utc1 = Date.UTC(this.today.getFullYear(), this.today.getMonth(), this.today.getDate());
let utc2 = Date.UTC(event.jsdate.getFullYear(), event.jsdate.getMonth(), event.jsdate.getDate());
let diff = Math.floor((utc2 - utc1) / _MS_PER_DAY);
let i = diff;
this.day = diff;
this.today_show.setDate(this.today.getDate()+i);
for(var j = 0; j < 24; j++) {
// console.log(this.tutorSchedule[i][j]);
if(this.tutorSchedule[i][j]==1){
this.colorsSched[i][j]='green';
}else{
this.colorsSched[i][j]='red';
}
}
}
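  // Tokenises the card details with Stripe and asks the server to charge the
  // booking amount.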
CheckoutFn():void{
if (this.payment_form.valid) {
console.log('payment form valid')
Stripe.card.createToken({
number: this.payment_form.value.cardNumber,
cvc: this.payment_form.value.cvc,
exp_month: this.payment_form.value.expiryMonth,
exp_year: this.payment_form.value.expiryYear
      }, (status, response) => {
        console.log(status);
        if (response.error) {
          console.log(response.error.message);
          return;
        }
        const stripeToken = response.id;
        Meteor.call('chargeCard', stripeToken, this.amount);
});
}
}
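  // Returns the UI from the payment step back to slot selection.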
step1(): void{
this.checkout = false;
}
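  // Marks the chosen slot as reserved and switches the view to the checkout step.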
GoToCheckOut(): void{
// if(!this.user_skype_email){
// alert('Please enter your skype username so the teatch can contact you :)');
// }else{
// alert('you are now registered in this class :)');
// }
console.log(this.slot);
console.log(this.tutorSchedule)
this.tutorSchedule[this.day][this.slot]=2;
// Tutors.update(this.tutorId, {
// $set:{times: this.tutorSchedule }
// });
this.checkout=true;
}
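  // Validates the payment form, creates a Stripe card token, charges it on the
  // server, and redirects to the thank-you page.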
onSubmit() {
let success= false;
let free= true;
if (this.payment_form_2.valid) {
this.submitted = true;
let m = this.payment_form_2.value.expDate[0]+this.payment_form_2.value.expDate[1];
let y = this.payment_form_2.value.expDate[5]+this.payment_form_2.value.expDate[6];
      Bert.alert('before payment','success');
Stripe.card.createToken({
number: this.payment_form_2.value.creditCard,
cvc: this.payment_form_2.value.cvc,
exp_month: m,
exp_year: y
      }, (status, response) => {
        console.log(status);
        console.log(response);
        const stripeToken = response.id;
        // Meteor.call('chargeCard', stripeToken, this.amount);
        Meteor.call('chargeCard', stripeToken, 3);
});
Bert.alert('Thanks for booking my class','success');
this.router.navigate(['/thanks']);
// let id = Classes.findOne({
// userId:{
// $elemMatch:{$eq: Meteor.userId()}
// }
// })._id;
// console.log(id);
// if(id){
// free=false;
// }
}
// if(free||success){
// //add the user skype user name to the class
// Classes.insert(Object.assign({ userId: Meteor.userId(),
// tutorId: this.tutorId,startDate: this.today_show, userSkype: this.user_skype_email}));
// this.router.navigate(['/thanks']);
// }else{
// Bert.alert('Payment failed, or you are already registered in another classs', 'danger');
// }
// this.g_calendar=true;
}
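  // Creates a Google Calendar event for the booked class via the GoogleApi wrapper.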
addToCalendnaer():void {
// let dateString = "2010-08-09 01:02:03"
// , reggie = /(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})/
// , [, year, month, day, hours, minutes, seconds] = reggie.exec(dateString)
// , dateObject = new Date(year, month-1, day, hours, minutes, seconds);
// let utc1 = Date.UTC(this.today_show.getFullYear(), this.today_show.getMonth(), this.today_show.getDate());
// console.log(utc1);
// console.log(this.today_show[1]);
let user = Meteor.users.findOne({_id: Meteor.userId()});
const event = {
'summary': 'Quran Class',
'location': 'Online- Skype',
'description': '45min class',
'start':{
'dateTime': this.today_show.toISOString(),
'timeZone': 'America/Los_Angeles'},
'end':{
'dateTime': this.today_show.toISOString(),
'timeZone': 'America/Los_Angeles'}
};
GoogleApi.post('calendar/v3/calendars/primary/events', { data: event },function (error, result){
console.log(error);
console.log(result);
});
}
ngOnDestroy() {
this.classesSub.unsubscribe();
this.paramsSub.unsubscribe();
this.tutorSub.unsubscribe();
this.imagesSubs.unsubscribe();
}
}
// tutor-details.component.ts
import { Component, OnInit, OnDestroy } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { Subscription } from 'rxjs/Subscription';
import { Meteor } from 'meteor/meteor';
import { MeteorObservable } from 'meteor-rxjs';
import { InjectUser } from "angular2-meteor-accounts-ui";
import {ROUTER_DIRECTIVES, Location} from "angular2/router";
import { Router } from '@angular/router';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
import { CreditCardValidator } from 'ng2-cc-library';
import { Requests } from '../../../../both/collections/requests.collection';
import { ActivatedRoute } from '@angular/router';
import { Request } from '../../../../both/models/request.model';
import 'rxjs/add/operator/map';
import { Users } from '../../../../both/collections/users.collection';
import { Classes } from '../../../../both/collections/classes.collection';
import { Class_ } from '../../../../both/models/class.model';
import { Tutors } from '../../../../both/collections/tutors.collection';
import { Tutor } from '../../../../both/models/tutor.model';
import { Accounts } from 'meteor/accounts-base';
import style from './tutor-details.component.scss';
import template from './tutor-details.component.html';
import {IMyOptions} from 'mydatepicker';
Meteor.startup(() => {
Accounts.ui.config({requestPermissions:{google:['https://www.googleapis.com/auth/calendar']}, forceApprovalPrompt: {google: true}, requestOfflineToken: {google: true}});
Stripe.setPublishableKey(Meteor.settings.public.stripe.livePublishableKey);
var handler = StripeCheckout.configure({
key: Meteor.settings.public.stripe.testPublishableKey,
token: function(token) {}
});
});
@Component({
selector: 'tutor-details',
template,
styles: [ style ]
})
@InjectUser('user')
export class TutorDetailsComponentUser implements OnInit, OnDestroy {
loggedIn:boolean=true;
user_skype_email: string;
today: Date = new Date();
today_show: Date = new Date();
tutorId: string;
slot: number;
color: string[]= new Array(24);
day: number=1;
tutorAsUserId: string;
tutor: Tutor;
paramsSub: Subscription;
imagesSubs: Subscription;
classesSub: Subscription;
reqSub: Subscription;
tutorSub: Subscription;
requests: Observable<Request[]>;
mailtoTutor: string;
tutor_user_email: string;
class: Class_;
tutorClasses: Observable<Class_[]>;
user: Meteor.User;
checkout: boolean=false;
a_day: number[] = new Array(24);
tutorSchedule: number[][] = new Array();
colorsSched: string[][] = new Array();
range_for_disp: number[] = new Array(12);
amount: number=0;
checkDetails: string[]=new Array(3);
payment_form: FormGroup;
payment_form_2: FormGroup;
submitted: boolean = false;
g_calendar: boolean=false;
// cc
cardNumber: string;
expiryMonth: string;
expiryYear: string;
cvc: string;
constructor(
private router: Router,
private formBuilder: FormBuilder,
private route: ActivatedRoute
) {}
private myDatePickerOptions: IMyOptions = {
// other options...
dateFormat: 'dd.mm.yyyy',
inline: true,
disableDateRanges: [{ begin: {year: this.today.getFullYear(), month: this.today.getMonth()-2, day: this.today.getDate()}, end: {year: this.today.getFullYear(),
month: this.today.getMonth()+1, day: this.today.getDate()-1} },{ begin: {year: this.today.getFullYear(),month: this.today.getMonth()+1, day: this.today.getDate()+7},
end: {year: this.today.getFullYear(),month: this.today.getMonth()+2, day: this.today.getDate()} }]
};
// Initialized to specific date (09.10.2018).
private model: Object = { date: { year: 2018, month: 10, day: 9 } };
ngOnInit() {
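// Redirect anonymous visitors to login, build both payment forms, set up the weekly color grid, and subscribe to tutors, users, classes and images.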
console.log(Meteor.userId());
if(!Meteor.userId()){
this.loggedIn=false;
Bert.alert( 'You need to be logged in to view this page', 'danger', 'fixed-bottom' );
this.router.navigate(['/login']);
}
this.imagesSubs = MeteorObservable.subscribe('images').subscribe();
this.payment_form_2 = this.formBuilder.group({
creditCard: ['', [<any>CreditCardValidator.validateCCNumber]],
expDate: ['', [<any>CreditCardValidator.validateExpDate]],
cvc: ['', [<any>Validators.required, <any>Validators.minLength(3), <any>Validators.maxLength(4)]]
});
this.payment_form = this.formBuilder.group({
cardNumber: ['', Validators.required],
expiryMonth: ['', Validators.required],
expiryYear: ['', Validators.required],
cvc: ['', Validators.required]
});
for (var i = 0; i < 24; i++) {
this.color[i]='green';
}
for (var i = 0; i < 7; i++) {
this.colorsSched[i]=this.color.slice(); // copy per day so the rows don't share one array
}
this.paramsSub = this.route.params
.map(params => params['tutorId'])
.subscribe(tutorId => {
this.tutorId = tutorId;
if (this.tutorSub) {
this.tutorSub.unsubscribe();
}
});
this.tutorSub = MeteorObservable.subscribe('tutors').subscribe(() => {
this.tutor=Tutors.findOne(this.tutorId);
this.tutorAsUserId=this.tutor.userId;
this.tutorSchedule=this.tutor.times;
this.amount = this.tutor.hourly_rating;
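// Color the weekly grid: days before the tutor's last schedule update are blocked out in blue; for later days, booked hours (1) show blue and free hours show green.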
let _MS_PER_DAY = 1000 * 60 * 60 * 24;
let utc1 = Date.UTC(this.today.getFullYear(), this.today.getMonth(), this.today.getDate());
let utc2 = Date.UTC(this.tutor.lastUpdateDate.getFullYear(), this.tutor.lastUpdateDate.getMonth(), this.tutor.lastUpdateDate.getDate());
let last_update_diff = Math.floor((utc2 - utc1) / _MS_PER_DAY);
for (var i = 0; i < last_update_diff; i++) {
for(var j = 0; j < 24; j++) {
this.colorsSched[i][j]='blue';
}
}
for (var i = last_update_diff; i < 7; i++) {
for(var j = 0; j < 24; j++) {
// console.log(this.tutorSchedule[i][j]);
if(this.tutorSchedule[i][j]==1){
this.colorsSched[i][j]='blue';
}else{
this.colorsSched[i][j]='green';
}
}
}
// console.log(this.colorsSched);
});
console.log(this.colorsSched);
this.tutorSub = MeteorObservable.subscribe('users').subscribe(() => {
this.tutor_user_email=Users.findOne(this.tutorAsUserId).emails[0].address;
this.mailtoTutor="mailto:"+ this.tutor_user_email;
});
//TODO only find classes that this tutor do 34064745
this.classesSub = MeteorObservable.subscribe('classes').subscribe(() => {
this.tutorClasses = Classes.find({tutorId: {$eq: this.tutorAsUserId} });
});
}
get isMe(): boolean {
if(this.user)
return this.user._id === this.tutorAsUserId;
return false;
}
toggleSlot(i: number): void {
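// Record the chosen hour on today_show and flip the slot between free and booked, updating its color.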
this.today_show.setHours(i,0,0);
this.slot = i;
console.log(this.tutorSchedule[this.day][i]);
if(this.tutorSchedule[this.day][i]==0) | else if(this.tutorSchedule[this.day][i]==1){
this.tutorSchedule[this.day][i]=0;
this.colorsSched[this.day][i]='blue';
}
// else if(this.colorsSched[this.day][i]='blue'){
// this.colorsSched[this.day][i]='green';
// }
}
onDateChanged(event: IMyDateModel) {
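// Compute how many days ahead the picked date is, remember it as the active day, and recolor that day's 24 hour slots.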
let _MS_PER_DAY = 1000 * 60 * 60 * 24;
let utc1 = Date.UTC(this.today.getFullYear(), this.today.getMonth(), this.today.getDate());
let utc2 = Date.UTC(event.jsdate.getFullYear(), event.jsdate.getMonth(), event.jsdate.getDate());
let diff = Math.floor((utc2 - utc1) / _MS_PER_DAY);
let i = diff;
this.day = diff;
this.today_show.setDate(this.today.getDate()+i);
for(var j = 0; j < 24; j++) {
// console.log(this.tutorSchedule[i][j]);
if(this.tutorSchedule[i][j]==1){
this.colorsSched[i][j]='green';
}else{
this.colorsSched[i][j]='red';
}
}
}
CheckoutFn():void{
if (this.payment_form.valid) {
console.log('payment form valid')
Stripe.card.createToken({
number: this.payment_form.value.cardNumber,
cvc: this.payment_form.value.cvc,
exp_month: this.payment_form.value.expiryMonth,
exp_year: this.payment_form.value.expiryYear
}, function(status, response) {
console.log(status);
console.log(response.error.message);
stripeToken = response.id;
Meteor.call('chargeCard', stripeToken, amount);
});
}
}
step1(): void{
this.checkout = false;
}
GoToCheckOut(): void{
// if(!this.user_skype_email){
// alert('Please enter your skype username so the teacher can contact you :)');
// }else{
// alert('you are now registered in this class :)');
// }
console.log(this.slot);
console.log(this.tutorSchedule)
this.tutorSchedule[this.day][this.slot]=2;
// Tutors.update(this.tutorId, {
// $set:{times: this.tutorSchedule }
// });
this.checkout=true;
}
onSubmit() {
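// Tokenize the card from the second payment form with Stripe, call the chargeCard method, then redirect to the thanks page.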
let success= false;
let free= true;
if (this.payment_form_2.valid) {
this.submitted = true;
let m = this.payment_form_2.value.expDate[0]+this.payment_form_2.value.expDate[1];
let y = this.payment_form_2.value.expDate[5]+this.payment_form_2.value.expDate[6];
Bert.alert('before payment','success');
Stripe.card.createToken({
number: this.payment_form_2.value.creditCard,
cvc: this.payment_form_2.value.cvc,
exp_month: m,
exp_year: y
}, function(status, response) {
console.log(status);
console.log(response);
stripeToken = response.id;
// Meteor.call('chargeCard', stripeToken, this.amount);
Meteor.call('chargeCard', stripeToken, 3);
});
Bert.alert('Thanks for booking my class','success');
this.router.navigate(['/thanks']);
// let id = Classes.findOne({
// userId:{
// $elemMatch:{$eq: Meteor.userId()}
// }
// })._id;
// console.log(id);
// if(id){
// free=false;
// }
}
// if(free||success){
// //add the user skype user name to the class
// Classes.insert(Object.assign({ userId: Meteor.userId(),
// tutorId: this.tutorId,startDate: this.today_show, userSkype: this.user_skype_email}));
// this.router.navigate(['/thanks']);
// }else{
// Bert.alert('Payment failed, or you are already registered in another class', 'danger');
// }
// this.g_calendar=true;
}
addToCalendnaer():void {
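// Create a 'Quran Class' event on the user's primary Google Calendar for the selected slot.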
// let dateString = "2010-08-09 01:02:03"
// , reggie = /(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})/
// , [, year, month, day, hours, minutes, seconds] = reggie.exec(dateString)
// , dateObject = new Date(year, month-1, day, hours, minutes, seconds);
// let utc1 = Date.UTC(this.today_show.getFullYear(), this.today_show.getMonth(), this.today_show.getDate());
// console.log(utc1);
// console.log(this.today_show[1]);
let user = Meteor.users.findOne({_id: Meteor.userId()});
const event = {
'summary': 'Quran Class',
'location': 'Online- Skype',
'description': '45min class',
'start':{
'dateTime': this.today_show.toISOString(),
'timeZone': 'America/Los_Angeles'},
'end':{
'dateTime': this.today_show.toISOString(),
'timeZone': 'America/Los_Angeles'}
};
GoogleApi.post('calendar/v3/calendars/primary/events', { data: event },function (error, result){
console.log(error);
console.log(result);
});
}
ngOnDestroy() {
this.classesSub.unsubscribe();
this.paramsSub.unsubscribe();
this.tutorSub.unsubscribe();
this.imagesSubs.unsubscribe();
}
}
| {
this.tutorSchedule[this.day][i]=1;
this.colorsSched[this.day][i]='green';
} | conditional_block |
tutor-details.component.ts | import { Component, OnInit, OnDestroy } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { Subscription } from 'rxjs/Subscription';
import { Meteor } from 'meteor/meteor';
import { MeteorObservable } from 'meteor-rxjs';
import { InjectUser } from "angular2-meteor-accounts-ui";
import {ROUTER_DIRECTIVES, Location} from "angular2/router";
import { Router } from '@angular/router';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
import { CreditCardValidator } from 'ng2-cc-library';
import { Requests } from '../../../../both/collections/requests.collection';
import { ActivatedRoute } from '@angular/router';
import { Request } from '../../../../both/models/request.model';
import 'rxjs/add/operator/map';
import { Users } from '../../../../both/collections/users.collection';
import { Classes } from '../../../../both/collections/classes.collection';
import { Class_ } from '../../../../both/models/class.model';
import { Tutors } from '../../../../both/collections/tutors.collection';
import { Tutor } from '../../../../both/models/tutor.model';
import { Accounts } from 'meteor/accounts-base';
import style from './tutor-details.component.scss';
import template from './tutor-details.component.html';
import {IMyOptions} from 'mydatepicker';
Meteor.startup(() => {
Accounts.ui.config({requestPermissions:{google:['https://www.googleapis.com/auth/calendar']}, forceApprovalPrompt: {google: true}, requestOfflineToken: {google: true}});
Stripe.setPublishableKey(Meteor.settings.public.stripe.livePublishableKey);
var handler = StripeCheckout.configure({
key: Meteor.settings.public.stripe.testPublishableKey,
token: function(token) {}
});
});
@Component({
selector: 'tutor-details',
template,
styles: [ style ]
})
@InjectUser('user')
export class TutorDetailsComponentUser implements OnInit, OnDestroy {
loggedIn:boolean=true;
user_skype_email: string;
today: Date = new Date();
today_show: Date = new Date();
tutorId: string;
slot: number;
color: string[]= new Array(24);
day: number=1;
tutorAsUserId: string;
tutor: Tutor;
paramsSub: Subscription;
imagesSubs: Subscription;
classesSub: Subscription;
reqSub: Subscription;
tutorSub: Subscription;
requests: Observable<Request[]>;
mailtoTutor: string;
tutor_user_email: string;
class: Class_;
tutorClasses: Observable<Class_[]>;
user: Meteor.User;
checkout: boolean=false;
a_day: number[] = new Array(24);
tutorSchedule: number[][] = new Array();
colorsSched: string[][] = new Array();
range_for_disp: number[] = new Array(12);
amount: number=0;
checkDetails: string[]=new Array(3);
payment_form: FormGroup;
payment_form_2: FormGroup;
submitted: boolean = false;
g_calendar: boolean=false;
// cc
cardNumber: string;
expiryMonth: string;
expiryYear: string;
cvc: string;
constructor(
private router: Router,
private formBuilder: FormBuilder,
private route: ActivatedRoute
) {}
private myDatePickerOptions: IMyOptions = {
// other options...
dateFormat: 'dd.mm.yyyy',
inline: true,
disableDateRanges: [{ begin: {year: this.today.getFullYear(), month: this.today.getMonth()-2, day: this.today.getDate()}, end: {year: this.today.getFullYear(),
month: this.today.getMonth()+1, day: this.today.getDate()-1} },{ begin: {year: this.today.getFullYear(),month: this.today.getMonth()+1, day: this.today.getDate()+7},
end: {year: this.today.getFullYear(),month: this.today.getMonth()+2, day: this.today.getDate()} }]
};
// Initialized to specific date (09.10.2018).
private model: Object = { date: { year: 2018, month: 10, day: 9 } };
ngOnInit() {
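// Redirect anonymous visitors to login, build both payment forms, set up the weekly color grid, and subscribe to tutors, users, classes and images.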
console.log(Meteor.userId());
if(!Meteor.userId()){
this.loggedIn=false;
Bert.alert( 'You need to be logged in to view this page', 'danger', 'fixed-bottom' );
this.router.navigate(['/login']);
}
this.imagesSubs = MeteorObservable.subscribe('images').subscribe();
this.payment_form_2 = this.formBuilder.group({
creditCard: ['', [<any>CreditCardValidator.validateCCNumber]],
expDate: ['', [<any>CreditCardValidator.validateExpDate]],
cvc: ['', [<any>Validators.required, <any>Validators.minLength(3), <any>Validators.maxLength(4)]]
});
this.payment_form = this.formBuilder.group({
cardNumber: ['', Validators.required],
expiryMonth: ['', Validators.required],
expiryYear: ['', Validators.required],
cvc: ['', Validators.required]
});
for (var i = 0; i < 24; i++) {
this.color[i]='green';
}
for (var i = 0; i < 7; i++) {
this.colorsSched[i]=this.color.slice(); // copy per day so the rows don't share one array
}
this.paramsSub = this.route.params
.map(params => params['tutorId'])
.subscribe(tutorId => {
this.tutorId = tutorId;
if (this.tutorSub) {
this.tutorSub.unsubscribe();
}
});
this.tutorSub = MeteorObservable.subscribe('tutors').subscribe(() => {
this.tutor=Tutors.findOne(this.tutorId);
this.tutorAsUserId=this.tutor.userId;
this.tutorSchedule=this.tutor.times;
this.amount = this.tutor.hourly_rating;
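// Color the weekly grid: days before the tutor's last schedule update are blocked out in blue; for later days, booked hours (1) show blue and free hours show green.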
let _MS_PER_DAY = 1000 * 60 * 60 * 24;
let utc1 = Date.UTC(this.today.getFullYear(), this.today.getMonth(), this.today.getDate());
let utc2 = Date.UTC(this.tutor.lastUpdateDate.getFullYear(), this.tutor.lastUpdateDate.getMonth(), this.tutor.lastUpdateDate.getDate());
let last_update_diff = Math.floor((utc2 - utc1) / _MS_PER_DAY);
for (var i = 0; i < last_update_diff; i++) {
for(var j = 0; j < 24; j++) {
this.colorsSched[i][j]='blue';
}
}
for (var i = last_update_diff; i < 7; i++) {
for(var j = 0; j < 24; j++) {
// console.log(this.tutorSchedule[i][j]);
if(this.tutorSchedule[i][j]==1){
this.colorsSched[i][j]='blue';
}else{
this.colorsSched[i][j]='green';
}
}
}
// console.log(this.colorsSched);
});
console.log(this.colorsSched);
this.tutorSub = MeteorObservable.subscribe('users').subscribe(() => {
this.tutor_user_email=Users.findOne(this.tutorAsUserId).emails[0].address;
this.mailtoTutor="mailto:"+ this.tutor_user_email;
});
//TODO only find classes that this tutor do 34064745
this.classesSub = MeteorObservable.subscribe('classes').subscribe(() => {
this.tutorClasses = Classes.find({tutorId: {$eq: this.tutorAsUserId} });
});
}
get isMe(): boolean {
if(this.user)
return this.user._id === this.tutorAsUserId;
return false;
}
toggleSlot(i: number): void {
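// Record the chosen hour on today_show and flip the slot between free and booked, updating its color.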
this.today_show.setHours(i,0,0);
this.slot = i;
console.log(this.tutorSchedule[this.day][i]);
if(this.tutorSchedule[this.day][i]==0){
this.tutorSchedule[this.day][i]=1;
this.colorsSched[this.day][i]='green';
}else if(this.tutorSchedule[this.day][i]==1){
this.tutorSchedule[this.day][i]=0;
this.colorsSched[this.day][i]='blue';
}
// else if(this.colorsSched[this.day][i]='blue'){
// this.colorsSched[this.day][i]='green';
// }
}
onDateChanged(event: IMyDateModel) {
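// Compute how many days ahead the picked date is, remember it as the active day, and recolor that day's 24 hour slots.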
let _MS_PER_DAY = 1000 * 60 * 60 * 24;
let utc1 = Date.UTC(this.today.getFullYear(), this.today.getMonth(), this.today.getDate());
let utc2 = Date.UTC(event.jsdate.getFullYear(), event.jsdate.getMonth(), event.jsdate.getDate());
let diff = Math.floor((utc2 - utc1) / _MS_PER_DAY);
let i = diff;
this.day = diff;
this.today_show.setDate(this.today.getDate()+i);
for(var j = 0; j < 24; j++) {
// console.log(this.tutorSchedule[i][j]);
if(this.tutorSchedule[i][j]==1){
this.colorsSched[i][j]='green';
}else{
this.colorsSched[i][j]='red';
} | console.log('payment form valid')
Stripe.card.createToken({
number: this.payment_form.value.cardNumber,
cvc: this.payment_form.value.cvc,
exp_month: this.payment_form.value.expiryMonth,
exp_year: this.payment_form.value.expiryYear
}, function(status, response) {
console.log(status);
console.log(response.error.message);
stripeToken = response.id;
Meteor.call('chargeCard', stripeToken, amount);
});
}
}
step1(): void{
this.checkout = false;
}
GoToCheckOut(): void{
// if(!this.user_skype_email){
// alert('Please enter your skype username so the teacher can contact you :)');
// }else{
// alert('you are now registered in this class :)');
// }
console.log(this.slot);
console.log(this.tutorSchedule)
this.tutorSchedule[this.day][this.slot]=2;
// Tutors.update(this.tutorId, {
// $set:{times: this.tutorSchedule }
// });
this.checkout=true;
}
onSubmit() {
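// Tokenize the card from the second payment form with Stripe, call the chargeCard method, then redirect to the thanks page.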
let success= false;
let free= true;
if (this.payment_form_2.valid) {
this.submitted = true;
let m = this.payment_form_2.value.expDate[0]+this.payment_form_2.value.expDate[1];
let y = this.payment_form_2.value.expDate[5]+this.payment_form_2.value.expDate[6];
Bert.alert('before payment','success');
Stripe.card.createToken({
number: this.payment_form_2.value.creditCard,
cvc: this.payment_form_2.value.cvc,
exp_month: m,
exp_year: y
}, function(status, response) {
console.log(status);
console.log(response);
stripeToken = response.id;
// Meteor.call('chargeCard', stripeToken, this.amount);
Meteor.call('chargeCard', stripeToken, 3);
});
Bert.alert('Thanks for booking my class','success');
this.router.navigate(['/thanks']);
// let id = Classes.findOne({
// userId:{
// $elemMatch:{$eq: Meteor.userId()}
// }
// })._id;
// console.log(id);
// if(id){
// free=false;
// }
}
// if(free||success){
// //add the user skype user name to the class
// Classes.insert(Object.assign({ userId: Meteor.userId(),
// tutorId: this.tutorId,startDate: this.today_show, userSkype: this.user_skype_email}));
// this.router.navigate(['/thanks']);
// }else{
// Bert.alert('Payment failed, or you are already registered in another class', 'danger');
// }
// this.g_calendar=true;
}
addToCalendnaer():void {
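// Create a 'Quran Class' event on the user's primary Google Calendar for the selected slot.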
// let dateString = "2010-08-09 01:02:03"
// , reggie = /(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})/
// , [, year, month, day, hours, minutes, seconds] = reggie.exec(dateString)
// , dateObject = new Date(year, month-1, day, hours, minutes, seconds);
// let utc1 = Date.UTC(this.today_show.getFullYear(), this.today_show.getMonth(), this.today_show.getDate());
// console.log(utc1);
// console.log(this.today_show[1]);
let user = Meteor.users.findOne({_id: Meteor.userId()});
const event = {
'summary': 'Quran Class',
'location': 'Online- Skype',
'description': '45min class',
'start':{
'dateTime': this.today_show.toISOString(),
'timeZone': 'America/Los_Angeles'},
'end':{
'dateTime': this.today_show.toISOString(),
'timeZone': 'America/Los_Angeles'}
};
GoogleApi.post('calendar/v3/calendars/primary/events', { data: event },function (error, result){
console.log(error);
console.log(result);
});
}
ngOnDestroy() {
this.classesSub.unsubscribe();
this.paramsSub.unsubscribe();
this.tutorSub.unsubscribe();
this.imagesSubs.unsubscribe();
}
} | }
}
CheckoutFn():void{
if (this.payment_form.valid) { | random_line_split |
lib.go | package main
import (
"encoding/json"
"fmt"
"github.com/gorilla/websocket"
log "github.com/sirupsen/logrus"
"gopkg.in/routeros.v2"
"gopkg.in/yaml.v2"
"io/ioutil"
"strings"
"sync"
"time"
)
| pongWait = 60 * time.Second
// Send pings to client with this period. Must be less than pongWait.
pingPeriod = (pongWait * 9) / 10
// Time allowed to write the file to the client.
writeWait = 10 * time.Second
)
// Event types
const (
EVENT_CONNECT = iota
EVENT_ROAMING
EVENT_DISCONNECT
EVENT_LEVEL
)
type LeaseEntry struct {
IP string
MAC string
Server string
Hostname string
Comment string
}
type ReportEntry struct {
IP string
Name string
Interface string
SSID string
MAC string
Signal string
Hostname string
Comment string
}
type ReportEvent struct {
EventType int
Old ReportEntry
New ReportEntry
}
var WS = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
type BroadcastData struct {
Report []ReportEntry
ReportMap map[string]ReportEntry
Data string
LastUpdate time.Time
sync.RWMutex
ReportChan chan ReportEvent
}
type LeaseList struct {
List []LeaseEntry
sync.RWMutex
}
type ConfMikrotik struct {
Address string `yaml:"address"`
Username string `yaml:"username"`
Password string `yaml:"password"`
Interval time.Duration `yaml:"interval"`
Mode string `yaml:"mode"`
}
type ConfDevice struct {
Name string `yaml:"name"`
MAC string `yaml:"mac"`
OnConnect ConfigEvent `yaml:"on.connect"`
OnDisconnect ConfigEvent `yaml:"on.disconnect"`
OnRoaming ConfigEvent `yaml:"on.roaming"`
OnLevel ConfigEvent `yaml:"on.level"`
}
type ConfigEvent struct {
HttpPost string `yaml:"http.post"`
HttpGet string `yaml:"http.get"`
HttpPostContent string `yaml:"http.post.content"`
HttpHeader map[string]string `yaml:"http.header"`
}
type LogInfo struct {
Level log.Level `yaml:"level"`
}
type Config struct {
Log LogInfo `yaml:"log"`
Router ConfMikrotik `yaml:"router"`
DHCP ConfMikrotik `yaml:"dhcp"`
Devices []ConfDevice `yaml:"devices"`
}
// Init BroadcastData entry
func (b *BroadcastData) Init() {
b.ReportMap = map[string]ReportEntry{}
b.ReportChan = make(chan ReportEvent)
}
var broadcastData BroadcastData
var leaseList LeaseList
var config Config
var configMTX sync.RWMutex
var devList map[string]ConfDevice
var devListMTX sync.RWMutex
func GetDHCPLeases(address, username, password string) (list []LeaseEntry, err error) {
cl, err := routeros.Dial(address, username, password)
if err != nil {
return
}
defer cl.Close()
reply, err := cl.Run("/ip/dhcp-server/lease/print")
if err != nil {
return
}
for _, re := range reply.Re {
list = append(list, LeaseEntry{
IP: re.Map["address"],
MAC: re.Map["mac-address"],
Server: re.Map["server"],
Hostname: re.Map["host-name"],
Comment: re.Map["comment"],
})
}
return
}
func reloadDHCP() {
ticker := time.NewTicker(config.DHCP.Interval)
for { // nolint:gosimple
select {
case <-ticker.C:
l, err := GetDHCPLeases(config.DHCP.Address, config.DHCP.Username, config.DHCP.Password)
if err != nil {
log.WithFields(log.Fields{"dhcp-addr": config.DHCP.Address}).Error("Error reloading DHCP Leases: ", err)
return
} else {
leaseList.Lock() // write lock: the list is being replaced
leaseList.List = l
leaseList.Unlock()
log.WithFields(log.Fields{"count": len(l)}).Debug("Reloaded DHCP Leases")
}
}
}
}
func FindLeaseByMAC(list []LeaseEntry, mac string) (e LeaseEntry, ok bool) {
for _, e := range list {
if e.MAC == mac {
return e, true
}
}
return
}
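// RTLoop polls the CapsMan (or wireless) registration table, reconnecting on failure,
// merges each entry with its DHCP lease and configured device data, and publishes the report.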
func RTLoop(c *routeros.Client, conf *Config) {
for {
cmd := "/caps-man/registration-table/print"
if strings.ToLower(config.Router.Mode) == "wifi" {
cmd = "/interface/wireless/registration-table/print"
}
reply, err := c.Run(cmd)
if err != nil {
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Error("Error during request to CapsMan server: ", err)
// Try to close connection
c.Close()
// Reconnect loop
for {
// Sleep for 5 sec
time.Sleep(5 * time.Second)
cNew, err := routeros.Dial(config.Router.Address, config.Router.Username, config.Router.Password)
if err != nil {
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Error("Reconnect error to CapsMan server: ", err)
continue
}
c = cNew
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Warn("Reconnected to CapsMan server")
break
}
continue
}
var report []ReportEntry
leaseList.RLock()
for _, re := range reply.Re {
var n, c, ip string
if le, ok := FindLeaseByMAC(leaseList.List, re.Map["mac-address"]); ok {
n = le.Hostname
c = le.Comment
ip = le.IP
}
devListMTX.RLock()
rec := ReportEntry{
IP: ip,
Name: devList[re.Map["mac-address"]].Name,
Interface: re.Map["interface"],
SSID: re.Map["ssid"],
MAC: re.Map["mac-address"],
Signal: re.Map["rx-signal"],
Hostname: n,
Comment: c,
}
if strings.ToLower(config.Router.Mode) == "wifi" {
rec.Signal = re.Map["signal-strength"]
if i := strings.Index(rec.Signal, "@"); i > 0 {
rec.Signal = rec.Signal[0:i]
}
}
devListMTX.RUnlock()
report = append(report, rec)
// fmt.Printf("%-20s\t%-20s\t%-20s\t%-10s\t%-30s\t%-30s\n", re.Map["interface"], re.Map["ssid"], re.Map["mac-address"], re.Map["rx-signal"], n, c)
}
log.WithFields(log.Fields{"count": len(report)}).Debug("Reloaded CapsMan entries")
leaseList.RUnlock()
if err = broadcastData.reportUpdate(report); err != nil {
log.WithFields(log.Fields{}).Warn("Error during reportUpdate: ", err)
}
time.Sleep(*interval)
}
}
func loadConfig(configFileName string) (config Config, err error) {
devListMTX.RLock()
defer devListMTX.RUnlock()
config = Config{}
devList = make(map[string]ConfDevice)
source, err := ioutil.ReadFile(configFileName)
if err != nil {
err = fmt.Errorf("cannot read config file [%s]", configFileName)
return
}
if err = yaml.Unmarshal(source, &config); err != nil {
err = fmt.Errorf("error parsing config file [%s]: %v", configFileName, err)
return
}
for _, v := range config.Devices {
devList[strings.ToUpper(v.MAC)] = v
}
return
}
func usage() {
}
// Handle report update request
func (b *BroadcastData) reportUpdate(report []ReportEntry) error {
output, err := json.Marshal(report)
if err != nil {
return err
}
// Lock mutex
b.Lock() // write lock: this method mutates the report fields
defer b.Unlock()
// Prepare new list of entries
rm := map[string]ReportEntry{}
for _, v := range report {
rm[v.MAC] = v
}
// Scan for new entries
for k := range rm {
if _, ok := b.ReportMap[k]; !ok {
// New entry
b.ReportChan <- ReportEvent{
EventType: EVENT_CONNECT,
New: rm[k],
}
} else {
// Check for roaming
if rm[k].Interface != b.ReportMap[k].Interface {
b.ReportChan <- ReportEvent{
EventType: EVENT_ROAMING,
Old: b.ReportMap[k],
New: rm[k],
}
}
// Check for signal level change
if rm[k].Signal != b.ReportMap[k].Signal {
b.ReportChan <- ReportEvent{
EventType: EVENT_LEVEL,
Old: b.ReportMap[k],
New: rm[k],
}
}
}
}
// Scan for deleted entries
for k := range b.ReportMap {
if _, ok := rm[k]; !ok {
b.ReportChan <- ReportEvent{
EventType: EVENT_DISCONNECT,
Old: b.ReportMap[k],
}
}
}
b.ReportMap = rm
b.Report = report
b.Data = string(output)
b.LastUpdate = time.Now()
return nil
}
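// EventHandler consumes report events and fires the configured HTTP callbacks
// for connect, disconnect, roaming and signal-level changes.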
func (b *BroadcastData) EventHandler() {
for { // nolint:gosimple
select {
case data := <-b.ReportChan:
// fmt.Printf("New event received: %v\n", data)
switch data.EventType {
case EVENT_CONNECT:
log.WithFields(log.Fields{"action": "register", "mac": data.New.MAC, "name": data.New.Name, "interface": data.New.Interface, "ssid": data.New.SSID, "hostname": data.New.Hostname, "comment": data.New.Comment, "level-to": data.New.Signal}).Info("New connection registered")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnConnect.HttpPost) > 0) || (len(dev.OnConnect.HttpGet) > 0) {
go makeRequest(dev.OnConnect, map[string]string{
"name": dev.Name,
"mac": data.New.MAC,
"roaming.to": "",
"roaming.from": "",
"level.to": data.New.Signal,
"level.from": "",
})
}
}
case EVENT_DISCONNECT:
log.WithFields(log.Fields{"action": "disconnect", "mac": data.Old.MAC, "name": data.Old.Name, "interface": data.Old.Interface, "hostname": data.Old.Hostname, "comment": data.Old.Comment}).Info("Client disconnect")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.Old.MAC] // disconnect events only populate Old
devListMTX.RUnlock()
if ok {
if (len(dev.OnDisconnect.HttpPost) > 0) || (len(dev.OnDisconnect.HttpGet) > 0) {
go makeRequest(dev.OnDisconnect, map[string]string{
"name": dev.Name,
"mac": data.Old.MAC,
"roaming.to": "",
"roaming.from": "",
"level.to": "",
"level.from": data.Old.Signal,
})
}
}
case EVENT_ROAMING:
log.WithFields(log.Fields{"action": "roaming", "mac": data.New.MAC, "name": data.New.Name, "interface-from": data.Old.Interface, "interface-to": data.New.Interface, "level-from": data.Old.Signal, "level-to": data.New.Signal}).Info("Client roaming")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnRoaming.HttpPost) > 0) || (len(dev.OnRoaming.HttpGet) > 0) {
go makeRequest(dev.OnRoaming, map[string]string{
"name": dev.Name,
"mac": data.New.MAC,
"roaming.to": data.New.Interface,
"roaming.from": data.Old.Interface,
"level.from": data.Old.Signal,
"level.to": data.New.Signal,
})
}
}
case EVENT_LEVEL:
log.WithFields(log.Fields{"action": "level", "mac": data.New.MAC, "name": data.New.Name, "interface": data.New.Interface, "level-from": data.Old.Signal, "level-to": data.New.Signal}).Debug("Signal level change")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnLevel.HttpPost) > 0) || (len(dev.OnLevel.HttpGet) > 0) {
go makeRequest(dev.OnLevel, map[string]string{
"name": dev.Name,
"mac": data.Old.MAC,
"roaming.to": "",
"roaming.from": "",
"level.from": data.Old.Signal,
"level.to": data.New.Signal,
})
}
}
default:
}
}
}
} | const (
// Time allowed to read the next pong message from the client. | random_line_split |
lib.go | package main
import (
"encoding/json"
"fmt"
"github.com/gorilla/websocket"
log "github.com/sirupsen/logrus"
"gopkg.in/routeros.v2"
"gopkg.in/yaml.v2"
"io/ioutil"
"strings"
"sync"
"time"
)
const (
// Time allowed to read the next pong message from the client.
pongWait = 60 * time.Second
// Send pings to client with this period. Must be less than pongWait.
pingPeriod = (pongWait * 9) / 10
// Time allowed to write the file to the client.
writeWait = 10 * time.Second
)
// Event types
const (
EVENT_CONNECT = iota
EVENT_ROAMING
EVENT_DISCONNECT
EVENT_LEVEL
)
type LeaseEntry struct {
IP string
MAC string
Server string
Hostname string
Comment string
}
type ReportEntry struct {
IP string
Name string
Interface string
SSID string
MAC string
Signal string
Hostname string
Comment string
}
type ReportEvent struct {
EventType int
Old ReportEntry
New ReportEntry
}
var WS = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
type BroadcastData struct {
Report []ReportEntry
ReportMap map[string]ReportEntry
Data string
LastUpdate time.Time
sync.RWMutex
ReportChan chan ReportEvent
}
type LeaseList struct {
List []LeaseEntry
sync.RWMutex
}
type ConfMikrotik struct {
Address string `yaml:"address"`
Username string `yaml:"username"`
Password string `yaml:"password"`
Interval time.Duration `yaml:"interval"`
Mode string `yaml:"mode"`
}
type ConfDevice struct {
Name string `yaml:"name"`
MAC string `yaml:"mac"`
OnConnect ConfigEvent `yaml:"on.connect"`
OnDisconnect ConfigEvent `yaml:"on.disconnect"`
OnRoaming ConfigEvent `yaml:"on.roaming"`
OnLevel ConfigEvent `yaml:"on.level"`
}
type ConfigEvent struct {
HttpPost string `yaml:"http.post"`
HttpGet string `yaml:"http.get"`
HttpPostContent string `yaml:"http.post.content"`
HttpHeader map[string]string `yaml:"http.header"`
}
type LogInfo struct {
Level log.Level `yaml:"level"`
}
type Config struct {
Log LogInfo `yaml:"log"`
Router ConfMikrotik `yaml:"router"`
DHCP ConfMikrotik `yaml:"dhcp"`
Devices []ConfDevice `yaml:"devices"`
}
// Init BroadcastData entry
func (b *BroadcastData) Init() {
b.ReportMap = map[string]ReportEntry{}
b.ReportChan = make(chan ReportEvent)
}
var broadcastData BroadcastData
var leaseList LeaseList
var config Config
var configMTX sync.RWMutex
var devList map[string]ConfDevice
var devListMTX sync.RWMutex
func GetDHCPLeases(address, username, password string) (list []LeaseEntry, err error) {
cl, err := routeros.Dial(address, username, password)
if err != nil {
return
}
defer cl.Close()
reply, err := cl.Run("/ip/dhcp-server/lease/print")
if err != nil {
return
}
for _, re := range reply.Re {
list = append(list, LeaseEntry{
IP: re.Map["address"],
MAC: re.Map["mac-address"],
Server: re.Map["server"],
Hostname: re.Map["host-name"],
Comment: re.Map["comment"],
})
}
return
}
func reloadDHCP() |
func FindLeaseByMAC(list []LeaseEntry, mac string) (e LeaseEntry, ok bool) {
for _, e := range list {
if e.MAC == mac {
return e, true
}
}
return
}
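// RTLoop polls the CapsMan (or wireless) registration table, reconnecting on failure,
// merges each entry with its DHCP lease and configured device data, and publishes the report.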
func RTLoop(c *routeros.Client, conf *Config) {
for {
cmd := "/caps-man/registration-table/print"
if strings.ToLower(config.Router.Mode) == "wifi" {
cmd = "/interface/wireless/registration-table/print"
}
reply, err := c.Run(cmd)
if err != nil {
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Error("Error during request to CapsMan server: ", err)
// Try to close connection
c.Close()
// Reconnect loop
for {
// Sleep for 5 sec
time.Sleep(5 * time.Second)
cNew, err := routeros.Dial(config.Router.Address, config.Router.Username, config.Router.Password)
if err != nil {
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Error("Reconnect error to CapsMan server: ", err)
continue
}
c = cNew
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Warn("Reconnected to CapsMan server")
break
}
continue
}
var report []ReportEntry
leaseList.RLock()
for _, re := range reply.Re {
var n, c, ip string
if le, ok := FindLeaseByMAC(leaseList.List, re.Map["mac-address"]); ok {
n = le.Hostname
c = le.Comment
ip = le.IP
}
devListMTX.RLock()
rec := ReportEntry{
IP: ip,
Name: devList[re.Map["mac-address"]].Name,
Interface: re.Map["interface"],
SSID: re.Map["ssid"],
MAC: re.Map["mac-address"],
Signal: re.Map["rx-signal"],
Hostname: n,
Comment: c,
}
if strings.ToLower(config.Router.Mode) == "wifi" {
rec.Signal = re.Map["signal-strength"]
if i := strings.Index(rec.Signal, "@"); i > 0 {
rec.Signal = rec.Signal[0:i]
}
}
devListMTX.RUnlock()
report = append(report, rec)
// fmt.Printf("%-20s\t%-20s\t%-20s\t%-10s\t%-30s\t%-30s\n", re.Map["interface"], re.Map["ssid"], re.Map["mac-address"], re.Map["rx-signal"], n, c)
}
log.WithFields(log.Fields{"count": len(report)}).Debug("Reloaded CapsMan entries")
leaseList.RUnlock()
if err = broadcastData.reportUpdate(report); err != nil {
log.WithFields(log.Fields{}).Warn("Error during reportUpdate: ", err)
}
time.Sleep(*interval)
}
}
func loadConfig(configFileName string) (config Config, err error) {
devListMTX.RLock()
defer devListMTX.RUnlock()
config = Config{}
devList = make(map[string]ConfDevice)
source, err := ioutil.ReadFile(configFileName)
if err != nil {
err = fmt.Errorf("cannot read config file [%s]", configFileName)
return
}
if err = yaml.Unmarshal(source, &config); err != nil {
err = fmt.Errorf("error parsing config file [%s]: %v", configFileName, err)
return
}
for _, v := range config.Devices {
devList[strings.ToUpper(v.MAC)] = v
}
return
}
func usage() {
}
// Handle report update request
func (b *BroadcastData) reportUpdate(report []ReportEntry) error {
output, err := json.Marshal(report)
if err != nil {
return err
}
// Lock mutex
b.Lock() // write lock: this method mutates the report fields
defer b.Unlock()
// Prepare new list of entries
rm := map[string]ReportEntry{}
for _, v := range report {
rm[v.MAC] = v
}
// Scan for new entries
for k := range rm {
if _, ok := b.ReportMap[k]; !ok {
// New entry
b.ReportChan <- ReportEvent{
EventType: EVENT_CONNECT,
New: rm[k],
}
} else {
// Check for roaming
if rm[k].Interface != b.ReportMap[k].Interface {
b.ReportChan <- ReportEvent{
EventType: EVENT_ROAMING,
Old: b.ReportMap[k],
New: rm[k],
}
}
// Check for signal level change
if rm[k].Signal != b.ReportMap[k].Signal {
b.ReportChan <- ReportEvent{
EventType: EVENT_LEVEL,
Old: b.ReportMap[k],
New: rm[k],
}
}
}
}
// Scan for deleted entries
for k := range b.ReportMap {
if _, ok := rm[k]; !ok {
b.ReportChan <- ReportEvent{
EventType: EVENT_DISCONNECT,
Old: b.ReportMap[k],
}
}
}
b.ReportMap = rm
b.Report = report
b.Data = string(output)
b.LastUpdate = time.Now()
return nil
}
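// EventHandler consumes report events and fires the configured HTTP callbacks
// for connect, disconnect, roaming and signal-level changes.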
func (b *BroadcastData) EventHandler() {
for { // nolint:gosimple
select {
case data := <-b.ReportChan:
// fmt.Printf("New event received: %v\n", data)
switch data.EventType {
case EVENT_CONNECT:
log.WithFields(log.Fields{"action": "register", "mac": data.New.MAC, "name": data.New.Name, "interface": data.New.Interface, "ssid": data.New.SSID, "hostname": data.New.Hostname, "comment": data.New.Comment, "level-to": data.New.Signal}).Info("New connection registered")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnConnect.HttpPost) > 0) || (len(dev.OnConnect.HttpGet) > 0) {
go makeRequest(dev.OnConnect, map[string]string{
"name": dev.Name,
"mac": data.New.MAC,
"roaming.to": "",
"roaming.from": "",
"level.to": data.New.Signal,
"level.from": "",
})
}
}
case EVENT_DISCONNECT:
log.WithFields(log.Fields{"action": "disconnect", "mac": data.Old.MAC, "name": data.Old.Name, "interface": data.Old.Interface, "hostname": data.Old.Hostname, "comment": data.Old.Comment}).Info("Client disconnect")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.Old.MAC] // disconnect events only populate Old
devListMTX.RUnlock()
if ok {
if (len(dev.OnDisconnect.HttpPost) > 0) || (len(dev.OnDisconnect.HttpGet) > 0) {
go makeRequest(dev.OnDisconnect, map[string]string{
"name": dev.Name,
"mac": data.Old.MAC,
"roaming.to": "",
"roaming.from": "",
"level.to": "",
"level.from": data.Old.Signal,
})
}
}
case EVENT_ROAMING:
log.WithFields(log.Fields{"action": "roaming", "mac": data.New.MAC, "name": data.New.Name, "interface-from": data.Old.Interface, "interface-to": data.New.Interface, "level-from": data.Old.Signal, "level-to": data.New.Signal}).Info("Client roaming")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnRoaming.HttpPost) > 0) || (len(dev.OnRoaming.HttpGet) > 0) {
go makeRequest(dev.OnRoaming, map[string]string{
"name": dev.Name,
"mac": data.New.MAC,
"roaming.to": data.New.Interface,
"roaming.from": data.Old.Interface,
"level.from": data.Old.Signal,
"level.to": data.New.Signal,
})
}
}
case EVENT_LEVEL:
log.WithFields(log.Fields{"action": "level", "mac": data.New.MAC, "name": data.New.Name, "interface": data.New.Interface, "level-from": data.Old.Signal, "level-to": data.New.Signal}).Debug("Signal level change")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnLevel.HttpPost) > 0) || (len(dev.OnLevel.HttpGet) > 0) {
go makeRequest(dev.OnLevel, map[string]string{
"name": dev.Name,
"mac": data.Old.MAC,
"roaming.to": "",
"roaming.from": "",
"level.from": data.Old.Signal,
"level.to": data.New.Signal,
})
}
}
default:
}
}
}
}
| {
ticker := time.NewTicker(config.DHCP.Interval)
for { // nolint:gosimple
select {
case <-ticker.C:
l, err := GetDHCPLeases(config.DHCP.Address, config.DHCP.Username, config.DHCP.Password)
if err != nil {
log.WithFields(log.Fields{"dhcp-addr": config.DHCP.Address}).Error("Error reloading DHCP Leases: ", err)
return
} else {
leaseList.Lock() // write lock: the list is being replaced
leaseList.List = l
leaseList.Unlock()
log.WithFields(log.Fields{"count": len(l)}).Debug("Reloaded DHCP Leases")
}
}
}
} | identifier_body |
lib.go | package main
import (
"encoding/json"
"fmt"
"github.com/gorilla/websocket"
log "github.com/sirupsen/logrus"
"gopkg.in/routeros.v2"
"gopkg.in/yaml.v2"
"io/ioutil"
"strings"
"sync"
"time"
)
const (
// Time allowed to read the next pong message from the client.
pongWait = 60 * time.Second
// Send pings to client with this period. Must be less than pongWait.
pingPeriod = (pongWait * 9) / 10
// Time allowed to write the file to the client.
writeWait = 10 * time.Second
)
// Event types
const (
EVENT_CONNECT = iota
EVENT_ROAMING
EVENT_DISCONNECT
EVENT_LEVEL
)
type LeaseEntry struct {
IP string
MAC string
Server string
Hostname string
Comment string
}
type ReportEntry struct {
IP string
Name string
Interface string
SSID string
MAC string
Signal string
Hostname string
Comment string
}
type ReportEvent struct {
EventType int
Old ReportEntry
New ReportEntry
}
var WS = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
type BroadcastData struct {
Report []ReportEntry
ReportMap map[string]ReportEntry
Data string
LastUpdate time.Time
sync.RWMutex
ReportChan chan ReportEvent
}
type LeaseList struct {
List []LeaseEntry
sync.RWMutex
}
type ConfMikrotik struct {
Address string `yaml:"address"`
Username string `yaml:"username"`
Password string `yaml:"password"`
Interval time.Duration `yaml:"interval"`
Mode string `yaml:"mode"`
}
type ConfDevice struct {
Name string `yaml:"name"`
MAC string `yaml:"mac"`
OnConnect ConfigEvent `yaml:"on.connect"`
OnDisconnect ConfigEvent `yaml:"on.disconnect"`
OnRoaming ConfigEvent `yaml:"on.roaming"`
OnLevel ConfigEvent `yaml:"on.level"`
}
type ConfigEvent struct {
HttpPost string `yaml:"http.post"`
HttpGet string `yaml:"http.get"`
HttpPostContent string `yaml:"http.post.content"`
HttpHeader map[string]string `yaml:"http.header"`
}
type LogInfo struct {
Level log.Level `yaml:"level"`
}
type Config struct {
Log LogInfo `yaml:"log"`
Router ConfMikrotik `yaml:"router"`
DHCP ConfMikrotik `yaml:"dhcp"`
Devices []ConfDevice `yaml:"devices"`
}
// Init BroadcastData entry
func (b *BroadcastData) Init() {
b.ReportMap = map[string]ReportEntry{}
b.ReportChan = make(chan ReportEvent)
}
var broadcastData BroadcastData
var leaseList LeaseList
var config Config
var configMTX sync.RWMutex
var devList map[string]ConfDevice
var devListMTX sync.RWMutex
func GetDHCPLeases(address, username, password string) (list []LeaseEntry, err error) {
cl, err := routeros.Dial(address, username, password)
if err != nil {
return
}
defer cl.Close()
reply, err := cl.Run("/ip/dhcp-server/lease/print")
if err != nil {
return
}
for _, re := range reply.Re {
list = append(list, LeaseEntry{
IP: re.Map["address"],
MAC: re.Map["mac-address"],
Server: re.Map["server"],
Hostname: re.Map["host-name"],
Comment: re.Map["comment"],
})
}
return
}
func reloadDHCP() {
ticker := time.NewTicker(config.DHCP.Interval)
for { // nolint:gosimple
select {
case <-ticker.C:
l, err := GetDHCPLeases(config.DHCP.Address, config.DHCP.Username, config.DHCP.Password)
if err != nil {
log.WithFields(log.Fields{"dhcp-addr": config.DHCP.Address}).Error("Error reloading DHCP Leases: ", err)
return
} else {
leaseList.Lock() // write lock: the list is being replaced
leaseList.List = l
leaseList.Unlock()
log.WithFields(log.Fields{"count": len(l)}).Debug("Reloaded DHCP Leases")
}
}
}
}
func FindLeaseByMAC(list []LeaseEntry, mac string) (e LeaseEntry, ok bool) {
for _, e := range list {
if e.MAC == mac {
return e, true
}
}
return
}
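// RTLoop polls the CapsMan (or wireless) registration table, reconnecting on failure,
// merges each entry with its DHCP lease and configured device data, and publishes the report.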
func RTLoop(c *routeros.Client, conf *Config) {
for {
cmd := "/caps-man/registration-table/print"
if strings.ToLower(config.Router.Mode) == "wifi" {
cmd = "/interface/wireless/registration-table/print"
}
reply, err := c.Run(cmd)
if err != nil {
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Error("Error during request to CapsMan server: ", err)
// Try to close connection
c.Close()
// Reconnect loop
for {
// Sleep for 5 sec
time.Sleep(5 * time.Second)
cNew, err := routeros.Dial(config.Router.Address, config.Router.Username, config.Router.Password)
if err != nil {
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Error("Reconnect error to CapsMan server: ", err)
continue
}
c = cNew
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Warn("Reconnected to CapsMan server")
break
}
continue
}
var report []ReportEntry
leaseList.RLock()
for _, re := range reply.Re {
var n, c, ip string
if le, ok := FindLeaseByMAC(leaseList.List, re.Map["mac-address"]); ok {
n = le.Hostname
c = le.Comment
ip = le.IP
}
devListMTX.RLock()
rec := ReportEntry{
IP: ip,
Name: devList[re.Map["mac-address"]].Name,
Interface: re.Map["interface"],
SSID: re.Map["ssid"],
MAC: re.Map["mac-address"],
Signal: re.Map["rx-signal"],
Hostname: n,
Comment: c,
}
if strings.ToLower(config.Router.Mode) == "wifi" {
rec.Signal = re.Map["signal-strength"]
if i := strings.Index(rec.Signal, "@"); i > 0 {
rec.Signal = rec.Signal[0:i]
}
}
devListMTX.RUnlock()
report = append(report, rec)
// fmt.Printf("%-20s\t%-20s\t%-20s\t%-10s\t%-30s\t%-30s\n", re.Map["interface"], re.Map["ssid"], re.Map["mac-address"], re.Map["rx-signal"], n, c)
}
log.WithFields(log.Fields{"count": len(report)}).Debug("Reloaded CapsMan entries")
leaseList.RUnlock()
if err = broadcastData.reportUpdate(report); err != nil {
log.WithFields(log.Fields{}).Warn("Error during reportUpdate: ", err)
}
time.Sleep(*interval)
}
}
func loadConfig(configFileName string) (config Config, err error) {
devListMTX.RLock()
defer devListMTX.RUnlock()
config = Config{}
devList = make(map[string]ConfDevice)
source, err := ioutil.ReadFile(configFileName)
if err != nil {
err = fmt.Errorf("cannot read config file [%s]", configFileName)
return
}
if err = yaml.Unmarshal(source, &config); err != nil {
err = fmt.Errorf("error parsing config file [%s]: %v", configFileName, err)
return
}
for _, v := range config.Devices {
devList[strings.ToUpper(v.MAC)] = v
}
return
}
func | () {
}
// Handle report update request
func (b *BroadcastData) reportUpdate(report []ReportEntry) error {
output, err := json.Marshal(report)
if err != nil {
return err
}
// Lock mutex
b.Lock() // write lock: this method mutates the report fields
defer b.Unlock()
// Prepare new list of entries
rm := map[string]ReportEntry{}
for _, v := range report {
rm[v.MAC] = v
}
// Scan for new entries
for k := range rm {
if _, ok := b.ReportMap[k]; !ok {
// New entry
b.ReportChan <- ReportEvent{
EventType: EVENT_CONNECT,
New: rm[k],
}
} else {
// Check for roaming
if rm[k].Interface != b.ReportMap[k].Interface {
b.ReportChan <- ReportEvent{
EventType: EVENT_ROAMING,
Old: b.ReportMap[k],
New: rm[k],
}
}
// Check for signal level change
if rm[k].Signal != b.ReportMap[k].Signal {
b.ReportChan <- ReportEvent{
EventType: EVENT_LEVEL,
Old: b.ReportMap[k],
New: rm[k],
}
}
}
}
// Scan for deleted entries
for k := range b.ReportMap {
if _, ok := rm[k]; !ok {
b.ReportChan <- ReportEvent{
EventType: EVENT_DISCONNECT,
Old: b.ReportMap[k],
}
}
}
b.ReportMap = rm
b.Report = report
b.Data = string(output)
b.LastUpdate = time.Now()
return nil
}
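// EventHandler consumes report events and fires the configured HTTP callbacks
// for connect, disconnect, roaming and signal-level changes.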
func (b *BroadcastData) EventHandler() {
for { // nolint:gosimple
select {
case data := <-b.ReportChan:
// fmt.Printf("New event received: %v\n", data)
switch data.EventType {
case EVENT_CONNECT:
log.WithFields(log.Fields{"action": "register", "mac": data.New.MAC, "name": data.New.Name, "interface": data.New.Interface, "ssid": data.New.SSID, "hostname": data.New.Hostname, "comment": data.New.Comment, "level-to": data.New.Signal}).Info("New connection registered")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnConnect.HttpPost) > 0) || (len(dev.OnConnect.HttpGet) > 0) {
go makeRequest(dev.OnConnect, map[string]string{
"name": dev.Name,
"mac": data.New.MAC,
"roaming.to": "",
"roaming.from": "",
"level.to": data.New.Signal,
"level.from": "",
})
}
}
case EVENT_DISCONNECT:
log.WithFields(log.Fields{"action": "disconnect", "mac": data.Old.MAC, "name": data.Old.Name, "interface": data.Old.Interface, "hostname": data.Old.Hostname, "comment": data.Old.Comment}).Info("Client disconnect")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.Old.MAC] // disconnect events only populate Old
devListMTX.RUnlock()
if ok {
if (len(dev.OnDisconnect.HttpPost) > 0) || (len(dev.OnDisconnect.HttpGet) > 0) {
go makeRequest(dev.OnDisconnect, map[string]string{
"name": dev.Name,
"mac": data.Old.MAC,
"roaming.to": "",
"roaming.from": "",
"level.to": "",
"level.from": data.Old.Signal,
})
}
}
case EVENT_ROAMING:
log.WithFields(log.Fields{"action": "roaming", "mac": data.New.MAC, "name": data.New.Name, "interface-from": data.Old.Interface, "interface-to": data.New.Interface, "level-from": data.Old.Signal, "level-to": data.New.Signal}).Info("Client roaming")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnRoaming.HttpPost) > 0) || (len(dev.OnRoaming.HttpGet) > 0) {
go makeRequest(dev.OnRoaming, map[string]string{
"name": dev.Name,
"mac": data.New.MAC,
"roaming.to": data.New.Interface,
"roaming.from": data.Old.Interface,
"level.from": data.Old.Signal,
"level.to": data.New.Signal,
})
}
}
case EVENT_LEVEL:
log.WithFields(log.Fields{"action": "level", "mac": data.New.MAC, "name": data.New.Name, "interface": data.New.Interface, "level-from": data.Old.Signal, "level-to": data.New.Signal}).Debug("Signal level change")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnLevel.HttpPost) > 0) || (len(dev.OnLevel.HttpGet) > 0) {
go makeRequest(dev.OnLevel, map[string]string{
"name": dev.Name,
"mac": data.Old.MAC,
"roaming.to": "",
"roaming.from": "",
"level.from": data.Old.Signal,
"level.to": data.New.Signal,
})
}
}
default:
}
}
}
}
| usage | identifier_name |
lib.go | package main
import (
"encoding/json"
"fmt"
"github.com/gorilla/websocket"
log "github.com/sirupsen/logrus"
"gopkg.in/routeros.v2"
"gopkg.in/yaml.v2"
"io/ioutil"
"strings"
"sync"
"time"
)
const (
// Time allowed to read the next pong message from the client.
pongWait = 60 * time.Second
// Send pings to client with this period. Must be less than pongWait.
pingPeriod = (pongWait * 9) / 10
// Time allowed to write the file to the client.
writeWait = 10 * time.Second
)
// Event types
const (
EVENT_CONNECT = iota
EVENT_ROAMING
EVENT_DISCONNECT
EVENT_LEVEL
)
type LeaseEntry struct {
IP string
MAC string
Server string
Hostname string
Comment string
}
type ReportEntry struct {
IP string
Name string
Interface string
SSID string
MAC string
Signal string
Hostname string
Comment string
}
type ReportEvent struct {
EventType int
Old ReportEntry
New ReportEntry
}
var WS = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
type BroadcastData struct {
Report []ReportEntry
ReportMap map[string]ReportEntry
Data string
LastUpdate time.Time
sync.RWMutex
ReportChan chan ReportEvent
}
type LeaseList struct {
List []LeaseEntry
sync.RWMutex
}
type ConfMikrotik struct {
Address string `yaml:"address"`
Username string `yaml:"username"`
Password string `yaml:"password"`
Interval time.Duration `yaml:"interval"`
Mode string `yaml:"mode"`
}
type ConfDevice struct {
Name string `yaml:"name"`
MAC string `yaml:"mac"`
OnConnect ConfigEvent `yaml:"on.connect"`
OnDisconnect ConfigEvent `yaml:"on.disconnect"`
OnRoaming ConfigEvent `yaml:"on.roaming"`
OnLevel ConfigEvent `yaml:"on.level"`
}
type ConfigEvent struct {
HttpPost string `yaml:"http.post"`
HttpGet string `yaml:"http.get"`
HttpPostContent string `yaml:"http.post.content"`
HttpHeader map[string]string `yaml:"http.header"`
}
type LogInfo struct {
Level log.Level `yaml:"level"`
}
type Config struct {
Log LogInfo `yaml:"log"`
Router ConfMikrotik `yaml:"router"`
DHCP ConfMikrotik `yaml:"dhcp"`
Devices []ConfDevice `yaml:"devices"`
}
// Init BroadcastData entry
func (b *BroadcastData) Init() {
b.ReportMap = map[string]ReportEntry{}
b.ReportChan = make(chan ReportEvent)
}
var broadcastData BroadcastData
var leaseList LeaseList
var config Config
var configMTX sync.RWMutex
var devList map[string]ConfDevice
var devListMTX sync.RWMutex
func GetDHCPLeases(address, username, password string) (list []LeaseEntry, err error) {
cl, err := routeros.Dial(address, username, password)
if err != nil {
return
}
defer cl.Close()
reply, err := cl.Run("/ip/dhcp-server/lease/print")
if err != nil {
return
}
for _, re := range reply.Re {
list = append(list, LeaseEntry{
IP: re.Map["address"],
MAC: re.Map["mac-address"],
Server: re.Map["server"],
Hostname: re.Map["host-name"],
Comment: re.Map["comment"],
})
}
return
}
func reloadDHCP() {
ticker := time.NewTicker(config.DHCP.Interval)
for { // nolint:gosimple
select {
case <-ticker.C:
l, err := GetDHCPLeases(config.DHCP.Address, config.DHCP.Username, config.DHCP.Password)
if err != nil {
log.WithFields(log.Fields{"dhcp-addr": config.DHCP.Address}).Error("Error reloading DHCP Leases: ", err)
return
} else {
leaseList.Lock() // write lock: the list is being replaced
leaseList.List = l
leaseList.Unlock()
log.WithFields(log.Fields{"count": len(l)}).Debug("Reloaded DHCP Leases")
}
}
}
}
func FindLeaseByMAC(list []LeaseEntry, mac string) (e LeaseEntry, ok bool) {
for _, e := range list {
if e.MAC == mac {
return e, true
}
}
return
}
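// RTLoop polls the CapsMan (or wireless) registration table, reconnecting on failure,
// merges each entry with its DHCP lease and configured device data, and publishes the report.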
func RTLoop(c *routeros.Client, conf *Config) {
for {
cmd := "/caps-man/registration-table/print"
if strings.ToLower(config.Router.Mode) == "wifi" {
cmd = "/interface/wireless/registration-table/print"
}
reply, err := c.Run(cmd)
if err != nil {
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Error("Error during request to CapsMan server: ", err)
// Try to close connection
c.Close()
// Reconnect loop
for {
// Sleep for 5 sec
time.Sleep(5 * time.Second)
cNew, err := routeros.Dial(config.Router.Address, config.Router.Username, config.Router.Password)
if err != nil {
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Error("Reconnect error to CapsMan server: ", err)
continue
}
c = cNew
log.WithFields(log.Fields{"address": config.Router.Address, "username": config.Router.Username}).Warn("Reconnected to CapsMan server")
break
}
continue
}
var report []ReportEntry
leaseList.RLock()
for _, re := range reply.Re {
var n, c, ip string
if le, ok := FindLeaseByMAC(leaseList.List, re.Map["mac-address"]); ok {
n = le.Hostname
c = le.Comment
ip = le.IP
}
devListMTX.RLock()
rec := ReportEntry{
IP: ip,
Name: devList[re.Map["mac-address"]].Name,
Interface: re.Map["interface"],
SSID: re.Map["ssid"],
MAC: re.Map["mac-address"],
Signal: re.Map["rx-signal"],
Hostname: n,
Comment: c,
}
if strings.ToLower(config.Router.Mode) == "wifi" {
rec.Signal = re.Map["signal-strength"]
if i := strings.Index(rec.Signal, "@"); i > 0 {
rec.Signal = rec.Signal[0:i]
}
}
devListMTX.RUnlock()
report = append(report, rec)
// fmt.Printf("%-20s\t%-20s\t%-20s\t%-10s\t%-30s\t%-30s\n", re.Map["interface"], re.Map["ssid"], re.Map["mac-address"], re.Map["rx-signal"], n, c)
}
log.WithFields(log.Fields{"count": len(report)}).Debug("Reloaded CapsMan entries")
leaseList.RUnlock()
if err = broadcastData.reportUpdate(report); err != nil {
log.WithFields(log.Fields{}).Warn("Error during reportUpdate: ", err)
}
time.Sleep(*interval)
}
}
func loadConfig(configFileName string) (config Config, err error) {
devListMTX.RLock()
defer devListMTX.RUnlock()
config = Config{}
devList = make(map[string]ConfDevice)
source, err := ioutil.ReadFile(configFileName)
if err != nil {
err = fmt.Errorf("cannot read config file [%s]", configFileName)
return
}
if err = yaml.Unmarshal(source, &config); err != nil {
err = fmt.Errorf("error parsing config file [%s]: %v", configFileName, err)
return
}
for _, v := range config.Devices {
devList[strings.ToUpper(v.MAC)] = v
}
return
}
func usage() {
}
// Handle report update request
func (b *BroadcastData) reportUpdate(report []ReportEntry) error {
output, err := json.Marshal(report)
if err != nil {
return err
}
// Lock mutex
b.Lock() // write lock: this method mutates the report fields
defer b.Unlock()
// Prepare new list of entries
rm := map[string]ReportEntry{}
for _, v := range report {
rm[v.MAC] = v
}
// Scan for new entries
for k := range rm {
if _, ok := b.ReportMap[k]; !ok {
// New entry
b.ReportChan <- ReportEvent{
EventType: EVENT_CONNECT,
New: rm[k],
}
} else {
// Check for roaming
if rm[k].Interface != b.ReportMap[k].Interface {
b.ReportChan <- ReportEvent{
EventType: EVENT_ROAMING,
Old: b.ReportMap[k],
New: rm[k],
}
}
// Check for signal level change
if rm[k].Signal != b.ReportMap[k].Signal {
b.ReportChan <- ReportEvent{
EventType: EVENT_LEVEL,
Old: b.ReportMap[k],
New: rm[k],
}
}
}
}
// Scan for deleted entries
for k := range b.ReportMap {
if _, ok := rm[k]; !ok {
b.ReportChan <- ReportEvent{
EventType: EVENT_DISCONNECT,
Old: b.ReportMap[k],
}
}
}
b.ReportMap = rm
b.Report = report
b.Data = string(output)
b.LastUpdate = time.Now()
return nil
}
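// EventHandler consumes report events and fires the configured HTTP callbacks
// for connect, disconnect, roaming and signal-level changes.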
func (b *BroadcastData) EventHandler() {
for { // nolint:gosimple
select {
case data := <-b.ReportChan:
// fmt.Printf("New event received: %v\n", data)
switch data.EventType {
case EVENT_CONNECT:
log.WithFields(log.Fields{"action": "register", "mac": data.New.MAC, "name": data.New.Name, "interface": data.New.Interface, "ssid": data.New.SSID, "hostname": data.New.Hostname, "comment": data.New.Comment, "level-to": data.New.Signal}).Info("New connection registered")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnConnect.HttpPost) > 0) || (len(dev.OnConnect.HttpGet) > 0) {
go makeRequest(dev.OnConnect, map[string]string{
"name": dev.Name,
"mac": data.New.MAC,
"roaming.to": "",
"roaming.from": "",
"level.to": data.New.Signal,
"level.from": "",
})
}
}
case EVENT_DISCONNECT:
log.WithFields(log.Fields{"action": "disconnect", "mac": data.Old.MAC, "name": data.Old.Name, "interface": data.Old.Interface, "hostname": data.Old.Hostname, "comment": data.Old.Comment}).Info("Client disconnect")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.Old.MAC] // disconnect events only populate Old
devListMTX.RUnlock()
if ok {
if (len(dev.OnDisconnect.HttpPost) > 0) || (len(dev.OnDisconnect.HttpGet) > 0) {
go makeRequest(dev.OnDisconnect, map[string]string{
"name": dev.Name,
"mac": data.Old.MAC,
"roaming.to": "",
"roaming.from": "",
"level.to": "",
"level.from": data.Old.Signal,
})
}
}
case EVENT_ROAMING:
log.WithFields(log.Fields{"action": "roaming", "mac": data.New.MAC, "name": data.New.Name, "interface-from": data.Old.Interface, "interface-to": data.New.Interface, "level-from": data.Old.Signal, "level-to": data.New.Signal}).Info("Client roaming")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnRoaming.HttpPost) > 0) || (len(dev.OnRoaming.HttpGet) > 0) {
go makeRequest(dev.OnRoaming, map[string]string{
"name": dev.Name,
"mac": data.New.MAC,
"roaming.to": data.New.Interface,
"roaming.from": data.Old.Interface,
"level.from": data.Old.Signal,
"level.to": data.New.Signal,
})
}
}
case EVENT_LEVEL:
log.WithFields(log.Fields{"action": "level", "mac": data.New.MAC, "name": data.New.Name, "interface": data.New.Interface, "level-from": data.Old.Signal, "level-to": data.New.Signal}).Debug("Signal level change")
// Get device info
devListMTX.RLock()
dev, ok := devList[data.New.MAC]
devListMTX.RUnlock()
if ok {
if (len(dev.OnLevel.HttpPost) > 0) || (len(dev.OnLevel.HttpGet) > 0) {
go makeRequest(dev.OnLevel, map[string]string{
"name": dev.Name,
"mac": data.Old.MAC,
"roaming.to": "",
"roaming.from": "",
"level.from": data.Old.Signal,
"level.to": data.New.Signal,
})
}
}
default:
}
}
}
}
uiTools.js | /* globals
getPropValue, getProperty, toTitleCase:true, firmlyInSlicingMode:true, axisIndex:true,
guiUtils:true, AxisList:true, slicingOrChanging:true, disabledIfSlicingOrChanging:true, UI,
getRoundedProp:true, getAllDataForKesmName:true, KesmNames,propertyCollections,
roundIfNumber:true
*/
// make things like Meteor.settings.public available in helpers
Template.registerHelper('Meteor', function() {
return Meteor;
});
// make the Session available in helpers
Template.registerHelper('Session', function() {
return Session;
});
Meteor.startup(function() {
Template.registerHelper('allPossibleKesmNames', function() {
return KesmNames.find().map(function(doc) { return doc.kesmName; });
});
});
getAllDataForKesmName = function(kesmName) {
var properties = {};
if (propertyCollections && _.has(propertyCollections, kesmName)) {
_.each(propertyCollections[kesmName].find().fetch(), function(doc) {
properties[doc.property] = doc.value;
});
}
var sample = Samples.findOne(properties['currentSampleID']) || null;
var machine = Machines.findOne({ kesmName: kesmName });
var process;
if (!! machine) {
process = Processes.findOne({
className: { $in: ['KESMMain', 'KESM'] },
machineId: machine._id
});
} else {
machine = null;
process = null;
}
var uri = "http://" + Meteor.settings.public.serverURIs[kesmName];
return {
kesmName: kesmName,
machine: machine,
process: process,
properties: properties,
sample: sample,
uri: uri
};
};
Template.registerHelper('allDataForKesmName', getAllDataForKesmName);
Template.registerHelper('odd', function(value) {
return (value % 2 !== 0);
});
Template.registerHelper('classForDot', function(active) {
return active? 'green' : 'red';
});
toTitleCase = function(str) {
return str.replace(/\w\S*/g, function(txt) {
return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();
});
};
// Small collection of utility functions to make the GUI widgets dance etc.
AxisList = [{
axis: 'x',
AXIS: 'X',
index: 0
}, {
axis: 'y',
AXIS: 'Y',
index: 1
}, {
axis: 'z',
AXIS: 'Z',
index: 2
}];
Template.registerHelper('axes', function() {
return AxisList;
});
axisIndex = function(axisLetter) {
return _.findWhere(AxisList, { axis: axisLetter.toLowerCase() }).index;
};
var axisIsInFault = function(axis) {
return getPropValue('stage_'+axis+'_fault', false);
};
Template.registerHelper('colorClassForAxis', function(axisLetter) {
var colorForAxis = {
x: 'error',
y: 'success',
z: 'info',
};
if (axisIsInFault(axisLetter)) {
return 'fault';
} else {
return colorForAxis[axisLetter];
}
});
Template.registerHelper('isInFault', axisIsInFault);
var scopeModes = [{
mode: 'maintenance'
}, {
mode: 'focus'
}, {
mode: 'slicing'
}];
Template.registerHelper('scopeModes', function() {
return scopeModes;
});
Template.registerHelper('scopeIsInMode', function(mode) {
var scopeMode = getPropValue('scopeMode', 'maintenance');
return scopeMode.value === mode;
});
Template.registerHelper('changingModes', function() {
var scopeMode = getPropValue('scopeMode', 'maintenance');
return scopeMode.value !== scopeMode.requestedValue;
});
firmlyInSlicingMode = function() {
var scopeMode = getProperty('scopeMode', 'maintenance');
return (scopeMode.value === 'slicing') && (scopeMode.value === scopeMode.requestedValue);
};
slicingOrChanging = function() {
var scopeMode = getProperty('scopeMode', 'maintenance');
return (scopeMode.value === 'slicing') || scopeMode.value !== scopeMode.requestedValue;
};
Template.registerHelper('slicingOrChanging', slicingOrChanging);
// Returns 'disabled' if the scope is in 'slicing' mode, or if it's changing modes.
// This is used to disable buttons and inputs.
disabledIfSlicingOrChanging = function() {
return slicingOrChanging()? 'disabled' : '';
};
Template.registerHelper('disabledIfSlicingOrChanging', disabledIfSlicingOrChanging);
Template.registerHelper('disabledUnlessSlicing', function() {
return firmlyInSlicingMode()? '' : 'disabled';
});
// The default number of digits to display for floating point numbers.
// If this is 0, then num.toFixed(0) should round to the nearest integer.
var NUM_DECIMALS = 1;
roundIfNumber = function(value, numDecimals) {
if (_.isUndefined(numDecimals)) {
numDecimals = NUM_DECIMALS;
}
return _.isNumber(value)? value.toFixed(numDecimals) : value;
};
getRoundedProp = function(propName, defaultValue, numDecimals) {
var value = getPropValue(propName, defaultValue);
// Don't try to round the default value, since it could already be a string.
if (value === defaultValue) {
return value;
}
return roundIfNumber(value, numDecimals);
};
Template.registerHelper('getRoundedProp', getRoundedProp);
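// Example template usage (the property name here is illustrative only):
//   {{getRoundedProp 'stage_x_velocity' 'n/a' 2}}
// renders the property rounded to two decimals, or 'n/a' until it exists.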
// This is a bit of a hack to make it easier to use a template
// like a controller object. Use it like:
// {{#with data a=true b=1}}
// {{> controllerTemplate}}
// {{/with}}
Template.registerHelper('data', function(data) {
});
Meteor.cb = function(e,r) {
if (e) console.error(e);
if(r) {
if (_.isArray(r) && console.table) {
console.table(r);
} else {
console.log(r);
}
}
};
guiUtils = {};
guiUtils.makeMeteorReactiveTextBox = function(textInputQuery, meteorPropertyName, options) {
options = options || {};
var cleanseFunction = options.cleanseFunction || parseFloat;
var updateFunction = options.updateFunction || 'updateProperty';
var interacting = false;
var propertySelector = {
property: meteorPropertyName
}; // Meteor Property selector
// Meteor -> jQuery-ui
var updateTextBoxFromMeteor = function() {
// Query the relevant stage data source
var tbQuery = Properties.findOne(propertySelector);
// Do not update if user interaction is going on
// Update the text box if the mongo property has changed
if (interacting) {
return;
}
if (tbQuery) {
$(textInputQuery).val(tbQuery.value);
}
};
// Call this first when the slider is initialized, and when changes happen
Tracker.autorun(updateTextBoxFromMeteor);
// jQuery-ui -> Meteor
var pushTextChangeToMeteor = function() {
var currentValue = $(textInputQuery).val();
if (cleanseFunction) {
currentValue = cleanseFunction(currentValue);
}
Meteor.call(updateFunction, meteorPropertyName, currentValue, options, Meteor.cb);
// Also change it back to the parsed version
$(textInputQuery).val(currentValue);
};
// Now we register change handlers via the various jQuery whatnot
// Register doing an update on a enter key
$(textInputQuery).bind('keypress', function(event) {
var code = (event.keyCode ? event.keyCode : event.which);
//Enter keycode
if (code == 13) {
// Force focus-out
$(textInputQuery).blur();
}
});
// Make a change commit if the field loses focus
$(textInputQuery).focusout(function(event) {
pushTextChangeToMeteor();
interacting = false;
});
// Disable updating when user is editing values
$(textInputQuery).focusin(function(event) {
interacting = true;
});
};
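// Example wiring (the selector and property name are illustrative only):
//   guiUtils.makeMeteorReactiveTextBox('#pumpRateInput', 'pump_rate', {
//     cleanseFunction: parseFloat,
//     updateFunction: 'updateProperty'
//   });
// The text box then mirrors the Mongo property except while the user is typing.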
// Functions to init sliders on gui, also registers Meteor Tracker
guiUtils.makeMeteorPropSlider = function(searchId, meteorPropertyName, cleanFunc, updateMethodName) {
// Flag for is the slider is being used
var interacting = false;
// Select and create the slider element
// Also register the start and stop functions
$(searchId).slider({
range: "min",
step: 0.000001,
min: 0.0,
max: 60.0,
start: function(event, ui) {
interacting = true;
},
stop: function(event, ui) {
var sliderLocation = $(searchId).slider("value");
// Update the Meteor property when the slider is dropped somewhere new
Meteor.call(updateMethodName, meteorPropertyName, cleanFunc(sliderLocation), "web");
interacting = false;
}
});
// Closured function to update the slider defined based on meteor values
Tracker.autorun(function() {
// Query the relevant stage data source
var valueQuery = Properties.findOne({
property: meteorPropertyName
});
if (valueQuery && !interacting) {
// Update the slider
$(searchId).slider("value", valueQuery.value);
}
});
};
// Initialize the image-extents slider (diff b/c it has two sliders)
guiUtils.initImageExtentSlider = function(axisName) {
var interacting = false;
// name for the jQuery selector
var sliderName = "#image" + axisName + "AreaSlider";
// Meteor property names
var propertyNameMin = "slice_" + axisName + "_min";
var propertyNameMax = "slice_" + axisName + "_max";
// Init the slider with default values . . .
// The values will later be updated by autorun function below
$(sliderName).slider({
range: "min",
step: 0.0001,
min: 0.0,
max: 60.0,
values: [20, 40],
start: function(event, ui) {
interacting = true;
},
stop: function(event, ui) {
var vals = $(sliderName).slider("values");
// Update the associated meteor properties
var minV = Math.min(vals[0], vals[1]);
var maxV = Math.max(vals[0], vals[1]);
// Create the properties.
Meteor.call("createProperty", propertyNameMin, minV, "web");
Meteor.call("createProperty", propertyNameMax, maxV, "web");
// End the interaction flag
interacting = false;
}
});
// Closured function to update the slider defined above
// Additionally Meteor dependency registration etc.
Tracker.autorun(function() {
// Query the relevant stage data source
var minQ = Properties.findOne({
property: propertyNameMin
});
var maxQ = Properties.findOne({
property: propertyNameMax
});
// Check for empty return is necessary, as collection might not be initialized before this is run
if (minQ && maxQ) {
// Update the slider
$(sliderName).slider("values", [minQ.value, maxQ.value]);
}
});
};
BatteryMonitor6813Tester.go | package main
import (
"BatteryMonitor6813Tester/LTC6813"
"database/sql"
"errors"
"flag"
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/mux"
"io/ioutil"
"log"
"log/syslog"
"net/http"
"os"
"periph.io/x/periph/conn/physic"
"periph.io/x/periph/conn/pin"
"periph.io/x/periph/conn/pin/pinreg"
"periph.io/x/periph/conn/spi"
"periph.io/x/periph/conn/spi/spireg"
"periph.io/x/periph/host"
"strconv"
"sync"
"time"
)
const SPI_BAUD_RATE = physic.MegaHertz * 1
const SPI_BITS_PER_WORD = 8
var ltc *LTC6813.LTC6813
var spiConnection spi.Conn
var verbose *bool
var spiDevice *string
var nErrors int
var pDB *sql.DB
var pDatabaseLogin, pDatabasePassword, pDatabaseServer, pDatabasePort, pDatabaseName *string
var ltc_lock sync.Mutex
var nDevices int
func printPin(fn string, p pin.Pin) {
name, pos := pinreg.Position(p)
if name != "" {
fmt.Printf(" %-4s: %-10s found on header %s, #%d\n", fn, p, name, pos)
} else {
fmt.Printf(" %-4s: %-10s\n", fn, p)
}
}
/* Set up the LTC6813 chain by repeatedly configuring and reading longer chains until a failure occurs.
*/
func getLTC6813() (int, error) {
var chainLength = 1
// for {
// LTC6813.New(spiConnection, 2).Initialise()
//
// }
ltc_lock.Lock()
defer ltc_lock.Unlock()
for {
testLtc := LTC6813.New(spiConnection, chainLength)
if _, err := testLtc.Test(); err != nil {
fmt.Println(err)
break
}
testLtc = nil
chainLength++
}
chainLength--
if chainLength > 0 {
ltc = LTC6813.New(spiConnection, chainLength)
if err := ltc.Initialise(); err != nil {
fmt.Print(err)
log.Fatal(err)
}
_, err := ltc.MeasureVoltages()
if err != nil {
fmt.Println("MeasureVoltages - ", err)
}
_, err = ltc.MeasureTemperatures()
if err != nil {
fmt.Println("MeasureTemperatures - ", err)
}
} else {
ltc = nil
}
return chainLength, nil
}
func performMeasurements() {
var total float32
var err error
var banks int
fmt.Println("Measuring")
if nDevices == 0 {
nDevices, err = getLTC6813()
if err != nil {
fmt.Print(err)
nErrors++
return
}
}
if nDevices == 0 {
fmt.Printf("\033cNo devices found on %s - %s", *spiDevice, time.Now().Format("15:04:05.99"))
return
}
banks, err = ltc.MeasureVoltagesSC()
if err != nil {
// Retry if it failed and ignore the failure if the retry was successful
banks, err = ltc.MeasureVoltagesSC()
}
if err != nil {
fmt.Print(" Error measuring voltages - ", err)
time.Sleep(time.Second * 2)
nDevices = 0
nErrors++
} else {
fmt.Print("\033c")
fmt.Printf("%d LTC6813 found on %s - %s - %d errors.\n", nDevices, *spiDevice, time.Now().Format("15:04:05.99"), nErrors)
for bank := 0; bank < banks; bank++ {
fmt.Printf("Bank%2d", bank)
total = 0.0
for cell := 0; cell < 18; cell++ {
fmt.Printf(" : %1.4f", ltc.GetVolts(bank, cell))
total = total + ltc.GetVolts(bank, cell)
}
fmt.Printf(" Sum = %2.3f\n", total)
}
banks, err = ltc.MeasureTemperatures()
if err != nil {
banks, err = ltc.MeasureTemperatures()
}
if err != nil {
fmt.Print(" Error measuring temperatures - ", err)
time.Sleep(time.Second * 2)
nDevices = 0
nErrors++
}
fmt.Println("Temperatures")
for bank := 0; bank < banks; bank++ {
fmt.Printf("Bank %d", bank)
for sensor := 0; sensor < 18; sensor++ {
temperature, err := ltc.GetTemperature(bank, sensor)
if err != nil {
fmt.Printf(" : %v ", err)
} else {
fmt.Printf(" : %2.1f℃", temperature)
}
}
fmt.Printf(" - Reference Volts = %1.4f - Sum of Cells = %2.3f\n", ltc.GetRefVolts(bank), ltc.GetSumOfCellsVolts(bank))
}
}
}
func mainImpl() error {
if !*verbose {
log.SetOutput(ioutil.Discard)
}
log.SetFlags(log.Lmicroseconds)
if flag.NArg() != 0 {
return errors.New("unexpected argument, try -help")
}
for {
var err error
nDevices, err = getLTC6813() // assign the package-level counter; := here would silently shadow it
if err == nil && nDevices > 0 {
break
}
fmt.Println("Looking for a device")
}
done := make(chan bool)
ticker := time.NewTicker(time.Second)
go func() {
for {
select {
case <-done:
return
case <-ticker.C:
performMeasurements()
}
}
}()
// Configure and start the WEB server
router := mux.NewRouter().StrictSlash(true)
router.HandleFunc("/", getValues).Methods("GET")
router.HandleFunc("/version", getVersion).Methods("GET")
router.HandleFunc("/i2cread", getI2Cread).Methods("GET")
router.HandleFunc("/i2cwrite", getI2Cwrite).Methods("GET")
router.HandleFunc("/i2creadByte", getI2CreadByte).Methods("GET")
router.HandleFunc("/i2cVoltage", getI2CVoltage).Methods("GET")
router.HandleFunc("/i2cCharge", getI2CCharge).Methods("GET")
router.HandleFunc("/i2cCurrent", getI2CCurrent).Methods("GET")
router.HandleFunc("/i2cTemp", getI2CTemp).Methods("GET")
return http.ListenAndServe(":8080", router) // Listen on port 8080; this only returns if the server fails
}
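// Example requests against the routes registered above (host and port are the
// assumed defaults; the sensor/reg/value numbers are illustrative only):
//
//	curl "http://localhost:8080/"                             // voltages and temperatures as JSON
//	curl "http://localhost:8080/i2cVoltage?sensor=2"          // voltage reading from one sensor
//	curl "http://localhost:8080/i2cwrite?reg=0x01&value=0x3c" // write one I2C register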
func connectToDatabase() (*sql.DB, error) {
if pDB != nil {
_ = pDB.Close()
pDB = nil
}
var sConnectionString = *pDatabaseLogin + ":" + *pDatabasePassword + "@tcp(" + *pDatabaseServer + ":" + *pDatabasePort + ")/" + *pDatabaseName
fmt.Println("Connecting to [", sConnectionString, "]")
db, err := sql.Open("mysql", sConnectionString)
if err != nil {
return nil, err
}
err = db.Ping()
if err != nil {
_ = db.Close()
return nil, err
}
return db, err
}
func init() {
verbose = flag.Bool("v", false, "verbose mode")
spiDevice = flag.String("c", "/dev/spidev0.1", "SPI device from /dev")
pDatabaseLogin = flag.String("l", "logger", "Database Login ID")
pDatabasePassword = flag.String("p", "logger", "Database password")
pDatabaseServer = flag.String("s", "localhost", "Database server")
pDatabasePort = flag.String("o", "3306", "Database port")
pDatabaseName = flag.String("d", "battery", "Name of the database")
flag.Parse()
logwriter, e := syslog.New(syslog.LOG_NOTICE, "Hello")
if e == nil {
log.SetOutput(logwriter)
}
// Initialise the SPI subsystem
if _, err := host.Init(); err != nil {
log.Fatal(err)
}
p, err := spireg.Open(*spiDevice)
if err != nil {
log.Fatal(err)
}
spiConnection, err = p.Connect(SPI_BAUD_RATE, spi.Mode0, SPI_BITS_PER_WORD)
if err != nil {
log.Fatal(err)
}
nErrors = 0
// Set up the database connection
pDB, err = connectToDatabase()
if err != nil {
log.Fatalf("Failed to connect to to the database - %s - Sorry, I am giving up.", err)
}
}
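// Example invocation (the values shown are simply the documented flag defaults
// made explicit; adjust for the actual SPI device and database):
//
//	./BatteryMonitor6813Tester -c /dev/spidev0.1 -l logger -p logger \
//	    -s localhost -o 3306 -d battery -v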
/*
WEB Service to return the version information
*/
func getVersion(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, `<html>
<head>
<title>Cedar Technology Battery Manager</title>
</head>
<body>
<h1>Cedar Technology Battery Manager</h1>
<h2>Version 1.0 - January 10th 2020</h2>
</body>
</html>`)
}
/*
WEB service to return current process values
*/
func getValues(w http.ResponseWriter, _ *http.Request) {
ltc_lock.Lock()
defer ltc_lock.Unlock()
// This header allows the output to be used in a WEB page from another server as a data source for some controls
w.Header().Set("Access-Control-Allow-Origin", "*")
if ltc != nil {
_, _ = fmt.Fprintf(w, `{%s,%s}`, ltc.GetVoltagesAsJSON(), ltc.GetTemperaturesAsJSON())
} else {
_, _ = fmt.Fprint(w, `{"error":"No Devices"}`)
}
}
/**
WEB service to read the I2C port
*/
func getI2Cread(w http.ResponseWriter, r *http.Request) {
var reg int64
sReg := r.URL.Query().Get("reg")
if sReg != "" {
reg, _ = strconv.ParseInt(sReg, 0, 8)
} else {
reg = 0x1a
}
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
s, err := ltc.ReadI2CWord(int(sensor), LTC6813.LTC2944Address, uint8(reg))
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprint(w, s)
}
}
/**
WEB service to read one 8 bit register from the I2C port
*/
func getI2CreadByte(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
var reg int64
sReg := r.URL.Query().Get("reg")
if sReg != "" {
reg, _ = strconv.ParseInt(sReg, 0, 8)
} else {
reg = 0x1a
}
s, err := ltc.ReadI2CByte(int(sensor), LTC6813.LTC2944Address, uint8(reg))
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprint(w, s)
}
}
/**
WEB service to read the current from the I2C port
*/
func getI2CCurrent(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CCurrent(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Current on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the voltage from the I2C port
*/
func getI2CVoltage(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CVoltage(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Voltage on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the current from the I2C port
*/
func getI2CCharge(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CAccumulatedCharge(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Accumulated charge on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the temperature from the I2C port
*/
func getI2CTemp(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CTemp(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Temperature on sensor %d = %f", sensor, t)
}
}
/**
WEB service to write to one of the I2C registers
*/
func getI2Cwrite(w http.ResponseWriter, r *http.Request) {
var reg, value int64
s := r.URL.Query().Get("reg")
if s != "" {
reg, _ = strconv.ParseInt(s, 0, 16)
} else {
reg = 0x1a
}
s = r.URL.Query().Get("value")
if s != "" {
value, _ = strconv.ParseInt(s, 0, 16)
} else {
value = 0
}
_, err := ltc.WriteI2CByte(2, LTC6813.LTC2944Address, uint8(reg), uint8(value))
w.Header().Set("Access-Control-Allow-Origin", "*")
// fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprintf(w, "Register %d set 0x%x", reg, value)
}
}
func main() {
if err := mainImpl(); err != nil {
fmt.Fprintf(os.Stderr, "Hello Error: %s.\n", err)
os.Exit(1)
}
fmt.Println("Program has ended.")
}
BatteryMonitor6813Tester.go | package main
import (
"BatteryMonitor6813Tester/LTC6813"
"database/sql"
"errors"
"flag"
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/mux"
"io/ioutil"
"log"
"log/syslog"
"net/http"
"os"
"periph.io/x/periph/conn/physic"
"periph.io/x/periph/conn/pin"
"periph.io/x/periph/conn/pin/pinreg"
"periph.io/x/periph/conn/spi"
"periph.io/x/periph/conn/spi/spireg"
"periph.io/x/periph/host"
"strconv"
"sync"
"time"
)
const SPI_BAUD_RATE = physic.MegaHertz * 1
const SPI_BITS_PER_WORD = 8
var ltc *LTC6813.LTC6813
var spiConnection spi.Conn
var verbose *bool
var spiDevice *string
var nErrors int
var pDB *sql.DB
var pDatabaseLogin, pDatabasePassword, pDatabaseServer, pDatabasePort, pDatabaseName *string
var ltc_lock sync.Mutex
var nDevices int
func printPin(fn string, p pin.Pin) {
name, pos := pinreg.Position(p)
if name != "" {
fmt.Printf(" %-4s: %-10s found on header %s, #%d\n", fn, p, name, pos)
} else {
fmt.Printf(" %-4s: %-10s\n", fn, p)
}
}
/* Set up the LTC6804 chain by repeatedly configuring and reading longer chains until a failure occurrs.
*/
func getLTC6813() (int, error) {
var chainLength = 1
// for {
// LTC6813.New(spiConnection, 2).Initialise()
//
// }
ltc_lock.Lock()
defer ltc_lock.Unlock()
for {
testLtc := LTC6813.New(spiConnection, chainLength)
if _, err := testLtc.Test(); err != nil {
fmt.Println(err)
break
}
testLtc = nil
chainLength++
}
chainLength--
if chainLength > 0 {
ltc = LTC6813.New(spiConnection, chainLength)
if err := ltc.Initialise(); err != nil {
fmt.Print(err)
log.Fatal(err)
}
_, err := ltc.MeasureVoltages()
if err != nil {
fmt.Println("MeasureVoltages - ", err)
}
_, err = ltc.MeasureTemperatures()
if err != nil {
fmt.Println("MeasureTemperatures - ", err)
}
} else {
ltc = nil
}
return chainLength, nil
}
func performMeasurements() {
var total float32
var err error
var banks int
fmt.Println("Measuring")
if nDevices == 0 {
nDevices, err = getLTC6813()
if err != nil {
fmt.Print(err)
nErrors++
return
}
}
if nDevices == 0 {
fmt.Printf("\033cNo devices found on %s - %s", *spiDevice, time.Now().Format("15:04:05.99"))
return
}
banks, err = ltc.MeasureVoltagesSC()
if err != nil {
// Retry if it failed and ignore the failure if the retry was successful
banks, err = ltc.MeasureVoltagesSC()
}
if err != nil {
fmt.Print(" Error measuring voltages - ", err)
time.Sleep(time.Second * 2)
nDevices = 0
nErrors++
} else {
fmt.Print("\033c")
fmt.Printf("%d LTC6813 found on %s - %s - %d errors.\n", nDevices, *spiDevice, time.Now().Format("15:04:05.99"), nErrors)
for bank := 0; bank < banks; bank++ {
fmt.Printf("Bank%2d", bank)
total = 0.0
for cell := 0; cell < 18; cell++ {
fmt.Printf(" : %1.4f", ltc.GetVolts(bank, cell))
total = total + ltc.GetVolts(bank, cell)
}
fmt.Printf(" Sum = %2.3f\n", total)
}
banks, err = ltc.MeasureTemperatures()
if err != nil {
banks, err = ltc.MeasureTemperatures()
}
if err != nil {
fmt.Print(" Error measuring temperatures - ", err)
time.Sleep(time.Second * 2)
nDevices = 0
nErrors++
}
fmt.Println("Temperatures")
for bank := 0; bank < banks; bank++ {
fmt.Printf("Bank %d", bank)
for sensor := 0; sensor < 18; sensor++ {
temperature, err := ltc.GetTemperature(bank, sensor)
if err != nil {
fmt.Printf(" : %v ", err)
} else {
fmt.Printf(" : %2.1f℃", temperature)
}
}
fmt.Printf(" - Reference Volts = %1.4f - Sum of Cells = %2.3f\n", ltc.GetRefVolts(bank), ltc.GetSumOfCellsVolts(bank))
}
}
}
func mainImpl() error {
if !*verbose {
log.SetOutput(ioutil.Discard)
}
log.SetFlags(log.Lmicroseconds)
if flag.NArg() != 0 {
return errors.New("unexpected argument, try -help")
}
for {
nDevices, err := getLTC6813()
if err == nil && nDevices > 0 {
break
}
fmt.Println("Looking for a device")
}
done := make(chan bool)
ticker := time.NewTicker(time.Second)
go func() {
for {
select {
case <-done:
return
case <-ticker.C:
performMeasurements()
}
}
}()
// Configure and start the WEB server
router := mux.NewRouter().StrictSlash(true)
router.HandleFunc("/", getValues).Methods("GET")
router.HandleFunc("/version", getVersion).Methods("GET")
router.HandleFunc("/i2cread", getI2Cread).Methods("GET")
router.HandleFunc("/i2cwrite", getI2Cwrite).Methods("GET")
router.HandleFunc("/i2creadByte", getI2CreadByte).Methods("GET")
router.HandleFunc("/i2cVoltage", getI2CVoltage).Methods("GET")
router.HandleFunc("/i2cCharge", getI2CCharge).Methods("GET")
router.HandleFunc("/i2cCurrent", getI2CCurrent).Methods("GET")
router.HandleFunc("/i2cTemp", getI2CTemp).Methods("GET")
http.ListenAndServe(":8080", router) // Listen on port 8080
return nil
}
func connectToDatabase() (*sql.DB, error) {
if pDB != nil {
_ = pDB.Close()
pDB = nil
}
var sConnectionString = *pDatabaseLogin + ":" + *pDatabasePassword + "@tcp(" + *pDatabaseServer + ":" + *pDatabasePort + ")/" + *pDatabaseName
fmt.Println("Connecting to [", sConnectionString, "]")
db, err := sql.Open("mysql", sConnectionString)
if err != nil {
return nil, err
}
err = db.Ping()
if err != nil {
_ = db.Close()
return nil, err
}
return db, err
}
func init() {
verbose = flag.Bool("v", false, "verbose mode")
spiDevice = flag.String("c", "/dev/spidev0.1", "SPI device from /dev")
pDatabaseLogin = flag.String("l", "logger", "Database Login ID")
pDatabasePassword = flag.String("p", "logger", "Database password")
pDatabaseServer = flag.String("s", "localhost", "Database server")
pDatabasePort = flag.String("o", "3306", "Database port")
pDatabaseName = flag.String("d", "battery", "Name of the database")
flag.Parse()
logwriter, e := syslog.New(syslog.LOG_NOTICE, "Hello")
if e == nil {
log.SetOutput(logwriter)
}
// Initialise the SPI subsystem
if _, err := host.Init(); err != nil {
log.Fatal(err)
}
p, err := spireg.Open(*spiDevice)
if err != nil {
log.Fatal(err)
}
spiConnection, err = p.Connect(SPI_BAUD_RATE, spi.Mode0, SPI_BITS_PER_WORD)
if err != nil {
log.Fatal(err)
}
nErrors = 0
// Set up the database connection
pDB, err = connectToDatabase()
if err != nil {
log.Fatalf("Failed to connect to to the database - %s - Sorry, I am giving up.", err)
}
}
/*
WEB Service to return the version information
*/
func getVersion(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, `<html>
<head>
<Cedar Technology Battery Manager>
</head>
<body>
<h1>Cedar Technology Battery Manager</h1>
<h2>Version 1.0 - January 10th 2020</h2>
</body>
</html>`)
}
/*
WEB service to return current process values
*/
func getValues(w http.ResponseWriter, _ *http.Request) {
ltc_lock.Lock()
defer ltc_lock.Unlock()
// This header allows the output to be used in a WEB page from another server as a data source for some controls
w.Header().Set("Access-Control-Allow-Origin", "*")
if ltc != nil {
_, _ = fmt.Fprintf(w, `{%s,%s}`, ltc.GetVoltagesAsJSON(), ltc.GetTemperaturesAsJSON())
} else {
_, _ = fmt.Fprint(w, `{"error":"No Devices"}`)
}
}
/**
WEB service to read the I2C port
*/
func getI2Cread(w http.ResponseWriter, r *http.Request) {
var reg int64
sReg := r.URL.Query().Get("reg")
if sReg != "" {
reg, _ = strconv.ParseInt(sReg, 0, 8)
} else {
reg = 0x1a
}
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
s, err := ltc.ReadI2CWord(int(sensor), LTC6813.LTC2944Address, uint8(reg))
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprintf(w, s)
}
}
/**
WEB service to read one 8 bit register from the I2C port
*/
func getI2CreadByte(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
var reg int64
sReg := r.URL.Query().Get("reg")
if sReg != "" {
reg, _ = strconv.ParseInt(sReg, 0, 8)
} else {
reg = 0x1a
}
s, err := ltc.ReadI2CByte(int(sensor), LTC6813.LTC2944Address, uint8(reg))
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprintf(w, s)
}
} | sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CCurrent(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Current on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the voltage from the I2C port
*/
func getI2CVoltage(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CVoltage(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Voltage on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the current from the I2C port
*/
func getI2CCharge(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CAccumulatedCharge(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Accumulated charge on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the temperature from the I2C port
*/
func getI2CTemp(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CTemp(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Temperature on sensor %d = %f", sensor, t)
}
}
/**
WEB service to write to one of the I2C registers
*/
func getI2Cwrite(w http.ResponseWriter, r *http.Request) {
var reg, value int64
s := r.URL.Query().Get("reg")
if s != "" {
reg, _ = strconv.ParseInt(s, 0, 16)
} else {
reg = 0x1a
}
s = r.URL.Query().Get("value")
if s != "" {
value, _ = strconv.ParseInt(s, 0, 16)
} else {
value = 0
}
_, err := ltc.WriteI2CByte(2, LTC6813.LTC2944Address, uint8(reg), uint8(value))
w.Header().Set("Access-Control-Allow-Origin", "*")
// fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprintf(w, "Register %d set 0x%x", reg, value)
}
}
func main() {
if err := mainImpl(); err != nil {
fmt.Fprintf(os.Stderr, "Hello Error: %s.\n", err)
os.Exit(1)
}
fmt.Println("Program has ended.")
} |
/**
WEB service to read the current from the I2C port
*/
func getI2CCurrent(w http.ResponseWriter, r *http.Request) { | random_line_split |
BatteryMonitor6813Tester.go | package main
import (
"BatteryMonitor6813Tester/LTC6813"
"database/sql"
"errors"
"flag"
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/mux"
"io/ioutil"
"log"
"log/syslog"
"net/http"
"os"
"periph.io/x/periph/conn/physic"
"periph.io/x/periph/conn/pin"
"periph.io/x/periph/conn/pin/pinreg"
"periph.io/x/periph/conn/spi"
"periph.io/x/periph/conn/spi/spireg"
"periph.io/x/periph/host"
"strconv"
"sync"
"time"
)
const SPI_BAUD_RATE = physic.MegaHertz * 1
const SPI_BITS_PER_WORD = 8
var ltc *LTC6813.LTC6813
var spiConnection spi.Conn
var verbose *bool
var spiDevice *string
var nErrors int
var pDB *sql.DB
var pDatabaseLogin, pDatabasePassword, pDatabaseServer, pDatabasePort, pDatabaseName *string
var ltc_lock sync.Mutex
var nDevices int
func printPin(fn string, p pin.Pin) {
name, pos := pinreg.Position(p)
if name != "" {
fmt.Printf(" %-4s: %-10s found on header %s, #%d\n", fn, p, name, pos)
} else {
fmt.Printf(" %-4s: %-10s\n", fn, p)
}
}
/* Set up the LTC6804 chain by repeatedly configuring and reading longer chains until a failure occurrs.
*/
func getLTC6813() (int, error) {
var chainLength = 1
// for {
// LTC6813.New(spiConnection, 2).Initialise()
//
// }
ltc_lock.Lock()
defer ltc_lock.Unlock()
for {
testLtc := LTC6813.New(spiConnection, chainLength)
if _, err := testLtc.Test(); err != nil {
fmt.Println(err)
break
}
testLtc = nil
chainLength++
}
chainLength--
if chainLength > 0 {
ltc = LTC6813.New(spiConnection, chainLength)
if err := ltc.Initialise(); err != nil {
fmt.Print(err)
log.Fatal(err)
}
_, err := ltc.MeasureVoltages()
if err != nil {
fmt.Println("MeasureVoltages - ", err)
}
_, err = ltc.MeasureTemperatures()
if err != nil {
fmt.Println("MeasureTemperatures - ", err)
}
} else {
ltc = nil
}
return chainLength, nil
}
func performMeasurements() {
var total float32
var err error
var banks int
fmt.Println("Measuring")
if nDevices == 0 {
nDevices, err = getLTC6813()
if err != nil {
fmt.Print(err)
nErrors++
return
}
}
if nDevices == 0 {
fmt.Printf("\033cNo devices found on %s - %s", *spiDevice, time.Now().Format("15:04:05.99"))
return
}
banks, err = ltc.MeasureVoltagesSC()
if err != nil {
// Retry if it failed and ignore the failure if the retry was successful
banks, err = ltc.MeasureVoltagesSC()
}
if err != nil {
fmt.Print(" Error measuring voltages - ", err)
time.Sleep(time.Second * 2)
nDevices = 0
nErrors++
} else {
fmt.Print("\033c")
fmt.Printf("%d LTC6813 found on %s - %s - %d errors.\n", nDevices, *spiDevice, time.Now().Format("15:04:05.99"), nErrors)
for bank := 0; bank < banks; bank++ {
fmt.Printf("Bank%2d", bank)
total = 0.0
for cell := 0; cell < 18; cell++ {
fmt.Printf(" : %1.4f", ltc.GetVolts(bank, cell))
total = total + ltc.GetVolts(bank, cell)
}
fmt.Printf(" Sum = %2.3f\n", total)
}
banks, err = ltc.MeasureTemperatures()
if err != nil {
banks, err = ltc.MeasureTemperatures()
}
if err != nil {
fmt.Print(" Error measuring temperatures - ", err)
time.Sleep(time.Second * 2)
nDevices = 0
nErrors++
}
fmt.Println("Temperatures")
for bank := 0; bank < banks; bank++ {
fmt.Printf("Bank %d", bank)
for sensor := 0; sensor < 18; sensor++ {
temperature, err := ltc.GetTemperature(bank, sensor)
if err != nil {
fmt.Printf(" : %v ", err)
} else {
fmt.Printf(" : %2.1f℃", temperature)
}
}
fmt.Printf(" - Reference Volts = %1.4f - Sum of Cells = %2.3f\n", ltc.GetRefVolts(bank), ltc.GetSumOfCellsVolts(bank))
}
}
}
func mainImpl() error {
if !*verbose {
log.SetOutput(ioutil.Discard)
}
log.SetFlags(log.Lmicroseconds)
if flag.NArg() != 0 {
return errors.New("unexpected argument, try -help")
}
for {
nDevices, err := getLTC6813()
if err == nil && nDevices > 0 {
break
}
fmt.Println("Looking for a device")
}
done := make(chan bool)
ticker := time.NewTicker(time.Second)
go func() {
for {
select {
case <-done:
return
case <-ticker.C:
performMeasurements()
}
}
}()
// Configure and start the WEB server
router := mux.NewRouter().StrictSlash(true)
router.HandleFunc("/", getValues).Methods("GET")
router.HandleFunc("/version", getVersion).Methods("GET")
router.HandleFunc("/i2cread", getI2Cread).Methods("GET")
router.HandleFunc("/i2cwrite", getI2Cwrite).Methods("GET")
router.HandleFunc("/i2creadByte", getI2CreadByte).Methods("GET")
router.HandleFunc("/i2cVoltage", getI2CVoltage).Methods("GET")
router.HandleFunc("/i2cCharge", getI2CCharge).Methods("GET")
router.HandleFunc("/i2cCurrent", getI2CCurrent).Methods("GET")
router.HandleFunc("/i2cTemp", getI2CTemp).Methods("GET")
http.ListenAndServe(":8080", router) // Listen on port 8080
return nil
}
func co | (*sql.DB, error) {
if pDB != nil {
_ = pDB.Close()
pDB = nil
}
var sConnectionString = *pDatabaseLogin + ":" + *pDatabasePassword + "@tcp(" + *pDatabaseServer + ":" + *pDatabasePort + ")/" + *pDatabaseName
fmt.Println("Connecting to [", sConnectionString, "]")
db, err := sql.Open("mysql", sConnectionString)
if err != nil {
return nil, err
}
err = db.Ping()
if err != nil {
_ = db.Close()
return nil, err
}
return db, err
}
func init() {
verbose = flag.Bool("v", false, "verbose mode")
spiDevice = flag.String("c", "/dev/spidev0.1", "SPI device from /dev")
pDatabaseLogin = flag.String("l", "logger", "Database Login ID")
pDatabasePassword = flag.String("p", "logger", "Database password")
pDatabaseServer = flag.String("s", "localhost", "Database server")
pDatabasePort = flag.String("o", "3306", "Database port")
pDatabaseName = flag.String("d", "battery", "Name of the database")
flag.Parse()
logwriter, e := syslog.New(syslog.LOG_NOTICE, "Hello")
if e == nil {
log.SetOutput(logwriter)
}
// Initialise the SPI subsystem
if _, err := host.Init(); err != nil {
log.Fatal(err)
}
p, err := spireg.Open(*spiDevice)
if err != nil {
log.Fatal(err)
}
spiConnection, err = p.Connect(SPI_BAUD_RATE, spi.Mode0, SPI_BITS_PER_WORD)
if err != nil {
log.Fatal(err)
}
nErrors = 0
// Set up the database connection
pDB, err = connectToDatabase()
if err != nil {
log.Fatalf("Failed to connect to to the database - %s - Sorry, I am giving up.", err)
}
}
/*
WEB Service to return the version information
*/
func getVersion(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, `<html>
<head>
<Cedar Technology Battery Manager>
</head>
<body>
<h1>Cedar Technology Battery Manager</h1>
<h2>Version 1.0 - January 10th 2020</h2>
</body>
</html>`)
}
/*
WEB service to return current process values
*/
func getValues(w http.ResponseWriter, _ *http.Request) {
ltc_lock.Lock()
defer ltc_lock.Unlock()
// This header allows the output to be used in a WEB page from another server as a data source for some controls
w.Header().Set("Access-Control-Allow-Origin", "*")
if ltc != nil {
_, _ = fmt.Fprintf(w, `{%s,%s}`, ltc.GetVoltagesAsJSON(), ltc.GetTemperaturesAsJSON())
} else {
_, _ = fmt.Fprint(w, `{"error":"No Devices"}`)
}
}
/**
WEB service to read the I2C port
*/
func getI2Cread(w http.ResponseWriter, r *http.Request) {
var reg int64
sReg := r.URL.Query().Get("reg")
if sReg != "" {
reg, _ = strconv.ParseInt(sReg, 0, 8)
} else {
reg = 0x1a
}
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
s, err := ltc.ReadI2CWord(int(sensor), LTC6813.LTC2944Address, uint8(reg))
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprintf(w, s)
}
}
/**
WEB service to read one 8 bit register from the I2C port
*/
func getI2CreadByte(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
var reg int64
sReg := r.URL.Query().Get("reg")
if sReg != "" {
reg, _ = strconv.ParseInt(sReg, 0, 8)
} else {
reg = 0x1a
}
s, err := ltc.ReadI2CByte(int(sensor), LTC6813.LTC2944Address, uint8(reg))
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprintf(w, s)
}
}
/**
WEB service to read the current from the I2C port
*/
func getI2CCurrent(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CCurrent(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Current on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the voltage from the I2C port
*/
func getI2CVoltage(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CVoltage(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Voltage on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the current from the I2C port
*/
func getI2CCharge(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CAccumulatedCharge(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Accumulated charge on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the temperature from the I2C port
*/
func getI2CTemp(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CTemp(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Temperature on sensor %d = %f", sensor, t)
}
}
/**
WEB service to write to one of the I2C registers
*/
func getI2Cwrite(w http.ResponseWriter, r *http.Request) {
var reg, value int64
s := r.URL.Query().Get("reg")
if s != "" {
reg, _ = strconv.ParseInt(s, 0, 16)
} else {
reg = 0x1a
}
s = r.URL.Query().Get("value")
if s != "" {
value, _ = strconv.ParseInt(s, 0, 16)
} else {
value = 0
}
_, err := ltc.WriteI2CByte(2, LTC6813.LTC2944Address, uint8(reg), uint8(value))
w.Header().Set("Access-Control-Allow-Origin", "*")
// fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprintf(w, "Register %d set 0x%x", reg, value)
}
}
func main() {
if err := mainImpl(); err != nil {
fmt.Fprintf(os.Stderr, "Hello Error: %s.\n", err)
os.Exit(1)
}
fmt.Println("Program has ended.")
}
| nnectToDatabase() | identifier_name |
BatteryMonitor6813Tester.go | package main
import (
"BatteryMonitor6813Tester/LTC6813"
"database/sql"
"errors"
"flag"
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/mux"
"io/ioutil"
"log"
"log/syslog"
"net/http"
"os"
"periph.io/x/periph/conn/physic"
"periph.io/x/periph/conn/pin"
"periph.io/x/periph/conn/pin/pinreg"
"periph.io/x/periph/conn/spi"
"periph.io/x/periph/conn/spi/spireg"
"periph.io/x/periph/host"
"strconv"
"sync"
"time"
)
const SPI_BAUD_RATE = physic.MegaHertz * 1
const SPI_BITS_PER_WORD = 8
var ltc *LTC6813.LTC6813
var spiConnection spi.Conn
var verbose *bool
var spiDevice *string
var nErrors int
var pDB *sql.DB
var pDatabaseLogin, pDatabasePassword, pDatabaseServer, pDatabasePort, pDatabaseName *string
var ltc_lock sync.Mutex
var nDevices int
func printPin(fn string, p pin.Pin) {
name, pos := pinreg.Position(p)
if name != "" {
fmt.Printf(" %-4s: %-10s found on header %s, #%d\n", fn, p, name, pos)
} else {
fmt.Printf(" %-4s: %-10s\n", fn, p)
}
}
/* Set up the LTC6804 chain by repeatedly configuring and reading longer chains until a failure occurrs.
*/
func getLTC6813() (int, error) {
var chainLength = 1
// for {
// LTC6813.New(spiConnection, 2).Initialise()
//
// }
ltc_lock.Lock()
defer ltc_lock.Unlock()
for {
testLtc := LTC6813.New(spiConnection, chainLength)
if _, err := testLtc.Test(); err != nil {
fmt.Println(err)
break
}
testLtc = nil
chainLength++
}
chainLength--
if chainLength > 0 {
ltc = LTC6813.New(spiConnection, chainLength)
if err := ltc.Initialise(); err != nil {
fmt.Print(err)
log.Fatal(err)
}
_, err := ltc.MeasureVoltages()
if err != nil {
fmt.Println("MeasureVoltages - ", err)
}
_, err = ltc.MeasureTemperatures()
if err != nil {
fmt.Println("MeasureTemperatures - ", err)
}
} else {
ltc = nil
}
return chainLength, nil
}
func performMeasurements() {
var total float32
var err error
var banks int
fmt.Println("Measuring")
if nDevices == 0 {
nDevices, err = getLTC6813()
if err != nil {
fmt.Print(err)
nErrors++
return
}
}
if nDevices == 0 {
fmt.Printf("\033cNo devices found on %s - %s", *spiDevice, time.Now().Format("15:04:05.99"))
return
}
banks, err = ltc.MeasureVoltagesSC()
if err != nil {
// Retry if it failed and ignore the failure if the retry was successful
banks, err = ltc.MeasureVoltagesSC()
}
if err != nil {
fmt.Print(" Error measuring voltages - ", err)
time.Sleep(time.Second * 2)
nDevices = 0
nErrors++
} else {
fmt.Print("\033c")
fmt.Printf("%d LTC6813 found on %s - %s - %d errors.\n", nDevices, *spiDevice, time.Now().Format("15:04:05.99"), nErrors)
for bank := 0; bank < banks; bank++ {
fmt.Printf("Bank%2d", bank)
total = 0.0
for cell := 0; cell < 18; cell++ {
fmt.Printf(" : %1.4f", ltc.GetVolts(bank, cell))
total = total + ltc.GetVolts(bank, cell)
}
fmt.Printf(" Sum = %2.3f\n", total)
}
banks, err = ltc.MeasureTemperatures()
if err != nil {
banks, err = ltc.MeasureTemperatures()
}
if err != nil {
fmt.Print(" Error measuring temperatures - ", err)
time.Sleep(time.Second * 2)
nDevices = 0
nErrors++
}
fmt.Println("Temperatures")
for bank := 0; bank < banks; bank++ {
fmt.Printf("Bank %d", bank)
for sensor := 0; sensor < 18; sensor++ {
temperature, err := ltc.GetTemperature(bank, sensor)
if err != nil {
fmt.Printf(" : %v ", err)
} else {
fmt.Printf(" : %2.1f℃", temperature)
}
}
fmt.Printf(" - Reference Volts = %1.4f - Sum of Cells = %2.3f\n", ltc.GetRefVolts(bank), ltc.GetSumOfCellsVolts(bank))
}
}
}
func mainImpl() error {
| func connectToDatabase() (*sql.DB, error) {
if pDB != nil {
_ = pDB.Close()
pDB = nil
}
var sConnectionString = *pDatabaseLogin + ":" + *pDatabasePassword + "@tcp(" + *pDatabaseServer + ":" + *pDatabasePort + ")/" + *pDatabaseName
fmt.Println("Connecting to [", sConnectionString, "]")
db, err := sql.Open("mysql", sConnectionString)
if err != nil {
return nil, err
}
err = db.Ping()
if err != nil {
_ = db.Close()
return nil, err
}
return db, err
}
func init() {
verbose = flag.Bool("v", false, "verbose mode")
spiDevice = flag.String("c", "/dev/spidev0.1", "SPI device from /dev")
pDatabaseLogin = flag.String("l", "logger", "Database Login ID")
pDatabasePassword = flag.String("p", "logger", "Database password")
pDatabaseServer = flag.String("s", "localhost", "Database server")
pDatabasePort = flag.String("o", "3306", "Database port")
pDatabaseName = flag.String("d", "battery", "Name of the database")
flag.Parse()
logwriter, e := syslog.New(syslog.LOG_NOTICE, "Hello")
if e == nil {
log.SetOutput(logwriter)
}
// Initialise the SPI subsystem
if _, err := host.Init(); err != nil {
log.Fatal(err)
}
p, err := spireg.Open(*spiDevice)
if err != nil {
log.Fatal(err)
}
spiConnection, err = p.Connect(SPI_BAUD_RATE, spi.Mode0, SPI_BITS_PER_WORD)
if err != nil {
log.Fatal(err)
}
nErrors = 0
// Set up the database connection
pDB, err = connectToDatabase()
if err != nil {
log.Fatalf("Failed to connect to to the database - %s - Sorry, I am giving up.", err)
}
}
/*
WEB Service to return the version information
*/
func getVersion(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, `<html>
<head>
<Cedar Technology Battery Manager>
</head>
<body>
<h1>Cedar Technology Battery Manager</h1>
<h2>Version 1.0 - January 10th 2020</h2>
</body>
</html>`)
}
/*
WEB service to return current process values
*/
func getValues(w http.ResponseWriter, _ *http.Request) {
ltc_lock.Lock()
defer ltc_lock.Unlock()
// This header allows the output to be used in a WEB page from another server as a data source for some controls
w.Header().Set("Access-Control-Allow-Origin", "*")
if ltc != nil {
_, _ = fmt.Fprintf(w, `{%s,%s}`, ltc.GetVoltagesAsJSON(), ltc.GetTemperaturesAsJSON())
} else {
_, _ = fmt.Fprint(w, `{"error":"No Devices"}`)
}
}
/**
WEB service to read the I2C port
*/
func getI2Cread(w http.ResponseWriter, r *http.Request) {
var reg int64
sReg := r.URL.Query().Get("reg")
if sReg != "" {
reg, _ = strconv.ParseInt(sReg, 0, 8)
} else {
reg = 0x1a
}
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
s, err := ltc.ReadI2CWord(int(sensor), LTC6813.LTC2944Address, uint8(reg))
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprintf(w, s)
}
}
/**
WEB service to read one 8 bit register from the I2C port
*/
func getI2CreadByte(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
var reg int64
sReg := r.URL.Query().Get("reg")
if sReg != "" {
reg, _ = strconv.ParseInt(sReg, 0, 8)
} else {
reg = 0x1a
}
s, err := ltc.ReadI2CByte(int(sensor), LTC6813.LTC2944Address, uint8(reg))
w.Header().Set("Access-Control-Allow-Origin", "*")
fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprintf(w, s)
}
}
/**
WEB service to read the current from the I2C port
*/
func getI2CCurrent(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CCurrent(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Current on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the voltage from the I2C port
*/
func getI2CVoltage(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CVoltage(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Voltage on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the current from the I2C port
*/
func getI2CCharge(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CAccumulatedCharge(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Accumulated charge on sensor %d = %f", sensor, t)
}
}
/**
WEB service to read the temperature from the I2C port
*/
func getI2CTemp(w http.ResponseWriter, r *http.Request) {
sensor, _ := strconv.ParseInt(r.URL.Query().Get("sensor"), 0, 8)
t, err := ltc.GetI2CTemp(int(sensor))
if err != nil {
fmt.Fprint(w, err)
} else {
fmt.Fprintf(w, "Temperature on sensor %d = %f", sensor, t)
}
}
/**
WEB service to write to one of the I2C registers
*/
func getI2Cwrite(w http.ResponseWriter, r *http.Request) {
var reg, value int64
s := r.URL.Query().Get("reg")
if s != "" {
reg, _ = strconv.ParseInt(s, 0, 16)
} else {
reg = 0x1a
}
s = r.URL.Query().Get("value")
if s != "" {
value, _ = strconv.ParseInt(s, 0, 16)
} else {
value = 0
}
_, err := ltc.WriteI2CByte(2, LTC6813.LTC2944Address, uint8(reg), uint8(value))
w.Header().Set("Access-Control-Allow-Origin", "*")
// fmt.Fprint(w, "Request = ", r.URL.Query().Get("reg"), "\n")
if err != nil {
fmt.Fprint(w, "Error - ", err)
} else {
fmt.Fprintf(w, "Register %d set 0x%x", reg, value)
}
}
func main() {
if err := mainImpl(); err != nil {
fmt.Fprintf(os.Stderr, "Hello Error: %s.\n", err)
os.Exit(1)
}
fmt.Println("Program has ended.")
}
| if !*verbose {
log.SetOutput(ioutil.Discard)
}
log.SetFlags(log.Lmicroseconds)
if flag.NArg() != 0 {
return errors.New("unexpected argument, try -help")
}
for {
nDevices, err := getLTC6813()
if err == nil && nDevices > 0 {
break
}
fmt.Println("Looking for a device")
}
done := make(chan bool)
ticker := time.NewTicker(time.Second)
go func() {
for {
select {
case <-done:
return
case <-ticker.C:
performMeasurements()
}
}
}()
// Configure and start the WEB server
router := mux.NewRouter().StrictSlash(true)
router.HandleFunc("/", getValues).Methods("GET")
router.HandleFunc("/version", getVersion).Methods("GET")
router.HandleFunc("/i2cread", getI2Cread).Methods("GET")
router.HandleFunc("/i2cwrite", getI2Cwrite).Methods("GET")
router.HandleFunc("/i2creadByte", getI2CreadByte).Methods("GET")
router.HandleFunc("/i2cVoltage", getI2CVoltage).Methods("GET")
router.HandleFunc("/i2cCharge", getI2CCharge).Methods("GET")
router.HandleFunc("/i2cCurrent", getI2CCurrent).Methods("GET")
router.HandleFunc("/i2cTemp", getI2CTemp).Methods("GET")
	return http.ListenAndServe(":8080", router) // Listen on port 8080; surface any listener error to main
}
| identifier_body |
image_caption.py | # importing libraries
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import numpy as np
import os
import glob
from keras.applications.resnet50 import ResNet50
from keras.optimizers import Adam
from keras.layers import Dense, Flatten,Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector,Concatenate
from keras.models import Sequential, Model
from keras.utils import np_utils
import random
from keras.preprocessing import image, sequence
import matplotlib.pyplot as plt
from google.colab import drive
drive.mount('/content/drive')
import glob
images_directory = '/content/drive/My Drive/Flickr_Data/'
img_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/'
cap_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr8k.token.txt'
training_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.trainImages.txt'
valid_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.devImages.txt'
testing_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.testImages.txt'
cap = open(cap_path, 'r').read().split("\n")
| # Loading cap as values and images as key in dictionary
tok = {}
for item in range(len(cap)-1):
tem = cap[item].split("#") #tem[0]= imgname.jpg ..... tem[1]=0 captionn.
if tem[0] in tok:
tok[tem[0]].append(tem[1][2:])
else:
tok[tem[0]] = [tem[1][2:]] #tem[n]= imgName ... #tok[tem[n]] = list of caption
# Making 3 files with 2 colmns as 'image_id' and 'captions'
training_dataset = open('flickr_8k_train_dataset.txt','wb')
training_dataset.write(b"image_id\tcap\n")
valid_dataset = open('flickr_8k_val_dataset.txt','wb')
valid_dataset.write(b"image_id\tcap\n")
testing_dataset = open('flickr_8k_test_dataset.txt','wb')
testing_dataset.write(b"image_id\tcap\n")
# Loading image ids and captions for each of these images in the above 3 files
for img in x_training:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
training_dataset.write((img+"\t"+caption+"\n").encode())
training_dataset.flush()
training_dataset.close()
for img in x_testing:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
testing_dataset.write((img+"\t"+caption+"\n").encode())
testing_dataset.flush()
testing_dataset.close()
for img in x_valid:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
valid_dataset.write((img+"\t"+caption+"\n").encode())
valid_dataset.flush()
valid_dataset.close()
# Here, we're using ResNet50 Model
from IPython.core.display import display, HTML
model = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
model.summary()
# process images to target size
def preprocess(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im) # (x, y, z)
im = np.expand_dims(im, axis=0) # (0, x, y, z)
return im
training_data = {}
counter=0
for item in x_training:
if item == "":
continue
if counter >= 3000:
break
counter+=1
if counter%1000==0:
print(counter)
path = img_path + item
img = preprocess(path) #to change the dimensions of the image for using ResNet model
pred = model.predict(img).reshape(2048) # shape of each image is (2048, 0)
training_data[item] = pred
# opening train_enc_img.p file and dumping content of training_data to this file
with open( "train_enc_img.p", "wb" ) as pickle_f: #obj hierarchy is converted into byte stream
pickle.dump(training_data, pickle_f )
# Storing image and its corresponding caption into a dataframe
pd_dataset = pd.read_csv("flickr_8k_train_dataset.txt", delimiter='\t')
dframe = pd_dataset.values
print(dframe.shape)
pd_dataset.head()
# Storing all the captions from dframe into a list
senten = []
for item in range(dframe.shape[0]):
senten.append(dframe[item, 1])
#senten will have 30000 length
# First 5 captions stored in senten
senten[:5]
# Splitting each captions stored in 'senten' and storing them in 'wor' as list of list
wor = [i.split() for i in senten]
# Creating a list of all unique wor
uniq = []
for i in wor:
uniq.extend(i)
uniq = list(set(uniq))
print(len(uniq))
vocabulary_size = len(uniq)
# making 2 lists to index each unique word and vice-versa
w_to_i = {val:index for index, val in enumerate(uniq)}
i_to_w = {index:val for index, val in enumerate(uniq)}
w_to_i['UNK'] = 0
w_to_i['raining'] = 8253
i_to_w[0] = 'UNK'
i_to_w[8253] = 'raining'
vocabulary_size = len(w_to_i.keys())
print(vocabulary_size)
max_len = 0
for i in senten:
i = i.split()
if len(i) > max_len:
max_len = len(i)
print(max_len) #finding longest caption
pad_seq, subsequent_wor = [], []
for item in range(dframe.shape[0]): #30000 items
part_seq = []
next_wor = []
text = dframe[item, 1].split() #diving each caption for every image into words
text = [w_to_i[i] for i in text] #finding index for each word
for i in range(1, len(text)):
part_seq.append(text[:i]) #start, 1st word, ... , last word
next_wor.append(text[i]) #1st word, ... , last word, end
pad_part_seq = sequence.pad_sequences(part_seq, max_len, padding='post')
    next_wor_1hot = np.zeros([len(next_wor), vocabulary_size], dtype=bool)  # np.bool is deprecated in newer NumPy; builtin bool behaves the same here
for i,next_word in enumerate(next_wor):
next_wor_1hot[i, next_word] = 1
pad_seq.append(pad_part_seq )
subsequent_wor.append(next_wor_1hot)
pad_seq = np.asarray(pad_seq)
subsequent_wor = np.asarray(subsequent_wor)
print(pad_seq.shape)
print(subsequent_wor.shape)
print(pad_seq[0])
for item in range(len(pad_seq[0])):
for y in range(max_len):
print(i_to_w[pad_seq[0][item][y]],)
print("\n")
print(len(pad_seq[0]))
num_imgs = 2000
cap = np.zeros([0, max_len])
next_wor = np.zeros([0, vocabulary_size])
for item in range(num_imgs): #img_to_padded_seqs.shape[0]):
cap = np.concatenate([cap, pad_seq[item]])
next_wor = np.concatenate([next_wor, subsequent_wor[item]])
np.save("cap.npy", cap)
np.save("next_wor.npy", next_wor)
print(cap.shape)
print(next_wor.shape)
with open('train_enc_img.p', 'rb') as f:
enc_img = pickle.load(f, encoding="bytes")
imgs = []
for item in range(dframe.shape[0]): #30000
if dframe[item, 0] in enc_img.keys(): #dframe[0,0], [1,0], ... , [4,0] match with 0th key of enc_img
imgs.append(list(enc_img[dframe[item, 0]]))
imgs = np.asarray(imgs)
print(imgs.shape)
images = []
img_names = []
for item in range(num_imgs): #2000
for y in range(pad_seq[item].shape[0]): #14
images.append(imgs[item]) #1st iteration: 14 times name of image in byte form
img_names.append(dframe[item, 0]) # normal form
images = np.asarray(images) #images contains image_name in byte form
np.save("images.npy", images)
img_names = np.asarray(img_names) #img_names contains image_name normally
np.save("img_names.npy", img_names)
print(images.shape)
print(len(img_names))
cap = np.load("cap.npy")
next_wor = np.load("next_wor.npy")
print(cap.shape)
print(next_wor.shape)
images = np.load("images.npy")
print(images.shape)
imag = np.load("img_names.npy")
print(imag.shape)
embed_size = 128
max_len = 40
img_model = Sequential()
img_model.add(Dense(embed_size, input_shape=(2048,), activation='relu'))
img_model.add(RepeatVector(max_len))
img_model.summary()
lang_model = Sequential()
lang_model.add(Embedding(input_dim=vocabulary_size, output_dim=embed_size, input_length=max_len))
lang_model.add(LSTM(256, return_sequences=True))
lang_model.add(TimeDistributed(Dense(embed_size)))
lang_model.summary()
concat = Concatenate()([img_model.output, lang_model.output])
x = LSTM(128, return_sequences=True)(concat)
x = LSTM(512, return_sequences=False)(x)
x = Dense(vocabulary_size)(x)
out = Activation('softmax')(x)
model = Model(inputs=[img_model.input, lang_model.input], outputs = out)
model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy'])
model.summary()
hist = model.fit([images, cap], next_wor, batch_size=512, epochs=210)
for label in ["loss"]:
plt.plot(hist.history[label],label=label)
plt.legend()
plt.xlabel("epochs")
plt.ylabel("loss")
plt.show()
for label in ["accuracy"]:
plt.plot(hist.history[label],label=label)
plt.legend()
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.show()
model.save_weights("model_weights.h5")
def preprocess(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im) #(224,224,3)
im = np.expand_dims(im, axis=0) #(1,224,224,3)
return im
def get_encode(model, img):
image = preprocess(img)
pred = model.predict(image).reshape(2048)
return pred
resnet = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/3376942201_2c45d99237.jpg"
test_img = get_encode(resnet, img)
def predict_cap(image):
start_wor = ["<start>"]
while True:
par_cap = [w_to_i[i] for i in start_wor] #par_cap list is made
par_cap = sequence.pad_sequences([par_cap], maxlen=max_len, padding='post') #convert list to sequence of len = 40
preds = model.predict([np.array([image]), np.array(par_cap)]) # PREDICTION
xx = np.argmax(preds[0])
word_pred = i_to_w[xx] # convert 5972 to DOG
start_wor.append(word_pred) # [dog] is added in list
if word_pred == "<end>" or len(start_wor) > max_len:
break
return ' '.join(start_wor[1:-1])
final_caption = predict_cap(test_img)
from IPython.display import Image,display
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/1.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/car.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/bike.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/tennis.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption) | x_training = open(training_path, 'r').read().split("\n")
x_valid = open(valid_path, 'r').read().split("\n")
x_testing = open(testing_path , 'r').read().split("\n")
| random_line_split |
image_caption.py | # importing libraries
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import numpy as np
import os
import glob
from keras.applications.resnet50 import ResNet50
from keras.optimizers import Adam
from keras.layers import Dense, Flatten,Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector,Concatenate
from keras.models import Sequential, Model
from keras.utils import np_utils
import random
from keras.preprocessing import image, sequence
import matplotlib.pyplot as plt
from google.colab import drive
drive.mount('/content/drive')
import glob
images_directory = '/content/drive/My Drive/Flickr_Data/'
img_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/'
cap_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr8k.token.txt'
training_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.trainImages.txt'
valid_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.devImages.txt'
testing_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.testImages.txt'
cap = open(cap_path, 'r').read().split("\n")
x_training = open(training_path, 'r').read().split("\n")
x_valid = open(valid_path, 'r').read().split("\n")
x_testing = open(testing_path , 'r').read().split("\n")
# Loading cap as values and images as key in dictionary
tok = {}
for item in range(len(cap)-1):
tem = cap[item].split("#") #tem[0]= imgname.jpg ..... tem[1]=0 captionn.
if tem[0] in tok:
tok[tem[0]].append(tem[1][2:])
else:
tok[tem[0]] = [tem[1][2:]] #tem[n]= imgName ... #tok[tem[n]] = list of caption
# Making 3 files with 2 colmns as 'image_id' and 'captions'
training_dataset = open('flickr_8k_train_dataset.txt','wb')
training_dataset.write(b"image_id\tcap\n")
valid_dataset = open('flickr_8k_val_dataset.txt','wb')
valid_dataset.write(b"image_id\tcap\n")
testing_dataset = open('flickr_8k_test_dataset.txt','wb')
testing_dataset.write(b"image_id\tcap\n")
# Loading image ids and captions for each of these images in the above 3 files
for img in x_training:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
training_dataset.write((img+"\t"+caption+"\n").encode())
training_dataset.flush()
training_dataset.close()
for img in x_testing:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
testing_dataset.write((img+"\t"+caption+"\n").encode())
testing_dataset.flush()
testing_dataset.close()
for img in x_valid:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
valid_dataset.write((img+"\t"+caption+"\n").encode())
valid_dataset.flush()
valid_dataset.close()
# Here, we're using ResNet50 Model
from IPython.core.display import display, HTML
model = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
model.summary()
# process images to target size
def preprocess(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im) # (x, y, z)
im = np.expand_dims(im, axis=0) # (0, x, y, z)
return im
training_data = {}
counter=0
for item in x_training:
if item == "":
continue
if counter >= 3000:
break
counter+=1
if counter%1000==0:
print(counter)
path = img_path + item
img = preprocess(path) #to change the dimensions of the image for using ResNet model
pred = model.predict(img).reshape(2048) # shape of each image is (2048, 0)
training_data[item] = pred
# opening train_enc_img.p file and dumping content of training_data to this file
with open( "train_enc_img.p", "wb" ) as pickle_f: #obj hierarchy is converted into byte stream
pickle.dump(training_data, pickle_f )
# Storing image and its corresponding caption into a dataframe
pd_dataset = pd.read_csv("flickr_8k_train_dataset.txt", delimiter='\t')
dframe = pd_dataset.values
print(dframe.shape)
pd_dataset.head()
# Storing all the captions from dframe into a list
senten = []
for item in range(dframe.shape[0]):
senten.append(dframe[item, 1])
#senten will have 30000 length
# First 5 captions stored in senten
senten[:5]
# Splitting each captions stored in 'senten' and storing them in 'wor' as list of list
wor = [i.split() for i in senten]
# Creating a list of all unique wor
uniq = []
for i in wor:
uniq.extend(i)
uniq = list(set(uniq))
print(len(uniq))
vocabulary_size = len(uniq)
# making 2 lists to index each unique word and vice-versa
w_to_i = {val:index for index, val in enumerate(uniq)}
i_to_w = {index:val for index, val in enumerate(uniq)}
w_to_i['UNK'] = 0
w_to_i['raining'] = 8253
i_to_w[0] = 'UNK'
i_to_w[8253] = 'raining'
vocabulary_size = len(w_to_i.keys())
print(vocabulary_size)
max_len = 0
for i in senten:
i = i.split()
if len(i) > max_len:
max_len = len(i)
print(max_len) #finding longest caption
pad_seq, subsequent_wor = [], []
for item in range(dframe.shape[0]): #30000 items
part_seq = []
next_wor = []
text = dframe[item, 1].split() #diving each caption for every image into words
text = [w_to_i[i] for i in text] #finding index for each word
for i in range(1, len(text)):
part_seq.append(text[:i]) #start, 1st word, ... , last word
next_wor.append(text[i]) #1st word, ... , last word, end
pad_part_seq = sequence.pad_sequences(part_seq, max_len, padding='post')
    next_wor_1hot = np.zeros([len(next_wor), vocabulary_size], dtype=bool)  # np.bool is deprecated in newer NumPy; builtin bool behaves the same here
for i,next_word in enumerate(next_wor):
next_wor_1hot[i, next_word] = 1
pad_seq.append(pad_part_seq )
subsequent_wor.append(next_wor_1hot)
pad_seq = np.asarray(pad_seq)
subsequent_wor = np.asarray(subsequent_wor)
print(pad_seq.shape)
print(subsequent_wor.shape)
print(pad_seq[0])
for item in range(len(pad_seq[0])):
for y in range(max_len):
print(i_to_w[pad_seq[0][item][y]],)
print("\n")
print(len(pad_seq[0]))
num_imgs = 2000
cap = np.zeros([0, max_len])
next_wor = np.zeros([0, vocabulary_size])
for item in range(num_imgs): #img_to_padded_seqs.shape[0]):
cap = np.concatenate([cap, pad_seq[item]])
next_wor = np.concatenate([next_wor, subsequent_wor[item]])
np.save("cap.npy", cap)
np.save("next_wor.npy", next_wor)
print(cap.shape)
print(next_wor.shape)
with open('train_enc_img.p', 'rb') as f:
enc_img = pickle.load(f, encoding="bytes")
imgs = []
for item in range(dframe.shape[0]): #30000
if dframe[item, 0] in enc_img.keys(): #dframe[0,0], [1,0], ... , [4,0] match with 0th key of enc_img
imgs.append(list(enc_img[dframe[item, 0]]))
imgs = np.asarray(imgs)
print(imgs.shape)
images = []
img_names = []
for item in range(num_imgs): #2000
for y in range(pad_seq[item].shape[0]): #14
images.append(imgs[item]) #1st iteration: 14 times name of image in byte form
img_names.append(dframe[item, 0]) # normal form
images = np.asarray(images) #images contains image_name in byte form
np.save("images.npy", images)
img_names = np.asarray(img_names) #img_names contains image_name normally
np.save("img_names.npy", img_names)
print(images.shape)
print(len(img_names))
cap = np.load("cap.npy")
next_wor = np.load("next_wor.npy")
print(cap.shape)
print(next_wor.shape)
images = np.load("images.npy")
print(images.shape)
imag = np.load("img_names.npy")
print(imag.shape)
embed_size = 128
max_len = 40
img_model = Sequential()
img_model.add(Dense(embed_size, input_shape=(2048,), activation='relu'))
img_model.add(RepeatVector(max_len))
img_model.summary()
lang_model = Sequential()
lang_model.add(Embedding(input_dim=vocabulary_size, output_dim=embed_size, input_length=max_len))
lang_model.add(LSTM(256, return_sequences=True))
lang_model.add(TimeDistributed(Dense(embed_size)))
lang_model.summary()
concat = Concatenate()([img_model.output, lang_model.output])
x = LSTM(128, return_sequences=True)(concat)
x = LSTM(512, return_sequences=False)(x)
x = Dense(vocabulary_size)(x)
out = Activation('softmax')(x)
model = Model(inputs=[img_model.input, lang_model.input], outputs = out)
model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy'])
model.summary()
hist = model.fit([images, cap], next_wor, batch_size=512, epochs=210)
for label in ["loss"]:
plt.plot(hist.history[label],label=label)
plt.legend()
plt.xlabel("epochs")
plt.ylabel("loss")
plt.show()
for label in ["accuracy"]:
plt.plot(hist.history[label],label=label)
plt.legend()
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.show()
model.save_weights("model_weights.h5")
def preprocess(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im) #(224,224,3)
im = np.expand_dims(im, axis=0) #(1,224,224,3)
return im
def get_encode(model, img):
|
resnet = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/3376942201_2c45d99237.jpg"
test_img = get_encode(resnet, img)
def predict_cap(image):
start_wor = ["<start>"]
while True:
par_cap = [w_to_i[i] for i in start_wor] #par_cap list is made
par_cap = sequence.pad_sequences([par_cap], maxlen=max_len, padding='post') #convert list to sequence of len = 40
preds = model.predict([np.array([image]), np.array(par_cap)]) # PREDICTION
xx = np.argmax(preds[0])
word_pred = i_to_w[xx] # convert 5972 to DOG
start_wor.append(word_pred) # [dog] is added in list
if word_pred == "<end>" or len(start_wor) > max_len:
break
return ' '.join(start_wor[1:-1])
final_caption = predict_cap(test_img)
from IPython.display import Image,display
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/1.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/car.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/bike.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/tennis.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption) | image = preprocess(img)
pred = model.predict(image).reshape(2048)
return pred | identifier_body |
image_caption.py | # importing libraries
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import numpy as np
import os
import glob
from keras.applications.resnet50 import ResNet50
from keras.optimizers import Adam
from keras.layers import Dense, Flatten,Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector,Concatenate
from keras.models import Sequential, Model
from keras.utils import np_utils
import random
from keras.preprocessing import image, sequence
import matplotlib.pyplot as plt
from google.colab import drive
drive.mount('/content/drive')
import glob
images_directory = '/content/drive/My Drive/Flickr_Data/'
img_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/'
cap_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr8k.token.txt'
training_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.trainImages.txt'
valid_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.devImages.txt'
testing_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.testImages.txt'
cap = open(cap_path, 'r').read().split("\n")
x_training = open(training_path, 'r').read().split("\n")
x_valid = open(valid_path, 'r').read().split("\n")
x_testing = open(testing_path , 'r').read().split("\n")
# Loading cap as values and images as key in dictionary
tok = {}
for item in range(len(cap)-1):
tem = cap[item].split("#") #tem[0]= imgname.jpg ..... tem[1]=0 captionn.
if tem[0] in tok:
tok[tem[0]].append(tem[1][2:])
else:
tok[tem[0]] = [tem[1][2:]] #tem[n]= imgName ... #tok[tem[n]] = list of caption
# Making 3 files with 2 colmns as 'image_id' and 'captions'
training_dataset = open('flickr_8k_train_dataset.txt','wb')
training_dataset.write(b"image_id\tcap\n")
valid_dataset = open('flickr_8k_val_dataset.txt','wb')
valid_dataset.write(b"image_id\tcap\n")
testing_dataset = open('flickr_8k_test_dataset.txt','wb')
testing_dataset.write(b"image_id\tcap\n")
# Loading image ids and captions for each of these images in the above 3 files
for img in x_training:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
training_dataset.write((img+"\t"+caption+"\n").encode())
training_dataset.flush()
training_dataset.close()
for img in x_testing:
|
testing_dataset.close()
for img in x_valid:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
valid_dataset.write((img+"\t"+caption+"\n").encode())
valid_dataset.flush()
valid_dataset.close()
# Here, we're using ResNet50 Model
from IPython.core.display import display, HTML
model = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
model.summary()
# process images to target size
def preprocess(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im) # (x, y, z)
im = np.expand_dims(im, axis=0) # (0, x, y, z)
return im
training_data = {}
counter=0
for item in x_training:
if item == "":
continue
if counter >= 3000:
break
counter+=1
if counter%1000==0:
print(counter)
path = img_path + item
img = preprocess(path) #to change the dimensions of the image for using ResNet model
pred = model.predict(img).reshape(2048) # shape of each image is (2048, 0)
training_data[item] = pred
# opening train_enc_img.p file and dumping content of training_data to this file
with open( "train_enc_img.p", "wb" ) as pickle_f: #obj hierarchy is converted into byte stream
pickle.dump(training_data, pickle_f )
# Storing image and its corresponding caption into a dataframe
pd_dataset = pd.read_csv("flickr_8k_train_dataset.txt", delimiter='\t')
dframe = pd_dataset.values
print(dframe.shape)
pd_dataset.head()
# Storing all the captions from dframe into a list
senten = []
for item in range(dframe.shape[0]):
senten.append(dframe[item, 1])
#senten will have 30000 length
# First 5 captions stored in senten
senten[:5]
# Splitting each captions stored in 'senten' and storing them in 'wor' as list of list
wor = [i.split() for i in senten]
# Creating a list of all unique wor
uniq = []
for i in wor:
uniq.extend(i)
uniq = list(set(uniq))
print(len(uniq))
vocabulary_size = len(uniq)
# making 2 lists to index each unique word and vice-versa
w_to_i = {val:index for index, val in enumerate(uniq)}
i_to_w = {index:val for index, val in enumerate(uniq)}
w_to_i['UNK'] = 0
w_to_i['raining'] = 8253
i_to_w[0] = 'UNK'
i_to_w[8253] = 'raining'
vocabulary_size = len(w_to_i.keys())
print(vocabulary_size)
max_len = 0
for i in senten:
i = i.split()
if len(i) > max_len:
max_len = len(i)
print(max_len) #finding longest caption
pad_seq, subsequent_wor = [], []
for item in range(dframe.shape[0]): #30000 items
part_seq = []
next_wor = []
text = dframe[item, 1].split() #diving each caption for every image into words
text = [w_to_i[i] for i in text] #finding index for each word
for i in range(1, len(text)):
part_seq.append(text[:i]) #start, 1st word, ... , last word
next_wor.append(text[i]) #1st word, ... , last word, end
pad_part_seq = sequence.pad_sequences(part_seq, max_len, padding='post')
    next_wor_1hot = np.zeros([len(next_wor), vocabulary_size], dtype=bool)  # np.bool is deprecated in newer NumPy; builtin bool behaves the same here
for i,next_word in enumerate(next_wor):
next_wor_1hot[i, next_word] = 1
pad_seq.append(pad_part_seq )
subsequent_wor.append(next_wor_1hot)
pad_seq = np.asarray(pad_seq)
subsequent_wor = np.asarray(subsequent_wor)
print(pad_seq.shape)
print(subsequent_wor.shape)
print(pad_seq[0])
for item in range(len(pad_seq[0])):
for y in range(max_len):
print(i_to_w[pad_seq[0][item][y]],)
print("\n")
print(len(pad_seq[0]))
num_imgs = 2000
cap = np.zeros([0, max_len])
next_wor = np.zeros([0, vocabulary_size])
for item in range(num_imgs): #img_to_padded_seqs.shape[0]):
cap = np.concatenate([cap, pad_seq[item]])
next_wor = np.concatenate([next_wor, subsequent_wor[item]])
np.save("cap.npy", cap)
np.save("next_wor.npy", next_wor)
print(cap.shape)
print(next_wor.shape)
with open('train_enc_img.p', 'rb') as f:
enc_img = pickle.load(f, encoding="bytes")
imgs = []
for item in range(dframe.shape[0]): #30000
if dframe[item, 0] in enc_img.keys(): #dframe[0,0], [1,0], ... , [4,0] match with 0th key of enc_img
imgs.append(list(enc_img[dframe[item, 0]]))
imgs = np.asarray(imgs)
print(imgs.shape)
images = []
img_names = []
for item in range(num_imgs): #2000
for y in range(pad_seq[item].shape[0]): #14
images.append(imgs[item]) #1st iteration: 14 times name of image in byte form
img_names.append(dframe[item, 0]) # normal form
images = np.asarray(images) #images contains image_name in byte form
np.save("images.npy", images)
img_names = np.asarray(img_names) #img_names contains image_name normally
np.save("img_names.npy", img_names)
print(images.shape)
print(len(img_names))
cap = np.load("cap.npy")
next_wor = np.load("next_wor.npy")
print(cap.shape)
print(next_wor.shape)
images = np.load("images.npy")
print(images.shape)
imag = np.load("img_names.npy")
print(imag.shape)
embed_size = 128
max_len = 40
img_model = Sequential()
img_model.add(Dense(embed_size, input_shape=(2048,), activation='relu'))
img_model.add(RepeatVector(max_len))
img_model.summary()
lang_model = Sequential()
lang_model.add(Embedding(input_dim=vocabulary_size, output_dim=embed_size, input_length=max_len))
lang_model.add(LSTM(256, return_sequences=True))
lang_model.add(TimeDistributed(Dense(embed_size)))
lang_model.summary()
concat = Concatenate()([img_model.output, lang_model.output])
x = LSTM(128, return_sequences=True)(concat)
x = LSTM(512, return_sequences=False)(x)
x = Dense(vocabulary_size)(x)
out = Activation('softmax')(x)
model = Model(inputs=[img_model.input, lang_model.input], outputs = out)
model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy'])
model.summary()
hist = model.fit([images, cap], next_wor, batch_size=512, epochs=210)
for label in ["loss"]:
plt.plot(hist.history[label],label=label)
plt.legend()
plt.xlabel("epochs")
plt.ylabel("loss")
plt.show()
for label in ["accuracy"]:
plt.plot(hist.history[label],label=label)
plt.legend()
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.show()
model.save_weights("model_weights.h5")
def preprocess(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im) #(224,224,3)
im = np.expand_dims(im, axis=0) #(1,224,224,3)
return im
def get_encode(model, img):
image = preprocess(img)
pred = model.predict(image).reshape(2048)
return pred
resnet = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/3376942201_2c45d99237.jpg"
test_img = get_encode(resnet, img)
def predict_cap(image):
start_wor = ["<start>"]
while True:
par_cap = [w_to_i[i] for i in start_wor] #par_cap list is made
par_cap = sequence.pad_sequences([par_cap], maxlen=max_len, padding='post') #convert list to sequence of len = 40
preds = model.predict([np.array([image]), np.array(par_cap)]) # PREDICTION
xx = np.argmax(preds[0])
word_pred = i_to_w[xx] # convert 5972 to DOG
start_wor.append(word_pred) # [dog] is added in list
if word_pred == "<end>" or len(start_wor) > max_len:
break
return ' '.join(start_wor[1:-1])
final_caption = predict_cap(test_img)
from IPython.display import Image,display
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/1.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/car.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/bike.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/tennis.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption) | if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
testing_dataset.write((img+"\t"+caption+"\n").encode())
testing_dataset.flush() | conditional_block |
image_caption.py | # importing libraries
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import numpy as np
import os
import glob
from keras.applications.resnet50 import ResNet50
from keras.optimizers import Adam
from keras.layers import Dense, Flatten,Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector,Concatenate
from keras.models import Sequential, Model
from keras.utils import np_utils
import random
from keras.preprocessing import image, sequence
import matplotlib.pyplot as plt
from google.colab import drive
drive.mount('/content/drive')
import glob
images_directory = '/content/drive/My Drive/Flickr_Data/'
img_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/'
cap_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr8k.token.txt'
training_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.trainImages.txt'
valid_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.devImages.txt'
testing_path = '/content/drive/My Drive/Flickr_Data/Flickr_Data/Flickr_TextData/Flickr_8k.testImages.txt'
cap = open(cap_path, 'r').read().split("\n")
x_training = open(training_path, 'r').read().split("\n")
x_valid = open(valid_path, 'r').read().split("\n")
x_testing = open(testing_path , 'r').read().split("\n")
# Loading cap as values and images as key in dictionary
tok = {}
for item in range(len(cap)-1):
tem = cap[item].split("#") #tem[0]= imgname.jpg ..... tem[1]=0 captionn.
if tem[0] in tok:
tok[tem[0]].append(tem[1][2:])
else:
tok[tem[0]] = [tem[1][2:]] #tem[n]= imgName ... #tok[tem[n]] = list of caption
# Making 3 files with 2 colmns as 'image_id' and 'captions'
training_dataset = open('flickr_8k_train_dataset.txt','wb')
training_dataset.write(b"image_id\tcap\n")
valid_dataset = open('flickr_8k_val_dataset.txt','wb')
valid_dataset.write(b"image_id\tcap\n")
testing_dataset = open('flickr_8k_test_dataset.txt','wb')
testing_dataset.write(b"image_id\tcap\n")
# Loading image ids and captions for each of these images in the above 3 files
for img in x_training:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
training_dataset.write((img+"\t"+caption+"\n").encode())
training_dataset.flush()
training_dataset.close()
for img in x_testing:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
testing_dataset.write((img+"\t"+caption+"\n").encode())
testing_dataset.flush()
testing_dataset.close()
for img in x_valid:
if img == '':
continue
for capt in tok[img]:
caption = "<start> "+ capt + " <end>"
valid_dataset.write((img+"\t"+caption+"\n").encode())
valid_dataset.flush()
valid_dataset.close()
# Here, we're using ResNet50 Model
from IPython.core.display import display, HTML
model = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
model.summary()
# process images to target size
def preprocess(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im) # (x, y, z)
im = np.expand_dims(im, axis=0) # (0, x, y, z)
return im
training_data = {}
counter=0
for item in x_training:
if item == "":
continue
if counter >= 3000:
break
counter+=1
if counter%1000==0:
print(counter)
path = img_path + item
img = preprocess(path) #to change the dimensions of the image for using ResNet model
pred = model.predict(img).reshape(2048) # shape of each image is (2048, 0)
training_data[item] = pred
# opening train_enc_img.p file and dumping content of training_data to this file
with open( "train_enc_img.p", "wb" ) as pickle_f: #obj hierarchy is converted into byte stream
pickle.dump(training_data, pickle_f )
# Storing image and its corresponding caption into a dataframe
pd_dataset = pd.read_csv("flickr_8k_train_dataset.txt", delimiter='\t')
dframe = pd_dataset.values
print(dframe.shape)
pd_dataset.head()
# Storing all the captions from dframe into a list
senten = []
for item in range(dframe.shape[0]):
senten.append(dframe[item, 1])
#senten will have 30000 length
# First 5 captions stored in senten
senten[:5]
# Splitting each captions stored in 'senten' and storing them in 'wor' as list of list
wor = [i.split() for i in senten]
# Creating a list of all unique wor
uniq = []
for i in wor:
uniq.extend(i)
uniq = list(set(uniq))
print(len(uniq))
vocabulary_size = len(uniq)
# making 2 lists to index each unique word and vice-versa
w_to_i = {val:index for index, val in enumerate(uniq)}
i_to_w = {index:val for index, val in enumerate(uniq)}
w_to_i['UNK'] = 0
w_to_i['raining'] = 8253
i_to_w[0] = 'UNK'
i_to_w[8253] = 'raining'
vocabulary_size = len(w_to_i.keys())
print(vocabulary_size)
max_len = 0
for i in senten:
i = i.split()
if len(i) > max_len:
max_len = len(i)
print(max_len) #finding longest caption
pad_seq, subsequent_wor = [], []
for item in range(dframe.shape[0]): #30000 items
part_seq = []
next_wor = []
text = dframe[item, 1].split() #diving each caption for every image into words
text = [w_to_i[i] for i in text] #finding index for each word
for i in range(1, len(text)):
part_seq.append(text[:i]) #start, 1st word, ... , last word
next_wor.append(text[i]) #1st word, ... , last word, end
pad_part_seq = sequence.pad_sequences(part_seq, max_len, padding='post')
    next_wor_1hot = np.zeros([len(next_wor), vocabulary_size], dtype=bool)  # np.bool is deprecated in newer NumPy; builtin bool behaves the same here
for i,next_word in enumerate(next_wor):
next_wor_1hot[i, next_word] = 1
pad_seq.append(pad_part_seq )
subsequent_wor.append(next_wor_1hot)
pad_seq = np.asarray(pad_seq)
subsequent_wor = np.asarray(subsequent_wor)
print(pad_seq.shape)
print(subsequent_wor.shape)
print(pad_seq[0])
for item in range(len(pad_seq[0])):
for y in range(max_len):
print(i_to_w[pad_seq[0][item][y]],)
print("\n")
print(len(pad_seq[0]))
num_imgs = 2000
cap = np.zeros([0, max_len])
next_wor = np.zeros([0, vocabulary_size])
for item in range(num_imgs): #img_to_padded_seqs.shape[0]):
cap = np.concatenate([cap, pad_seq[item]])
next_wor = np.concatenate([next_wor, subsequent_wor[item]])
np.save("cap.npy", cap)
np.save("next_wor.npy", next_wor)
print(cap.shape)
print(next_wor.shape)
with open('train_enc_img.p', 'rb') as f:
enc_img = pickle.load(f, encoding="bytes")
imgs = []
for item in range(dframe.shape[0]): #30000
if dframe[item, 0] in enc_img.keys(): #dframe[0,0], [1,0], ... , [4,0] match with 0th key of enc_img
imgs.append(list(enc_img[dframe[item, 0]]))
imgs = np.asarray(imgs)
print(imgs.shape)
images = []
img_names = []
for item in range(num_imgs): #2000
for y in range(pad_seq[item].shape[0]): #14
images.append(imgs[item]) #1st iteration: 14 times name of image in byte form
img_names.append(dframe[item, 0]) # normal form
images = np.asarray(images) #images contains image_name in byte form
np.save("images.npy", images)
img_names = np.asarray(img_names) #img_names contains image_name normally
np.save("img_names.npy", img_names)
print(images.shape)
print(len(img_names))
cap = np.load("cap.npy")
next_wor = np.load("next_wor.npy")
print(cap.shape)
print(next_wor.shape)
images = np.load("images.npy")
print(images.shape)
imag = np.load("img_names.npy")
print(imag.shape)
embed_size = 128
max_len = 40
img_model = Sequential()
img_model.add(Dense(embed_size, input_shape=(2048,), activation='relu'))
img_model.add(RepeatVector(max_len))
img_model.summary()
lang_model = Sequential()
lang_model.add(Embedding(input_dim=vocabulary_size, output_dim=embed_size, input_length=max_len))
lang_model.add(LSTM(256, return_sequences=True))
lang_model.add(TimeDistributed(Dense(embed_size)))
lang_model.summary()
concat = Concatenate()([img_model.output, lang_model.output])
x = LSTM(128, return_sequences=True)(concat)
x = LSTM(512, return_sequences=False)(x)
x = Dense(vocabulary_size)(x)
out = Activation('softmax')(x)
model = Model(inputs=[img_model.input, lang_model.input], outputs = out)
model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy'])
model.summary()
hist = model.fit([images, cap], next_wor, batch_size=512, epochs=210)
for label in ["loss"]:
plt.plot(hist.history[label],label=label)
plt.legend()
plt.xlabel("epochs")
plt.ylabel("loss")
plt.show()
for label in ["accuracy"]:
plt.plot(hist.history[label],label=label)
plt.legend()
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.show()
model.save_weights("model_weights.h5")
def preprocess(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im) #(224,224,3)
im = np.expand_dims(im, axis=0) #(1,224,224,3)
return im
def | (model, img):
image = preprocess(img)
pred = model.predict(image).reshape(2048)
return pred
resnet = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/3376942201_2c45d99237.jpg"
test_img = get_encode(resnet, img)
def predict_cap(image):
start_wor = ["<start>"]
while True:
par_cap = [w_to_i[i] for i in start_wor] #par_cap list is made
par_cap = sequence.pad_sequences([par_cap], maxlen=max_len, padding='post') #convert list to sequence of len = 40
preds = model.predict([np.array([image]), np.array(par_cap)]) # PREDICTION
xx = np.argmax(preds[0])
word_pred = i_to_w[xx] # convert 5972 to DOG
start_wor.append(word_pred) # [dog] is added in list
if word_pred == "<end>" or len(start_wor) > max_len:
break
return ' '.join(start_wor[1:-1])
final_caption = predict_cap(test_img)
from IPython.display import Image,display
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/1.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/car.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/bike.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption)
img = "/content/drive/My Drive/Flickr_Data/Flickr_Data/Images/tennis.jpg"
test_img = get_encode(resnet, img)
from IPython.display import Image,display
final_caption = predict_cap(test_img)
z = Image(filename=img)
display(z)
print(final_caption) | get_encode | identifier_name |
idempotent.rs | //! mut_aliasing -- several links, AT_LEAST one of them is MUTABLE
//! BTW: bunch_of_closures_with_mut_refs is like obj which alias through its params, which is
//! mut_aliased refs on some another obj, or shared data
//! that_is: CLOSURE_CAPTURING == OBJ.MUT_ALIASED_REFS
|
0
// fns with side-effs: --== IDEMPOTENT VS NON_IDEMPONTENT ==--
//
/// fn is idempotent because the side-effs it causes fulfill 1 cond (formerly 2 -- the 2nd was wrong):
/// 1. based on "input" or "immutable free-vars" .. NO mut_aliasing
/// ..
/// (here was the 2nd statement) Just change a value .. not ADD.
/// .. BUT .. it's NEVERTHELESS .. just statement 1 again (MUT_aliasing)
//
// (small caveat: JUST REMINDER)
var x; fn some_fn(n) { x += n*x }
// not IDEMPOTENT because: *WRONG: we update, and/but not completely rewrite `x`,
// *RIGHT: mut_aliasing (our computed value for `x` rely on mut_aliased `x`)
// (BUT)
// BTW(p.s.)--v: `some_fn` break rule:
/// "mutate only TREE_in_the_WOOD , emit "own_copied/immutabl" value"
// (TREE_in_the_WOOD is accesed or through "self/this", or "closure"-capturing)
//
fn some_fn(a = [], n) { a.push(n) }
// ^-- here was: "ADD value, not just change."
// BUT actually .. it's also can be considered as MUT_alising .. since result of `push`
// depends on `a`, .. and if we call `some_fn` 2 times of course it gives a different sideeff,
// since its computation is based on the MUTated value (on that very same value)
// ..
// From the other hand .. `some_fn` can be considered as IDEMPONENT, since it's always gives
// the same side-eff for the same input-VALUE .. that is if we alway will give `[x] as a` ..
// we always receive `[x, n] as a`
// ..
// (v-- almost deleted this :lol:, thought it was rubbish, BUT NO-NO-NO)
//> ... SO, if "fn" mutate own args, and computation which "generate new value,
// to update old value in args (in case of [] is some "next-empty-cell" in arr, and `len`)
// is based ON THIS ARG", ..
/// we (most likely) will consider it as NOT IDEMPOTENT
/// .. SO .. WHAT IS IMPORTANT ??
// despite to be NOT IDEMPOTENT (from "caller" perspective) .. since we have
// 2 refs (caller and callee) and "our"(callee) is `mut` .. WE HAVE ADVANTAGE
// of EXPLICIT PASSING THIS REF(to callee) , BUT unfortunately in JS we can't write
// `&mut reff` ..
// IT'S (1) PREVENT "ORDER"-PROBLEM described above (2) makes MORE OBVIOUS for
// "reviewer" that "a" can be mutated (he at least SEE this "a" is passsed) ..
// ..but `some_fn(num)` is MUCH WORSE
//
/// TRY TO MAKE ALL EFFECTFUL FNs IDEMPOTENT
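//
// A minimal Rust sketch of that contrast (all names here are mine, purely illustrative, not from
// any real codebase): the closure version mutates state it captured, so calling it twice with the
// same argument produces different effects; the `&mut`-parameter version makes the mutated target
// visible at the call site and is repeatable for the same input VALUE.
fn main() {
    // Non-idempotent style: the effect depends on hidden, mutably-aliased state.
    let mut x = 10;
    let mut bump = |n: i32| { x += n * x; }; // closure captures `x` by &mut
    bump(2);
    bump(2); // same input, different resulting `x` -- order/frequency now matter
    println!("x = {x}");

    // More explicit style: the mutated target is passed at the call site.
    let mut a: Vec<i32> = vec![1];
    push_value(&mut a, 7); // the reviewer can SEE that `a` may be mutated
    println!("a = {a:?}");
}

// For the same *value* of `a`, the resulting side effect is always the same.
fn push_value(a: &mut Vec<i32>, n: i32) {
    a.push(n);
}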
|
1
// ALSO: (p.s. JS here, ONLY SINGLE-threaded programs and (in this post) SYNC)
{
fn calculateAverage(list) { // << obviously IDEMPOTENT
sum = list.reduce((sum, x) => x + sum, 0);
return sum / list.length;
}
var sum, nums = [1,2,4,7,11,16,22];
var avg = calculateAverage( nums );
}
// Getify said that (if `sum` not mutably-aliased some another closure)
// mutation of `sum` here it's "UNOBSERVED SIDE-EFF" ..
// (.. although, such "UNOBSERVED MUTATIONS" can easily be eliminated, and we can use
// pure-style easily here (but actually here it's just MATTER OF TASTE)
// )
// ... SO .. what reasons we have for IDEMPOTENT MUTATIONS aka "unobserved side-eff"?
// ... either NONE or "OWNED DATA"(P.Graham)
// .. and further author cite Hickey:
/// "If a tree falls in the forest, but no one is around to hear it, does it still make a sound?"
// (^-- NO SHARED MUTABLE ALIASING .. or ..
// .. we INCAPSULATE some data to MAINTAIN invariant [like in
// exmps/js_state.rs:state_sharing] )
//
// .. so it "like" pure-fn, or in term of P.Graham fn what used "owned data"
fn get_cached_fn() {
/// AS-OPPOSED to "`a` in `some_fn`"(above) `cache` is not "mut aliased"(only one ref which
/// mutate and read)
var cache = {}; // << "owned data", "unheard tree in the forest"
return fn XX(n) { // "unobserved mutation", "like-pure", "right incapsulation"
if (cache[n] !== undefined) return cache[n] // .... call it whatever you like
..... /* do computation */ cache[n] = res_of_computation();
}
} // author: "The point is that if XX(..) is the only part of the program that accesses
// and updates the cache side cause/effect, so it's "like"-pure"
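//
// A hedged Rust sketch of the same "owned data / tree in the wood" caching idea (struct and fn
// names are invented for illustration): the HashMap is owned by the struct, nothing else holds a
// reference to it, so the internal mutation stays unobserved and the wrapper behaves like a pure
// fn from the caller's point of view.
use std::collections::HashMap;

struct Cached {
    cache: HashMap<u64, u64>, // "owned data": only `get` reads or writes it
}

impl Cached {
    fn new() -> Self {
        Cached { cache: HashMap::new() }
    }

    // Same input always yields the same answer; the internal mutation is an
    // "unobserved side effect" because the cache is not aliased anywhere else.
    fn get(&mut self, n: u64) -> u64 {
        if let Some(&v) = self.cache.get(&n) {
            return v;
        }
        let v = expensive(n);
        self.cache.insert(n, v);
        v
    }
}

fn expensive(n: u64) -> u64 {
    (0..=n).sum() // stand-in for a real computation
}

fn main() {
    let mut c = Cached::new();
    assert_eq!(c.get(10), c.get(10)); // repeated calls: same value, no visible effect
}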
|
2.0
// ALSO: ORDER/FREQUENCY DEPENDENCY
{
var users = {}, userOrders = {};
fn fetchUserData(userId) {
ajax( "http://some.api/user/" + userId, fn onUserData(userData) { | }
fn fetchOrders(userId) {
ajax( "http://some.api/orders/" + userId, fn onOrders(orders) {
for (let i = 0; i < orders.length; i++) {
// keep a reference to latest order for each user
users[userId].latestOrder = orders[i];
userOrders[orders[i].orderId] = orders[i];
}
});
}
}
// `fetchUserData` should be called before `fetchOrders` ... AVOID SUCH "LOCAL STATE MACHINES"
/// [GIST] there is should NOT BE ANY DEPENDENCY between ORDER/FREQUENCY OF CALL to
/// "getters/setters" ..(that is (kind of) SHOULD BE ONLY IDEMPOTENT-EFFECTFUL-FN)..
/// of some "local state machine" aka "class instance" aka "object"(with incapsulation)
// ..
// ...So.. in this exmp, (also Miwko: initializing DB, which depends on "glob-state" - that is
// "aliasing") .. `fetchOrders` depends on
///| to be called.. IN SOME PARTICULAR STATE .. and this state is reached through
///| "PARAMS-REF"(aliasing) ... SO.. in such case,
///| -=! SUCH STATE SHOULD BE PASSED DIRECTLY !=-..
/// (as in Miwko db-inital we refactor too)
/// ..to AVOID "ORDER/FREQUENCY-OF-CALL(getter/setter) DEPENDENCY"(to set "needed" state)
// ... any Getter/Setter should be designed in such way to BE ABLE to be called in ANY STATE!
//>> OR.. THIS "ORDER_DEPENDENCY/COUPLING" SHOULD BE INCAPSULATED IN fn
/// (BUT EXPLICIT PASSING OF MUT_ALISASED DEPENDENCY AS ARG
/// IF IT POSSIBLE
/// IS BETTER)
// (in Miwko db-exmp: if we don't pass explicitly "mut-aliased"-state , then we should
// incapsulate all COUPLED fns in single fn .. that is prepare NEEDED STATE ..
// BUT.. IF THIS CHAIN OF COUPLED-fns is too long ..
// |> WE RETURN TO THE PROBLEM ..
// since refactoring of the fn-that-incapsulates-coupling is ERROR PRONE .. SO
/// THIS CHAIN OF COUPLED_FNs SHOULD BE SHORT .. USUALLY === 2 )
// ..
// Of course to achieve some "desired side-effs"(without them programm is useless) we
// OFTEN need to call "getters/setters" in some particular order ..BUT.. that DOESN'T mean that
// this order should become SPECIAL(HARDCODED) aka ORDER DEPENDENCY .. we just pick that order,
// but it should NOT BE SPECIAL for our "get/set-ters".
//
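//
// A small Rust sketch of the "pass the needed state directly" refactor (types are hypothetical
// stand-ins for the users/orders example above): instead of fetch_orders silently requiring that
// fetch_user was called first, it takes the user record it depends on as an argument, so no call
// order can leave the object in an inconsistent state.
use std::collections::HashMap;

#[derive(Debug)]
struct User {
    id: u32,
    latest_order: Option<u32>,
}

// Order-independent API: the dependency is an explicit argument,
// not a piece of shared state that some earlier call must have prepared.
fn fetch_user(id: u32) -> User {
    User { id, latest_order: None } // stand-in for the real ajax call
}

fn fetch_orders(user: &mut User, orders: &[u32], index: &mut HashMap<u32, u32>) {
    for &order_id in orders {
        user.latest_order = Some(order_id); // keep a reference to the latest order
        index.insert(order_id, user.id);
    }
}

fn main() {
    let mut index = HashMap::new();
    let mut user = fetch_user(42);
    fetch_orders(&mut user, &[7, 9], &mut index); // cannot be called "too early"
    println!("{user:?} {index:?}");
}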
| 2.1
// So.. why I looked at ::exmps/js_state.rs::state_sharing fn .. and thought: "why do I need
// "immutable-world", clojure, etc .. after all, this fn is pretty good, and does its job well"
// ..
// .... BECAUSE: this "local-state-machine" doesn't rely on a PARTICULAR STATE or ORDER/FREQUENCY..
// so IN ANY STATE ... we can call "get/set-ters" in ANY ORDER .. AND IT'S OK, IT DO EXACTLY
// WHAT WE WANT, EXECTLY SIDE-EFF WHAT WE WANT.
/// After createing an object we can start do ANY "STATE_TRANSITION" which this
/// object has, BUT, if only some STATE_TRANSITIONS allowed, IT'S ERRORPRONE.
// ...> whether "produce" or "consume" be called first .. WE DON'T CARE,
// ...> what the state we have in the moment of call to any fn(cons/prod) .. WE DON'T CARE...
// ..(some particular-state doesn't have special meaning for us)
// .....
///|"consume"(as a getter[but it aslo a setter]) is NOT IDEMPOTENT .. because..
// for this I need pass the list-for-consuming EXPLICITLY..
// (the main condition for IDEMPOTENCY - absence of MUT_ALIASING[here `list`])
/// .. BUT:
/// It INCAPSULATE "INVARIANT" of "prefiex is chenged only when we "splavili" old values in a
/// list and clean it up"
///>> .. INCUPSULATEs IN ONE STATE_TRANSITION
// .. SO .. it lets both (`consume` and `produce`) "fns" work without FEAR about the shared
// (between them) STATE(and INCAPSULATED) on which they rely(mut_aliased), since it isn't
// aliased elsewhere("tree in the wood") .. and is PROPERLY changed (INVARIANT) in `consume`, and
// this change is INCAPSULATED in the right place
/// that_is: WE DEFINE ATOMIC_STATE_TRANSITIONs, why atomic, since state mutation/transaction
/// can't be "interrupted" (here exmp: "prefix" changed BUT we not yet "splavili" values from
/// `list` -- IT'S INTERRUPTION of STATE_TRANSITION)
/// ..
// .. also .. it doesn't rely on any particular state of `list` when we need to flush it, so
// we don't need to MAINTAIN THIS INVARIANT ourselves.
/// ..
/// | SO.. we may say, that every "local-state-mach/obj" has some "patterns" of state_changing
/// | .. let's call it "STATE_TRANSITIONS", and we should properly "FORM" this
/// | state_transitions: .. incapsulate "INVARIANTS" in ONE_MOVE_PHASE, and incapsulate
/// | "DATA-ACCESS" ... !!! TO MAKE THEM ATOMIC !!!
/// ..
/// AGAIN: (1) PROPER DEFINE (2) MAKE ATOMIC .. UNIT-test those 2-RULES is ACCOMPLISHED
// ..
// | moreover: list consuming, and set-new-prefix is (temporary) COUPLED ..
// | .. that is ORDER DEPENDENCY ..
// | ..BUT.. we INCAPSULATE it in "consume" - WE CANNOT INDEPENDENTLY SET PREFIX .. since
// | it COUPLED .. and if we could(set pref independently) that would CREATE ABILITY to write
// | code with ORDER/FREQUENCY DEPENDENCY(or call it temporal-coupling)
BL99, da "STATE_TRANSITION" eto prosto well-defined API u obecta kotoryi
incapsuliruet TREE_in_the_WOOD
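//
// A hedged Rust version of the produce/consume idea (method names follow the js_state.rs example
// only loosely, the rest is my assumption): list and prefix are private, and the only way to
// change the prefix is `consume`, which flushes the list AND swaps the prefix in one move -- so
// the invariant can never be observed half-done.
struct Collector {
    prefix: String,
    list: Vec<String>, // "tree in the wood": not aliased outside this struct
}

impl Collector {
    fn new(prefix: &str) -> Self {
        Collector { prefix: prefix.to_string(), list: Vec::new() }
    }

    // Callable in any state, any number of times.
    fn produce(&mut self, value: &str) {
        self.list.push(format!("{}-{}", self.prefix, value));
    }

    // One ATOMIC state transition: drain the list AND install the new prefix.
    // The caller can never see "new prefix set, old values not yet flushed".
    fn consume(&mut self, new_prefix: &str) -> Vec<String> {
        let drained = std::mem::take(&mut self.list);
        self.prefix = new_prefix.to_string();
        drained // emit an owned value; no reference into our state escapes
    }
}

fn main() {
    let mut c = Collector::new("a");
    c.produce("1");
    let flushed = c.consume("b"); // order/frequency of produce/consume never matters
    c.produce("2");
    println!("{flushed:?} / pending = {}", c.list.len());
}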
| 2.2
/// |about STATE_TRANSITION : since I invented this term after written bellow--v)
// | to achive any goal with "response" object .. we need pass some STATE_TRANSITION,
// and every this STATE_TRANSITION make sense (OFF_COURSE) only in ATOMIC form
// ... `req, method, action` <<- THIS-IS main STATE_TRANSITION of our object, so
// we need INCAPSULATE(ATOMICALLY_BOUND) it, ... i.e. MAKE SURE it's ATOMIC
// ..
// .. in expm of Bugaenko-request .. we can either do..
// 1. ***
Request req = new Request("http://exmp.com", POST);
//..
// .. and NO `self.method`-setter .. that is we create API that EXCLUDE possibility of
// "INCONSISTENT STATE"(where getters can be "buggy" with wrong ORDER-dependency)
/// AGAIN: we INCAPSULATE this "ORDER_DEPENDENCY/COUPLING"
// (in some sense `req` "like" immutable, that is to use "GET"-req we NEED CREATE NEW REQUEST)
// 2. ***
res = req.fetch("POST", body) // we talk about SYNC calls in this post
//..
// METHOD should be passed explicitly to `fetch`.. REALLY! why should a "request"-obj contain in
// its state a METHOD param? .. (also `body`)
// .. that is AGAIN: we incapsulate "coupled" effectful-methods( firstly set METHOD, secondly
// fetch result), and explicitly pass the state of which effectful method is depend .. that is
// make method IDEMPOTENT
// ..
// p.s. if we want "bind"(set object to state where it's use some-particul. METHOD) to some
// particular METHOD in JS we should just use "bind"(aka Carrying)
// .. or maybe in Java-like create some OBJECT-WRAPPER(here Fetcher) .. which AGAIN
// configured(INCAPUSLATE-COUPLING)-on-initialization(that is IMMUTABLE)
let res_obj: Fetcher = req.fetch("POST", body);
/* ... somewhere .. >>> */ res_obj.get_result();
/// {
// after 1/2 year: also '+' of such aproach as opposed to
req.fetch(POST, body, &mut res_obj) // because if we create `res_obj`(res_buf) inside
// `fetch` we know that we have no aliasing SINCE: only in rust we can know that if we pass
// `&mut res_obj` then there is no aliasing .... BUT.. in Java it's just impossible ...
// so it's another trick how BY MEANS of api-design we can eliminate problem of
// aliasing/mutability
/// }
// .. and for now: we "BIND" our req to particular state .. but INCAPSULATE this
// "COUPLED MUTATIONS" like "consume" in "exmp/js_state.rs::state_sharing"
/// {
// after 1/2-year: OR: If we even store `fetch`-result in `req` object ... we should
// use `check / throw-excpe` that we can do next `fetch` only after we consume already
// fetched body
/// }
//..
// |... so you can see how understanding "Mut-aliasing"+"order/frequency-dependency" leads us
// | TO CHANGING THE DESIGN(API) of our program.
//
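//
// A Rust sketch of the two API shapes discussed above (types and method names are invented for
// illustration, not Bugaenko's actual API): the method is fixed at construction, so no setter can
// leave the request half-configured, and the body is passed explicitly to `fetch`, which keeps
// that call idempotent with respect to the object's state.
#[derive(Clone, Copy, Debug)]
enum Method { Get, Post }

// Option 1: the "coupling" (url + method) is fixed at construction time;
// to issue a GET you build a new request -- no order-dependent setter exists.
struct Request {
    url: String,
    method: Method,
}

impl Request {
    fn new(url: &str, method: Method) -> Self {
        Request { url: url.to_string(), method }
    }

    // Option 2 folded in: the body is explicit too, so `fetch` does not depend
    // on any state that some earlier call was supposed to install.
    fn fetch(&self, body: &str) -> String {
        format!("{:?} {} <- {}", self.method, self.url, body) // stand-in for real I/O
    }
}

fn main() {
    let post = Request::new("http://exmp.com", Method::Post);
    let _get = Request::new("http://exmp.com", Method::Get); // a GET is a *new* request
    println!("{}", post.fetch("{\"k\":1}"));
}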
| 2.3
//// .......... MUST_SEE_ALSO: FunLight-ch5-Purifying
// (note: closure-captur == obj.param-ref,
// tags: "local-state-machine" .. INCAPSULATE mutation != HIDE mutation)
// INCAPSULATE mutation == LOCALIZE mutation) | users[userId] = userData;
}); | random_line_split |
idempotent.rs | //! mut_aliasing -- several links, AT_LEAST one of them is MUTABLE
//! BTW: bunch_of_closures_with_mut_refs is like obj which alias through its params, which is
//! mut_aliased refs on some another obj, or shared data
//! that_is: CLOSURE_CAPTURING == OBJ.MUT_ALIASED_REFS
|
0
// fns with side-effs: --== IDEMPOTENT VS NON_IDEMPONTENT ==--
//
/// fn is idempotent because the side-effs it causes fulfill 1 cond (formerly 2 -- the 2nd was wrong):
/// 1. based on "input" or "immutable free-vars" .. NO mut_aliasing
/// ..
/// (here was the 2nd statement) Just change a value .. not ADD.
/// .. BUT .. it's NEVERTHELESS .. just statement 1 again (MUT_aliasing)
//
// (small caveat: JUST REMINDER)
var x; fn some_fn(n) |
// not IDEMPOTENT because: *WRONG: we update, and/but do not completely rewrite `x`,
// *RIGHT: mut_aliasing (our computed value for `x` relies on the mut_aliased `x`)
// (BUT)
// BTW(p.s.)--v: `some_fn` break rule:
/// "mutate only TREE_in_the_WOOD , emit "own_copied/immutabl" value"
// (TREE_in_the_WOOD is accesed or through "self/this", or "closure"-capturing)
//
fn some_fn(a = [], n) { a.push(n) }
// ^-- here was: "ADD value, not just change."
// BUT actually .. it can also be considered as MUT_aliasing .. since the result of `push`
// depends on `a`, .. and if we call `some_fn` 2 times it of course gives a different side-eff,
// since its computation is based on the MUTated value (that very same aliased value)
// ..
// On the other hand .. `some_fn` can be considered as IDEMPOTENT, since it always gives
// the same side-eff for the same input-VALUE .. that is, if we always pass `[x] as a` ..
// we always receive `[x, n] as a`
// ..
// (v-- almost deleted this :lol:, thought it was nonsense, BUT NO-NO-NO)
//> ... SO, if "fn" mutate own args, and computation which "generate new value,
// to update old value in args (in case of [] is some "next-empty-cell" in arr, and `len`)
// is based ON THIS ARG", ..
/// we (most likely) will consider it as NOT IDEMPOTENT
/// .. SO .. WHAT IS IMPORTANT ??
// despite being NOT IDEMPOTENT (from the "caller" perspective) .. since we have
// 2 refs (caller and callee) and "ours"(callee) is `mut` .. WE HAVE THE ADVANTAGE
// of EXPLICITLY PASSING THIS REF(to the callee) , BUT unfortunately in JS we can't write
// `&mut reff` ..
// THIS (1) PREVENTS the "ORDER"-PROBLEM described above (2) makes it MORE OBVIOUS to the
// "reviewer" that "a" can be mutated (he at least SEES that this "a" is passed) ..
// ..but `some_fn(num)` is MUCH WORSE
//
/// TRY TO MAKE ALL EFFECTFUL-FNs IDEMPOTENT
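// --- editor's sketch (not from the original post): the contrast above, in plain Rust.
// `bump_hidden` mutates captured state (mut_aliasing, order/frequency-dependent), while
// `bump_explicit` takes `&mut`, so the mutated value is visible at every call site.
fn demo_idempotency_styles() {
    let mut x = 1.0_f64;
    let mut bump_hidden = |n: f64| { x += n * x }; // hidden dependency on the captured `x`
    bump_hidden(2.0);
    bump_hidden(2.0); // same input, different effect each call -- not idempotent

    fn bump_explicit(a: &mut Vec<i32>, n: i32) { a.push(n) } // mutation is explicit in the signature
    let mut a = vec![1];
    bump_explicit(&mut a, 7); // for the same *value* of `a`, the resulting `a` is always the same
}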
|
1
// ALSO: (p.s. JS here, ONLY SINGLE-threaded programs and (in this post) SYNC)
{
fn calculateAverage(list) { // << obviously IDEMPOTENT
sum = list.reduce((sum, x) => x + sum, 0);
return sum / list.length;
}
var sum, nums = [1,2,4,7,11,16,22];
var avg = calculateAverage( nums );
}
// Getify said that (if `sum` is not mutably-aliased by some other closure)
// the mutation of `sum` here is an "UNOBSERVED SIDE-EFF" ..
// (.. although, such "UNOBSERVED MUTATIONS" can easily be eliminated, and we can use
// a pure style easily here (but actually here it's just a MATTER OF TASTE)
// )
// ... SO .. what reasons we have for IDEMPOTENT MUTATIONS aka "unobserved side-eff"?
// ... either NONE or "OWNED DATA"(P.Graham)
// .. and further author cite Hickey:
/// "If a tree falls in the forest, but no one is around to hear it, does it still make a sound?"
// (^-- NO SHARED MUTABLE ALIASING .. or ..
// .. we INCAPSULATE some data to MAINTAIN an invariant [like in
// exmps/js_state.rs:state_sharing] )
//
// .. so it "like" pure-fn, or in term of P.Graham fn what used "owned data"
fn get_cached_fn() {
/// AS-OPPOSED to "`a` in `some_fn`"(above) `cache` is not "mut aliased"(only one ref which
/// mutates and reads)
var cache = {}; // << "owned data", "unheard tree in the forest"
return fn XX(n) { // "unobserved mutation", "like-pure", "right incapsulation"
if (cache[n] !== undefined) return cache[n] // .... call it whatever you like
..... /* do computation */ cache[n] = res_of_comutation();
}
} // author: "The point is that if XX(..) is the only part of the program that accesses
// and updates the cache side cause/effect, so it's "like"-pure"
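// --- editor's sketch (not from the original post): the same "owned cache" idea in Rust.
// The HashMap is moved into the closure, so the returned closure is the ONLY thing that can
// read or write it -- the "tree in the wood" is owned, not shared.
use std::collections::HashMap;
fn get_cached_fn() -> impl FnMut(u64) -> u64 {
    let mut cache: HashMap<u64, u64> = HashMap::new(); // owned data, no outside alias
    move |n| {
        if let Some(&v) = cache.get(&n) {
            return v; // the unobserved mutation stays local to the closure
        }
        let res = n * n; // stand-in for the real computation
        cache.insert(n, res);
        res
    }
}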
|
2.0
// ALSO: ORDER/FREQUENCY DEPENDENCY
{
var users = {}, userOrders = {};
fn fetchUserData(userId) {
ajax( "http://some.api/user/" + userId, fn onUserData(userData) {
users[userId] = userData;
});
}
fn fetchOrders(userId) {
ajax( "http://some.api/orders/" + userId, fn onOrders(orders) {
for (let i = 0; i < orders.length; i++) {
// keep a reference to latest order for each user
users[userId].latestOrder = orders[i];
userOrders[orders[i].orderId] = orders[i];
}
});
}
}
// `fetchUserData` should be called before `fetchOrders` ... AVOID SUCH "LOCAL STATE MACHINES"
/// [GIST] there should NOT BE ANY DEPENDENCY on the ORDER/FREQUENCY OF CALLS to the
/// "getters/setters" ..(that is, (kind of) there SHOULD BE ONLY IDEMPOTENT-EFFECTFUL-FNs)..
/// of some "local state machine" aka "class instance" aka "object"(with incapsulation)
// ..
// ...So.. in this exmp, (also Miwko: initializing DB, which depends on "glob-state" - that is
// "aliasing") .. `fetchOrders` depends on
///| to be called.. IN SOME PARTICULAR STATE .. and this state is reached through
///| "PARAMS-REF"(aliasing) ... SO.. in such case,
///| -=! SUCH STATE SHOULD BE PASSED DIRECTLY !=-..
/// (as in Miwko db-inital we refactor too)
/// ..to AVOID "ORDER/FREQUENCY-OF-CALL(getter/setter) DEPENDENCY"(to set "needed" state)
// ... any Getter/Setter should be designed in such way to BE ABLE to be called in ANY STATE!
//>> OR.. THIS "ORDER_DEPENDENCY/COUPLING" SHOULD BE INCAPSULATED IN fn
/// (BUT EXPLICIT PASSING OF MUT_ALISASED DEPENDENCY AS ARG
/// IF IT POSSIBLE
/// IS BETTER)
// (in the Miwko db-exmp: if we don't explicitly pass the "mut-aliased"-state , then we should
// incapsulate all COUPLED fns in a single fn .. that is, prepare the NEEDED STATE ..
// BUT.. IF THIS CHAIN OF COUPLED-fns is too long ..
// |> WE RETURN TO THE PROBLEM ..
// since refactoring of the fn-that-incapsulates-coupling is ERROR PRONE .. SO
/// THIS CHAIN OF COUPLED_FNs SHOULD BE SHORT .. USUALLY === 2 )
// ..
// Of course, to achieve some "desired side-effs"(without them a program is useless) we
// OFTEN need to call "getters/setters" in some particular order ..BUT.. that DOESN'T mean that
// this order should become SPECIAL(HARDCODED) aka an ORDER DEPENDENCY .. we just pick that order,
// but it should NOT BE SPECIAL for our "get/set-ters".
//
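// --- editor's sketch (not from the original post): removing the order dependency above.
// Instead of `fetch_orders` silently requiring that `fetch_user_data` was called first,
// the state it needs (the user record) is passed in directly, so any call order that
// type-checks is a valid one. `User`/`Order` are hypothetical stand-ins.
struct User { latest_order: Option<Order> }
#[derive(Clone)]
struct Order { order_id: u64 }
fn fetch_user_data(user_id: u64) -> User {
    let _ = user_id; // ... ajax/DB call elided
    User { latest_order: None }
}
fn attach_orders(user: &mut User, orders: &[Order]) {
    // depends only on its explicit arguments, not on a shared `users` map being pre-filled
    user.latest_order = orders.last().cloned();
}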
| 2.1
// So.. why did I look at the ::exmps/js_state.rs::state_sharing fn .. and think: "why do I need
// the "immutable-world", clojure, etc .. after all, this fn is pretty good, and does its job well"
// ..
// .... BECAUSE: this "local-state-machine" doesn't rely on a PARTICULAR STATE or ORDER/FREQUENCY..
// so IN ANY STATE ... we can call the "get/set-ters" in ANY ORDER .. AND IT'S OK, IT DOES EXACTLY
// WHAT WE WANT, EXACTLY THE SIDE-EFF WE WANT.
/// After creating an object we can start doing ANY "STATE_TRANSITION" which this
/// object has, BUT, if only some STATE_TRANSITIONS are allowed, IT'S ERROR-PRONE.
// ...> whether "produce" or "consume" be called first .. WE DON'T CARE,
// ...> what the state we have in the moment of call to any fn(cons/prod) .. WE DON'T CARE...
// ..(some particular-state doesn't have special meaning for us)
// .....
///|"consume"(as a getter[but it aslo a setter]) is NOT IDEMPOTENT .. because..
// for this I need pass the list-for-consuming EXPLICITLY..
// (the main condition for IDEMPOTENCY - absence of MUT_ALIASING[here `list`])
/// .. BUT:
/// It INCAPSULATE "INVARIANT" of "prefiex is chenged only when we "splavili" old values in a
/// list and clean it up"
///>> .. INCUPSULATEs IN ONE STATE_TRANSITION
// .. SO .. it's let's both (`consume` and `produce`) "fns" works without FEAR about shared
// (between them ) STATE(and INCAPSULATED) on which the relie(mut_aliased) since it doesn't
// aliased elsewhere("tree in the wood") .. and PROPERLY changed (INVARIANT) in `consumer`, and
// this change INCAPSULATED in right place
/// that_is: WE DEFINE ATOMIC_STATE_TRANSITIONs, why atomic: because the state mutation/transaction
/// can't be "interrupted" (here exmp: "prefix" changed BUT we have not yet flushed the values from
/// `list` -- THAT IS AN INTERRUPTION of the STATE_TRANSITION)
/// ..
// .. also .. it doesn't rely on any particular state of `list` when we need to flush, so
// we don't need to MAINTAIN THAT INVARIANT.
/// ..
/// | SO.. we may say, that every "local-state-mach/obj" has some "patterns" of state_changing
/// | .. let's call it "STATE_TRANSITIONS", and we should properly "FORM" this
/// | state_transitions: .. incapsulate "INVARIANTS" in ONE_MOVE_PHASE, and incapsulate
/// | "DATA-ACCESS" ... !!! TO MAKE THEM ATOMIC !!!
/// ..
/// AGAIN: (1) PROPER DEFINE (2) MAKE ATOMIC .. UNIT-test those 2-RULES is ACCOMPLISHED
// ..
// | moreover: list consuming, and set-new-prefix is (temporary) COUPLED ..
// | .. that is ORDER DEPENDENCY ..
// | ..BUT.. we INCAPSULATE it in "consume" - WE CANNOT INDEPENDENTLY SET PREFIX .. since
// | it COUPLED .. and if we could(set pref independently) that would CREATE ABILITY to write
// | code with ORDER/FREQUENCY DEPENDENCY(or call it temporal-coupling)
// Damn, yes: a "STATE_TRANSITION" is simply the well-defined API of an object that
// incapsulates the TREE_in_the_WOOD
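// --- editor's sketch (not from the original post): a `produce`/`consume` pair where the
// coupled mutations (flush the list AND change the prefix) live in ONE method, so the
// invariant can never be observed half-done. Field names are hypothetical.
struct Collector { prefix: String, list: Vec<String> }
impl Collector {
    fn produce(&mut self, v: &str) {
        self.list.push(format!("{}{}", self.prefix, v)); // callable in any state, in any order
    }
    fn consume(&mut self, new_prefix: &str) -> Vec<String> {
        // one atomic STATE_TRANSITION: drain the list and only then switch the prefix;
        // there is deliberately NO independent `set_prefix` setter.
        let out = std::mem::take(&mut self.list);
        self.prefix = new_prefix.to_string();
        out
    }
}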
| 2.2
/// |about STATE_TRANSITION : since I invented this term after written bellow--v)
// | to achive any goal with "response" object .. we need pass some STATE_TRANSITION,
// and every this STATE_TRANSITION make sense (OFF_COURSE) only in ATOMIC form
// ... `req, method, action` <<- THIS-IS main STATE_TRANSITION of our object, so
// we need INCAPSULATE(ATOMICALLY_BOUND) it, ... i.e. MAKE SURE it's ATOMIC
// ..
// .. in expm of Bugaenko-request .. we can either do..
// 1. ***
Request req = new Request("http://exmp.com", POST);
//..
// .. and NO `self.method`-setter .. that is we create API that EXCLUDE possibility of
// "INCONSISTENT STATE"(where getters can be "buggy" with wrong ORDER-dependency)
/// AGAIN: we INCAPSULATE this "ORDER_DEPENDENCY/COUPLING"
// (in some sense `req` "like" immutable, that is to use "GET"-req we NEED CREATE NEW REQUEST)
// 2. ***
res = req.fetch("POST", body) // we talk about SYNC calls in this post
//..
// METHOD should be passed explicitly to `fetch`.. REALLY! why should the "request"-obj contain
// a METHOD param in its state? .. (also `body`)
// .. that is AGAIN: we incapsulate the "coupled" effectful-methods( firstly set METHOD, secondly
// fetch result), and explicitly pass the state on which the effectful method depends .. that is,
// we make the method IDEMPOTENT
// ..
// p.s. if we want to "bind" the object (set it to a state where it uses some particular METHOD)
// to some particular METHOD, in JS we should just use "bind" (aka currying)
// .. or maybe in Java-like languages create some OBJECT-WRAPPER(here Fetcher) .. which AGAIN is
// configured(INCAPSULATE-COUPLING)-on-initialization(that is IMMUTABLE)
let res_obj: Fetcher = req.fetch("POST", body);
/* ... somewhere .. >>> */ res_obj.get_result();
/// {
// after 1/2 year: also a '+' of such an approach as opposed to
req.fetch(POST, body, &mut res_obj) // because if we create `res_obj`(res_buf) inside
// `fetch` we know that we have no aliasing SINCE: only in rust we can know that if we pass
// `&mut res_obj` then there is not aliasing .... BUT.. in Java it's just impossible ...
// so it's another trick how BY MEANS of api-design we can eliminate problem of
// aliasing/mutability
/// }
// .. and for now: we "BIND" our req to particular state .. but INCAPSULATE this
// "COUPLED MUTATIONS" like "consume" in "exmp/js_state.rs::state_sharing"
/// {
// after 1/2-year: OR: If we even store `fetch`-result in `req` object ... we should
// use `check / throw-excpe` that we can do next `fetch` only after we consume already
// fetched body
/// }
//..
// |... so you can see how understanding "Mut-aliasing"+"order/frequency-dependency" leads us
// | TO CHANGING THE DESIGN(API) of our program.
//
| 2.3
//// .......... MUST_SEE_ALSO: FunLight-ch5-Purifying
// (note: closure-captur == obj.param-ref,
// tags: "local-state-machine" .. INCAPSULATE mutation != HIDE mutation)
// INCAPSULATE mutation == LOCALIZE mutation)
| { x += n*x } | identifier_body |
idempotent.rs | //! mut_aliasing -- several links, AT_LEAST one of them is MUTABLE
//! BTW: bunch_of_closures_with_mut_refs is like obj which alias through its params, which is
//! mut_aliased refs on some another obj, or shared data
//! that_is: CLOSURE_CAPUTRING == OBJ.MUT_ALIASED_REFS
|
0
// fns with side-effs: --== IDEMPOTENT VS NON_IDEMPONTENT ==--
//
/// fn is idempotent because its caused side-effs SIMULTANEOUSLY fulfill (2-former:wrong) 1 cond:
/// 1. base on "input" or "immutable free-vars" .. NO mut_aliasing
/// ..
/// (here was 2nd statement) Just change value .. not ADD.
/// .. BUT .. it's NEVERHTHELESS .. 1-statement(MUT_alising)
//
// (small caveat: JUST REMINDER)
var x; fn some_fn(n) { x += n*x }
// not IDEMPOTENT because: *WRONG: we update, and/but not completly rewrite `x`,
// *RIGHT: mut_aliasing (our computed value for `x` rely on mut_aliased `x`)
// (BUT)
// BTW(p.s.)--v: `some_fn` break rule:
/// "mutate only TREE_in_the_WOOD , emit "own_copied/immutabl" value"
// (TREE_in_the_WOOD is accesed or through "self/this", or "closure"-capturing)
//
fn | (a = [], n) { a.push(n) }
// ^-- here was: "ADD value, not just change."
// BUT actually .. it's also can be considered as MUT_alising .. since result of `push`
// depends of `a`, .. and if we call 2 times `some_fn` of course it's gives different sideeff,
// since its computation is based on the MUTated value (that very same aliased value)
// ..
// From the other hand .. `some_fn` can be considered as IDEMPONENT, since it's always gives
// the same side-eff for the same input-VALUE .. that is if we alway will give `[x] as a` ..
// we always receive `[x, n] as a`
// ..
// (v-- almost deleted this :lol:, thought it was nonsense, BUT NO-NO-NO)
//> ... SO, if "fn" mutate own args, and computation which "generate new value,
// to update old value in args (in case of [] is some "next-empty-cell" in arr, and `len`)
// is based ON THIS ARG", ..
/// we (most likely) will consider it as NOT IDEMPONENT
/// .. SO .. WHAT IS IMPORTANT ??
// despite to be NOT IDEMPOTENT (from "caller" perspective) .. since we have
// 2 refs (caller and callee) and "our"(callee) is `mut` .. WE HAVE ADVANTAGE
// of EXPLICIT PASSING THIS REF(to callee) , BUT unfortunatly in JS we can't write
// `&mut reff` ..
// IT'S (1) PREVENT "ORDER"-PROBLEM described above (2) makes MORE OBVIOUS for
// "reviewer" that "a" can be mutated (he at least SEE this "a" is passsed) ..
// ..but `some_fn(num)` is MUCH WORSE
//
/// TRY TO MAKE ALL EFFECTFUL-FNs IDEMPOTENT
|
1
// ALSO: (p.s. JS here, ONLY SINGLE-threaded programs and (in this post) SYNC)
{
fn calculateAverage(list) { // << obviously IDEMPOTENT
sum = list.reduce((sum, x) => x + sum, 0);
return sum / list.length;
}
var sum, nums = [1,2,4,7,11,16,22];
var avg = calculateAverage( nums );
}
// Getify said that (if `sum` is not mutably-aliased by some other closure)
// the mutation of `sum` here is an "UNOBSERVED SIDE-EFF" ..
// (.. although, such "UNOBSERVED MUTATIONS" can easily be eliminated, and we can use
// a pure style easily here (but actually here it's just a MATTER OF TASTE)
// )
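// --- editor's sketch (not from the original post): the "purified" version hinted at above,
// written in Rust. No outer `sum` variable is mutated; the whole computation is local.
fn calculate_average(list: &[f64]) -> f64 {
    let sum: f64 = list.iter().sum(); // local and unshared -- nothing outside can observe it
    sum / list.len() as f64
}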
// ... SO .. what reasons we have for IDEMPOTENT MUTATIONS aka "unobserved side-eff"?
// ... either NONE or "OWNED DATA"(P.Graham)
// .. and further author cite Hickey:
/// "If a tree falls in the forest, but no one is around to hear it, does it still make a sound?"
// (^-- NO SHARED MUTABLE ALIASING .. or ..
// .. we INCAPSULATE some data to MAINTAIN invariant [like in
// exmps/js_state.rs:state_shraing] )
//
// .. so it "like" pure-fn, or in term of P.Graham fn what used "owned data"
fn get_cached_fn() {
/// AS-OPPOSED to "`a` in `some_fn`"(above) `cache` is not "mut aliased"(only one ref which
/// mutate and read)
var cache = {}; // << "owned data", "unheard tree in the forest"
return fn XX(n) { // "unobserved mutation", "like-pure", "right incapsultaion"
if (cache[n] !== undefined) return cache[n] // .... call it whatever you like
..... /* do computation */ cache[n] = res_of_comutation();
}
} // author: "The point is that if XX(..) is the only part of the program that accesses
// and updates the cache side cause/effect, so it's "like"-pure"
|
2.0
// ALSO: ORDER/FREQUENCY DEPENDENCY
{
var users = {}, userOrders = {};
fn fetchUserData(userId) {
ajax( "http://some.api/user/" + userId, fn onUserData(userData) {
users[userId] = userData;
});
}
fn fetchOrders(userId) {
ajax( "http://some.api/orders/" + userId, fn onOrders(orders) {
for (let i = 0; i < orders.length; i++) {
// keep a reference to latest order for each user
users[userId].latestOrder = orders[i];
userOrders[orders[i].orderId] = orders[i];
}
});
}
}
// `fetchUserData` should be called before `fetchOrders` ... AVOID SUCH "LOCAL STATE MACHINES"
/// [GIST] there is should NOT BE ANY DEPENDENCY between ORDER/FREQUENCY OF CALL to
/// "getters/setters" ..(that is (kind of) SHOULD BE ONLY IDEMPOTENT-EFFECTFUL-FN)..
/// of some "local state machine" aka "class instance" aka "object"(with incapsulation)
// ..
// ...So.. in this exmp, (also Miwko: initializing DB, which depends on "glob-state" - that is
// "aliasing") .. `fetchOrders` depends on
///| to be called.. IN SOME PARTICULAR STATE .. and this state is reached through
///| "PARAMS-REF"(aliasing) ... SO.. in such case,
///| -=! SUCH STATE SHOULD BE PASSED DIRECTLY !=-..
/// (as in Miwko db-inital we refactor too)
/// ..to AVOID "ORDER/FREQUENCY-OF-CALL(getter/setter) DEPENDENCY"(to set "needed" state)
// ... any Getter/Setter should be designed in such way to BE ABLE to be called in ANY STATE!
//>> OR.. THIS "ORDER_DEPENDENCY/COUPLING" SHOULD BE INCAPSULATED IN fn
/// (BUT EXPLICIT PASSING OF MUT_ALISASED DEPENDENCY AS ARG
/// IF IT POSSIBLE
/// IS BETTER)
// (in Miwko db-exmp: if we don't pass explicitly "mut-aliased"-state , then we should
// incapsulate all COUPLED fns in single fn .. that is prepare NEEDED STATE ..
// BUT.. IF THIS CHAIN OF COUPLED-fns is too long ..
// |> WE RETURN TO THE PROBLEM ..
// since we refactoring of fn-that-incapsulate-coupling is ERROR PRONE .. SO
/// THIS CHAIN OF COUPLED_FNs SHOULD BE SHORT .. USUALLY === 2 )
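// --- editor's sketch (not from the original post): the "OR" branch above -- when the needed
// state cannot be passed in directly, wrap the coupled calls in ONE fn so the required order
// lives in exactly one place and callers cannot get it wrong. All names are hypothetical stubs.
struct UserRec { latest_order: Option<u64> }
fn load_user(user_id: u64) -> UserRec { let _ = user_id; UserRec { latest_order: None } }
fn load_order_ids(user_id: u64) -> Vec<u64> { let _ = user_id; Vec::new() }
fn load_user_with_orders(user_id: u64) -> UserRec {
    let mut user = load_user(user_id);          // step 1 of the coupled chain (length 2)
    user.latest_order = load_order_ids(user_id) // step 2 -- the ordering is encapsulated here
        .last()
        .copied();
    user
}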
// ..
// Of course to achieve some "desired side-effs"(without them programm is useless) we
// OFTEN need call "getters/setters" in some particular order ..BUT.. that DOESN'T mean that
// this order should become SPECIAL(HARDCODED) aka ORDER DEPENDENCY .. we just peek that order,
// but it's should NOT BE SPECIAL for our "get/set-ters".
//
| 2.1
// So.. why did I look at the ::exmps/js_state.rs::state_sharing fn .. and think: "why do I need
// the "immutable-world", clojure, etc .. after all, this fn is pretty good, and does its job well"
// ..
// .... BECAUSE: this "local-state-machine" don't rely on PARTICULAR STATE and ORDER/FREQUENCY..
// so IN ANY STATE ... we can call "get/set-ters" in ANY ORDER .. AND IT'S OK, IT DO EXACTLY
// WHAT WE WANT, EXECTLY SIDE-EFF WHAT WE WANT.
/// After createing an object we can start do ANY "STATE_TRANSITION" which this
/// object has, BUT, if only some STATE_TRANSITIONS allowed, IT'S ERRORPRONE.
// ...> whether "produce" or "consume" be called first .. WE DON'T CARE,
// ...> what the state we have in the moment of call to any fn(cons/prod) .. WE DON'T CARE...
// ..(some particular-state doesn't have special meaning for us)
// .....
///|"consume"(as a getter[but it aslo a setter]) is NOT IDEMPOTENT .. because..
// for this I need pass the list-for-consuming EXPLICITLY..
// (the main condition for IDEMPOTENCY - absence of MUT_ALIASING[here `list`])
/// .. BUT:
/// It INCAPSULATE "INVARIANT" of "prefiex is chenged only when we "splavili" old values in a
/// list and clean it up"
///>> .. INCUPSULATEs IN ONE STATE_TRANSITION
// .. SO .. it's let's both (`consume` and `produce`) "fns" works without FEAR about shared
// (between them ) STATE(and INCAPSULATED) on which the relie(mut_aliased) since it doesn't
// aliased elsewhere("tree in the wood") .. and PROPERLY changed (INVARIANT) in `consumer`, and
// this change INCAPSULATED in right place
/// that_is: WE DEFINE ATOMIC_STATE_TRANSITIONs, why atomic, since state mutation/transaction
/// can't be "interrupted" (here exmp: "prefix" changed BUT we not yet "splavili" values from
/// `list` -- IT'S INTERRUPTION of STATE_TRANSITION)
/// ..
// .. also .. it doesn't rely on any particular state of `list` when we need to flush, so
// we don't need to MAINTAIN THAT INVARIANT.
/// ..
/// | SO.. we may say, that every "local-state-mach/obj" has some "patterns" of state_changing
/// | .. let's call it "STATE_TRANSITIONS", and we should properly "FORM" this
/// | state_transitions: .. incapsulate "INVARIANTS" in ONE_MOVE_PHASE, and incapsulate
/// | "DATA-ACCESS" ... !!! TO MAKE THEM ATOMIC !!!
/// ..
/// AGAIN: (1) PROPER DEFINE (2) MAKE ATOMIC .. UNIT-test those 2-RULES is ACCOMPLISHED
// ..
// | moreover: list consuming, and set-new-prefix is (temporary) COUPLED ..
// | .. that is ORDER DEPENDENCY ..
// | ..BUT.. we INCAPSULATE it in "consume" - WE CANNOT INDEPENDENTLY SET PREFIX .. since
// | it COUPLED .. and if we could(set pref independently) that would CREATE ABILITY to write
// | code with ORDER/FREQUENCY DEPENDENCY(or call it temporal-coupling)
// Damn, yes: a "STATE_TRANSITION" is simply the well-defined API of an object that
// incapsulates the TREE_in_the_WOOD
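// --- editor's sketch (not from the original post): what the *rejected* API would look like.
// If the prefix setter and the drain were independent, a caller could interleave them and
// observe the invariant half-done; an encapsulated `consume` makes that unrepresentable.
struct LeakyCollector { prefix: String, list: Vec<String> }
impl LeakyCollector {
    fn set_prefix(&mut self, p: &str) { self.prefix = p.to_string(); } // independent setter
    fn drain(&mut self) -> Vec<String> { std::mem::take(&mut self.list) }
}
fn broken_caller(c: &mut LeakyCollector) {
    c.set_prefix("new:");  // prefix changed ...
    // ... but the old values are still sitting in `list` -- the STATE_TRANSITION was interrupted
    let _old = c.drain();  // the "flush then change prefix" invariant has already been violated
}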
| 2.2
/// |about STATE_TRANSITION : since I invented this term after written bellow--v)
// | to achive any goal with "response" object .. we need pass some STATE_TRANSITION,
// and every this STATE_TRANSITION make sense (OFF_COURSE) only in ATOMIC form
// ... `req, method, action` <<- THIS-IS main STATE_TRANSITION of our object, so
// we need INCAPSULATE(ATOMICALLY_BOUND) it, ... i.e. MAKE SURE it's ATOMIC
// ..
// .. in expm of Bugaenko-request .. we can either do..
// 1. ***
Request req = new Request("http://exmp.com", POST);
//..
// .. and NO `self.method`-setter .. that is we create API that EXCLUDE possibility of
// "INCONSISTENT STATE"(where getters can be "buggy" with wrong ORDER-dependency)
/// AGAIN: we INCAPSULATE this "ORDER_DEPENDENCY/COUPLING"
// (in some sense `req` "like" immutable, that is to use "GET"-req we NEED CREATE NEW REQUEST)
// 2. ***
res = req.fetch("POST", body) // we talk about SYNC calls in this post
//..
// METHOD should be passed explicitly to `fetch`.. REALLY! why should the "request"-obj contain
// a METHOD param in its state? .. (also `body`)
// .. that is AGAIN: we incapsulate the "coupled" effectful-methods( firstly set METHOD, secondly
// fetch result), and explicitly pass the state on which the effectful method depends .. that is,
// we make the method IDEMPOTENT
// ..
// p.s. if we want to "bind" the object (set it to a state where it uses some particular METHOD)
// to some particular METHOD, in JS we should just use "bind" (aka currying)
// .. or maybe in Java-like languages create some OBJECT-WRAPPER(here Fetcher) .. which AGAIN is
// configured(INCAPSULATE-COUPLING)-on-initialization(that is IMMUTABLE)
let res_obj: Fetcher = req.fetch("POST", body);
/* ... somewhere .. >>> */ res_obj.get_result();
/// {
// after 1/2 year: also a '+' of such an approach as opposed to
req.fetch(POST, body, &mut res_obj) // because if we create `res_obj`(res_buf) inside
// `fetch` we know that we have no aliasing SINCE: only in rust we can know that if we pass
// `&mut res_obj` then there is not aliasing .... BUT.. in Java it's just impossible ...
// so it's another trick how BY MEANS of api-design we can eliminate problem of
// aliasing/mutability
/// }
// .. and for now: we "BIND" our req to particular state .. but INCAPSULATE this
// "COUPLED MUTATIONS" like "consume" in "exmp/js_state.rs::state_sharing"
/// {
// after 1/2-year: OR: If we even store `fetch`-result in `req` object ... we should
// use `check / throw-excpe` that we can do next `fetch` only after we consume already
// fetched body
/// }
//..
// |... so you can see how understanding "Mut-aliasing"+"order/frequency-dependency" leads us
// | TO CHANGING THE DESIGN(API) of our program.
//
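// --- editor's sketch (not from the original post): the two shapes compared above, in Rust.
// Returning a fresh owned result guarantees no aliasing by construction; accepting `&mut buf`
// also guarantees it, but only because the borrow checker forbids a second live reference.
struct Response { status: u16, body: Vec<u8> }
fn fetch_returning(url: &str, body: &[u8]) -> Response {
    let _ = (url, body); // ... network call elided
    Response { status: 200, body: Vec::new() } // caller gets a brand-new, unaliased object
}
fn fetch_into(url: &str, body: &[u8], out: &mut Response) {
    let _ = (url, body);
    out.status = 200;  // `out` is the only live mutable path to this Response
    out.body.clear();
}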
| 2.3
//// .......... MUST_SEE_ALSO: FunLight-ch5-Purifying
// (note: closure-captur == obj.param-ref,
// tags: "local-state-machine" .. INCAPSULATE mutation != HIDE mutation)
// INCAPSULATE mutation == LOCALIZE mutation)
| some_fn | identifier_name |
core_convert.py | from datetime import timedelta, datetime
def filter_dict(obj, val=None):
# TODO: We should not always remove all None items (maybe!?)
return dict(filter(lambda item: item[1] is not val, obj.items()))
def get_season_tag_name(key):
table = {
"Clas Ohlson": "COB",
"Matkasse": "MK",
"Grillartiklar": "G",
"Halloweenartiklar": "H",
"Julartiklar": "J",
"Artiklar som säljs året runt, men mest runt jul": "JB",
"Midsommarartiklar": "M",
"Artiklar som säljs året runt, men mest runt midsommar": "MB",
"Nyårsartiklar": "N",
"Artiklar som säljs året runt, men mest runt nyår": "NB",
"Påskartiklar": "P",
"Artiklar som säljs året runt, men mest runt påsk": "PB",
"Sommarartiklar": "S",
"Sommartorget": "ST",
}
return table[key] if key in table else None
def convert_season_tags(product):
    # look up by tag name (the table keys are names; the other helpers use tag.name)
    tags = map(lambda x: get_season_tag_name(x.name), product.tags.all())
return list(filter(lambda x: x is not None, tags))
def convert_order_route_from_product_type(key):
table = {
"Crossdocking": "X",
"Nightorder": "A",
}
return table[key] if key in table else None
def get_attribute_id(key):
# data from prefilledautomaten.attribute
table = {
'Ekonomipack': 1,
'Nyckelhålsmärkt': 1736,
'Ekologisk': 2167,
'Glutenfri': 2168,
'Laktosfri': 2169,
'Låglaktos': 2170,
'Premiumkvalité': 2171,
'Mjölkproteinfri': 2172,
# 'Nyhet': 2173,
'18Åldersgräns': 2174,
'Fairtrade': 2175,
'Svanenmärkt': 2176,
'Kravmärkt': 2177,
'Video': 2178,
'Äkta vara': 2181,
'Astma- och Allergiförbundet': 2184,
'test': 2187,
'Rosa bandet': 2190,
'Svenskt sigill': 2191,
'3+ dagar': 2194,
'5+ dagar': 2197,
'7+ dagar': 2200,
'10+ dagar': 2203,
'30+ dagar': 2206,
'Svenskt ursprung': 2209,
'Svensk fågel': 2212,
'4+ dagar': 2215,
'Vegansk': 2218,
'MSC': 2219,
'Strategisk produkt': 2222,
'Svenskt sigill klimatcertifierad': 2224,
'ASC': 2227,
'Från Sverige': 2230,
'Kött från Sverige': 2233,
'Mjölk från Sverige': 2236,
'Faroklass brandfarligt': 2239,
'Faroklass miljöfarligt': 2242,
'Faroklass skadligt': 2245,
'Faroklass Warning': 2248,
'Energiklass A+': 2251,
'Energiklass C': 2254,
'Energiklass D': 2257,
'Energiklass E': 2260,
'Energiklass A++': 2263,
'Energiklass A': 2266,
'Energiklass B': 2269,
}
return table[key] if key in table else None
def get_dynamic_property_id(key):
table = {
'Volume': 1,
'Weight': 2,
'KfpDfp': 3,
'LastSalesDay': 4,
'LastReceiptDay': 5,
'OldPz1': 6,
'OldPz2': 7,
'OldPz3': 8,
'MaxStock': 9,
'Season': 10,
'OrderFactor': 11,
'MinStock': 12,
'DfpLengthMM': 13,
'DfpWidthMM': 14,
'DfpHeightMM': 15,
'DfpWeightG': 16,
'DfpType': 17,
'SupplierArticleNumber': 18,
'AxfoodArticleId': 19,
'TruckrouteOptimizationProd3': 20,
'KfpHeightMM': 21,
'KfpLengthtMM': 22,
'KfpWidthMM': 23,
'IsFakeStockBalance': 24,
'ExternalImageUrl': 25,
'ProductSupplier': 26,
'ValdioDFPWidthMM': 27,
'ValdioDFPHeightMM': 28,
'ValidoDFPLengthtMM': 29,
'ValdioDFPWeightG': 30,
'DFPEANCode': 31,
'SafetyStock': 33,
'KfpDfpPurchaseOrder': 36,
'NoNutritionsNeeded': 38,
'NoIngredientsNeeded': 41,
'NoAllergensNeeded': 44,
'DeliveredUnitConversionFactor': 45,
'HandlingUnitQuantity': 46,
'BDMaterialNumber': 49,
'ProductSegment': 55,
'StandardUnitKfp': 56,
'StandardUnitGtin': 59,
'LimitedOfferProduct': 61,
'QLPricing': 64,
'QLMatching': 67,
'FirstSalesDate': 70,
'CategoryManager': 73,
}
return table[key] if key in table else None
def get_origin_id(key):
table = {
752: 1, # Svensk
249: 2, # Fransk
# TODO: MAP THIS ?: 3, # Afrika
# TODO: MAP THIS ?: 4, # Grekiskt
# TODO: MAP THIS ?: 5, # Indien
# TODO: MAP THIS ?: 6, # Nordamerika
# TODO: MAP THIS ?: 7, # Latinamerika
# TODO: MAP THIS ?: 8, # Orienten
# TODO: MAP THIS ?: 9, # Japan
# TODO: MAP THIS ?: 10, # Italienskt
# TODO: MAP THIS ?: 11, # Sydostasien
# TODO: MAP THIS ?: 12, # Spansk
# TODO: MAP THIS ?: 13, # Tyskland
# TODO: MAP THIS ?: 14, # "Ryssland och Östeuropa"
# TODO: MAP THIS ?: 15, # Internationellt
# TODO: MAP THIS ?: 16, # Övriga
# TODO: MAP THIS ?: 73, # Sverige
# TODO: MAP THIS ?: 74, # Norge
# TODO: MAP THIS ?: 75, # Kanada
# TODO: MAP THIS ?: 76, # Frankrike
# TODO: MAP THIS ?: 77, # Grekland
# TODO: MAP THIS ?: 78, # Portugal
# TODO: MAP THIS ?: 79, # Danmark
# TODO: MAP THIS ?: 80, # Italien
# TODO: MAP THIS ?: 81, # Finland
# TODO: MAP THIS ?: 82, # Kalifornien
# TODO: MAP THIS ?: 83, # Thailand
# TODO: MAP THIS ?: 84, # Kina
# TODO: MAP THIS ?: 85, # Belgien
# TODO: MAP THIS ?: 86, # Europa
# TODO: MAP THIS ?: 87, # Turkiet
# TODO: MAP THIS ?: 88, # Holland
# TODO: MAP THIS ?: 89, # England
# TODO: MAP THIS ?: 90, # Spanien
# TODO: MAP THIS ?: 91, # Nederländerna
# TODO: MAP THIS ?: 92, # Polen
# TODO: MAP THIS ?: 93, # "Blandat: EG och icke EG"
# TODO: MAP THIS ?: 94, # Ungern
# TODO: MAP THIS ?: 95, # Bulgarien
# TODO: MAP THIS ?: 96, # Kroatien
# TODO: MAP THIS ?: 98, # India
# TODO: MAP THIS ?: 99, # Uruguay
# TODO: MAP THIS ?: 100, # Irland
# TODO: MAP THIS ?: 101, # "Nya Zeeland"
# TODO: MAP THIS ?: 102, # Sverige/England
# TODO: MAP THIS ?: 103, # Sverige/Danmark
# TODO: MAP THIS ?: 104, # China
# TODO: MAP THIS ?: 105, # Holland/Frankrike
# TODO: MAP THIS ?: 106, # "Costa Rica"
# TODO: MAP THIS ?: 107, # Zaire
# TODO: MAP THIS ?: 108, # Israel/USA
# TODO: MAP THIS ?: 109, # Mexico
# TODO: MAP THIS ?: 110, # Holland/Belgien
# TODO: MAP THIS ?: 111, # Frankrike/Italien
# TODO: MAP THIS ?: 112, # Sverge
# TODO: MAP THIS ?: 113, # Centralamerika
# TODO: MAP THIS ?: 114, # Brasilien
# TODO: MAP THIS ?: 115, # Israel/Indien
# TODO: MAP THIS ?: 116, # "Italien/Nya Zeeland"
# TODO: MAP THIS ?: 117, # Sydafrika
# TODO: MAP THIS ?: 118, # Argentina
# TODO: MAP THIS ?: 119, # China/Thailand
# TODO: MAP THIS ?: 120, # USA
# TODO: MAP THIS ?: 121, # Kenya
# TODO: MAP THIS ?: 122, # Israel
# TODO: MAP THIS ?: 123, # Malaysia
# TODO: MAP THIS ?: 124, # Nordostatlanten
# TODO: MAP THIS ?: 125, # Vietnam
# TODO: MAP THIS ?: 126, # Norden
# TODO: MAP THIS ?: 127, # Litauen
# TODO: MAP THIS ?: 131, # Roslagen
# TODO: MAP THIS ?: 135, # U.S.A.
# TODO: MAP THIS ?: 136, # DK
# TODO: MAP THIS ?: 137, # Egypten
# TODO: MAP THIS ?: 138, # Marocko
# TODO: MAP THIS ?: 139, # Chile
# TODO: MAP THIS ?: 140, # "Dominikanska Republiken"
# TODO: MAP THIS ?: 141, # Iran
# TODO: MAP THIS ?: 142, # Colombia
# TODO: MAP THIS ?: 143, # Peru
# TODO: MAP THIS ?: 144, # Zimbabwe
}
return table[key] if key in table else None
def convert_attributes(product, detail=None):
result = []
for tag in product.tags.all():
id = get_attribute_id(tag.name)
if id is not None:
result.append({
'AttributeId': id
})
# Special case for "Nyhet"
if not detail and product.product_detail:
detail = product.product_detail.filter(store=10).first()
if detail is None:
detail = product.product_detail.first()
if detail:
first_enabled = detail.first_enabled if detail.first_enabled else datetime.now() - \
timedelta(days=60)
result.append({
'AttributeId': 2173,
'FromDate': first_enabled,
'ToDate': first_enabled + timedelta(days=30),
})
return result
def create_dynamic_property(key, value, store=None):
prop = {
'PropertyId': get_dynamic_property_id(key),
'PropertyName': key,
'PropertyValue': value,
}
if store is not None:
prop['StoreId'] = store
return prop
def convert_dynamic_properties(product):
result = [
create_dynamic_property('Volume', product.volume_dm3),
create_dynamic_property('Weight', product.weight_g),
create_dynamic_property('KfpHeightMM', product.height_mm),
create_dynamic_property('KfpLengthtMM', product.length_mm),
create_dynamic_property('KfpWidthMM', product.width_mm),
create_dynamic_property('Season', '.'.join(
convert_season_tags(product))),
create_dynamic_property('LastReceiptDay', product.last_receipt_day),
create_dynamic_property('LastSalesDay', product.last_sales_day),
create_dynamic_property('TruckrouteOptimizationProd3',
convert_order_route_from_product_type(product.product_type)),
create_dynamic_property('BDMaterialNumber',
product.prefered_merchantarticle.external_id if product.prefered_merchantarticle else None),
create_dynamic_property('SupplierArticleNumber',
product.prefered_merchantarticle.external_id if product.prefered_merchantarticle else None),
]
base_unit_quantity = get_base_unit_quantity(product, product.article.gtin)
if base_unit_quantity is not None:
        result.append(create_dynamic_property('KfpDfp', base_unit_quantity))
for detail in product.product_detail.all():
result.append(create_dynamic_property(
'OrderFactor', 1 if detail.orderfactor else 0, detail.store))
result.append(create_dynamic_property(
'BDMaterialNumber', detail.prefered_merchantarticle.external_id if detail.prefered_merchantarticle else None, detail.store))
result.append(create_dynamic_property(
'SupplierArticleNumber', detail.prefered_merchantarticle.external_id if detail.prefered_merchantarticle else None, detail.store))
base_unit_quantity = get_base_unit_quantity(
detail, product.article.gtin)
if base_unit_quantity is not None:
            result.append(create_dynamic_property('KfpDfp', base_unit_quantity, detail.store))
return result
def get_base_unit_quantity(product, base_unit_gtin):
if product.prefered_merchantarticle is not None:
if product.prefered_merchantarticle.article.child_gtin == base_unit_gtin:
return product.prefered_merchantarticle.article.quantity_of_lower_layer
else:
upper_quantity = product.prefered_merchantarticle.article.quantity_of_lower_layer
next_lower_article = Article.objects.filter(
gtin=product.prefered_merchantarticle.article.child_gtin).first()
if next_lower_article is not None:
if next_lower_article.child_gtin | nit(validoo_unit):
# data from prefilledautomaten.unit
unit_table = {
"H87": 1, # st, PIECES
"GRM": 2, # g, WEIGHT
"KGM": 3, # kg, WEIGHT
"DLT": 6, # dl, VOLUME
"LTR": 7, # L, VOLUME
"MLT": 10, # ml, VOLUME
"CLT": 11, # cl, VOLUME
"HGM": 12, # hg, WEIGHT
"G24": 13, # msk, VOLUME
"G25": 14, # tsk, VOLUME
# "???": 16, # st tekoppar, VOLUME
# "???": 17, # st kaffekoppar, VOLUME
# "???": 18, # glas, VOLUME
"MGM": 25, # mg, WEIGHT,
# "???": 26, # krm, VOLUME
# "???": 27, # st klyftor, PARTS,
# "???": 28, # st krukor, PIECES
# "???": 29, # st tärningar, PIECES
# "???": 30, # knippe, PIECES
}
    if validoo_unit in unit_table:
        return unit_table[validoo_unit]
return None
def convert_tags(product):
tags = filter(lambda tag: get_season_tag_name(tag.name) is
None and get_attribute_id(tag.name) is None, product.tags.all())
return list(map(lambda tag: tag.id, tags))
def convert_product(product):
from api.serializers import ProductSerializer
serializer = ProductSerializer(product)
article = product.article
image = product.productimage_set.first()
unit_id = convert_unit(serializer.data['net_content_unit_code'])
return filter_dict({
"ProductId": product.product_id, # int
"ProductName": serializer.data['name'], # string
"Quantity": serializer.data['net_content'], # float
# int
"UnitId": unit_id,
"DisplayUnitId": unit_id, # int
"CategoryId": product.product_category.id if product.product_category else None, # int
# "ProductGroupId": ???, # int
# "CalculatedWeight": ???, # float
# "RecommendedPrice": ???, # float
"VatRate": article.vat, # float
"EanCode": article.gtin, # string
# string
"ImageUrl": image.filename if image else None,
# "ProductUrl": ???, # string
# "SupplierId": ???, # int
# "MaximumOrder": ???, # float
"ProductDescription": serializer.data['description'], # string
# "UsageDescription": ???, # string
# string
"IngredientsDescription": serializer.data['ingredient_description'],
# string
"NutritionDescription": serializer.data['nutrition_description'],
# "StorageDescription": ???, # string
# "StoreVarmColdFrozen": ???, # string
# "PossibleToBuy": ???, # bool
# "IsOffer": ???, # bool
"RecycleFee": product.recycle_fee, # double
# "AmountInPackage": ???, # int
# "TempMostBought": ???, # int
# "ExternalComment": ???, # string
# "InternalComment": ???, # string
# "IsPickingCostIncluded": ???, # bool
# "IsDeliveryCostIncluded": ???, # bool
# "RatesSum": ???, # int
# "RatesCount": ???, # int
"OriginId": get_origin_id(product.origin), # int?
# "IsWine": ???, # bool
# "AxfoodSAPId": ???, # string
# "IsEcological": ???, # bool
# "RelatedProductIDs": ???, # string
"IsAdultProduct": product.adult_product, # bool
# "AutomaticSubscription": ???, # bool
# "IsAlreadyRenamed": ???, # bool
# "OriginalAfterRenameFileSize": ???, # string
# "OriginalCurrentFileSize": ???, # string
# "CreationDate": ???, # DateTime?
# "LastModifiedDate": ???, # DateTime?
# "LastUpdatedByUserId": ???, # int
# "RemovedDate": ???, # DateTime?
})
def convert_product_store(detail, product):
return filter_dict({
# "ProductStoreId": ???, # int
"ProductId": product.product_id, # int
"StoreId": detail.store, # int
# "LocalEancode": ???, # string
"CalculatedCustomerPrice": detail.price, # decimal
# "CalculatedCustomerPrice_Per_Unit": ???, # decimal
"IsOutOfStock": detail.status == 2, # bool
# "OutOfStockDate": ???, # DateTime
# "StockBackDate": ???, # DateTime
"IsReplacementProduct": detail.status == 3 # bool
# "IsApproximateWeight": ???, # bool
# "IsShowPricePerUnit": ???, # bool
# "PriceValidFrom": ???, # DateTime
# "PriceValidTo": ???, # DateTime
# "PriceIn": ???, # decimal
# "PercentageAddon": ???, # decimal
# "FixedAddon": ???, # decimal
# "PickingZone1": ???, # string
# "PickingZone2": ???, # string
# "PickingZone3": ???, # string
# "SoldCount": ???, # int
# "IsForeCastPriorityProduct": ???, # bool
# "DontShowAsMissedProduct": ???, # bool
# "StoreLevelOriginId": ???, # int?
# "PickingNote": ???, # string
# "AdvanceDeliveryMinimumOrder": ???, # int
# "MinimumRequiredDeliveryDays": ???, # byte
# "DeliverableWeekDays": ???, # string
# "DeliveryDaysAhead": ???, # int
# "CancelDaysBefore": ???, # int
# "StorePriceIn": ???, # decimal
# "CreationDate": ???, # DateTime?
# "LastModifiedDate": ???, # DateTime?
# "RemovedDate": ???, # DateTime?
# "CanSendAdvanceDeliveryEmail": ???, # bool
# "OldCalculatedCustomerPrice": ???, # decimal
})
def convert_product_stores(product):
return list(map(lambda x: convert_product_store(x, product), product.product_detail.all()))
| == product.article.gtin:
return next_lower_article.quantity_of_lower_layer * upper_quantity
return None
def convert_u | conditional_block |
core_convert.py | from datetime import timedelta, datetime
def filter_dict(obj, val=None):
# TODO: We should not always remove all None items (maybe!?)
return dict(filter(lambda item: item[1] is not val, obj.items()))
def get_season_tag_name(key):
table = {
"Clas Ohlson": "COB",
"Matkasse": "MK",
"Grillartiklar": "G",
"Halloweenartiklar": "H",
"Julartiklar": "J",
"Artiklar som säljs året runt, men mest runt jul": "JB",
"Midsommarartiklar": "M",
"Artiklar som säljs året runt, men mest runt midsommar": "MB",
"Nyårsartiklar": "N",
"Artiklar som säljs året runt, men mest runt nyår": "NB",
"Påskartiklar": "P",
"Artiklar som säljs året runt, men mest runt påsk": "PB",
"Sommarartiklar": "S",
"Sommartorget": "ST",
}
return table[key] if key in table else None
def convert_season_tags(product):
    # look up by tag name (the table keys are names; the other helpers use tag.name)
    tags = map(lambda x: get_season_tag_name(x.name), product.tags.all())
return list(filter(lambda x: x is not None, tags))
def convert_order_route_from_product_type(key):
table = {
"Crossdocking": "X",
"Nightorder": "A",
}
return table[key] if key in table else None
def get_attribute_id(key):
# data from prefilledautomaten.attribute
table = {
| (key):
table = {
'Volume': 1,
'Weight': 2,
'KfpDfp': 3,
'LastSalesDay': 4,
'LastReceiptDay': 5,
'OldPz1': 6,
'OldPz2': 7,
'OldPz3': 8,
'MaxStock': 9,
'Season': 10,
'OrderFactor': 11,
'MinStock': 12,
'DfpLengthMM': 13,
'DfpWidthMM': 14,
'DfpHeightMM': 15,
'DfpWeightG': 16,
'DfpType': 17,
'SupplierArticleNumber': 18,
'AxfoodArticleId': 19,
'TruckrouteOptimizationProd3': 20,
'KfpHeightMM': 21,
'KfpLengthtMM': 22,
'KfpWidthMM': 23,
'IsFakeStockBalance': 24,
'ExternalImageUrl': 25,
'ProductSupplier': 26,
'ValdioDFPWidthMM': 27,
'ValdioDFPHeightMM': 28,
'ValidoDFPLengthtMM': 29,
'ValdioDFPWeightG': 30,
'DFPEANCode': 31,
'SafetyStock': 33,
'KfpDfpPurchaseOrder': 36,
'NoNutritionsNeeded': 38,
'NoIngredientsNeeded': 41,
'NoAllergensNeeded': 44,
'DeliveredUnitConversionFactor': 45,
'HandlingUnitQuantity': 46,
'BDMaterialNumber': 49,
'ProductSegment': 55,
'StandardUnitKfp': 56,
'StandardUnitGtin': 59,
'LimitedOfferProduct': 61,
'QLPricing': 64,
'QLMatching': 67,
'FirstSalesDate': 70,
'CategoryManager': 73,
}
return table[key] if key in table else None
def get_origin_id(key):
table = {
752: 1, # Svensk
249: 2, # Fransk
# TODO: MAP THIS ?: 3, # Afrika
# TODO: MAP THIS ?: 4, # Grekiskt
# TODO: MAP THIS ?: 5, # Indien
# TODO: MAP THIS ?: 6, # Nordamerika
# TODO: MAP THIS ?: 7, # Latinamerika
# TODO: MAP THIS ?: 8, # Orienten
# TODO: MAP THIS ?: 9, # Japan
# TODO: MAP THIS ?: 10, # Italienskt
# TODO: MAP THIS ?: 11, # Sydostasien
# TODO: MAP THIS ?: 12, # Spansk
# TODO: MAP THIS ?: 13, # Tyskland
# TODO: MAP THIS ?: 14, # "Ryssland och Östeuropa"
# TODO: MAP THIS ?: 15, # Internationellt
# TODO: MAP THIS ?: 16, # Övriga
# TODO: MAP THIS ?: 73, # Sverige
# TODO: MAP THIS ?: 74, # Norge
# TODO: MAP THIS ?: 75, # Kanada
# TODO: MAP THIS ?: 76, # Frankrike
# TODO: MAP THIS ?: 77, # Grekland
# TODO: MAP THIS ?: 78, # Portugal
# TODO: MAP THIS ?: 79, # Danmark
# TODO: MAP THIS ?: 80, # Italien
# TODO: MAP THIS ?: 81, # Finland
# TODO: MAP THIS ?: 82, # Kalifornien
# TODO: MAP THIS ?: 83, # Thailand
# TODO: MAP THIS ?: 84, # Kina
# TODO: MAP THIS ?: 85, # Belgien
# TODO: MAP THIS ?: 86, # Europa
# TODO: MAP THIS ?: 87, # Turkiet
# TODO: MAP THIS ?: 88, # Holland
# TODO: MAP THIS ?: 89, # England
# TODO: MAP THIS ?: 90, # Spanien
# TODO: MAP THIS ?: 91, # Nederländerna
# TODO: MAP THIS ?: 92, # Polen
# TODO: MAP THIS ?: 93, # "Blandat: EG och icke EG"
# TODO: MAP THIS ?: 94, # Ungern
# TODO: MAP THIS ?: 95, # Bulgarien
# TODO: MAP THIS ?: 96, # Kroatien
# TODO: MAP THIS ?: 98, # India
# TODO: MAP THIS ?: 99, # Uruguay
# TODO: MAP THIS ?: 100, # Irland
# TODO: MAP THIS ?: 101, # "Nya Zeeland"
# TODO: MAP THIS ?: 102, # Sverige/England
# TODO: MAP THIS ?: 103, # Sverige/Danmark
# TODO: MAP THIS ?: 104, # China
# TODO: MAP THIS ?: 105, # Holland/Frankrike
# TODO: MAP THIS ?: 106, # "Costa Rica"
# TODO: MAP THIS ?: 107, # Zaire
# TODO: MAP THIS ?: 108, # Israel/USA
# TODO: MAP THIS ?: 109, # Mexico
# TODO: MAP THIS ?: 110, # Holland/Belgien
# TODO: MAP THIS ?: 111, # Frankrike/Italien
# TODO: MAP THIS ?: 112, # Sverge
# TODO: MAP THIS ?: 113, # Centralamerika
# TODO: MAP THIS ?: 114, # Brasilien
# TODO: MAP THIS ?: 115, # Israel/Indien
# TODO: MAP THIS ?: 116, # "Italien/Nya Zeeland"
# TODO: MAP THIS ?: 117, # Sydafrika
# TODO: MAP THIS ?: 118, # Argentina
# TODO: MAP THIS ?: 119, # China/Thailand
# TODO: MAP THIS ?: 120, # USA
# TODO: MAP THIS ?: 121, # Kenya
# TODO: MAP THIS ?: 122, # Israel
# TODO: MAP THIS ?: 123, # Malaysia
# TODO: MAP THIS ?: 124, # Nordostatlanten
# TODO: MAP THIS ?: 125, # Vietnam
# TODO: MAP THIS ?: 126, # Norden
# TODO: MAP THIS ?: 127, # Litauen
# TODO: MAP THIS ?: 131, # Roslagen
# TODO: MAP THIS ?: 135, # U.S.A.
# TODO: MAP THIS ?: 136, # DK
# TODO: MAP THIS ?: 137, # Egypten
# TODO: MAP THIS ?: 138, # Marocko
# TODO: MAP THIS ?: 139, # Chile
# TODO: MAP THIS ?: 140, # "Dominikanska Republiken"
# TODO: MAP THIS ?: 141, # Iran
# TODO: MAP THIS ?: 142, # Colombia
# TODO: MAP THIS ?: 143, # Peru
# TODO: MAP THIS ?: 144, # Zimbabwe
}
return table[key] if key in table else None
def convert_attributes(product, detail=None):
result = []
for tag in product.tags.all():
id = get_attribute_id(tag.name)
if id is not None:
result.append({
'AttributeId': id
})
# Special case for "Nyhet"
if not detail and product.product_detail:
detail = product.product_detail.filter(store=10).first()
if detail is None:
detail = product.product_detail.first()
if detail:
first_enabled = detail.first_enabled if detail.first_enabled else datetime.now() - \
timedelta(days=60)
result.append({
'AttributeId': 2173,
'FromDate': first_enabled,
'ToDate': first_enabled + timedelta(days=30),
})
return result
def create_dynamic_property(key, value, store=None):
prop = {
'PropertyId': get_dynamic_property_id(key),
'PropertyName': key,
'PropertyValue': value,
}
if store is not None:
prop['StoreId'] = store
return prop
def convert_dynamic_properties(product):
result = [
create_dynamic_property('Volume', product.volume_dm3),
create_dynamic_property('Weight', product.weight_g),
create_dynamic_property('KfpHeightMM', product.height_mm),
create_dynamic_property('KfpLengthtMM', product.length_mm),
create_dynamic_property('KfpWidthMM', product.width_mm),
create_dynamic_property('Season', '.'.join(
convert_season_tags(product))),
create_dynamic_property('LastReceiptDay', product.last_receipt_day),
create_dynamic_property('LastSalesDay', product.last_sales_day),
create_dynamic_property('TruckrouteOptimizationProd3',
convert_order_route_from_product_type(product.product_type)),
create_dynamic_property('BDMaterialNumber',
product.prefered_merchantarticle.external_id if product.prefered_merchantarticle else None),
create_dynamic_property('SupplierArticleNumber',
product.prefered_merchantarticle.external_id if product.prefered_merchantarticle else None),
]
base_unit_quantity = get_base_unit_quantity(product, product.article.gtin)
if base_unit_quantity is not None:
        result.append(create_dynamic_property('KfpDfp', base_unit_quantity))
for detail in product.product_detail.all():
result.append(create_dynamic_property(
'OrderFactor', 1 if detail.orderfactor else 0, detail.store))
result.append(create_dynamic_property(
'BDMaterialNumber', detail.prefered_merchantarticle.external_id if detail.prefered_merchantarticle else None, detail.store))
result.append(create_dynamic_property(
'SupplierArticleNumber', detail.prefered_merchantarticle.external_id if detail.prefered_merchantarticle else None, detail.store))
base_unit_quantity = get_base_unit_quantity(
detail, product.article.gtin)
if base_unit_quantity is not None:
            result.append(create_dynamic_property('KfpDfp', base_unit_quantity, detail.store))
return result
def get_base_unit_quantity(product, base_unit_gtin):
if product.prefered_merchantarticle is not None:
if product.prefered_merchantarticle.article.child_gtin == base_unit_gtin:
return product.prefered_merchantarticle.article.quantity_of_lower_layer
else:
upper_quantity = product.prefered_merchantarticle.article.quantity_of_lower_layer
next_lower_article = Article.objects.filter(
gtin=product.prefered_merchantarticle.article.child_gtin).first()
if next_lower_article is not None:
if next_lower_article.child_gtin == product.article.gtin:
return next_lower_article.quantity_of_lower_layer * upper_quantity
return None
def convert_unit(validoo_unit):
# data from prefilledautomaten.unit
unit_table = {
"H87": 1, # st, PIECES
"GRM": 2, # g, WEIGHT
"KGM": 3, # kg, WEIGHT
"DLT": 6, # dl, VOLUME
"LTR": 7, # L, VOLUME
"MLT": 10, # ml, VOLUME
"CLT": 11, # cl, VOLUME
"HGM": 12, # hg, WEIGHT
"G24": 13, # msk, VOLUME
"G25": 14, # tsk, VOLUME
# "???": 16, # st tekoppar, VOLUME
# "???": 17, # st kaffekoppar, VOLUME
# "???": 18, # glas, VOLUME
"MGM": 25, # mg, WEIGHT,
# "???": 26, # krm, VOLUME
# "???": 27, # st klyftor, PARTS,
# "???": 28, # st krukor, PIECES
# "???": 29, # st tärningar, PIECES
# "???": 30, # knippe, PIECES
}
    if validoo_unit in unit_table:
        return unit_table[validoo_unit]
return None
def convert_tags(product):
tags = filter(lambda tag: get_season_tag_name(tag.name) is
None and get_attribute_id(tag.name) is None, product.tags.all())
return list(map(lambda tag: tag.id, tags))
def convert_product(product):
from api.serializers import ProductSerializer
serializer = ProductSerializer(product)
article = product.article
image = product.productimage_set.first()
unit_id = convert_unit(serializer.data['net_content_unit_code'])
return filter_dict({
"ProductId": product.product_id, # int
"ProductName": serializer.data['name'], # string
"Quantity": serializer.data['net_content'], # float
# int
"UnitId": unit_id,
"DisplayUnitId": unit_id, # int
"CategoryId": product.product_category.id if product.product_category else None, # int
# "ProductGroupId": ???, # int
# "CalculatedWeight": ???, # float
# "RecommendedPrice": ???, # float
"VatRate": article.vat, # float
"EanCode": article.gtin, # string
# string
"ImageUrl": image.filename if image else None,
# "ProductUrl": ???, # string
# "SupplierId": ???, # int
# "MaximumOrder": ???, # float
"ProductDescription": serializer.data['description'], # string
# "UsageDescription": ???, # string
# string
"IngredientsDescription": serializer.data['ingredient_description'],
# string
"NutritionDescription": serializer.data['nutrition_description'],
# "StorageDescription": ???, # string
# "StoreVarmColdFrozen": ???, # string
# "PossibleToBuy": ???, # bool
# "IsOffer": ???, # bool
"RecycleFee": product.recycle_fee, # double
# "AmountInPackage": ???, # int
# "TempMostBought": ???, # int
# "ExternalComment": ???, # string
# "InternalComment": ???, # string
# "IsPickingCostIncluded": ???, # bool
# "IsDeliveryCostIncluded": ???, # bool
# "RatesSum": ???, # int
# "RatesCount": ???, # int
"OriginId": get_origin_id(product.origin), # int?
# "IsWine": ???, # bool
# "AxfoodSAPId": ???, # string
# "IsEcological": ???, # bool
# "RelatedProductIDs": ???, # string
"IsAdultProduct": product.adult_product, # bool
# "AutomaticSubscription": ???, # bool
# "IsAlreadyRenamed": ???, # bool
# "OriginalAfterRenameFileSize": ???, # string
# "OriginalCurrentFileSize": ???, # string
# "CreationDate": ???, # DateTime?
# "LastModifiedDate": ???, # DateTime?
# "LastUpdatedByUserId": ???, # int
# "RemovedDate": ???, # DateTime?
})
def convert_product_store(detail, product):
return filter_dict({
# "ProductStoreId": ???, # int
"ProductId": product.product_id, # int
"StoreId": detail.store, # int
# "LocalEancode": ???, # string
"CalculatedCustomerPrice": detail.price, # decimal
# "CalculatedCustomerPrice_Per_Unit": ???, # decimal
"IsOutOfStock": detail.status == 2, # bool
# "OutOfStockDate": ???, # DateTime
# "StockBackDate": ???, # DateTime
"IsReplacementProduct": detail.status == 3 # bool
# "IsApproximateWeight": ???, # bool
# "IsShowPricePerUnit": ???, # bool
# "PriceValidFrom": ???, # DateTime
# "PriceValidTo": ???, # DateTime
# "PriceIn": ???, # decimal
# "PercentageAddon": ???, # decimal
# "FixedAddon": ???, # decimal
# "PickingZone1": ???, # string
# "PickingZone2": ???, # string
# "PickingZone3": ???, # string
# "SoldCount": ???, # int
# "IsForeCastPriorityProduct": ???, # bool
# "DontShowAsMissedProduct": ???, # bool
# "StoreLevelOriginId": ???, # int?
# "PickingNote": ???, # string
# "AdvanceDeliveryMinimumOrder": ???, # int
# "MinimumRequiredDeliveryDays": ???, # byte
# "DeliverableWeekDays": ???, # string
# "DeliveryDaysAhead": ???, # int
# "CancelDaysBefore": ???, # int
# "StorePriceIn": ???, # decimal
# "CreationDate": ???, # DateTime?
# "LastModifiedDate": ???, # DateTime?
# "RemovedDate": ???, # DateTime?
# "CanSendAdvanceDeliveryEmail": ???, # bool
# "OldCalculatedCustomerPrice": ???, # decimal
})
def convert_product_stores(product):
return list(map(lambda x: convert_product_store(x, product), product.product_detail.all()))
| 'Ekonomipack': 1,
'Nyckelhålsmärkt': 1736,
'Ekologisk': 2167,
'Glutenfri': 2168,
'Laktosfri': 2169,
'Låglaktos': 2170,
'Premiumkvalité': 2171,
'Mjölkproteinfri': 2172,
# 'Nyhet': 2173,
'18Åldersgräns': 2174,
'Fairtrade': 2175,
'Svanenmärkt': 2176,
'Kravmärkt': 2177,
'Video': 2178,
'Äkta vara': 2181,
'Astma- och Allergiförbundet': 2184,
'test': 2187,
'Rosa bandet': 2190,
'Svenskt sigill': 2191,
'3+ dagar': 2194,
'5+ dagar': 2197,
'7+ dagar': 2200,
'10+ dagar': 2203,
'30+ dagar': 2206,
'Svenskt ursprung': 2209,
'Svensk fågel': 2212,
'4+ dagar': 2215,
'Vegansk': 2218,
'MSC': 2219,
'Strategisk produkt': 2222,
'Svenskt sigill klimatcertifierad': 2224,
'ASC': 2227,
'Från Sverige': 2230,
'Kött från Sverige': 2233,
'Mjölk från Sverige': 2236,
'Faroklass brandfarligt': 2239,
'Faroklass miljöfarligt': 2242,
'Faroklass skadligt': 2245,
'Faroklass Warning': 2248,
'Energiklass A+': 2251,
'Energiklass C': 2254,
'Energiklass D': 2257,
'Energiklass E': 2260,
'Energiklass A++': 2263,
'Energiklass A': 2266,
'Energiklass B': 2269,
}
return table[key] if key in table else None
def get_dynamic_property_id | identifier_body |
core_convert.py | from datetime import timedelta, datetime
def filter_dict(obj, val=None):
# TODO: We should not always remove all None items (maybe!?)
return dict(filter(lambda item: item[1] is not val, obj.items()))
def get_season_tag_name(key):
table = {
"Clas Ohlson": "COB",
"Matkasse": "MK",
"Grillartiklar": "G",
"Halloweenartiklar": "H",
"Julartiklar": "J",
"Artiklar som säljs året runt, men mest runt jul": "JB",
"Midsommarartiklar": "M",
"Artiklar som säljs året runt, men mest runt midsommar": "MB",
"Nyårsartiklar": "N",
"Artiklar som säljs året runt, men mest runt nyår": "NB",
"Påskartiklar": "P",
"Artiklar som säljs året runt, men mest runt påsk": "PB",
"Sommarartiklar": "S",
"Sommartorget": "ST",
}
return table[key] if key in table else None
def convert_season_tags(product):
    # look up by tag name (the table keys are names; the other helpers use tag.name)
    tags = map(lambda x: get_season_tag_name(x.name), product.tags.all())
return list(filter(lambda x: x is not None, tags))
def convert_order_route_from_product_type(key):
table = {
"Crossdocking": "X",
"Nightorder": "A",
}
return table[key] if key in table else None
def get_attribute_id(key):
# data from prefilledautomaten.attribute
table = {
'Ekonomipack': 1,
'Nyckelhålsmärkt': 1736,
'Ekologisk': 2167,
'Glutenfri': 2168,
'Laktosfri': 2169,
'Låglaktos': 2170,
'Premiumkvalité': 2171,
'Mjölkproteinfri': 2172,
# 'Nyhet': 2173,
'18Åldersgräns': 2174,
'Fairtrade': 2175,
'Svanenmärkt': 2176,
'Kravmärkt': 2177,
'Video': 2178,
'Äkta vara': 2181,
'Astma- och Allergiförbundet': 2184,
'test': 2187,
'Rosa bandet': 2190,
'Svenskt sigill': 2191,
'3+ dagar': 2194,
'5+ dagar': 2197,
'7+ dagar': 2200,
'10+ dagar': 2203,
'30+ dagar': 2206,
'Svenskt ursprung': 2209,
'Svensk fågel': 2212,
'4+ dagar': 2215,
'Vegansk': 2218,
'MSC': 2219,
'Strategisk produkt': 2222,
'Svenskt sigill klimatcertifierad': 2224,
'ASC': 2227,
'Från Sverige': 2230,
'Kött från Sverige': 2233,
'Mjölk från Sverige': 2236,
'Faroklass brandfarligt': 2239,
'Faroklass miljöfarligt': 2242,
'Faroklass skadligt': 2245,
'Faroklass Warning': 2248,
'Energiklass A+': 2251,
'Energiklass C': 2254,
'Energiklass D': 2257,
'Energiklass E': 2260,
'Energiklass A++': 2263,
'Energiklass A': 2266,
'Energiklass B': 2269,
}
return table[key] if key in table else None
def get_dynamic_property_id(key):
table = {
'Volume': 1,
'Weight': 2,
'KfpDfp': 3,
'LastSalesDay': 4,
'LastReceiptDay': 5,
'OldPz1': 6,
'OldPz2': 7,
'OldPz3': 8,
'MaxStock': 9,
'Season': 10,
'OrderFactor': 11,
'MinStock': 12,
'DfpLengthMM': 13,
'DfpWidthMM': 14,
'DfpHeightMM': 15,
'DfpWeightG': 16,
'DfpType': 17,
'SupplierArticleNumber': 18,
'AxfoodArticleId': 19,
'TruckrouteOptimizationProd3': 20,
'KfpHeightMM': 21,
'KfpLengthtMM': 22,
'KfpWidthMM': 23,
'IsFakeStockBalance': 24,
'ExternalImageUrl': 25,
'ProductSupplier': 26,
'ValdioDFPWidthMM': 27,
'ValdioDFPHeightMM': 28,
'ValidoDFPLengthtMM': 29,
'ValdioDFPWeightG': 30,
'DFPEANCode': 31,
'SafetyStock': 33,
'KfpDfpPurchaseOrder': 36,
'NoNutritionsNeeded': 38,
'NoIngredientsNeeded': 41,
'NoAllergensNeeded': 44,
'DeliveredUnitConversionFactor': 45,
'HandlingUnitQuantity': 46,
'BDMaterialNumber': 49,
'ProductSegment': 55,
'StandardUnitKfp': 56,
'StandardUnitGtin': 59,
'LimitedOfferProduct': 61,
'QLPricing': 64,
'QLMatching': 67,
'FirstSalesDate': 70,
'CategoryManager': 73,
}
return table[key] if key in table else None
def get_origin_id(key):
table = {
752: 1, # Svensk
249: 2, # Fransk
# TODO: MAP THIS ?: 3, # Afrika
# TODO: MAP THIS ?: 4, # Grekiskt
# TODO: MAP THIS ?: 5, # Indien
# TODO: MAP THIS ?: 6, # Nordamerika
# TODO: MAP THIS ?: 7, # Latinamerika
# TODO: MAP THIS ?: 8, # Orienten
# TODO: MAP THIS ?: 9, # Japan
# TODO: MAP THIS ?: 10, # Italienskt
# TODO: MAP THIS ?: 11, # Sydostasien
# TODO: MAP THIS ?: 12, # Spansk
# TODO: MAP THIS ?: 13, # Tyskland
# TODO: MAP THIS ?: 14, # "Ryssland och Östeuropa"
# TODO: MAP THIS ?: 15, # Internationellt
# TODO: MAP THIS ?: 16, # Övriga
# TODO: MAP THIS ?: 73, # Sverige
# TODO: MAP THIS ?: 74, # Norge
# TODO: MAP THIS ?: 75, # Kanada
# TODO: MAP THIS ?: 76, # Frankrike
# TODO: MAP THIS ?: 77, # Grekland
# TODO: MAP THIS ?: 78, # Portugal
# TODO: MAP THIS ?: 79, # Danmark
# TODO: MAP THIS ?: 80, # Italien
# TODO: MAP THIS ?: 81, # Finland
# TODO: MAP THIS ?: 82, # Kalifornien
# TODO: MAP THIS ?: 83, # Thailand
# TODO: MAP THIS ?: 84, # Kina
# TODO: MAP THIS ?: 85, # Belgien
# TODO: MAP THIS ?: 86, # Europa
# TODO: MAP THIS ?: 87, # Turkiet
# TODO: MAP THIS ?: 88, # Holland
# TODO: MAP THIS ?: 89, # England
# TODO: MAP THIS ?: 90, # Spanien
# TODO: MAP THIS ?: 91, # Nederländerna
# TODO: MAP THIS ?: 92, # Polen
# TODO: MAP THIS ?: 93, # "Blandat: EG och icke EG"
# TODO: MAP THIS ?: 94, # Ungern
# TODO: MAP THIS ?: 95, # Bulgarien
# TODO: MAP THIS ?: 96, # Kroatien
# TODO: MAP THIS ?: 98, # India
# TODO: MAP THIS ?: 99, # Uruguay
# TODO: MAP THIS ?: 100, # Irland
# TODO: MAP THIS ?: 101, # "Nya Zeeland"
# TODO: MAP THIS ?: 102, # Sverige/England
# TODO: MAP THIS ?: 103, # Sverige/Danmark
# TODO: MAP THIS ?: 104, # China
# TODO: MAP THIS ?: 105, # Holland/Frankrike
# TODO: MAP THIS ?: 106, # "Costa Rica"
# TODO: MAP THIS ?: 107, # Zaire
# TODO: MAP THIS ?: 108, # Israel/USA
# TODO: MAP THIS ?: 109, # Mexico
# TODO: MAP THIS ?: 110, # Holland/Belgien
# TODO: MAP THIS ?: 111, # Frankrike/Italien
# TODO: MAP THIS ?: 112, # Sverge
# TODO: MAP THIS ?: 113, # Centralamerika
# TODO: MAP THIS ?: 114, # Brasilien
# TODO: MAP THIS ?: 115, # Israel/Indien
# TODO: MAP THIS ?: 116, # "Italien/Nya Zeeland"
# TODO: MAP THIS ?: 117, # Sydafrika
# TODO: MAP THIS ?: 118, # Argentina
# TODO: MAP THIS ?: 119, # China/Thailand
# TODO: MAP THIS ?: 120, # USA
# TODO: MAP THIS ?: 121, # Kenya
# TODO: MAP THIS ?: 122, # Israel
# TODO: MAP THIS ?: 123, # Malaysia
# TODO: MAP THIS ?: 124, # Nordostatlanten
# TODO: MAP THIS ?: 125, # Vietnam
# TODO: MAP THIS ?: 126, # Norden
# TODO: MAP THIS ?: 127, # Litauen
# TODO: MAP THIS ?: 131, # Roslagen
# TODO: MAP THIS ?: 135, # U.S.A.
# TODO: MAP THIS ?: 136, # DK
# TODO: MAP THIS ?: 137, # Egypten
# TODO: MAP THIS ?: 138, # Marocko
# TODO: MAP THIS ?: 139, # Chile
# TODO: MAP THIS ?: 140, # "Dominikanska Republiken"
# TODO: MAP THIS ?: 141, # Iran
# TODO: MAP THIS ?: 142, # Colombia
# TODO: MAP THIS ?: 143, # Peru
# TODO: MAP THIS ?: 144, # Zimbabwe
}
return table[key] if key in table else None
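# convert_attributes maps each product tag to an attribute id via
# get_attribute_id and, when a product detail is available, also adds the
# "Nyhet" attribute (2173) with a validity window that starts at the
# detail's first_enabled date (store 10 preferred) and ends 30 days later.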
def convert_attributes(product, detail=None):
result = []
for tag in product.tags.all():
id = get_attribute_id(tag.name)
if id is not None:
result.append({
'AttributeId': id
})
# Special case for "Nyhet"
if not detail and product.product_detail:
detail = product.product_detail.filter(store=10).first()
if detail is None:
detail = product.product_detail.first()
if detail:
first_enabled = detail.first_enabled if detail.first_enabled else datetime.now() - \
timedelta(days=60)
result.append({
'AttributeId': 2173,
'FromDate': first_enabled,
'ToDate': first_enabled + timedelta(days=30),
})
return result
def create_dynamic_property(key, value, store=None):
prop = {
'PropertyId': get_dynamic_property_id(key),
'PropertyName': key,
'PropertyValue': value,
}
if store is not None:
prop['StoreId'] = store
return prop
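# convert_dynamic_properties builds the product-level dynamic property list
# (dimensions, season codes, truck-route optimization, supplier references)
# and then appends store-level overrides for every product_detail row.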
def convert_dynamic_properties(product):
result = [
create_dynamic_property('Volume', product.volume_dm3),
create_dynamic_property('Weight', product.weight_g),
create_dynamic_property('KfpHeightMM', product.height_mm),
create_dynamic_property('KfpLengthtMM', product.length_mm),
create_dynamic_property('KfpWidthMM', product.width_mm),
create_dynamic_property('Season', '.'.join(
convert_season_tags(product))),
create_dynamic_property('LastReceiptDay', product.last_receipt_day),
create_dynamic_property('LastSalesDay', product.last_sales_day),
create_dynamic_property('TruckrouteOptimizationProd3',
convert_order_route_from_product_type(product.product_type)),
create_dynamic_property('BDMaterialNumber',
product.prefered_merchantarticle.external_id if product.prefered_merchantarticle else None),
create_dynamic_property('SupplierArticleNumber',
product.prefered_merchantarticle.external_id if product.prefered_merchantarticle else None),
]
base_unit_quantity = get_base_unit_quantity(product, product.article.gtin)
if base_unit_quantity is not None:
result.append(create_dynamic_property('KfpDfp', base_unit_quantity))
for detail in product.product_detail.all():
result.append(create_dynamic_property(
'OrderFactor', 1 if detail.orderfactor else 0, detail.store))
result.append(create_dynamic_property(
'BDMaterialNumber', detail.prefered_merchantarticle.external_id if detail.prefered_merchantarticle else None, detail.store))
result.append(create_dynamic_property(
'SupplierArticleNumber', detail.prefered_merchantarticle.external_id if detail.prefered_merchantarticle else None, detail.store))
base_unit_quantity = get_base_unit_quantity(
detail, product.article.gtin)
if base_unit_quantity is not None:
result.append(create_dynamic_property('KfpDfp', base_unit_quantity, detail.store))
return result
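# get_base_unit_quantity derives how many base units (identified by
# base_unit_gtin) one ordered unit contains, following at most two
# packaging levels down from the preferred merchant article.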
def get_base_unit_quantity(product, base_unit_gtin):
if product.prefered_merchantarticle is not None:
if product.prefered_merchantarticle.article.child_gtin == base_unit_gtin:
return product.prefered_merchantarticle.article.quantity_of_lower_layer
else:
upper_quantity = product.prefered_merchantarticle.article.quantity_of_lower_layer
next_lower_article = Article.objects.filter(
gtin=product.prefered_merchantarticle.article.child_gtin).first()
if next_lower_article is not None:
if next_lower_article.child_gtin == product.article.gtin:
return next_lower_article.quantity_of_lower_layer * upper_quantity
return None
def convert_unit(validoo_unit):
# data from prefilledautomaten.unit
unit_table = { | "MLT": 10, # ml, VOLUME
"CLT": 11, # cl, VOLUME
"HGM": 12, # hg, WEIGHT
"G24": 13, # msk, VOLUME
"G25": 14, # tsk, VOLUME
# "???": 16, # st tekoppar, VOLUME
# "???": 17, # st kaffekoppar, VOLUME
# "???": 18, # glas, VOLUME
"MGM": 25, # mg, WEIGHT,
# "???": 26, # krm, VOLUME
# "???": 27, # st klyftor, PARTS,
# "???": 28, # st krukor, PIECES
# "???": 29, # st tärningar, PIECES
# "???": 30, # knippe, PIECES
}
if validoo_unit in unit_table:
return unit_table[validoo_unit]
return None
def convert_tags(product):
tags = filter(lambda tag: get_season_tag_name(tag.name) is
None and get_attribute_id(tag.name) is None, product.tags.all())
return list(map(lambda tag: tag.id, tags))
def convert_product(product):
from api.serializers import ProductSerializer
serializer = ProductSerializer(product)
article = product.article
image = product.productimage_set.first()
unit_id = convert_unit(serializer.data['net_content_unit_code'])
return filter_dict({
"ProductId": product.product_id, # int
"ProductName": serializer.data['name'], # string
"Quantity": serializer.data['net_content'], # float
# int
"UnitId": unit_id,
"DisplayUnitId": unit_id, # int
"CategoryId": product.product_category.id if product.product_category else None, # int
# "ProductGroupId": ???, # int
# "CalculatedWeight": ???, # float
# "RecommendedPrice": ???, # float
"VatRate": article.vat, # float
"EanCode": article.gtin, # string
# string
"ImageUrl": image.filename if image else None,
# "ProductUrl": ???, # string
# "SupplierId": ???, # int
# "MaximumOrder": ???, # float
"ProductDescription": serializer.data['description'], # string
# "UsageDescription": ???, # string
# string
"IngredientsDescription": serializer.data['ingredient_description'],
# string
"NutritionDescription": serializer.data['nutrition_description'],
# "StorageDescription": ???, # string
# "StoreVarmColdFrozen": ???, # string
# "PossibleToBuy": ???, # bool
# "IsOffer": ???, # bool
"RecycleFee": product.recycle_fee, # double
# "AmountInPackage": ???, # int
# "TempMostBought": ???, # int
# "ExternalComment": ???, # string
# "InternalComment": ???, # string
# "IsPickingCostIncluded": ???, # bool
# "IsDeliveryCostIncluded": ???, # bool
# "RatesSum": ???, # int
# "RatesCount": ???, # int
"OriginId": get_origin_id(product.origin), # int?
# "IsWine": ???, # bool
# "AxfoodSAPId": ???, # string
# "IsEcological": ???, # bool
# "RelatedProductIDs": ???, # string
"IsAdultProduct": product.adult_product, # bool
# "AutomaticSubscription": ???, # bool
# "IsAlreadyRenamed": ???, # bool
# "OriginalAfterRenameFileSize": ???, # string
# "OriginalCurrentFileSize": ???, # string
# "CreationDate": ???, # DateTime?
# "LastModifiedDate": ???, # DateTime?
# "LastUpdatedByUserId": ???, # int
# "RemovedDate": ???, # DateTime?
})
def convert_product_store(detail, product):
return filter_dict({
# "ProductStoreId": ???, # int
"ProductId": product.product_id, # int
"StoreId": detail.store, # int
# "LocalEancode": ???, # string
"CalculatedCustomerPrice": detail.price, # decimal
# "CalculatedCustomerPrice_Per_Unit": ???, # decimal
"IsOutOfStock": detail.status == 2, # bool
# "OutOfStockDate": ???, # DateTime
# "StockBackDate": ???, # DateTime
"IsReplacementProduct": detail.status == 3 # bool
# "IsApproximateWeight": ???, # bool
# "IsShowPricePerUnit": ???, # bool
# "PriceValidFrom": ???, # DateTime
# "PriceValidTo": ???, # DateTime
# "PriceIn": ???, # decimal
# "PercentageAddon": ???, # decimal
# "FixedAddon": ???, # decimal
# "PickingZone1": ???, # string
# "PickingZone2": ???, # string
# "PickingZone3": ???, # string
# "SoldCount": ???, # int
# "IsForeCastPriorityProduct": ???, # bool
# "DontShowAsMissedProduct": ???, # bool
# "StoreLevelOriginId": ???, # int?
# "PickingNote": ???, # string
# "AdvanceDeliveryMinimumOrder": ???, # int
# "MinimumRequiredDeliveryDays": ???, # byte
# "DeliverableWeekDays": ???, # string
# "DeliveryDaysAhead": ???, # int
# "CancelDaysBefore": ???, # int
# "StorePriceIn": ???, # decimal
# "CreationDate": ???, # DateTime?
# "LastModifiedDate": ???, # DateTime?
# "RemovedDate": ???, # DateTime?
# "CanSendAdvanceDeliveryEmail": ???, # bool
# "OldCalculatedCustomerPrice": ???, # decimal
})
def convert_product_stores(product):
return list(map(lambda x: convert_product_store(x, product), product.product_detail.all())) | "H87": 1, # st, PIECES
"GRM": 2, # g, WEIGHT
"KGM": 3, # kg, WEIGHT
"DLT": 6, # dl, VOLUME
"LTR": 7, # L, VOLUME | random_line_split |
core_convert.py | from datetime import timedelta, datetime
def filter_dict(obj, val=None):
# TODO: We should not always remove all None items (maybe!?)
return dict(filter(lambda item: item[1] is not val, obj.items()))
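# get_season_tag_name translates a human-readable tag into the short season
# code used by convert_season_tags (e.g. "Julartiklar" -> "J"); unmapped
# tags yield None so callers can filter them out.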
def get_season_tag_name(key):
table = {
"Clas Ohlson": "COB",
"Matkasse": "MK",
"Grillartiklar": "G",
"Halloweenartiklar": "H",
"Julartiklar": "J",
"Artiklar som säljs året runt, men mest runt jul": "JB",
"Midsommarartiklar": "M",
"Artiklar som säljs året runt, men mest runt midsommar": "MB",
"Nyårsartiklar": "N",
"Artiklar som säljs året runt, men mest runt nyår": "NB",
"Påskartiklar": "P",
"Artiklar som säljs året runt, men mest runt påsk": "PB",
"Sommarartiklar": "S",
"Sommartorget": "ST",
}
return table[key] if key in table else None
def convert_season_tags(product):
tags = map(lambda x: get_season_tag_name(x.name), product.tags.all())
return list(filter(lambda x: x is not None, tags))
def convert_orde | able = {
"Crossdocking": "X",
"Nightorder": "A",
}
return table[key] if key in table else None
def get_attribute_id(key):
# data from prefilledautomaten.attribute
table = {
'Ekonomipack': 1,
'Nyckelhålsmärkt': 1736,
'Ekologisk': 2167,
'Glutenfri': 2168,
'Laktosfri': 2169,
'Låglaktos': 2170,
'Premiumkvalité': 2171,
'Mjölkproteinfri': 2172,
# 'Nyhet': 2173,
'18Åldersgräns': 2174,
'Fairtrade': 2175,
'Svanenmärkt': 2176,
'Kravmärkt': 2177,
'Video': 2178,
'Äkta vara': 2181,
'Astma- och Allergiförbundet': 2184,
'test': 2187,
'Rosa bandet': 2190,
'Svenskt sigill': 2191,
'3+ dagar': 2194,
'5+ dagar': 2197,
'7+ dagar': 2200,
'10+ dagar': 2203,
'30+ dagar': 2206,
'Svenskt ursprung': 2209,
'Svensk fågel': 2212,
'4+ dagar': 2215,
'Vegansk': 2218,
'MSC': 2219,
'Strategisk produkt': 2222,
'Svenskt sigill klimatcertifierad': 2224,
'ASC': 2227,
'Från Sverige': 2230,
'Kött från Sverige': 2233,
'Mjölk från Sverige': 2236,
'Faroklass brandfarligt': 2239,
'Faroklass miljöfarligt': 2242,
'Faroklass skadligt': 2245,
'Faroklass Warning': 2248,
'Energiklass A+': 2251,
'Energiklass C': 2254,
'Energiklass D': 2257,
'Energiklass E': 2260,
'Energiklass A++': 2263,
'Energiklass A': 2266,
'Energiklass B': 2269,
}
return table[key] if key in table else None
def get_dynamic_property_id(key):
table = {
'Volume': 1,
'Weight': 2,
'KfpDfp': 3,
'LastSalesDay': 4,
'LastReceiptDay': 5,
'OldPz1': 6,
'OldPz2': 7,
'OldPz3': 8,
'MaxStock': 9,
'Season': 10,
'OrderFactor': 11,
'MinStock': 12,
'DfpLengthMM': 13,
'DfpWidthMM': 14,
'DfpHeightMM': 15,
'DfpWeightG': 16,
'DfpType': 17,
'SupplierArticleNumber': 18,
'AxfoodArticleId': 19,
'TruckrouteOptimizationProd3': 20,
'KfpHeightMM': 21,
'KfpLengthtMM': 22,
'KfpWidthMM': 23,
'IsFakeStockBalance': 24,
'ExternalImageUrl': 25,
'ProductSupplier': 26,
'ValdioDFPWidthMM': 27,
'ValdioDFPHeightMM': 28,
'ValidoDFPLengthtMM': 29,
'ValdioDFPWeightG': 30,
'DFPEANCode': 31,
'SafetyStock': 33,
'KfpDfpPurchaseOrder': 36,
'NoNutritionsNeeded': 38,
'NoIngredientsNeeded': 41,
'NoAllergensNeeded': 44,
'DeliveredUnitConversionFactor': 45,
'HandlingUnitQuantity': 46,
'BDMaterialNumber': 49,
'ProductSegment': 55,
'StandardUnitKfp': 56,
'StandardUnitGtin': 59,
'LimitedOfferProduct': 61,
'QLPricing': 64,
'QLMatching': 67,
'FirstSalesDate': 70,
'CategoryManager': 73,
}
return table[key] if key in table else None
def get_origin_id(key):
table = {
752: 1, # Svensk
249: 2, # Fransk
# TODO: MAP THIS ?: 3, # Afrika
# TODO: MAP THIS ?: 4, # Grekiskt
# TODO: MAP THIS ?: 5, # Indien
# TODO: MAP THIS ?: 6, # Nordamerika
# TODO: MAP THIS ?: 7, # Latinamerika
# TODO: MAP THIS ?: 8, # Orienten
# TODO: MAP THIS ?: 9, # Japan
# TODO: MAP THIS ?: 10, # Italienskt
# TODO: MAP THIS ?: 11, # Sydostasien
# TODO: MAP THIS ?: 12, # Spansk
# TODO: MAP THIS ?: 13, # Tyskland
# TODO: MAP THIS ?: 14, # "Ryssland och Östeuropa"
# TODO: MAP THIS ?: 15, # Internationellt
# TODO: MAP THIS ?: 16, # Övriga
# TODO: MAP THIS ?: 73, # Sverige
# TODO: MAP THIS ?: 74, # Norge
# TODO: MAP THIS ?: 75, # Kanada
# TODO: MAP THIS ?: 76, # Frankrike
# TODO: MAP THIS ?: 77, # Grekland
# TODO: MAP THIS ?: 78, # Portugal
# TODO: MAP THIS ?: 79, # Danmark
# TODO: MAP THIS ?: 80, # Italien
# TODO: MAP THIS ?: 81, # Finland
# TODO: MAP THIS ?: 82, # Kalifornien
# TODO: MAP THIS ?: 83, # Thailand
# TODO: MAP THIS ?: 84, # Kina
# TODO: MAP THIS ?: 85, # Belgien
# TODO: MAP THIS ?: 86, # Europa
# TODO: MAP THIS ?: 87, # Turkiet
# TODO: MAP THIS ?: 88, # Holland
# TODO: MAP THIS ?: 89, # England
# TODO: MAP THIS ?: 90, # Spanien
# TODO: MAP THIS ?: 91, # Nederländerna
# TODO: MAP THIS ?: 92, # Polen
# TODO: MAP THIS ?: 93, # "Blandat: EG och icke EG"
# TODO: MAP THIS ?: 94, # Ungern
# TODO: MAP THIS ?: 95, # Bulgarien
# TODO: MAP THIS ?: 96, # Kroatien
# TODO: MAP THIS ?: 98, # India
# TODO: MAP THIS ?: 99, # Uruguay
# TODO: MAP THIS ?: 100, # Irland
# TODO: MAP THIS ?: 101, # "Nya Zeeland"
# TODO: MAP THIS ?: 102, # Sverige/England
# TODO: MAP THIS ?: 103, # Sverige/Danmark
# TODO: MAP THIS ?: 104, # China
# TODO: MAP THIS ?: 105, # Holland/Frankrike
# TODO: MAP THIS ?: 106, # "Costa Rica"
# TODO: MAP THIS ?: 107, # Zaire
# TODO: MAP THIS ?: 108, # Israel/USA
# TODO: MAP THIS ?: 109, # Mexico
# TODO: MAP THIS ?: 110, # Holland/Belgien
# TODO: MAP THIS ?: 111, # Frankrike/Italien
# TODO: MAP THIS ?: 112, # Sverge
# TODO: MAP THIS ?: 113, # Centralamerika
# TODO: MAP THIS ?: 114, # Brasilien
# TODO: MAP THIS ?: 115, # Israel/Indien
# TODO: MAP THIS ?: 116, # "Italien/Nya Zeeland"
# TODO: MAP THIS ?: 117, # Sydafrika
# TODO: MAP THIS ?: 118, # Argentina
# TODO: MAP THIS ?: 119, # China/Thailand
# TODO: MAP THIS ?: 120, # USA
# TODO: MAP THIS ?: 121, # Kenya
# TODO: MAP THIS ?: 122, # Israel
# TODO: MAP THIS ?: 123, # Malaysia
# TODO: MAP THIS ?: 124, # Nordostatlanten
# TODO: MAP THIS ?: 125, # Vietnam
# TODO: MAP THIS ?: 126, # Norden
# TODO: MAP THIS ?: 127, # Litauen
# TODO: MAP THIS ?: 131, # Roslagen
# TODO: MAP THIS ?: 135, # U.S.A.
# TODO: MAP THIS ?: 136, # DK
# TODO: MAP THIS ?: 137, # Egypten
# TODO: MAP THIS ?: 138, # Marocko
# TODO: MAP THIS ?: 139, # Chile
# TODO: MAP THIS ?: 140, # "Dominikanska Republiken"
# TODO: MAP THIS ?: 141, # Iran
# TODO: MAP THIS ?: 142, # Colombia
# TODO: MAP THIS ?: 143, # Peru
# TODO: MAP THIS ?: 144, # Zimbabwe
}
return table[key] if key in table else None
def convert_attributes(product, detail=None):
result = []
for tag in product.tags.all():
id = get_attribute_id(tag.name)
if id is not None:
result.append({
'AttributeId': id
})
# Special case for "Nyhet"
if not detail and product.product_detail:
detail = product.product_detail.filter(store=10).first()
if detail is None:
detail = product.product_detail.first()
if detail:
first_enabled = detail.first_enabled if detail.first_enabled else datetime.now() - \
timedelta(days=60)
result.append({
'AttributeId': 2173,
'FromDate': first_enabled,
'ToDate': first_enabled + timedelta(days=30),
})
return result
def create_dynamic_property(key, value, store=None):
prop = {
'PropertyId': get_dynamic_property_id(key),
'PropertyName': key,
'PropertyValue': value,
}
if store is not None:
prop['StoreId'] = store
return prop
def convert_dynamic_properties(product):
result = [
create_dynamic_property('Volume', product.volume_dm3),
create_dynamic_property('Weight', product.weight_g),
create_dynamic_property('KfpHeightMM', product.height_mm),
create_dynamic_property('KfpLengthtMM', product.length_mm),
create_dynamic_property('KfpWidthMM', product.width_mm),
create_dynamic_property('Season', '.'.join(
convert_season_tags(product))),
create_dynamic_property('LastReceiptDay', product.last_receipt_day),
create_dynamic_property('LastSalesDay', product.last_sales_day),
create_dynamic_property('TruckrouteOptimizationProd3',
convert_order_route_from_product_type(product.product_type)),
create_dynamic_property('BDMaterialNumber',
product.prefered_merchantarticle.external_id if product.prefered_merchantarticle else None),
create_dynamic_property('SupplierArticleNumber',
product.prefered_merchantarticle.external_id if product.prefered_merchantarticle else None),
]
base_unit_quantity = get_base_unit_quantity(product, product.article.gtin)
if base_unit_quantity is not None:
result.append(create_dynamic_property('KfpDfp', base_unit_quantity))
for detail in product.product_detail.all():
result.append(create_dynamic_property(
'OrderFactor', 1 if detail.orderfactor else 0, detail.store))
result.append(create_dynamic_property(
'BDMaterialNumber', detail.prefered_merchantarticle.external_id if detail.prefered_merchantarticle else None, detail.store))
result.append(create_dynamic_property(
'SupplierArticleNumber', detail.prefered_merchantarticle.external_id if detail.prefered_merchantarticle else None, detail.store))
base_unit_quantity = get_base_unit_quantity(
detail, product.article.gtin)
if base_unit_quantity is not None:
result.append(create_dynamic_property('KfpDfp', base_unit_quantity, detail.store))
return result
def get_base_unit_quantity(product, base_unit_gtin):
if product.prefered_merchantarticle is not None:
if product.prefered_merchantarticle.article.child_gtin == base_unit_gtin:
return product.prefered_merchantarticle.article.quantity_of_lower_layer
else:
upper_quantity = product.prefered_merchantarticle.article.quantity_of_lower_layer
next_lower_article = Article.objects.filter(
gtin=product.prefered_merchantarticle.article.child_gtin).first()
if next_lower_article is not None:
if next_lower_article.child_gtin == product.article.gtin:
return next_lower_article.quantity_of_lower_layer * upper_quantity
return None
def convert_unit(validoo_unit):
# data from prefilledautomaten.unit
unit_table = {
"H87": 1, # st, PIECES
"GRM": 2, # g, WEIGHT
"KGM": 3, # kg, WEIGHT
"DLT": 6, # dl, VOLUME
"LTR": 7, # L, VOLUME
"MLT": 10, # ml, VOLUME
"CLT": 11, # cl, VOLUME
"HGM": 12, # hg, WEIGHT
"G24": 13, # msk, VOLUME
"G25": 14, # tsk, VOLUME
# "???": 16, # st tekoppar, VOLUME
# "???": 17, # st kaffekoppar, VOLUME
# "???": 18, # glas, VOLUME
"MGM": 25, # mg, WEIGHT,
# "???": 26, # krm, VOLUME
# "???": 27, # st klyftor, PARTS,
# "???": 28, # st krukor, PIECES
# "???": 29, # st tärningar, PIECES
# "???": 30, # knippe, PIECES
}
if validoo_unit in unit_table:
return unit_table[validoo_unit]
return None
def convert_tags(product):
tags = filter(lambda tag: get_season_tag_name(tag.name) is
None and get_attribute_id(tag.name) is None, product.tags.all())
return list(map(lambda tag: tag.id, tags))
def convert_product(product):
from api.serializers import ProductSerializer
serializer = ProductSerializer(product)
article = product.article
image = product.productimage_set.first()
unit_id = convert_unit(serializer.data['net_content_unit_code'])
return filter_dict({
"ProductId": product.product_id, # int
"ProductName": serializer.data['name'], # string
"Quantity": serializer.data['net_content'], # float
# int
"UnitId": unit_id,
"DisplayUnitId": unit_id, # int
"CategoryId": product.product_category.id if product.product_category else None, # int
# "ProductGroupId": ???, # int
# "CalculatedWeight": ???, # float
# "RecommendedPrice": ???, # float
"VatRate": article.vat, # float
"EanCode": article.gtin, # string
# string
"ImageUrl": image.filename if image else None,
# "ProductUrl": ???, # string
# "SupplierId": ???, # int
# "MaximumOrder": ???, # float
"ProductDescription": serializer.data['description'], # string
# "UsageDescription": ???, # string
# string
"IngredientsDescription": serializer.data['ingredient_description'],
# string
"NutritionDescription": serializer.data['nutrition_description'],
# "StorageDescription": ???, # string
# "StoreVarmColdFrozen": ???, # string
# "PossibleToBuy": ???, # bool
# "IsOffer": ???, # bool
"RecycleFee": product.recycle_fee, # double
# "AmountInPackage": ???, # int
# "TempMostBought": ???, # int
# "ExternalComment": ???, # string
# "InternalComment": ???, # string
# "IsPickingCostIncluded": ???, # bool
# "IsDeliveryCostIncluded": ???, # bool
# "RatesSum": ???, # int
# "RatesCount": ???, # int
"OriginId": get_origin_id(product.origin), # int?
# "IsWine": ???, # bool
# "AxfoodSAPId": ???, # string
# "IsEcological": ???, # bool
# "RelatedProductIDs": ???, # string
"IsAdultProduct": product.adult_product, # bool
# "AutomaticSubscription": ???, # bool
# "IsAlreadyRenamed": ???, # bool
# "OriginalAfterRenameFileSize": ???, # string
# "OriginalCurrentFileSize": ???, # string
# "CreationDate": ???, # DateTime?
# "LastModifiedDate": ???, # DateTime?
# "LastUpdatedByUserId": ???, # int
# "RemovedDate": ???, # DateTime?
})
def convert_product_store(detail, product):
return filter_dict({
# "ProductStoreId": ???, # int
"ProductId": product.product_id, # int
"StoreId": detail.store, # int
# "LocalEancode": ???, # string
"CalculatedCustomerPrice": detail.price, # decimal
# "CalculatedCustomerPrice_Per_Unit": ???, # decimal
"IsOutOfStock": detail.status == 2, # bool
# "OutOfStockDate": ???, # DateTime
# "StockBackDate": ???, # DateTime
"IsReplacementProduct": detail.status == 3 # bool
# "IsApproximateWeight": ???, # bool
# "IsShowPricePerUnit": ???, # bool
# "PriceValidFrom": ???, # DateTime
# "PriceValidTo": ???, # DateTime
# "PriceIn": ???, # decimal
# "PercentageAddon": ???, # decimal
# "FixedAddon": ???, # decimal
# "PickingZone1": ???, # string
# "PickingZone2": ???, # string
# "PickingZone3": ???, # string
# "SoldCount": ???, # int
# "IsForeCastPriorityProduct": ???, # bool
# "DontShowAsMissedProduct": ???, # bool
# "StoreLevelOriginId": ???, # int?
# "PickingNote": ???, # string
# "AdvanceDeliveryMinimumOrder": ???, # int
# "MinimumRequiredDeliveryDays": ???, # byte
# "DeliverableWeekDays": ???, # string
# "DeliveryDaysAhead": ???, # int
# "CancelDaysBefore": ???, # int
# "StorePriceIn": ???, # decimal
# "CreationDate": ???, # DateTime?
# "LastModifiedDate": ???, # DateTime?
# "RemovedDate": ???, # DateTime?
# "CanSendAdvanceDeliveryEmail": ???, # bool
# "OldCalculatedCustomerPrice": ???, # decimal
})
def convert_product_stores(product):
return list(map(lambda x: convert_product_store(x, product), product.product_detail.all()))
| r_route_from_product_type(key):
t | identifier_name |
parser.go | package juniperUDP
import (
"path/filepath"
"os"
"log"
"fmt"
"runtime"
"time"
"reflect"
"strconv"
"github.com/golang/protobuf/jsonpb"
"encoding/json"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/telemetry_top"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/qmon" // blank import as it is only used to Unmarshal proto message
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/lsp_mon"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cmerror_data"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cmerror"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cpu_memory_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/fabric"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/firewall"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/inline_jflow"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/logical_port"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/lsp_stats"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/npu_memory_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/npu_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/optics"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/packet_stats"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/port_exp"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/port"
"github.com/golang/protobuf/proto"
_"strings"
)
// JuniperUDPParser is an object for Parsing incoming metrics.
type JuniperUDPParser struct {
// DefaultTags will be added to every parsed metric
DefaultTags map[string]string
}
/*
func (p *JuniperUDPParser) ParseWithDefaultTimePrecision(buf []byte, t time.Time, precision string) ([]telegraf.Metric, error) {
if !bytes.HasSuffix(buf, []byte("\n")) {
buf = append(buf, '\n')
}
// parse even if the buffer begins with a newline
buf = bytes.TrimPrefix(buf, []byte("\n"))
metrics, err := metric.ParseWithDefaultTimePrecision(buf, t, precision)
if len(p.DefaultTags) > 0 {
for _, m := range metrics {
for k, v := range p.DefaultTags {
// only set the default tag if it doesn't already exist:
if !m.HasTag(k) {
m.AddTag(k, v)
}
}
}
}
return metrics, err
}
*/
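// parseArray flattens every map element of a decoded JSON array via
// parseMap, keeping the caller's dotted key prefix. Scalar elements inside
// arrays are not handled and only trigger the error message below.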
func parseArray(data []interface{}, masterKey string) []interface{} {
var arrData []interface{}
for _,val := range data{
valType := reflect.ValueOf(val).Kind()
if valType == reflect.Map{
mapData := parseMap(val.(map[string]interface{}), masterKey)
for _,tmpData := range(mapData){
arrData = append(arrData, tmpData)
}
} else {fmt.Println("Error!! Leaf elements in arrays are not handled yet. Please open an issue.")}
}
return arrData
}
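// parseMap recursively flattens a nested map into a slice of flat field
// maps: nested keys are joined with ".", repeated structures are expanded
// into one row each, and scalar values at the current level are copied
// into every expanded row.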
func parseMap(data map[string]interface{}, masterKey string) []interface{} {
var leafData map[string]interface{}
var arrData []interface{}
var arrKey []string
var finalData []interface{}
var newMasterKey string
leafData = make(map[string]interface{})
for key,val := range data{
if masterKey == ""{
newMasterKey = key
} else {
newMasterKey = masterKey + "." + key
}
valType := reflect.ValueOf(val).Kind()
if valType == reflect.Map{
mapData := parseMap(val.(map[string]interface{}), newMasterKey)
if reflect.TypeOf(mapData).Kind() == reflect.Map{
var tmpArr []interface{}
tmpArr = append(tmpArr, mapData)
arrData = append(arrData, tmpArr)
} else if reflect.TypeOf(mapData).Kind() == reflect.Slice{
arrData = append(arrData, mapData)
}
arrKey = append(arrKey, newMasterKey)
} else if valType == reflect.Slice{
arrData = append(arrData, parseArray(val.([]interface{}), newMasterKey))
arrKey = append(arrKey, newMasterKey)
} else { leafData[newMasterKey] = val}
}
if len(leafData) != 0 {
for i,key := range arrKey{
_ = key
for _,data_aa := range arrData[i].([]interface{}){
leafTmp := leafData
if data_aa != nil {
if reflect.ValueOf(data_aa).Kind() == reflect.Map{
for key_aa, value_aa := range data_aa.(map[string]interface{}){
leafTmp[key_aa] = value_aa
}
finalData = append(finalData, make(map[string]interface{}))
for k,v := range leafTmp{
finalData[len(finalData)-1].(map[string]interface{})[k] = v
}
} else {
for _,data_ha := range data_aa.([]interface{}){
leafTmp = leafData
for key_aa,value_aa := range data_ha.(map[string]interface{}){
leafTmp[key_aa] = value_aa
}
finalData = append(finalData, make(map[string]interface{}))
for k,v := range leafTmp{
finalData[len(finalData)-1].(map[string]interface{})[k] = v
}
}
}
}
}
}
} else {finalData = arrData}
arrData = arrData[:0]
if (len(finalData) == 0) && (len(leafData)!= 0) {
finalData = append(finalData, leafData)
}
return finalData
}
// Parse decodes a Juniper UDP telemetry packet: buf is expected to hold a
// GPB-encoded TelemetryStream message whose per-sensor payload is
// flattened into one telegraf metric per expanded row (see parseMap).
// A non-nil error is returned if the protobuf payload cannot be
// unmarshalled.
func (p *JuniperUDPParser) Parse(buf []byte) ([]telegraf.Metric, error) {
//out, _ := os.Create("telegraf_udp.log")
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
path := dir+"/logs"
if _, err := os.Stat(path); os.IsNotExist(err) {
os.Mkdir(path, 0777)
}
out, errFile := os.OpenFile("logs/telegraf_udp.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if errFile != nil {
log.Fatal(errFile)
}
prefix := ""
flag := log.LstdFlags | log.Lmicroseconds | log.Lshortfile
newLog := log.New(out, prefix, flag)
newLog.Printf("Data byte buffer received!!\n")
sensorMapping := map[string]string{"jnpr_interface_ext" : "/junos/system/linecard/interface/",
"cpu_memory_util_ext" : "/junos/system/linecard/cpu/memory/",
"fabricMessageExt" : "/junos/system/linecard/fabric",
"jnpr_firewall_ext" : "/junos/system/linecard/firewall/",
"jnprLogicalInterfaceExt" : "/junos/system/linecard/interface/logical/usage/",
"npu_memory_ext" : "/junos/system/linecard/npu/memory/",
"jnpr_npu_utilization_ext" : "/junos/system/linecard/npu/utilization/",
"jnpr_optics_ext" : "/junos/system/linecard/optics/",
"jnpr_packet_statistics_ext" : "/junos/system/linecard/packet/usage/",
"jnpr_qmon_ext" : "/junos/system/linecard/qmon/",
"inline_jflow_stats_ext" : "/junos/system/linecard/services/inline-jflow/",
"jnpr_cmerror_data_ext" : "NA",
"jnpr_cmerror_ext" : "NA",
"jnpr_lsp_statistics_ext" : "NA",
"jnpr_interface_exp_ext" : "NA",
"jnpr_sr_stats_per_if_egress_ext" : "NA",
"jnpr_sr_stats_per_if_ingress_ext" : "NA",
"jnpr_sr_stats_per_sid_ext" : "NA",
}
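// sensorMapping documents the Junos resource path for each protobuf
// extension name; it is informational only and deliberately left unused,
// as the blank assignment below indicates.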
_ = sensorMapping
go func() {
for {
var m runtime.MemStats
runtime.ReadMemStats(&m)
//log.Printf("\nAlloc = %v\nTotalAlloc = %v\nSys = %v\nNumGC = %v\n\n", m.Alloc / 1024, m.TotalAlloc / 1024, m.Sys / 1024, m.NumGC)
time.Sleep(5 * time.Second)
}
}()
//s := string(buf[:len(buf)])
//fmt.Println("#######################################################################")
//fmt.Printf("%v",s)
//fmt.Println("#######################################################################")
ts := &telemetry_top.TelemetryStream{}
if err := proto.Unmarshal(buf, ts); err != nil {
fmt.Println("Error!! Unable to parse data: ", err)
return nil,err
}
//fmt.Printf("%v",ts)
host,errHost := os.Hostname()
_ = host
if errHost != nil {
fmt.Println("Error!! Host name not found: ", errHost)
return nil, errHost
}
deviceName := ts.GetSystemId()
newLog.Printf("Device : %v", deviceName)
newLog.Printf("Host : %v", host)
gpbTime := ts.GetTimestamp()
measurementPrefix := "enterprise.juniperNetworks"
jnprSensorName := ts.GetSensorName()
sensorName := jnprSensorName
_ = gpbTime
_ = measurementPrefix
_ = jnprSensorName
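// Instead of reading the protobuf extensions directly, the TelemetryStream
// is round-tripped through jsonpb so the sensor payload can be flattened
// generically by parseMap below.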
m := &jsonpb.Marshaler{}
tsJSON,err := m.MarshalToString(ts)
if err!= nil{
fmt.Println("Error!! ", err)
}
var data map[string]interface{}
errU := json.Unmarshal([]byte(tsJSON), &data)
if errU != nil {
fmt.Println("Error!! Unable to unmarshal: ",errU)
return nil, nil
//panic(errU)
}
//newLog.Printf("Data received : %v", data)
enterpriseSensorData := data["enterprise"]
sensorData, ok := enterpriseSensorData.(map[string]interface{})
jnprSensorData := sensorData["[juniperNetworks]"]
if !ok {
return nil, fmt.Errorf("enterprise sensor data is not a map")
}
metrics := make([]telegraf.Metric, 0)
sensorNum := 0 | sequenceNum := 0
for key, sensorData := range jnprSensorData.(map[string]interface{}){
var fields map[string]interface{}
if reflect.ValueOf(sensorData).Kind() == reflect.Map {
_ = sensorName
sensorName = key[1:len(key)-1]
var measurementName string
measurementName = sensorName
/*
if val, ok := sensorMapping[sensorName]; ok {
measurementName = val
} else {
measurementName = sensorName
}
*/
newLog.Printf("Sensor : %v", measurementName)
measurementName = "juniperNetworks." + measurementName
newLog.Printf("Measurement : %v", measurementName)
//newLog.Printf("Data received : %v", data)
parsedData := parseMap(sensorData.(map[string]interface{}), "")
for _,finalData := range(parsedData){
//sequenceNum := 0
for _,fin := range(finalData.([]interface{})){
//fin = fin.(map[string] interface{})
fin.(map[string]interface{})["device"] = deviceName
//fin.(map[string]interface{})["host"] = host
fin.(map[string]interface{})["sensor_name"] = sensorName
//fin.(map[string]interface{})["_seq"] = sequenceNum
fields = fin.(map[string]interface{})
tags := make(map[string]string)
for k, v := range p.DefaultTags {
tags[k] = v
}
tags["_seq"] = strconv.Itoa(sequenceNum)
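// gpbTime is in milliseconds since the epoch; split it into seconds and
// nanoseconds for time.Unix.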
timestamp := time.Unix(int64(gpbTime)/1000, int64(gpbTime)%1000*1000000)
mtrc,err := metric.New(measurementName, tags, fields,timestamp)
metrics = append(metrics, mtrc)
if err!=nil {
fmt.Println("Error!! Unable to create telegraf metrics: ", err)
}
sensorNum++
sequenceNum++
}
}
}
}
// fmt.Printf("\nData (JSON) = \n%s\n", data)
// fmt.Println("\nJuniper Sensor Name: \n%s\n", jnprSensorName)
// fmt.Println("\nDevice name: \n%s\n", deviceName)
// fmt.Println("\nGPB time: \n%s\n", gpbTime)
// fmt.Println(measurementPrefix)
// fmt.Println("\nMetrics: \n")
// fmt.Println(metrics)
newLog.Printf("Parsed Data : %v\n", metrics)
if errFileClose := out.Close(); errFileClose != nil {
log.Fatal(errFileClose)
}
return metrics, err
// return p.ParseWithDefaultTimePrecision(buf, time.Now(), "")
}
func (p *JuniperUDPParser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line + "\n"))
if err != nil {
return nil, err
}
if len(metrics) < 1 {
return nil, fmt.Errorf(
"cannot parse the line: %s, for data format: juniperUDP", line)
}
return metrics[0], nil
}
func (p *JuniperUDPParser) SetDefaultTags(tags map[string]string) {
p.DefaultTags = tags
} | random_line_split |
|
parser.go | package juniperUDP
import (
"path/filepath"
"os"
"log"
"fmt"
"runtime"
"time"
"reflect"
"strconv"
"github.com/golang/protobuf/jsonpb"
"encoding/json"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/telemetry_top"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/qmon" // blank import as it is only used to Unmarshal proto message
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/lsp_mon"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cmerror_data"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cmerror"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cpu_memory_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/fabric"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/firewall"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/inline_jflow"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/logical_port"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/lsp_stats"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/npu_memory_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/npu_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/optics"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/packet_stats"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/port_exp"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/port"
"github.com/golang/protobuf/proto"
_"strings"
)
// JuniperUDPParser is an object for Parsing incoming metrics.
type JuniperUDPParser struct {
// DefaultTags will be added to every parsed metric
DefaultTags map[string]string
}
/*
func (p *JuniperUDPParser) ParseWithDefaultTimePrecision(buf []byte, t time.Time, precision string) ([]telegraf.Metric, error) {
if !bytes.HasSuffix(buf, []byte("\n")) {
buf = append(buf, '\n')
}
// parse even if the buffer begins with a newline
buf = bytes.TrimPrefix(buf, []byte("\n"))
metrics, err := metric.ParseWithDefaultTimePrecision(buf, t, precision)
if len(p.DefaultTags) > 0 {
for _, m := range metrics {
for k, v := range p.DefaultTags {
// only set the default tag if it doesn't already exist:
if !m.HasTag(k) {
m.AddTag(k, v)
}
}
}
}
return metrics, err
}
*/
func parseArray(data []interface{}, masterKey string) []interface{} {
var arrData []interface{}
for _,val := range data{
valType := reflect.ValueOf(val).Kind()
if valType == reflect.Map{
mapData := parseMap(val.(map[string]interface{}), masterKey)
for _,tmpData := range(mapData){
arrData = append(arrData, tmpData)
}
} else {fmt.Println("Error!! Leaf elements in arrays are not handled yet. Please open an issue.")}
}
return arrData
}
func parseMap(data map[string]interface{}, masterKey string) []interface{} {
var leafData map[string]interface{}
var arrData []interface{}
var arrKey []string
var finalData []interface{}
var newMasterKey string
leafData = make(map[string]interface{})
for key,val := range data{
if masterKey == ""{
newMasterKey = key
} else {
newMasterKey = masterKey + "." + key
}
valType := reflect.ValueOf(val).Kind()
if valType == reflect.Map{
mapData := parseMap(val.(map[string]interface{}), newMasterKey)
if reflect.TypeOf(mapData).Kind() == reflect.Map{
var tmpArr []interface{}
tmpArr = append(tmpArr, mapData)
arrData = append(arrData, tmpArr)
} else if reflect.TypeOf(mapData).Kind() == reflect.Slice{
arrData = append(arrData, mapData)
}
arrKey = append(arrKey, newMasterKey)
} else if valType == reflect.Slice{
arrData = append(arrData, parseArray(val.([]interface{}), newMasterKey))
arrKey = append(arrKey, newMasterKey)
} else { leafData[newMasterKey] = val}
}
if len(leafData) != 0 {
for i,key := range arrKey{
_ = key
for _,data_aa := range arrData[i].([]interface{}){
leafTmp := leafData
if data_aa != nil {
if reflect.ValueOf(data_aa).Kind() == reflect.Map{
for key_aa, value_aa := range data_aa.(map[string]interface{}){
leafTmp[key_aa] = value_aa
}
finalData = append(finalData, make(map[string]interface{}))
for k,v := range leafTmp{
finalData[len(finalData)-1].(map[string]interface{})[k] = v
}
} else {
for _,data_ha := range data_aa.([]interface{}){
leafTmp = leafData
for key_aa,value_aa := range data_ha.(map[string]interface{}){
leafTmp[key_aa] = value_aa
}
finalData = append(finalData, make(map[string]interface{}))
for k,v := range leafTmp{
finalData[len(finalData)-1].(map[string]interface{})[k] = v
}
}
}
}
}
}
} else {finalData = arrData}
arrData = arrData[:0]
if (len(finalData) == 0) && (len(leafData)!= 0) {
finalData = append(finalData, leafData)
}
return finalData
}
// Parse decodes a Juniper UDP telemetry packet: buf is expected to hold a
// GPB-encoded TelemetryStream message whose per-sensor payload is
// flattened into one telegraf metric per expanded row (see parseMap).
// A non-nil error is returned if the protobuf payload cannot be
// unmarshalled.
func (p *JuniperUDPParser) Parse(buf []byte) ([]telegraf.Metric, error) |
func (p *JuniperUDPParser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line + "\n"))
if err != nil {
return nil, err
}
if len(metrics) < 1 {
return nil, fmt.Errorf(
"cannot parse the line: %s, for data format: juniperUDP", line)
}
return metrics[0], nil
}
func (p *JuniperUDPParser) SetDefaultTags(tags map[string]string) {
p.DefaultTags = tags
}
| {
//out, _ := os.Create("telegraf_udp.log")
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
path := dir+"/logs"
if _, err := os.Stat(path); os.IsNotExist(err) {
os.Mkdir(path, 0777)
}
out, errFile := os.OpenFile("logs/telegraf_udp.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if errFile != nil {
log.Fatal(errFile)
}
prefix := ""
flag := log.LstdFlags | log.Lmicroseconds | log.Lshortfile
newLog := log.New(out, prefix, flag)
newLog.Printf("Data byte buffer received!!\n")
sensorMapping := map[string]string{"jnpr_interface_ext" : "/junos/system/linecard/interface/",
"cpu_memory_util_ext" : "/junos/system/linecard/cpu/memory/",
"fabricMessageExt" : "/junos/system/linecard/fabric",
"jnpr_firewall_ext" : "/junos/system/linecard/firewall/",
"jnprLogicalInterfaceExt" : "/junos/system/linecard/interface/logical/usage/",
"npu_memory_ext" : "/junos/system/linecard/npu/memory/",
"jnpr_npu_utilization_ext" : "/junos/system/linecard/npu/utilization/",
"jnpr_optics_ext" : "/junos/system/linecard/optics/",
"jnpr_packet_statistics_ext" : "/junos/system/linecard/packet/usage/",
"jnpr_qmon_ext" : "/junos/system/linecard/qmon/",
"inline_jflow_stats_ext" : "/junos/system/linecard/services/inline-jflow/",
"jnpr_cmerror_data_ext" : "NA",
"jnpr_cmerror_ext" : "NA",
"jnpr_lsp_statistics_ext" : "NA",
"jnpr_interface_exp_ext" : "NA",
"jnpr_sr_stats_per_if_egress_ext" : "NA",
"jnpr_sr_stats_per_if_ingress_ext" : "NA",
"jnpr_sr_stats_per_sid_ext" : "NA",
}
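// sensorMapping relates each protobuf extension name to its Junos sensor
// resource path; it is kept for reference only (note the blank assignment
// that follows).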
_ = sensorMapping
go func() {
for {
var m runtime.MemStats
runtime.ReadMemStats(&m)
//log.Printf("\nAlloc = %v\nTotalAlloc = %v\nSys = %v\nNumGC = %v\n\n", m.Alloc / 1024, m.TotalAlloc / 1024, m.Sys / 1024, m.NumGC)
time.Sleep(5 * time.Second)
}
}()
//s := string(buf[:len(buf)])
//fmt.Println("#######################################################################")
//fmt.Printf("%v",s)
//fmt.Println("#######################################################################")
ts := &telemetry_top.TelemetryStream{}
if err := proto.Unmarshal(buf, ts); err != nil {
fmt.Println("Error!! Unable to parse data: ", err)
return nil,err
}
//fmt.Printf("%v",ts)
host,errHost := os.Hostname()
_ = host
if errHost != nil {
fmt.Println("Error!! Host name not found: ", errHost)
return nil, errHost
}
deviceName := ts.GetSystemId()
newLog.Printf("Device : %v", deviceName)
newLog.Printf("Host : %v", host)
gpbTime := ts.GetTimestamp()
measurementPrefix := "enterprise.juniperNetworks"
jnprSensorName := ts.GetSensorName()
sensorName := jnprSensorName
_ = gpbTime
_ = measurementPrefix
_ = jnprSensorName
m := &jsonpb.Marshaler{}
tsJSON,err := m.MarshalToString(ts)
if err!= nil{
fmt.Println("Error!! ", err)
}
var data map[string]interface{}
errU := json.Unmarshal([]byte(tsJSON), &data)
if errU != nil {
fmt.Println("Error!! Unable to unmarshal: ",errU)
return nil, nil
//panic(errU)
}
//newLog.Printf("Data received : %v", data)
enterpriseSensorData := data["enterprise"]
sensorData, ok := enterpriseSensorData.(map[string]interface{})
jnprSensorData := sensorData["[juniperNetworks]"]
if !ok {
return nil, fmt.Errorf("enterprise sensor data is not a map")
}
metrics := make([]telegraf.Metric, 0)
sensorNum := 0
sequenceNum := 0
for key, sensorData := range jnprSensorData.(map[string]interface{}){
var fields map[string]interface{}
if reflect.ValueOf(sensorData).Kind() == reflect.Map {
_ = sensorName
sensorName = key[1:len(key)-1]
var measurementName string
measurementName = sensorName
/*
if val, ok := sensorMapping[sensorName]; ok {
measurementName = val
} else {
measurementName = sensorName
}
*/
newLog.Printf("Sensor : %v", measurementName)
measurementName = "juniperNetworks." + measurementName
newLog.Printf("Measurement : %v", measurementName)
//newLog.Printf("Data received : %v", data)
parsedData := parseMap(sensorData.(map[string]interface{}), "")
for _,finalData := range(parsedData){
//sequenceNum := 0
for _,fin := range(finalData.([]interface{})){
//fin = fin.(map[string] interface{})
fin.(map[string]interface{})["device"] = deviceName
//fin.(map[string]interface{})["host"] = host
fin.(map[string]interface{})["sensor_name"] = sensorName
//fin.(map[string]interface{})["_seq"] = sequenceNum
fields = fin.(map[string]interface{})
tags := make(map[string]string)
for k, v := range p.DefaultTags {
tags[k] = v
}
tags["_seq"] = strconv.Itoa(sequenceNum)
timestamp := time.Unix(int64(gpbTime)/1000, int64(gpbTime)%1000*1000000)
mtrc,err := metric.New(measurementName, tags, fields,timestamp)
metrics = append(metrics, mtrc)
if err!=nil {
fmt.Println("Error!! Unable to create telegraf metrics: ", err)
}
sensorNum++
sequenceNum++
}
}
}
}
// fmt.Printf("\nData (JSON) = \n%s\n", data)
// fmt.Println("\nJuniper Sensor Name: \n%s\n", jnprSensorName)
// fmt.Println("\nDevice name: \n%s\n", deviceName)
// fmt.Println("\nGPB time: \n%s\n", gpbTime)
// fmt.Println(measurementPrefix)
// fmt.Println("\nMetrics: \n")
// fmt.Println(metrics)
newLog.Printf("Parsed Data : %v\n", metrics)
if errFileClose := out.Close(); errFileClose != nil {
log.Fatal(errFileClose)
}
return metrics, err
// return p.ParseWithDefaultTimePrecision(buf, time.Now(), "")
} | identifier_body |
parser.go | package juniperUDP
import (
"path/filepath"
"os"
"log"
"fmt"
"runtime"
"time"
"reflect"
"strconv"
"github.com/golang/protobuf/jsonpb"
"encoding/json"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/telemetry_top"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/qmon" // blank import as it is only used to Unmarshal proto message
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/lsp_mon"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cmerror_data"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cmerror"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cpu_memory_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/fabric"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/firewall"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/inline_jflow"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/logical_port"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/lsp_stats"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/npu_memory_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/npu_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/optics"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/packet_stats"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/port_exp"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/port"
"github.com/golang/protobuf/proto"
_"strings"
)
// JuniperUDPParser is an object for Parsing incoming metrics.
type JuniperUDPParser struct {
// DefaultTags will be added to every parsed metric
DefaultTags map[string]string
}
/*
func (p *JuniperUDPParser) ParseWithDefaultTimePrecision(buf []byte, t time.Time, precision string) ([]telegraf.Metric, error) {
if !bytes.HasSuffix(buf, []byte("\n")) {
buf = append(buf, '\n')
}
// parse even if the buffer begins with a newline
buf = bytes.TrimPrefix(buf, []byte("\n"))
metrics, err := metric.ParseWithDefaultTimePrecision(buf, t, precision)
if len(p.DefaultTags) > 0 {
for _, m := range metrics {
for k, v := range p.DefaultTags {
// only set the default tag if it doesn't already exist:
if !m.HasTag(k) {
m.AddTag(k, v)
}
}
}
}
return metrics, err
}
*/
func parseArray(data []interface{}, masterKey string) []interface{} {
var arrData []interface{}
for _,val := range data{
valType := reflect.ValueOf(val).Kind()
if valType == reflect.Map{
mapData := parseMap(val.(map[string]interface{}), masterKey)
for _,tmpData := range(mapData){
arrData = append(arrData, tmpData)
}
} else {fmt.Println("Error!! Leaf elements in arrays are not handled yet. Please open an issue.")}
}
return arrData
}
func parseMap(data map[string]interface{}, masterKey string) []interface{} {
var leafData map[string]interface{}
var arrData []interface{}
var arrKey []string
var finalData []interface{}
var newMasterKey string
leafData = make(map[string]interface{})
for key,val := range data{
if masterKey == ""{
newMasterKey = key
} else {
newMasterKey = masterKey + "." + key
}
valType := reflect.ValueOf(val).Kind()
if valType == reflect.Map{
mapData := parseMap(val.(map[string]interface{}), newMasterKey)
if reflect.TypeOf(mapData).Kind() == reflect.Map{
var tmpArr []interface{}
tmpArr = append(tmpArr, mapData)
arrData = append(arrData, tmpArr)
} else if reflect.TypeOf(mapData).Kind() == reflect.Slice{
arrData = append(arrData, mapData)
}
arrKey = append(arrKey, newMasterKey)
} else if valType == reflect.Slice{
arrData = append(arrData, parseArray(val.([]interface{}), newMasterKey))
arrKey = append(arrKey, newMasterKey)
} else { leafData[newMasterKey] = val}
}
if len(leafData) != 0 {
for i,key := range arrKey{
_ = key
for _,data_aa := range arrData[i].([]interface{}){
leafTmp := leafData
if data_aa != nil {
if reflect.ValueOf(data_aa).Kind() == reflect.Map{
for key_aa, value_aa := range data_aa.(map[string]interface{}){
leafTmp[key_aa] = value_aa
}
finalData = append(finalData, make(map[string]interface{}))
for k,v := range leafTmp{
finalData[len(finalData)-1].(map[string]interface{})[k] = v
}
} else {
for _,data_ha := range data_aa.([]interface{}){
leafTmp = leafData
for key_aa,value_aa := range data_ha.(map[string]interface{}){
leafTmp[key_aa] = value_aa
}
finalData = append(finalData, make(map[string]interface{}))
for k,v := range leafTmp{
finalData[len(finalData)-1].(map[string]interface{})[k] = v
}
}
}
}
}
}
} else {finalData = arrData}
arrData = arrData[:0]
if (len(finalData) == 0) && (len(leafData)!= 0) {
finalData = append(finalData, leafData)
}
return finalData
}
// Parse decodes a Juniper UDP telemetry packet: buf is expected to hold a
// GPB-encoded TelemetryStream message whose per-sensor payload is
// flattened into one telegraf metric per expanded row (see parseMap).
// A non-nil error is returned if the protobuf payload cannot be
// unmarshalled.
func (p *JuniperUDPParser) Parse(buf []byte) ([]telegraf.Metric, error) {
//out, _ := os.Create("telegraf_udp.log")
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
path := dir+"/logs"
if _, err := os.Stat(path); os.IsNotExist(err) {
os.Mkdir(path, 0777)
}
out, errFile := os.OpenFile("logs/telegraf_udp.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if errFile != nil {
log.Fatal(errFile)
}
prefix := ""
flag := log.LstdFlags | log.Lmicroseconds | log.Lshortfile
newLog := log.New(out, prefix, flag)
newLog.Printf("Data byte buffer received!!\n")
sensorMapping := map[string]string{"jnpr_interface_ext" : "/junos/system/linecard/interface/",
"cpu_memory_util_ext" : "/junos/system/linecard/cpu/memory/",
"fabricMessageExt" : "/junos/system/linecard/fabric",
"jnpr_firewall_ext" : "/junos/system/linecard/firewall/",
"jnprLogicalInterfaceExt" : "/junos/system/linecard/interface/logical/usage/",
"npu_memory_ext" : "/junos/system/linecard/npu/memory/",
"jnpr_npu_utilization_ext" : "/junos/system/linecard/npu/utilization/",
"jnpr_optics_ext" : "/junos/system/linecard/optics/",
"jnpr_packet_statistics_ext" : "/junos/system/linecard/packet/usage/",
"jnpr_qmon_ext" : "/junos/system/linecard/qmon/",
"inline_jflow_stats_ext" : "/junos/system/linecard/services/inline-jflow/",
"jnpr_cmerror_data_ext" : "NA",
"jnpr_cmerror_ext" : "NA",
"jnpr_lsp_statistics_ext" : "NA",
"jnpr_interface_exp_ext" : "NA",
"jnpr_sr_stats_per_if_egress_ext" : "NA",
"jnpr_sr_stats_per_if_ingress_ext" : "NA",
"jnpr_sr_stats_per_sid_ext" : "NA",
}
_ = sensorMapping
go func() {
for {
var m runtime.MemStats
runtime.ReadMemStats(&m)
//log.Printf("\nAlloc = %v\nTotalAlloc = %v\nSys = %v\nNumGC = %v\n\n", m.Alloc / 1024, m.TotalAlloc / 1024, m.Sys / 1024, m.NumGC)
time.Sleep(5 * time.Second)
}
}()
//s := string(buf[:len(buf)])
//fmt.Println("#######################################################################")
//fmt.Printf("%v",s)
//fmt.Println("#######################################################################")
ts := &telemetry_top.TelemetryStream{}
if err := proto.Unmarshal(buf, ts); err != nil {
fmt.Println("Error!! Unable to parse data: ", err)
return nil,err
}
//fmt.Printf("%v",ts)
host,errHost := os.Hostname()
_ = host
if errHost != nil {
fmt.Println("Error!! Host name not found: ", errHost)
return nil, errHost
}
deviceName := ts.GetSystemId()
newLog.Printf("Device : %v", deviceName)
newLog.Printf("Host : %v", host)
gpbTime := ts.GetTimestamp()
measurementPrefix := "enterprise.juniperNetworks"
jnprSensorName := ts.GetSensorName()
sensorName := jnprSensorName
_ = gpbTime
_ = measurementPrefix
_ = jnprSensorName
m := &jsonpb.Marshaler{}
tsJSON,err := m.MarshalToString(ts)
if err!= nil{
fmt.Println("Error!! ", err)
}
var data map[string]interface{}
errU := json.Unmarshal([]byte(tsJSON), &data)
if errU != nil {
fmt.Println("Error!! Unable to unmarshal: ",errU)
return nil, nil
//panic(errU)
}
//newLog.Printf("Data received : %v", data)
enterpriseSensorData := data["enterprise"]
sensorData, ok := enterpriseSensorData.(map[string]interface{})
jnprSensorData := sensorData["[juniperNetworks]"]
if !ok {
return nil, fmt.Errorf("enterprise sensor data is not a map")
}
metrics := make([]telegraf.Metric, 0)
sensorNum := 0
sequenceNum := 0
for key, sensorData := range jnprSensorData.(map[string]interface{}) |
// fmt.Printf("\nData (JSON) = \n%s\n", data)
// fmt.Println("\nJuniper Sensor Name: \n%s\n", jnprSensorName)
// fmt.Println("\nDevice name: \n%s\n", deviceName)
// fmt.Println("\nGPB time: \n%s\n", gpbTime)
// fmt.Println(measurementPrefix)
// fmt.Println("\nMetrics: \n")
// fmt.Println(metrics)
newLog.Printf("Parsed Data : %v\n", metrics)
if errFileClose := out.Close(); errFileClose != nil {
log.Fatal(errFileClose)
}
return metrics, err
// return p.ParseWithDefaultTimePrecision(buf, time.Now(), "")
}
func (p *JuniperUDPParser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line + "\n"))
if err != nil {
return nil, err
}
if len(metrics) < 1 {
return nil, fmt.Errorf(
"cannot parse the line: %s, for data format: juniperUDP", line)
}
return metrics[0], nil
}
func (p *JuniperUDPParser) SetDefaultTags(tags map[string]string) {
p.DefaultTags = tags
}
| {
var fields map[string]interface{}
if reflect.ValueOf(sensorData).Kind() == reflect.Map {
_ = sensorName
sensorName = key[1:len(key)-1]
var measurementName string
measurementName = sensorName
/*
if val, ok := sensorMapping[sensorName]; ok {
measurementName = val
} else {
measurementName = sensorName
}
*/
newLog.Printf("Sensor : %v", measurementName)
measurementName = "juniperNetworks." + measurementName
newLog.Printf("Measurement : %v", measurementName)
//newLog.Printf("Data received : %v", data)
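// Flatten this sensor's nested JSON into rows; each row becomes one
// telegraf metric tagged with an increasing _seq sequence number.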
parsedData := parseMap(sensorData.(map[string]interface{}), "")
for _,finalData := range(parsedData){
//sequenceNum := 0
for _,fin := range(finalData.([]interface{})){
//fin = fin.(map[string] interface{})
fin.(map[string]interface{})["device"] = deviceName
//fin.(map[string]interface{})["host"] = host
fin.(map[string]interface{})["sensor_name"] = sensorName
//fin.(map[string]interface{})["_seq"] = sequenceNum
fields = fin.(map[string]interface{})
tags := make(map[string]string)
for k, v := range p.DefaultTags {
tags[k] = v
}
tags["_seq"] = strconv.Itoa(sequenceNum)
timestamp := time.Unix(int64(gpbTime)/1000, int64(gpbTime)%1000*1000000)
mtrc,err := metric.New(measurementName, tags, fields,timestamp)
metrics = append(metrics, mtrc)
if err!=nil {
fmt.Println("Error!! Unable to create telegraf metrics: ", err)
}
sensorNum++
sequenceNum++
}
}
}
} | conditional_block |
parser.go | package juniperUDP
import (
"path/filepath"
"os"
"log"
"fmt"
"runtime"
"time"
"reflect"
"strconv"
"github.com/golang/protobuf/jsonpb"
"encoding/json"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/telemetry_top"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/qmon" // blank import as it is only used to Unmarshal proto message
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/lsp_mon"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cmerror_data"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cmerror"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/cpu_memory_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/fabric"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/firewall"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/inline_jflow"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/logical_port"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/lsp_stats"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/npu_memory_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/npu_utilization"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/optics"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/packet_stats"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/port_exp"
_"github.com/influxdata/telegraf/plugins/parsers/juniperUDP/port"
"github.com/golang/protobuf/proto"
_"strings"
)
// JuniperUDPParser is an object for Parsing incoming metrics.
type JuniperUDPParser struct {
// DefaultTags will be added to every parsed metric
DefaultTags map[string]string
}
/*
func (p *JuniperUDPParser) ParseWithDefaultTimePrecision(buf []byte, t time.Time, precision string) ([]telegraf.Metric, error) {
if !bytes.HasSuffix(buf, []byte("\n")) {
buf = append(buf, '\n')
}
// parse even if the buffer begins with a newline
buf = bytes.TrimPrefix(buf, []byte("\n"))
metrics, err := metric.ParseWithDefaultTimePrecision(buf, t, precision)
if len(p.DefaultTags) > 0 {
for _, m := range metrics {
for k, v := range p.DefaultTags {
// only set the default tag if it doesn't already exist:
if !m.HasTag(k) {
m.AddTag(k, v)
}
}
}
}
return metrics, err
}
*/
func parseArray(data []interface{}, masterKey string) []interface{} {
var arrData []interface{}
for _,val := range data{
valType := reflect.ValueOf(val).Kind()
if valType == reflect.Map{
mapData := parseMap(val.(map[string]interface{}), masterKey)
for _,tmpData := range(mapData){
arrData = append(arrData, tmpData)
}
} else {fmt.Println("Error!!!! Leaf elements in array are not implemented. Please open an issue.")}
}
return arrData
}
func parseMap(data map[string]interface{}, masterKey string) []interface{} {
var leafData map[string]interface{}
var arrData []interface{}
var arrKey []string
var finalData []interface{}
var newMasterKey string
leafData = make(map[string]interface{})
for key,val := range data{
if masterKey == ""{
newMasterKey = key
} else {
newMasterKey = masterKey + "." + key
}
valType := reflect.ValueOf(val).Kind()
if valType == reflect.Map{
mapData := parseMap(val.(map[string]interface{}), newMasterKey)
if reflect.TypeOf(mapData).Kind() == reflect.Map{
var tmpArr []interface{}
tmpArr = append(tmpArr, mapData)
arrData = append(arrData, tmpArr)
} else if reflect.TypeOf(mapData).Kind() == reflect.Slice{
arrData = append(arrData, mapData)
}
arrKey = append(arrKey, newMasterKey)
} else if valType == reflect.Slice{
arrData = append(arrData, parseArray(val.([]interface{}), newMasterKey))
arrKey = append(arrKey, newMasterKey)
} else { leafData[newMasterKey] = val}
}
if len(leafData) != 0 {
for i,key := range arrKey{
_ = key
for _,data_aa := range arrData[i].([]interface{}){
leafTmp := leafData
if data_aa != nil {
if reflect.ValueOf(data_aa).Kind() == reflect.Map{
for key_aa, value_aa := range data_aa.(map[string]interface{}){
leafTmp[key_aa] = value_aa
}
finalData = append(finalData, make(map[string]interface{}))
for k,v := range leafTmp{
finalData[len(finalData)-1].(map[string]interface{})[k] = v
}
} else {
for _,data_ha := range data_aa.([]interface{}){
leafTmp = leafData
for key_aa,value_aa := range data_ha.(map[string]interface{}){
leafTmp[key_aa] = value_aa
}
finalData = append(finalData, make(map[string]interface{}))
for k,v := range leafTmp{
finalData[len(finalData)-1].(map[string]interface{})[k] = v
}
}
}
}
}
}
} else {finalData = arrData}
arrData = arrData[:0]
if (len(finalData) == 0) && (len(leafData)!= 0) {
finalData = append(finalData, leafData)
}
return finalData
}
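// Illustrative sketch of the flattening above (hypothetical payload, not from a real sensor):
// parseMap(map[string]interface{}{
//     "interface_stats": []interface{}{
//         map[string]interface{}{"if_name": "xe-0/0/0", "rx_packets": 100},
//         map[string]interface{}{"if_name": "xe-0/0/1", "rx_packets": 200},
//     },
// }, "")
// returns a slice holding one inner slice of flattened rows:
// [[
//   map[interface_stats.if_name:xe-0/0/0 interface_stats.rx_packets:100]
//   map[interface_stats.if_name:xe-0/0/1 interface_stats.rx_packets:200]
// ]]
// Parse below relies on this shape when it asserts finalData.([]interface{}).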
// Parse decodes a Juniper UDP telemetry protobuf payload (TelemetryStream)
// and returns a slice of telegraf Metrics, one flattened metric per sensor record.
// If the payload cannot be decoded, a non-nil error is returned alongside a nil slice.
func (p *JuniperUDPParser) Parse(buf []byte) ([]telegraf.Metric, error) {
//out, _ := os.Create("telegraf_udp.log")
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
path := dir+"/logs"
if _, err := os.Stat(path); os.IsNotExist(err) {
os.Mkdir(path, 0777)
}
out, errFile := os.OpenFile(path+"/telegraf_udp.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if errFile != nil {
log.Fatal(errFile)
}
prefix := ""
flag := log.LstdFlags | log.Lmicroseconds | log.Lshortfile
newLog := log.New(out, prefix, flag)
newLog.Printf("Data byte buffer received!!\n")
sensorMapping := map[string]string{"jnpr_interface_ext" : "/junos/system/linecard/interface/",
"cpu_memory_util_ext" : "/junos/system/linecard/cpu/memory/",
"fabricMessageExt" : "/junos/system/linecard/fabric",
"jnpr_firewall_ext" : "/junos/system/linecard/firewall/",
"jnprLogicalInterfaceExt" : "/junos/system/linecard/interface/logical/usage/",
"npu_memory_ext" : "/junos/system/linecard/npu/memory/",
"jnpr_npu_utilization_ext" : "/junos/system/linecard/npu/utilization/",
"jnpr_optics_ext" : "/junos/system/linecard/optics/",
"jnpr_packet_statistics_ext" : "/junos/system/linecard/packet/usage/",
"jnpr_qmon_ext" : "/junos/system/linecard/qmon/",
"inline_jflow_stats_ext" : "/junos/system/linecard/services/inline-jflow/",
"jnpr_cmerror_data_ext" : "NA",
"jnpr_cmerror_ext" : "NA",
"jnpr_lsp_statistics_ext" : "NA",
"jnpr_interface_exp_ext" : "NA",
"jnpr_sr_stats_per_if_egress_ext" : "NA",
"jnpr_sr_stats_per_if_ingress_ext" : "NA",
"jnpr_sr_stats_per_sid_ext" : "NA",
}
_ = sensorMapping
go func() {
for {
var m runtime.MemStats
runtime.ReadMemStats(&m)
//log.Printf("\nAlloc = %v\nTotalAlloc = %v\nSys = %v\nNumGC = %v\n\n", m.Alloc / 1024, m.TotalAlloc / 1024, m.Sys / 1024, m.NumGC)
time.Sleep(5 * time.Second)
}
}()
//s := string(buf[:len(buf)])
//fmt.Println("#######################################################################")
//fmt.Printf("%v",s)
//fmt.Println("#######################################################################")
ts := &telemetry_top.TelemetryStream{}
if err := proto.Unmarshal(buf, ts); err != nil {
fmt.Println("Error!! Unable to parse data: ", err)
return nil,err
}
//fmt.Printf("%v",ts)
host,errHost := os.Hostname()
_ = host
if errHost != nil {
fmt.Println("Error!! Host name not found: ", errHost)
return nil, errHost
}
deviceName := ts.GetSystemId()
newLog.Printf("Device : %v", deviceName)
newLog.Printf("Host : %v", host)
gpbTime := ts.GetTimestamp()
measurementPrefix := "enterprise.juniperNetworks"
jnprSensorName := ts.GetSensorName()
sensorName := jnprSensorName
_ = gpbTime
_ = measurementPrefix
_ = jnprSensorName
m := &jsonpb.Marshaler{}
tsJSON,err := m.MarshalToString(ts)
if err!= nil{
fmt.Println("Error!! ", err)
}
var data map[string]interface{}
errU := json.Unmarshal([]byte(tsJSON), &data)
if errU != nil {
fmt.Println("Error!! Unable to unmarshal: ",errU)
return nil, nil
//panic(errU)
}
//newLog.Printf("Data received : %v", data)
enterpriseSensorData := data["enterprise"]
sensorData, ok := enterpriseSensorData.(map[string]interface{})
if !ok {
// the "enterprise" section is not a map, so there is nothing to parse
return nil, nil
}
jnprSensorData := sensorData["[juniperNetworks]"]
metrics := make([]telegraf.Metric, 0)
sensorNum := 0
sequenceNum := 0
for key, sensorData := range jnprSensorData.(map[string]interface{}){
var fields map[string]interface{}
if reflect.ValueOf(sensorData).Kind() == reflect.Map {
_ = sensorName
sensorName = key[1:len(key)-1]
var measurementName string
measurementName = sensorName
/*
if val, ok := sensorMapping[sensorName]; ok {
measurementName = val
} else {
measurementName = sensorName
}
*/
newLog.Printf("Sensor : %v", measurementName)
measurementName = "juniperNetworks." + measurementName
newLog.Printf("Measurement : %v", measurementName)
//newLog.Printf("Data received : %v", data)
parsedData := parseMap(sensorData.(map[string]interface{}), "")
for _,finalData := range(parsedData){
//sequenceNum := 0
for _,fin := range(finalData.([]interface{})){
//fin = fin.(map[string] interface{})
fin.(map[string]interface{})["device"] = deviceName
//fin.(map[string]interface{})["host"] = host
fin.(map[string]interface{})["sensor_name"] = sensorName
//fin.(map[string]interface{})["_seq"] = sequenceNum
fields = fin.(map[string]interface{})
tags := make(map[string]string)
for k, v := range p.DefaultTags {
tags[k] = v
}
tags["_seq"] = strconv.Itoa(sequenceNum)
timestamp := time.Unix(int64(gpbTime)/1000, int64(gpbTime)%1000*1000000)
mtrc, err := metric.New(measurementName, tags, fields, timestamp)
if err != nil {
fmt.Println("Error!! Unable to create telegraf metrics: ", err)
} else {
metrics = append(metrics, mtrc)
}
sensorNum++
sequenceNum++
}
}
}
}
// fmt.Printf("\nData (JSON) = \n%s\n", data)
// fmt.Println("\nJuniper Sensor Name: \n%s\n", jnprSensorName)
// fmt.Println("\nDevice name: \n%s\n", deviceName)
// fmt.Println("\nGPB time: \n%s\n", gpbTime)
// fmt.Println(measurementPrefix)
// fmt.Println("\nMetrics: \n")
// fmt.Println(metrics)
newLog.Printf("Parsed Data : %v\n", metrics)
if errFileClose := out.Close(); errFileClose != nil {
log.Fatal(errFileClose)
}
return metrics, err
// return p.ParseWithDefaultTimePrecision(buf, time.Now(), "")
}
func (p *JuniperUDPParser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line + "\n"))
if err != nil {
return nil, err
}
if len(metrics) < 1 {
return nil, fmt.Errorf(
"Can not parse the line: %s, for data format: influx ", line)
}
return metrics[0], nil
}
func (p *JuniperUDPParser) | (tags map[string]string) {
p.DefaultTags = tags
}
| SetDefaultTags | identifier_name |
alain.js | "use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.buildAlain = exports.addValueToVariable = exports.addProviderToModule = exports.addImportToModule = exports.refreshPathRoot = void 0;
const core_1 = require("@angular-devkit/core");
const schematics_1 = require("@angular-devkit/schematics");
const ast_utils_1 = require("@schematics/angular/utility/ast-utils");
const change_1 = require("@schematics/angular/utility/change");
const find_module_1 = require("@schematics/angular/utility/find-module");
const parse_name_1 = require("@schematics/angular/utility/parse-name");
const validation_1 = require("@schematics/angular/utility/validation");
const fs = require("fs");
const path = require("path");
const ts = require("typescript");
const ast_1 = require("./ast");
const workspace_1 = require("./workspace");
const TEMPLATE_FILENAME_RE = /\.template$/;
function buildSelector(schema, projectPrefix) {
const ret = [];
if (!schema.withoutPrefix) {
if (schema.prefix) {
ret.push(schema.prefix);
}
else if (schema.prefix === undefined && projectPrefix) {
ret.push(projectPrefix);
}
}
// module name
if (schema.module) {
ret.push(schema.module);
}
// target name
if (schema.target) {
ret.push(...schema.target.split('/'));
}
// name
ret.push(core_1.strings.dasherize(schema.name));
return ret.join('-');
}
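// Sketch of the selector assembly (hypothetical schema values, assuming withoutPrefix is unset):
// buildSelector({ prefix: 'app', module: 'trade', target: 'order/list', name: 'view' }, 'ng')
// -> 'app-trade-order-list-view'
// If schema.prefix is undefined, the project prefix ('ng' here) is used instead.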
function buildName(schema, prefix) {
const ret = schema.withoutModulePrefixInComponentName === true ? [] : [schema.module];
if (schema.target && schema.target.length > 0) {
ret.push(...schema.target.split('/'));
}
ret.push(schema.name);
// For the Service class name, the 'list' and 'empty' page suffixes are dropped automatically
if (prefix === 'Service' && ['list', 'empty'].includes(schema.name)) {
ret.pop();
}
ret.push(prefix);
return core_1.strings.classify(ret.join('-'));
}
function refreshPathRoot(project, sch | ct) {
var _a;
if (schema.path === undefined) {
schema.path = `/${path.join(project.sourceRoot, (_a = alainProject === null || alainProject === void 0 ? void 0 : alainProject.routesRoot) !== null && _a !== void 0 ? _a : 'app/routes')}`;
}
}
exports.refreshPathRoot = refreshPathRoot;
function resolveSchema(tree, project, schema, alainProject) {
// module name
if (!schema.module) {
throw new schematics_1.SchematicsException(`Must specify module name. (e.g: ng g ng-alain:list <list name> -m=<module name>)`);
}
// path
refreshPathRoot(project, schema, alainProject);
schema.path += `/${schema.module}`;
const parsedPath = (0, parse_name_1.parseName)(schema.path, schema.name);
schema.name = parsedPath.name;
schema.path = parsedPath.path;
const rootPath = path.resolve(__dirname, '../../..');
const fullPath = path.join(rootPath, schema.path, schema.name);
if (fs.existsSync(fullPath) && fs.readdirSync(fullPath).length > 0) {
throw new schematics_1.SchematicsException(`The directory (${fullPath}) already exists`);
}
schema.importModulePath = (0, find_module_1.findModuleFromOptions)(tree, schema);
if (!schema._filesPath) {
// For the basic pages, first check for this directory under `_cli-tpl/_${schema.schematicName!}`; if it exists, it takes precedence
if (['list', 'edit', 'view', 'empty'].includes(schema.schematicName)) {
const overrideDir = `/${[project.root, `_cli-tpl/_${schema.schematicName}`].filter(i => !!i).join('/')}`;
const overridePath = `${overrideDir}/__path__/__name@dasherize@if-flat__/__name@dasherize__.component.ts`;
if (tree.exists(overridePath) || tree.exists(`${overridePath}.template`)) {
// The override directory follows the same structure as the command directory, so no special handling is needed
schema._filesPath = path.relative(__dirname, rootPath) + overrideDir;
}
}
schema._filesPath = schema._filesPath || './files';
}
// fill target
if (schema.target) {
schema.path += core_1.strings.dasherize(`/${schema.target}`);
}
schema.routerModulePath = schema.importModulePath.replace('.module.ts', '-routing.module.ts');
// html selector
schema.selector = schema.selector || buildSelector(schema, project.prefix);
(0, validation_1.validateHtmlSelector)(schema.selector);
}
function addImportToModule(tree, filePath, symbolName, fileName) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const change = (0, ast_utils_1.insertImport)(source, filePath, symbolName, fileName);
if (change.path == null)
return;
const declarationRecorder = tree.beginUpdate(filePath);
declarationRecorder.insertLeft(change.pos, change.toAdd);
tree.commitUpdate(declarationRecorder);
}
exports.addImportToModule = addImportToModule;
function addProviderToModule(tree, filePath, serviceName, importPath) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const changes = (0, ast_utils_1.addProviderToModule)(source, filePath, serviceName, importPath);
const declarationRecorder = tree.beginUpdate(filePath);
changes.forEach(change => {
if (change.path == null)
return;
if (change instanceof change_1.InsertChange) {
declarationRecorder.insertLeft(change.pos, change.toAdd);
}
});
tree.commitUpdate(declarationRecorder);
}
exports.addProviderToModule = addProviderToModule;
function addValueToVariable(tree, filePath, variableName, text, needWrap = true) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const node = (0, ast_utils_1.findNode)(source, ts.SyntaxKind.Identifier, variableName);
if (!node) {
throw new schematics_1.SchematicsException(`Could not find any [${variableName}] variable in path '${filePath}'.`);
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const arr = node.parent.initializer;
const change = new change_1.InsertChange(filePath, arr.end - 1, `${arr.elements && arr.elements.length > 0 ? ',' : ''}${needWrap ? '\n ' : ''}${text}`);
const declarationRecorder = tree.beginUpdate(filePath);
declarationRecorder.insertLeft(change.pos, change.toAdd);
tree.commitUpdate(declarationRecorder);
}
exports.addValueToVariable = addValueToVariable;
function getRelativePath(filePath, schema, prefix) {
const importPath = `/${schema.path}/${schema.flat ? '' : `${core_1.strings.dasherize(schema.name)}/`}${core_1.strings.dasherize(schema.name)}.${prefix}`;
return (0, find_module_1.buildRelativePath)(filePath, importPath);
}
function addDeclaration(schema) {
return (tree) => {
if (schema.skipImport || !schema.module) {
return tree;
}
// imports
addImportToModule(tree, schema.importModulePath, schema.componentName, getRelativePath(schema.importModulePath, schema, 'component'));
addValueToVariable(tree, schema.importModulePath, 'COMPONENTS', schema.componentName);
// component
if (schema.modal !== true) {
// routing
addImportToModule(tree, schema.routerModulePath, schema.componentName, getRelativePath(schema.routerModulePath, schema, 'component'));
addValueToVariable(tree, schema.routerModulePath, 'routes', `{ path: '${schema.name}', component: ${schema.componentName} }`);
}
// service
if (schema.service === 'none') {
addProviderToModule(tree, schema.importModulePath, schema.serviceName, getRelativePath(schema.importModulePath, schema, 'service'));
}
return tree;
};
}
function buildAlain(schema) {
return (tree) => __awaiter(this, void 0, void 0, function* () {
const res = yield (0, workspace_1.getProject)(tree, schema.project);
if (schema.project && res.name !== schema.project) {
throw new schematics_1.SchematicsException(`The specified project does not match '${schema.project}', current: ${res.name}`);
}
const project = res.project;
resolveSchema(tree, project, schema, res.alainProject);
schema.componentName = buildName(schema, 'Component');
schema.serviceName = buildName(schema, 'Service');
// Don't support inline
schema.inlineTemplate = false;
const templateSource = (0, schematics_1.apply)((0, schematics_1.url)(schema._filesPath), [
(0, schematics_1.filter)(filePath => !filePath.endsWith('.DS_Store')),
schema.service === 'ignore' ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.service.ts.template')) : (0, schematics_1.noop)(),
schema.skipTests ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.spec.ts.template')) : (0, schematics_1.noop)(),
schema.inlineStyle ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.__style__.template')) : (0, schematics_1.noop)(),
schema.inlineTemplate ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.html.template')) : (0, schematics_1.noop)(),
// schema.spec ? noop() : filter(filePath => !filePath.endsWith('.spec.ts')),
// schema.inlineStyle ? filter(filePath => !filePath.endsWith('.__styleext__')) : noop(),
// schema.inlineTemplate ? filter(filePath => !filePath.endsWith('.html')) : noop(),
(0, schematics_1.applyTemplates)(Object.assign(Object.assign(Object.assign({}, core_1.strings), { 'if-flat': (s) => (schema.flat ? '' : s) }), schema)),
(0, schematics_1.move)(null, `${schema.path}/`)
]);
return (0, schematics_1.chain)([(0, schematics_1.branchAndMerge)((0, schematics_1.chain)([addDeclaration(schema), (0, schematics_1.mergeWith)(templateSource)]))]);
});
}
exports.buildAlain = buildAlain;
//# sourceMappingURL=alain.js.map | ema, alainProje | identifier_name |
alain.js | "use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.buildAlain = exports.addValueToVariable = exports.addProviderToModule = exports.addImportToModule = exports.refreshPathRoot = void 0;
const core_1 = require("@angular-devkit/core");
const schematics_1 = require("@angular-devkit/schematics");
const ast_utils_1 = require("@schematics/angular/utility/ast-utils");
const change_1 = require("@schematics/angular/utility/change");
const find_module_1 = require("@schematics/angular/utility/find-module");
const parse_name_1 = require("@schematics/angular/utility/parse-name");
const validation_1 = require("@schematics/angular/utility/validation");
const fs = require("fs");
const path = require("path");
const ts = require("typescript");
const ast_1 = require("./ast");
const workspace_1 = require("./workspace");
const TEMPLATE_FILENAME_RE = /\.template$/;
function buildSelector(schema, projectPrefix) {
const ret = [];
if (!schema.withoutPrefix) {
if (schema.prefix) {
ret.push(schema.prefix);
}
else if (schema.prefix === undefined && projectPrefix) {
ret.push(projectPrefix);
}
}
// module name
if (schema.module) {
ret.push(schema.module);
}
// target name
if (schema.target) {
ret.push(...schema.target.split('/'));
}
// name
ret.push(core_1.strings.dasherize(schema.name));
return ret.join('-');
}
function buildName(schema, prefix) {
const ret = schema.withoutModulePrefixInComponentName === true ? [] : [schema.module];
if (schema.target && schema.target.length > 0) {
ret.push(...schema.target.split('/'));
}
ret.push(schema.name);
// For the Service class name, the 'list' and 'empty' page suffixes are dropped automatically
if (prefix === 'Service' && ['list', 'empty'].includes(schema.name)) {
ret.pop();
}
ret.push(prefix);
return core_1.strings.classify(ret.join('-'));
}
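// Sketch of the naming rules (hypothetical schema values, assuming withoutModulePrefixInComponentName is unset):
// buildName({ module: 'trade', name: 'list' }, 'Component') -> 'TradeListComponent'
// buildName({ module: 'trade', name: 'list' }, 'Service')   -> 'TradeService'
// (for 'Service', the 'list'/'empty' page name is dropped before classifying)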
function refreshPathRoot(project, schema, alainProject) {
var _a;
if (schema.path === undefined) {
schema.path = `/${path.join(project.sourceRoot, (_a = alainProject === null || alainProject === void 0 ? void 0 : alainProject.routesRoot) !== null && _a !== void 0 ? _a : 'app/routes')}`;
}
}
exports.refreshPathRoot = refreshPathRoot;
function resolveSchema(tree, project, schema, alainProject) {
// module name
if (!schema.module) {
throw new schematics_1.SchematicsException(`Must specify module name. (e.g: ng g ng-alain:list <list name> -m=<module name>)`);
}
// path
refreshPathRoot(project, schema, alainProject);
schema.path += `/${schema.module}`;
const parsedPath = (0, parse_name_1.parseName)(schema.path, schema.name);
schema.name = parsedPath.name;
schema.path = parsedPath.path;
const rootPath = path.resolve(__dirname, '../../..');
const fullPath = path.join(rootPath, schema.path, schema.name);
if (fs.existsSync(fullPath) && fs.readdirSync(fullPath).length > 0) {
throw new schematics_1.SchematicsException(`The directory (${fullPath}) already exists`);
}
schema.importModulePath = (0, find_module_1.findModuleFromOptions)(tree, schema);
if (!schema._filesPath) {
// For the basic pages, first check for this directory under `_cli-tpl/_${schema.schematicName!}`; if it exists, it takes precedence
if (['list', 'edit', 'view', 'empty'].includes(schema.schematicName)) {
const overrideDir = `/${[project.root, `_cli-tpl/_${schema.schematicName}`].filter(i => !!i).join('/')}`;
const overridePath = `${overrideDir}/__path__/__name@dasherize@if-flat__/__name@dasherize__.component.ts`;
if (tree.exists(overridePath) || tree.exists(`${overridePath}.template`)) {
// The override directory follows the same structure as the command directory, so no special handling is needed
schema._filesPath = path.relative(__dirname, rootPath) + overrideDir;
}
}
schema._filesPath = schema._filesPath || './files';
}
// fill target
if (schema.target) {
schema.path += core_1.strings.dasherize(`/${schema.target}`);
}
schema.routerModulePath = schema.importModulePath.replace('.module.ts', '-routing.module.ts');
// html selector
schema.selector = schema.selector || buildSelector(schema, project.prefix);
(0, validation_1.validateHtmlSelector)(schema.selector);
}
function addImportToModule(tree, filePath, symbolName, fileName) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const change = (0, ast_utils_1.insertImport)(source, filePath, symbolName, fileName);
if (change.path == null)
return;
const declarationRecorder = tree.beginUpdate(filePath);
declarationRecorder.insertLeft(change.pos, change.toAdd);
tree.commitUpdate(declarationRecorder);
}
exports.addImportToModule = addImportToModule;
function addProviderToModule(tree, filePath, serviceName, importPath) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const changes = (0, ast_utils_1.addProviderToModule)(sour | = true) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const node = (0, ast_utils_1.findNode)(source, ts.SyntaxKind.Identifier, variableName);
if (!node) {
throw new schematics_1.SchematicsException(`Could not find any [${variableName}] variable in path '${filePath}'.`);
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const arr = node.parent.initializer;
const change = new change_1.InsertChange(filePath, arr.end - 1, `${arr.elements && arr.elements.length > 0 ? ',' : ''}${needWrap ? '\n ' : ''}${text}`);
const declarationRecorder = tree.beginUpdate(filePath);
declarationRecorder.insertLeft(change.pos, change.toAdd);
tree.commitUpdate(declarationRecorder);
}
exports.addValueToVariable = addValueToVariable;
function getRelativePath(filePath, schema, prefix) {
const importPath = `/${schema.path}/${schema.flat ? '' : `${core_1.strings.dasherize(schema.name)}/`}${core_1.strings.dasherize(schema.name)}.${prefix}`;
return (0, find_module_1.buildRelativePath)(filePath, importPath);
}
function addDeclaration(schema) {
return (tree) => {
if (schema.skipImport || !schema.module) {
return tree;
}
// imports
addImportToModule(tree, schema.importModulePath, schema.componentName, getRelativePath(schema.importModulePath, schema, 'component'));
addValueToVariable(tree, schema.importModulePath, 'COMPONENTS', schema.componentName);
// component
if (schema.modal !== true) {
// routing
addImportToModule(tree, schema.routerModulePath, schema.componentName, getRelativePath(schema.routerModulePath, schema, 'component'));
addValueToVariable(tree, schema.routerModulePath, 'routes', `{ path: '${schema.name}', component: ${schema.componentName} }`);
}
// service
if (schema.service === 'none') {
addProviderToModule(tree, schema.importModulePath, schema.serviceName, getRelativePath(schema.importModulePath, schema, 'service'));
}
return tree;
};
}
function buildAlain(schema) {
return (tree) => __awaiter(this, void 0, void 0, function* () {
const res = yield (0, workspace_1.getProject)(tree, schema.project);
if (schema.project && res.name !== schema.project) {
throw new schematics_1.SchematicsException(`The specified project does not match '${schema.project}', current: ${res.name}`);
}
const project = res.project;
resolveSchema(tree, project, schema, res.alainProject);
schema.componentName = buildName(schema, 'Component');
schema.serviceName = buildName(schema, 'Service');
// Don't support inline
schema.inlineTemplate = false;
const templateSource = (0, schematics_1.apply)((0, schematics_1.url)(schema._filesPath), [
(0, schematics_1.filter)(filePath => !filePath.endsWith('.DS_Store')),
schema.service === 'ignore' ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.service.ts.template')) : (0, schematics_1.noop)(),
schema.skipTests ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.spec.ts.template')) : (0, schematics_1.noop)(),
schema.inlineStyle ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.__style__.template')) : (0, schematics_1.noop)(),
schema.inlineTemplate ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.html.template')) : (0, schematics_1.noop)(),
// schema.spec ? noop() : filter(filePath => !filePath.endsWith('.spec.ts')),
// schema.inlineStyle ? filter(filePath => !filePath.endsWith('.__styleext__')) : noop(),
// schema.inlineTemplate ? filter(filePath => !filePath.endsWith('.html')) : noop(),
(0, schematics_1.applyTemplates)(Object.assign(Object.assign(Object.assign({}, core_1.strings), { 'if-flat': (s) => (schema.flat ? '' : s) }), schema)),
(0, schematics_1.move)(null, `${schema.path}/`)
]);
return (0, schematics_1.chain)([(0, schematics_1.branchAndMerge)((0, schematics_1.chain)([addDeclaration(schema), (0, schematics_1.mergeWith)(templateSource)]))]);
});
}
exports.buildAlain = buildAlain;
//# sourceMappingURL=alain.js.map | ce, filePath, serviceName, importPath);
const declarationRecorder = tree.beginUpdate(filePath);
changes.forEach(change => {
if (change.path == null)
return;
if (change instanceof change_1.InsertChange) {
declarationRecorder.insertLeft(change.pos, change.toAdd);
}
});
tree.commitUpdate(declarationRecorder);
}
exports.addProviderToModule = addProviderToModule;
function addValueToVariable(tree, filePath, variableName, text, needWrap | identifier_body |
alain.js | "use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.buildAlain = exports.addValueToVariable = exports.addProviderToModule = exports.addImportToModule = exports.refreshPathRoot = void 0;
const core_1 = require("@angular-devkit/core");
const schematics_1 = require("@angular-devkit/schematics");
const ast_utils_1 = require("@schematics/angular/utility/ast-utils");
const change_1 = require("@schematics/angular/utility/change");
const find_module_1 = require("@schematics/angular/utility/find-module");
const parse_name_1 = require("@schematics/angular/utility/parse-name");
const validation_1 = require("@schematics/angular/utility/validation");
const fs = require("fs");
const path = require("path");
const ts = require("typescript");
const ast_1 = require("./ast");
const workspace_1 = require("./workspace");
const TEMPLATE_FILENAME_RE = /\.template$/;
function buildSelector(schema, projectPrefix) {
const ret = [];
if (!schema.withoutPrefix) {
if (schema.prefix) {
ret.push(schema.prefix);
}
else if (schema.prefix === undefined && projectPrefix) |
}
// module name
if (schema.module) {
ret.push(schema.module);
}
// target name
if (schema.target) {
ret.push(...schema.target.split('/'));
}
// name
ret.push(core_1.strings.dasherize(schema.name));
return ret.join('-');
}
function buildName(schema, prefix) {
const ret = schema.withoutModulePrefixInComponentName === true ? [] : [schema.module];
if (schema.target && schema.target.length > 0) {
ret.push(...schema.target.split('/'));
}
ret.push(schema.name);
// 服务类自动过滤 list, empty 两个页面的后缀
if (prefix === 'Service' && ['list', 'empty'].includes(schema.name)) {
ret.pop();
}
ret.push(prefix);
return core_1.strings.classify(ret.join('-'));
}
function refreshPathRoot(project, schema, alainProject) {
var _a;
if (schema.path === undefined) {
schema.path = `/${path.join(project.sourceRoot, (_a = alainProject === null || alainProject === void 0 ? void 0 : alainProject.routesRoot) !== null && _a !== void 0 ? _a : 'app/routes')}`;
}
}
exports.refreshPathRoot = refreshPathRoot;
function resolveSchema(tree, project, schema, alainProject) {
// module name
if (!schema.module) {
throw new schematics_1.SchematicsException(`Must specify module name. (e.g: ng g ng-alain:list <list name> -m=<module name>)`);
}
// path
refreshPathRoot(project, schema, alainProject);
schema.path += `/${schema.module}`;
const parsedPath = (0, parse_name_1.parseName)(schema.path, schema.name);
schema.name = parsedPath.name;
schema.path = parsedPath.path;
const rootPath = path.resolve(__dirname, '../../..');
const fullPath = path.join(rootPath, schema.path, schema.name);
if (fs.existsSync(fullPath) && fs.readdirSync(fullPath).length > 0) {
throw new schematics_1.SchematicsException(`The directory (${fullPath}) already exists`);
}
schema.importModulePath = (0, find_module_1.findModuleFromOptions)(tree, schema);
if (!schema._filesPath) {
// For the basic pages, first check for this directory under `_cli-tpl/_${schema.schematicName!}`; if it exists, it takes precedence
if (['list', 'edit', 'view', 'empty'].includes(schema.schematicName)) {
const overrideDir = `/${[project.root, `_cli-tpl/_${schema.schematicName}`].filter(i => !!i).join('/')}`;
const overridePath = `${overrideDir}/__path__/__name@dasherize@if-flat__/__name@dasherize__.component.ts`;
if (tree.exists(overridePath) || tree.exists(`${overridePath}.template`)) {
// The override directory follows the same structure as the command directory, so no special handling is needed
schema._filesPath = path.relative(__dirname, rootPath) + overrideDir;
}
}
schema._filesPath = schema._filesPath || './files';
}
// fill target
if (schema.target) {
schema.path += core_1.strings.dasherize(`/${schema.target}`);
}
schema.routerModulePath = schema.importModulePath.replace('.module.ts', '-routing.module.ts');
// html selector
schema.selector = schema.selector || buildSelector(schema, project.prefix);
(0, validation_1.validateHtmlSelector)(schema.selector);
}
function addImportToModule(tree, filePath, symbolName, fileName) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const change = (0, ast_utils_1.insertImport)(source, filePath, symbolName, fileName);
if (change.path == null)
return;
const declarationRecorder = tree.beginUpdate(filePath);
declarationRecorder.insertLeft(change.pos, change.toAdd);
tree.commitUpdate(declarationRecorder);
}
exports.addImportToModule = addImportToModule;
function addProviderToModule(tree, filePath, serviceName, importPath) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const changes = (0, ast_utils_1.addProviderToModule)(source, filePath, serviceName, importPath);
const declarationRecorder = tree.beginUpdate(filePath);
changes.forEach(change => {
if (change.path == null)
return;
if (change instanceof change_1.InsertChange) {
declarationRecorder.insertLeft(change.pos, change.toAdd);
}
});
tree.commitUpdate(declarationRecorder);
}
exports.addProviderToModule = addProviderToModule;
function addValueToVariable(tree, filePath, variableName, text, needWrap = true) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const node = (0, ast_utils_1.findNode)(source, ts.SyntaxKind.Identifier, variableName);
if (!node) {
throw new schematics_1.SchematicsException(`Could not find any [${variableName}] variable in path '${filePath}'.`);
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const arr = node.parent.initializer;
const change = new change_1.InsertChange(filePath, arr.end - 1, `${arr.elements && arr.elements.length > 0 ? ',' : ''}${needWrap ? '\n ' : ''}${text}`);
const declarationRecorder = tree.beginUpdate(filePath);
declarationRecorder.insertLeft(change.pos, change.toAdd);
tree.commitUpdate(declarationRecorder);
}
exports.addValueToVariable = addValueToVariable;
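// Sketch of the edit this performs (hypothetical module file and component names):
// given 'const COMPONENTS = [FooComponent];' in trade.module.ts,
// addValueToVariable(tree, 'trade.module.ts', 'COMPONENTS', 'BarComponent')
// places an InsertChange just before the closing bracket, rewriting the
// initializer to '[FooComponent,\n  BarComponent]'.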
function getRelativePath(filePath, schema, prefix) {
const importPath = `/${schema.path}/${schema.flat ? '' : `${core_1.strings.dasherize(schema.name)}/`}${core_1.strings.dasherize(schema.name)}.${prefix}`;
return (0, find_module_1.buildRelativePath)(filePath, importPath);
}
function addDeclaration(schema) {
return (tree) => {
if (schema.skipImport || !schema.module) {
return tree;
}
// imports
addImportToModule(tree, schema.importModulePath, schema.componentName, getRelativePath(schema.importModulePath, schema, 'component'));
addValueToVariable(tree, schema.importModulePath, 'COMPONENTS', schema.componentName);
// component
if (schema.modal !== true) {
// routing
addImportToModule(tree, schema.routerModulePath, schema.componentName, getRelativePath(schema.routerModulePath, schema, 'component'));
addValueToVariable(tree, schema.routerModulePath, 'routes', `{ path: '${schema.name}', component: ${schema.componentName} }`);
}
// service
if (schema.service === 'none') {
addProviderToModule(tree, schema.importModulePath, schema.serviceName, getRelativePath(schema.importModulePath, schema, 'service'));
}
return tree;
};
}
function buildAlain(schema) {
return (tree) => __awaiter(this, void 0, void 0, function* () {
const res = yield (0, workspace_1.getProject)(tree, schema.project);
if (schema.project && res.name !== schema.project) {
throw new schematics_1.SchematicsException(`The specified project does not match '${schema.project}', current: ${res.name}`);
}
const project = res.project;
resolveSchema(tree, project, schema, res.alainProject);
schema.componentName = buildName(schema, 'Component');
schema.serviceName = buildName(schema, 'Service');
// Don't support inline
schema.inlineTemplate = false;
const templateSource = (0, schematics_1.apply)((0, schematics_1.url)(schema._filesPath), [
(0, schematics_1.filter)(filePath => !filePath.endsWith('.DS_Store')),
schema.service === 'ignore' ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.service.ts.template')) : (0, schematics_1.noop)(),
schema.skipTests ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.spec.ts.template')) : (0, schematics_1.noop)(),
schema.inlineStyle ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.__style__.template')) : (0, schematics_1.noop)(),
schema.inlineTemplate ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.html.template')) : (0, schematics_1.noop)(),
// schema.spec ? noop() : filter(filePath => !filePath.endsWith('.spec.ts')),
// schema.inlineStyle ? filter(filePath => !filePath.endsWith('.__styleext__')) : noop(),
// schema.inlineTemplate ? filter(filePath => !filePath.endsWith('.html')) : noop(),
(0, schematics_1.applyTemplates)(Object.assign(Object.assign(Object.assign({}, core_1.strings), { 'if-flat': (s) => (schema.flat ? '' : s) }), schema)),
(0, schematics_1.move)(null, `${schema.path}/`)
]);
return (0, schematics_1.chain)([(0, schematics_1.branchAndMerge)((0, schematics_1.chain)([addDeclaration(schema), (0, schematics_1.mergeWith)(templateSource)]))]);
});
}
exports.buildAlain = buildAlain;
//# sourceMappingURL=alain.js.map | {
ret.push(projectPrefix);
} | conditional_block |
alain.js | "use strict";
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.buildAlain = exports.addValueToVariable = exports.addProviderToModule = exports.addImportToModule = exports.refreshPathRoot = void 0;
const core_1 = require("@angular-devkit/core");
const schematics_1 = require("@angular-devkit/schematics");
const ast_utils_1 = require("@schematics/angular/utility/ast-utils");
const change_1 = require("@schematics/angular/utility/change");
const find_module_1 = require("@schematics/angular/utility/find-module");
const parse_name_1 = require("@schematics/angular/utility/parse-name");
const validation_1 = require("@schematics/angular/utility/validation");
const fs = require("fs");
const path = require("path");
const ts = require("typescript");
const ast_1 = require("./ast");
const workspace_1 = require("./workspace");
const TEMPLATE_FILENAME_RE = /\.template$/;
function buildSelector(schema, projectPrefix) {
const ret = [];
if (!schema.withoutPrefix) {
if (schema.prefix) {
ret.push(schema.prefix);
}
else if (schema.prefix === undefined && projectPrefix) {
ret.push(projectPrefix);
}
}
// module name
if (schema.module) {
ret.push(schema.module);
}
// target name
if (schema.target) {
ret.push(...schema.target.split('/'));
}
// name
ret.push(core_1.strings.dasherize(schema.name));
return ret.join('-');
}
function buildName(schema, prefix) {
const ret = schema.withoutModulePrefixInComponentName === true ? [] : [schema.module];
if (schema.target && schema.target.length > 0) {
ret.push(...schema.target.split('/'));
}
ret.push(schema.name);
// 服务类自动过滤 list, empty 两个页面的后缀
if (prefix === 'Service' && ['list', 'empty'].includes(schema.name)) {
ret.pop();
}
ret.push(prefix);
return core_1.strings.classify(ret.join('-'));
}
function refreshPathRoot(project, schema, alainProject) {
var _a;
if (schema.path === undefined) {
schema.path = `/${path.join(project.sourceRoot, (_a = alainProject === null || alainProject === void 0 ? void 0 : alainProject.routesRoot) !== null && _a !== void 0 ? _a : 'app/routes')}`;
}
}
exports.refreshPathRoot = refreshPathRoot;
function resolveSchema(tree, project, schema, alainProject) {
// module name
if (!schema.module) {
throw new schematics_1.SchematicsException(`Must specify module name. (e.g: ng g ng-alain:list <list name> -m=<module name>)`);
}
// path
refreshPathRoot(project, schema, alainProject);
schema.path += `/${schema.module}`;
const parsedPath = (0, parse_name_1.parseName)(schema.path, schema.name);
schema.name = parsedPath.name;
schema.path = parsedPath.path;
const rootPath = path.resolve(__dirname, '../../..');
const fullPath = path.join(rootPath, schema.path, schema.name);
if (fs.existsSync(fullPath) && fs.readdirSync(fullPath).length > 0) {
throw new schematics_1.SchematicsException(`The directory (${fullPath}) already exists`);
}
schema.importModulePath = (0, find_module_1.findModuleFromOptions)(tree, schema);
if (!schema._filesPath) {
// For the basic pages, first check for this directory under `_cli-tpl/_${schema.schematicName!}`; if it exists, it takes precedence
if (['list', 'edit', 'view', 'empty'].includes(schema.schematicName)) {
const overrideDir = `/${[project.root, `_cli-tpl/_${schema.schematicName}`].filter(i => !!i).join('/')}`;
const overridePath = `${overrideDir}/__path__/__name@dasherize@if-flat__/__name@dasherize__.component.ts`;
if (tree.exists(overridePath) || tree.exists(`${overridePath}.template`)) {
// The override directory follows the same structure as the command directory, so no special handling is needed
schema._filesPath = path.relative(__dirname, rootPath) + overrideDir;
}
}
schema._filesPath = schema._filesPath || './files';
}
// fill target
if (schema.target) {
schema.path += core_1.strings.dasherize(`/${schema.target}`);
}
schema.routerModulePath = schema.importModulePath.replace('.module.ts', '-routing.module.ts');
// html selector
schema.selector = schema.selector || buildSelector(schema, project.prefix);
(0, validation_1.validateHtmlSelector)(schema.selector);
}
function addImportToModule(tree, filePath, symbolName, fileName) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const change = (0, ast_utils_1.insertImport)(source, filePath, symbolName, fileName);
if (change.path == null)
return;
const declarationRecorder = tree.beginUpdate(filePath);
declarationRecorder.insertLeft(change.pos, change.toAdd);
tree.commitUpdate(declarationRecorder);
}
exports.addImportToModule = addImportToModule;
function addProviderToModule(tree, filePath, serviceName, importPath) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const changes = (0, ast_utils_1.addProviderToModule)(source, filePath, serviceName, importPath);
const declarationRecorder = tree.beginUpdate(filePath);
changes.forEach(change => {
if (change.path == null)
return;
if (change instanceof change_1.InsertChange) {
declarationRecorder.insertLeft(change.pos, change.toAdd);
}
});
tree.commitUpdate(declarationRecorder);
}
exports.addProviderToModule = addProviderToModule;
function addValueToVariable(tree, filePath, variableName, text, needWrap = true) {
const source = (0, ast_1.getSourceFile)(tree, filePath);
const node = (0, ast_utils_1.findNode)(source, ts.SyntaxKind.Identifier, variableName);
if (!node) {
throw new schematics_1.SchematicsException(`Could not find any [${variableName}] variable in path '${filePath}'.`);
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const arr = node.parent.initializer;
const change = new change_1.InsertChange(filePath, arr.end - 1, `${arr.elements && arr.elements.length > 0 ? ',' : ''}${needWrap ? '\n ' : ''}${text}`);
const declarationRecorder = tree.beginUpdate(filePath);
declarationRecorder.insertLeft(change.pos, change.toAdd);
tree.commitUpdate(declarationRecorder);
}
exports.addValueToVariable = addValueToVariable;
function getRelativePath(filePath, schema, prefix) {
const importPath = `/${schema.path}/${schema.flat ? '' : `${core_1.strings.dasherize(schema.name)}/`}${core_1.strings.dasherize(schema.name)}.${prefix}`;
return (0, find_module_1.buildRelativePath)(filePath, importPath);
}
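// Sketch (hypothetical paths): with schema = { path: 'src/app/routes/trade', name: 'order-list', flat: false },
// the import path becomes '/src/app/routes/trade/order-list/order-list.component', so calling
// getRelativePath('/src/app/routes/trade/trade.module.ts', schema, 'component')
// should yield something like './order-list/order-list.component'.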
function addDeclaration(schema) {
return (tree) => {
if (schema.skipImport || !schema.module) {
return tree;
}
// imports
addImportToModule(tree, schema.importModulePath, schema.componentName, getRelativePath(schema.importModulePath, schema, 'component'));
addValueToVariable(tree, schema.importModulePath, 'COMPONENTS', schema.componentName);
// component
if (schema.modal !== true) {
// routing
addImportToModule(tree, schema.routerModulePath, schema.componentName, getRelativePath(schema.routerModulePath, schema, 'component'));
addValueToVariable(tree, schema.routerModulePath, 'routes', `{ path: '${schema.name}', component: ${schema.componentName} }`);
}
// service
if (schema.service === 'none') {
addProviderToModule(tree, schema.importModulePath, schema.serviceName, getRelativePath(schema.importModulePath, schema, 'service'));
}
return tree;
};
}
function buildAlain(schema) {
return (tree) => __awaiter(this, void 0, void 0, function* () {
const res = yield (0, workspace_1.getProject)(tree, schema.project);
if (schema.project && res.name !== schema.project) {
throw new schematics_1.SchematicsException(`The specified project does not match '${schema.project}', current: ${res.name}`);
}
const project = res.project;
resolveSchema(tree, project, schema, res.alainProject);
schema.componentName = buildName(schema, 'Component');
schema.serviceName = buildName(schema, 'Service'); | schema.service === 'ignore' ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.service.ts.template')) : (0, schematics_1.noop)(),
schema.skipTests ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.spec.ts.template')) : (0, schematics_1.noop)(),
schema.inlineStyle ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.__style__.template')) : (0, schematics_1.noop)(),
schema.inlineTemplate ? (0, schematics_1.filter)(filePath => !filePath.endsWith('.html.template')) : (0, schematics_1.noop)(),
// schema.spec ? noop() : filter(filePath => !filePath.endsWith('.spec.ts')),
// schema.inlineStyle ? filter(filePath => !filePath.endsWith('.__styleext__')) : noop(),
// schema.inlineTemplate ? filter(filePath => !filePath.endsWith('.html')) : noop(),
(0, schematics_1.applyTemplates)(Object.assign(Object.assign(Object.assign({}, core_1.strings), { 'if-flat': (s) => (schema.flat ? '' : s) }), schema)),
(0, schematics_1.move)(null, `${schema.path}/`)
]);
return (0, schematics_1.chain)([(0, schematics_1.branchAndMerge)((0, schematics_1.chain)([addDeclaration(schema), (0, schematics_1.mergeWith)(templateSource)]))]);
});
}
exports.buildAlain = buildAlain;
//# sourceMappingURL=alain.js.map | // Don't support inline
schema.inlineTemplate = false;
const templateSource = (0, schematics_1.apply)((0, schematics_1.url)(schema._filesPath), [
(0, schematics_1.filter)(filePath => !filePath.endsWith('.DS_Store')), | random_line_split |
plot.py | #!/usr/bin/env python
"""
Author: Matthew Christey-Reid
Email: [email protected]
Date: 02/06/2020
plot.py - Main functions to determine properties of the eclipsing binaries from observed data
"""
# Import libraries for data processing
import math
import numpy as np
import matplotlib.pyplot as plt
from math import log10, floor
from scipy.optimize import curve_fit
# Import lightcurve functions
import lightcurve
# Import Lomb-Scargle periodogram functions
from astropy.timeseries import LombScargle
# Arrays to store common values for each observed band
VegaToAB = [0.528, 0.634, 0.938, 1.379, 1.9] # Constant for conversion between Vega and AB magnitude systems
Wavelength = [0.9, 1.02, 1.25, 1.6, 2.2] # Effective wavelength of observation band
def round_sig(_val):
"""
Rounds the provided value to 3 significant figures
:param _val: Value to be rounded
:return: Float, original value rounded to 3 significant figures
"""
return round(_val, 3 - int(floor(log10(abs(_val)))) - 1)
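# Example (worked by hand): round_sig(0.012345) -> round(0.012345, 4) = 0.0123,
# and round_sig(123.456) -> round(123.456, 0) = 123.0, i.e. three significant figures.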
def plotallbands(_zband, _yband, _jband, _hband, _kband, _period):
"""
Plots all observed bands to the same graph
:param _zband: Observed z-band
:param _yband: Observed y-band
:param _jband: Observed j-band
:param _hband: Observed h-band
:param _kband: Observed k-band
:param _period: Period of variability
"""
# Set pyplot style to be consistent within the program
plt.style.use('seaborn-whitegrid')
# Frequency = 1 / Period
_freq = 1 / _period
# Create single dataset from all bands
_bands = [_zband, _yband, _jband, _hband, _kband]
# Iterate through each band and plot to screen
i = 0
while i < 5:
# Array to set colours for each band
_colours = ['-b', '-g', '-r', '-c', '-m']
# Array to set strings for graph legend
_legend = ['Z-band', 'Y-band', 'J-band', 'H-band', 'K-band']
# Determine the line of best fit for each band
_xfit, _lobf = calclobf(_bands[i], _period)
# Plot the line of best fit for this band
plt.plot(_xfit, _lobf, _colours[i], lw=1, zorder=2, label=_legend[i])
i += 1
# Set x-axis limit to a single period
plt.xlim(0, 1)
# Set graph and axis titles
plt.xlabel("Phase")
plt.ylabel("Magnitude")
plt.title("Folded light curve")
# Show the legend
plt.legend()
# Invert y-axis as convention
plt.gca().invert_yaxis()
# Save to current folder
plt.savefig('curve.png')
# Display to screen
plt.show()
def | (_band, _period):
"""
Plots an observed band using pyplot
:param _band: Array to be plotted
:param _period: Period of object
"""
# Frequency = 1 / Period
_freq = 1 / _period
_xfit, _lobf = calclobf(_band, _period)
# Plot the raw data lightly coloured, at a z-order behind the line of best fit
plt.style.use('seaborn-whitegrid')
plt.errorbar((_band[:, 0] * _freq) % 1, _band[:, 1], _band[:, 2], fmt='.', color='gray',
ecolor='lightgray', capsize=0, zorder=0)
# Plot the graph of the line of best fit
plt.plot(_xfit, _lobf, '-k', lw=2, zorder=2)
# Set x-axis limits to 1 period
plt.xlim(0, 1)
# Set graph and axis titles
plt.xlabel("Phase")
plt.ylabel("Magnitude")
plt.title("Folded light curve")
# Invert y-axis as convention
plt.gca().invert_yaxis()
# Display to screen
plt.show()
def calclobf(_band, _period):
"""
Creates a line of best fit using Lomb-Scargle methods
:param _band: Band array to be fit
:param _period: Period of object
:return: Returns a linearly spaced x-axis, with y-axis values for line of best fit
"""
# Create a model with 10 terms
_ls = LombScargle(_band[:, 0], _band[:, 1], _band[:, 2], nterms=10)
# Create n linearly spaced points between phase 0 and 1
_xfit = np.linspace(0, 1, 1000)
# Frequency = 1 / Period
_freq = 1 / _period
# Plot the line of best fit generated
_lobf = _ls.model(_xfit / _freq, _freq)
return _xfit, _lobf
def foldcurve(_band, _period):
"""
Folds the magnitude measurements to a light curve using provided period
:param _band: Observation band to be folded
:param _period: Period of object
:return: Array same size as _band, but with a phase instead of Julian date
"""
# Set epoch to first date observed
_epoch = _band[0][0]
# Iterate through array, update date to phase
for i in range(0, _band.shape[0]):
_band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1
# Return folded array
return _band
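# Example (worked by hand): with epoch 2455000.0 and period 0.5 d, an observation at
# JD 2455001.25 folds to phase ((2455001.25 - 2455000.0) / 0.5) % 1 = 0.5.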
def doublearrayphase(_inputarray):
"""
Doubles a band array from phase 0 -> 1 to 0 -> 2 as convention
:param _inputarray: Array to be doubled
:return: Returns an array from phase 0 -> 2, size [n * 2, 3]
"""
# Create a new array twice the size of the input
_newarray = np.zeros((_inputarray.shape[0] * 2, _inputarray.shape[1]), dtype=float)
# Iterate through the input array
for i in range(0, _newarray.shape[0]):
# Before phase 1 simply copy data into new array
if i < _inputarray.shape[0]:
_newarray[i] = _inputarray[i]
# After phase 1, simply shift all phases by +1
else:
_newarray[i] = _inputarray[i - _inputarray.shape[0]]
_newarray[i][0] = _newarray[i][0] + 1
# Return the new doubled array
return _newarray
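# Example (worked by hand): rows with phases [0.1, 0.6] are copied to [0.1, 0.6, 1.1, 1.6],
# so the light curve can be drawn over two full cycles.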
def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):
"""
Determines the black body curve and determines mass, radius and luminosities in solar units
:param _zband: Observed z-band
:param _yband: Observed y-band
:param _jband: Observed j-band
:param _hband: Observed h-band
:param _kband: Observed k-band
:param _parallax: Parallax angle (mas)
:param _perr: Parallax angle error (mas)
"""
# Set pyplot style to be consistent within the program
plt.style.use('seaborn-whitegrid')
# Import raw data to plot Hertzsprung-Russell diagram
_hrdata = inithr('hr.dat')
# Determine distance in parsecs
_distance = 1 / np.tan(_parallax * 10**-3)
_derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)
# Create single data array with all bands
_bands = [_zband, _yband, _jband, _hband, _kband]
_lambda = [0.9, 1.02, 1.22, 1.63, 2.2]
# Set up empty arrays for each star
_largestar = np.zeros((1, 2))
_smallstar = np.zeros((1, 2))
# Determine the spectral flux density from the large star
i = 0
while i < 5:
# Determine the maximum and minimum values of the observed band
_max, _min = lightcurve.maxminvals(_bands[i])
# The large star uses the maximum flux value (smallest magnitude)
_largestar = np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, i))], ndmin=2), axis=0)
i += 1
# Delete first empty row of the array
_largestar = np.delete(_largestar, 0, axis=0)
# Determine the spectral flux density from the small star
i = 0
while i < 5:
# Determine the maximum and minimum values of the observed band
_max, _min = lightcurve.maxminvals(_bands[i])
# Smaller star flux value is combined value minus the large star
_smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -
magtoflux(_min, i))], ndmin=2), axis=0)
i += 1
# Delete the first empty row of the array
_smallstar = np.delete(_smallstar, 0, axis=0)
# Determine the luminosity and effective temperature of each star
_luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)
_lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)
# Calculate luminosities in solar units
_solluma = _luma / (3.828*10**26)
_sollumb = _lumb / (3.828*10**26)
_lumaerr = _lumaerr / (3.828*10**26)
_lumberr = _lumberr / (3.828*10**26)
# Calculate masses using the mass/luminosity relation in solar mass units
# N.B. only works as an approximation for main sequence stars, giants and dwarfs are not sutiable for this
# approximation
_solmassa = np.power(_solluma, 1/3.5)
_solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2
_solmassb = np.power(_sollumb, 1/3.5)
_solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2
# Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature
_solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))
_solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))
_solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2
_solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2
# Output determined values to the screen and write to file
print('Values for the large star:')
print('Effective temperature: ' + str(round_sig(_wiena)))
print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))
print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))
print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))
print('-----------------------------------------------------')
print('Values for the small star:')
print('Effective temperature: ' + str(round_sig(_wienb)))
print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))
print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))
print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))
# Convert luminosity to absolute bolometric magnitude
_luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))
_lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))
# Plot Hertzsprung-Russell diagram using provided array
plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)
# Plot determined values for each star
plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')
plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')
# Set the x and y axis limits to sensible values
plt.legend()
plt.xlim(3000, 10000)
plt.ylim(-10, 20)
# Invert both axes as convention
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# Save figure to current folder
plt.savefig('hr.png')
# Display to screen
plt.show()
def getwientemp(_inputdata, _distance, _derr, _id):
"""
Determines the effective temperature using Wien's law
:param _inputdata: Black body curve of object
:param _distance: Distance to object (parsecs)
:param _derr: Error on the distance (parsecs)
:param _id: 1 for large star, 2 for small star
:return: Luminosity, luminosity error and effective surface temperature
"""
# Maxwell-Boltzmann distribution formula probability density function
def curve(_x, _a, _scale):
_a1 = np.sqrt(2 / np.pi)
_a2 = _x**2 / (2 * _a**2)
return _scale * _a1 * (_x**2 * np.exp(-_a2)) / _a**3
# Set pyplot style to be consistent through the program
plt.style.use('seaborn-whitegrid')
# Convert the distance in parsecs to metres
_distance = 3.0857 * 10**16 * _distance
_derr = 3.0857 * 10**16 * _derr
# Create array for x and y axis data
_xdata = _inputdata[:, 0]
_ydata = _inputdata[:, 1]
_ydatalum = _ydata
# Iterate through each band and convert from Janskys to W/m^2/um
i = 0
while i < 5:
_ydata[i] = 3*10**14 * (_ydata[i] * 10**-26) / (Wavelength[i]**2)
i += 1
# Calculate optimal values and covariance using scipy curve_fit function
_popt, _pcov = curve_fit(curve, _xdata, _ydata)
# Create x axis to plot curve against
_x = np.linspace(0, 5, 100)
# Determine y value for each point on the x axis
_yplot = curve(_x, *_popt)
# Plot the curve to the screen
plt.plot(_x, _yplot)
# Determine the area under the graph, integral gives total energy received per m^2
_area = np.trapz(_yplot, dx=5/100)
# Total luminosity found by multiplying by the surface area of a sphere with radius equal to the distance
_lum = 4 * np.pi * _distance**2 * _area
_lumerr = 4 * np.pi * _distance * _derr * _area
# Peak value of Maxwell-Boltzmann distribution
_mu = 2 * _popt[0] * np.sqrt(2 / np.pi)
# Plot data on the graph
plt.plot(_xdata, _ydata, '.')
# Set axis labels
plt.xlabel('Wavelength (um)')
plt.ylabel('Spectral Irradiance (W m^-2 um^-1)')
if _id == 1:
_str = 'Large Star'
else:
_str = 'Small Star'
# Calculate effective surface temperature using Wien's law
_wien = round_sig(2898 / _mu)
    # Round luminosity to 3 significant figures
_lum = round_sig(_lum)
# Set graph title
plt.suptitle('Black Body Plot for the ' + _str)
# Save to current folder
_filename = _str + '.png'
plt.savefig(_filename)
# Display to the screen
plt.show()
# Returns calculated values
return _lum, _lumerr, _wien
def inithr(_filename):
"""
Parses required data for plotting a Hertzsprung-Russell diagram
:param _filename: File containing observed data
    :return: (n x 3) array containing absolute magnitude, effective temperature and distance (parsecs)
"""
# Open file provided
_file = open(_filename)
# Create empty array to hold data
_data = np.zeros((1, 3), dtype=float)
# Iterate through the file line by line
for _line in _file:
# Split each line into constituent values
_x = _line.split()
# Append data array with each value, converted to float, convert parallax angle to distance
_data = np.append(_data, np.array([float(_x[1]), float(_x[2]), (1 / float(_x[3]))], ndmin=2), axis=0)
# Iterate through data array
for _row in _data:
np.seterr(divide='ignore')
        # Convert apparent magnitude to absolute magnitude using the distance modulus
_row[0] = _row[0] - 5 * (np.log10(_row[2]) - 1)
# Convert B-V colour to temperature
_row[1] = 4600 * ((1 / (0.92 * _row[1] + 1.7)) + 1 / (0.92 * _row[1] + 0.62))
# Delete first empty row
_data = np.delete(_data, 0, axis=0)
# Return parsed data
return _data
def magtoflux(_mag, _id):
"""
Converts magnitude to flux in Janskys
:param _mag: Magnitude of object
:param _id: ID of observation band
:return: Spectral flux density in Janskys
"""
return math.pow(10, -0.4*(_mag + VegaToAB[_id] - 8.9))
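# Illustrative numbers only (not taken from the observed data): with the 8.9 AB
# zero point above, m_AB = 8.9 corresponds to exactly 1 Jy, and a Vega magnitude
# of 0 in the Z band (_id = 0) maps to m_AB = 0.528, i.e. about
# 10**(-0.4 * (0.528 - 8.9)) ~ 2.2e3 Jy.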
| plotband | identifier_name |
plot.py | #!/usr/bin/env python
"""
Author: Matthew Christey-Reid
Email: [email protected]
Date: 02/06/2020
plot.py - Main functions to determine properties of the eclipsing binaries from observed data
"""
# Import libraries for data processing
import math
import numpy as np
import matplotlib.pyplot as plt
from math import log10, floor
from scipy.optimize import curve_fit
# Import lightcurve functions
import lightcurve
# Import Lomb-Scargle periodogram functions
from astropy.timeseries import LombScargle
# Arrays to store common values for each observed band
VegaToAB = [0.528, 0.634, 0.938, 1.379, 1.9] # Constant for conversion between Vega and AB magnitude systems
Wavelength = [0.9, 1.02, 1.25, 1.6, 2.2] # Effective wavelength of observation band
def round_sig(_val):
"""
    Rounds the provided value to 3 significant figures
    :param _val: Value to be rounded
    :return: Float, original value rounded to 3 significant figures
"""
return round(_val, 3 - int(floor(log10(abs(_val)))) - 1)
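# Quick sanity check with made-up values: the formula above keeps three
# significant figures, e.g. round_sig(12345.6) -> 12300.0 and
# round_sig(0.0012345) -> 0.00123.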
def plotallbands(_zband, _yband, _jband, _hband, _kband, _period):
"""
Plots all observed bands to the same graph
:param _zband: Observed z-band
:param _yband: Observed y-band
:param _jband: Observed j-band
:param _hband: Observed h-band
:param _kband: Observed k-band
:param _period: Period of variability
"""
    # Set pyplot style to be consistent within the program
plt.style.use('seaborn-whitegrid')
# Frequency = 1 / Period
_freq = 1 / _period
# Create single dataset from all bands
_bands = [_zband, _yband, _jband, _hband, _kband]
# Iterate through each band and plot to screen
i = 0
while i < 5:
# Array to set colours for each band
_colours = ['-b', '-g', '-r', '-c', '-m']
# Array to set strings for graph legend
_legend = ['Z-band', 'Y-band', 'J-band', 'H-band', 'K-band']
# Determine the line of best fit for each band
_xfit, _lobf = calclobf(_bands[i], _period)
        # Plot the line of best fit for this band
plt.plot(_xfit, _lobf, _colours[i], lw=1, zorder=2, label=_legend[i])
i += 1
# Set x-axis limit to a single period
plt.xlim(0, 1)
# Set graph and axis titles
plt.xlabel("Phase")
plt.ylabel("Magnitude")
plt.title("Folded light curve")
# Show the legend
plt.legend()
# Invert y-axis as convention
plt.gca().invert_yaxis()
# Save to current folder
plt.savefig('curve.png')
# Display to screen
plt.show()
def plotband(_band, _period):
"""
Plots an observed band using pyplot
:param _band: Array to be plotted
:param _period: Period of object
"""
# Frequency = 1 / Period
_freq = 1 / _period
_xfit, _lobf = calclobf(_band, _period)
# Plot the data in the array to screen, lightly coloured and z rank behind the line of best fit
plt.style.use('seaborn-whitegrid')
plt.errorbar((_band[:, 0] * _freq) % 1, _band[:, 1], _band[:, 2], fmt='.', color='gray',
ecolor='lightgray', capsize=0, zorder=0)
# Plot the graph of the line of best fit
plt.plot(_xfit, _lobf, '-k', lw=2, zorder=2)
# Set x-axis limits to 1 period
plt.xlim(0, 1)
# Set graph and axis titles
plt.xlabel("Phase")
plt.ylabel("Magnitude")
plt.title("Folded light curve")
# Invert y-axis as convention
plt.gca().invert_yaxis()
# Display to screen
plt.show()
def calclobf(_band, _period):
"""
Creates a line of best fit using Lomb-Scargle methods
    :param _band: Band array to be fitted
:param _period: Period of object
:return: Returns a linearly spaced x-axis, with y-axis values for line of best fit
"""
# Create a model with 10 terms
_ls = LombScargle(_band[:, 0], _band[:, 1], _band[:, 2], nterms=10)
# Create n linearly spaced points between phase 0 and 1
_xfit = np.linspace(0, 1, 1000)
# Frequency = 1 / Period
_freq = 1 / _period
# Plot the line of best fit generated
_lobf = _ls.model(_xfit / _freq, _freq)
return _xfit, _lobf
def foldcurve(_band, _period):
"""
Folds the magnitude measurements to a light curve using provided period
:param _band: Observation band to be folded
:param _period: Period of object
:return: Array same size as _band, but with a phase instead of Julian date
"""
# Set epoch to first date observed
_epoch = _band[0][0]
# Iterate through array, update date to phase
for i in range(0, _band.shape[0]):
_band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1
# Return folded array
return _band
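# Worked example with made-up numbers: for an epoch of 2455000.0 d, a period of
# 2.5 d and an observation at 2455003.75 d, the phase is
# ((2455003.75 - 2455000.0) / 2.5) % 1 = 1.5 % 1 = 0.5.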
def doublearrayphase(_inputarray):
"""
Doubles a band array from phase 0 -> 1 to 0 -> 2 as convention
:param _inputarray: Array to be doubled
:return: Returns an array from phase 0 -> 2, size [n * 2, 3]
"""
# Create a new array twice the size of the input
_newarray = np.zeros((_inputarray.shape[0] * 2, _inputarray.shape[1]), dtype=float)
# Iterate through the input array
for i in range(0, _newarray.shape[0]):
# Before phase 1 simply copy data into new array
if i < _inputarray.shape[0]:
_newarray[i] = _inputarray[i]
# After phase 1, simply shift all phases by +1
else:
_newarray[i] = _inputarray[i - _inputarray.shape[0]]
_newarray[i][0] = _newarray[i][0] + 1
# Return the new doubled array
return _newarray
def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):
"""
Determines the black body curve and determines mass, radius and luminosities in solar units
:param _zband: Observed z-band
:param _yband: Observed y-band
:param _jband: Observed j-band
:param _hband: Observed h-band
:param _kband: Observed k-band
:param _parallax: Parallax angle (mas)
:param _perr: Parallax angle error (mas)
"""
# Set pyplot style to be consistent within the program
plt.style.use('seaborn-whitegrid')
# Import raw data to plot Hertzsprung-Russell diagram
_hrdata = inithr('hr.dat')
# Determine distance in parsecs
_distance = 1 / np.tan(_parallax * 10**-3)
_derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)
# Create single data array with all bands
_bands = [_zband, _yband, _jband, _hband, _kband]
_lambda = [0.9, 1.02, 1.22, 1.63, 2.2]
# Set up empty arrays for each star
_largestar = np.zeros((1, 2))
_smallstar = np.zeros((1, 2))
# Determine the spectral flux density from the large star
i = 0
while i < 5:
# Determine the maximum and minimum values of the observed band
_max, _min = lightcurve.maxminvals(_bands[i])
# The large star uses the maximum flux value (smallest magnitude)
_largestar = np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, i))], ndmin=2), axis=0)
i += 1
# Delete first empty row of the array
_largestar = np.delete(_largestar, 0, axis=0)
# Determine the spectral flux density from the small star
i = 0
while i < 5:
# Determine the maximum and minimum values of the observed band
_max, _min = lightcurve.maxminvals(_bands[i])
# Smaller star flux value is combined value minus the large star
_smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -
magtoflux(_min, i))], ndmin=2), axis=0)
i += 1
# Delete the first empty row of the array
_smallstar = np.delete(_smallstar, 0, axis=0)
# Determine the luminosity and effective temperature of each star
_luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)
_lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)
# Calculate luminosities in solar units
_solluma = _luma / (3.828*10**26)
_sollumb = _lumb / (3.828*10**26)
_lumaerr = _lumaerr / (3.828*10**26)
_lumberr = _lumberr / (3.828*10**26)
# Calculate masses using the mass/luminosity relation in solar mass units
    # N.B. only works as an approximation for main sequence stars, giants and dwarfs are not suitable for this
# approximation
_solmassa = np.power(_solluma, 1/3.5)
_solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2
_solmassb = np.power(_sollumb, 1/3.5)
_solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2
# Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature
_solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))
_solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))
_solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2
_solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2
# Output determined values to the screen and write to file
print('Values for the large star:')
print('Effective temperature: ' + str(round_sig(_wiena)))
print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))
print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))
print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))
print('-----------------------------------------------------')
print('Values for the small star:')
print('Effective temperature: ' + str(round_sig(_wienb)))
print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))
print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))
print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))
    # Convert from luminosity to absolute bolometric magnitude (zero point 3.0128e28 W)
_luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))
_lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))
# Plot Hertzsprung-Russell diagram using provided array
plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)
# Plot determined values for each star
plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')
plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')
# Set the x and y axis limits to sensible values
plt.legend()
plt.xlim(3000, 10000)
plt.ylim(-10, 20)
# Invert both axes as convention
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# Save figure to current folder
plt.savefig('hr.png')
# Display to screen
plt.show()
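# Rough sanity check of the relations above, using made-up values: a star of
# 10 solar luminosities gives M ~ 10**(1/3.5) ~ 1.9 solar masses, and at an
# effective temperature of 5778 K its radius is sqrt(10 / (5778/5778)**4)
# ~ 3.2 solar radii.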
def getwientemp(_inputdata, _distance, _derr, _id):
"""
Determines the effective temperature using Wien's law
:param _inputdata: Black body curve of object
    :param _distance: Distance to object (parsecs)
    :param _derr: Error in the distance (parsecs)
    :param _id: 1 for large star, 2 for small star
    :return: Luminosity, luminosity error and effective surface temperature
"""
# Maxwell-Boltzmann distribution formula probability density function
def curve(_x, _a, _scale):
_a1 = np.sqrt(2 / np.pi)
_a2 = _x**2 / (2 * _a**2)
return _scale * _a1 * (_x**2 * np.exp(-_a2)) / _a**3
# Set pyplot style to be consistent through the program
plt.style.use('seaborn-whitegrid')
# Convert the distance in parsecs to metres
_distance = 3.0857 * 10**16 * _distance
_derr = 3.0857 * 10**16 * _derr
# Create array for x and y axis data
_xdata = _inputdata[:, 0]
_ydata = _inputdata[:, 1]
_ydatalum = _ydata
# Iterate through each band and convert from Janskys to W/m^2/um
i = 0
while i < 5:
_ydata[i] = 3*10**14 * (_ydata[i] * 10**-26) / (Wavelength[i]**2)
i += 1
# Calculate optimal values and covariance using scipy curve_fit function
_popt, _pcov = curve_fit(curve, _xdata, _ydata)
# Create x axis to plot curve against
_x = np.linspace(0, 5, 100)
# Determine y value for each point on the x axis
_yplot = curve(_x, *_popt)
# Plot the curve to the screen
plt.plot(_x, _yplot)
    # Determine the area under the graph, integral gives total energy received per m^2
_area = np.trapz(_yplot, dx=5/100)
# Total luminosity found by multiplying by the surface area of a sphere with diameter of the distance
_lum = 4 * np.pi * _distance**2 * _area
_lumerr = 4 * np.pi * _distance * _derr * _area
# Peak value of Maxwell-Boltzmann distribution
_mu = 2 * _popt[0] * np.sqrt(2 / np.pi)
# Plot data on the graph
plt.plot(_xdata, _ydata, '.')
# Set axis labels
plt.xlabel('Wavelength (um)')
plt.ylabel('Spectral Irradiance (W m^-2 um^-1)')
if _id == 1:
_str = 'Large Star'
else:
_str = 'Small Star'
# Calculate effective surface temperature using Wien's law
_wien = round_sig(2898 / _mu)
    # Round luminosity to 3 significant figures
_lum = round_sig(_lum)
# Set graph title
plt.suptitle('Black Body Plot for the ' + _str)
# Save to current folder
_filename = _str + '.png'
plt.savefig(_filename)
# Display to the screen
plt.show()
# Returns calculated values
return _lum, _lumerr, _wien
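# Wien's law sanity check (illustrative only): a black body peaking at 0.50 um
# has an effective temperature of roughly 2898 / 0.50 ~ 5800 K, i.e. close to
# solar.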
def inithr(_filename):
"""
Parses required data for plotting a Hertzsprung-Russell diagram
:param _filename: File containing observed data
    :return: (n x 3) array containing absolute magnitude, effective temperature and distance (parsecs)
"""
# Open file provided
_file = open(_filename)
# Create empty array to hold data
_data = np.zeros((1, 3), dtype=float)
# Iterate through the file line by line |
# Iterate through data array
for _row in _data:
np.seterr(divide='ignore')
        # Convert apparent magnitude to absolute magnitude using the distance modulus
_row[0] = _row[0] - 5 * (np.log10(_row[2]) - 1)
# Convert B-V colour to temperature
_row[1] = 4600 * ((1 / (0.92 * _row[1] + 1.7)) + 1 / (0.92 * _row[1] + 0.62))
# Delete first empty row
_data = np.delete(_data, 0, axis=0)
# Return parsed data
return _data
def magtoflux(_mag, _id):
"""
Converts magnitude to flux in Janskys
:param _mag: Magnitude of object
:param _id: ID of observation band
:return: Spectral flux density in Janskys
"""
return math.pow(10, -0.4*(_mag + VegaToAB[_id] - 8.9)) | for _line in _file:
# Split each line into constituent values
_x = _line.split()
# Append data array with each value, converted to float, convert parallax angle to distance
_data = np.append(_data, np.array([float(_x[1]), float(_x[2]), (1 / float(_x[3]))], ndmin=2), axis=0) | random_line_split |
plot.py | #!/usr/bin/env python
"""
Author: Matthew Christey-Reid
Email: [email protected]
Date: 02/06/2020
plot.py - Main functions to determine properties of the eclipsing binaries from observed data
"""
# Import libraries for data processing
import math
import numpy as np
import matplotlib.pyplot as plt
from math import log10, floor
from scipy.optimize import curve_fit
# Import lightcurve functions
import lightcurve
# Import Lomb-Scargle periodogram functions
from astropy.timeseries import LombScargle
# Arrays to store common values for each observed band
VegaToAB = [0.528, 0.634, 0.938, 1.379, 1.9] # Constant for conversion between Vega and AB magnitude systems
Wavelength = [0.9, 1.02, 1.25, 1.6, 2.2] # Effective wavelength of observation band
def round_sig(_val):
"""
    Rounds the provided value to 3 significant figures
    :param _val: Value to be rounded
    :return: Float, original value rounded to 3 significant figures
"""
return round(_val, 3 - int(floor(log10(abs(_val)))) - 1)
def plotallbands(_zband, _yband, _jband, _hband, _kband, _period):
"""
Plots all observed bands to the same graph
:param _zband: Observed z-band
:param _yband: Observed y-band
:param _jband: Observed j-band
:param _hband: Observed h-band
:param _kband: Observed k-band
:param _period: Period of variability
"""
    # Set pyplot style to be consistent within the program
plt.style.use('seaborn-whitegrid')
# Frequency = 1 / Period
_freq = 1 / _period
# Create single dataset from all bands
_bands = [_zband, _yband, _jband, _hband, _kband]
# Iterate through each band and plot to screen
i = 0
while i < 5:
# Array to set colours for each band
_colours = ['-b', '-g', '-r', '-c', '-m']
# Array to set strings for graph legend
_legend = ['Z-band', 'Y-band', 'J-band', 'H-band', 'K-band']
# Determine the line of best fit for each band
_xfit, _lobf = calclobf(_bands[i], _period)
        # Plot the line of best fit for this band
plt.plot(_xfit, _lobf, _colours[i], lw=1, zorder=2, label=_legend[i])
i += 1
# Set x-axis limit to a single period
plt.xlim(0, 1)
# Set graph and axis titles
plt.xlabel("Phase")
plt.ylabel("Magnitude")
plt.title("Folded light curve")
# Show the legend
plt.legend()
# Invert y-axis as convention
plt.gca().invert_yaxis()
# Save to current folder
plt.savefig('curve.png')
# Display to screen
plt.show()
def plotband(_band, _period):
"""
Plots an observed band using pyplot
:param _band: Array to be plotted
:param _period: Period of object
"""
# Frequency = 1 / Period
_freq = 1 / _period
_xfit, _lobf = calclobf(_band, _period)
# Plot the data in the array to screen, lightly coloured and z rank behind the line of best fit
plt.style.use('seaborn-whitegrid')
plt.errorbar((_band[:, 0] * _freq) % 1, _band[:, 1], _band[:, 2], fmt='.', color='gray',
ecolor='lightgray', capsize=0, zorder=0)
# Plot the graph of the line of best fit
plt.plot(_xfit, _lobf, '-k', lw=2, zorder=2)
# Set x-axis limits to 1 period
plt.xlim(0, 1)
# Set graph and axis titles
plt.xlabel("Phase")
plt.ylabel("Magnitude")
plt.title("Folded light curve")
# Invert y-axis as convention
plt.gca().invert_yaxis()
# Display to screen
plt.show()
def calclobf(_band, _period):
"""
Creates a line of best fit using Lomb-Scargle methods
    :param _band: Band array to be fitted
:param _period: Period of object
:return: Returns a linearly spaced x-axis, with y-axis values for line of best fit
"""
# Create a model with 10 terms
_ls = LombScargle(_band[:, 0], _band[:, 1], _band[:, 2], nterms=10)
# Create n linearly spaced points between phase 0 and 1
_xfit = np.linspace(0, 1, 1000)
# Frequency = 1 / Period
_freq = 1 / _period
# Plot the line of best fit generated
_lobf = _ls.model(_xfit / _freq, _freq)
return _xfit, _lobf
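# Minimal usage sketch for calclobf. The period, noise level and magnitudes
# below are made-up placeholders, not values from the survey data used
# elsewhere in this module.
def _calclobf_example():
    _t = np.linspace(0, 10, 200)                      # observation times (days)
    _mag = 15.0 + 0.3 * np.sin(2 * np.pi * _t / 2.5)  # fake sinusoidal variable
    _err = np.full_like(_t, 0.02)                     # constant 0.02 mag errors
    _band = np.column_stack((_t, _mag, _err))         # [date, magnitude, error]
    return calclobf(_band, 2.5)                       # fit assuming a 2.5 d period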
def foldcurve(_band, _period):
"""
Folds the magnitude measurements to a light curve using provided period
:param _band: Observation band to be folded
:param _period: Period of object
:return: Array same size as _band, but with a phase instead of Julian date
"""
# Set epoch to first date observed
_epoch = _band[0][0]
# Iterate through array, update date to phase
for i in range(0, _band.shape[0]):
_band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1
# Return folded array
return _band
def doublearrayphase(_inputarray):
"""
Doubles a band array from phase 0 -> 1 to 0 -> 2 as convention
:param _inputarray: Array to be doubled
:return: Returns an array from phase 0 -> 2, size [n * 2, 3]
"""
# Create a new array twice the size of the input
_newarray = np.zeros((_inputarray.shape[0] * 2, _inputarray.shape[1]), dtype=float)
# Iterate through the input array
for i in range(0, _newarray.shape[0]):
# Before phase 1 simply copy data into new array
if i < _inputarray.shape[0]:
_newarray[i] = _inputarray[i]
# After phase 1, simply shift all phases by +1
else:
_newarray[i] = _inputarray[i - _inputarray.shape[0]]
_newarray[i][0] = _newarray[i][0] + 1
# Return the new doubled array
return _newarray
def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):
"""
Determines the black body curve and determines mass, radius and luminosities in solar units
:param _zband: Observed z-band
:param _yband: Observed y-band
:param _jband: Observed j-band
:param _hband: Observed h-band
:param _kband: Observed k-band
:param _parallax: Parallax angle (mas)
:param _perr: Parallax angle error (mas)
"""
# Set pyplot style to be consistent within the program
plt.style.use('seaborn-whitegrid')
# Import raw data to plot Hertzsprung-Russell diagram
_hrdata = inithr('hr.dat')
# Determine distance in parsecs
_distance = 1 / np.tan(_parallax * 10**-3)
_derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)
# Create single data array with all bands
_bands = [_zband, _yband, _jband, _hband, _kband]
_lambda = [0.9, 1.02, 1.22, 1.63, 2.2]
# Set up empty arrays for each star
_largestar = np.zeros((1, 2))
_smallstar = np.zeros((1, 2))
# Determine the spectral flux density from the large star
i = 0
while i < 5:
# Determine the maximum and minimum values of the observed band
|
# Delete first empty row of the array
_largestar = np.delete(_largestar, 0, axis=0)
# Determine the spectral flux density from the small star
i = 0
while i < 5:
# Determine the maximum and minimum values of the observed band
_max, _min = lightcurve.maxminvals(_bands[i])
# Smaller star flux value is combined value minus the large star
_smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -
magtoflux(_min, i))], ndmin=2), axis=0)
i += 1
# Delete the first empty row of the array
_smallstar = np.delete(_smallstar, 0, axis=0)
# Determine the luminosity and effective temperature of each star
_luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)
_lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)
# Calculate luminosities in solar units
_solluma = _luma / (3.828*10**26)
_sollumb = _lumb / (3.828*10**26)
_lumaerr = _lumaerr / (3.828*10**26)
_lumberr = _lumberr / (3.828*10**26)
# Calculate masses using the mass/luminosity relation in solar mass units
    # N.B. only works as an approximation for main sequence stars, giants and dwarfs are not suitable for this
# approximation
_solmassa = np.power(_solluma, 1/3.5)
_solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2
_solmassb = np.power(_sollumb, 1/3.5)
_solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2
# Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature
_solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))
_solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))
_solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2
_solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2
# Output determined values to the screen and write to file
print('Values for the large star:')
print('Effective temperature: ' + str(round_sig(_wiena)))
print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))
print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))
print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))
print('-----------------------------------------------------')
print('Values for the small star:')
print('Effective temperature: ' + str(round_sig(_wienb)))
print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))
print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))
print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))
    # Convert from luminosity to absolute bolometric magnitude (zero point 3.0128e28 W)
_luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))
_lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))
# Plot Hertzsprung-Russell diagram using provided array
plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)
# Plot determined values for each star
plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')
plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')
# Set the x and y axis limits to sensible values
plt.legend()
plt.xlim(3000, 10000)
plt.ylim(-10, 20)
# Invert both axes as convention
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# Save figure to current folder
plt.savefig('hr.png')
# Display to screen
plt.show()
def getwientemp(_inputdata, _distance, _derr, _id):
"""
Determines the effective temperature using Wien's law
:param _inputdata: Black body curve of object
    :param _distance: Distance to object (parsecs)
    :param _derr: Error in the distance (parsecs)
    :param _id: 1 for large star, 2 for small star
    :return: Luminosity, luminosity error and effective surface temperature
"""
# Maxwell-Boltzmann distribution formula probability density function
def curve(_x, _a, _scale):
_a1 = np.sqrt(2 / np.pi)
_a2 = _x**2 / (2 * _a**2)
return _scale * _a1 * (_x**2 * np.exp(-_a2)) / _a**3
# Set pyplot style to be consistent through the program
plt.style.use('seaborn-whitegrid')
# Convert the distance in parsecs to metres
_distance = 3.0857 * 10**16 * _distance
_derr = 3.0857 * 10**16 * _derr
# Create array for x and y axis data
_xdata = _inputdata[:, 0]
_ydata = _inputdata[:, 1]
_ydatalum = _ydata
# Iterate through each band and convert from Janskys to W/m^2/um
i = 0
while i < 5:
_ydata[i] = 3*10**14 * (_ydata[i] * 10**-26) / (Wavelength[i]**2)
i += 1
# Calculate optimal values and covariance using scipy curve_fit function
_popt, _pcov = curve_fit(curve, _xdata, _ydata)
# Create x axis to plot curve against
_x = np.linspace(0, 5, 100)
# Determine y value for each point on the x axis
_yplot = curve(_x, *_popt)
# Plot the curve to the screen
plt.plot(_x, _yplot)
    # Determine the area under the graph, integral gives total energy received per m^2
_area = np.trapz(_yplot, dx=5/100)
# Total luminosity found by multiplying by the surface area of a sphere with diameter of the distance
_lum = 4 * np.pi * _distance**2 * _area
_lumerr = 4 * np.pi * _distance * _derr * _area
# Peak value of Maxwell-Boltzmann distribution
_mu = 2 * _popt[0] * np.sqrt(2 / np.pi)
# Plot data on the graph
plt.plot(_xdata, _ydata, '.')
# Set axis labels
plt.xlabel('Wavelength (um)')
plt.ylabel('Spectral Irradiance (W m^-2 um^-1)')
if _id == 1:
_str = 'Large Star'
else:
_str = 'Small Star'
# Calculate effective surface temperature using Wien's law
_wien = round_sig(2898 / _mu)
    # Round luminosity to 3 significant figures
_lum = round_sig(_lum)
# Set graph title
plt.suptitle('Black Body Plot for the ' + _str)
# Save to current folder
_filename = _str + '.png'
plt.savefig(_filename)
# Display to the screen
plt.show()
# Returns calculated values
return _lum, _lumerr, _wien
def inithr(_filename):
"""
Parses required data for plotting a Hertzsprung-Russell diagram
:param _filename: File containing observed data
    :return: (n x 3) array containing absolute magnitude, effective temperature and distance (parsecs)
"""
# Open file provided
_file = open(_filename)
# Create empty array to hold data
_data = np.zeros((1, 3), dtype=float)
# Iterate through the file line by line
for _line in _file:
# Split each line into constituent values
_x = _line.split()
# Append data array with each value, converted to float, convert parallax angle to distance
_data = np.append(_data, np.array([float(_x[1]), float(_x[2]), (1 / float(_x[3]))], ndmin=2), axis=0)
# Iterate through data array
for _row in _data:
np.seterr(divide='ignore')
        # Convert apparent magnitude to absolute magnitude using the distance modulus
_row[0] = _row[0] - 5 * (np.log10(_row[2]) - 1)
# Convert B-V colour to temperature
_row[1] = 4600 * ((1 / (0.92 * _row[1] + 1.7)) + 1 / (0.92 * _row[1] + 0.62))
# Delete first empty row
_data = np.delete(_data, 0, axis=0)
# Return parsed data
return _data
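# Worked numbers for the two conversions above (illustrative only): an apparent
# magnitude of 10 at 100 pc gives an absolute magnitude of 10 - 5*(log10(100) - 1) = 5,
# and B-V = 0.65 gives T = 4600*(1/(0.92*0.65 + 1.7) + 1/(0.92*0.65 + 0.62)) ~ 5780 K.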
def magtoflux(_mag, _id):
"""
Converts magnitude to flux in Janskys
:param _mag: Magnitude of object
:param _id: ID of observation band
:return: Spectral flux density in Janskys
"""
return math.pow(10, -0.4*(_mag + VegaToAB[_id] - 8.9))
| _max, _min = lightcurve.maxminvals(_bands[i])
# The large star uses the maximum flux value (smallest magnitude)
_largestar = np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, i))], ndmin=2), axis=0)
i += 1 | conditional_block |
plot.py | #!/usr/bin/env python
"""
Author: Matthew Christey-Reid
Email: [email protected]
Date: 02/06/2020
plot.py - Main functions to determine properties of the eclipsing binaries from observed data
"""
# Import libraries for data processing
import math
import numpy as np
import matplotlib.pyplot as plt
from math import log10, floor
from scipy.optimize import curve_fit
# Import lightcurve functions
import lightcurve
# Import Lomb-Scargle periodogram functions
from astropy.timeseries import LombScargle
# Arrays to store common values for each observed band
VegaToAB = [0.528, 0.634, 0.938, 1.379, 1.9] # Constant for conversion between Vega and AB magnitude systems
Wavelength = [0.9, 1.02, 1.25, 1.6, 2.2] # Effective wavelength of observation band
def round_sig(_val):
|
def plotallbands(_zband, _yband, _jband, _hband, _kband, _period):
"""
Plots all observed bands to the same graph
:param _zband: Observed z-band
:param _yband: Observed y-band
:param _jband: Observed j-band
:param _hband: Observed h-band
:param _kband: Observed k-band
:param _period: Period of variability
"""
# Set pyplot style to be consisten within the program
plt.style.use('seaborn-whitegrid')
# Frequency = 1 / Period
_freq = 1 / _period
# Create single dataset from all bands
_bands = [_zband, _yband, _jband, _hband, _kband]
# Iterate through each band and plot to screen
i = 0
while i < 5:
# Array to set colours for each band
_colours = ['-b', '-g', '-r', '-c', '-m']
# Array to set strings for graph legend
_legend = ['Z-band', 'Y-band', 'J-band', 'H-band', 'K-band']
# Determine the line of best fit for each band
_xfit, _lobf = calclobf(_bands[i], _period)
        # Plot the line of best fit for this band
plt.plot(_xfit, _lobf, _colours[i], lw=1, zorder=2, label=_legend[i])
i += 1
# Set x-axis limit to a single period
plt.xlim(0, 1)
# Set graph and axis titles
plt.xlabel("Phase")
plt.ylabel("Magnitude")
plt.title("Folded light curve")
# Show the legend
plt.legend()
# Invert y-axis as convention
plt.gca().invert_yaxis()
# Save to current folder
plt.savefig('curve.png')
# Display to screen
plt.show()
def plotband(_band, _period):
"""
Plots an observed band using pyplot
:param _band: Array to be plotted
:param _period: Period of object
"""
# Frequency = 1 / Period
_freq = 1 / _period
_xfit, _lobf = calclobf(_band, _period)
# Plot the data in the array to screen, lightly coloured and z rank behind the line of best fit
plt.style.use('seaborn-whitegrid')
plt.errorbar((_band[:, 0] * _freq) % 1, _band[:, 1], _band[:, 2], fmt='.', color='gray',
ecolor='lightgray', capsize=0, zorder=0)
# Plot the graph of the line of best fit
plt.plot(_xfit, _lobf, '-k', lw=2, zorder=2)
# Set x-axis limits to 1 period
plt.xlim(0, 1)
# Set graph and axis titles
plt.xlabel("Phase")
plt.ylabel("Magnitude")
plt.title("Folded light curve")
# Invert y-axis as convention
plt.gca().invert_yaxis()
# Display to screen
plt.show()
def calclobf(_band, _period):
"""
Creates a line of best fit using Lomb-Scargle methods
    :param _band: Band array to be fitted
:param _period: Period of object
:return: Returns a linearly spaced x-axis, with y-axis values for line of best fit
"""
# Create a model with 10 terms
_ls = LombScargle(_band[:, 0], _band[:, 1], _band[:, 2], nterms=10)
# Create n linearly spaced points between phase 0 and 1
_xfit = np.linspace(0, 1, 1000)
# Frequency = 1 / Period
_freq = 1 / _period
# Plot the line of best fit generated
_lobf = _ls.model(_xfit / _freq, _freq)
return _xfit, _lobf
def foldcurve(_band, _period):
"""
Folds the magnitude measurements to a light curve using provided period
:param _band: Observation band to be folded
:param _period: Period of object
:return: Array same size as _band, but with a phase instead of Julian date
"""
# Set epoch to first date observed
_epoch = _band[0][0]
# Iterate through array, update date to phase
for i in range(0, _band.shape[0]):
_band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1
# Return folded array
return _band
def doublearrayphase(_inputarray):
"""
Doubles a band array from phase 0 -> 1 to 0 -> 2 as convention
:param _inputarray: Array to be doubled
:return: Returns an array from phase 0 -> 2, size [n * 2, 3]
"""
# Create a new array twice the size of the input
_newarray = np.zeros((_inputarray.shape[0] * 2, _inputarray.shape[1]), dtype=float)
# Iterate through the input array
for i in range(0, _newarray.shape[0]):
# Before phase 1 simply copy data into new array
if i < _inputarray.shape[0]:
_newarray[i] = _inputarray[i]
# After phase 1, simply shift all phases by +1
else:
_newarray[i] = _inputarray[i - _inputarray.shape[0]]
_newarray[i][0] = _newarray[i][0] + 1
# Return the new doubled array
return _newarray
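# Shape check with a made-up input: three points at phases [0.0, 0.4, 0.8]
# become six points at [0.0, 0.4, 0.8, 1.0, 1.4, 1.8], so the folded curve can
# be drawn over two full phases as is conventional.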
def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):
"""
Determines the black body curve and determines mass, radius and luminosities in solar units
:param _zband: Observed z-band
:param _yband: Observed y-band
:param _jband: Observed j-band
:param _hband: Observed h-band
:param _kband: Observed k-band
:param _parallax: Parallax angle (mas)
:param _perr: Parallax angle error (mas)
"""
# Set pyplot style to be consistent within the program
plt.style.use('seaborn-whitegrid')
# Import raw data to plot Hertzsprung-Russell diagram
_hrdata = inithr('hr.dat')
# Determine distance in parsecs
_distance = 1 / np.tan(_parallax * 10**-3)
_derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)
# Create single data array with all bands
_bands = [_zband, _yband, _jband, _hband, _kband]
_lambda = [0.9, 1.02, 1.22, 1.63, 2.2]
# Set up empty arrays for each star
_largestar = np.zeros((1, 2))
_smallstar = np.zeros((1, 2))
# Determine the spectral flux density from the large star
i = 0
while i < 5:
# Determine the maximum and minimum values of the observed band
_max, _min = lightcurve.maxminvals(_bands[i])
# The large star uses the maximum flux value (smallest magnitude)
_largestar = np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, i))], ndmin=2), axis=0)
i += 1
# Delete first empty row of the array
_largestar = np.delete(_largestar, 0, axis=0)
# Determine the spectral flux density from the small star
i = 0
while i < 5:
# Determine the maximum and minimum values of the observed band
_max, _min = lightcurve.maxminvals(_bands[i])
# Smaller star flux value is combined value minus the large star
_smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -
magtoflux(_min, i))], ndmin=2), axis=0)
i += 1
# Delete the first empty row of the array
_smallstar = np.delete(_smallstar, 0, axis=0)
# Determine the luminosity and effective temperature of each star
_luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)
_lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)
# Calculate luminosities in solar units
_solluma = _luma / (3.828*10**26)
_sollumb = _lumb / (3.828*10**26)
_lumaerr = _lumaerr / (3.828*10**26)
_lumberr = _lumberr / (3.828*10**26)
# Calculate masses using the mass/luminosity relation in solar mass units
    # N.B. only works as an approximation for main sequence stars, giants and dwarfs are not suitable for this
# approximation
_solmassa = np.power(_solluma, 1/3.5)
_solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2
_solmassb = np.power(_sollumb, 1/3.5)
_solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2
# Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature
_solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))
_solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))
_solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2
_solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2
# Output determined values to the screen and write to file
print('Values for the large star:')
print('Effective temperature: ' + str(round_sig(_wiena)))
print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))
print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))
print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))
print('-----------------------------------------------------')
print('Values for the small star:')
print('Effective temperature: ' + str(round_sig(_wienb)))
print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))
print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))
print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))
    # Convert from luminosity to absolute bolometric magnitude (zero point 3.0128e28 W)
_luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))
_lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))
# Plot Hertzsprung-Russell diagram using provided array
plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)
# Plot determined values for each star
plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')
plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')
# Set the x and y axis limits to sensible values
plt.legend()
plt.xlim(3000, 10000)
plt.ylim(-10, 20)
# Invert both axes as convention
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# Save figure to current folder
plt.savefig('hr.png')
# Display to screen
plt.show()
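# Distance sanity check (illustrative only): a parallax of 10 mas is 0.01 arcsec,
# giving roughly 1 / 0.01 = 100 pc; since tan(x) ~ x for small x, the tan() form
# used in plotblackbody above reduces to the same 1 / parallax relation.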
def getwientemp(_inputdata, _distance, _derr, _id):
"""
Determines the effective temperature using Wien's law
:param _inputdata: Black body curve of object
    :param _distance: Distance to object (parsecs)
    :param _derr: Error in the distance (parsecs)
    :param _id: 1 for large star, 2 for small star
    :return: Luminosity, luminosity error and effective surface temperature
"""
# Maxwell-Boltzmann distribution formula probability density function
def curve(_x, _a, _scale):
_a1 = np.sqrt(2 / np.pi)
_a2 = _x**2 / (2 * _a**2)
return _scale * _a1 * (_x**2 * np.exp(-_a2)) / _a**3
# Set pyplot style to be consistent through the program
plt.style.use('seaborn-whitegrid')
# Convert the distance in parsecs to metres
_distance = 3.0857 * 10**16 * _distance
_derr = 3.0857 * 10**16 * _derr
# Create array for x and y axis data
_xdata = _inputdata[:, 0]
_ydata = _inputdata[:, 1]
_ydatalum = _ydata
# Iterate through each band and convert from Janskys to W/m^2/um
i = 0
while i < 5:
_ydata[i] = 3*10**14 * (_ydata[i] * 10**-26) / (Wavelength[i]**2)
i += 1
# Calculate optimal values and covariance using scipy curve_fit function
_popt, _pcov = curve_fit(curve, _xdata, _ydata)
# Create x axis to plot curve against
_x = np.linspace(0, 5, 100)
# Determine y value for each point on the x axis
_yplot = curve(_x, *_popt)
# Plot the curve to the screen
plt.plot(_x, _yplot)
    # Determine the area under the graph, integral gives total energy received per m^2
_area = np.trapz(_yplot, dx=5/100)
# Total luminosity found by multiplying by the surface area of a sphere with diameter of the distance
_lum = 4 * np.pi * _distance**2 * _area
_lumerr = 4 * np.pi * _distance * _derr * _area
# Peak value of Maxwell-Boltzmann distribution
_mu = 2 * _popt[0] * np.sqrt(2 / np.pi)
# Plot data on the graph
plt.plot(_xdata, _ydata, '.')
# Set axis labels
plt.xlabel('Wavelength (um)')
plt.ylabel('Spectral Irradiance (W m^-2 um^-1)')
if _id == 1:
_str = 'Large Star'
else:
_str = 'Small Star'
# Calculate effective surface temperature using Wien's law
_wien = round_sig(2898 / _mu)
    # Round luminosity to 3 significant figures
_lum = round_sig(_lum)
# Set graph title
plt.suptitle('Black Body Plot for the ' + _str)
# Save to current folder
_filename = _str + '.png'
plt.savefig(_filename)
# Display to the screen
plt.show()
# Returns calculated values
return _lum, _lumerr, _wien
def inithr(_filename):
"""
Parses required data for plotting a Hertzsprung-Russell diagram
:param _filename: File containing observed data
    :return: (n x 3) array containing absolute magnitude, effective temperature and distance (parsecs)
"""
# Open file provided
_file = open(_filename)
# Create empty array to hold data
_data = np.zeros((1, 3), dtype=float)
# Iterate through the file line by line
for _line in _file:
# Split each line into constituent values
_x = _line.split()
# Append data array with each value, converted to float, convert parallax angle to distance
_data = np.append(_data, np.array([float(_x[1]), float(_x[2]), (1 / float(_x[3]))], ndmin=2), axis=0)
# Iterate through data array
for _row in _data:
np.seterr(divide='ignore')
        # Convert apparent magnitude to absolute magnitude using the distance modulus
_row[0] = _row[0] - 5 * (np.log10(_row[2]) - 1)
# Convert B-V colour to temperature
_row[1] = 4600 * ((1 / (0.92 * _row[1] + 1.7)) + 1 / (0.92 * _row[1] + 0.62))
# Delete first empty row
_data = np.delete(_data, 0, axis=0)
# Return parsed data
return _data
def magtoflux(_mag, _id):
"""
Converts magnitude to flux in Janskys
:param _mag: Magnitude of object
:param _id: ID of observation band
:return: Spectral flux density in Janskys
"""
return math.pow(10, -0.4*(_mag + VegaToAB[_id] - 8.9))
| """
    Rounds the provided value to 3 significant figures
    :param _val: Value to be rounded
    :return: Float, original value rounded to 3 significant figures
"""
return round(_val, 3 - int(floor(log10(abs(_val)))) - 1) | identifier_body |
routing.rs | //! Functions for adding ingress/egress nodes.
//!
//! In particular:
//!
//! - New nodes that are children of nodes in a different domain must be preceded by an ingress
//! - Egress nodes must be added to nodes that now have children in a different domain
//! - Egress nodes that gain new children must gain channels to facilitate forwarding
//! - Timestamp ingress nodes for existing domains must be connected to new base nodes
//! - Timestamp ingress nodes must be added to all new domains
use flow::prelude::*;
use flow::domain;
use flow::node;
use petgraph;
use petgraph::graph::NodeIndex;
use std::collections::{HashSet, HashMap};
use std::sync::mpsc;
use slog::Logger;
/// Add in ingress and egress nodes as appropriate in the graph to facilitate cross-domain
/// communication.
pub fn add(log: &Logger,
graph: &mut Graph,
source: NodeIndex,
new: &mut HashSet<NodeIndex>)
-> HashMap<domain::Index, HashMap<NodeIndex, NodeIndex>> {
// find all new nodes in topological order. we collect first since we'll be mutating the graph
// below. it's convenient to have the nodes in topological order, because we then know that
// we'll first add egress nodes, and then the related ingress nodes. if we're ever required to
// add an ingress node, and its parent isn't an egress node, we know that we're seeing a
// connection between an old node in one domain, and a new node in a different domain.
let mut topo_list = Vec::with_capacity(new.len());
let mut topo = petgraph::visit::Topo::new(&*graph);
while let Some(node) = topo.next(&*graph) {
if node == source {
continue;
}
if !new.contains(&node) {
continue;
}
topo_list.push(node);
}
// we need to keep track of all the times we change the parent of a node (by replacing it with
// an egress, and then with an ingress), since this remapping must be communicated to the nodes
// so they know the true identifier of their parent in the graph.
let mut swaps = HashMap::new();
// we also need to keep track of the ingress nodes we've added to each domain so that we don't
// end up with two ingress nodes for a given egress node. that would cause unnecessary
// cross-domain communication. this is domain => source => NodeIndex (of ingress). note that
// `source` here is actually the *egress* node. this is because by the time we add ingress
// nodes, we know our incoming edges have already been updated to point to the egress nodes.
let mut ingresses = HashMap::new();
for node in topo_list {
let domain = graph[node].domain();
// First, we add egress nodes for any of our cross-domain children
let children: Vec<_> = graph.neighbors_directed(node, petgraph::EdgeDirection::Outgoing)
.collect(); // collect so we can mutate graph
// We then need to make sure that we're acting on up-to-date information about existing
// egress/ingress pairs. In particular, we want to know about egresses this node already
// has (and to which domains). In the process we also populate the information about
// ingress nodes in other domains that point here (those shouldn't be re-created if new
// nodes are created in the corresponding domains).
let mut egresses = HashMap::new();
for child in &children {
if !new.contains(child) {
continue;
}
if let node::Type::Egress { .. } = *graph[*child] {
for ingress in graph.neighbors_directed(*child, petgraph::EdgeDirection::Outgoing) {
// this egress already contains this node to the ingress' domain
egresses.insert(graph[ingress].domain(), *child);
// also keep track of the corresponding ingress node so we can re-use it
ingresses.entry(graph[ingress].domain())
.or_insert_with(HashMap::new)
.insert(node, ingress);
}
}
}
for child in children {
let cdomain = graph[child].domain();
if domain != cdomain {
// child is in a different domain
if !egresses.contains_key(&cdomain) {
// create an egress node to handle that
// NOTE: technically, this doesn't need to mirror its parent, but meh
let proxy = graph[node].mirror(node::Type::Egress {
tags: Default::default(),
txs: Default::default(),
});
let egress = graph.add_node(proxy);
graph.add_edge(node, egress, false);
new.insert(egress);
egresses.insert(cdomain, egress);
trace!(log, "adding cross-domain egress to new node"; "node" => node.index(), "egress" => egress.index());
} else {
trace!(log, "re-using cross-domain egress to new node"; "node" => node.index(), "egress" => egresses[&cdomain].index());
}
// we need to hook that node in between us and this child
let egress = egresses[&cdomain];
let old = graph.find_edge(node, child).unwrap();
let was_materialized = graph.remove_edge(old).unwrap();
graph.add_edge(egress, child, was_materialized);
// this ends up being re-executed, but that's okay
swaps.entry(cdomain).or_insert_with(HashMap::new).insert(node, egress);
}
}
// Then, we look for any parents in the graph that
//
// a) are in a different domain, and
// b) aren't egress nodes
//
// This situation arises whenever a cross-domain edge is added as the result of a
// migration. We need to find or make an egress domain in that other domain, and hook that
// up as the parent of this node instead of the original internal foreign domain node.
//
// Note that same-domain parents are never interesting to us for this purpose.
let mut parents: Vec<_> = graph.neighbors_directed(node, petgraph::EdgeDirection::Incoming)
.filter(|&ni| ni == source || graph[ni].domain() != domain)
.collect(); // collect so we can mutate graph
for parent in &mut parents {
if *parent == source {
// no egress needed
continue;
}
// since we are traversing in topological order, egress nodes should have been added to
// all our parents, and our incoming edges should have been updated. if that *isn't*
// the case for a given parent, it must be a pre-existing parent.
if let node::Type::Egress { .. } = *graph[*parent] {
continue;
}
// let's first see if this parent already has an egress we can use
let egress = graph.neighbors_directed(*parent, petgraph::EdgeDirection::Outgoing)
.find(|&ni| graph[ni].is_egress());
let egress = egress.unwrap_or_else(|| {
// no, okay, so we need to add an egress for that other node,
let proxy = graph[*parent].mirror(node::Type::Egress{
txs: Default::default(),
tags: Default::default()
});
let egress = graph.add_node(proxy);
trace!(log, "adding cross-domain egress to existing node"; "node" => parent.index(), "egress" => egress.index());
graph.add_edge(*parent, egress, false);
new.insert(egress);
egress
});
// now, let's use that egress as our parent instead
let old = graph.find_edge(*parent, node).unwrap();
let was_materialized = graph.remove_edge(old).unwrap();
graph.add_edge(egress, node, was_materialized);
// all references to our original parent should now refer to the egress
swaps.entry(domain).or_insert_with(HashMap::new).insert(*parent, egress);
// and we should now just consider the egress our parent instead
*parent = egress;
}
// Now that we know all our foreign parents are egress nodes, we can add ingress nodes.
// Note that by this time (due to the topological walk), we know that `ingresses` has been
// sufficiently populated to contain any relevant existing ingress nodes.
for parent in parents {
// is there already an ingress node we can re-use?
let mut ingress =
ingresses.get(&domain).and_then(|ingresses| ingresses.get(&parent)).map(|ni| *ni);
if ingress.is_none() {
// nope -- create our new ingress node
let mut i = graph[parent].mirror(node::Type::Ingress);
i.add_to(domain); // it belongs to this domain, not that of the parent
let i = graph.add_node(i);
graph.add_edge(parent, i, false);
// we also now need to deal with this ingress node
new.insert(i);
if parent == source {
trace!(log, "adding source ingress"; "base" => node.index(), "ingress" => i.index());
// we don't re-use source ingress nodes
} else {
trace!(log, "adding cross-domain ingress"; "to" => node.index(), "from" => parent.index(), "ingress" => i.index());
ingresses.entry(domain).or_insert_with(HashMap::new).insert(parent, i);
}
ingress = Some(i);
} else {
trace!(log, "re-using cross-domain ingress"; "to" => node.index(), "from" => parent.index(), "ingress" => ingress.unwrap().index());
}
let ingress = ingress.unwrap();
// we need to hook the ingress node in between us and the parent
let old = graph.find_edge(parent, node).unwrap();
let was_materialized = graph.remove_edge(old).unwrap();
graph.add_edge(ingress, node, was_materialized);
// tracking swaps here is a bit tricky because we've already swapped the "true" parents
// of `node` with the ids of the egress nodes. thus, we actually need to do swaps on
// the values in `swaps`, not insert new entries (that, or we'd need to change the
// resolution process to be recursive, which is painful and unnecessary). note that we
            // *also* need to special-case handling base nodes, because there *won't* be a
// parent egress swap
if parent != source {
for (_, to) in swaps.get_mut(&domain).unwrap().iter_mut() {
if *to == parent {
*to = ingress;
}
}
}
}
}
swaps
}
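// Illustrative sketch only: the edge-rewiring pattern used above (remove the
// parent -> child edge, then route parent -> egress -> child while preserving
// the edge weight) shown on a plain petgraph graph with placeholder node and
// edge types rather than this crate's own aliases.
#[cfg(test)]
mod splice_sketch {
    use petgraph::graph::Graph;

    #[test]
    fn splice_in_intermediate_node() {
        let mut g: Graph<&str, bool> = Graph::new();
        let node = g.add_node("node");
        let child = g.add_node("child");
        g.add_edge(node, child, true);

        // create the intermediate node and hook it in between the two
        let egress = g.add_node("egress");
        g.add_edge(node, egress, false);
        let old = g.find_edge(node, child).unwrap();
        let was_materialized = g.remove_edge(old).unwrap();
        g.add_edge(egress, child, was_materialized);

        // the original edge is gone and its weight now lives on egress -> child
        assert!(g.find_edge(node, child).is_none());
        let new_edge = g.find_edge(egress, child).unwrap();
        assert_eq!(g.remove_edge(new_edge), Some(true));
    }
}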
pub fn connect(log: &Logger,
graph: &mut Graph,
main_txs: &HashMap<domain::Index, mpsc::SyncSender<Packet>>,
new: &HashSet<NodeIndex>) | {
// ensure all egress nodes contain the tx channel of the domains of their child ingress nodes
for &node in new {
let n = &graph[node];
if let node::Type::Ingress = **n {
// check the egress connected to this ingress
} else {
continue;
}
for egress in graph.neighbors_directed(node, petgraph::EdgeDirection::Incoming) {
match *graph[egress] {
node::Type::Egress { ref txs, .. } => {
trace!(log, "connecting"; "egress" => egress.index(), "ingress" => node.index());
txs.lock()
.unwrap()
.push((node.into(), n.addr(), main_txs[&n.domain()].clone()));
continue;
}
node::Type::Source => continue,
_ => unreachable!("ingress parent is not egress"),
}
}
}
} | identifier_body |
|
routing.rs | //! Functions for adding ingress/egress nodes.
//!
//! In particular:
//!
//! - New nodes that are children of nodes in a different domain must be preceded by an ingress
//! - Egress nodes must be added to nodes that now have children in a different domain
//! - Egress nodes that gain new children must gain channels to facilitate forwarding
//! - Timestamp ingress nodes for existing domains must be connected to new base nodes
//! - Timestamp ingress nodes must be added to all new domains
use flow::prelude::*;
use flow::domain;
use flow::node;
use petgraph;
use petgraph::graph::NodeIndex;
use std::collections::{HashSet, HashMap};
use std::sync::mpsc;
use slog::Logger;
/// Add in ingress and egress nodes as appropriate in the graph to facilitate cross-domain
/// communication.
pub fn add(log: &Logger,
graph: &mut Graph,
source: NodeIndex,
new: &mut HashSet<NodeIndex>)
-> HashMap<domain::Index, HashMap<NodeIndex, NodeIndex>> {
// find all new nodes in topological order. we collect first since we'll be mutating the graph
// below. it's convenient to have the nodes in topological order, because we then know that
// we'll first add egress nodes, and then the related ingress nodes. if we're ever required to
// add an ingress node, and its parent isn't an egress node, we know that we're seeing a
// connection between an old node in one domain, and a new node in a different domain.
let mut topo_list = Vec::with_capacity(new.len());
let mut topo = petgraph::visit::Topo::new(&*graph);
while let Some(node) = topo.next(&*graph) {
if node == source {
continue;
}
if !new.contains(&node) {
continue;
}
topo_list.push(node);
}
// we need to keep track of all the times we change the parent of a node (by replacing it with
// an egress, and then with an ingress), since this remapping must be communicated to the nodes
// so they know the true identifier of their parent in the graph.
let mut swaps = HashMap::new();
// we also need to keep track of the ingress nodes we've added to each domain so that we don't
// end up with two ingress nodes for a given egress node. that would cause unnecessary
// cross-domain communication. this is domain => source => NodeIndex (of ingress). note that
// `source` here is actually the *egress* node. this is because by the time we add ingress
// nodes, we know our incoming edges have already been updated to point to the egress nodes.
let mut ingresses = HashMap::new();
for node in topo_list {
let domain = graph[node].domain();
// First, we add egress nodes for any of our cross-domain children
let children: Vec<_> = graph.neighbors_directed(node, petgraph::EdgeDirection::Outgoing)
.collect(); // collect so we can mutate graph
// We then need to make sure that we're acting on up-to-date information about existing
// egress/ingress pairs. In particular, we want to know about egresses this node already
// has (and to which domains). In the process we also populate the information about
// ingress nodes in other domains that point here (those shouldn't be re-created if new
// nodes are created in the corresponding domains).
let mut egresses = HashMap::new();
for child in &children {
if !new.contains(child) {
continue;
}
if let node::Type::Egress { .. } = *graph[*child] {
for ingress in graph.neighbors_directed(*child, petgraph::EdgeDirection::Outgoing) {
// this egress already contains this node to the ingress' domain
egresses.insert(graph[ingress].domain(), *child);
// also keep track of the corresponding ingress node so we can re-use it
ingresses.entry(graph[ingress].domain())
.or_insert_with(HashMap::new)
.insert(node, ingress);
}
}
}
for child in children {
let cdomain = graph[child].domain();
if domain != cdomain {
// child is in a different domain
if !egresses.contains_key(&cdomain) {
// create an egress node to handle that
// NOTE: technically, this doesn't need to mirror its parent, but meh
let proxy = graph[node].mirror(node::Type::Egress {
tags: Default::default(),
txs: Default::default(),
});
let egress = graph.add_node(proxy);
graph.add_edge(node, egress, false);
new.insert(egress);
egresses.insert(cdomain, egress);
trace!(log, "adding cross-domain egress to new node"; "node" => node.index(), "egress" => egress.index());
} else {
trace!(log, "re-using cross-domain egress to new node"; "node" => node.index(), "egress" => egresses[&cdomain].index());
}
// we need to hook that node in between us and this child
let egress = egresses[&cdomain];
let old = graph.find_edge(node, child).unwrap();
let was_materialized = graph.remove_edge(old).unwrap();
graph.add_edge(egress, child, was_materialized);
// this ends up being re-executed, but that's okay
swaps.entry(cdomain).or_insert_with(HashMap::new).insert(node, egress);
}
}
// Then, we look for any parents in the graph that
//
// a) are in a different domain, and
// b) aren't egress nodes
//
// This situation arises whenever a cross-domain edge is added as the result of a
// migration. We need to find or make an egress domain in that other domain, and hook that
// up as the parent of this node instead of the original internal foreign domain node.
//
// Note that same-domain parents are never interesting to us for this purpose.
let mut parents: Vec<_> = graph.neighbors_directed(node, petgraph::EdgeDirection::Incoming)
.filter(|&ni| ni == source || graph[ni].domain() != domain)
.collect(); // collect so we can mutate graph
for parent in &mut parents {
if *parent == source {
// no egress needed
continue;
}
// since we are traversing in topological order, egress nodes should have been added to
// all our parents, and our incoming edges should have been updated. if that *isn't*
// the case for a given parent, it must be a pre-existing parent.
if let node::Type::Egress { .. } = *graph[*parent] {
continue;
}
// let's first see if this parent already has an egress we can use
let egress = graph.neighbors_directed(*parent, petgraph::EdgeDirection::Outgoing)
.find(|&ni| graph[ni].is_egress());
let egress = egress.unwrap_or_else(|| {
// no, okay, so we need to add an egress for that other node,
let proxy = graph[*parent].mirror(node::Type::Egress{
txs: Default::default(),
tags: Default::default()
});
let egress = graph.add_node(proxy);
trace!(log, "adding cross-domain egress to existing node"; "node" => parent.index(), "egress" => egress.index());
graph.add_edge(*parent, egress, false);
new.insert(egress);
egress
});
// now, let's use that egress as our parent instead
let old = graph.find_edge(*parent, node).unwrap();
let was_materialized = graph.remove_edge(old).unwrap();
graph.add_edge(egress, node, was_materialized);
// all references to our original parent should now refer to the egress
swaps.entry(domain).or_insert_with(HashMap::new).insert(*parent, egress);
// and we should now just consider the egress our parent instead
*parent = egress;
}
// Now that we know all our foreign parents are egress nodes, we can add ingress nodes.
// Note that by this time (due to the topological walk), we know that `ingresses` has been
// sufficiently populated to contain any relevant existing ingress nodes.
for parent in parents {
// is there already an ingress node we can re-use?
let mut ingress =
ingresses.get(&domain).and_then(|ingresses| ingresses.get(&parent)).map(|ni| *ni);
if ingress.is_none() {
// nope -- create our new ingress node
let mut i = graph[parent].mirror(node::Type::Ingress);
i.add_to(domain); // it belongs to this domain, not that of the parent
let i = graph.add_node(i);
graph.add_edge(parent, i, false);
// we also now need to deal with this ingress node
new.insert(i);
if parent == source {
trace!(log, "adding source ingress"; "base" => node.index(), "ingress" => i.index());
// we don't re-use source ingress nodes
} else {
trace!(log, "adding cross-domain ingress"; "to" => node.index(), "from" => parent.index(), "ingress" => i.index());
ingresses.entry(domain).or_insert_with(HashMap::new).insert(parent, i);
}
ingress = Some(i);
} else {
trace!(log, "re-using cross-domain ingress"; "to" => node.index(), "from" => parent.index(), "ingress" => ingress.unwrap().index());
}
let ingress = ingress.unwrap();
// we need to hook the ingress node in between us and the parent
let old = graph.find_edge(parent, node).unwrap();
let was_materialized = graph.remove_edge(old).unwrap();
graph.add_edge(ingress, node, was_materialized);
// tracking swaps here is a bit tricky because we've already swapped the "true" parents
// of `node` with the ids of the egress nodes. thus, we actually need to do swaps on
// the values in `swaps`, not insert new entries (that, or we'd need to change the
// resolution process to be recursive, which is painful and unnecessary). note that we
// *also* need to special-case handling base nodes, because there *won't* be a
// parent egress swap
if parent != source {
for (_, to) in swaps.get_mut(&domain).unwrap().iter_mut() {
if *to == parent {
*to = ingress;
}
}
}
}
}
swaps
}
pub fn | (log: &Logger,
graph: &mut Graph,
main_txs: &HashMap<domain::Index, mpsc::SyncSender<Packet>>,
new: &HashSet<NodeIndex>) {
// ensure all egress nodes contain the tx channel of the domains of their child ingress nodes
for &node in new {
let n = &graph[node];
if let node::Type::Ingress = **n {
// check the egress connected to this ingress
} else {
continue;
}
for egress in graph.neighbors_directed(node, petgraph::EdgeDirection::Incoming) {
match *graph[egress] {
node::Type::Egress { ref txs, .. } => {
trace!(log, "connecting"; "egress" => egress.index(), "ingress" => node.index());
txs.lock()
.unwrap()
.push((node.into(), n.addr(), main_txs[&n.domain()].clone()));
continue;
}
node::Type::Source => continue,
_ => unreachable!("ingress parent is not egress"),
}
}
}
}
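// A minimal, self-contained sketch (std types only; the real code uses the flow::*
// types above) of the bookkeeping `connect` performs: each egress keeps a locked
// Vec of (child, tx) entries and receives a clone of the child ingress domain's
// SyncSender. The domain index and payload type here are made up for illustration.
#[allow(dead_code)]
fn _connect_wiring_sketch() {
    use std::collections::HashMap;
    use std::sync::{mpsc, Mutex};
    // stand-in for `main_txs`: one input channel per domain index
    let (tx, _rx) = mpsc::sync_channel::<&'static str>(16);
    let mut main_txs: HashMap<usize, mpsc::SyncSender<&'static str>> = HashMap::new();
    main_txs.insert(1, tx);
    // stand-in for the `txs` field of an egress node
    let egress_txs: Mutex<Vec<(usize, mpsc::SyncSender<&'static str>)>> = Mutex::new(Vec::new());
    // the child ingress lives in domain 1, so the egress learns domain 1's channel
    egress_txs.lock().unwrap().push((1, main_txs[&1].clone()));
    assert_eq!(egress_txs.lock().unwrap().len(), 1);
}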
| connect | identifier_name |
routing.rs | //! Functions for adding ingress/egress nodes.
//!
//! In particular:
//!
//! - New nodes that are children of nodes in a different domain must be preceded by an ingress
//! - Egress nodes must be added to nodes that now have children in a different domain
//! - Egress nodes that gain new children must gain channels to facilitate forwarding
//! - Timestamp ingress nodes for existing domains must be connected to new base nodes
//! - Timestamp ingress nodes must be added to all new domains
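// Illustrative sketch only (plain petgraph graph, not the `Graph`/`node` types used
// below): the shape this pass produces for a single cross-domain edge a@d0 -> b@d1.
// The direct edge disappears and traffic flows a -> egress(d0) -> ingress(d1) -> b,
// with the original edge weight ("materialized") preserved on the final hop.
#[cfg(test)]
mod cross_domain_shape_sketch {
    use petgraph::graph::Graph;

    #[test]
    fn rewires_one_edge() {
        let mut g = Graph::<&str, bool>::new();
        let a = g.add_node("a@d0");
        let b = g.add_node("b@d1");
        let direct = g.add_edge(a, b, true);
        // what the pass does for this edge:
        let egress = g.add_node("egress@d0");
        let ingress = g.add_node("ingress@d1");
        let was_materialized = g.remove_edge(direct).unwrap();
        g.add_edge(a, egress, false);
        g.add_edge(egress, ingress, false);
        g.add_edge(ingress, b, was_materialized);
        assert!(g.find_edge(a, b).is_none());
        assert!(g.find_edge(ingress, b).is_some());
    }
}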
use flow::prelude::*;
use flow::domain;
use flow::node;
use petgraph;
use petgraph::graph::NodeIndex;
use std::collections::{HashSet, HashMap};
use std::sync::mpsc;
use slog::Logger;
/// Add in ingress and egress nodes as appropriate in the graph to facilitate cross-domain
/// communication.
pub fn add(log: &Logger,
graph: &mut Graph,
source: NodeIndex,
new: &mut HashSet<NodeIndex>)
-> HashMap<domain::Index, HashMap<NodeIndex, NodeIndex>> {
// find all new nodes in topological order. we collect first since we'll be mutating the graph
// below. it's convenient to have the nodes in topological order, because we then know that
// we'll first add egress nodes, and then the related ingress nodes. if we're ever required to
// add an ingress node, and its parent isn't an egress node, we know that we're seeing a
// connection between an old node in one domain, and a new node in a different domain.
let mut topo_list = Vec::with_capacity(new.len());
let mut topo = petgraph::visit::Topo::new(&*graph);
while let Some(node) = topo.next(&*graph) {
if node == source {
continue;
}
if !new.contains(&node) {
continue;
}
topo_list.push(node);
}
// we need to keep track of all the times we change the parent of a node (by replacing it with
// an egress, and then with an ingress), since this remapping must be communicated to the nodes
// so they know the true identifier of their parent in the graph.
let mut swaps = HashMap::new();
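// Shape sketch (hypothetical entries, string stand-ins for NodeIndex) of what ends up
// in `swaps`: domain -> (original parent -> the node that now stands in for it), so a
// later lookup like swaps[&domain][&original_parent] yields the replacement.
#[allow(dead_code)]
fn _swaps_shape_sketch() {
    use std::collections::HashMap;
    let mut swaps: HashMap<u32, HashMap<&'static str, &'static str>> = HashMap::new();
    swaps.entry(1).or_insert_with(HashMap::new).insert("parent A", "ingress(A)");
    assert_eq!(swaps[&1]["parent A"], "ingress(A)");
}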
// we also need to keep track of the ingress nodes we've added to each domain so that we don't
// end up with two ingress nodes for a given egress node. that would cause unnecessary
// cross-domain communication. this is domain => source => NodeIndex (of ingress). note that
// `source` here is actually the *egress* node. this is because by the time we add ingress
// nodes, we know our incoming edges have already been updated to point to the egress nodes.
let mut ingresses = HashMap::new();
for node in topo_list {
let domain = graph[node].domain();
// First, we add egress nodes for any of our cross-domain children
let children: Vec<_> = graph.neighbors_directed(node, petgraph::EdgeDirection::Outgoing)
.collect(); // collect so we can mutate graph
// We then need to make sure that we're acting on up-to-date information about existing
// egress/ingress pairs. In particular, we want to know about egresses this node already
// has (and to which domains). In the process we also populate the information about
// ingress nodes in other domains that point here (those shouldn't be re-created if new
// nodes are created in the corresponding domains).
let mut egresses = HashMap::new();
for child in &children {
if !new.contains(child) {
continue;
}
if let node::Type::Egress { .. } = *graph[*child] {
for ingress in graph.neighbors_directed(*child, petgraph::EdgeDirection::Outgoing) {
// this egress already connects this node to the ingress' domain
egresses.insert(graph[ingress].domain(), *child);
// also keep track of the corresponding ingress node so we can re-use it
ingresses.entry(graph[ingress].domain())
.or_insert_with(HashMap::new)
.insert(node, ingress);
}
}
}
for child in children {
let cdomain = graph[child].domain();
if domain != cdomain {
// child is in a different domain
if !egresses.contains_key(&cdomain) {
// create an egress node to handle that
// NOTE: technically, this doesn't need to mirror its parent, but meh
let proxy = graph[node].mirror(node::Type::Egress {
tags: Default::default(),
txs: Default::default(),
});
let egress = graph.add_node(proxy);
graph.add_edge(node, egress, false);
new.insert(egress);
egresses.insert(cdomain, egress);
trace!(log, "adding cross-domain egress to new node"; "node" => node.index(), "egress" => egress.index());
} else {
trace!(log, "re-using cross-domain egress to new node"; "node" => node.index(), "egress" => egresses[&cdomain].index());
}
// we need to hook that node in between us and this child
let egress = egresses[&cdomain];
let old = graph.find_edge(node, child).unwrap();
let was_materialized = graph.remove_edge(old).unwrap();
graph.add_edge(egress, child, was_materialized);
// this ends up being re-executed, but that's okay
swaps.entry(cdomain).or_insert_with(HashMap::new).insert(node, egress);
}
}
// Then, we look for any parents in the graph that
//
// a) are in a different domain, and
// b) aren't egress nodes
//
// This situation arises whenever a cross-domain edge is added as the result of a
// migration. We need to find or make an egress node in that other domain, and hook that
// up as the parent of this node instead of the original internal foreign domain node.
//
// Note that same-domain parents are never interesting to us for this purpose.
let mut parents: Vec<_> = graph.neighbors_directed(node, petgraph::EdgeDirection::Incoming)
.filter(|&ni| ni == source || graph[ni].domain() != domain)
.collect(); // collect so we can mutate graph
for parent in &mut parents {
if *parent == source {
// no egress needed
continue;
}
// since we are traversing in topological order, egress nodes should have been added to
// all our parents, and our incoming edges should have been updated. if that *isn't*
// the case for a given parent, it must be a pre-existing parent.
if let node::Type::Egress { .. } = *graph[*parent] {
continue;
}
// let's first see if this parent already has an egress we can use
let egress = graph.neighbors_directed(*parent, petgraph::EdgeDirection::Outgoing)
.find(|&ni| graph[ni].is_egress());
let egress = egress.unwrap_or_else(|| {
// no, okay, so we need to add an egress for that other node,
let proxy = graph[*parent].mirror(node::Type::Egress{
txs: Default::default(),
tags: Default::default()
});
let egress = graph.add_node(proxy);
trace!(log, "adding cross-domain egress to existing node"; "node" => parent.index(), "egress" => egress.index());
graph.add_edge(*parent, egress, false);
new.insert(egress);
egress
});
// now, let's use that egress as our parent instead
let old = graph.find_edge(*parent, node).unwrap();
let was_materialized = graph.remove_edge(old).unwrap();
graph.add_edge(egress, node, was_materialized);
// all references to our original parent should now refer to the egress
swaps.entry(domain).or_insert_with(HashMap::new).insert(*parent, egress);
// and we should now just consider the egress our parent instead
*parent = egress;
}
// Now that we know all our foreign parents are egress nodes, we can add ingress nodes.
// Note that by this time (due to the topological walk), we know that `ingresses` has been
// sufficiently populated to contain any relevant existing ingress nodes.
for parent in parents {
// is there already an ingress node we can re-use?
let mut ingress =
ingresses.get(&domain).and_then(|ingresses| ingresses.get(&parent)).map(|ni| *ni);
if ingress.is_none() {
// nope -- create our new ingress node
let mut i = graph[parent].mirror(node::Type::Ingress);
i.add_to(domain); // it belongs to this domain, not that of the parent
let i = graph.add_node(i);
graph.add_edge(parent, i, false);
// we also now need to deal with this ingress node
new.insert(i);
if parent == source {
trace!(log, "adding source ingress"; "base" => node.index(), "ingress" => i.index());
// we don't re-use source ingress nodes
} else {
trace!(log, "adding cross-domain ingress"; "to" => node.index(), "from" => parent.index(), "ingress" => i.index());
ingresses.entry(domain).or_insert_with(HashMap::new).insert(parent, i);
}
ingress = Some(i);
} else {
trace!(log, "re-using cross-domain ingress"; "to" => node.index(), "from" => parent.index(), "ingress" => ingress.unwrap().index());
}
let ingress = ingress.unwrap();
// we need to hook the ingress node in between us and the parent
let old = graph.find_edge(parent, node).unwrap(); | // of `node` with the ids of the egress nodes. thus, we actually need to do swaps on
// the values in `swaps`, not insert new entries (that, or we'd need to change the
// resolution process to be recursive, which is painful and unnecessary). note that we
// *also* need to special-case handling base nodes, because there *won't* be a
// parent egress swap
if parent != source {
for (_, to) in swaps.get_mut(&domain).unwrap().iter_mut() {
if *to == parent {
*to = ingress;
}
}
}
}
}
swaps
}
pub fn connect(log: &Logger,
graph: &mut Graph,
main_txs: &HashMap<domain::Index, mpsc::SyncSender<Packet>>,
new: &HashSet<NodeIndex>) {
// ensure all egress nodes contain the tx channel of the domains of their child ingress nodes
for &node in new {
let n = &graph[node];
if let node::Type::Ingress = **n {
// check the egress connected to this ingress
} else {
continue;
}
for egress in graph.neighbors_directed(node, petgraph::EdgeDirection::Incoming) {
match *graph[egress] {
node::Type::Egress { ref txs, .. } => {
trace!(log, "connecting"; "egress" => egress.index(), "ingress" => node.index());
txs.lock()
.unwrap()
.push((node.into(), n.addr(), main_txs[&n.domain()].clone()));
continue;
}
node::Type::Source => continue,
_ => unreachable!("ingress parent is not egress"),
}
}
}
} | let was_materialized = graph.remove_edge(old).unwrap();
graph.add_edge(ingress, node, was_materialized);
// tracking swaps here is a bit tricky because we've already swapped the "true" parents | random_line_split |
main.rs | // cargo build --target=mips-unknown-linux-gnu
//extern crate rand;
//#![deny(warnings)]
#![feature(type_ascription)]
#[macro_use]
extern crate log;
extern crate bytes;
extern crate futures;
extern crate hyper;
extern crate pretty_env_logger;
extern crate regex;
extern crate reqwest;
//extern crate tokio;
#[macro_use]
extern crate lazy_static;
use futures::{future, Async, Poll};
use hyper::rt::{Future, Stream};
use hyper::service::service_fn;
use hyper::{Body, Chunk, Method, Request, Response, Server, StatusCode, Uri};
//use hyper::{Client, Server, Method, Body, Response, Request};
use std::error::Error;
use std::net::SocketAddr;
use std::fs::File;
//use std::io;
//use std::io::prelude::*;
use core::borrow::Borrow;
use futures::future::FutureResult;
use std::io;
use std::io::prelude::*;
use std::process::{Command, Stdio};
use regex::Regex;
use std::borrow::Cow;
mod chunk;
mod fileio;
mod stream;
//use tokio::codec::{Decoder, FramedRead};
//use tokio::prelude::{AsyncRead};
//use tokio::fs::File;
//use tokio::io;
//use std::thread;
//use rand::Rng;
//static NTHREADS: i32 = 10;
fn get_in_addr() -> SocketAddr {
let mut in_addr: SocketAddr = ([192, 168, 3, 43], 3333).into();
if cfg!(target_os = "windows") {
in_addr = ([127, 0, 0, 1], 3333).into();
}
return in_addr;
}
fn reduce_forwarded_uri(uri: &Uri) -> String {
//let in_addr: SocketAddr = get_in_addr();
let uri_string = uri.path_and_query().map(|x| x.as_str()).unwrap_or("");
//let uri: String = uri_string.parse().unwrap();
//let in_uri_string = format!("http://{}/{}", in_addr, req.uri());
let in_remove_string = "/fwd/";
debug!("uri_string: {}", uri_string);
let result = uri_string.replace(&in_remove_string, "");
debug!("result: {}", result);
//let result = in_uri_string.split(in_remove_string.unwrap_or("")).take(1).next().unwrap_or("");
result
}
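// String-level illustration (hypothetical request path) of what reduce_forwarded_uri
// strips out; the real function pulls the same string out of hyper's Uri first.
#[allow(dead_code)]
fn _fwd_strip_example() {
    let path_and_query = "/fwd/https://example.com/feed.ogg";
    let stripped = path_and_query.replace("/fwd/", "");
    assert_eq!(stripped, "https://example.com/feed.ogg");
}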
fn reformat_dates(before: &str) -> Cow<str> {
lazy_static! {
static ref ISO8601_DATE_REGEX : Regex = Regex::new(
//r"(?P<y>\d{4})-(?P<m>\d{2})-(?P<d>\d{2})"
//r"/^(?:(?:(?:https?|ftp):)?\/\/)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?[a-z0-9\u00a1-\uffff]\.)+(?:[a-z\u00a1-\uffff]{2,}\.?))(?::\d{2,5})?(?:[/?#]\S*)?$/i"
//r"(?:(?:(?:https?|ftp):)?\/\/)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?[a-z0-9\u00a1-\uffff]\.)+(?:[a-z\u00a1-\uffff]{2,}\.?))(?::\d{2,5})?(?:[/?#]\S*)?$/i"
r"(https?://(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)*.(\.ogg))"
).unwrap();
}
ISO8601_DATE_REGEX.replace_all(before, "FUCK YA")
}
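// Hypothetical smoke check for reformat_dates; what gets rewritten depends entirely
// on the .ogg-URL regex above, so the result is only printed, not asserted.
#[allow(dead_code)]
fn _reformat_dates_demo() {
    let page = r#"<a href="https://example.com/episode.ogg">listen</a>"#;
    println!("rewritten: {}", reformat_dates(page));
}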
/// We need to return different futures depending on the route matched,
/// and we can do that with an enum, such as `futures::Either`, or with
/// trait objects.
///
/// A boxed Future (trait object) is used as it is easier to understand
/// and extend with more types. Advanced users could switch to `Either`.
type BoxFut = Box<Future<Item = Response<Body>, Error = hyper::Error> + Send>;
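// For contrast, a sketch of the `futures::Either` alternative mentioned above: both
// branches must then name concrete future types (two FutureResult arms here), which
// is exactly the bookkeeping the boxed trait object avoids.
#[allow(dead_code)]
type EitherFut = future::Either<
    FutureResult<Response<Body>, hyper::Error>,
    FutureResult<Response<Body>, hyper::Error>,
>;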
/// This is our service handler. It receives a Request, routes on its
/// path, and returns a Future of a Response.
//fn echo(req: Request<Body>, buf: Vec<u8>) -> BoxFut {
fn echo(req: Request<Body>) -> BoxFut {
let mut response = Response::new(Body::empty());
debug!("method: {}, uri: {}", req.method(), req.uri());
match req.method() {
&Method::GET => {
if req.uri().path().starts_with("/fwd/") {
let req_uri = reduce_forwarded_uri(req.uri());
//let forwarded_uri = Uri::from_static(&req_uri);
*response.body_mut() = Body::from("Lets forward: ".to_owned() + &req_uri);
let body = reqwest::get(req_uri.as_str()) //.unwrap();
//.danger_disable_certs_verification()
.expect(&format!("cannot get '{}'", &req_uri))
.text() //.unwrap();
.expect(&format!("cannot get text for '{}'", &req_uri));
/*let body = reqwest::Client::builder()
.danger_accept_invalid_hostnames(true)
.danger_accept_invalid_certs(true)
.build()
.unwrap()
.get("https://www.google.de/")
.send()
.unwrap()
.text()
.unwrap();*/
println!("body = {}", body.lines().take(3).collect::<String>());
/*let re_weburl = Regex::new(
r"/^(?:(?:(?:https?|ftp):)?\/\/)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?[a-z0-9\u00a1-\uffff]\.)+(?:[a-z\u00a1-\uffff]{2,}\.?))(?::\d{2,5})?(?:[/?#]\S*)?$/i"
);*/
// check if there is an alternative to the ogg-vorbis stream
// when true, then prioritize the mp3 over it
// else create a reference to the mp3 forwarding endpoint
// SSL Certificates on the host are important. make sure:
// ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
// ENV SSL_CERT_DIR=/etc/ssl/certs
// are set.
let after = reformat_dates(&body);
//println!("body = {}", after);
//let chunk = Chunk::from(after);
//*response.body_mut() = Body::from(after.to_string());
*response.body_mut() = Body::from(after.to_string());
//*response.body_mut() = Body::from("got regex");
return Box::new(future::ok(response));
}
}
_ => {}
}
match (req.method(), req.uri().path()) {
// Serve some instructions at /
(&Method::GET, "/") => {
//command_name = 'ffmpeg',
//command_opts = ['-i', 'pipe:0', '-f', 'mp3', '-acodec', 'libvorbis', '-ab', '128k', '-aq', '60', '-f', 'ogg', '-'];
/*
let command_name = "ffmpeg";
//let command_opts = ["-i", "pipe:0", "-f", "mp3", "-acodec", "libvorbis", "-ab", "128k", "-aq", "60", "-f", "ogg", "-"];
//"D:\Program Files\ffmpeg\bin\ffmpeg" -re -i "https://cdn.netzpolitik.org/wp-upload/2019/02/NPP169-Worum-geht-es-eigentlich-bei-der-ePrivacy-Reform.ogg"
// -acodec libmp3lame -ab 128k -aq 60 -f mp3 - > bla.mp3
//let media_addr = "https://cdn.netzpolitik.org/wp-upload/2019/02/NPP169-Worum-geht-es-eigentlich-bei-der-ePrivacy-Reform.ogg";
let media_addr = "https://upload.wikimedia.org/wikipedia/commons/f/f2/Median_test.ogg";
let command_opts = ["-i", media_addr,
"-acodec", "libmp3lame", "-ab", "128k", "-aq", "60", "-f", "mp3", "-"];
let mut ffmpeg_path = command_name;
if cfg!(target_os = "windows") {
ffmpeg_path = "D:/Program Files/ffmpeg/bin/ffmpeg.exe";
}
// Spawn the `wc` command
let process = match Command::new(ffmpeg_path)
.args(&command_opts)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
{
Err(why) => panic!("couldn't spawn {}: {}", command_name, why.description()),
Ok(process) => process,
};
// The `stdout` field also has type `Option<ChildStdout>` so must be unwrapped.
let mut buffer: Vec<u8> = Vec::new();
match process.stdout.unwrap().read_to_end(&mut buffer) {
Err(why) => panic!("couldn't read {} stdout: {}", command_name, why.description()),
Ok(_) => println!("buffer size:[{}]", buffer.len()),
}
*response.body_mut() = Body::from(buffer);
return Box::new( future::ok(response));
*/
/*let mapping = || -> Vec(u8)
{
}*/
//let chunks = vec!["hello", " ", "world"];
//let stream = futures::stream::iter_ok::<_, ::std::io::Error>(chunks);
/*let mapping = req
.into_body()
.map(|chunk| {
chunk.iter()
.map(|byte| byte.to_ascii_uppercase())
.collect::<Vec<u8>>()
});*/
let mapping1 = req.into_body().map(|chunk| {
chunk
.iter()
.map(|byte| {
println!("chunk {}", byte.to_ascii_uppercase());
byte.to_ascii_uppercase()
})
.collect::<Vec<u8>>()
});
let data_fuck = vec!["FUCK", " ", "YOU!"];
let chunk_fuck = Chunk::from("fuck");
let stream_fuck = futures::stream::iter_ok::<_, ::std::io::Error>(data_fuck);
/* //let data2 = vec!["hello", " ", "world"];
let data2: Vec<u8> = vec![0x55, 0x20, 0x66];
//let chunk2 = Chunk::from(data2);
//let conv = |x: Vec<u8>| x.iter();
let stream2 = futures::stream::iter_ok::<_, ::std::io::Error>(data2);
//let stream2 = futures::stream::iter_ok::<_, ::std::io::Error>(data2);
let chunks = fileio::load_local_mp3_buffer();
let c: &[u8] = &chunks; // c: &[u8]
//let chunk = Chunk::from(c);
let stream = futures::stream::iter_ok::<_, ::std::io::Error>(c);
*response.body_mut() = Body::from(chunks);
return Box::new( future::ok(response));
*/
// type BoxFut = Box<Future<Item = Response<Body>, Error = hyper::Error> + Send>;
//let bbb = Box::new(future::ok(Response::new("Fuck YOU")));
//let xxx: BoxFut = Box::new(future::ok(response));
//xxx
let my_stream = MyStream::new(5);
//let xstream = futures::stream::iter_ok::<_, ::std::io::Error>(my_stream.iter());
//let mut file = &get_local_mp3_path();
let mut filepath = "/media/hdd/jedzia/rust/p.mp3";
if cfg!(target_os = "windows") {
filepath = "p.mp3";
}
//let file = File::open(filepath).map(file_response).or_else(|_| status_response(StatusCode::NOT_FOUND));
//.expect("failed to open file")
//let file = tokio::fs::File::open(filepath).catch_unwind();
//let fstream = FramedRead::new(file, ChunkDecoder);
/*fn decode(buf: Vec<u8>) -> Result<Option<Chunk>, io::Error> {
let len = buf.len();
if len > 0 {
//Ok(Some(buf.iter().take(32).freeze().into()))
Ok(Some(buf.iter().take(32).into()))
} else {
Ok(None)
}
}
let akjsd = decode(chunks);*/
use bytes::{BigEndian, Buf, BufMut, BytesMut, IntoBuf};
let bytes = b"\x00\x01hello world";
let mut bytes_buf = bytes.into_buf();
let bytes_stream = futures::stream::iter_ok::<_, ::std::io::Error>(bytes);
//*response.body_mut() = Body::wrap_stream(bytes_stream);
*response.body_mut() = Body::wrap_stream(stream_fuck);
//*response.body_mut() = Body::empty();
//*response.set_body(Box::new(stream));
// type BoxFut = Box<Future<Item = Response<Body>, Error = hyper::Error> + Send>;
//let future_result = future::ok(response);
// let mut buf = BytesMut::with_capacity(1024);
// buf.put(&b"hello world"[..]);
//let mut response1 = Response::new("Fuck");
// let mut response1 = Response::new(Body::from(buf.freeze()));
// let future_result: FutureResult<Response<Body>, hyper::Error> = future::ok(response1);
//return Box::new( future_result);
//let (method, uri, version, headers, body) = req.deconstruct();
let myresp =
chunk::handle_request(Request::new(Body::from("Fuck ya to chunk::handle_request")));
return Box::new(myresp);
//let future_result: FutureResult<Response<Body>, hyper::Error> = future::ok(response);
//return Box::new(future_result);
}
// Simply echo the body back to the client.
(&Method::POST, "/echo") => {
*response.body_mut() = req.into_body();
}
//(&Method::GET, Some("/fwd/")) => {
// *response.body_mut() = Body::from("Jahahahahaha");
//}
// Convert to uppercase before sending back to client.
(&Method::POST, "/echo/uppercase") => {
let mapping = req.into_body().map(|chunk| {
chunk
.iter()
.map(|byte| byte.to_ascii_uppercase())
.collect::<Vec<u8>>()
});
*response.body_mut() = Body::wrap_stream(mapping);
}
// Reverse the entire body before sending back to the client.
//
// Since we don't know the end yet, we can't simply stream
// the chunks as they arrive. So, this returns a different
// future, waiting on concatenating the full body, so that
// it can be reversed. Only then can we return a `Response`.
(&Method::POST, "/echo/reversed") => {
let reversed = req.into_body().concat2().map(move |chunk| {
let body = chunk.iter().rev().cloned().collect::<Vec<u8>>();
*response.body_mut() = Body::from(body);
response
});
return Box::new(reversed);
}
// The 404 Not Found route...
_ => {
println!("404 not found.");
*response.status_mut() = StatusCode::NOT_FOUND;
}
};
Box::new(future::ok(response))
}
/*struct ChunkDecoder;
impl Decoder for ChunkDecoder {
type Item = Chunk;
type Error = io::Error;
fn decode(&mut self, buf: &mut bytes::BytesMut) -> Result<Option<Chunk>, io::Error> {
let len = buf.len();
if len > 0 {
Ok(Some(buf.take().freeze().into()))
} else {
Ok(None)
}
}
}*/
struct MyStream {
current: u32,
max: u32,
}
impl MyStream {
pub fn new(max: u32) -> MyStream {
MyStream {
current: 0,
max: max,
}
}
}
impl Stream for MyStream {
type Item = u32;
type Error = Box<Error>;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self.current {
ref mut x if *x < self.max => {
*x = *x + 1;
Ok(Async::Ready(Some(*x)))
}
_ => Ok(Async::Ready(None)),
}
}
}
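// Usage sketch for MyStream with the futures 0.1 combinators already in scope via
// hyper::rt (collect() gathers every yielded item, wait() blocks the calling thread):
#[allow(dead_code)]
fn _my_stream_demo() {
    let items = MyStream::new(5).collect().wait().expect("MyStream never errors");
    assert_eq!(items, vec![1, 2, 3, 4, 5]);
}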
/*fn get_local_mp3_path() -> &str {
//let mut f = File::open("./p.mp3").expect("failed to open mp3 file!");
let mut filepath = "/media/hdd/jedzia/rust/p.mp3";
if cfg!(target_os = "windows") {
filepath = "p.mp3";
}
filepath
}*/
fn main() -> Result<(), Box<dyn Error>> {
println!("Hello, lovely VU Duo!");
let y: f32 = 5.0;
if y < 4.0 {
// https://stackoverflow.com/questions/51550167/how-to-manually-return-a-result-boxerror
return Err("Bad request".into());
}
// http://192.168.2.43:3000/
//let addr = ([0, 0, 0, 0], 3000).into();
//let addr = ([127, 0, 0, 1], 3000).into();
//let addr = ([192, 168, 2, 43], 3000).into();
pretty_env_logger::init();
//fun_with_ssl();
//return Ok(());
/*
// helps when certificates are not found
extern crate openssl_probe;
let ssl = openssl_probe::init_ssl_cert_env_vars();
*/
//let mut buffer = String::new();
//f.read_to_string(&mut buffer)?;
//let in_addr: SocketAddr = ([127, 0, 0, 1], 3333).into();
let in_addr = get_in_addr();
/*let out_addr: SocketAddr = ([127, 0, 0, 1], 3000).into();
// google.de 216.58.208.35
//let out_addr: SocketAddr = ([216, 58, 208, 35], 443).into();
let client_main = Client::new();
let out_addr_clone = out_addr.clone();
// new_service is run for each connection, creating a 'service'
// to handle requests for that specific connection.
let new_service = move || {
let client = client_main.clone();
// This is the `Service` that will handle the connection.
// `service_fn_ok` is a helper to convert a function that
// returns a Response into a `Service`.
service_fn(move |mut req| {
let uri_string = format!(
"http://{}/{}",
out_addr_clone,
req.uri().path_and_query().map(|x| x.as_str()).unwrap_or("")
);
let uri = uri_string.parse().unwrap();
let in_uri_string = format!("http://{}/{}", in_addr, req.uri());
let in_remove_string = format!("http://{}//", in_addr);
println!("req.uri(): {}", in_uri_string);
let result = in_uri_string.replace(&in_remove_string, "");
//let result = in_uri_string.split(in_remove_string.unwrap_or("")).take(1).next().unwrap_or("");
println!("result: {}", result);
*req.uri_mut() = uri;
client.request(req)
})
};
let server = Server::bind(&in_addr)
.serve(new_service)
.map_err(|e| eprintln!("server error: {}", e));
println!("Listening on http://{}", in_addr);
println!("Proxying on http://{}", out_addr);
rt::run(server);*/
//let mut f = File::open("p.mp3")?;
//let mut buffer: Vec<u8> = Vec::new();
//f.read_to_end(&mut buffer)?;
//let b = buffer.clone();
let server = Server::bind(&in_addr)
//.serve(|| service_fn(|req| echo(req, Vec::new())))
.serve(|| service_fn(echo))
.map_err(|e| eprintln!("server error: {}", e));
println!("Listening on http://{}", in_addr);
hyper::rt::run(server);
println!("finished.");
Ok(())
}
/*//#cfg!(target_os = "windows")
fn testOpenSSL1() {
extern crate openssl;
use openssl::rsa::{Padding, Rsa};
let rsa = Rsa::generate(2048).unwrap();
let data = b"foobar";
println!("data {:?}", data);
let mut buf = vec![0; rsa.size() as usize];
let encrypted_len = rsa.public_encrypt(data, &mut buf, Padding::PKCS1).unwrap();
println!("encripted {:?}", buf);
}*/
fn testOpenSSL() |
fn testGoogleSSL() {
println!("===== TestBody =====");
let body = reqwest::Client::builder()
//.danger_accept_invalid_hostnames(true)
//.danger_accept_invalid_certs(true)
//.add_root_certificate(cert)
.build()
.unwrap()
.get("https://www.google.de/")
.send()
.unwrap()
.text()
.unwrap();
//let bla = body.lines().take(3).collect::<String>();
println!("body = {}", body.lines().take(1).collect::<String>());
}
fn fun_with_ssl() {
/*extern crate openssl_static_sys = "openssl-static-sys";
if cfg!(target_os = "linux") {
openssl_static_sys::init_ssl_cert_env_vars();
}*/
use std::env;
use std::fs;
use std::path::PathBuf;
println!("Entry");
let cert_file = env::var_os("SSL_CERT_FILE").map(PathBuf::from);
println!(" env: cert_file {:?}", cert_file);
let cert_dir = env::var_os("SSL_CERT_DIR").map(PathBuf::from);
println!(" env: cert_dir {:?}", cert_dir);
//println!("ssl {:?}", ssl);
println!("After openssl_probe");
let cert_file = env::var_os("SSL_CERT_FILE").map(PathBuf::from);
println!(" env: cert_file {:?}", cert_file);
let cert_dir = env::var_os("SSL_CERT_DIR").map(PathBuf::from);
println!(" env: cert_dir {:?}", cert_dir);
/* // /media/hdd/jedzia/rust/ca-bundle.trust.crt
env::set_var("SSL_CERT_DIR", "/etc/ssl/certs");
//env::set_var("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt");
env::set_var("SSL_CERT_FILE", "/media/hdd/jedzia/rust/ca-bundle.trust.crt");
if cfg!(target_os = "windows") {
//env::set_var("SSL_CERT_DIR", "/etc/ssl/certs");
env::set_var(
"SSL_CERT_FILE",
r"C:\msys64\etc\pki\ca-trust\extracted\openssl\ca-bundle.trust.crt",
);
// set SSL_CERT_FILE=C:\msys64\etc\pki\ca-trust\extracted\openssl\ca-bundle.trust.crt
}
*/
println!("After env::set_var");
let cert_file = env::var_os("SSL_CERT_FILE").map(PathBuf::from);
println!(" env: cert_file {:?}", cert_file);
let cert_dir = env::var_os("SSL_CERT_DIR").map(PathBuf::from);
println!(" env: cert_dir {:?}", cert_dir);
//let cert_file_path = "/etc/ssl/certs/ca-certificates.crt";
let mut cert_file_path = "/media/hdd/jedzia/rust/ca-bundle.trust.crt";
if cfg!(target_os = "windows") {
cert_file_path = r"\\VUDUO2X\Harddisk\jedzia\rust\ssl\ca-bundle.trust.crt";
}
/*let mut buf = Vec::new();
File::open(cert_file_path)
.unwrap()
.read_to_end(&mut buf)
.unwrap();
let cert = reqwest::Certificate::from_der(&buf).unwrap();
println!(" cert {:?}", cert);*/
//return Ok(());
testOpenSSL();
testGoogleSSL();
}
| {
/* extern crate openssl;
println!("===== testOpenSSL =====");
use openssl::ssl::{SslConnector, SslMethod};
use std::io::{Read, Write};
use std::net::TcpStream;
let connector = SslConnector::builder(SslMethod::tls()).unwrap().build();
let stream = TcpStream::connect("google.com:443").unwrap();
let mut stream = connector.connect("google.com", stream).unwrap();
stream.write_all(b"GET / HTTP/1.0\r\n\r\n").unwrap();
let mut res = vec![];
stream.read_to_end(&mut res).unwrap();
println!(
"{}",
String::from_utf8_lossy(&res)
.lines()
.take(3)
.collect::<String>()
);*/
} | identifier_body |
main.rs | // cargo build --target=mips-unknown-linux-gnu
//extern crate rand;
//#![deny(warnings)]
#![feature(type_ascription)]
#[macro_use]
extern crate log;
extern crate bytes;
extern crate futures;
extern crate hyper;
extern crate pretty_env_logger;
extern crate regex;
extern crate reqwest;
//extern crate tokio;
#[macro_use]
extern crate lazy_static;
use futures::{future, Async, Poll};
use hyper::rt::{Future, Stream};
use hyper::service::service_fn;
use hyper::{Body, Chunk, Method, Request, Response, Server, StatusCode, Uri};
//use hyper::{Client, Server, Method, Body, Response, Request};
use std::error::Error;
use std::net::SocketAddr;
use std::fs::File;
//use std::io;
//use std::io::prelude::*;
use core::borrow::Borrow;
use futures::future::FutureResult;
use std::io;
use std::io::prelude::*;
use std::process::{Command, Stdio};
use regex::Regex;
use std::borrow::Cow;
mod chunk;
mod fileio;
mod stream;
//use tokio::codec::{Decoder, FramedRead};
//use tokio::prelude::{AsyncRead};
//use tokio::fs::File;
//use tokio::io;
//use std::thread;
//use rand::Rng;
//static NTHREADS: i32 = 10;
fn get_in_addr() -> SocketAddr {
let mut in_addr: SocketAddr = ([192, 168, 3, 43], 3333).into();
if cfg!(target_os = "windows") {
in_addr = ([127, 0, 0, 1], 3333).into();
}
return in_addr;
}
fn | (uri: &Uri) -> String {
//let in_addr: SocketAddr = get_in_addr();
let uri_string = uri.path_and_query().map(|x| x.as_str()).unwrap_or("");
//let uri: String = uri_string.parse().unwrap();
//let in_uri_string = format!("http://{}/{}", in_addr, req.uri());
let in_remove_string = "/fwd/";
debug!("uri_string: {}", uri_string);
let result = uri_string.replace(&in_remove_string, "");
debug!("result: {}", result);
//let result = in_uri_string.split(in_remove_string.unwrap_or("")).take(1).next().unwrap_or("");
result
}
fn reformat_dates(before: &str) -> Cow<str> {
lazy_static! {
static ref ISO8601_DATE_REGEX : Regex = Regex::new(
//r"(?P<y>\d{4})-(?P<m>\d{2})-(?P<d>\d{2})"
//r"/^(?:(?:(?:https?|ftp):)?\/\/)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?[a-z0-9\u00a1-\uffff]\.)+(?:[a-z\u00a1-\uffff]{2,}\.?))(?::\d{2,5})?(?:[/?#]\S*)?$/i"
//r"(?:(?:(?:https?|ftp):)?\/\/)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?[a-z0-9\u00a1-\uffff]\.)+(?:[a-z\u00a1-\uffff]{2,}\.?))(?::\d{2,5})?(?:[/?#]\S*)?$/i"
r"(https?://(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)*.(\.ogg))"
).unwrap();
}
ISO8601_DATE_REGEX.replace_all(before, "FUCK YA")
}
/// We need to return different futures depending on the route matched,
/// and we can do that with an enum, such as `futures::Either`, or with
/// trait objects.
///
/// A boxed Future (trait object) is used as it is easier to understand
/// and extend with more types. Advanced users could switch to `Either`.
type BoxFut = Box<Future<Item = Response<Body>, Error = hyper::Error> + Send>;
/// This is our service handler. It receives a Request, routes on its
/// path, and returns a Future of a Response.
//fn echo(req: Request<Body>, buf: Vec<u8>) -> BoxFut {
fn echo(req: Request<Body>) -> BoxFut {
let mut response = Response::new(Body::empty());
debug!("method: {}, uri: {}", req.method(), req.uri());
match req.method() {
&Method::GET => {
if req.uri().path().starts_with("/fwd/") {
let req_uri = reduce_forwarded_uri(req.uri());
//let forwarded_uri = Uri::from_static(&req_uri);
*response.body_mut() = Body::from("Lets forward: ".to_owned() + &req_uri);
let body = reqwest::get(req_uri.as_str()) //.unwrap();
//.danger_disable_certs_verification()
.expect(&format!("cannot get '{}'", &req_uri))
.text() //.unwrap();
.expect(&format!("cannot get text for '{}'", &req_uri));
/*let body = reqwest::Client::builder()
.danger_accept_invalid_hostnames(true)
.danger_accept_invalid_certs(true)
.build()
.unwrap()
.get("https://www.google.de/")
.send()
.unwrap()
.text()
.unwrap();*/
println!("body = {}", body.lines().take(3).collect::<String>());
/*let re_weburl = Regex::new(
r"/^(?:(?:(?:https?|ftp):)?\/\/)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?[a-z0-9\u00a1-\uffff]\.)+(?:[a-z\u00a1-\uffff]{2,}\.?))(?::\d{2,5})?(?:[/?#]\S*)?$/i"
);*/
// check if there is an alternative to the ogg-vorbis stream
// when true, then prioritize the mp3 over it
// else create a reference to the mp3 forwarding endpoint
// SSL Certificates on the host are important. make sure:
// ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
// ENV SSL_CERT_DIR=/etc/ssl/certs
// are set.
let after = reformat_dates(&body);
//println!("body = {}", after);
//let chunk = Chunk::from(after);
//*response.body_mut() = Body::from(after.to_string());
*response.body_mut() = Body::from(after.to_string());
//*response.body_mut() = Body::from("got regex");
return Box::new(future::ok(response));
}
}
_ => {}
}
match (req.method(), req.uri().path()) {
// Serve some instructions at /
(&Method::GET, "/") => {
//command_name = 'ffmpeg',
//command_opts = ['-i', 'pipe:0', '-f', 'mp3', '-acodec', 'libvorbis', '-ab', '128k', '-aq', '60', '-f', 'ogg', '-'];
/*
let command_name = "ffmpeg";
//let command_opts = ["-i", "pipe:0", "-f", "mp3", "-acodec", "libvorbis", "-ab", "128k", "-aq", "60", "-f", "ogg", "-"];
//"D:\Program Files\ffmpeg\bin\ffmpeg" -re -i "https://cdn.netzpolitik.org/wp-upload/2019/02/NPP169-Worum-geht-es-eigentlich-bei-der-ePrivacy-Reform.ogg"
// -acodec libmp3lame -ab 128k -aq 60 -f mp3 - > bla.mp3
//let media_addr = "https://cdn.netzpolitik.org/wp-upload/2019/02/NPP169-Worum-geht-es-eigentlich-bei-der-ePrivacy-Reform.ogg";
let media_addr = "https://upload.wikimedia.org/wikipedia/commons/f/f2/Median_test.ogg";
let command_opts = ["-i", media_addr,
"-acodec", "libmp3lame", "-ab", "128k", "-aq", "60", "-f", "mp3", "-"];
let mut ffmpeg_path = command_name;
if cfg!(target_os = "windows") {
ffmpeg_path = "D:/Program Files/ffmpeg/bin/ffmpeg.exe";
}
// Spawn the `wc` command
let process = match Command::new(ffmpeg_path)
.args(&command_opts)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
{
Err(why) => panic!("couldn't spawn {}: {}", command_name, why.description()),
Ok(process) => process,
};
// The `stdout` field also has type `Option<ChildStdout>` so must be unwrapped.
let mut buffer: Vec<u8> = Vec::new();
match process.stdout.unwrap().read_to_end(&mut buffer) {
Err(why) => panic!("couldn't read {} stdout: {}", command_name, why.description()),
Ok(_) => println!("buffer size:[{}]", buffer.len()),
}
*response.body_mut() = Body::from(buffer);
return Box::new( future::ok(response));
*/
/*let mapping = || -> Vec(u8)
{
}*/
//let chunks = vec!["hello", " ", "world"];
//let stream = futures::stream::iter_ok::<_, ::std::io::Error>(chunks);
/*let mapping = req
.into_body()
.map(|chunk| {
chunk.iter()
.map(|byte| byte.to_ascii_uppercase())
.collect::<Vec<u8>>()
});*/
let mapping1 = req.into_body().map(|chunk| {
chunk
.iter()
.map(|byte| {
println!("chunk {}", byte.to_ascii_uppercase());
byte.to_ascii_uppercase()
})
.collect::<Vec<u8>>()
});
let data_fuck = vec!["FUCK", " ", "YOU!"];
let chunk_fuck = Chunk::from("fuck");
let stream_fuck = futures::stream::iter_ok::<_, ::std::io::Error>(data_fuck);
/* //let data2 = vec!["hello", " ", "world"];
let data2: Vec<u8> = vec![0x55, 0x20, 0x66];
//let chunk2 = Chunk::from(data2);
//let conv = |x: Vec<u8>| x.iter();
let stream2 = futures::stream::iter_ok::<_, ::std::io::Error>(data2);
//let stream2 = futures::stream::iter_ok::<_, ::std::io::Error>(data2);
let chunks = fileio::load_local_mp3_buffer();
let c: &[u8] = &chunks; // c: &[u8]
//let chunk = Chunk::from(c);
let stream = futures::stream::iter_ok::<_, ::std::io::Error>(c);
*response.body_mut() = Body::from(chunks);
return Box::new( future::ok(response));
*/
// type BoxFut = Box<Future<Item = Response<Body>, Error = hyper::Error> + Send>;
//let bbb = Box::new(future::ok(Response::new("Fuck YOU")));
//let xxx: BoxFut = Box::new(future::ok(response));
//xxx
let my_stream = MyStream::new(5);
//let xstream = futures::stream::iter_ok::<_, ::std::io::Error>(my_stream.iter());
//let mut file = &get_local_mp3_path();
let mut filepath = "/media/hdd/jedzia/rust/p.mp3";
if cfg!(target_os = "windows") {
filepath = "p.mp3";
}
//let file = File::open(filepath).map(file_response).or_else(|_| status_response(StatusCode::NOT_FOUND));
//.expect("failed to open file")
//let file = tokio::fs::File::open(filepath).catch_unwind();
//let fstream = FramedRead::new(file, ChunkDecoder);
/*fn decode(buf: Vec<u8>) -> Result<Option<Chunk>, io::Error> {
let len = buf.len();
if len > 0 {
//Ok(Some(buf.iter().take(32).freeze().into()))
Ok(Some(buf.iter().take(32).into()))
} else {
Ok(None)
}
}
let akjsd = decode(chunks);*/
use bytes::{BigEndian, Buf, BufMut, BytesMut, IntoBuf};
let bytes = b"\x00\x01hello world";
let mut bytes_buf = bytes.into_buf();
let bytes_stream = futures::stream::iter_ok::<_, ::std::io::Error>(bytes);
//*response.body_mut() = Body::wrap_stream(bytes_stream);
*response.body_mut() = Body::wrap_stream(stream_fuck);
//*response.body_mut() = Body::empty();
//*response.set_body(Box::new(stream));
// type BoxFut = Box<Future<Item = Response<Body>, Error = hyper::Error> + Send>;
//let future_result = future::ok(response);
// let mut buf = BytesMut::with_capacity(1024);
// buf.put(&b"hello world"[..]);
//let mut response1 = Response::new("Fuck");
// let mut response1 = Response::new(Body::from(buf.freeze()));
// let future_result: FutureResult<Response<Body>, hyper::Error> = future::ok(response1);
//return Box::new( future_result);
//let (method, uri, version, headers, body) = req.deconstruct();
let myresp =
chunk::handle_request(Request::new(Body::from("Fuck ya to chunk::handle_request")));
return Box::new(myresp);
//let future_result: FutureResult<Response<Body>, hyper::Error> = future::ok(response);
//return Box::new(future_result);
}
// Simply echo the body back to the client.
(&Method::POST, "/echo") => {
*response.body_mut() = req.into_body();
}
//(&Method::GET, Some("/fwd/")) => {
// *response.body_mut() = Body::from("Jahahahahaha");
//}
// Convert to uppercase before sending back to client.
(&Method::POST, "/echo/uppercase") => {
let mapping = req.into_body().map(|chunk| {
chunk
.iter()
.map(|byte| byte.to_ascii_uppercase())
.collect::<Vec<u8>>()
});
*response.body_mut() = Body::wrap_stream(mapping);
}
// Reverse the entire body before sending back to the client.
//
// Since we don't know the end yet, we can't simply stream
// the chunks as they arrive. So, this returns a different
// future, waiting on concatenating the full body, so that
// it can be reversed. Only then can we return a `Response`.
(&Method::POST, "/echo/reversed") => {
let reversed = req.into_body().concat2().map(move |chunk| {
let body = chunk.iter().rev().cloned().collect::<Vec<u8>>();
*response.body_mut() = Body::from(body);
response
});
return Box::new(reversed);
}
// The 404 Not Found route...
_ => {
println!("404 not found.");
*response.status_mut() = StatusCode::NOT_FOUND;
}
};
Box::new(future::ok(response))
}
/*struct ChunkDecoder;
impl Decoder for ChunkDecoder {
type Item = Chunk;
type Error = io::Error;
fn decode(&mut self, buf: &mut bytes::BytesMut) -> Result<Option<Chunk>, io::Error> {
let len = buf.len();
if len > 0 {
Ok(Some(buf.take().freeze().into()))
} else {
Ok(None)
}
}
}*/
struct MyStream {
current: u32,
max: u32,
}
impl MyStream {
pub fn new(max: u32) -> MyStream {
MyStream {
current: 0,
max: max,
}
}
}
impl Stream for MyStream {
type Item = u32;
type Error = Box<Error>;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self.current {
ref mut x if *x < self.max => {
*x = *x + 1;
Ok(Async::Ready(Some(*x)))
}
_ => Ok(Async::Ready(None)),
}
}
}
/*fn get_local_mp3_path() -> &str {
//let mut f = File::open("./p.mp3").expect("failed to open mp3 file!");
let mut filepath = "/media/hdd/jedzia/rust/p.mp3";
if cfg!(target_os = "windows") {
filepath = "p.mp3";
}
filepath
}*/
fn main() -> Result<(), Box<dyn Error>> {
println!("Hello, lovely VU Duo!");
let y: f32 = 5.0;
if y < 4.0 {
// https://stackoverflow.com/questions/51550167/how-to-manually-return-a-result-boxerror
return Err("Bad request".into());
}
// http://192.168.2.43:3000/
//let addr = ([0, 0, 0, 0], 3000).into();
//let addr = ([127, 0, 0, 1], 3000).into();
//let addr = ([192, 168, 2, 43], 3000).into();
pretty_env_logger::init();
//fun_with_ssl();
//return Ok(());
/*
// helps when certificates are not found
extern crate openssl_probe;
let ssl = openssl_probe::init_ssl_cert_env_vars();
*/
//let mut buffer = String::new();
//f.read_to_string(&mut buffer)?;
//let in_addr: SocketAddr = ([127, 0, 0, 1], 3333).into();
let in_addr = get_in_addr();
/*let out_addr: SocketAddr = ([127, 0, 0, 1], 3000).into();
// google.de 216.58.208.35
//let out_addr: SocketAddr = ([216, 58, 208, 35], 443).into();
let client_main = Client::new();
let out_addr_clone = out_addr.clone();
// new_service is run for each connection, creating a 'service'
// to handle requests for that specific connection.
let new_service = move || {
let client = client_main.clone();
// This is the `Service` that will handle the connection.
// `service_fn_ok` is a helper to convert a function that
// returns a Response into a `Service`.
service_fn(move |mut req| {
let uri_string = format!(
"http://{}/{}",
out_addr_clone,
req.uri().path_and_query().map(|x| x.as_str()).unwrap_or("")
);
let uri = uri_string.parse().unwrap();
let in_uri_string = format!("http://{}/{}", in_addr, req.uri());
let in_remove_string = format!("http://{}//", in_addr);
println!("req.uri(): {}", in_uri_string);
let result = in_uri_string.replace(&in_remove_string, "");
//let result = in_uri_string.split(in_remove_string.unwrap_or("")).take(1).next().unwrap_or("");
println!("result: {}", result);
*req.uri_mut() = uri;
client.request(req)
})
};
let server = Server::bind(&in_addr)
.serve(new_service)
.map_err(|e| eprintln!("server error: {}", e));
println!("Listening on http://{}", in_addr);
println!("Proxying on http://{}", out_addr);
rt::run(server);*/
//let mut f = File::open("p.mp3")?;
//let mut buffer: Vec<u8> = Vec::new();
//f.read_to_end(&mut buffer)?;
//let b = buffer.clone();
let server = Server::bind(&in_addr)
//.serve(|| service_fn(|req| echo(req, Vec::new())))
.serve(|| service_fn(echo))
.map_err(|e| eprintln!("server error: {}", e));
println!("Listening on http://{}", in_addr);
hyper::rt::run(server);
println!("finished.");
Ok(())
}
/*//#cfg!(target_os = "windows")
fn testOpenSSL1() {
extern crate openssl;
use openssl::rsa::{Padding, Rsa};
let rsa = Rsa::generate(2048).unwrap();
let data = b"foobar";
println!("data {:?}", data);
let mut buf = vec![0; rsa.size() as usize];
let encrypted_len = rsa.public_encrypt(data, &mut buf, Padding::PKCS1).unwrap();
println!("encripted {:?}", buf);
}*/
fn testOpenSSL() {
/* extern crate openssl;
println!("===== testOpenSSL =====");
use openssl::ssl::{SslConnector, SslMethod};
use std::io::{Read, Write};
use std::net::TcpStream;
let connector = SslConnector::builder(SslMethod::tls()).unwrap().build();
let stream = TcpStream::connect("google.com:443").unwrap();
let mut stream = connector.connect("google.com", stream).unwrap();
stream.write_all(b"GET / HTTP/1.0\r\n\r\n").unwrap();
let mut res = vec![];
stream.read_to_end(&mut res).unwrap();
println!(
"{}",
String::from_utf8_lossy(&res)
.lines()
.take(3)
.collect::<String>()
);*/
}
fn testGoogleSSL() {
println!("===== TestBody =====");
let body = reqwest::Client::builder()
//.danger_accept_invalid_hostnames(true)
//.danger_accept_invalid_certs(true)
//.add_root_certificate(cert)
.build()
.unwrap()
.get("https://www.google.de/")
.send()
.unwrap()
.text()
.unwrap();
//let bla = body.lines().take(3).collect::<String>();
println!("body = {}", body.lines().take(1).collect::<String>());
}
fn fun_with_ssl() {
/*extern crate openssl_static_sys = "openssl-static-sys";
if cfg!(target_os = "linux") {
openssl_static_sys::init_ssl_cert_env_vars();
}*/
use std::env;
use std::fs;
use std::path::PathBuf;
println!("Entry");
let cert_file = env::var_os("SSL_CERT_FILE").map(PathBuf::from);
println!(" env: cert_file {:?}", cert_file);
let cert_dir = env::var_os("SSL_CERT_DIR").map(PathBuf::from);
println!(" env: cert_dir {:?}", cert_dir);
//println!("ssl {:?}", ssl);
println!("After openssl_probe");
let cert_file = env::var_os("SSL_CERT_FILE").map(PathBuf::from);
println!(" env: cert_file {:?}", cert_file);
let cert_dir = env::var_os("SSL_CERT_DIR").map(PathBuf::from);
println!(" env: cert_dir {:?}", cert_dir);
/* // /media/hdd/jedzia/rust/ca-bundle.trust.crt
env::set_var("SSL_CERT_DIR", "/etc/ssl/certs");
//env::set_var("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt");
env::set_var("SSL_CERT_FILE", "/media/hdd/jedzia/rust/ca-bundle.trust.crt");
if cfg!(target_os = "windows") {
//env::set_var("SSL_CERT_DIR", "/etc/ssl/certs");
env::set_var(
"SSL_CERT_FILE",
r"C:\msys64\etc\pki\ca-trust\extracted\openssl\ca-bundle.trust.crt",
);
// set SSL_CERT_FILE=C:\msys64\etc\pki\ca-trust\extracted\openssl\ca-bundle.trust.crt
}
*/
println!("After env::set_var");
let cert_file = env::var_os("SSL_CERT_FILE").map(PathBuf::from);
println!(" env: cert_file {:?}", cert_file);
let cert_dir = env::var_os("SSL_CERT_DIR").map(PathBuf::from);
println!(" env: cert_dir {:?}", cert_dir);
//let cert_file_path = "/etc/ssl/certs/ca-certificates.crt";
let mut cert_file_path = "/media/hdd/jedzia/rust/ca-bundle.trust.crt";
if cfg!(target_os = "windows") {
cert_file_path = r"\\VUDUO2X\Harddisk\jedzia\rust\ssl\ca-bundle.trust.crt";
}
/*let mut buf = Vec::new();
File::open(cert_file_path)
.unwrap()
.read_to_end(&mut buf)
.unwrap();
let cert = reqwest::Certificate::from_der(&buf).unwrap();
println!(" cert {:?}", cert);*/
//return Ok(());
testOpenSSL();
testGoogleSSL();
}
| reduce_forwarded_uri | identifier_name |
main.rs | // cargo build --target=mips-unknown-linux-gnu
//extern crate rand;
//#![deny(warnings)]
#![feature(type_ascription)]
#[macro_use]
extern crate log;
extern crate bytes;
extern crate futures;
extern crate hyper;
extern crate pretty_env_logger;
extern crate regex;
extern crate reqwest;
//extern crate tokio;
#[macro_use]
extern crate lazy_static;
use futures::{future, Async, Poll};
use hyper::rt::{Future, Stream};
use hyper::service::service_fn;
use hyper::{Body, Chunk, Method, Request, Response, Server, StatusCode, Uri};
//use hyper::{Client, Server, Method, Body, Response, Request};
use std::error::Error;
use std::net::SocketAddr;
use std::fs::File;
//use std::io;
//use std::io::prelude::*;
use core::borrow::Borrow;
use futures::future::FutureResult;
use std::io;
use std::io::prelude::*;
use std::process::{Command, Stdio};
use regex::Regex;
use std::borrow::Cow;
mod chunk;
mod fileio;
mod stream;
//use tokio::codec::{Decoder, FramedRead};
//use tokio::prelude::{AsyncRead};
//use tokio::fs::File;
//use tokio::io;
//use std::thread;
//use rand::Rng;
//static NTHREADS: i32 = 10;
fn get_in_addr() -> SocketAddr {
let mut in_addr: SocketAddr = ([192, 168, 3, 43], 3333).into();
if cfg!(target_os = "windows") {
in_addr = ([127, 0, 0, 1], 3333).into();
}
return in_addr;
}
fn reduce_forwarded_uri(uri: &Uri) -> String {
//let in_addr: SocketAddr = get_in_addr();
let uri_string = uri.path_and_query().map(|x| x.as_str()).unwrap_or("");
//let uri: String = uri_string.parse().unwrap();
//let in_uri_string = format!("http://{}/{}", in_addr, req.uri());
let in_remove_string = "/fwd/";
debug!("uri_string: {}", uri_string);
let result = uri_string.replace(&in_remove_string, "");
debug!("result: {}", result);
//let result = in_uri_string.split(in_remove_string.unwrap_or("")).take(1).next().unwrap_or("");
result
}
fn reformat_dates(before: &str) -> Cow<str> {
lazy_static! {
static ref ISO8601_DATE_REGEX : Regex = Regex::new(
//r"(?P<y>\d{4})-(?P<m>\d{2})-(?P<d>\d{2})"
//r"/^(?:(?:(?:https?|ftp):)?\/\/)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?[a-z0-9\u00a1-\uffff]\.)+(?:[a-z\u00a1-\uffff]{2,}\.?))(?::\d{2,5})?(?:[/?#]\S*)?$/i"
//r"(?:(?:(?:https?|ftp):)?\/\/)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?[a-z0-9\u00a1-\uffff]\.)+(?:[a-z\u00a1-\uffff]{2,}\.?))(?::\d{2,5})?(?:[/?#]\S*)?$/i"
r"(https?://(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)*.(\.ogg))"
).unwrap();
}
ISO8601_DATE_REGEX.replace_all(before, "FUCK YA")
}
/// We need to return different futures depending on the route matched,
/// and we can do that with an enum, such as `futures::Either`, or with
/// trait objects.
///
/// A boxed Future (trait object) is used as it is easier to understand
/// and extend with more types. Advanced users could switch to `Either`.
type BoxFut = Box<Future<Item = Response<Body>, Error = hyper::Error> + Send>;
/// This is our service handler. It receives a Request, routes on its
/// path, and returns a Future of a Response.
//fn echo(req: Request<Body>, buf: Vec<u8>) -> BoxFut {
fn echo(req: Request<Body>) -> BoxFut {
let mut response = Response::new(Body::empty());
debug!("method: {}, uri: {}", req.method(), req.uri());
match req.method() {
&Method::GET => {
if req.uri().path().starts_with("/fwd/") {
let req_uri = reduce_forwarded_uri(req.uri());
//let forwarded_uri = Uri::from_static(&req_uri);
*response.body_mut() = Body::from("Lets forward: ".to_owned() + &req_uri);
let body = reqwest::get(req_uri.as_str()) //.unwrap();
//.danger_disable_certs_verification()
.expect(&format!("cannot get '{}'", &req_uri))
.text() //.unwrap();
.expect(&format!("cannot get text for '{}'", &req_uri));
/*let body = reqwest::Client::builder()
.danger_accept_invalid_hostnames(true)
.danger_accept_invalid_certs(true)
.build()
.unwrap()
.get("https://www.google.de/")
.send()
.unwrap()
.text()
.unwrap();*/
println!("body = {}", body.lines().take(3).collect::<String>());
/*let re_weburl = Regex::new(
r"/^(?:(?:(?:https?|ftp):)?\/\/)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?[a-z0-9\u00a1-\uffff]\.)+(?:[a-z\u00a1-\uffff]{2,}\.?))(?::\d{2,5})?(?:[/?#]\S*)?$/i"
);*/
// check if there is an alternative to the ogg-vorbis stream
// when true, then prioritize the mp3 over it
// else create a reference to the mp3 forwarding endpoint
// SSL Certificates on the host are important. make sure:
// ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
// ENV SSL_CERT_DIR=/etc/ssl/certs
// are set.
let after = reformat_dates(&body);
//println!("body = {}", after);
//let chunk = Chunk::from(after);
//*response.body_mut() = Body::from(after.to_string());
*response.body_mut() = Body::from(after.to_string());
//*response.body_mut() = Body::from("got regex");
return Box::new(future::ok(response));
}
}
_ => {}
}
match (req.method(), req.uri().path()) {
// Serve some instructions at /
(&Method::GET, "/") => {
//command_name = 'ffmpeg',
//command_opts = ['-i', 'pipe:0', '-f', 'mp3', '-acodec', 'libvorbis', '-ab', '128k', '-aq', '60', '-f', 'ogg', '-'];
/*
let command_name = "ffmpeg";
//let command_opts = ["-i", "pipe:0", "-f", "mp3", "-acodec", "libvorbis", "-ab", "128k", "-aq", "60", "-f", "ogg", "-"];
//"D:\Program Files\ffmpeg\bin\ffmpeg" -re -i "https://cdn.netzpolitik.org/wp-upload/2019/02/NPP169-Worum-geht-es-eigentlich-bei-der-ePrivacy-Reform.ogg"
// -acodec libmp3lame -ab 128k -aq 60 -f mp3 - > bla.mp3
//let media_addr = "https://cdn.netzpolitik.org/wp-upload/2019/02/NPP169-Worum-geht-es-eigentlich-bei-der-ePrivacy-Reform.ogg";
let media_addr = "https://upload.wikimedia.org/wikipedia/commons/f/f2/Median_test.ogg";
let command_opts = ["-i", media_addr,
"-acodec", "libmp3lame", "-ab", "128k", "-aq", "60", "-f", "mp3", "-"];
let mut ffmpeg_path = command_name;
if cfg!(target_os = "windows") {
ffmpeg_path = "D:/Program Files/ffmpeg/bin/ffmpeg.exe";
}
// Spawn the `wc` command
let process = match Command::new(ffmpeg_path)
.args(&command_opts)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
{
Err(why) => panic!("couldn't spawn {}: {}", command_name, why.description()),
Ok(process) => process,
};
// The `stdout` field also has type `Option<ChildStdout>` so must be unwrapped.
let mut buffer: Vec<u8> = Vec::new();
match process.stdout.unwrap().read_to_end(&mut buffer) {
Err(why) => panic!("couldn't read {} stdout: {}", command_name, why.description()),
Ok(_) => println!("buffer size:[{}]", buffer.len()),
}
*response.body_mut() = Body::from(buffer);
return Box::new( future::ok(response));
*/
/*let mapping = || -> Vec(u8)
{
}*/
//let chunks = vec!["hello", " ", "world"];
//let stream = futures::stream::iter_ok::<_, ::std::io::Error>(chunks);
/*let mapping = req
.into_body()
.map(|chunk| {
chunk.iter()
.map(|byte| byte.to_ascii_uppercase())
.collect::<Vec<u8>>()
});*/
let mapping1 = req.into_body().map(|chunk| {
chunk
.iter()
.map(|byte| {
println!("chunk {}", byte.to_ascii_uppercase());
byte.to_ascii_uppercase()
})
.collect::<Vec<u8>>()
});
let data_fuck = vec!["FUCK", " ", "YOU!"];
let chunk_fuck = Chunk::from("fuck");
let stream_fuck = futures::stream::iter_ok::<_, ::std::io::Error>(data_fuck);
/* //let data2 = vec!["hello", " ", "world"];
let data2: Vec<u8> = vec![0x55, 0x20, 0x66];
//let chunk2 = Chunk::from(data2);
//let conv = |x: Vec<u8>| x.iter();
let stream2 = futures::stream::iter_ok::<_, ::std::io::Error>(data2);
//let stream2 = futures::stream::iter_ok::<_, ::std::io::Error>(data2);
let chunks = fileio::load_local_mp3_buffer();
let c: &[u8] = &chunks; // c: &[u8]
//let chunk = Chunk::from(c);
let stream = futures::stream::iter_ok::<_, ::std::io::Error>(c);
*response.body_mut() = Body::from(chunks);
return Box::new( future::ok(response));
*/
// type BoxFut = Box<Future<Item = Response<Body>, Error = hyper::Error> + Send>;
//let bbb = Box::new(future::ok(Response::new("Fuck YOU")));
//let xxx: BoxFut = Box::new(future::ok(response));
//xxx
let my_stream = MyStream::new(5);
//let xstream = futures::stream::iter_ok::<_, ::std::io::Error>(my_stream.iter());
//let mut file = &get_local_mp3_path();
let mut filepath = "/media/hdd/jedzia/rust/p.mp3";
if cfg!(target_os = "windows") {
filepath = "p.mp3";
}
//let file = File::open(filepath).map(file_response).or_else(|_| status_response(StatusCode::NOT_FOUND));
//.expect("failed to open file")
//let file = tokio::fs::File::open(filepath).catch_unwind();
//let fstream = FramedRead::new(file, ChunkDecoder);
/*fn decode(buf: Vec<u8>) -> Result<Option<Chunk>, io::Error> {
let len = buf.len(); | if len > 0 {
//Ok(Some(buf.iter().take(32).freeze().into()))
Ok(Some(buf.iter().take(32).into()))
} else {
Ok(None)
}
}
let akjsd = decode(chunks);*/
use bytes::{BigEndian, Buf, BufMut, BytesMut, IntoBuf};
let bytes = b"\x00\x01hello world";
let mut bytes_buf = bytes.into_buf();
let bytes_stream = futures::stream::iter_ok::<_, ::std::io::Error>(bytes);
//*response.body_mut() = Body::wrap_stream(bytes_stream);
*response.body_mut() = Body::wrap_stream(stream_fuck);
//*response.body_mut() = Body::empty();
//*response.set_body(Box::new(stream));
// type BoxFut = Box<Future<Item = Response<Body>, Error = hyper::Error> + Send>;
//let future_result = future::ok(response);
// let mut buf = BytesMut::with_capacity(1024);
// buf.put(&b"hello world"[..]);
//let mut response1 = Response::new("Fuck");
// let mut response1 = Response::new(Body::from(buf.freeze()));
// let future_result: FutureResult<Response<Body>, hyper::Error> = future::ok(response1);
//return Box::new( future_result);
//let (method, uri, version, headers, body) = req.deconstruct();
let myresp =
chunk::handle_request(Request::new(Body::from("Fuck ya to chunk::handle_request")));
return Box::new(myresp);
//let future_result: FutureResult<Response<Body>, hyper::Error> = future::ok(response);
//return Box::new(future_result);
}
// Simply echo the body back to the client.
(&Method::POST, "/echo") => {
*response.body_mut() = req.into_body();
}
//(&Method::GET, Some("/fwd/")) => {
// *response.body_mut() = Body::from("Jahahahahaha");
//}
// Convert to uppercase before sending back to client.
(&Method::POST, "/echo/uppercase") => {
let mapping = req.into_body().map(|chunk| {
chunk
.iter()
.map(|byte| byte.to_ascii_uppercase())
.collect::<Vec<u8>>()
});
*response.body_mut() = Body::wrap_stream(mapping);
}
// Reverse the entire body before sending back to the client.
//
// Since we don't know the end yet, we can't simply stream
// the chunks as they arrive. So, this returns a different
// future, waiting on concatenating the full body, so that
// it can be reversed. Only then can we return a `Response`.
(&Method::POST, "/echo/reversed") => {
let reversed = req.into_body().concat2().map(move |chunk| {
let body = chunk.iter().rev().cloned().collect::<Vec<u8>>();
*response.body_mut() = Body::from(body);
response
});
return Box::new(reversed);
}
// The 404 Not Found route...
_ => {
println!("404 not found.");
*response.status_mut() = StatusCode::NOT_FOUND;
}
};
Box::new(future::ok(response))
}
/*struct ChunkDecoder;
impl Decoder for ChunkDecoder {
type Item = Chunk;
type Error = io::Error;
fn decode(&mut self, buf: &mut bytes::BytesMut) -> Result<Option<Chunk>, io::Error> {
let len = buf.len();
if len > 0 {
Ok(Some(buf.take().freeze().into()))
} else {
Ok(None)
}
}
}*/
struct MyStream {
current: u32,
max: u32,
}
impl MyStream {
pub fn new(max: u32) -> MyStream {
MyStream {
current: 0,
max,
}
}
}
impl Stream for MyStream {
type Item = u32;
type Error = Box<dyn Error>;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self.current {
ref mut x if *x < self.max => {
*x += 1;
Ok(Async::Ready(Some(*x)))
}
_ => Ok(Async::Ready(None)),
}
}
}
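// Usage sketch (illustrative only, never called): MyStream yields 1..=max when driven to
// completion with the futures 0.1 combinators.
fn _my_stream_example() {
    let collected = MyStream::new(3).collect().wait();
    println!("MyStream::new(3) yielded {:?}", collected); // prints: Ok([1, 2, 3])
}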
/*fn get_local_mp3_path() -> &str {
//let mut f = File::open("./p.mp3").expect("failed to open mp3 file!");
let mut filepath = "/media/hdd/jedzia/rust/p.mp3";
if cfg!(target_os = "windows") {
filepath = "p.mp3";
}
filepath
}*/
fn main() -> Result<(), Box<dyn Error>> {
println!("Hello, lovely VU Duo!");
let y: f32 = 5.0;
if y < 4.0 {
// https://stackoverflow.com/questions/51550167/how-to-manually-return-a-result-boxerror
return Err("Bad request".into());
}
// http://192.168.2.43:3000/
//let addr = ([0, 0, 0, 0], 3000).into();
//let addr = ([127, 0, 0, 1], 3000).into();
//let addr = ([192, 168, 2, 43], 3000).into();
pretty_env_logger::init();
//fun_with_ssl();
//return Ok(());
/*
// helps when certificates are not found
extern crate openssl_probe;
let ssl = openssl_probe::init_ssl_cert_env_vars();
*/
//let mut buffer = String::new();
//f.read_to_string(&mut buffer)?;
//let in_addr: SocketAddr = ([127, 0, 0, 1], 3333).into();
let in_addr = get_in_addr();
/*let out_addr: SocketAddr = ([127, 0, 0, 1], 3000).into();
// google.de 216.58.208.35
//let out_addr: SocketAddr = ([216, 58, 208, 35], 443).into();
let client_main = Client::new();
let out_addr_clone = out_addr.clone();
// new_service is run for each connection, creating a 'service'
// to handle requests for that specific connection.
let new_service = move || {
let client = client_main.clone();
// This is the `Service` that will handle the connection.
// `service_fn_ok` is a helper to convert a function that
// returns a Response into a `Service`.
service_fn(move |mut req| {
let uri_string = format!(
"http://{}/{}",
out_addr_clone,
req.uri().path_and_query().map(|x| x.as_str()).unwrap_or("")
);
let uri = uri_string.parse().unwrap();
let in_uri_string = format!("http://{}/{}", in_addr, req.uri());
let in_remove_string = format!("http://{}//", in_addr);
println!("req.uri(): {}", in_uri_string);
let result = in_uri_string.replace(&in_remove_string, "");
//let result = in_uri_string.split(in_remove_string.unwrap_or("")).take(1).next().unwrap_or("");
println!("result: {}", result);
*req.uri_mut() = uri;
client.request(req)
})
};
let server = Server::bind(&in_addr)
.serve(new_service)
.map_err(|e| eprintln!("server error: {}", e));
println!("Listening on http://{}", in_addr);
println!("Proxying on http://{}", out_addr);
rt::run(server);*/
//let mut f = File::open("p.mp3")?;
//let mut buffer: Vec<u8> = Vec::new();
//f.read_to_end(&mut buffer)?;
//let b = buffer.clone();
let server = Server::bind(&in_addr)
//.serve(|| service_fn(|req| echo(req, Vec::new())))
.serve(|| service_fn(echo))
.map_err(|e| eprintln!("server error: {}", e));
println!("Listening on http://{}", in_addr);
hyper::rt::run(server);
println!("finished.");
Ok(())
}
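// Manual smoke test (assumption: get_in_addr() resolves to port 3000 on this host, as in the
// commented-out addresses above; adjust host/port to whatever "Listening on ..." prints):
//   curl http://127.0.0.1:3000/                                       -> demo body from "/"
//   curl -X POST --data 'hello' http://127.0.0.1:3000/echo            -> hello
//   curl -X POST --data 'hello' http://127.0.0.1:3000/echo/uppercase  -> HELLO
//   curl -X POST --data 'hello' http://127.0.0.1:3000/echo/reversed   -> olleh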
/*//#cfg!(target_os = "windows")
fn testOpenSSL1() {
extern crate openssl;
use openssl::rsa::{Padding, Rsa};
let rsa = Rsa::generate(2048).unwrap();
let data = b"foobar";
println!("data {:?}", data);
let mut buf = vec![0; rsa.size() as usize];
let encrypted_len = rsa.public_encrypt(data, &mut buf, Padding::PKCS1).unwrap();
println!("encripted {:?}", buf);
}*/
fn testOpenSSL() {
/* extern crate openssl;
println!("===== testOpenSSL =====");
use openssl::ssl::{SslConnector, SslMethod};
use std::io::{Read, Write};
use std::net::TcpStream;
let connector = SslConnector::builder(SslMethod::tls()).unwrap().build();
let stream = TcpStream::connect("google.com:443").unwrap();
let mut stream = connector.connect("google.com", stream).unwrap();
stream.write_all(b"GET / HTTP/1.0\r\n\r\n").unwrap();
let mut res = vec![];
stream.read_to_end(&mut res).unwrap();
println!(
"{}",
String::from_utf8_lossy(&res)
.lines()
.take(3)
.collect::<String>()
);*/
}
fn testGoogleSSL() {
println!("===== TestBody =====");
let body = reqwest::Client::builder()
//.danger_accept_invalid_hostnames(true)
//.danger_accept_invalid_certs(true)
//.add_root_certificate(cert)
.build()
.unwrap()
.get("https://www.google.de/")
.send()
.unwrap()
.text()
.unwrap();
//let bla = body.lines().take(3).collect::<String>();
println!("body = {}", body.lines().take(1).collect::<String>());
}
fn fun_with_ssl() {
/*extern crate openssl_static_sys = "openssl-static-sys";
if cfg!(target_os = "linux") {
openssl_static_sys::init_ssl_cert_env_vars();
}*/
use std::env;
use std::fs;
use std::path::PathBuf;
println!("Entry");
let cert_file = env::var_os("SSL_CERT_FILE").map(PathBuf::from);
println!(" env: cert_file {:?}", cert_file);
let cert_dir = env::var_os("SSL_CERT_DIR").map(PathBuf::from);
println!(" env: cert_dir {:?}", cert_dir);
//println!("ssl {:?}", ssl);
println!("After openssl_probe");
let cert_file = env::var_os("SSL_CERT_FILE").map(PathBuf::from);
println!(" env: cert_file {:?}", cert_file);
let cert_dir = env::var_os("SSL_CERT_DIR").map(PathBuf::from);
println!(" env: cert_dir {:?}", cert_dir);
/* // /media/hdd/jedzia/rust/ca-bundle.trust.crt
env::set_var("SSL_CERT_DIR", "/etc/ssl/certs");
//env::set_var("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt");
env::set_var("SSL_CERT_FILE", "/media/hdd/jedzia/rust/ca-bundle.trust.crt");
if cfg!(target_os = "windows") {
//env::set_var("SSL_CERT_DIR", "/etc/ssl/certs");
env::set_var(
"SSL_CERT_FILE",
r"C:\msys64\etc\pki\ca-trust\extracted\openssl\ca-bundle.trust.crt",
);
// set SSL_CERT_FILE=C:\msys64\etc\pki\ca-trust\extracted\openssl\ca-bundle.trust.crt
}
*/
println!("After env::set_var");
let cert_file = env::var_os("SSL_CERT_FILE").map(PathBuf::from);
println!(" env: cert_file {:?}", cert_file);
let cert_dir = env::var_os("SSL_CERT_DIR").map(PathBuf::from);
println!(" env: cert_dir {:?}", cert_dir);
//let cert_file_path = "/etc/ssl/certs/ca-certificates.crt";
let mut cert_file_path = "/media/hdd/jedzia/rust/ca-bundle.trust.crt";
if cfg!(target_os = "windows") {
cert_file_path = r"\\VUDUO2X\Harddisk\jedzia\rust\ssl\ca-bundle.trust.crt";
}
/*let mut buf = Vec::new();
File::open(cert_file_path)
.unwrap()
.read_to_end(&mut buf)
.unwrap();
let cert = reqwest::Certificate::from_der(&buf).unwrap();
println!(" cert {:?}", cert);*/
//return Ok(());
testOpenSSL();
testGoogleSSL();
} | random_line_split |
|
mod.rs | // Copyright (C) 2020 Alibaba Cloud. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//! Fuse passthrough file system, mirroring an existing FS hierarchy.
//!
//! This file system mirrors the existing file system hierarchy of the system, starting at the
//! root file system. This is implemented by just "passing through" all requests to the
//! corresponding underlying file system.
//!
//! The code is derived from the
//! [CrosVM](https://chromium.googlesource.com/chromiumos/platform/crosvm/) project,
//! with heavy modification/enhancements from Alibaba Cloud OS team.
use std::any::Any;
use std::collections::{btree_map, BTreeMap};
use std::ffi::{CStr, CString};
use std::fs::File;
use std::io;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockWriteGuard};
use std::time::Duration;
use vm_memory::ByteValued;
use crate::abi::linux_abi as fuse;
use crate::api::filesystem::Entry;
use crate::api::{BackendFileSystem, VFS_MAX_INO};
#[cfg(feature = "async-io")]
mod async_io;
mod sync_io;
mod multikey;
use multikey::MultikeyBTreeMap;
use crate::async_util::AsyncDrive;
const CURRENT_DIR_CSTR: &[u8] = b".\0";
const PARENT_DIR_CSTR: &[u8] = b"..\0";
const EMPTY_CSTR: &[u8] = b"\0";
const PROC_CSTR: &[u8] = b"/proc\0";
type Inode = u64;
type Handle = u64;
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
struct InodeAltKey {
ino: libc::ino64_t,
dev: libc::dev_t,
}
impl InodeAltKey {
fn from_stat(st: &libc::stat64) -> Self {
InodeAltKey {
ino: st.st_ino,
dev: st.st_dev,
}
}
}
struct InodeData {
inode: Inode,
// Most of these aren't actually files but ¯\_(ツ)_/¯.
file: File,
refcount: AtomicU64,
}
impl InodeData {
fn new(inode: Inode, file: File, refcount: u64) -> Self {
InodeData {
inode,
file,
refcount: AtomicU64::new(refcount),
}
}
// When making use of the underlying RawFd, the caller must ensure that the Arc<InodeData>
// object stays in scope. Otherwise it may open a race window in which the wrong target fd
// is accessed. By introducing this method, we can explicitly audit all callers that make
// use of the underlying RawFd.
fn get_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
/// Data structures to manage accessed inodes.
struct InodeMap {
inodes: RwLock<MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>>,
}
impl InodeMap {
fn new() -> Self {
InodeMap {
inodes: RwLock::new(MultikeyBTreeMap::new()),
}
}
fn clear(&self) {
self.inodes.write().unwrap().clear();
}
fn get(&self, inode: Inode) -> io::Result<Arc<InodeData>> {
self.inodes
.read()
.unwrap()
.get(&inode)
.map(Arc::clone)
.ok_or_else(ebadf)
}
fn get_alt(&self, altkey: &InodeAltKey) -> Option<Arc<InodeData>> {
self.inodes.read().unwrap().get_alt(altkey).map(Arc::clone)
}
fn get_map_mut(
&self,
) -> RwLockWriteGuard<MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>> {
self.inodes.write().unwrap()
}
fn insert(&self, inode: Inode, altkey: InodeAltKey, data: InodeData) {
self.inodes
.write()
.unwrap()
.insert(inode, altkey, Arc::new(data));
}
}
struct HandleData {
inode: Inode,
file: File,
lock: Mutex<()>,
}
impl HandleData {
fn new(inode: Inode, file: File) -> Self {
HandleData {
inode,
file,
lock: Mutex::new(()),
}
}
fn get_file_mut(&self) -> (MutexGuard<()>, &File) {
(self.lock.lock().unwrap(), &self.file)
}
// When making use of the underlying RawFd, the caller must ensure that the Arc<HandleData>
// object stays in scope. Otherwise it may open a race window in which the wrong target fd
// is accessed. By introducing this method, we can explicitly audit all callers that make
// use of the underlying RawFd.
fn get_handle_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
struct HandleMap {
handles: RwLock<BTreeMap<Handle, Arc<HandleData>>>,
}
impl HandleMap {
fn new() -> Self {
HandleMap {
handles: RwLock::new(BTreeMap::new()),
}
}
fn clear(&self) {
self.handles.write().unwrap().clear();
}
fn insert(&self, handle: Handle, data: HandleData) {
self.handles.write().unwrap().insert(handle, Arc::new(data));
}
fn release(&self, handle: Handle, inode: Inode) -> io::Result<()> {
let mut handles = self.handles.write().unwrap();
if let btree_map::Entry::Occupied(e) = handles.entry(handle) {
if e.get().inode == inode {
// We don't need to close the file here because that will happen automatically when
// the last `Arc` is dropped.
e.remove();
return Ok(());
}
}
Err(ebadf())
}
fn get(&self, handle: Handle, inode: Inode) -> io::Result<Arc<HandleData>> {
self.handles
.read()
.unwrap()
.get(&handle)
.filter(|hd| hd.inode == inode)
.map(Arc::clone)
.ok_or_else(ebadf)
}
}
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
struct LinuxDirent64 {
d_ino: libc::ino64_t,
d_off: libc::off64_t,
d_reclen: libc::c_ushort,
d_ty: libc::c_uchar,
}
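// Safety rationale for the unsafe impl below: LinuxDirent64 is #[repr(C, packed)] and contains
// only plain integer fields, so every byte pattern is a valid value, which is the property
// ByteValued is meant to capture.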
unsafe impl ByteValued for LinuxDirent64 {}
/// The caching policy that the file system should report to the FUSE client. By default the FUSE
/// protocol uses close-to-open consistency. This means that any cached contents of the file are
/// invalidated the next time that file is opened.
#[derive(Debug, Clone, PartialEq)]
pub enum CachePolicy {
/// The client should never cache file data and all I/O should be directly forwarded to the
/// server. This policy must be selected when file contents may change without the knowledge of
/// the FUSE client (i.e., the file system does not have exclusive access to the directory).
Never,
/// The client is free to choose when and how to cache file data. This is the default policy and
/// uses close-to-open consistency as described in the enum documentation.
Auto,
/// The client should always cache file data. This means that the FUSE client will not
/// invalidate any cached data that was returned by the file system the last time the file was
/// opened. This policy should only be selected when the file system has exclusive access to the
/// directory.
Always,
}
impl FromStr for CachePolicy {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"never" | "Never" | "NEVER" | "none" | "None" | "NONE" => Ok(CachePolicy::Never),
"auto" | "Auto" | "AUTO" => Ok(CachePolicy::Auto),
"always" | "Always" | "ALWAYS" => Ok(CachePolicy::Always),
_ => Err("invalid cache policy"),
}
}
}
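// Small illustration (added for documentation only; mirrors the accepted spellings above)
// of driving the FromStr implementation through `str::parse`.
#[cfg(test)]
mod cache_policy_parse_example {
    use super::*;

    #[test]
    fn parses_documented_spellings() {
        assert_eq!("never".parse::<CachePolicy>(), Ok(CachePolicy::Never));
        assert_eq!("AUTO".parse::<CachePolicy>(), Ok(CachePolicy::Auto));
        assert_eq!("Always".parse::<CachePolicy>(), Ok(CachePolicy::Always));
        assert!("write-through".parse::<CachePolicy>().is_err());
    }
}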
impl Default for CachePolicy {
fn default() -> Self {
CachePolicy::Auto
}
}
/// Options that configure the behavior of the passthrough fuse file system.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
/// How long the FUSE client should consider directory entries to be valid. If the contents of a
/// directory can only be modified by the FUSE client (i.e., the file system has exclusive
/// access), then this should be a large value.
///
/// The default value for this option is 5 seconds.
pub entry_timeout: Duration,
/// How long the FUSE client should consider file and directory attributes to be valid. If the
/// attributes of a file or directory can only be modified by the FUSE client (i.e., the file
/// system has exclusive access), then this should be set to a large value.
///
/// The default value for this option is 5 seconds.
pub attr_timeout: Duration,
/// The caching policy the file system should use. See the documentation of `CachePolicy` for
/// more details.
pub cache_policy: CachePolicy,
/// Whether the file system should enable writeback caching. This can improve performance as it
/// allows the FUSE client to cache and coalesce multiple writes before sending them to the file
/// system. However, enabling this option can increase the risk of data corruption if the file
/// contents can change without the knowledge of the FUSE client (i.e., the server does **NOT**
/// have exclusive access). Additionally, the file system should have read access to all files
/// in the directory it is serving as the FUSE client may send read requests even for files
/// opened with `O_WRONLY`.
///
/// Therefore callers should only enable this option when they can guarantee that: 1) the file
/// system has exclusive access to the directory and 2) the file system has read permissions for
/// all files in that directory.
///
/// The default value for this option is `false`.
pub writeback: bool,
/// The path of the root directory.
///
/// The default is `/`.
pub root_dir: String,
/// Whether the file system should support Extended Attributes (xattr). Enabling this feature may
/// have a significant impact on performance, especially on write parallelism. This is the result
/// of FUSE attempting to remove the special file privileges after each write request.
///
/// The default value for this option is `false`.
pub xattr: bool,
/// To be compatible with Vfs and PseudoFs, PassthroughFs needs to prepare
/// root inode before accepting INIT request.
///
/// The default value for this option is `true`.
pub do_import: bool,
/// Control whether no_open is allowed.
///
/// The default value for this option is `false`.
pub no_open: bool,
/// Control whether no_opendir is allowed.
///
/// The default value for this option is `false`.
pub no_opendir: bool,
}
impl Default for Config {
fn default() -> Self {
Config {
entry_timeout: Duration::from_secs(5),
attr_timeout: Duration::from_secs(5),
cache_policy: Default::default(),
writeback: false,
root_dir: String::from("/"),
xattr: false,
do_import: true,
no_open: false,
no_opendir: false,
}
}
}
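// Hedged sketch (field values are made up for illustration): overriding a few fields while
// keeping the documented defaults for the rest via struct update syntax.
fn _example_config() -> Config {
    Config {
        root_dir: String::from("/srv/export"),
        cache_policy: CachePolicy::Never,
        ..Default::default()
    }
}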
/// A file system that simply "passes through" all requests it receives to the underlying file
/// system.
///
/// To keep the implementation simple it serves the contents of its root directory. Users
/// that wish to serve only a specific directory should set up the environment so that that
/// directory ends up as the root of the file system process. One way to accomplish this is via a
/// combination of mount namespaces and the pivot_root system call.
pub struct PassthroughFs<D> {
// File descriptors for various points in the file system tree. These fds are always opened with
// the `O_PATH` option so they cannot be used for reading or writing any data. See the
// documentation of the `O_PATH` flag in `open(2)` for more details on what one can and cannot
// do with an fd opened with this flag.
inode_map: InodeMap,
next_inode: AtomicU64,
// File descriptors for open files and directories. Unlike the fds in `inodes`, these _can_ be
// used for reading and writing data.
handle_map: HandleMap,
next_handle: AtomicU64,
// File descriptor pointing to the `/proc` directory. This is used to convert an fd from
// `inodes` into one that can go into `handles`. This is accomplished by reading the
// `self/fd/{}` symlink. We keep an open fd here in case the file system tree that we are meant
// to be serving doesn't have access to `/proc`.
proc: File,
// Whether writeback caching is enabled for this directory. This will only be true when
// `cfg.writeback` is true and `init` was called with `FsOptions::WRITEBACK_CACHE`.
writeback: AtomicBool,
// Whether no_open is enabled.
no_open: AtomicBool,
// Whether no_opendir is enabled.
no_opendir: AtomicBool,
cfg: Config,
phantom: PhantomData<D>,
}
impl<D: AsyncDrive> PassthroughFs<D> {
/// Create a Passthrough file system instance.
pub fn new(cfg: Config) -> io::Result<PassthroughFs<D>> {
// Safe because this is a constant value and a valid C string.
let proc_cstr = unsafe { CStr::from_bytes_with_nul_unchecked(PROC_CSTR) };
let proc = Self::open_file(
libc::AT_FDCWD,
proc_cstr,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
Ok(PassthroughFs {
inode_map: InodeMap::new(),
next_inode: AtomicU64::new(fuse::ROOT_ID + 1),
handle_map: HandleMap::new(),
next_handle: AtomicU64::new(1),
proc,
writeback: AtomicBool::new(false),
no_open: AtomicBool::new(false),
no_opendir: AtomicBool::new(false),
cfg,
phantom: PhantomData,
})
}
/// Initialize the Passthrough file system.
pub fn import(&self) -> io::Result<()> {
let root = CString::new(self.cfg.root_dir.as_str()).expect("CString::new failed");
// We use `O_PATH` because we just want this for traversing the directory tree
// and not for actually reading the contents.
let f = Self::open_file(
libc::AT_FDCWD,
&root,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
let st = Self::stat(&f)?;
// Safe because this doesn't modify any memory and there is no need to check the return
// value because this system call always succeeds. We need to clear the umask here because
// we want the client to be able to set all the bits in the mode.
unsafe { libc::umask(0o000) };
// Not sure why the root inode gets a refcount of 2 but that's what libfuse does.
self.inode_map.insert(
fuse::ROOT_ID,
InodeAltKey::from_stat(&st),
InodeData::new(fuse::ROOT_ID, f, 2),
);
Ok(())
}
/// Get the list of file descriptors which should be reserved across live upgrade.
pub fn keep_fds(&self) -> Vec<RawFd> {
vec![self.proc.as_raw_fd()]
}
fn stat(f: &File) -> io::Result<libc::stat64> {
// Safe because this is a constant value and a valid C string.
let pathname = unsafe { CStr::from_bytes_with_nul_unchecked(EMPTY_CSTR) };
let mut st = MaybeUninit::<libc::stat64>::zeroed();
// Safe because the kernel will only write data in `st` and we check the return value.
let res = unsafe {
libc::fstatat64(
f.as_raw_fd(),
pathname.as_ptr(),
st.as_mut_ptr(),
libc::AT_EMPTY_PATH | libc::AT_SYMLINK_NOFOLLOW,
)
};
if res >= 0 {
// Safe because the kernel guarantees that the struct is now fully initialized.
Ok(unsafe { st.assume_init() })
} else {
Err(io::Error::last_os_error())
}
}
fn open_file(dfd: i32, pathname: &CStr, flags: i32, mode: u32) -> io::Result<File> {
let fd = if flags & libc::O_CREAT == libc::O_CREAT {
unsafe { libc::openat(dfd, pathname.as_ptr(), flags, mode) }
} else {
| if fd < 0 {
return Err(io::Error::last_os_error());
}
// Safe because we just opened this fd.
Ok(unsafe { File::from_raw_fd(fd) })
}
fn do_lookup(&self, parent: Inode, name: &CStr) -> io::Result<Entry> {
let p = self.inode_map.get(parent)?;
let f = Self::open_file(
p.get_raw_fd(),
name,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
let st = Self::stat(&f)?;
let altkey = InodeAltKey::from_stat(&st);
let mut found = None;
'search: loop {
match self.inode_map.get_alt(&altkey) {
// No existing entry found
None => break 'search,
Some(data) => {
let curr = data.refcount.load(Ordering::Acquire);
// forget_one() has just destroyed the entry, retry...
if curr == 0 {
continue 'search;
}
// Saturating add to avoid integer overflow; it's not realistic for a u64 refcount to saturate.
let new = curr.saturating_add(1);
// Synchronizes with forget_one().
if data
.refcount
.compare_exchange(curr, new, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
found = Some(data.inode);
break;
}
}
}
}
let inode = if let Some(v) = found {
v
} else {
let mut inodes = self.inode_map.get_map_mut();
// Look up inode_map again after acquiring the inode_map lock, as another racing
// thread might already have added an inode with the same altkey while we were not
// holding the lock. If so, just use the newly added inode; otherwise the existing
// inode would be replaced, resulting in EBADF.
match inodes.get_alt(&altkey).map(Arc::clone) {
Some(data) => {
trace!(
"fuse: do_lookup sees existing inode {} altkey {:?}",
data.inode,
altkey
);
data.refcount.fetch_add(1, Ordering::Relaxed);
data.inode
}
None => {
let inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
if inode > VFS_MAX_INO {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("max inode number reached: {}", VFS_MAX_INO),
));
}
trace!(
"fuse: do_lookup adds new inode {} altkey {:?}",
inode,
altkey
);
inodes.insert(inode, altkey, Arc::new(InodeData::new(inode, f, 1)));
inode
}
}
};
Ok(Entry {
inode,
generation: 0,
attr: st,
attr_timeout: self.cfg.attr_timeout,
entry_timeout: self.cfg.entry_timeout,
})
}
fn forget_one(
inodes: &mut MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>,
inode: Inode,
count: u64,
) {
// ROOT_ID should not be forgotten, or we would no longer be able to access any files.
if inode == fuse::ROOT_ID {
return;
}
if let Some(data) = inodes.get(&inode) {
// Acquiring the write lock on the inode map prevents new lookups from incrementing the
// refcount but there is the possibility that a previous lookup already acquired a
// reference to the inode data and is in the process of updating the refcount so we need
// to loop here until we can decrement successfully.
loop {
let curr = data.refcount.load(Ordering::Acquire);
// Saturating sub because it doesn't make sense for a refcount to go below zero and
// we don't want misbehaving clients to cause integer overflow.
let new = curr.saturating_sub(count);
trace!(
"fuse: forget inode {} refcount {}, count {}, new_count {}",
inode,
curr,
count,
new
);
// Synchronizes with the acquire load in `do_lookup`.
if data
.refcount
.compare_exchange(curr, new, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
if new == 0 {
// We just removed the last refcount for this inode.
inodes.remove(&inode);
}
break;
}
}
}
}
fn do_release(&self, inode: Inode, handle: Handle) -> io::Result<()> {
self.handle_map.release(handle, inode)
}
}
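// Setup sketch (illustrative only; the concrete AsyncDrive type is supplied by the embedding
// application). The intended order is `new` followed by `import` before serving requests.
fn _passthrough_setup<D: AsyncDrive>(cfg: Config) -> io::Result<PassthroughFs<D>> {
    let fs = PassthroughFs::<D>::new(cfg)?;
    fs.import()?;
    Ok(fs)
}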
#[cfg(not(feature = "async-io"))]
impl<D: AsyncDrive> BackendFileSystem for PassthroughFs<D> {
type D = D;
fn mount(&self) -> io::Result<(Entry, u64)> {
let entry = self.do_lookup(fuse::ROOT_ID, &CString::new(".").unwrap())?;
Ok((entry, VFS_MAX_INO))
}
fn as_any(&self) -> &dyn Any {
self
}
}
fn ebadf() -> io::Error {
io::Error::from_raw_os_error(libc::EBADF)
}
| unsafe { libc::openat(dfd, pathname.as_ptr(), flags) }
};
| conditional_block |
mod.rs | // Copyright (C) 2020 Alibaba Cloud. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//! Fuse passthrough file system, mirroring an existing FS hierarchy.
//!
//! This file system mirrors the existing file system hierarchy of the system, starting at the
//! root file system. This is implemented by just "passing through" all requests to the
//! corresponding underlying file system.
//!
//! The code is derived from the
//! [CrosVM](https://chromium.googlesource.com/chromiumos/platform/crosvm/) project,
//! with heavy modification/enhancements from Alibaba Cloud OS team.
use std::any::Any;
use std::collections::{btree_map, BTreeMap}; | use std::mem::MaybeUninit;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockWriteGuard};
use std::time::Duration;
use vm_memory::ByteValued;
use crate::abi::linux_abi as fuse;
use crate::api::filesystem::Entry;
use crate::api::{BackendFileSystem, VFS_MAX_INO};
#[cfg(feature = "async-io")]
mod async_io;
mod sync_io;
mod multikey;
use multikey::MultikeyBTreeMap;
use crate::async_util::AsyncDrive;
const CURRENT_DIR_CSTR: &[u8] = b".\0";
const PARENT_DIR_CSTR: &[u8] = b"..\0";
const EMPTY_CSTR: &[u8] = b"\0";
const PROC_CSTR: &[u8] = b"/proc\0";
type Inode = u64;
type Handle = u64;
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
struct InodeAltKey {
ino: libc::ino64_t,
dev: libc::dev_t,
}
impl InodeAltKey {
fn from_stat(st: &libc::stat64) -> Self {
InodeAltKey {
ino: st.st_ino,
dev: st.st_dev,
}
}
}
struct InodeData {
inode: Inode,
// Most of these aren't actually files but ¯\_(ツ)_/¯.
file: File,
refcount: AtomicU64,
}
impl InodeData {
fn new(inode: Inode, file: File, refcount: u64) -> Self {
InodeData {
inode,
file,
refcount: AtomicU64::new(refcount),
}
}
// When making use of the underlying RawFd, the caller must ensure that the Arc<InodeData>
// object stays in scope. Otherwise it may open a race window in which the wrong target fd
// is accessed. By introducing this method, we can explicitly audit all callers that make
// use of the underlying RawFd.
fn get_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
/// Data structures to manage accessed inodes.
struct InodeMap {
inodes: RwLock<MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>>,
}
impl InodeMap {
fn new() -> Self {
InodeMap {
inodes: RwLock::new(MultikeyBTreeMap::new()),
}
}
fn clear(&self) {
self.inodes.write().unwrap().clear();
}
fn get(&self, inode: Inode) -> io::Result<Arc<InodeData>> {
self.inodes
.read()
.unwrap()
.get(&inode)
.map(Arc::clone)
.ok_or_else(ebadf)
}
fn get_alt(&self, altkey: &InodeAltKey) -> Option<Arc<InodeData>> {
self.inodes.read().unwrap().get_alt(altkey).map(Arc::clone)
}
fn get_map_mut(
&self,
) -> RwLockWriteGuard<MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>> {
self.inodes.write().unwrap()
}
fn insert(&self, inode: Inode, altkey: InodeAltKey, data: InodeData) {
self.inodes
.write()
.unwrap()
.insert(inode, altkey, Arc::new(data));
}
}
struct HandleData {
inode: Inode,
file: File,
lock: Mutex<()>,
}
impl HandleData {
fn new(inode: Inode, file: File) -> Self {
HandleData {
inode,
file,
lock: Mutex::new(()),
}
}
fn get_file_mut(&self) -> (MutexGuard<()>, &File) {
(self.lock.lock().unwrap(), &self.file)
}
// When making use of the underlying RawFd, the caller must ensure that the Arc<HandleData>
// object stays in scope. Otherwise it may open a race window in which the wrong target fd
// is accessed. By introducing this method, we can explicitly audit all callers that make
// use of the underlying RawFd.
fn get_handle_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
struct HandleMap {
handles: RwLock<BTreeMap<Handle, Arc<HandleData>>>,
}
impl HandleMap {
fn new() -> Self {
HandleMap {
handles: RwLock::new(BTreeMap::new()),
}
}
fn clear(&self) {
self.handles.write().unwrap().clear();
}
fn insert(&self, handle: Handle, data: HandleData) {
self.handles.write().unwrap().insert(handle, Arc::new(data));
}
fn release(&self, handle: Handle, inode: Inode) -> io::Result<()> {
let mut handles = self.handles.write().unwrap();
if let btree_map::Entry::Occupied(e) = handles.entry(handle) {
if e.get().inode == inode {
// We don't need to close the file here because that will happen automatically when
// the last `Arc` is dropped.
e.remove();
return Ok(());
}
}
Err(ebadf())
}
fn get(&self, handle: Handle, inode: Inode) -> io::Result<Arc<HandleData>> {
self.handles
.read()
.unwrap()
.get(&handle)
.filter(|hd| hd.inode == inode)
.map(Arc::clone)
.ok_or_else(ebadf)
}
}
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
struct LinuxDirent64 {
d_ino: libc::ino64_t,
d_off: libc::off64_t,
d_reclen: libc::c_ushort,
d_ty: libc::c_uchar,
}
unsafe impl ByteValued for LinuxDirent64 {}
/// The caching policy that the file system should report to the FUSE client. By default the FUSE
/// protocol uses close-to-open consistency. This means that any cached contents of the file are
/// invalidated the next time that file is opened.
#[derive(Debug, Clone, PartialEq)]
pub enum CachePolicy {
/// The client should never cache file data and all I/O should be directly forwarded to the
/// server. This policy must be selected when file contents may change without the knowledge of
/// the FUSE client (i.e., the file system does not have exclusive access to the directory).
Never,
/// The client is free to choose when and how to cache file data. This is the default policy and
/// uses close-to-open consistency as described in the enum documentation.
Auto,
/// The client should always cache file data. This means that the FUSE client will not
/// invalidate any cached data that was returned by the file system the last time the file was
/// opened. This policy should only be selected when the file system has exclusive access to the
/// directory.
Always,
}
impl FromStr for CachePolicy {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"never" | "Never" | "NEVER" | "none" | "None" | "NONE" => Ok(CachePolicy::Never),
"auto" | "Auto" | "AUTO" => Ok(CachePolicy::Auto),
"always" | "Always" | "ALWAYS" => Ok(CachePolicy::Always),
_ => Err("invalid cache policy"),
}
}
}
impl Default for CachePolicy {
fn default() -> Self {
CachePolicy::Auto
}
}
/// Options that configure the behavior of the passthrough fuse file system.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
/// How long the FUSE client should consider directory entries to be valid. If the contents of a
/// directory can only be modified by the FUSE client (i.e., the file system has exclusive
/// access), then this should be a large value.
///
/// The default value for this option is 5 seconds.
pub entry_timeout: Duration,
/// How long the FUSE client should consider file and directory attributes to be valid. If the
/// attributes of a file or directory can only be modified by the FUSE client (i.e., the file
/// system has exclusive access), then this should be set to a large value.
///
/// The default value for this option is 5 seconds.
pub attr_timeout: Duration,
/// The caching policy the file system should use. See the documentation of `CachePolicy` for
/// more details.
pub cache_policy: CachePolicy,
/// Whether the file system should enable writeback caching. This can improve performance as it
/// allows the FUSE client to cache and coalesce multiple writes before sending them to the file
/// system. However, enabling this option can increase the risk of data corruption if the file
/// contents can change without the knowledge of the FUSE client (i.e., the server does **NOT**
/// have exclusive access). Additionally, the file system should have read access to all files
/// in the directory it is serving as the FUSE client may send read requests even for files
/// opened with `O_WRONLY`.
///
/// Therefore callers should only enable this option when they can guarantee that: 1) the file
/// system has exclusive access to the directory and 2) the file system has read permissions for
/// all files in that directory.
///
/// The default value for this option is `false`.
pub writeback: bool,
/// The path of the root directory.
///
/// The default is `/`.
pub root_dir: String,
/// Whether the file system should support Extended Attributes (xattr). Enabling this feature may
/// have a significant impact on performance, especially on write parallelism. This is the result
/// of FUSE attempting to remove the special file privileges after each write request.
///
/// The default value for this option is `false`.
pub xattr: bool,
/// To be compatible with Vfs and PseudoFs, PassthroughFs needs to prepare
/// root inode before accepting INIT request.
///
/// The default value for this option is `true`.
pub do_import: bool,
/// Control whether no_open is allowed.
///
/// The default value for this option is `false`.
pub no_open: bool,
/// Control whether no_opendir is allowed.
///
/// The default value for this option is `false`.
pub no_opendir: bool,
}
impl Default for Config {
fn default() -> Self {
Config {
entry_timeout: Duration::from_secs(5),
attr_timeout: Duration::from_secs(5),
cache_policy: Default::default(),
writeback: false,
root_dir: String::from("/"),
xattr: false,
do_import: true,
no_open: false,
no_opendir: false,
}
}
}
/// A file system that simply "passes through" all requests it receives to the underlying file
/// system.
///
/// To keep the implementation simple it serves the contents of its root directory. Users
/// that wish to serve only a specific directory should set up the environment so that that
/// directory ends up as the root of the file system process. One way to accomplish this is via a
/// combination of mount namespaces and the pivot_root system call.
pub struct PassthroughFs<D> {
// File descriptors for various points in the file system tree. These fds are always opened with
// the `O_PATH` option so they cannot be used for reading or writing any data. See the
// documentation of the `O_PATH` flag in `open(2)` for more details on what one can and cannot
// do with an fd opened with this flag.
inode_map: InodeMap,
next_inode: AtomicU64,
// File descriptors for open files and directories. Unlike the fds in `inodes`, these _can_ be
// used for reading and writing data.
handle_map: HandleMap,
next_handle: AtomicU64,
// File descriptor pointing to the `/proc` directory. This is used to convert an fd from
// `inodes` into one that can go into `handles`. This is accomplished by reading the
// `self/fd/{}` symlink. We keep an open fd here in case the file system tree that we are meant
// to be serving doesn't have access to `/proc`.
proc: File,
// Whether writeback caching is enabled for this directory. This will only be true when
// `cfg.writeback` is true and `init` was called with `FsOptions::WRITEBACK_CACHE`.
writeback: AtomicBool,
// Whether no_open is enabled.
no_open: AtomicBool,
// Whether no_opendir is enabled.
no_opendir: AtomicBool,
cfg: Config,
phantom: PhantomData<D>,
}
impl<D: AsyncDrive> PassthroughFs<D> {
/// Create a Passthrough file system instance.
pub fn new(cfg: Config) -> io::Result<PassthroughFs<D>> {
// Safe because this is a constant value and a valid C string.
let proc_cstr = unsafe { CStr::from_bytes_with_nul_unchecked(PROC_CSTR) };
let proc = Self::open_file(
libc::AT_FDCWD,
proc_cstr,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
Ok(PassthroughFs {
inode_map: InodeMap::new(),
next_inode: AtomicU64::new(fuse::ROOT_ID + 1),
handle_map: HandleMap::new(),
next_handle: AtomicU64::new(1),
proc,
writeback: AtomicBool::new(false),
no_open: AtomicBool::new(false),
no_opendir: AtomicBool::new(false),
cfg,
phantom: PhantomData,
})
}
/// Initialize the Passthrough file system.
pub fn import(&self) -> io::Result<()> {
let root = CString::new(self.cfg.root_dir.as_str()).expect("CString::new failed");
// We use `O_PATH` because we just want this for traversing the directory tree
// and not for actually reading the contents.
let f = Self::open_file(
libc::AT_FDCWD,
&root,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
let st = Self::stat(&f)?;
// Safe because this doesn't modify any memory and there is no need to check the return
// value because this system call always succeeds. We need to clear the umask here because
// we want the client to be able to set all the bits in the mode.
unsafe { libc::umask(0o000) };
// Not sure why the root inode gets a refcount of 2 but that's what libfuse does.
self.inode_map.insert(
fuse::ROOT_ID,
InodeAltKey::from_stat(&st),
InodeData::new(fuse::ROOT_ID, f, 2),
);
Ok(())
}
/// Get the list of file descriptors which should be reserved across live upgrade.
pub fn keep_fds(&self) -> Vec<RawFd> {
vec![self.proc.as_raw_fd()]
}
fn stat(f: &File) -> io::Result<libc::stat64> {
// Safe because this is a constant value and a valid C string.
let pathname = unsafe { CStr::from_bytes_with_nul_unchecked(EMPTY_CSTR) };
let mut st = MaybeUninit::<libc::stat64>::zeroed();
// Safe because the kernel will only write data in `st` and we check the return value.
let res = unsafe {
libc::fstatat64(
f.as_raw_fd(),
pathname.as_ptr(),
st.as_mut_ptr(),
libc::AT_EMPTY_PATH | libc::AT_SYMLINK_NOFOLLOW,
)
};
if res >= 0 {
// Safe because the kernel guarantees that the struct is now fully initialized.
Ok(unsafe { st.assume_init() })
} else {
Err(io::Error::last_os_error())
}
}
fn open_file(dfd: i32, pathname: &CStr, flags: i32, mode: u32) -> io::Result<File> {
let fd = if flags & libc::O_CREAT == libc::O_CREAT {
unsafe { libc::openat(dfd, pathname.as_ptr(), flags, mode) }
} else {
unsafe { libc::openat(dfd, pathname.as_ptr(), flags) }
};
if fd < 0 {
return Err(io::Error::last_os_error());
}
// Safe because we just opened this fd.
Ok(unsafe { File::from_raw_fd(fd) })
}
fn do_lookup(&self, parent: Inode, name: &CStr) -> io::Result<Entry> {
let p = self.inode_map.get(parent)?;
let f = Self::open_file(
p.get_raw_fd(),
name,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
let st = Self::stat(&f)?;
let altkey = InodeAltKey::from_stat(&st);
let mut found = None;
'search: loop {
match self.inode_map.get_alt(&altkey) {
// No existing entry found
None => break 'search,
Some(data) => {
let curr = data.refcount.load(Ordering::Acquire);
// forget_one() has just destroyed the entry, retry...
if curr == 0 {
continue 'search;
}
// Saturating add to avoid integer overflow; it's not realistic for a u64 refcount to saturate.
let new = curr.saturating_add(1);
// Synchronizes with forget_one().
if data
.refcount
.compare_exchange(curr, new, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
found = Some(data.inode);
break;
}
}
}
}
let inode = if let Some(v) = found {
v
} else {
let mut inodes = self.inode_map.get_map_mut();
// Look up inode_map again after acquiring the inode_map lock, as another racing
// thread might already have added an inode with the same altkey while we were not
// holding the lock. If so, just use the newly added inode; otherwise the existing
// inode would be replaced, resulting in EBADF.
match inodes.get_alt(&altkey).map(Arc::clone) {
Some(data) => {
trace!(
"fuse: do_lookup sees existing inode {} altkey {:?}",
data.inode,
altkey
);
data.refcount.fetch_add(1, Ordering::Relaxed);
data.inode
}
None => {
let inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
if inode > VFS_MAX_INO {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("max inode number reached: {}", VFS_MAX_INO),
));
}
trace!(
"fuse: do_lookup adds new inode {} altkey {:?}",
inode,
altkey
);
inodes.insert(inode, altkey, Arc::new(InodeData::new(inode, f, 1)));
inode
}
}
};
Ok(Entry {
inode,
generation: 0,
attr: st,
attr_timeout: self.cfg.attr_timeout,
entry_timeout: self.cfg.entry_timeout,
})
}
fn forget_one(
inodes: &mut MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>,
inode: Inode,
count: u64,
) {
// ROOT_ID should not be forgotten, or we would no longer be able to access any files.
if inode == fuse::ROOT_ID {
return;
}
if let Some(data) = inodes.get(&inode) {
// Acquiring the write lock on the inode map prevents new lookups from incrementing the
// refcount but there is the possibility that a previous lookup already acquired a
// reference to the inode data and is in the process of updating the refcount so we need
// to loop here until we can decrement successfully.
loop {
let curr = data.refcount.load(Ordering::Acquire);
// Saturating sub because it doesn't make sense for a refcount to go below zero and
// we don't want misbehaving clients to cause integer overflow.
let new = curr.saturating_sub(count);
trace!(
"fuse: forget inode {} refcount {}, count {}, new_count {}",
inode,
curr,
count,
new
);
// Synchronizes with the acquire load in `do_lookup`.
if data
.refcount
.compare_exchange(curr, new, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
if new == 0 {
// We just removed the last refcount for this inode.
inodes.remove(&inode);
}
break;
}
}
}
}
fn do_release(&self, inode: Inode, handle: Handle) -> io::Result<()> {
self.handle_map.release(handle, inode)
}
}
#[cfg(not(feature = "async-io"))]
impl<D: AsyncDrive> BackendFileSystem for PassthroughFs<D> {
type D = D;
fn mount(&self) -> io::Result<(Entry, u64)> {
let entry = self.do_lookup(fuse::ROOT_ID, &CString::new(".").unwrap())?;
Ok((entry, VFS_MAX_INO))
}
fn as_any(&self) -> &dyn Any {
self
}
}
fn ebadf() -> io::Error {
io::Error::from_raw_os_error(libc::EBADF)
} | use std::ffi::{CStr, CString};
use std::fs::File;
use std::io;
use std::marker::PhantomData; | random_line_split |
mod.rs | // Copyright (C) 2020 Alibaba Cloud. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//! Fuse passthrough file system, mirroring an existing FS hierarchy.
//!
//! This file system mirrors the existing file system hierarchy of the system, starting at the
//! root file system. This is implemented by just "passing through" all requests to the
//! corresponding underlying file system.
//!
//! The code is derived from the
//! [CrosVM](https://chromium.googlesource.com/chromiumos/platform/crosvm/) project,
//! with heavy modification/enhancements from Alibaba Cloud OS team.
use std::any::Any;
use std::collections::{btree_map, BTreeMap};
use std::ffi::{CStr, CString};
use std::fs::File;
use std::io;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockWriteGuard};
use std::time::Duration;
use vm_memory::ByteValued;
use crate::abi::linux_abi as fuse;
use crate::api::filesystem::Entry;
use crate::api::{BackendFileSystem, VFS_MAX_INO};
#[cfg(feature = "async-io")]
mod async_io;
mod sync_io;
mod multikey;
use multikey::MultikeyBTreeMap;
use crate::async_util::AsyncDrive;
const CURRENT_DIR_CSTR: &[u8] = b".\0";
const PARENT_DIR_CSTR: &[u8] = b"..\0";
const EMPTY_CSTR: &[u8] = b"\0";
const PROC_CSTR: &[u8] = b"/proc\0";
type Inode = u64;
type Handle = u64;
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
struct InodeAltKey {
ino: libc::ino64_t,
dev: libc::dev_t,
}
impl InodeAltKey {
fn from_stat(st: &libc::stat64) -> Self {
InodeAltKey {
ino: st.st_ino,
dev: st.st_dev,
}
}
}
struct InodeData {
inode: Inode,
// Most of these aren't actually files but ¯\_(ツ)_/¯.
file: File,
refcount: AtomicU64,
}
impl InodeData {
fn new(inode: Inode, file: File, refcount: u64) -> Self {
InodeData {
inode,
file,
refcount: AtomicU64::new(refcount),
}
}
// When making use of the underlying RawFd, the caller must ensure that the Arc<InodeData>
// object stays in scope. Otherwise it may open a race window in which the wrong target fd
// is accessed. By introducing this method, we can explicitly audit all callers that make
// use of the underlying RawFd.
fn get_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
/// Data structures to manage accessed inodes.
struct InodeMap {
inodes: RwLock<MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>>,
}
impl InodeMap {
fn new() -> Self {
InodeMap {
inodes: RwLock::new(MultikeyBTreeMap::new()),
}
}
fn clear(&self) {
self.inodes.write().unwrap().clear();
}
fn get(&self, inode: Inode) -> io::Result<Arc<InodeData>> {
self.inodes
.read()
.unwrap()
.get(&inode)
.map(Arc::clone)
.ok_or_else(ebadf)
}
fn get_alt(&self, altkey: &InodeAltKey) -> Option<Arc<InodeData>> {
self.inodes.read().unwrap().get_alt(altkey).map(Arc::clone)
}
fn get_map_mut(
&self,
) -> RwLockWriteGuard<MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>> {
self.inodes.write().unwrap()
}
fn insert(&self, inode: Inode, altkey: InodeAltKey, data: InodeData) {
self.inodes
.write()
.unwrap()
.insert(inode, altkey, Arc::new(data));
}
}
struct HandleData {
inode: Inode,
file: File,
lock: Mutex<()>,
}
impl HandleData {
fn new(inode: Inode, file: File) -> Self {
HandleData {
inode,
file,
lock: Mutex::new(()),
}
}
fn get_file_mut(&self) -> (MutexGuard<()>, &File) {
(self.lock.lock().unwrap(), &self.file)
}
// When making use of the underlying RawFd, the caller must ensure that the Arc<HandleData>
// object stays in scope. Otherwise it may open a race window in which the wrong target fd
// is accessed. By introducing this method, we can explicitly audit all callers that make
// use of the underlying RawFd.
fn get_handle_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
struct HandleMap {
handles: RwLock<BTreeMap<Handle, Arc<HandleData>>>,
}
impl HandleMap {
fn new() -> Self {
HandleMap {
handles: RwLock::new(BTreeMap::new()),
}
}
fn clear(&self) {
self.handles.write().unwrap().clear();
}
fn insert(&self, handle: Handle, data: HandleData) {
self.handles.write().unwrap().insert(handle, Arc::new(data));
}
fn release(&self, handle: Handle, inode: Inode) -> io::Result<()> {
let mut handles = self.handles.write().unwrap();
if let btree_map::Entry::Occupied(e) = handles.entry(handle) {
if e.get().inode == inode {
// We don't need to close the file here because that will happen automatically when
// the last `Arc` is dropped.
e.remove();
return Ok(());
}
}
Err(ebadf())
}
fn get(&self, handle: Handle, inode: Inode) -> io::Result<Arc<HandleData>> {
self.handles
.read()
.unwrap()
.get(&handle)
.filter(|hd| hd.inode == inode)
.map(Arc::clone)
.ok_or_else(ebadf)
}
}
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
struct LinuxDirent64 {
d_ino: libc::ino64_t,
d_off: libc::off64_t,
d_reclen: libc::c_ushort,
d_ty: libc::c_uchar,
}
unsafe impl ByteValued for LinuxDirent64 {}
/// The caching policy that the file system should report to the FUSE client. By default the FUSE
/// protocol uses close-to-open consistency. This means that any cached contents of the file are
/// invalidated the next time that file is opened.
#[derive(Debug, Clone, PartialEq)]
pub enum CachePolicy {
/// The client should never cache file data and all I/O should be directly forwarded to the
/// server. This policy must be selected when file contents may change without the knowledge of
/// the FUSE client (i.e., the file system does not have exclusive access to the directory).
Never,
/// The client is free to choose when and how to cache file data. This is the default policy and
/// uses close-to-open consistency as described in the enum documentation.
Auto,
/// The client should always cache file data. This means that the FUSE client will not
/// invalidate any cached data that was returned by the file system the last time the file was
/// opened. This policy should only be selected when the file system has exclusive access to the
/// directory.
Always,
}
impl FromStr for CachePolicy {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"never" | "Never" | "NEVER" | "none" | "None" | "NONE" => Ok(CachePolicy::Never),
"auto" | "Auto" | "AUTO" => Ok(CachePolicy::Auto),
"always" | "Always" | "ALWAYS" => Ok(CachePolicy::Always),
_ => Err("invalid cache policy"),
}
}
}
impl Default for CachePolicy {
fn default() -> Self {
CachePolicy::Auto
}
}
/// Options that configure the behavior of the passthrough fuse file system.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
/// How long the FUSE client should consider directory entries to be valid. If the contents of a
/// directory can only be modified by the FUSE client (i.e., the file system has exclusive
/// access), then this should be a large value.
///
/// The default value for this option is 5 seconds.
pub entry_timeout: Duration,
/// How long the FUSE client should consider file and directory attributes to be valid. If the
/// attributes of a file or directory can only be modified by the FUSE client (i.e., the file
/// system has exclusive access), then this should be set to a large value.
///
/// The default value for this option is 5 seconds.
pub attr_timeout: Duration,
/// The caching policy the file system should use. See the documentation of `CachePolicy` for
/// more details.
pub cache_policy: CachePolicy,
/// Whether the file system should enable writeback caching. This can improve performance as it
/// allows the FUSE client to cache and coalesce multiple writes before sending them to the file
/// system. However, enabling this option can increase the risk of data corruption if the file
/// contents can change without the knowledge of the FUSE client (i.e., the server does **NOT**
/// have exclusive access). Additionally, the file system should have read access to all files
/// in the directory it is serving as the FUSE client may send read requests even for files
/// opened with `O_WRONLY`.
///
/// Therefore callers should only enable this option when they can guarantee that: 1) the file
/// system has exclusive access to the directory and 2) the file system has read permissions for
/// all files in that directory.
///
/// The default value for this option is `false`.
pub writeback: bool,
/// The path of the root directory.
///
/// The default is `/`.
pub root_dir: String,
/// Whether the file system should support Extended Attributes (xattr). Enabling this feature may
/// have a significant impact on performance, especially on write parallelism. This is the result
/// of FUSE attempting to remove the special file privileges after each write request.
///
/// The default value for this option is `false`.
pub xattr: bool,
/// To be compatible with Vfs and PseudoFs, PassthroughFs needs to prepare
/// root inode before accepting INIT request.
///
/// The default value for this option is `true`.
pub do_import: bool,
/// Control whether no_open is allowed.
///
/// The default value for this option is `false`.
pub no_open: bool,
/// Control whether no_opendir is allowed.
///
/// The default value for this option is `false`.
pub no_opendir: bool,
}
impl Default for Config {
fn default() -> Self {
Config {
entry_timeout: Duration::from_secs(5),
attr_timeout: Duration::from_secs(5),
cache_policy: Default::default(),
writeback: false,
root_dir: String::from("/"),
xattr: false,
do_import: true,
no_open: false,
no_opendir: false,
}
}
}
/// A file system that simply "passes through" all requests it receives to the underlying file
/// system.
///
/// To keep the implementation simple it serves the contents of its root directory. Users
/// that wish to serve only a specific directory should set up the environment so that that
/// directory ends up as the root of the file system process. One way to accomplish this is via a
/// combination of mount namespaces and the pivot_root system call.
pub struct PassthroughFs<D> {
// File descriptors for various points in the file system tree. These fds are always opened with
// the `O_PATH` option so they cannot be used for reading or writing any data. See the
// documentation of the `O_PATH` flag in `open(2)` for more details on what one can and cannot
// do with an fd opened with this flag.
inode_map: InodeMap,
next_inode: AtomicU64,
// File descriptors for open files and directories. Unlike the fds in `inodes`, these _can_ be
// used for reading and writing data.
handle_map: HandleMap,
next_handle: AtomicU64,
// File descriptor pointing to the `/proc` directory. This is used to convert an fd from
// `inodes` into one that can go into `handles`. This is accomplished by reading the
// `self/fd/{}` symlink. We keep an open fd here in case the file system tree that we are meant
// to be serving doesn't have access to `/proc`.
proc: File,
// Whether writeback caching is enabled for this directory. This will only be true when
// `cfg.writeback` is true and `init` was called with `FsOptions::WRITEBACK_CACHE`.
writeback: AtomicBool,
// Whether no_open is enabled.
no_open: AtomicBool,
// Whether no_opendir is enabled.
no_opendir: AtomicBool,
cfg: Config,
phantom: PhantomData<D>,
}
impl<D: AsyncDrive> PassthroughFs<D> {
/// Create a Passthrough file system instance.
pub fn new(cfg: Config) -> io::Result<PassthroughFs<D>> {
// Safe because this is a constant value and a valid C string.
let proc_cstr = unsafe { CStr::from_bytes_with_nul_unchecked(PROC_CSTR) };
let proc = Self::open_file(
libc::AT_FDCWD,
proc_cstr,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
Ok(PassthroughFs {
inode_map: InodeMap::new(),
next_inode: AtomicU64::new(fuse::ROOT_ID + 1),
handle_map: HandleMap::new(),
next_handle: AtomicU64::new(1),
proc,
writeback: AtomicBool::new(false),
no_open: AtomicBool::new(false),
no_opendir: AtomicBool::new(false),
cfg,
phantom: PhantomData,
})
}
/// Initialize the Passthrough file system.
pub fn import(&self) -> io::Result<()> {
let root = CString::new(self.cfg.root_dir.as_str()).expect("CString::new failed");
// We use `O_PATH` because we just want this for traversing the directory tree
// and not for actually reading the contents.
let f = Self::open_file(
libc::AT_FDCWD,
&root,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
let st = Self::stat(&f)?;
// Safe because this doesn't modify any memory and there is no need to check the return
// value because this system call always succeeds. We need to clear the umask here because
// we want the client to be able to set all the bits in the mode.
unsafe { libc::umask(0o000) };
// Not sure why the root inode gets a refcount of 2 but that's what libfuse does.
self.inode_map.insert(
fuse::ROOT_ID,
InodeAltKey::from_stat(&st),
InodeData::new(fuse::ROOT_ID, f, 2),
);
Ok(())
}
/// Get the list of file descriptors which should be reserved across live upgrade.
pub fn keep_fds(&self) -> Vec<RawFd> {
vec![self.proc.as_raw_fd()]
}
fn stat(f: &File) -> io::Result<libc::stat64> {
// Safe because this is a constant value and a valid C string.
let pathname = unsafe { CStr::from_bytes_with_nul_unchecked(EMPTY_CSTR) };
let mut st = MaybeUninit::<libc::stat64>::zeroed();
// Safe because the kernel will only write data in `st` and we check the return value.
let res = unsafe {
libc::fstatat64(
f.as_raw_fd(),
pathname.as_ptr(),
st.as_mut_ptr(),
libc::AT_EMPTY_PATH | libc::AT_SYMLINK_NOFOLLOW,
)
};
if res >= 0 {
// Safe because the kernel guarantees that the struct is now fully initialized.
Ok(unsafe { st.assume_init() })
} else {
Err(io::Error::last_os_error())
}
}
fn open_file(dfd: i32, pathname: &CStr, flags: i32, mode: u32) -> io::Result<File> {
let fd = if flags & libc::O_CREAT == libc::O_CREAT {
unsafe { libc::openat(dfd, pathname.as_ptr(), flags, mode) }
} else {
unsafe { libc::openat(dfd, pathname.as_ptr(), flags) }
};
if fd < 0 {
return Err(io::Error::last_os_error());
}
// Safe because we just opened this fd.
Ok(unsafe { File::from_raw_fd(fd) })
}
fn do_lookup(&self, parent: Inode, name: &CStr) -> io::Result<Entry> {
let p = self.inode_map.get(parent)?;
let f = Self::open_file(
p.get_raw_fd(),
name,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
let st = Self::stat(&f)?;
let altkey = InodeAltKey::from_stat(&st);
let mut found = None;
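// First try to reuse an existing inode for this (st_ino, st_dev) pair by bumping its refcount.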
'search: loop {
match self.inode_map.get_alt(&altkey) {
// No existing entry found
None => break 'search,
Some(data) => {
let curr = data.refcount.load(Ordering::Acquire);
// forget_one() has just destroyed the entry, retry...
if curr == 0 {
continue 'search;
}
// Saturating add to avoid integer overflow; it's not realistic to saturate a u64.
let new = curr.saturating_add(1);
// Synchronizes with forget_one().
if data
.refcount
.compare_exchange(curr, new, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
found = Some(data.inode);
break;
}
}
}
}
let inode = if let Some(v) = found {
v
} else {
let mut inodes = self.inode_map.get_map_mut();
// Look up the inode_map again after acquiring the lock, as another racing thread may
// have already added an inode with the same altkey while we were not holding the lock.
// If so, just use the newly added inode; otherwise the existing inode would be replaced
// and later lookups would result in EBADF.
match inodes.get_alt(&altkey).map(Arc::clone) {
Some(data) => {
trace!(
"fuse: do_lookup sees existing inode {} altkey {:?}",
data.inode,
altkey
);
data.refcount.fetch_add(1, Ordering::Relaxed);
data.inode
}
None => {
let inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
if inode > VFS_MAX_INO {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("max inode number reached: {}", VFS_MAX_INO),
));
}
trace!(
"fuse: do_lookup adds new inode {} altkey {:?}",
inode,
altkey
);
inodes.insert(inode, altkey, Arc::new(InodeData::new(inode, f, 1)));
inode
}
}
};
Ok(Entry {
inode,
generation: 0,
attr: st,
attr_timeout: self.cfg.attr_timeout,
entry_timeout: self.cfg.entry_timeout,
})
}
fn forget_one(
inodes: &mut MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>,
inode: Inode,
count: u64,
) {
// ROOT_ID should not be forgotten, or we would no longer be able to access any files.
if inode == fuse::ROOT_ID {
return;
}
if let Some(data) = inodes.get(&inode) {
// Acquiring the write lock on the inode map prevents new lookups from incrementing the
// refcount but there is the possibility that a previous lookup already acquired a
// reference to the inode data and is in the process of updating the refcount so we need
// to loop here until we can decrement successfully.
loop {
let curr = data.refcount.load(Ordering::Acquire);
// Saturating sub because it doesn't make sense for a refcount to go below zero and
// we don't want misbehaving clients to cause integer overflow.
let new = curr.saturating_sub(count);
trace!(
"fuse: forget inode {} refcount {}, count {}, new_count {}",
inode,
curr,
count,
new
);
// Synchronizes with the acquire load in `do_lookup`.
if data
.refcount
.compare_exchange(curr, new, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
if new == 0 {
// We just removed the last refcount for this inode.
inodes.remove(&inode);
}
break;
}
}
}
}
fn do_r | lf, inode: Inode, handle: Handle) -> io::Result<()> {
self.handle_map.release(handle, inode)
}
}
#[cfg(not(feature = "async-io"))]
impl<D: AsyncDrive> BackendFileSystem for PassthroughFs<D> {
type D = D;
fn mount(&self) -> io::Result<(Entry, u64)> {
let entry = self.do_lookup(fuse::ROOT_ID, &CString::new(".").unwrap())?;
Ok((entry, VFS_MAX_INO))
}
fn as_any(&self) -> &dyn Any {
self
}
}
fn ebadf() -> io::Error {
io::Error::from_raw_os_error(libc::EBADF)
}
| elease(&se | identifier_name |
mod.rs | // Copyright (C) 2020 Alibaba Cloud. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//! Fuse passthrough file system, mirroring an existing FS hierarchy.
//!
//! This file system mirrors the existing file system hierarchy of the system, starting at the
//! root file system. This is implemented by just "passing through" all requests to the
//! corresponding underlying file system.
//!
//! The code is derived from the
//! [CrosVM](https://chromium.googlesource.com/chromiumos/platform/crosvm/) project,
//! with heavy modification/enhancements from Alibaba Cloud OS team.
use std::any::Any;
use std::collections::{btree_map, BTreeMap};
use std::ffi::{CStr, CString};
use std::fs::File;
use std::io;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::str::FromStr;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockWriteGuard};
use std::time::Duration;
use vm_memory::ByteValued;
use crate::abi::linux_abi as fuse;
use crate::api::filesystem::Entry;
use crate::api::{BackendFileSystem, VFS_MAX_INO};
#[cfg(feature = "async-io")]
mod async_io;
mod sync_io;
mod multikey;
use multikey::MultikeyBTreeMap;
use crate::async_util::AsyncDrive;
const CURRENT_DIR_CSTR: &[u8] = b".\0";
const PARENT_DIR_CSTR: &[u8] = b"..\0";
const EMPTY_CSTR: &[u8] = b"\0";
const PROC_CSTR: &[u8] = b"/proc\0";
type Inode = u64;
type Handle = u64;
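// Alternative lookup key for an inode, built from the (st_ino, st_dev) pair of the underlying file.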
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
struct InodeAltKey {
ino: libc::ino64_t,
dev: libc::dev_t,
}
impl InodeAltKey {
fn from_stat(st: &libc::stat64) -> Self {
InodeAltKey {
ino: st.st_ino,
dev: st.st_dev,
}
}
}
struct InodeData {
inode: Inode,
// Most of these aren't actually files but ¯\_(ツ)_/¯.
file: File,
refcount: AtomicU64,
}
impl InodeData {
fn new(inode: Inode, file: File, refcount: u64) -> Self {
InodeData {
inode,
file,
refcount: AtomicU64::new(refcount),
}
}
// When making use of the underlying RawFd, the caller must ensure that the Arc<InodeData>
// object is within scope. Otherwise it may open a race window that accesses the wrong target fd.
// By introducing this method, we could explicitly audit all callers making use of the
// underlying RawFd.
fn get_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
/// Data structures to manage accessed inodes.
struct InodeMap {
inodes: RwLock<MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>>,
}
impl InodeMap {
fn new() -> Self {
InodeMap {
inodes: RwLock::new(MultikeyBTreeMap::new()),
}
}
fn clear(&self) {
self.inodes.write().unwrap().clear();
}
fn get(&self, inode: Inode) -> io::Result<Arc<InodeData>> {
self.inodes
.read()
.unwrap()
.get(&inode)
.map(Arc::clone)
.ok_or_else(ebadf)
}
fn get_alt(&self, altkey: &InodeAltKey) -> Option<Arc<InodeData>> {
self.inodes.read().unwrap().get_alt(altkey).map(Arc::clone)
}
fn get_map_mut(
&self,
) -> RwLockWriteGuard<MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>> {
self.inodes.write().unwrap()
}
fn insert(&self, inode: Inode, altkey: InodeAltKey, data: InodeData) {
self.inodes
.write()
.unwrap()
.insert(inode, altkey, Arc::new(data));
}
}
struct HandleData {
inode: Inode,
file: File,
lock: Mutex<()>,
}
impl HandleData {
fn new(inode: Inode, file: File) -> Self {
HandleData {
inode,
file,
lock: Mutex::new(()),
}
}
fn get_file_mut(&self) -> (MutexGuard<()>, &File) {
(self.lock.lock().unwrap(), &self.file)
}
// When making use of the underlying RawFd, the caller must ensure that the Arc<HandleData>
// object is within scope. Otherwise it may open a race window that accesses the wrong target fd.
// By introducing this method, we could explicitly audit all callers making use of the
// underlying RawFd.
fn get_handle_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
struct HandleMap {
handles: RwLock<BTreeMap<Handle, Arc<HandleData>>>,
}
impl HandleMap {
fn new() -> Self {
HandleMap {
handles: RwLock::new(BTreeMap::new()),
}
}
fn clear(&self) {
self.handles.write().unwrap().clear();
}
fn insert(&self, handle: Handle, data: HandleData) {
self.handles.write().unwrap().insert(handle, Arc::new(data));
}
fn release(&self, handle: Handle, inode: Inode) -> io::Result<()> {
let mut handles = self.handles.write().unwrap();
if let btree_map::Entry::Occupied(e) = handles.entry(handle) {
if e.get().inode == inode {
// We don't need to close the file here because that will happen automatically when
// the last `Arc` is dropped.
e.remove();
return Ok(());
}
}
Err(ebadf())
}
fn get(&self, handle: Handle, inode: Inode) -> io::Result<Arc<HandleData>> {
self.handles
.read()
.unwrap()
.get(&handle)
.filter(|hd| hd.inode == inode)
.map(Arc::clone)
.ok_or_else(ebadf)
}
}
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
struct LinuxDirent64 {
d_ino: libc::ino64_t,
d_off: libc::off64_t,
d_reclen: libc::c_ushort,
d_ty: libc::c_uchar,
}
unsafe impl ByteValued for LinuxDirent64 {}
/// The caching policy that the file system should report to the FUSE client. By default the FUSE
/// protocol uses close-to-open consistency. This means that any cached contents of the file are
/// invalidated the next time that file is opened.
#[derive(Debug, Clone, PartialEq)]
pub enum CachePolicy {
/// The client should never cache file data and all I/O should be directly forwarded to the
/// server. This policy must be selected when file contents may change without the knowledge of
/// the FUSE client (i.e., the file system does not have exclusive access to the directory).
Never,
/// The client is free to choose when and how to cache file data. This is the default policy and
/// uses close-to-open consistency as described in the enum documentation.
Auto,
/// The client should always cache file data. This means that the FUSE client will not
/// invalidate any cached data that was returned by the file system the last time the file was
/// opened. This policy should only be selected when the file system has exclusive access to the
/// directory.
Always,
}
impl FromStr for CachePolicy {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
| impl Default for CachePolicy {
fn default() -> Self {
CachePolicy::Auto
}
}
/// Options that configure the behavior of the passthrough fuse file system.
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
/// How long the FUSE client should consider directory entries to be valid. If the contents of a
/// directory can only be modified by the FUSE client (i.e., the file system has exclusive
/// access), then this should be a large value.
///
/// The default value for this option is 5 seconds.
pub entry_timeout: Duration,
/// How long the FUSE client should consider file and directory attributes to be valid. If the
/// attributes of a file or directory can only be modified by the FUSE client (i.e., the file
/// system has exclusive access), then this should be set to a large value.
///
/// The default value for this option is 5 seconds.
pub attr_timeout: Duration,
/// The caching policy the file system should use. See the documentation of `CachePolicy` for
/// more details.
pub cache_policy: CachePolicy,
/// Whether the file system should enable writeback caching. This can improve performance as it
/// allows the FUSE client to cache and coalesce multiple writes before sending them to the file
/// system. However, enabling this option can increase the risk of data corruption if the file
/// contents can change without the knowledge of the FUSE client (i.e., the server does **NOT**
/// have exclusive access). Additionally, the file system should have read access to all files
/// in the directory it is serving as the FUSE client may send read requests even for files
/// opened with `O_WRONLY`.
///
/// Therefore callers should only enable this option when they can guarantee that: 1) the file
/// system has exclusive access to the directory and 2) the file system has read permissions for
/// all files in that directory.
///
/// The default value for this option is `false`.
pub writeback: bool,
/// The path of the root directory.
///
/// The default is `/`.
pub root_dir: String,
/// Whether the file system should support Extended Attributes (xattr). Enabling this feature may
/// have a significant impact on performance, especially on write parallelism. This is the result
/// of FUSE attempting to remove the special file privileges after each write request.
///
/// The default value for this option is `false`.
pub xattr: bool,
/// To be compatible with Vfs and PseudoFs, PassthroughFs needs to prepare
/// root inode before accepting INIT request.
///
/// The default value for this option is `true`.
pub do_import: bool,
/// Control whether no_open is allowed.
///
/// The default value for this option is `false`.
pub no_open: bool,
/// Control whether no_opendir is allowed.
///
/// The default value for this option is `false`.
pub no_opendir: bool,
}
impl Default for Config {
fn default() -> Self {
Config {
entry_timeout: Duration::from_secs(5),
attr_timeout: Duration::from_secs(5),
cache_policy: Default::default(),
writeback: false,
root_dir: String::from("/"),
xattr: false,
do_import: true,
no_open: false,
no_opendir: false,
}
}
}
/// A file system that simply "passes through" all requests it receives to the underlying file
/// system.
///
/// To keep the implementation simple it serves the contents of its root directory. Users
/// that wish to serve only a specific directory should set up the environment so that that
/// directory ends up as the root of the file system process. One way to accomplish this is via a
/// combination of mount namespaces and the pivot_root system call.
pub struct PassthroughFs<D> {
// File descriptors for various points in the file system tree. These fds are always opened with
// the `O_PATH` option so they cannot be used for reading or writing any data. See the
// documentation of the `O_PATH` flag in `open(2)` for more details on what one can and cannot
// do with an fd opened with this flag.
inode_map: InodeMap,
next_inode: AtomicU64,
// File descriptors for open files and directories. Unlike the fds in `inodes`, these _can_ be
// used for reading and writing data.
handle_map: HandleMap,
next_handle: AtomicU64,
// File descriptor pointing to the `/proc` directory. This is used to convert an fd from
// `inodes` into one that can go into `handles`. This is accomplished by reading the
// `self/fd/{}` symlink. We keep an open fd here in case the file system tree that we are meant
// to be serving doesn't have access to `/proc`.
proc: File,
// Whether writeback caching is enabled for this directory. This will only be true when
// `cfg.writeback` is true and `init` was called with `FsOptions::WRITEBACK_CACHE`.
writeback: AtomicBool,
// Whether no_open is enabled.
no_open: AtomicBool,
// Whether no_opendir is enabled.
no_opendir: AtomicBool,
cfg: Config,
phantom: PhantomData<D>,
}
impl<D: AsyncDrive> PassthroughFs<D> {
/// Create a Passthrough file system instance.
pub fn new(cfg: Config) -> io::Result<PassthroughFs<D>> {
// Safe because this is a constant value and a valid C string.
let proc_cstr = unsafe { CStr::from_bytes_with_nul_unchecked(PROC_CSTR) };
let proc = Self::open_file(
libc::AT_FDCWD,
proc_cstr,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
Ok(PassthroughFs {
inode_map: InodeMap::new(),
next_inode: AtomicU64::new(fuse::ROOT_ID + 1),
handle_map: HandleMap::new(),
next_handle: AtomicU64::new(1),
proc,
writeback: AtomicBool::new(false),
no_open: AtomicBool::new(false),
no_opendir: AtomicBool::new(false),
cfg,
phantom: PhantomData,
})
}
/// Initialize the Passthrough file system.
pub fn import(&self) -> io::Result<()> {
let root = CString::new(self.cfg.root_dir.as_str()).expect("CString::new failed");
// We use `O_PATH` because we just want this for traversing the directory tree
// and not for actually reading the contents.
let f = Self::open_file(
libc::AT_FDCWD,
&root,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
let st = Self::stat(&f)?;
// Safe because this doesn't modify any memory and there is no need to check the return
// value because this system call always succeeds. We need to clear the umask here because
// we want the client to be able to set all the bits in the mode.
unsafe { libc::umask(0o000) };
// Not sure why the root inode gets a refcount of 2 but that's what libfuse does.
self.inode_map.insert(
fuse::ROOT_ID,
InodeAltKey::from_stat(&st),
InodeData::new(fuse::ROOT_ID, f, 2),
);
Ok(())
}
/// Get the list of file descriptors which should be reserved across live upgrade.
pub fn keep_fds(&self) -> Vec<RawFd> {
vec![self.proc.as_raw_fd()]
}
fn stat(f: &File) -> io::Result<libc::stat64> {
// Safe because this is a constant value and a valid C string.
let pathname = unsafe { CStr::from_bytes_with_nul_unchecked(EMPTY_CSTR) };
let mut st = MaybeUninit::<libc::stat64>::zeroed();
// Safe because the kernel will only write data in `st` and we check the return value.
let res = unsafe {
libc::fstatat64(
f.as_raw_fd(),
pathname.as_ptr(),
st.as_mut_ptr(),
libc::AT_EMPTY_PATH | libc::AT_SYMLINK_NOFOLLOW,
)
};
if res >= 0 {
// Safe because the kernel guarantees that the struct is now fully initialized.
Ok(unsafe { st.assume_init() })
} else {
Err(io::Error::last_os_error())
}
}
fn open_file(dfd: i32, pathname: &CStr, flags: i32, mode: u32) -> io::Result<File> {
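// The mode argument is only meaningful to openat(2) when creating a file, so pass it only with O_CREAT.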
let fd = if flags & libc::O_CREAT == libc::O_CREAT {
unsafe { libc::openat(dfd, pathname.as_ptr(), flags, mode) }
} else {
unsafe { libc::openat(dfd, pathname.as_ptr(), flags) }
};
if fd < 0 {
return Err(io::Error::last_os_error());
}
// Safe because we just opened this fd.
Ok(unsafe { File::from_raw_fd(fd) })
}
fn do_lookup(&self, parent: Inode, name: &CStr) -> io::Result<Entry> {
let p = self.inode_map.get(parent)?;
let f = Self::open_file(
p.get_raw_fd(),
name,
libc::O_PATH | libc::O_NOFOLLOW | libc::O_CLOEXEC,
0,
)?;
let st = Self::stat(&f)?;
let altkey = InodeAltKey::from_stat(&st);
let mut found = None;
'search: loop {
match self.inode_map.get_alt(&altkey) {
// No existing entry found
None => break 'search,
Some(data) => {
let curr = data.refcount.load(Ordering::Acquire);
// forget_one() has just destroyed the entry, retry...
if curr == 0 {
continue 'search;
}
// Saturating add to avoid integer overflow; it's not realistic to saturate a u64.
let new = curr.saturating_add(1);
// Synchronizes with forget_one().
if data
.refcount
.compare_exchange(curr, new, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
found = Some(data.inode);
break;
}
}
}
}
let inode = if let Some(v) = found {
v
} else {
let mut inodes = self.inode_map.get_map_mut();
// Look up the inode_map again after acquiring the lock, as another racing thread may
// have already added an inode with the same altkey while we were not holding the lock.
// If so, just use the newly added inode; otherwise the existing inode would be replaced
// and later lookups would result in EBADF.
match inodes.get_alt(&altkey).map(Arc::clone) {
Some(data) => {
trace!(
"fuse: do_lookup sees existing inode {} altkey {:?}",
data.inode,
altkey
);
data.refcount.fetch_add(1, Ordering::Relaxed);
data.inode
}
None => {
let inode = self.next_inode.fetch_add(1, Ordering::Relaxed);
if inode > VFS_MAX_INO {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("max inode number reached: {}", VFS_MAX_INO),
));
}
trace!(
"fuse: do_lookup adds new inode {} altkey {:?}",
inode,
altkey
);
inodes.insert(inode, altkey, Arc::new(InodeData::new(inode, f, 1)));
inode
}
}
};
Ok(Entry {
inode,
generation: 0,
attr: st,
attr_timeout: self.cfg.attr_timeout,
entry_timeout: self.cfg.entry_timeout,
})
}
fn forget_one(
inodes: &mut MultikeyBTreeMap<Inode, InodeAltKey, Arc<InodeData>>,
inode: Inode,
count: u64,
) {
// ROOT_ID should not be forgotten, or we would no longer be able to access any files.
if inode == fuse::ROOT_ID {
return;
}
if let Some(data) = inodes.get(&inode) {
// Acquiring the write lock on the inode map prevents new lookups from incrementing the
// refcount but there is the possibility that a previous lookup already acquired a
// reference to the inode data and is in the process of updating the refcount so we need
// to loop here until we can decrement successfully.
loop {
let curr = data.refcount.load(Ordering::Acquire);
// Saturating sub because it doesn't make sense for a refcount to go below zero and
// we don't want misbehaving clients to cause integer overflow.
let new = curr.saturating_sub(count);
trace!(
"fuse: forget inode {} refcount {}, count {}, new_count {}",
inode,
curr,
count,
new
);
// Synchronizes with the acquire load in `do_lookup`.
if data
.refcount
.compare_exchange(curr, new, Ordering::AcqRel, Ordering::Acquire)
.is_ok()
{
if new == 0 {
// We just removed the last refcount for this inode.
inodes.remove(&inode);
}
break;
}
}
}
}
fn do_release(&self, inode: Inode, handle: Handle) -> io::Result<()> {
self.handle_map.release(handle, inode)
}
}
#[cfg(not(feature = "async-io"))]
impl<D: AsyncDrive> BackendFileSystem for PassthroughFs<D> {
type D = D;
fn mount(&self) -> io::Result<(Entry, u64)> {
let entry = self.do_lookup(fuse::ROOT_ID, &CString::new(".").unwrap())?;
Ok((entry, VFS_MAX_INO))
}
fn as_any(&self) -> &dyn Any {
self
}
}
fn ebadf() -> io::Error {
io::Error::from_raw_os_error(libc::EBADF)
}
| match s {
"never" | "Never" | "NEVER" | "none" | "None" | "NONE" => Ok(CachePolicy::Never),
"auto" | "Auto" | "AUTO" => Ok(CachePolicy::Auto),
"always" | "Always" | "ALWAYS" => Ok(CachePolicy::Always),
_ => Err("invalid cache policy"),
}
}
}
| identifier_body |
CollectGame.js | BasicGame.CollectGame = function (game) {
this.state_label = 'CollectGame';
this.current_level = 1;
this.level_images = [];
this.level_images[1] = {
//'background': 'sky',
'background': 'bg_desert',
'tilemap': '',
'tileset': ''
};
this.text_style = {font: '65px kenvector_future', fill: 'white', align: 'center'};
this.instructions = null;
this.scoreboard = null;
this.score = 0;
this.score_text = null;
this.high_score = 0;
this.level_text = null;
this.background = null;
this.started = false;
this.background_layer = null;
this.move_button_group = null;
this.ui_layer = null;
this.problem_text = null;
this.answer = 0;
this.boulders = null;
this.zizo = null;
this.gametime = 0;
this.numCollected = 0;
this.win_sound = null;
this.inputActive = false;
this.difficulty_text = null;
//this.boulder_text = null;
this.lastBoulderPosition = 0;
this.lastBoulderValue = 0;
};
BasicGame.CollectGame.prototype = {
preload: function() {
//this.game.load.image('rock', 'assets/boss_game/rock.png');
this.game.load.image('gun', 'assets/boss_game/gun.png');
this.game.load.image('zcar', 'assets/collect_game/zcar.png');
this.game.load.image('arrowUp', 'assets/collect_game/arrowUp.png');
this.game.load.image('arrowDown', 'assets/collect_game/arrowDown.png');
this.game.load.image('boulder', 'assets/collect_game/boulder.png');
if ($.cookie('collect_high_score') != null && $.cookie('collect_high_score') != '') {
this.high_score = $.cookie('collect_high_score');
}
},
create: function() {
console.log('Collect game');
this.background_music = this.game.add.audio('boss_background_music');
this.right_answer_sound = this.game.add.audio('right_answer_sound');
this.wrong_answer_sound = this.game.add.audio('wrong_answer_sound');
this.win_sound = this.game.add.audio('win_sound');
// Manage Layers
this.background_layer = this.game.add.group();
this.background_layer.z = 0;
// move button group
this.move_button_group = this.game.add.group();
this.move_button_group.z = 1;
this.up_button = this.game.add.sprite(50, 175, 'arrowUp');
this.up_button.inputEnabled = true;
this.down_button = this.game.add.sprite(50, 425, 'arrowDown');
this.down_button.inputEnabled = true;
this.game.input.onDown.add(this.activeInput, this);
this.game.input.onUp.add(this.releaseInput, this);
// Pause button / ui group
this.ui_layer = this.game.add.group();
this.ui_layer.z = 2;
// Create rock group
this.boulders = this.game.add.group();
// Score display
this.score_text = this.game.add.text(this.game.width - 145, 9, this.score + '', this.text_style);
this.score_text.anchor.setTo(1, 0);
//this.score_text.fixedToCamera = true;
this.ui_layer.add(this.score_text);
// Create zcar
this.zcar = this.game.add.sprite(337, 443, 'zcar');
this.zcar.anchor.setTo(.5, 1);
// Instructions
this.instructions = this.game.add.sprite(0, 0, 'collect_instructions');
this.start_button = this.game.add.button(this.game.world.centerX, this.game.world.height - 90, 'yellow_buttons', this.killInstructions, this, 3, 3, 4);
this.start_button.alpha = 0;
this.start_button.anchor.setTo(0.5, 0.5);
this.start_text = this.game.add.text(4, 0, 'START', {font: '30pt kenvector_future', fill: '#000', align: 'center'});
this.start_text.anchor.setTo(0.5, 0.5);
this.start_button.addChild(this.start_text);
// Initialize scoreboard
this.scoreboard = this.game.add.group();
var instruction_audio = this.game.add.audio('collect_instruction_sound');
instruction_audio.onStop.add(function(){
this.game.add.tween(this.start_button).to({alpha: 1}, 500, null, true);
}, this);
instruction_audio.play();
},
activeInput: function() {
this.inputActive = true;
},
releaseInput: function() {
this.inputActive = false;
},
killInstructions: function() {
this.start_button.destroy();
this.instructions.destroy();
this.startLevel();
},
update: function() {
if (!this.started) {
return;
}
if (this.inputActive) {
if (this.up_button.input.pointerOver()) {
//this.zcar.body.velocity.y = 200;
this.zcar.y -= 5;
if (this.zcar.y <= 200) {
this.zcar.y = 200;
}
}
if (this.down_button.input.pointerOver()) {
//this.zcar.body.velocity.y = 200;
this.zcar.y += 5;
if (this.zcar.y >= 650) {
this.zcar.y = 650;
}
}
}
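// Spawn a new boulder every 150 update ticks.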
this.gametime = this.gametime+1;
if (this.gametime >= 150) {
this.gametime = this.gametime-150;
this.generateBoulder();
}
this.game.physics.overlap(this.zcar, this.boulders, this.touchBoulder, null, this);
if (this.numCollected == 3) {
this.winLevel();
}
},
generateBoulder: function() {
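// Pick a vertical lane (1-6) different from the previous boulder's lane.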
boulderPos = this.game.rnd.integerInRange(1, 6);
while (boulderPos == this.lastBoulderPosition) {
boulderPos = this.game.rnd.integerInRange(1, 6);
}
this.lastBoulderPosition = boulderPos;
boulder = this.boulders.create(1220, 100*boulderPos, 'boulder');
boulder.body.gravity.x = -80;
boulder.anchor.setTo(0,0);
// 1 in 4 chance of generating the right answer
boulderrnd = this.game.rnd.integerInRange(1, 4);
console.log('boulderrnd' + boulderrnd);
if (boulderrnd == 2) {
boulderVal = this.answer;
}
else {
boulderVal = this.game.rnd.integerInRange(1, 20);
}
while (boulderVal == this.lastBoulderValue) {
boulderVal = this.game.rnd.integerInRange(1, 20);
}
this.lastBoulderValue = boulderVal;
boulder_text = this.game.add.text(15, 15, boulderVal, {font: '40px kenvector_future', fill: '#fff', align: 'center'});
boulder.value = boulderVal;
boulder_text.anchor.setTo(0, 0);
boulder.addChild(boulder_text);
/*theB = this.boulders.create(1000, 800, 'boulder');
rock = this.rocks.create(this.game.rnd.integerInRange(1, 4)*280 +82, 1, 'rock');
theB.body.gravity.x = -40;
theB.anchor.setTo(0,0);*/
//boulder.value = this.boulderVal;
//console.log('VALUE: ' + boulder.value);
//console.log('boulder');
//boulder_text = this.game.add.text(15, 15, boulder.value, {font: '40px kenvector_future', fill: '#fff', align: 'center'});
},
/*reset: function() {
this.zcar.x = 337;
this.started = false;
this.boulders.destroy();
this.boulders = this.game.add.group();
},*/
startLevel: function() {
this.scoreboard.destroy();
/*if (this.game.global_vars.load_saved_state) {
this.current_level = this.game.global_vars.saved_level;
this.game.global_vars.load_saved_state = false;
}*/
// Load level specific things
this.background = this.game.add.sprite(0, 0, this.level_images[this.current_level]['background']);
this.background_layer.add(this.background);
this.difficulty_text = this.game.add.text(80, 10, 'Difficulty ' + this.game.global_vars.diff_score, {font: '20px kenvector_future', fill: '#fff'});
//this.started = false;
//this.boulders = this.game.add.group();
//this.startRace();
this.started = true;
this.displayNewProblem();
// commented out to test 10/17
//this.background_music.play(0,0,1,true);
},
startRace: function() {
this.started = true;
this.displayNewProblem();
// commented out to test 10/17
//this.background_music.play(0,0,1,true);
},
displayNewProblem: function() {
var problem = this.game.getMathProblem('addSolution', 'easy');
this.answer = problem.answer;
if (this.problem_text == null || !this.problem_text.exists) {
this.problem_text = this.game.add.text(640, 100, problem.text, {font: '40px kenvector_future', fill: '#fff'});
this.problem_text.anchor.setTo(0.5, 0.5);
} else {
this.problem_text.setText(problem.text);
}
},
moveUp: function() {
this.zcar.body.velocity.y = 200;
},
moveDown: function() {
this.zcar.body.velocity.y = -200;
},
checkAnswer: function(answer) {
console.log('checking: ' + answer + " and " + this.answer);
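// A wrong answer lowers the difficulty and costs 30 points (both floored at 0);
// a right answer raises the difficulty, adds 100 points, and counts toward the three needed to win.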
if (this.answer != answer){
this.wrong_answer_sound.play();
this.game.global_vars.diff_score -= 1;
if (this.game.global_vars.diff_score < 0) {
this.game.global_vars.diff_score = 0;
}
this.displayNewProblem();
this.score -= 30;
if (this.score < 0) |
}
if (this.answer == answer) {
this.right_answer_sound.play();
this.displayNewProblem();
this.game.global_vars.diff_score += 1;
this.score += 100;
this.numCollected += 1;
}
this.difficulty_text.destroy();
this.difficulty_text = this.game.add.text(80, 10, 'Difficulty ' + this.game.global_vars.diff_score, {font: '20px kenvector_future', fill: '#fff'});
this.score_text.setText(this.score);
},
winLevel: function() {
this.started = false;
this.zcar.body.velocity.x = 0;
this.background_music.stop();
this.win_sound.play();
this.showScoreboard(true);
},
touchBoulder: function(zcar, boulder) {
/*if (!this.started) {
return;
}*/
// This is so collision seems better towards the back of the car
if (boulder.x > 250) {
this.checkAnswer(boulder.value);
boulder.destroy();
}
},
showScoreboard: function(win) {
this.scoreboard = this.game.add.group();
this.scoreboard.add(this.game.add.sprite(0, 0, 'score_board'));
var start_button = null;
var new_pr = false;
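// Persist a new personal record in the collect_high_score cookie.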
if (this.score > this.high_score) {
new_pr = true;
$.cookie('collect_high_score', this.score);
this.high_score = this.score;
}
/*if (!this.game.global_vars.story_mode) {
start_button = this.game.add.button(this.game.world.centerX, this.game.world.height - 100, 'yellow_buttons', this.backToMainMenu, this, 3, 3, 4);
start_button.anchor.setTo(0.5, 0.5);
start_text = this.game.add.text(4, 0, 'Menu', {font: '30pt kenvector_future', fill: '#000', align: 'center'});
start_text.anchor.setTo(0.5, 0.5);
start_button.addChild(start_text);
} else {*/
start_button = this.game.add.button(this.game.world.centerX, this.game.world.height - 100, 'yellow_buttons', this.winGame, this, 3, 3, 4);
start_button.anchor.setTo(0.5, 0.5);
start_text = this.game.add.text(4, 0, 'Next', {font: '20pt kenvector_future', fill: '#000', align: 'center'});
start_text.anchor.setTo(0.5, 0.5);
start_button.addChild(start_text);
//}
result = 'Score:\n' + this.score + '\n';
if (new_pr) {
result += '**NEW**'
}
var header = this.game.add.text(this.game.world.centerX, 130, 'FINISHED!', {font: '75pt kenvector_future', fill: '#fff', align: 'center'});
header.anchor.setTo(0.5, 0);
var text = this.game.add.text(this.game.world.centerX, 290, result, this.text_style);
text.anchor.setTo(0.5, 0);
this.scoreboard.add(text);
this.scoreboard.add(start_button);
this.scoreboard.add(header);
},
/*endRace: function() {
this.started = false;
//this.problem_text.destroy();
this.zcar.body.velocity.x = 0;
this.background_music.stop();
//this.opponents.setAll('body.velocity.x', 0);
//this.opponents.callAll('play', null, 'wait');
},*/
winGame: function() {
//console.log('Won horse game!');
this.current_level = 1;
// Unlock this mini game
this.game.unlockMiniGame(this.state_label);
this.game.goToNextState.call(this);
}
/*backToMainMenu: function() {
this.game.state.start('MainMenu');
}*/
/*shutdown: function() {
console.log('shutdown');
console.log(this.zcar.body.velocity.x);
}*/
}; | {
this.score = 0;
} | conditional_block |
CollectGame.js | BasicGame.CollectGame = function (game) {
this.state_label = 'CollectGame';
this.current_level = 1;
this.level_images = [];
this.level_images[1] = {
//'background': 'sky',
'background': 'bg_desert',
'tilemap': '',
'tileset': ''
};
this.text_style = {font: '65px kenvector_future', fill: 'white', align: 'center'};
this.instructions = null;
this.scoreboard = null;
this.score = 0;
this.score_text = null;
this.high_score = 0;
this.level_text = null;
this.background = null;
this.started = false;
this.background_layer = null;
this.move_button_group = null;
this.ui_layer = null;
this.problem_text = null;
this.answer = 0;
this.boulders = null;
this.zizo = null;
this.gametime = 0;
this.numCollected = 0;
this.win_sound = null;
this.inputActive = false;
this.difficulty_text = null;
//this.boulder_text = null;
this.lastBoulderPosition = 0;
this.lastBoulderValue = 0;
};
BasicGame.CollectGame.prototype = {
preload: function() {
//this.game.load.image('rock', 'assets/boss_game/rock.png');
this.game.load.image('gun', 'assets/boss_game/gun.png');
this.game.load.image('zcar', 'assets/collect_game/zcar.png');
this.game.load.image('arrowUp', 'assets/collect_game/arrowUp.png');
this.game.load.image('arrowDown', 'assets/collect_game/arrowDown.png');
this.game.load.image('boulder', 'assets/collect_game/boulder.png');
if ($.cookie('collect_high_score') != null && $.cookie('collect_high_score') != '') {
this.high_score = $.cookie('collect_high_score');
}
},
create: function() {
console.log('Collect game');
this.background_music = this.game.add.audio('boss_background_music');
this.right_answer_sound = this.game.add.audio('right_answer_sound');
this.wrong_answer_sound = this.game.add.audio('wrong_answer_sound');
this.win_sound = this.game.add.audio('win_sound');
// Manage Layers
this.background_layer = this.game.add.group();
this.background_layer.z = 0;
// move button group
this.move_button_group = this.game.add.group();
this.move_button_group.z = 1;
this.up_button = this.game.add.sprite(50, 175, 'arrowUp');
this.up_button.inputEnabled = true;
this.down_button = this.game.add.sprite(50, 425, 'arrowDown');
this.down_button.inputEnabled = true;
this.game.input.onDown.add(this.activeInput, this);
this.game.input.onUp.add(this.releaseInput, this);
// Pause button / ui group
this.ui_layer = this.game.add.group();
this.ui_layer.z = 2;
// Create rock group
this.boulders = this.game.add.group();
// Score display
this.score_text = this.game.add.text(this.game.width - 145, 9, this.score + '', this.text_style);
this.score_text.anchor.setTo(1, 0);
//this.score_text.fixedToCamera = true;
this.ui_layer.add(this.score_text);
// Create zcar
this.zcar = this.game.add.sprite(337, 443, 'zcar');
this.zcar.anchor.setTo(.5, 1);
// Instructions
this.instructions = this.game.add.sprite(0, 0, 'collect_instructions');
this.start_button = this.game.add.button(this.game.world.centerX, this.game.world.height - 90, 'yellow_buttons', this.killInstructions, this, 3, 3, 4);
this.start_button.alpha = 0;
this.start_button.anchor.setTo(0.5, 0.5);
this.start_text = this.game.add.text(4, 0, 'START', {font: '30pt kenvector_future', fill: '#000', align: 'center'});
this.start_text.anchor.setTo(0.5, 0.5);
this.start_button.addChild(this.start_text);
// Initialize scoreboard
this.scoreboard = this.game.add.group();
var instruction_audio = this.game.add.audio('collect_instruction_sound');
instruction_audio.onStop.add(function(){
this.game.add.tween(this.start_button).to({alpha: 1}, 500, null, true);
}, this);
instruction_audio.play();
},
activeInput: function() {
this.inputActive = true;
},
releaseInput: function() {
this.inputActive = false;
},
killInstructions: function() {
this.start_button.destroy();
this.instructions.destroy();
this.startLevel();
},
update: function() {
if (!this.started) {
return;
}
if (this.inputActive) {
if (this.up_button.input.pointerOver()) {
//this.zcar.body.velocity.y = 200;
this.zcar.y -= 5;
if (this.zcar.y <= 200) {
this.zcar.y = 200;
}
}
if (this.down_button.input.pointerOver()) {
//this.zcar.body.velocity.y = 200;
this.zcar.y += 5;
if (this.zcar.y >= 650) {
this.zcar.y = 650;
}
}
}
this.gametime = this.gametime+1;
if (this.gametime >= 150) {
this.gametime = this.gametime-150;
this.generateBoulder();
}
this.game.physics.overlap(this.zcar, this.boulders, this.touchBoulder, null, this);
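// The level is complete once three correct boulders have been collected.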
if (this.numCollected == 3) {
this.winLevel();
}
},
generateBoulder: function() {
boulderPos = this.game.rnd.integerInRange(1, 6);
while (boulderPos == this.lastBoulderPosition) {
boulderPos = this.game.rnd.integerInRange(1, 6);
}
this.lastBoulderPosition = boulderPos;
boulder = this.boulders.create(1220, 100*boulderPos, 'boulder');
boulder.body.gravity.x = -80;
boulder.anchor.setTo(0,0);
// 1 in 4 chance of generating the right answer
boulderrnd = this.game.rnd.integerInRange(1, 4);
console.log('boulderrnd' + boulderrnd);
if (boulderrnd == 2) {
boulderVal = this.answer;
}
else {
boulderVal = this.game.rnd.integerInRange(1, 20);
}
while (boulderVal == this.lastBoulderValue) {
boulderVal = this.game.rnd.integerInRange(1, 20);
}
this.lastBoulderValue = boulderVal;
boulder_text = this.game.add.text(15, 15, boulderVal, {font: '40px kenvector_future', fill: '#fff', align: 'center'});
boulder.value = boulderVal;
boulder_text.anchor.setTo(0, 0);
boulder.addChild(boulder_text);
/*theB = this.boulders.create(1000, 800, 'boulder');
rock = this.rocks.create(this.game.rnd.integerInRange(1, 4)*280 +82, 1, 'rock');
theB.body.gravity.x = -40;
theB.anchor.setTo(0,0);*/ | //boulder.value = this.boulderVal;
//console.log('VALUE: ' + boulder.value);
//console.log('boulder');
//boulder_text = this.game.add.text(15, 15, boulder.value, {font: '40px kenvector_future', fill: '#fff', align: 'center'});
},
/*reset: function() {
this.zcar.x = 337;
this.started = false;
this.boulders.destroy();
this.boulders = this.game.add.group();
},*/
startLevel: function() {
this.scoreboard.destroy();
/*if (this.game.global_vars.load_saved_state) {
this.current_level = this.game.global_vars.saved_level;
this.game.global_vars.load_saved_state = false;
}*/
// Load level specific things
this.background = this.game.add.sprite(0, 0, this.level_images[this.current_level]['background']);
this.background_layer.add(this.background);
this.difficulty_text = this.game.add.text(80, 10, 'Difficulty ' + this.game.global_vars.diff_score, {font: '20px kenvector_future', fill: '#fff'});
//this.started = false;
//this.boulders = this.game.add.group();
//this.startRace();
this.started = true;
this.displayNewProblem();
// commented out to test 10/17
//this.background_music.play(0,0,1,true);
},
startRace: function() {
this.started = true;
this.displayNewProblem();
// commented out to test 10/17
//this.background_music.play(0,0,1,true);
},
displayNewProblem: function() {
var problem = this.game.getMathProblem('addSolution', 'easy');
this.answer = problem.answer;
if (this.problem_text == null || !this.problem_text.exists) {
this.problem_text = this.game.add.text(640, 100, problem.text, {font: '40px kenvector_future', fill: '#fff'});
this.problem_text.anchor.setTo(0.5, 0.5);
} else {
this.problem_text.setText(problem.text);
}
},
moveUp: function() {
this.zcar.body.velocity.y = 200;
},
moveDown: function() {
this.zcar.body.velocity.y = -200;
},
checkAnswer: function(answer) {
console.log('checking: ' + answer + " and " + this.answer);
if (this.answer != answer){
this.wrong_answer_sound.play();
this.game.global_vars.diff_score -= 1;
if (this.game.global_vars.diff_score < 0) {
this.game.global_vars.diff_score = 0;
}
this.displayNewProblem();
this.score -= 30;
if (this.score < 0) {
this.score = 0;
}
}
if (this.answer == answer) {
this.right_answer_sound.play();
this.displayNewProblem();
this.game.global_vars.diff_score += 1;
this.score += 100;
this.numCollected += 1;
}
this.difficulty_text.destroy();
this.difficulty_text = this.game.add.text(80, 10, 'Difficulty ' + this.game.global_vars.diff_score, {font: '20px kenvector_future', fill: '#fff'});
this.score_text.setText(this.score);
},
winLevel: function() {
this.started = false;
this.zcar.body.velocity.x = 0;
this.background_music.stop();
this.win_sound.play();
this.showScoreboard(true);
},
touchBoulder: function(zcar, boulder) {
/*if (!this.started) {
return;
}*/
// This is so collision seems better towards the back of the car
if (boulder.x > 250) {
this.checkAnswer(boulder.value);
boulder.destroy();
}
},
showScoreboard: function(win) {
this.scoreboard = this.game.add.group();
this.scoreboard.add(this.game.add.sprite(0, 0, 'score_board'));
var start_button = null;
var new_pr = false;
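// Compare this run's score against the stored high score and update the cookie on a new record.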
if (this.score > this.high_score) {
new_pr = true;
$.cookie('collect_high_score', this.score);
this.high_score = this.score;
}
/*if (!this.game.global_vars.story_mode) {
start_button = this.game.add.button(this.game.world.centerX, this.game.world.height - 100, 'yellow_buttons', this.backToMainMenu, this, 3, 3, 4);
start_button.anchor.setTo(0.5, 0.5);
start_text = this.game.add.text(4, 0, 'Menu', {font: '30pt kenvector_future', fill: '#000', align: 'center'});
start_text.anchor.setTo(0.5, 0.5);
start_button.addChild(start_text);
} else {*/
start_button = this.game.add.button(this.game.world.centerX, this.game.world.height - 100, 'yellow_buttons', this.winGame, this, 3, 3, 4);
start_button.anchor.setTo(0.5, 0.5);
start_text = this.game.add.text(4, 0, 'Next', {font: '20pt kenvector_future', fill: '#000', align: 'center'});
start_text.anchor.setTo(0.5, 0.5);
start_button.addChild(start_text);
//}
result = 'Score:\n' + this.score + '\n';
if (new_pr) {
result += '**NEW**'
}
var header = this.game.add.text(this.game.world.centerX, 130, 'FINISHED!', {font: '75pt kenvector_future', fill: '#fff', align: 'center'});
header.anchor.setTo(0.5, 0);
var text = this.game.add.text(this.game.world.centerX, 290, result, this.text_style);
text.anchor.setTo(0.5, 0);
this.scoreboard.add(text);
this.scoreboard.add(start_button);
this.scoreboard.add(header);
},
/*endRace: function() {
this.started = false;
//this.problem_text.destroy();
this.zcar.body.velocity.x = 0;
this.background_music.stop();
//this.opponents.setAll('body.velocity.x', 0);
//this.opponents.callAll('play', null, 'wait');
},*/
winGame: function() {
//console.log('Won horse game!');
this.current_level = 1;
// Unlock this mini game
this.game.unlockMiniGame(this.state_label);
this.game.goToNextState.call(this);
}
/*backToMainMenu: function() {
this.game.state.start('MainMenu');
}*/
/*shutdown: function() {
console.log('shutdown');
console.log(this.zcar.body.velocity.x);
}*/
}; | random_line_split |
|
app.js | var canvas = document.getElementById("canvas");
var ctx = canvas.getContext("2d");
var offsetX = document.getElementById('canvas').offsetLeft;
var offsetY = document.getElementById('canvas').offsetTop;
var xPadding = 50;
var yPadding = 50;
var canvasWidth = 600;
var canvasHeight = 600;
canvas.style.width = '100%';
canvas.style.height = '100%';
// ...then set the internal size to match
canvas.width = canvas.offsetWidth;
canvas.height = canvas.offsetHeight;
ctx.canvas.width = canvasWidth;
ctx.canvas.height = canvasHeight;
var startX;
var startY;
var numMaxX = 24;
var numMaxY = 100;
var numMinX = 0;
var numMinY = 0;
var numStepX = 1;
var numStepY = 10;
var flagPin = false;
var isDown = false;
var pi2 = Math.PI * 2;
var resizerRadius = 8;
var rr = resizerRadius * resizerRadius;
var draggingResizer = {
x: 0,
y: 0
};
var imageX = 50;
var imageY = 50;
var tableGlobal;
// var numXscale = canvas.clientWidth / canvas.width;
// var numYscale = canvas.clientHeight / canvas.height;
var imageWidth, imageHeight, imageRight, imageBottom;
var draggingImage = false;
var startX;
var startY;
// var img = new Image();
// img.onload = function () {
// imageWidth = img.width;
// imageHeight = img.height;
// imageRight = imageX + imageWidth;
// imageBottom = imageY + imageHeight;
// draw(true, false);
// };
// img.src = "https://dl.dropboxusercontent.com/u/139992952/stackoverflow/facesSmall.png";
var imagDrop = document.createElement("img");
imagDrop.addEventListener("load", function() {
//clearCanvas();
imageWidth = imagDrop.width;
imageHeight = imagDrop.height;
imageRight = imageX + imageWidth;
imageBottom = imageY + imageHeight;
draw(true, false);
console.log("test");
//ctx.drawImage(imagDrop, 0, 0);
}, false);
// To enable drag and drop
canvas.addEventListener("dragover", function(evt) {
evt.preventDefault();
}, false);
// Handle dropped image file - only Firefox and Google Chrome
canvas.addEventListener("drop", function(evt) {
var files = evt.dataTransfer.files;
if (files.length > 0) {
var file = files[0];
if (typeof FileReader !== "undefined" && file.type.indexOf("image") != -1) {
var reader = new FileReader();
// Note: addEventListener doesn't work in Google Chrome for this event
reader.onload = function(evt) {
imagDrop.src = evt.target.result;
};
reader.readAsDataURL(file);
}
}
evt.preventDefault();
}, false);
updateAxis();
addTable();
function togglePin() {
if (flagPin) {
document.getElementById('idButtonPin').innerHTML = 'Start pinning';
flagPin = false;
} else {
document.getElementById('idButtonPin').innerHTML = 'Stop pinning';
flagPin = true;
}
}
function addTable() {
var newTable = document.createElement('table');
newTable.setAttribute("style", "float:left;position:relative");
var header = newTable.createTHead();
var row = header.insertRow(0);
var cell = row.insertCell(0);
cell.innerHTML = "X";
var cell = row.insertCell(1);
cell.innerHTML = "Y";
document.getElementById('tables').appendChild(newTable);
tableGlobal = newTable;
}
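// Convert internal canvas coordinates to on-screen (CSS) pixel coordinates.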
function canvas2realX(numX) {
return numX*canvas.clientWidth / canvas.width;
}
function canvas2realY(numY) {
return numY*canvas.clientHeight / canvas.height;
}
function drawAxis() {
ctx.lineWidth = 3;
ctx.strokeStyle = '#333';
ctx.font = 'italic 8pt sans-serif';
ctx.textAlign = "center";
// Draw the axes
ctx.beginPath();
ctx.moveTo(xPadding, canvas.height - yPadding);
ctx.lineTo(xPadding, 0);
ctx.stroke();
ctx.beginPath();
ctx.moveTo(xPadding, canvas.height - yPadding);
ctx.lineTo(canvas.width, canvas.height - yPadding);
ctx.stroke();
// Draw the X value texts
ctx.lineWidth = 1;
for (var i = 0; i <= numMaxX - numMinX; i += numStepX) {
ctx.fillText(i, getXPixel(i), canvas.height - yPadding + 20);
ctx.beginPath();
ctx.moveTo(getXPixel(i), canvas.height - yPadding);
ctx.lineTo(getXPixel(i), 0);
ctx.stroke();
}
ctx.textAlign = "right";
ctx.textBaseline = "middle";
for (var i = 0; i <= numMaxY - numMinY; i += numStepY) {
ctx.fillText(i, xPadding - 10, getYPixel(i));
ctx.beginPath();
ctx.moveTo(xPadding, getYPixel(i));
ctx.lineTo(canvas.width, getYPixel(i));
ctx.stroke();
}
ctx.strokeStyle = '#f00';
}
function updateAxis() {
numMaxX = parseInt(document.getElementById('MaxX').value);
numMaxY = parseInt(document.getElementById('MaxY').value);
numStepX = parseInt(document.getElementById('StepX').value);
numStepY = parseInt(document.getElementById('StepY').value);
ctx.clearRect(0, 0, canvas.width, canvas.height);
drawAxis();
}
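// Return the x pixel for a graph point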
function getXPixel(val) {
return ((canvas.width - 2 * xPadding) / numMaxX) * val + (xPadding);
}
// Return the y pixel for a graph point
function getYPixel(val) {
return canvas.height - (((canvas.height - 2 * yPadding) / numMaxY) * val) - yPadding;
}
function draw(withAnchors, withBorders) {
// clear the canvas
ctx.clearRect(0, 0, canvas.width, canvas.height);
updateAxis();
// draw the image
ctx.globalAlpha = 0.4;
ctx.drawImage(imagDrop, 0, 0, imagDrop.width, imagDrop.height, imageX, imageY, imageWidth, imageHeight);
// optionally draw the draggable anchors
if (withAnchors) {
drawDragAnchor(imageX, imageY);
drawDragAnchor(imageRight, imageY);
drawDragAnchor(imageRight, imageBottom);
drawDragAnchor(imageX, imageBottom);
}
// optionally draw the connecting anchor lines
if (withBorders) {
ctx.beginPath();
ctx.moveTo(imageX, imageY);
ctx.lineTo(imageRight, imageY);
ctx.lineTo(imageRight, imageBottom);
ctx.lineTo(imageX, imageBottom);
ctx.closePath();
ctx.stroke();
}
}
function drawDragAnchor(x, y) {
ctx.beginPath();
ctx.arc(x, y, resizerRadius, 0, pi2, false);
ctx.closePath();
ctx.fill();
}
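// Return the index of the resize handle under the pointer (0 top-left, 1 top-right, 2 bottom-right, 3 bottom-left), or -1 if none is hit.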
function anchorHitTest(x, y) {
var dx, dy;
// top-left
dx = x - canvas2realX(imageX);
dy = y - canvas2realY(imageY);
if (dx * dx + dy * dy <= rr) {
return (0);
}
// top-right
dx = x - canvas2realX(imageRight);
dy = y - canvas2realY(imageY);
console.log(dx.toString()+' '+dy.toString());
if (dx * dx + dy * dy <= rr) {
return (1);
}
// bottom-right
dx = x - canvas2realX(imageRight);
dy = y - canvas2realY(imageBottom);
if (dx * dx + dy * dy <= rr) {
return (2);
}
// bottom-left
dx = x - canvas2realX(imageX);
dy = y - canvas2realY(imageBottom);
if (dx * dx + dy * dy <= rr) {
return (3);
}
return (-1);
}
function hitImage(x, y) {
return (x > canvas2realX(imageX) && x < canvas2realX(imageX + imageWidth) && y > canvas2realY(imageY) && y < canvas2realY(imageY + imageHeight));
}
function handleMouseDown(e) {
if (!flagPin) {
startX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft);
startY = parseInt(e.clientY - document.getElementById('canvas').offsetTop);
console.log(startX.toString()+'_'+startY.toString());
draggingResizer = anchorHitTest(startX, startY);
draggingImage = draggingResizer < 0 && hitImage(startX, startY);
} else {
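// Pinning mode: convert the click position into chart data coordinates and append a row to the current table.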
var row = tableGlobal.insertRow(tableGlobal.rows.length);
var cell1 = row.insertCell(0);
var cell2 = row.insertCell(1);
cell2.innerHTML = Math.round((canvas.clientHeight - parseInt(e.clientY - document.getElementById('canvas').offsetTop) - canvas2realY(yPadding)) / ((canvas.clientHeight - 2 * canvas2realY(yPadding)) / numMaxY));
cell1.innerHTML = Math.round((parseInt(e.clientX - document.getElementById('canvas').offsetLeft) - canvas2realX(xPadding)) / ((canvas.clientWidth - 2 * canvas2realX(xPadding)) / numMaxX));
}
}
function handleMouseUp(e) |
function handleMouseOut(e) {
if (!flagPin) {
handleMouseUp(e);
}
}
function handleMouseMove(e) {
if (!flagPin) {
if (draggingResizer > -1) {
mouseX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft)*canvas.width/canvas.clientWidth;
mouseY = parseInt(e.clientY - document.getElementById('canvas').offsetTop)*canvas.height/canvas.clientHeight;
console.log(mouseX.toString()+','+mouseY.toString());
// resize the image
switch (draggingResizer) {
case 0:
//top-left
imageX = mouseX;
imageWidth = imageRight - mouseX;
imageY = mouseY;
imageHeight = imageBottom - mouseY;
break;
case 1:
//top-right
imageY = mouseY;
imageWidth = mouseX - imageX;
imageHeight = imageBottom - mouseY;
break;
case 2:
//bottom-right
imageWidth = mouseX - imageX;
imageHeight = mouseY - imageY;
break;
case 3:
//bottom-left
imageX = mouseX;
imageWidth = imageRight - mouseX;
imageHeight = mouseY - imageY;
break;
}
if (imageWidth < 25) { imageWidth = 25; }
if (imageHeight < 25) { imageHeight = 25; }
// set the image right and bottom
imageRight = imageX + imageWidth;
imageBottom = imageY + imageHeight;
// redraw the image with resizing anchors
draw(true, true);
} else if (draggingImage) {
imageClick = false;
mouseX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft);
mouseY = parseInt(e.clientY - document.getElementById('canvas').offsetTop);
// move the image by the amount of the latest drag
var dx = mouseX - startX;
var dy = mouseY - startY;
imageX += dx;
imageY += dy;
imageRight += dx;
imageBottom += dy;
// reset the startXY for next time
startX = mouseX;
startY = mouseY;
// redraw the image with border
draw(false, true);
}
}
}
function downloadTables() {
var vecTables = document.getElementById('tables').children;
var strTextFinal = '';
for (var i = 0; i < vecTables.length; i++) {
var vecRows = vecTables[i].children[0].children;
for (var j = 0; j < vecRows.length; j++) {
strTextFinal = strTextFinal+vecRows[j].children[0].textContent+','+vecRows[j].children[1].textContent+'\n';
}
}
var blob = new Blob([strTextFinal], { type: "text/plain;charset=utf-8" });
saveAs(blob, "chartdigitizer.txt");
}
$("#canvas").mousedown(function(e) {
handleMouseDown(e);
});
$("#canvas").mousemove(function(e) {
handleMouseMove(e);
});
$("#canvas").mouseup(function(e) {
handleMouseUp(e);
});
$("#canvas").mouseout(function(e) {
handleMouseOut(e);
});
| {
if (!flagPin) {
draggingResizer = -1;
draggingImage = false;
draw(true, false);
}
} | identifier_body |
app.js | var canvas = document.getElementById("canvas");
var ctx = canvas.getContext("2d");
var offsetX = document.getElementById('canvas').offsetLeft;
var offsetY = document.getElementById('canvas').offsetTop;
var xPadding = 50;
var yPadding = 50;
var canvasWidth = 600;
var canvasHeight = 600;
canvas.style.width = '100%';
canvas.style.height = '100%';
// ...then set the internal size to match
canvas.width = canvas.offsetWidth;
canvas.height = canvas.offsetHeight;
ctx.canvas.width = canvasWidth;
ctx.canvas.height = canvasHeight;
var startX;
var startY;
var numMaxX = 24;
var numMaxY = 100;
var numMinX = 0;
var numMinY = 0;
var numStepX = 1;
var numStepY = 10;
var flagPin = false;
var isDown = false;
var pi2 = Math.PI * 2;
var resizerRadius = 8;
var rr = resizerRadius * resizerRadius;
var draggingResizer = {
x: 0,
y: 0
};
var imageX = 50;
var imageY = 50;
var tableGlobal;
// var numXscale = canvas.clientWidth / canvas.width;
// var numYscale = canvas.clientHeight / canvas.height;
var imageWidth, imageHeight, imageRight, imageBottom;
var draggingImage = false;
var startX;
var startY;
// var img = new Image();
// img.onload = function () {
// imageWidth = img.width;
// imageHeight = img.height;
// imageRight = imageX + imageWidth;
// imageBottom = imageY + imageHeight;
// draw(true, false);
// };
// img.src = "https://dl.dropboxusercontent.com/u/139992952/stackoverflow/facesSmall.png";
var imagDrop = document.createElement("img");
imagDrop.addEventListener("load", function() {
//clearCanvas();
imageWidth = imagDrop.width;
imageHeight = imagDrop.height;
imageRight = imageX + imageWidth;
imageBottom = imageY + imageHeight;
draw(true, false);
console.log("test");
//ctx.drawImage(imagDrop, 0, 0);
}, false);
// To enable drag and drop
canvas.addEventListener("dragover", function(evt) {
evt.preventDefault();
}, false);
// Handle dropped image file - only Firefox and Google Chrome
canvas.addEventListener("drop", function(evt) {
var files = evt.dataTransfer.files;
if (files.length > 0) {
var file = files[0];
if (typeof FileReader !== "undefined" && file.type.indexOf("image") != -1) {
var reader = new FileReader();
// Note: addEventListener doesn't work in Google Chrome for this event
reader.onload = function(evt) {
imagDrop.src = evt.target.result;
};
reader.readAsDataURL(file);
}
}
evt.preventDefault();
}, false);
updateAxis();
addTable();
function togglePin() {
if (flagPin) {
document.getElementById('idButtonPin').innerHTML = 'Start pinning';
flagPin = false;
} else {
document.getElementById('idButtonPin').innerHTML = 'Stop pinning';
flagPin = true;
}
}
function addTable() {
var newTable = document.createElement('table');
newTable.setAttribute("style", "float:left;position:relative");
var header = newTable.createTHead();
var row = header.insertRow(0);
var cell = row.insertCell(0);
cell.innerHTML = "X";
var cell = row.insertCell(1);
cell.innerHTML = "Y";
document.getElementById('tables').appendChild(newTable);
tableGlobal = newTable;
}
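// Map a coordinate in the internal drawing buffer to on-screen (client) pixels.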
function canvas2realX(numX) {
return numX*canvas.clientWidth / canvas.width;
}
function canvas2realY(numY) {
return numY*canvas.clientHeight / canvas.height;
}
function drawAxis() {
ctx.lineWidth = 3;
ctx.strokeStyle = '#333';
ctx.font = 'italic 8pt sans-serif';
ctx.textAlign = "center";
// Draw the axes
ctx.beginPath();
ctx.moveTo(xPadding, canvas.height - yPadding);
ctx.lineTo(xPadding, 0);
ctx.stroke();
ctx.beginPath();
ctx.moveTo(xPadding, canvas.height - yPadding);
ctx.lineTo(canvas.width, canvas.height - yPadding);
ctx.stroke();
// Draw the X value labels and vertical grid lines
ctx.lineWidth = 1;
for (var i = 0; i <= numMaxX - numMinX; i += numStepX) {
ctx.fillText(i, getXPixel(i), canvas.height - yPadding + 20);
ctx.beginPath();
ctx.moveTo(getXPixel(i), canvas.height - yPadding);
ctx.lineTo(getXPixel(i), 0);
ctx.stroke();
}
ctx.textAlign = "right";
ctx.textBaseline = "middle";
for (var i = 0; i <= numMaxY - numMinY; i += numStepY) {
ctx.fillText(i, xPadding - 10, getYPixel(i));
ctx.beginPath();
ctx.moveTo(xPadding, getYPixel(i));
ctx.lineTo(canvas.width, getYPixel(i));
ctx.stroke();
}
ctx.strokeStyle = '#f00';
}
function updateAxis() {
numMaxX = parseInt(document.getElementById('MaxX').value);
numMaxY = parseInt(document.getElementById('MaxY').value);
numStepX = parseInt(document.getElementById('StepX').value);
numStepY = parseInt(document.getElementById('StepY').value);
ctx.clearRect(0, 0, canvas.width, canvas.height);
drawAxis();
}
function getXPixel(val) {
return ((canvas.width - 2 * xPadding) / numMaxX) * val + (xPadding);
}
// Return the y pixel for a graph point
function getYPixel(val) {
return canvas.height - (((canvas.height - 2 * yPadding) / numMaxY) * val) - yPadding;
}
function draw(withAnchors, withBorders) {
// clear the canvas
ctx.clearRect(0, 0, canvas.width, canvas.height);
updateAxis();
// draw the image
ctx.globalAlpha = 0.4;
ctx.drawImage(imagDrop, 0, 0, imagDrop.width, imagDrop.height, imageX, imageY, imageWidth, imageHeight);
// optionally draw the draggable anchors
if (withAnchors) {
drawDragAnchor(imageX, imageY);
drawDragAnchor(imageRight, imageY);
drawDragAnchor(imageRight, imageBottom);
drawDragAnchor(imageX, imageBottom);
}
// optionally draw the connecting anchor lines
if (withBorders) {
ctx.beginPath();
ctx.moveTo(imageX, imageY);
ctx.lineTo(imageRight, imageY);
ctx.lineTo(imageRight, imageBottom);
ctx.lineTo(imageX, imageBottom);
ctx.closePath();
ctx.stroke();
}
}
function drawDragAnchor(x, y) {
ctx.beginPath();
ctx.arc(x, y, resizerRadius, 0, pi2, false);
ctx.closePath();
ctx.fill();
}
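// Hit-test the four resize anchors (in on-screen pixels); returns 0-3 for
// top-left, top-right, bottom-right, bottom-left, or -1 if no anchor was hit.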
function anchorHitTest(x, y) {
var dx, dy;
// top-left
dx = x - canvas2realX(imageX);
dy = y - canvas2realY(imageY);
if (dx * dx + dy * dy <= rr) {
return (0);
}
// top-right
dx = x - canvas2realX(imageRight);
dy = y - canvas2realY(imageY);
console.log(dx.toString()+' '+dy.toString());
if (dx * dx + dy * dy <= rr) {
return (1);
}
// bottom-right
dx = x - canvas2realX(imageRight);
dy = y - canvas2realY(imageBottom);
if (dx * dx + dy * dy <= rr) |
// bottom-left
dx = x - canvas2realX(imageX);
dy = y - canvas2realY(imageBottom);
if (dx * dx + dy * dy <= rr) {
return (3);
}
return (-1);
}
function hitImage(x, y) {
return (x > canvas2realX(imageX) && x < canvas2realX(imageX + imageWidth) && y > canvas2realY(imageY) && y < canvas2realY(imageY + imageHeight));
}
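// When pinning is off, a mouse-down either grabs a resize anchor or starts dragging
// the image; when pinning is on, the click is converted to chart coordinates and
// appended as a new row of the current table.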
function handleMouseDown(e) {
if (!flagPin) {
startX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft);
startY = parseInt(e.clientY - document.getElementById('canvas').offsetTop);
console.log(startX.toString()+'_'+startY.toString());
draggingResizer = anchorHitTest(startX, startY);
draggingImage = draggingResizer < 0 && hitImage(startX, startY);
} else {
var row = tableGlobal.insertRow(tableGlobal.rows.length);
var cell1 = row.insertCell(0);
var cell2 = row.insertCell(1);
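// Convert the click position from on-screen pixels to chart coordinates:
// subtract the canvas offset and axis padding, then divide by the pixels-per-unit of each axis.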
cell2.innerHTML = Math.round((canvas.clientHeight - parseInt(e.clientY - document.getElementById('canvas').offsetTop) - canvas2realY(yPadding)) / ((canvas.clientHeight - 2 * canvas2realY(yPadding)) / numMaxY));
cell1.innerHTML = Math.round((parseInt(e.clientX - document.getElementById('canvas').offsetLeft) - canvas2realX(xPadding)) / ((canvas.clientWidth - 2 * canvas2realX(xPadding)) / numMaxX));
}
}
function handleMouseUp(e) {
if (!flagPin) {
draggingResizer = -1;
draggingImage = false;
draw(true, false);
}
}
function handleMouseOut(e) {
if (!flagPin) {
handleMouseUp(e);
}
}
function handleMouseMove(e) {
if (!flagPin) {
if (draggingResizer > -1) {
mouseX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft)*canvas.width/canvas.clientWidth;
mouseY = parseInt(e.clientY - document.getElementById('canvas').offsetTop)*canvas.height/canvas.clientHeight;
console.log(mouseX.toString()+','+mouseY.toString());
// resize the image
switch (draggingResizer) {
case 0:
//top-left
imageX = mouseX;
imageWidth = imageRight - mouseX;
imageY = mouseY;
imageHeight = imageBottom - mouseY;
break;
case 1:
//top-right
imageY = mouseY;
imageWidth = mouseX - imageX;
imageHeight = imageBottom - mouseY;
break;
case 2:
//bottom-right
imageWidth = mouseX - imageX;
imageHeight = mouseY - imageY;
break;
case 3:
//bottom-left
imageX = mouseX;
imageWidth = imageRight - mouseX;
imageHeight = mouseY - imageY;
break;
}
if (imageWidth < 25) { imageWidth = 25; }
if (imageHeight < 25) { imageHeight = 25; }
// set the image right and bottom
imageRight = imageX + imageWidth;
imageBottom = imageY + imageHeight;
// redraw the image with resizing anchors
draw(true, true);
} else if (draggingImage) {
imageClick = false;
mouseX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft);
mouseY = parseInt(e.clientY - document.getElementById('canvas').offsetTop);
// move the image by the amount of the latest drag
var dx = mouseX - startX;
var dy = mouseY - startY;
imageX += dx;
imageY += dy;
imageRight += dx;
imageBottom += dy;
// reset the startXY for next time
startX = mouseX;
startY = mouseY;
// redraw the image with border
draw(false, true);
}
}
}
function downloadTables() {
var vecTables = document.getElementById('tables').children;
var strTextFinal = '';
for (var i = 0; i < vecTables.length; i++) {
var vecRows = vecTables[i].children[0].children;
for (var j = 0; j < vecRows.length; j++) {
strTextFinal = strTextFinal+vecRows[j].children[0].textContent+','+vecRows[j].children[1].textContent+'\n';
}
}
var blob = new Blob([strTextFinal], { type: "text/plain;charset=utf-8" });
saveAs(blob, "chartdigitizer.txt");
}
$("#canvas").mousedown(function(e) {
handleMouseDown(e);
});
$("#canvas").mousemove(function(e) {
handleMouseMove(e);
});
$("#canvas").mouseup(function(e) {
handleMouseUp(e);
});
$("#canvas").mouseout(function(e) {
handleMouseOut(e);
});
| {
return (2);
} | conditional_block |
app.js | var canvas = document.getElementById("canvas");
var ctx = canvas.getContext("2d");
var offsetX = document.getElementById('canvas').offsetLeft;
var offsetY = document.getElementById('canvas').offsetTop;
var xPadding = 50;
var yPadding = 50;
var canvasWidth = 600;
var canvasHeight = 600;
canvas.style.width = '100%';
canvas.style.height = '100%';
// ...then set the internal size to match
canvas.width = canvas.offsetWidth;
canvas.height = canvas.offsetHeight;
ctx.canvas.width = canvasWidth;
ctx.canvas.height = canvasHeight;
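// Note: ctx.canvas is this same element, so the drawing buffer ends up fixed at
// canvasWidth x canvasHeight while CSS stretches the element to 100% of its container;
// the canvas2realX/Y helpers below convert between buffer and on-screen pixels.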
var startX;
var startY;
var numMaxX = 24;
var numMaxY = 100;
var numMinX = 0;
var numMinY = 0;
var numStepX = 1;
var numStepY = 10;
var flagPin = false;
var isDown = false;
var pi2 = Math.PI * 2;
var resizerRadius = 8;
var rr = resizerRadius * resizerRadius;
var draggingResizer = {
x: 0,
y: 0
};
var imageX = 50;
var imageY = 50;
var tableGlobal;
// var numXscale = canvas.clientWidth / canvas.width;
// var numYscale = canvas.clientHeight / canvas.height;
var imageWidth, imageHeight, imageRight, imageBottom;
var draggingImage = false;
var startX;
var startY;
// var img = new Image();
// img.onload = function () {
// imageWidth = img.width;
// imageHeight = img.height;
// imageRight = imageX + imageWidth;
// imageBottom = imageY + imageHeight;
// draw(true, false);
// };
// img.src = "https://dl.dropboxusercontent.com/u/139992952/stackoverflow/facesSmall.png";
var imagDrop = document.createElement("img");
imagDrop.addEventListener("load", function() {
//clearCanvas();
imageWidth = imagDrop.width;
imageHeight = imagDrop.height;
imageRight = imageX + imageWidth;
imageBottom = imageY + imageHeight;
draw(true, false);
console.log("test");
//ctx.drawImage(imagDrop, 0, 0);
}, false);
// To enable drag and drop
canvas.addEventListener("dragover", function(evt) {
evt.preventDefault();
}, false);
// Handle dropped image file - only Firefox and Google Chrome
canvas.addEventListener("drop", function(evt) {
var files = evt.dataTransfer.files;
if (files.length > 0) {
var file = files[0];
if (typeof FileReader !== "undefined" && file.type.indexOf("image") != -1) {
var reader = new FileReader();
// Note: addEventListener doesn't work in Google Chrome for this event
reader.onload = function(evt) {
imagDrop.src = evt.target.result; | };
reader.readAsDataURL(file);
}
}
evt.preventDefault();
}, false);
updateAxis();
addTable();
function togglePin() {
if (flagPin) {
document.getElementById('idButtonPin').innerHTML = 'Start pinning';
flagPin = false;
} else {
document.getElementById('idButtonPin').innerHTML = 'Stop pinning';
flagPin = true;
}
}
function addTable() {
var newTable = document.createElement('table');
newTable.setAttribute("style", "float:left;position:relative");
var header = newTable.createTHead();
var row = header.insertRow(0);
var cell = row.insertCell(0);
cell.innerHTML = "X";
var cell = row.insertCell(1);
cell.innerHTML = "Y";
document.getElementById('tables').appendChild(newTable);
tableGlobal = newTable;
}
function canvas2realX(numX) {
return numX*canvas.clientWidth / canvas.width;
}
function canvas2realY(numY) {
return numY*canvas.clientHeight / canvas.height;
}
function drawAxis() {
ctx.lineWidth = 3;
ctx.strokeStyle = '#333';
ctx.font = 'italic 8pt sans-serif';
ctx.textAlign = "center";
// Draw the axes
ctx.beginPath();
ctx.moveTo(xPadding, canvas.height - yPadding);
ctx.lineTo(xPadding, 0);
ctx.stroke();
ctx.beginPath();
ctx.moveTo(xPadding, canvas.height - yPadding);
ctx.lineTo(canvas.width, canvas.height - yPadding);
ctx.stroke();
// Draw the X value labels and vertical grid lines
ctx.lineWidth = 1;
for (var i = 0; i <= numMaxX - numMinX; i += numStepX) {
ctx.fillText(i, getXPixel(i), canvas.height - yPadding + 20);
ctx.beginPath();
ctx.moveTo(getXPixel(i), canvas.height - yPadding);
ctx.lineTo(getXPixel(i), 0);
ctx.stroke();
}
ctx.textAlign = "right";
ctx.textBaseline = "middle";
for (var i = 0; i <= numMaxY - numMinY; i += numStepY) {
ctx.fillText(i, xPadding - 10, getYPixel(i));
ctx.beginPath();
ctx.moveTo(xPadding, getYPixel(i));
ctx.lineTo(canvas.width, getYPixel(i));
ctx.stroke();
}
ctx.strokeStyle = '#f00';
}
function updateAxis() {
numMaxX = parseInt(document.getElementById('MaxX').value);
numMaxY = parseInt(document.getElementById('MaxY').value);
numStepX = parseInt(document.getElementById('StepX').value);
numStepY = parseInt(document.getElementById('StepY').value);
ctx.clearRect(0, 0, canvas.width, canvas.height);
drawAxis();
}
function getXPixel(val) {
return ((canvas.width - 2 * xPadding) / numMaxX) * val + (xPadding);
}
// Return the y pixel for a graph point
function getYPixel(val) {
return canvas.height - (((canvas.height - 2 * yPadding) / numMaxY) * val) - yPadding;
}
function draw(withAnchors, withBorders) {
// clear the canvas
ctx.clearRect(0, 0, canvas.width, canvas.height);
updateAxis();
// draw the image
ctx.globalAlpha = 0.4;
ctx.drawImage(imagDrop, 0, 0, imagDrop.width, imagDrop.height, imageX, imageY, imageWidth, imageHeight);
// optionally draw the draggable anchors
if (withAnchors) {
drawDragAnchor(imageX, imageY);
drawDragAnchor(imageRight, imageY);
drawDragAnchor(imageRight, imageBottom);
drawDragAnchor(imageX, imageBottom);
}
// optionally draw the connecting anchor lines
if (withBorders) {
ctx.beginPath();
ctx.moveTo(imageX, imageY);
ctx.lineTo(imageRight, imageY);
ctx.lineTo(imageRight, imageBottom);
ctx.lineTo(imageX, imageBottom);
ctx.closePath();
ctx.stroke();
}
}
function drawDragAnchor(x, y) {
ctx.beginPath();
ctx.arc(x, y, resizerRadius, 0, pi2, false);
ctx.closePath();
ctx.fill();
}
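// Hit-test the four resize anchors (in on-screen pixels); returns 0-3 for
// top-left, top-right, bottom-right, bottom-left, or -1 if no anchor was hit.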
function anchorHitTest(x, y) {
var dx, dy;
// top-left
dx = x - canvas2realX(imageX);
dy = y - canvas2realY(imageY);
if (dx * dx + dy * dy <= rr) {
return (0);
}
// top-right
dx = x - canvas2realX(imageRight);
dy = y - canvas2realY(imageY);
console.log(dx.toString()+' '+dy.toString());
if (dx * dx + dy * dy <= rr) {
return (1);
}
// bottom-right
dx = x - canvas2realX(imageRight);
dy = y - canvas2realY(imageBottom);
if (dx * dx + dy * dy <= rr) {
return (2);
}
// bottom-left
dx = x - canvas2realX(imageX);
dy = y - canvas2realY(imageBottom);
if (dx * dx + dy * dy <= rr) {
return (3);
}
return (-1);
}
function hitImage(x, y) {
return (x > canvas2realX(imageX) && x < canvas2realX(imageX + imageWidth) && y > canvas2realY(imageY) && y < canvas2realY(imageY + imageHeight));
}
function handleMouseDown(e) {
if (!flagPin) {
startX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft);
startY = parseInt(e.clientY - document.getElementById('canvas').offsetTop);
console.log(startX.toString()+'_'+startY.toString());
draggingResizer = anchorHitTest(startX, startY);
draggingImage = draggingResizer < 0 && hitImage(startX, startY);
} else {
var row = tableGlobal.insertRow(tableGlobal.rows.length);
var cell1 = row.insertCell(0);
var cell2 = row.insertCell(1);
cell2.innerHTML = Math.round((canvas.clientHeight - parseInt(e.clientY - document.getElementById('canvas').offsetTop) - canvas2realY(yPadding)) / ((canvas.clientHeight - 2 * canvas2realY(yPadding)) / numMaxY));
cell1.innerHTML = Math.round((parseInt(e.clientX - document.getElementById('canvas').offsetLeft) - canvas2realX(xPadding)) / ((canvas.clientWidth - 2 * canvas2realX(xPadding)) / numMaxX));
}
}
function handleMouseUp(e) {
if (!flagPin) {
draggingResizer = -1;
draggingImage = false;
draw(true, false);
}
}
function handleMouseOut(e) {
if (!flagPin) {
handleMouseUp(e);
}
}
function handleMouseMove(e) {
if (!flagPin) {
if (draggingResizer > -1) {
mouseX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft)*canvas.width/canvas.clientWidth;
mouseY = parseInt(e.clientY - document.getElementById('canvas').offsetTop)*canvas.height/canvas.clientHeight;
console.log(mouseX.toString()+','+mouseY.toString());
// resize the image
switch (draggingResizer) {
case 0:
//top-left
imageX = mouseX;
imageWidth = imageRight - mouseX;
imageY = mouseY;
imageHeight = imageBottom - mouseY;
break;
case 1:
//top-right
imageY = mouseY;
imageWidth = mouseX - imageX;
imageHeight = imageBottom - mouseY;
break;
case 2:
//bottom-right
imageWidth = mouseX - imageX;
imageHeight = mouseY - imageY;
break;
case 3:
//bottom-left
imageX = mouseX;
imageWidth = imageRight - mouseX;
imageHeight = mouseY - imageY;
break;
}
if (imageWidth < 25) { imageWidth = 25; }
if (imageHeight < 25) { imageHeight = 25; }
// set the image right and bottom
imageRight = imageX + imageWidth;
imageBottom = imageY + imageHeight;
// redraw the image with resizing anchors
draw(true, true);
} else if (draggingImage) {
imageClick = false;
mouseX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft);
mouseY = parseInt(e.clientY - document.getElementById('canvas').offsetTop);
// move the image by the amount of the latest drag
var dx = mouseX - startX;
var dy = mouseY - startY;
imageX += dx;
imageY += dy;
imageRight += dx;
imageBottom += dy;
// reset the startXY for next time
startX = mouseX;
startY = mouseY;
// redraw the image with border
draw(false, true);
}
}
}
function downloadTables() {
var vecTables = document.getElementById('tables').children;
var strTextFinal = '';
for (var i = 0; i < vecTables.length; i++) {
var vecRows = vecTables[i].children[0].children;
for (var j = 0; j < vecRows.length; j++) {
strTextFinal = strTextFinal+vecRows[j].children[0].textContent+','+vecRows[j].children[1].textContent+'\n';
}
}
var blob = new Blob([strTextFinal], { type: "text/plain;charset=utf-8" });
saveAs(blob, "chartdigitizer.txt");
}
$("#canvas").mousedown(function(e) {
handleMouseDown(e);
});
$("#canvas").mousemove(function(e) {
handleMouseMove(e);
});
$("#canvas").mouseup(function(e) {
handleMouseUp(e);
});
$("#canvas").mouseout(function(e) {
handleMouseOut(e);
}); | random_line_split |
|
app.js | var canvas = document.getElementById("canvas");
var ctx = canvas.getContext("2d");
var offsetX = document.getElementById('canvas').offsetLeft;
var offsetY = document.getElementById('canvas').offsetTop;
var xPadding = 50;
var yPadding = 50;
var canvasWidth = 600;
var canvasHeight = 600;
canvas.style.width = '100%';
canvas.style.height = '100%';
// ...then set the internal size to match
canvas.width = canvas.offsetWidth;
canvas.height = canvas.offsetHeight;
ctx.canvas.width = canvasWidth;
ctx.canvas.height = canvasHeight;
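// Note: ctx.canvas is this same element, so the drawing buffer ends up fixed at
// canvasWidth x canvasHeight while CSS stretches the element to 100% of its container;
// the canvas2realX/Y helpers below convert between buffer and on-screen pixels.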
var startX;
var startY;
var numMaxX = 24;
var numMaxY = 100;
var numMinX = 0;
var numMinY = 0;
var numStepX = 1;
var numStepY = 10;
var flagPin = false;
var isDown = false;
var pi2 = Math.PI * 2;
var resizerRadius = 8;
var rr = resizerRadius * resizerRadius;
var draggingResizer = {
x: 0,
y: 0
};
var imageX = 50;
var imageY = 50;
var tableGlobal;
// var numXscale = canvas.clientWidth / canvas.width;
// var numYscale = canvas.clientHeight / canvas.height;
var imageWidth, imageHeight, imageRight, imageBottom;
var draggingImage = false;
var startX;
var startY;
// var img = new Image();
// img.onload = function () {
// imageWidth = img.width;
// imageHeight = img.height;
// imageRight = imageX + imageWidth;
// imageBottom = imageY + imageHeight;
// draw(true, false);
// };
// img.src = "https://dl.dropboxusercontent.com/u/139992952/stackoverflow/facesSmall.png";
var imagDrop = document.createElement("img");
imagDrop.addEventListener("load", function() {
//clearCanvas();
imageWidth = imagDrop.width;
imageHeight = imagDrop.height;
imageRight = imageX + imageWidth;
imageBottom = imageY + imageHeight;
draw(true, false);
console.log("test");
//ctx.drawImage(imagDrop, 0, 0);
}, false);
// To enable drag and drop
canvas.addEventListener("dragover", function(evt) {
evt.preventDefault();
}, false);
// Handle dropped image file - only Firefox and Google Chrome
canvas.addEventListener("drop", function(evt) {
var files = evt.dataTransfer.files;
if (files.length > 0) {
var file = files[0];
if (typeof FileReader !== "undefined" && file.type.indexOf("image") != -1) {
var reader = new FileReader();
// Note: addEventListener doesn't work in Google Chrome for this event
reader.onload = function(evt) {
imagDrop.src = evt.target.result;
};
reader.readAsDataURL(file);
}
}
evt.preventDefault();
}, false);
updateAxis();
addTable();
function togglePin() {
if (flagPin) {
document.getElementById('idButtonPin').innerHTML = 'Start pinning';
flagPin = false;
} else {
document.getElementById('idButtonPin').innerHTML = 'Stop pinning';
flagPin = true;
}
}
function addTable() {
var newTable = document.createElement('table');
newTable.setAttribute("style", "float:left;position:relative");
var header = newTable.createTHead();
var row = header.insertRow(0);
var cell = row.insertCell(0);
cell.innerHTML = "X";
var cell = row.insertCell(1);
cell.innerHTML = "Y";
document.getElementById('tables').appendChild(newTable);
tableGlobal = newTable;
}
function canvas2realX(numX) {
return numX*canvas.clientWidth / canvas.width;
}
function canvas2realY(numY) {
return numY*canvas.clientHeight / canvas.height;
}
function drawAxis() {
ctx.lineWidth = 3;
ctx.strokeStyle = '#333';
ctx.font = 'italic 8pt sans-serif';
ctx.textAlign = "center";
// Draw the axes
ctx.beginPath();
ctx.moveTo(xPadding, canvas.height - yPadding);
ctx.lineTo(xPadding, 0);
ctx.stroke();
ctx.beginPath();
ctx.moveTo(xPadding, canvas.height - yPadding);
ctx.lineTo(canvas.width, canvas.height - yPadding);
ctx.stroke();
// Draw the X value labels and vertical grid lines
ctx.lineWidth = 1;
for (var i = 0; i <= numMaxX - numMinX; i += numStepX) {
ctx.fillText(i, getXPixel(i), canvas.height - yPadding + 20);
ctx.beginPath();
ctx.moveTo(getXPixel(i), canvas.height - yPadding);
ctx.lineTo(getXPixel(i), 0);
ctx.stroke();
}
ctx.textAlign = "right";
ctx.textBaseline = "middle";
for (var i = 0; i <= numMaxY - numMinY; i += numStepY) {
ctx.fillText(i, xPadding - 10, getYPixel(i));
ctx.beginPath();
ctx.moveTo(xPadding, getYPixel(i));
ctx.lineTo(canvas.width, getYPixel(i));
ctx.stroke();
}
ctx.strokeStyle = '#f00';
}
function updateAxis() {
numMaxX = parseInt(document.getElementById('MaxX').value);
numMaxY = parseInt(document.getElementById('MaxY').value);
numStepX = parseInt(document.getElementById('StepX').value);
numStepY = parseInt(document.getElementById('StepY').value);
ctx.clearRect(0, 0, canvas.width, canvas.height);
drawAxis();
}
function getXPixel(val) {
return ((canvas.width - 2 * xPadding) / numMaxX) * val + (xPadding);
}
// Return the y pixel for a graph point
function getYPixel(val) {
return canvas.height - (((canvas.height - 2 * yPadding) / numMaxY) * val) - yPadding;
}
function | (withAnchors, withBorders) {
// clear the canvas
ctx.clearRect(0, 0, canvas.width, canvas.height);
updateAxis();
// draw the image
ctx.globalAlpha = 0.4;
ctx.drawImage(imagDrop, 0, 0, imagDrop.width, imagDrop.height, imageX, imageY, imageWidth, imageHeight);
// optionally draw the draggable anchors
if (withAnchors) {
drawDragAnchor(imageX, imageY);
drawDragAnchor(imageRight, imageY);
drawDragAnchor(imageRight, imageBottom);
drawDragAnchor(imageX, imageBottom);
}
// optionally draw the connecting anchor lines
if (withBorders) {
ctx.beginPath();
ctx.moveTo(imageX, imageY);
ctx.lineTo(imageRight, imageY);
ctx.lineTo(imageRight, imageBottom);
ctx.lineTo(imageX, imageBottom);
ctx.closePath();
ctx.stroke();
}
}
function drawDragAnchor(x, y) {
ctx.beginPath();
ctx.arc(x, y, resizerRadius, 0, pi2, false);
ctx.closePath();
ctx.fill();
}
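// Hit-test the four resize anchors (in on-screen pixels); returns 0-3 for
// top-left, top-right, bottom-right, bottom-left, or -1 if no anchor was hit.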
function anchorHitTest(x, y) {
var dx, dy;
// top-left
dx = x - canvas2realX(imageX);
dy = y - canvas2realY(imageY);
if (dx * dx + dy * dy <= rr) {
return (0);
}
// top-right
dx = x - canvas2realX(imageRight);
dy = y - canvas2realY(imageY);
console.log(dx.toString()+' '+dy.toString());
if (dx * dx + dy * dy <= rr) {
return (1);
}
// bottom-right
dx = x - canvas2realX(imageRight);
dy = y - canvas2realY(imageBottom);
if (dx * dx + dy * dy <= rr) {
return (2);
}
// bottom-left
dx = x - canvas2realX(imageX);
dy = y - canvas2realY(imageBottom);
if (dx * dx + dy * dy <= rr) {
return (3);
}
return (-1);
}
function hitImage(x, y) {
return (x > canvas2realX(imageX) && x < canvas2realX(imageX + imageWidth) && y > canvas2realY(imageY) && y < canvas2realY(imageY + imageHeight));
}
function handleMouseDown(e) {
if (!flagPin) {
startX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft);
startY = parseInt(e.clientY - document.getElementById('canvas').offsetTop);
console.log(startX.toString()+'_'+startY.toString());
draggingResizer = anchorHitTest(startX, startY);
draggingImage = draggingResizer < 0 && hitImage(startX, startY);
} else {
var row = tableGlobal.insertRow(tableGlobal.rows.length);
var cell1 = row.insertCell(0);
var cell2 = row.insertCell(1);
cell2.innerHTML = Math.round((canvas.clientHeight - parseInt(e.clientY - document.getElementById('canvas').offsetTop) - canvas2realY(yPadding)) / ((canvas.clientHeight - 2 * canvas2realY(yPadding)) / numMaxY));
cell1.innerHTML = Math.round((parseInt(e.clientX - document.getElementById('canvas').offsetLeft) - canvas2realX(xPadding)) / ((canvas.clientWidth - 2 * canvas2realX(xPadding)) / numMaxX));
}
}
function handleMouseUp(e) {
if (!flagPin) {
draggingResizer = -1;
draggingImage = false;
draw(true, false);
}
}
function handleMouseOut(e) {
if (!flagPin) {
handleMouseUp(e);
}
}
function handleMouseMove(e) {
if (!flagPin) {
if (draggingResizer > -1) {
mouseX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft)*canvas.width/canvas.clientWidth;
mouseY = parseInt(e.clientY - document.getElementById('canvas').offsetTop)*canvas.height/canvas.clientHeight;
console.log(mouseX.toString()+','+mouseY.toString());
// resize the image
switch (draggingResizer) {
case 0:
//top-left
imageX = mouseX;
imageWidth = imageRight - mouseX;
imageY = mouseY;
imageHeight = imageBottom - mouseY;
break;
case 1:
//top-right
imageY = mouseY;
imageWidth = mouseX - imageX;
imageHeight = imageBottom - mouseY;
break;
case 2:
//bottom-right
imageWidth = mouseX - imageX;
imageHeight = mouseY - imageY;
break;
case 3:
//bottom-left
imageX = mouseX;
imageWidth = imageRight - mouseX;
imageHeight = mouseY - imageY;
break;
}
if (imageWidth < 25) { imageWidth = 25; }
if (imageHeight < 25) { imageHeight = 25; }
// set the image right and bottom
imageRight = imageX + imageWidth;
imageBottom = imageY + imageHeight;
// redraw the image with resizing anchors
draw(true, true);
} else if (draggingImage) {
imageClick = false;
mouseX = parseInt(e.clientX - document.getElementById('canvas').offsetLeft);
mouseY = parseInt(e.clientY - document.getElementById('canvas').offsetTop);
// move the image by the amount of the latest drag
var dx = mouseX - startX;
var dy = mouseY - startY;
imageX += dx;
imageY += dy;
imageRight += dx;
imageBottom += dy;
// reset the startXY for next time
startX = mouseX;
startY = mouseY;
// redraw the image with border
draw(false, true);
}
}
}
function downloadTables() {
var vecTables = document.getElementById('tables').children;
var strTextFinal = '';
for (var i = 0; i < vecTables.length; i++) {
var vecRows = vecTables[i].children[0].children;
for (var j = 0; j < vecRows.length; j++) {
strTextFinal = strTextFinal+vecRows[j].children[0].textContent+','+vecRows[j].children[1].textContent+'\n';
}
}
var blob = new Blob([strTextFinal], { type: "text/plain;charset=utf-8" });
saveAs(blob, "chartdigitizer.txt");
}
$("#canvas").mousedown(function(e) {
handleMouseDown(e);
});
$("#canvas").mousemove(function(e) {
handleMouseMove(e);
});
$("#canvas").mouseup(function(e) {
handleMouseUp(e);
});
$("#canvas").mouseout(function(e) {
handleMouseOut(e);
});
| draw | identifier_name |
Z_normal_8_2.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt,mpld3
from collections import defaultdict
from sklearn.metrics.pairwise import euclidean_distances
from flask import Flask, render_template, request
import math
import itertools
"""------------- Intialization ------------- """
y_alphabet_size=4
word_lenth=3
window_size=10
skip_offset=5
ham_distance=1
epsilon = 1e-6
"""------------- import Data -------------"""
"""
file_name='test_data2.csv'
data2 = pd.read_csv(file_name, sep=',', header=None)
x1 = data2.iloc[1:,1].values.flatten()
x1 = x1.astype(np.float)
"""
data = pd.read_csv('car_sales.csv', sep=',', header=None)
x1 = data.iloc[1:,1].values.flatten()
x1=np.asfarray(x1,float)
#os.remove("./Output/sliding_half_segment/")
"""------------- Helper Functions ------------- """
def split(arr, size):
arrs = []
while len(arr) > size:
pice = arr[:size]
arrs.append(pice)
arr = arr[size:]
arrs.append(arr)
return arrs
"""------------- Y-axis Distribution ------------- """
def break_points_gaussian(size):
options = {
3: np.array([ -0.43, 0.43]),
4: np.array([ -0.67, 0, 0.67]),
5: np.array([ -0.84, -0.25, 0.25, 0.84]),
6: np.array([ -0.97, -0.43, 0, 0.43, 0.97]),
7: np.array([ -1.07, -0.57, -0.18, 0.18, 0.57, 1.07]),
8: np.array([ -1.15, -0.67, -0.32, 0, 0.32, 0.67, 1.15]),
9: np.array([ -1.22, -0.76, -0.43, -0.14, 0.14, 0.43, 0.76, 1.22]),
10: np.array([ -1.28, -0.84, -0.52, -0.25, 0, 0.25, 0.52, 0.84, 1.28]),
11: np.array([ -1.34, -0.91, -0.6, -0.35, -0.11, 0.11, 0.35, 0.6, 0.91, 1.34]),
12: np.array([ -1.38, -0.97, -0.67, -0.43, -0.21, 0, 0.21, 0.43, 0.67, 0.97, 1.38]),
13: np.array([ -1.43, -1.02, -0.74, -0.5, -0.29, -0.1, 0.1, 0.29, 0.5, 0.74, 1.02, 1.43]),
14: np.array([ -1.47, -1.07, -0.79, -0.57, -0.37, -0.18, 0, 0.18, 0.37, 0.57, 0.79, 1.07, 1.47]),
15: np.array([ -1.5, -1.11, -0.84, -0.62, -0.43, -0.25, -0.08, 0.08, 0.25, 0.43, 0.62, 0.84, 1.11, 1.5]),
16: np.array([ -1.53, -1.15, -0.89, -0.67, -0.49, -0.32, -0.16, 0, 0.16, 0.32, 0.49, 0.67, 0.89, 1.15, 1.53]),
17: np.array([ -1.56, -1.19, -0.93, -0.72, -0.54, -0.38, -0.22, -0.07, 0.07, 0.22, 0.38, 0.54, 0.72, 0.93, 1.19, 1.56]),
18: np.array([ -1.59, -1.22, -0.97, -0.76, -0.59, -0.43, -0.28, -0.14, 0, 0.14, 0.28, 0.43, 0.59, 0.76, 0.97, 1.22, 1.59]),
19: np.array([ -1.62, -1.25, -1, -0.8, -0.63, -0.48, -0.34, -0.2, -0.07, 0.07, 0.2, 0.34, 0.48, 0.63, 0.8, 1, 1.25, 1.62]),
20: np.array([ -1.64, -1.28, -1.04, -0.84, -0.67, -0.52, -0.39, -0.25, -0.13, 0, 0.13, 0.25, 0.39, 0.52, 0.67, 0.84, 1.04, 1.28, 1.64]),
}
return options[size]
def break_points_quantiles(size):
options=np.linspace(0, 1, size+1)[1:]
return options
#y_alphabets = break_points_quantiles(y_alphabet_size).tolist()
y_alphabets = break_points_gaussian(y_alphabet_size).tolist()
def hamming_distance1(string1, string2):
distance = 0
L = len(string1)
for i in range(L):
if string1[i] != string2[i]:
distance += 1
return distance
def hamming_distance(s1, s2):
if len(s1) != len(s2):
raise ValueError("Undefined for sequences of unequal length")
return sum(el1 != el2 for el1, el2 in zip(s1, s2))
"""------------- X-axis Distribution ------------- """
def x_distrubted_values(series):
mean=np.mean(series)
#median=sorted(series)[len(series) // 2]
return mean
"""------------- Index to Letter conversion ------------- """
def index_to_letter(idx):
"""Convert a numerical index to a char."""
if 0 <= idx < 20:
return chr(97 + idx)
else:
raise ValueError('A wrong idx value supplied.')
def normalize(x):
X = np.asanyarray(x)
if np.nanstd(X) < epsilon:
res = []
for entry in X:
if not np.isnan(entry):
res.append(0)
else:
res.append(np.nan)
return res
return (X - np.nanmean(X)) / np.nanstd(X)
def normal_distribution(x):
x = (x-min(x))/(max(x)-min(x))
return x
"""------------- 1- Normalize Data ------------- """
x1=normalize(x1)
plt.plot(x1)
plt.show()
"""------------- 5.2- Y_Alphabetize ------------- """
def alphabetize_ts(sub_section):
mean_val=x_distrubted_values(sub_section)
y_alpha_val=min(y_alphabets, key=lambda x:abs(x-mean_val))
y_alpha_idx=y_alphabets.index(y_alpha_val)
curr_word = index_to_letter(y_alpha_idx)
return(curr_word)
"""------------- 2- Segmentization Data ------------- """
def segment_ts(series,windowSize=window_size,skip_offset=skip_offset):
ts_len=len(series)
mod = ts_len%windowSize
rnge=0
if(skip_offset==0):
ts_len=int((ts_len-mod-window_size)/1)
rnge=int(ts_len/window_size)
else:
ts_len=int(math.ceil((ts_len-mod-window_size)/skip_offset))
rnge=int(ts_len)
curr_count=0
words=list()
indices=list()
complete_indices=list()
for i in range(0, rnge):
sub_section = series[curr_count:(curr_count+windowSize)]
sub_section=normalize(sub_section)
#print(curr_count,(curr_count+windowSize))
#print(sub_section)
curr_word=""
chunk_size=int(len(sub_section)/word_lenth)
num=0
zlp=""
| num+=chunk_size
words.append(zlp)
indices.append(curr_count)
curr_count=curr_count+skip_offset-1
temp_list=[]
temp_list.append(sub_section)
temp_df = pd.DataFrame(temp_list)
temp_df.insert(loc=0, column='keys', value=zlp)
temp_df.insert(loc=1, column='position', value=sorted(sub_section)[len(sub_section) // 2])
temp_df.insert(loc=2, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=3, column='scale_low', value=np.min(sub_section))
if(i==0):
df_sax =temp_df.copy()
else:
df_sax=df_sax.append(temp_df, ignore_index=True)
return (words,indices,df_sax)
alphabetize11,indices,df_sax=segment_ts(x1)
"""------------- SAX ------------- """
""" Complete Words """
def complete_word(series=x1,word_len=word_lenth,skip_len=skip_offset):
alphabetize,indices,df_sax=segment_ts(series)
complete_word=list()
complete_indices=indices
""" Simillar Words """
complete_word=alphabetize
sax = defaultdict(list)
for i in range(0,len(complete_word)):
if(len(complete_word[i])==word_lenth):
sax[complete_word[i]].append(complete_indices[i])
return sax
simillar_word=complete_word()
"""------------- Compare Shape Algorithm ------------- """
def Compare_Shape():
simillar_word=complete_word()
map_keys = defaultdict(list)
map_indices=defaultdict(list)
for key_i in simillar_word:
temp_list=list()
temp_list.append(simillar_word.get(key_i))
for key_j in simillar_word:
dist=hamming_distance(key_i, key_j)
if(dist==ham_distance and key_i !=key_j):
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
tempp=list()
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return (map_keys,map_indices)
comapre=Compare_Shape()
"""------------- Visualization ------------- """
def visualize(data,alph_size,lent,key):
row=int(lent/4)
print(key)
if(lent > 4):
fig = plt.figure(figsize=(4*row, 5*row))
#ax.set_ylim(-2.5,2.5)
for i in range(0,lent):
slice_range=slice(i*alph_size,(i+1)*alph_size)
nData=data[slice_range]
fig.add_subplot(row+1, 4,i+1 )
plt.plot(nData)
else:
fig = plt.figure(figsize=(3*3, 4*3))
for i in range(0,lent):
slice_range=slice(i*alph_size,(i+1)*alph_size)
nData=data[slice_range]
fig.add_subplot(5, 2,i+1 )
plt.plot(nData)
#plt.savefig('./Output/sliding_half_segment/'+key+'.png')
#plt.savefig('books_read.png')
plt.show()
def prep_visualize ():
i=0
simillar_word=complete_word()
sax_keys =list(simillar_word.keys())
sax_values =list(simillar_word.values())
for n_val in sax_values:
key=sax_keys[i]
x2= list();
for n1_val in n_val:
alpha_count=0
while (alpha_count < window_size):
x2.append(x1[n1_val+alpha_count])
alpha_count=alpha_count+1
visualize(x2,window_size,len(n_val ),key)
i=i+1
def prep_visualize1 ():
compare_keys,compare_indices = Compare_Shape()
sax_keys =list(compare_indices.keys())
sax_values =list(compare_indices.values())
for i in range(len(sax_values)):
key=sax_keys[i]
x2= list();
for n_val in sax_values[i][0]:
alpha_count=0
while (alpha_count < window_size):
x2.append(x1[n_val+alpha_count])
alpha_count=alpha_count+1
visualize(x2,window_size,len(sax_values[i][0]),key)
"""------------- Matrix ------------- """
def matrix_calculation (df,key):
df_temp = df.drop(columns=[ 'indexx','simillar_key'])
width=len(df)
s = (width,width)
mat = np.zeros(s)
if(width>=3):
for i in range(len(df)):
for j in range(len(df)):
row1= df_temp.iloc[[i]].values[0]
row2= df_temp.iloc[[j]].values[0]
dist= row1-row2
mat[i][j]=(dist)
dist_array = np.triu(mat, 0)
print(key)
print(dist_array)
alphabetize,indices,feat_vector=segment_ts(x1)
def matrix_prep ():
alphabetize,indices,feat_vector=segment_ts(x1)
compare_keys,compare_indices = Compare_Shape()
sax_keys =list(compare_keys.keys())
sax_values =list(compare_keys.values())
i=0
for n_val in sax_values:
key=sax_keys[i]
temp_df = pd.DataFrame()
index_list=list()
position_list=list()
simillar_key_list=list()
for n1_val in n_val:
print(n1_val)
for index, row in feat_vector.iterrows():
if(row['keys']==n1_val):
# print(row['position'],index)
index_list.append(index)
position_list.append(row['position'])
simillar_key_list.append(n1_val)
temp_df['indexx']=index_list
temp_df['position']=position_list
temp_df['simillar_key']=simillar_key_list
matrix_calculation(temp_df,key)
i=i+1
print("===========================Before Compare Shape============================")
#prep_visualize()
print("===========================After Compare Shape============================")
#prep_visualize1 ()
print("===========================Position Matrix ============================")
matrix_prep() | for j in range(0,word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_word=alphabetize_ts(chunk)
zlp+=str(curr_word)
complete_indices.append(curr_count)
| random_line_split |
Z_normal_8_2.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt,mpld3
from collections import defaultdict
from sklearn.metrics.pairwise import euclidean_distances
from flask import Flask, render_template, request
import math
import itertools
"""------------- Intialization ------------- """
y_alphabet_size=4
word_lenth=3
window_size=10
skip_offset=5
ham_distance=1
epsilon = 1e-6
"""------------- import Data -------------"""
"""
file_name='test_data2.csv'
data2 = pd.read_csv(file_name, sep=',', header=None)
x1 = data2.iloc[1:,1].values.flatten()
x1 = x1.astype(np.float)
"""
data = pd.read_csv('car_sales.csv', sep=',', header=None)
x1 = data.iloc[1:,1].values.flatten()
x1=np.asfarray(x1,float)
#os.remove("./Output/sliding_half_segment/")
"""------------- Helper Functions ------------- """
def split(arr, size):
arrs = []
while len(arr) > size:
pice = arr[:size]
arrs.append(pice)
arr = arr[size:]
arrs.append(arr)
return arrs
"""------------- Y-axis Distribution ------------- """
def break_points_gaussian(size):
options = {
3: np.array([ -0.43, 0.43]),
4: np.array([ -0.67, 0, 0.67]),
5: np.array([ -0.84, -0.25, 0.25, 0.84]),
6: np.array([ -0.97, -0.43, 0, 0.43, 0.97]),
7: np.array([ -1.07, -0.57, -0.18, 0.18, 0.57, 1.07]),
8: np.array([ -1.15, -0.67, -0.32, 0, 0.32, 0.67, 1.15]),
9: np.array([ -1.22, -0.76, -0.43, -0.14, 0.14, 0.43, 0.76, 1.22]),
10: np.array([ -1.28, -0.84, -0.52, -0.25, 0, 0.25, 0.52, 0.84, 1.28]),
11: np.array([ -1.34, -0.91, -0.6, -0.35, -0.11, 0.11, 0.35, 0.6, 0.91, 1.34]),
12: np.array([ -1.38, -0.97, -0.67, -0.43, -0.21, 0, 0.21, 0.43, 0.67, 0.97, 1.38]),
13: np.array([ -1.43, -1.02, -0.74, -0.5, -0.29, -0.1, 0.1, 0.29, 0.5, 0.74, 1.02, 1.43]),
14: np.array([ -1.47, -1.07, -0.79, -0.57, -0.37, -0.18, 0, 0.18, 0.37, 0.57, 0.79, 1.07, 1.47]),
15: np.array([ -1.5, -1.11, -0.84, -0.62, -0.43, -0.25, -0.08, 0.08, 0.25, 0.43, 0.62, 0.84, 1.11, 1.5]),
16: np.array([ -1.53, -1.15, -0.89, -0.67, -0.49, -0.32, -0.16, 0, 0.16, 0.32, 0.49, 0.67, 0.89, 1.15, 1.53]),
17: np.array([ -1.56, -1.19, -0.93, -0.72, -0.54, -0.38, -0.22, -0.07, 0.07, 0.22, 0.38, 0.54, 0.72, 0.93, 1.19, 1.56]),
18: np.array([ -1.59, -1.22, -0.97, -0.76, -0.59, -0.43, -0.28, -0.14, 0, 0.14, 0.28, 0.43, 0.59, 0.76, 0.97, 1.22, 1.59]),
19: np.array([ -1.62, -1.25, -1, -0.8, -0.63, -0.48, -0.34, -0.2, -0.07, 0.07, 0.2, 0.34, 0.48, 0.63, 0.8, 1, 1.25, 1.62]),
20: np.array([ -1.64, -1.28, -1.04, -0.84, -0.67, -0.52, -0.39, -0.25, -0.13, 0, 0.13, 0.25, 0.39, 0.52, 0.67, 0.84, 1.04, 1.28, 1.64]),
}
return options[size]
def break_points_quantiles(size):
options=np.linspace(0, 1, size+1)[1:]
return options
#y_alphabets = break_points_quantiles(y_alphabet_size).tolist()
y_alphabets = break_points_gaussian(y_alphabet_size).tolist()
def hamming_distance1(string1, string2):
distance = 0
L = len(string1)
for i in range(L):
if string1[i] != string2[i]:
distance += 1
return distance
def hamming_distance(s1, s2):
if len(s1) != len(s2):
raise ValueError("Undefined for sequences of unequal length")
return sum(el1 != el2 for el1, el2 in zip(s1, s2))
"""------------- X-axis Distribution ------------- """
def x_distrubted_values(series):
mean=np.mean(series)
#median=sorted(series)[len(series) // 2]
return mean
"""------------- Index to Letter conversion ------------- """
def index_to_letter(idx):
"""Convert a numerical index to a char."""
if 0 <= idx < 20:
|
else:
raise ValueError('A wrong idx value supplied.')
def normalize(x):
X = np.asanyarray(x)
if np.nanstd(X) < epsilon:
res = []
for entry in X:
if not np.isnan(entry):
res.append(0)
else:
res.append(np.nan)
return res
return (X - np.nanmean(X)) / np.nanstd(X)
def normal_distribution(x):
x = (x-min(x))/(max(x)-min(x))
return x
"""------------- 1- Normalize Data ------------- """
x1=normalize(x1)
plt.plot(x1)
plt.show()
"""------------- 5.2- Y_Alphabetize ------------- """
def alphabetize_ts(sub_section):
mean_val=x_distrubted_values(sub_section)
y_alpha_val=min(y_alphabets, key=lambda x:abs(x-mean_val))
y_alpha_idx=y_alphabets.index(y_alpha_val)
curr_word = index_to_letter(y_alpha_idx)
return(curr_word)
"""------------- 2- Segmentization Data ------------- """
def segment_ts(series,windowSize=window_size,skip_offset=skip_offset):
ts_len=len(series)
mod = ts_len%windowSize
rnge=0
if(skip_offset==0):
ts_len=int((ts_len-mod-window_size)/1)
rnge=int(ts_len/window_size)
else:
ts_len=int(math.ceil((ts_len-mod-window_size)/skip_offset))
rnge=int(ts_len)
curr_count=0
words=list()
indices=list()
complete_indices=list()
for i in range(0, rnge):
sub_section = series[curr_count:(curr_count+windowSize)]
sub_section=normalize(sub_section)
#print(curr_count,(curr_count+windowSize))
#print(sub_section)
curr_word=""
chunk_size=int(len(sub_section)/word_lenth)
num=0
zlp=""
for j in range(0,word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_word=alphabetize_ts(chunk)
zlp+=str(curr_word)
complete_indices.append(curr_count)
num+=chunk_size
words.append(zlp)
indices.append(curr_count)
curr_count=curr_count+skip_offset-1
temp_list=[]
temp_list.append(sub_section)
temp_df = pd.DataFrame(temp_list)
temp_df.insert(loc=0, column='keys', value=zlp)
temp_df.insert(loc=1, column='position', value=sorted(sub_section)[len(sub_section) // 2])
temp_df.insert(loc=2, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=3, column='scale_low', value=np.min(sub_section))
if(i==0):
df_sax =temp_df.copy()
else:
df_sax=df_sax.append(temp_df, ignore_index=True)
return (words,indices,df_sax)
alphabetize11,indices,df_sax=segment_ts(x1)
"""------------- SAX ------------- """
""" Complete Words """
def complete_word(series=x1,word_len=word_lenth,skip_len=skip_offset):
alphabetize,indices,df_sax=segment_ts(series)
complete_word=list()
complete_indices=indices
""" Simillar Words """
complete_word=alphabetize
sax = defaultdict(list)
for i in range(0,len(complete_word)):
if(len(complete_word[i])==word_lenth):
sax[complete_word[i]].append(complete_indices[i])
return sax
simillar_word=complete_word()
"""------------- Compare Shape Algorithm ------------- """
def Compare_Shape():
simillar_word=complete_word()
map_keys = defaultdict(list)
map_indices=defaultdict(list)
for key_i in simillar_word:
temp_list=list()
temp_list.append(simillar_word.get(key_i))
for key_j in simillar_word:
dist=hamming_distance(key_i, key_j)
if(dist==ham_distance and key_i !=key_j):
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
tempp=list()
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return (map_keys,map_indices)
comapre=Compare_Shape()
"""------------- Visualization ------------- """
def visualize(data,alph_size,lent,key):
row=int(lent/4)
print(key)
if(lent > 4):
fig = plt.figure(figsize=(4*row, 5*row))
#ax.set_ylim(-2.5,2.5)
for i in range(0,lent):
slice_range=slice(i*alph_size,(i+1)*alph_size)
nData=data[slice_range]
fig.add_subplot(row+1, 4,i+1 )
plt.plot(nData)
else:
fig = plt.figure(figsize=(3*3, 4*3))
for i in range(0,lent):
slice_range=slice(i*alph_size,(i+1)*alph_size)
nData=data[slice_range]
fig.add_subplot(5, 2,i+1 )
plt.plot(nData)
#plt.savefig('./Output/sliding_half_segment/'+key+'.png')
#plt.savefig('books_read.png')
plt.show()
def prep_visualize ():
i=0
simillar_word=complete_word()
sax_keys =list(simillar_word.keys())
sax_values =list(simillar_word.values())
for n_val in sax_values:
key=sax_keys[i]
x2= list();
for n1_val in n_val:
alpha_count=0
while (alpha_count < window_size):
x2.append(x1[n1_val+alpha_count])
alpha_count=alpha_count+1
visualize(x2,window_size,len(n_val ),key)
i=i+1
def prep_visualize1 ():
compare_keys,compare_indices = Compare_Shape()
sax_keys =list(compare_indices.keys())
sax_values =list(compare_indices.values())
for i in range(len(sax_values)):
key=sax_keys[i]
x2= list();
for n_val in sax_values[i][0]:
alpha_count=0
while (alpha_count < window_size):
x2.append(x1[n_val+alpha_count])
alpha_count=alpha_count+1
visualize(x2,window_size,len(sax_values[i][0]),key)
"""------------- Matrix ------------- """
def matrix_calculation (df,key):
df_temp = df.drop(columns=[ 'indexx','simillar_key'])
width=len(df)
s = (width,width)
mat = np.zeros(s)
if(width>=3):
for i in range(len(df)):
for j in range(len(df)):
row1= df_temp.iloc[[i]].values[0]
row2= df_temp.iloc[[j]].values[0]
dist= row1-row2
mat[i][j]=(dist)
dist_array = np.triu(mat, 0)
print(key)
print(dist_array)
alphabetize,indices,feat_vector=segment_ts(x1)
def matrix_prep ():
alphabetize,indices,feat_vector=segment_ts(x1)
compare_keys,compare_indices = Compare_Shape()
sax_keys =list(compare_keys.keys())
sax_values =list(compare_keys.values())
i=0
for n_val in sax_values:
key=sax_keys[i]
temp_df = pd.DataFrame()
index_list=list()
position_list=list()
simillar_key_list=list()
for n1_val in n_val:
print(n1_val)
for index, row in feat_vector.iterrows():
if(row['keys']==n1_val):
# print(row['position'],index)
index_list.append(index)
position_list.append(row['position'])
simillar_key_list.append(n1_val)
temp_df['indexx']=index_list
temp_df['position']=position_list
temp_df['simillar_key']=simillar_key_list
matrix_calculation(temp_df,key)
i=i+1
print("===========================Before Compare Shape============================")
#prep_visualize()
print("===========================After Compare Shape============================")
#prep_visualize1 ()
print("===========================Position Matrix ============================")
matrix_prep()
| return chr(97 + idx) | conditional_block |