repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
UNSW-CEEM/NEMPRO
|
[
"04cc0a21a21fdadfee1bdeadb4540ddfab366399"
] |
[
"NEMPRO/historical_inputs.py"
] |
[
"from nemosis import data_fetch_methods, defaults\nimport pandas as pd\n\naemo_price_names = {'energy': 'RRP',\n 'raise_regulation': 'RAISEREGRRP',\n 'raise_6_second': 'RAISE6SECRRP',\n 'raise_60_second': 'RAISE60SECRRP',\n 'raise_5_minute': 'RAISE5MINRRP'}\n\n\ndef get_model_training_data(start_time, end_time, region, raw_data_cache):\n price_data = get_regional_prices(start_time, end_time, raw_data_cache)\n price_data = price_data.loc[:, ['SETTLEMENTDATE', '{}-energy'.format(region)]]\n demand_data = get_residual_demand(start_time, end_time, raw_data_cache)\n historical_data = pd.merge(price_data, demand_data, on='SETTLEMENTDATE')\n historical_data = historical_data.reset_index(drop=True)\n historical_data['interval'] = historical_data.index\n historical_data['hour'] = historical_data['SETTLEMENTDATE'].dt.hour\n historical_data = historical_data.drop(columns=['SETTLEMENTDATE'])\n return historical_data\n\n\ndef get_forward_data_for_forecast(start_time, end_time, raw_data_cache):\n demand_data = get_residual_demand(start_time, end_time, raw_data_cache)\n demand_data = demand_data.sort_values('SETTLEMENTDATE')\n demand_data = demand_data.reset_index(drop=True)\n forward_data = demand_data.copy()\n forward_data['interval'] = demand_data.index\n forward_data['hour'] = forward_data['SETTLEMENTDATE'].dt.hour\n forward_data = forward_data.drop(columns=['SETTLEMENTDATE'])\n return forward_data\n\n\ndef get_regional_prices(start_time, end_time, raw_data_cache):\n\n dispatch_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHPRICE', raw_data_cache,\n select_columns=['SETTLEMENTDATE', 'INTERVENTION',\n 'REGIONID', 'RRP', 'RAISEREGRRP',\n 'RAISE6SECRRP', 'RAISE60SECRRP',\n 'RAISE5MINRRP'])\n\n dispatch_data = dispatch_data[dispatch_data['INTERVENTION'] == 0]\n data = pd.DataFrame()\n for name, aemo_name in aemo_price_names.items():\n dispatch_data[aemo_name] = pd.to_numeric(dispatch_data[aemo_name])\n data_temp = dispatch_data.pivot_table(values=aemo_name, index='SETTLEMENTDATE', columns='REGIONID')\n data_temp = data_temp.reset_index().fillna('0.0')\n data_temp = data_temp.rename(columns={'QLD1': 'qld', 'NSW1': 'nsw', 'VIC1': 'vic', 'SA1': 'sa', 'TAS1': 'tas'})\n data_temp.columns = [col + '-' + name if col != 'SETTLEMENTDATE' else col for col in data_temp.columns]\n if data.empty:\n data = data_temp\n else:\n data = pd.merge(data, data_temp, on=['SETTLEMENTDATE'])\n\n return data\n\n\ndef get_regional_demand(start_time, end_time, raw_data_cache):\n\n dispatch_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHREGIONSUM', raw_data_cache,\n select_columns=['SETTLEMENTDATE', 'INTERVENTION',\n 'REGIONID', 'TOTALDEMAND'])\n\n dispatch_data = dispatch_data[dispatch_data['INTERVENTION'] == 0]\n\n dispatch_data['TOTALDEMAND'] = pd.to_numeric(dispatch_data['TOTALDEMAND'])\n\n dispatch_data = dispatch_data.pivot_table(values='TOTALDEMAND', index='SETTLEMENTDATE', columns='REGIONID')\n\n dispatch_data = dispatch_data.reset_index().fillna('0.0')\n\n dispatch_data = dispatch_data.rename(columns={'QLD1': 'qld', 'NSW1': 'nsw', 'VIC1': 'vic', 'SA1': 'sa',\n 'TAS1': 'tas'})\n\n dispatch_data.columns = [col + '-demand' if col != 'SETTLEMENTDATE' else col for col in dispatch_data.columns]\n\n return dispatch_data\n\n\ndef get_duid_techs(raw_data_cache):\n\n cols = ['DUID', 'Region', 'Fuel Source - Descriptor', 'Technology Type - Descriptor']\n tech_data = data_fetch_methods.static_table_xl('Generators and Scheduled Loads', raw_data_cache, 
select_columns=cols)\n\n def tech_classifier(fuel_source, technology_type):\n category = fuel_source\n if technology_type == 'Hydro - Gravity':\n category = 'Hydro'\n elif technology_type == 'Open Cycle Gas turbines (OCGT)':\n category = 'OCGT'\n elif technology_type == 'Combined Cycle Gas Turbine (CCGT)':\n category = 'CCGT'\n elif technology_type == 'Run of River' or fuel_source == 'Solar' or fuel_source == 'Wind' or fuel_source == 'Solar ':\n category = 'ZEROSRMC'\n elif technology_type == 'Spark Ignition Reciprocating Engine':\n category = 'Engine'\n elif technology_type == 'Compression Reciprocating Engine':\n category = 'Engine'\n elif technology_type == 'Steam Sub-Critical' and (fuel_source == 'Natural Gas / Fuel Oil' or fuel_source == 'Natural Gas'):\n category = 'Gas Thermal'\n elif technology_type == 'Pump Storage' or technology_type == 'Battery':\n category = 'Storage'\n return category\n\n tech_data['TECH'] = tech_data.apply(lambda x: tech_classifier(x['Fuel Source - Descriptor'],\n x['Technology Type - Descriptor']),\n axis=1)\n\n return tech_data.loc[:, ['DUID', 'Region', 'TECH']]\n\n\ndef get_tech_operating_capacities(start_time, end_time, raw_data_cache):\n tech_data = get_duid_techs(raw_data_cache)\n\n dispatch_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHLOAD', raw_data_cache,\n select_columns=['DUID', 'SETTLEMENTDATE',\n 'INTERVENTION', 'AVAILABILITY'])\n\n dispatch_data = dispatch_data[dispatch_data['INTERVENTION'] == 0]\n\n\n dispatch_data = pd.merge(dispatch_data, tech_data, on='DUID')\n\n dispatch_data['AVAILABILITY'] = pd.to_numeric(dispatch_data['AVAILABILITY'])\n\n dispatch_data = dispatch_data.groupby(['TECH', 'SETTLEMENTDATE'], as_index=False).aggregate({'AVAILABILITY': 'sum'})\n\n dispatch_data['tech_region'] = dispatch_data['TECH'] + '-capacity'\n\n dispatch_data = dispatch_data.pivot_table(values='AVAILABILITY', index='SETTLEMENTDATE', columns='tech_region')\n\n dispatch_data = dispatch_data.reset_index().fillna('0.0')\n\n return dispatch_data\n\n\ndef get_fleet_dispatch(start_time, end_time, fleet_units, region, raw_data_cache):\n\n dispatch_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHLOAD', raw_data_cache,\n select_columns=['DUID', 'SETTLEMENTDATE', 'TOTALCLEARED',\n 'INTERVENTION', 'RAISEREG', 'RAISE6SEC',\n 'RAISE60SEC', 'RAISE5MIN'])\n dispatch_data = dispatch_data[dispatch_data['INTERVENTION'] == 0]\n\n dispatch_data = dispatch_data[dispatch_data['DUID'].isin(fleet_units)]\n\n dispatch_data['TOTALCLEARED'] = pd.to_numeric(dispatch_data['TOTALCLEARED'])\n dispatch_data['RAISEREG'] = pd.to_numeric(dispatch_data['RAISEREG'])\n dispatch_data['RAISE6SEC'] = pd.to_numeric(dispatch_data['RAISE6SEC'])\n dispatch_data['RAISE60SEC'] = pd.to_numeric(dispatch_data['RAISE60SEC'])\n dispatch_data['RAISE5MIN'] = pd.to_numeric(dispatch_data['RAISE5MIN'])\n\n dispatch_data = dispatch_data.groupby('SETTLEMENTDATE', as_index=False).aggregate(\n {'TOTALCLEARED': 'sum', 'RAISEREG': 'sum', 'RAISE6SEC': 'sum', 'RAISE60SEC': 'sum', 'RAISE5MIN': 'sum'})\n\n aemo_dispatch_names = {'TOTALCLEARED': region + '-energy-fleet-dispatch',\n 'RAISEREG': region + '-raise_regulation-fleet-dispatch',\n 'RAISE6SEC': region + '-raise_6_second-fleet-dispatch',\n 'RAISE60SEC': region + '-raise_60_second-fleet-dispatch',\n 'RAISE5MIN': region + '-raise_5_minute-fleet-dispatch'}\n\n dispatch_data = dispatch_data.rename(columns=aemo_dispatch_names)\n\n return dispatch_data\n\n\ndef get_unit_dispatch(start_time, end_time, unit, 
raw_data_cache):\n dispatch_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHLOAD', raw_data_cache,\n select_columns=['DUID', 'SETTLEMENTDATE', 'INTERVENTION',\n 'INITIALMW'])\n dispatch_data = dispatch_data[dispatch_data['INTERVENTION'] == 0]\n dispatch_data = dispatch_data[dispatch_data['DUID'] == unit]\n initial_mw = dispatch_data['INITIALMW'].iloc[0]\n return float(initial_mw)\n\n\ndef get_residual_demand(start_time, end_time, raw_data_cache):\n cols = ['DUID', 'Region', 'Fuel Source - Descriptor']\n tech_data = data_fetch_methods.static_table_xl('Generators and Scheduled Loads', raw_data_cache, select_columns=cols)\n zero_srmc_techs = ['Wind', 'Solar', 'Solar ']\n tech_data = tech_data[tech_data['Fuel Source - Descriptor'].isin(zero_srmc_techs)]\n scada_data = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCH_UNIT_SCADA', raw_data_cache)\n scada_data = pd.merge(scada_data, tech_data, on='DUID')\n scada_data['SCADAVALUE'] = pd.to_numeric(scada_data['SCADAVALUE'])\n scada_data = scada_data.groupby(['SETTLEMENTDATE', 'Region'], as_index=False).agg({'SCADAVALUE': 'sum'})\n regional_demand = data_fetch_methods.dynamic_data_compiler(start_time, end_time, 'DISPATCHREGIONSUM', raw_data_cache)\n regional_demand = regional_demand[regional_demand['INTERVENTION'] == 0]\n regional_demand = pd.merge(regional_demand, scada_data, left_on=['SETTLEMENTDATE', 'REGIONID'],\n right_on=['SETTLEMENTDATE', 'Region'])\n regional_demand['TOTALDEMAND'] = pd.to_numeric(regional_demand['TOTALDEMAND'])\n regional_demand['RESIDUALDEMAND'] = regional_demand['TOTALDEMAND'] - regional_demand['SCADAVALUE']\n\n regional_demand = regional_demand.pivot_table(values='RESIDUALDEMAND', index='SETTLEMENTDATE', columns='REGIONID')\n\n regional_demand = regional_demand.reset_index().fillna('0.0')\n\n regional_demand = regional_demand.rename(columns={'QLD1': 'qld', 'NSW1': 'nsw', 'VIC1': 'vic', 'SA1': 'sa',\n 'TAS1': 'tas'})\n\n regional_demand.columns = [col + '-demand' if col != 'SETTLEMENTDATE' else col for col in regional_demand.columns]\n return regional_demand\n\n\ndef get_region_fraction_of_max_residual_demand(regional_demand, region):\n regional_demand = regional_demand.groupby('REGIONID', as_index=False).agg({'RESIDUALDEMAND': 'max'})\n sum_regional_max_regional_demands = regional_demand['RESIDUALDEMAND'].max()\n regional_max_demand = regional_demand[regional_demand['REGIONID'] == region]['RESIDUALDEMAND'].iloc[0]\n return regional_max_demand / sum_regional_max_regional_demands"
] |
[
[
"pandas.merge",
"pandas.to_numeric",
"pandas.DataFrame"
]
] |
ohadshapira/Machine-Learning-Collection
|
[
"3e73548ec3ef17ed90a654950717559252814e13"
] |
[
"ML/Pytorch/Basics/custom_dataset/custom_dataset.py"
] |
[
"\"\"\"\nExample of how to create a custom dataset in PyTorch. In this case\nwe have images of cats and dogs in a separate folder and a csv\nfile containing the name of the jpg file as well as the target\nlabel (0 for cat, 1 for dog).\n\nProgrammed by Aladdin Persson <aladdin.persson at hotmail dot com>\n* 2020-04-03 Initial coding\n\n\"\"\"\n\n# Imports\nimport torch\nimport torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions\nimport torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.\nimport torchvision.transforms as transforms # Transformations we can perform on our dataset\nimport torchvision\nimport os\nimport pandas as pd\nfrom skimage import io\nfrom torch.utils.data import (\n    Dataset,\n    DataLoader,\n) # Gives easier dataset management and creates mini batches\n\n\nclass CatsAndDogsDataset(Dataset):\n    def __init__(self, csv_file, root_dir, transform=None):\n        self.annotations = pd.read_csv(csv_file)\n        self.root_dir = root_dir\n        self.transform = transform\n\n    def __len__(self):\n        return len(self.annotations)\n\n    def __getitem__(self, index):\n        img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])\n        image = io.imread(img_path)\n        y_label = torch.tensor(int(self.annotations.iloc[index, 1]))\n\n        if self.transform:\n            image = self.transform(image)\n\n        return (image, y_label)\n\n\n# Set device\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Hyperparameters\nin_channel = 3\nnum_classes = 2\nlearning_rate = 1e-3\nbatch_size = 32\nnum_epochs = 10\n\n# Load Data\ndataset = CatsAndDogsDataset(\n    csv_file=\"custom_dataset/cats_dogs.csv\",\n    root_dir=\"custom_dataset/cats_dogs_resized\",\n    transform=transforms.ToTensor(),\n)\n\n# Dataset is actually a lot larger ~25k images, just took out 10 pictures\n# to upload to Github. 
It's enough to understand the structure and scale\n# if you got more images.\ntrain_set, test_set = torch.utils.data.random_split(dataset, [5, 5])\ntrain_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)\ntest_loader = DataLoader(dataset=test_set, batch_size=batch_size, shuffle=True)\n\n# Model\nmodel = torchvision.models.googlenet(pretrained=True)\nmodel.to(device)\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n# Train Network\nfor epoch in range(num_epochs):\n losses = []\n\n for batch_idx, (data, targets) in enumerate(train_loader):\n # Get data to cuda if possible\n data = data.to(device=device)\n targets = targets.to(device=device)\n\n # forward\n scores = model(data)\n loss = criterion(scores, targets)\n\n losses.append(loss.item())\n\n # backward\n optimizer.zero_grad()\n loss.backward()\n\n # gradient descent or adam step\n optimizer.step()\n\n print(f\"Cost at epoch {epoch} is {sum(losses)/len(losses)}\")\n\n# Check accuracy on training to see how good our model is\ndef check_accuracy(loader, model):\n num_correct = 0\n num_samples = 0\n model.eval()\n\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n\n scores = model(x)\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n\n print(\n f\"Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}\"\n )\n\n model.train()\n\n\nprint(\"Checking accuracy on Training Set\")\ncheck_accuracy(train_loader, model)\n\nprint(\"Checking accuracy on Test Set\")\ncheck_accuracy(test_loader, model)\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.utils.data.DataLoader",
"torch.utils.data.random_split",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
miettij/Intelligent-fault-diagnosis
|
[
"f43fb5d9cdbb9838c278bf8ca8354c8bf33b8861"
] |
[
"code/cwru_utils.py"
] |
[
"import matplotlib.pyplot as plt\nfrom collections import OrderedDict\nimport datetime\nimport numpy as np\nfrom scipy.io import loadmat\nimport torch\nimport os\nfrom os import listdir\n\ndef get_cwru_filepaths(root_dir,health_states,motor_load):\n    \"\"\"\n    Utility function that fetches all data filepaths corresponding\n    to the motor load used during testbench measurements.\n\n    root_dir needs to be of the format:\n    '../original.tmp/12k/'\n\n    Returns a list of tuples (filepath, health state).\n    \"\"\"\n\n    #filename dict corresponding to the motor load parameter\n    filenames = {\n        1 : ['1.mat'],\n        2 : ['2.mat'],\n        3 : ['3.mat'],\n        4 : ['1.mat','2.mat','3.mat']\n    }\n    #class directories correspond to labels\n    class_dirs = listdir(root_dir)\n    filepaths = []\n    if '.DS_Store' in class_dirs:\n        class_dirs.remove('.DS_Store')\n    for class_dir in class_dirs:\n        PATH = os.path.join(root_dir,class_dir)\n        if 'unnecessary' not in PATH:\n\n            #Choose the correct file corresponding to the given motor load\n            temp_path = PATH # if motor_load = 4\n            for filename in filenames[motor_load]:\n                PATH = temp_path + '/' + filename\n\n                filepaths.append((PATH,health_states[class_dir]))\n\n    return filepaths\n\ndef get_trainsets(filepaths, normalize, balance, len_fold):\n\n    \"\"\"\n    A utility function that reads each file and splits its first half\n    into len_fold folds of overlapping training windows.\n    \"\"\"\n    # From every 10 files, select overlapping test windows of length 2048.\n    #Choose overlapping windows from the first 50% of the sequence.\n    # Test data windows are selected from the last 50% of the sequence.\n    # Return len_fold amount of window sets.\n\n    #print(\"getting train folds for:\",filepaths[0][1])\n\n    window_len = 2048\n    stride = 32 #overlap\n\n    fold_dict = {}\n    for i in range(len_fold):\n        fold_dict[i] = []\n\n    num_windows = 1834# prior knowledge, that this is max number for balanced dataset if stride = 32 and wlen = 2048\n\n    # go through each file with a vibration sequence per health state\n    for filepath in filepaths:\n\n        path, label = filepath[0], filepath[1]\n        data = loadmat(path)\n        array = get_array(data)\n\n        train_length = 60705 # roughly the first 50 % of the timeseries signal\n\n        array = np.copy(array[:train_length,:])\n\n        if normalize:\n\n            array = normalize_arr(array)\n\n\n        for i in range(len_fold):\n\n            start_p, stop_p = i/len_fold, (i+1)/len_fold\n            start, stop = int(train_length*start_p), int(train_length*stop_p)\n\n            window_start, window_end = start, start + 2048\n\n            #No overlap between folds!\n            while not window_end > stop :\n\n                fold_dict[i].append((array[window_start:window_end,:],label))\n                window_start, window_end = window_start+32, window_end +32\n\n\n    train_folds = []\n\n    for i in range(len_fold):\n        train_folds.append(fold_dict[i])\n    return train_folds\n\ndef get_testset(filepaths,normalize,balance):\n    #from every 10 files, select non-overlapping test windows of length 2048\n    #Choose non-overlapping windows from the last 50 % of the sequence\n    # Training (also validation) data windows are selected from the first 50 %\n    # of the sequence\n\n    num_windows = 29 #prior knowledge\n\n    testset = []\n\n    for filepath in filepaths:\n        path, label = filepath[0], filepath[1]\n        data = loadmat(path)\n        array = get_array(data)\n        test_length = int(0.5*array.shape[0])\n        array = np.copy(array[-test_length:,:])\n        if normalize:\n            array = normalize_arr(array)\n\n\n        for i in range(num_windows):\n            start, stop =2048*i,2048*(i+1)\n            testset.append((torch.from_numpy(array[start:stop,:].T).float(),label))\n\n    return testset\n\ndef get_array(data):\n    drive_end_key = 'DE'\n    fan_end_key = 'FE'\n    de_arr = None\n    fe_arr = None\n    for key in data.keys():\n        if drive_end_key in key:\n            de_arr = data[key]\n        if fan_end_key in key:\n            fe_arr = data[key]\n    #print(de_arr.shape,fe_arr.shape)\n    #print(de_arr.shape,fe_arr.shape)\n    arr = np.hstack((de_arr,fe_arr))\n\n    return arr\n\ndef normalizearr1d(arr1d):\n    \"\"\"Normalizes array to the range [-1,1]\"\"\"\n\n    arr1d_min = np.min(arr1d)\n\n    arr1d_max = np.max(arr1d)\n\n    arr1d = 2 * (arr1d - arr1d_min) / (arr1d_max - arr1d_min) - 1\n    return arr1d\n\ndef normalize_arr(window):\n\n    for i in range(window.shape[1]):\n\n        window[:,i] = normalizearr1d(window[:,i].copy())\n\n    return window\n\ndef save_cwru_history(history,SAVEPATH,model_name,motor_load):\n    fig,axes = plt.subplots()\n    l1, = axes.plot(history['train'],label = \"Training error\")\n    l2, = axes.plot(history['val'], label = \"Validation error\")\n    plt.legend(handles=[l1,l2])\n    axes.set_title('Convergence of '+model_name)\n    axes.set_ylabel('CrossEntropyLoss')\n    axes.set_xlabel('Epoch')\n    fig.savefig(SAVEPATH+model_name+'-'+motor_load+'.png')\n    plt.close(fig)\n\ndef save_path_formatter(args):\n    args_dict = vars(args)\n    data_folder_name = args_dict['dataset']\n    folder_string = []\n\n    key_map = OrderedDict()\n    key_map['arch'] =''\n    key_map['batch_size']='bs'\n    key_map['deconv']='deconv'\n    key_map['delinear']='delinear'\n    key_map['batchnorm'] = 'bn'\n\n    key_map['lr']='lr'\n    key_map['stride']='stride'\n    key_map['eps'] = 'eps'\n    key_map['deconv_iter'] = 'it'\n    #key_map['lr_scheduler']=''\n\n    key_map['epochs'] = 'ep'\n\n\n\n\n    key_map['bias'] = 'bias'\n    #key_map['block_fc']='bfc'\n    #key_map['freeze']='freeze'\n\n\n    for key, key2 in key_map.items():\n        value = args_dict[key]\n        if key2 != '':\n            folder_string.append('{}.{}'.format(key2, value))\n        else:\n            folder_string.append('{}'.format(value))\n\n    save_path = ','.join(folder_string)\n    timestamp = datetime.datetime.now().strftime(\"%m-%d-%H.%M\")\n    return os.path.join('./logs',data_folder_name,save_path,timestamp).replace(\"\\\\\",\"/\")\n"
] |
[
[
"numpy.hstack",
"matplotlib.pyplot.legend",
"numpy.min",
"scipy.io.loadmat",
"matplotlib.pyplot.subplots",
"torch.from_numpy",
"numpy.max",
"numpy.copy",
"matplotlib.pyplot.close"
]
] |
ixjlyons/pygesture
|
[
"6f36d0341de4036ff2a99a97d5b96e62ad2159ec"
] |
[
"pygesture/wav.py"
] |
[
"import numpy as np\nimport scipy.io.wavfile as siowav\n\n\ndef write(filename, rate, data):\n    \"\"\"\n    Writes recording data to file in WAV format. It is basically a convenience\n    wrapper around `scipy.io.wavfile.write` for handling normalized float data.\n\n    Parameters\n    ----------\n    filename : str\n        Path + file name to the file to write to.\n    rate : int\n        Sample rate in Hz.\n    data : ndarray\n        Data to write. For multi-channel recordings, the shape should be\n        (num_samples, num_channels).\n    \"\"\"\n    data *= 32768\n    data = data.astype(np.int16, copy=False)\n    siowav.write(filename, rate, data)\n\n\ndef read(filename):\n    \"\"\"\n    Reads recording data from a WAV file. It is basically a convenience wrapper\n    around `scipy.io.wavfile.read` for getting float data.\n\n    Parameters\n    ----------\n    filename : str\n        Path + file name to the file to read from.\n\n    Returns\n    -------\n    rate : int\n        Sample rate.\n    data : ndarray\n        Float data (-1 to 1) from the file. Shape is (num_samples,\n        num_channels).\n    \"\"\"\n    rate, data = siowav.read(filename)\n    # make sure we get a 2D array even if there's only one channel\n    if data.ndim == 1:\n        data = data[:, np.newaxis]\n    data = data / 32768.0\n    return rate, data\n\n\nclass ContinuousWriter(object):\n    \"\"\"\n    Writes data to a WAV file chunk by chunk.\n\n    Parameters\n    ----------\n    \"\"\"\n\n    def __init__(self, filename, fs):\n        self.filename = filename\n        self.fs = fs\n\n        self.fid = open(self.filename, 'ab')\n\n    def write(self, data):\n        write(self.fid, self.fs, data)\n\n    def close(self):\n        self.fid.close()\n"
] |
[
[
"scipy.io.wavfile.write",
"scipy.io.wavfile.read"
]
] |
AppleHolic/PytorchSR
|
[
"f56611ccf6167f8a7f8f88e882576bdd9a48800b"
] |
[
"utils.py"
] |
[
"import logging\nimport torch\nfrom torch.autograd import Variable\nfrom models.cbhg import CBHGNet\nfrom models.mgru import MinimalGRUNet\nfrom run import Runner\nfrom trainers.timit import TIMITTrainer\n\n\ndef get_logger(name):\n    # setup logger\n    logger = logging.getLogger(name)\n    logger.setLevel(logging.INFO)\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.INFO)\n    formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n    return logger\n\n\ndef get_loadable_checkpoint(checkpoint):\n    \"\"\"\n    If the model was saved with DataParallel, checkpoint keys start with 'module.'; remove that prefix and return the new state dict\n    :param checkpoint:\n    :return: new checkpoint\n    \"\"\"\n    new_checkpoint = {}\n    for key, val in checkpoint.items():\n        new_key = key.replace('module.', '')\n        new_checkpoint[new_key] = val\n    return new_checkpoint\n\n\ndef to_variable(tensor, is_cuda=True):\n    result = Variable(tensor, requires_grad=False)\n    if is_cuda:\n        return result.cuda()\n    else:\n        return result\n\n\ndef get_trainer(name='cbhg'):\n    if name not in Runner.IMPLEMENTED_MODELS:\n        raise NotImplementedError('Trainer for %s is not implemented !! ' % name)\n\n    if name == 'cbhg':\n        return TIMITTrainer\n    else:\n        return None\n\n\ndef get_networks(name='cbhg', checkpoint_path='', is_cuda=True, is_multi_gpu=True):\n    \"\"\"\n\n    :param name: the name of network\n    :param checkpoint_path: checkpoint path if you want to load checkpoint\n    :param is_cuda: usage of cuda\n    :param is_multi_gpu: check multi gpu\n    :return: network\n    \"\"\"\n\n    if name == 'cbhg':\n        network = CBHGNet()\n    elif name == 'mgru':\n        network = MinimalGRUNet()\n    else:\n        raise NotImplementedError('Network %s is not implemented !! ' % name)\n\n    if checkpoint_path:\n        checkpoint = torch.load(checkpoint_path)\n        network.load_state_dict(get_loadable_checkpoint(checkpoint['net']))\n\n    if is_cuda:\n        network = network.cuda()\n\n    if is_multi_gpu:\n        network = torch.nn.DataParallel(network)\n\n    return network\n"
] |
[
[
"torch.nn.DataParallel",
"torch.load",
"torch.autograd.Variable"
]
] |
deep-spin/sparse_continuous_distributions
|
[
"7cc7bc7140738ebd4585d36e47bddd9be6ebed12"
] |
[
"spcdist/tests/test_agreement.py"
] |
[
"import pytest\n\nimport numpy as np\nimport torch\n\nfrom spcdist.scipy import multivariate_beta_gaussian\nfrom spcdist.torch import MultivariateBetaGaussianDiag\n\n\[email protected]('alpha', [3/2, 4/3, 2, 3])\ndef test_torch_scipy_agreement(alpha):\n mean = np.array([10, 11.1])\n scale_diag = np.array([0.5, 1.5])\n\n mbg = multivariate_beta_gaussian(mean=mean,\n scale=np.diag(scale_diag),\n alpha=alpha)\n\n mbg_t = MultivariateBetaGaussianDiag(torch.from_numpy(mean),\n torch.from_numpy(scale_diag),\n alpha)\n\n assert np.allclose(\n mbg_t.tsallis_entropy.item(),\n mbg.tsallis_entropy()\n )\n\n assert np.allclose(\n mbg_t.log_radius.item(),\n np.log(mbg.radius)\n )\n\n assert np.allclose(\n mbg_t._tau.item(),\n mbg.tau\n )\n"
] |
[
[
"numpy.diag",
"numpy.log",
"numpy.array",
"torch.from_numpy"
]
] |
YitongXia/shape-inversion
|
[
"a1176778330e22546ee81dc01e93c0b1e9e7a37d"
] |
[
"shape_inversion.py"
] |
[
"import os\nimport os.path as osp\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nimport torch.nn.functional as F\nimport torchvision\nfrom torch.autograd import Variable\nfrom model.treegan_network import Generator, Discriminator\n\nfrom utils.common_utils import *\nfrom loss import *\nfrom evaluation.pointnet import *\nimport time\nfrom external.ChamferDistancePytorch.chamfer_python import distChamfer, distChamfer_raw\n\nclass ShapeInversion(object):\n\n def __init__(self, args):\n self.args = args\n\n if self.args.dist:\n self.rank = dist.get_rank()\n self.world_size = dist.get_world_size()\n else:\n self.rank, self.world_size = 0, 1\n \n # init seed for static masks: ball_hole, knn_hole, voxel_mask \n self.to_reset_mask = True \n self.mask_type = self.args.mask_type\n self.update_G_stages = self.args.update_G_stages\n self.iterations = self.args.iterations\n self.G_lrs = self.args.G_lrs\n self.z_lrs = self.args.z_lrs\n self.select_num = self.args.select_num\n\n self.loss_log = []\n \n # create model\n self.G = Generator(features=args.G_FEAT, degrees=args.DEGREE, support=args.support,args=self.args).cuda() \n self.D = Discriminator(features=args.D_FEAT).cuda() \n \n self.G.optim = torch.optim.Adam(\n [{'params': self.G.get_params(i)}\n for i in range(7)],\n lr=self.G_lrs[0], \n betas=(0,0.99),\n weight_decay=0,\n eps=1e-8)\n self.z = torch.zeros((1, 1, 96)).normal_().cuda()\n self.z = Variable(self.z, requires_grad=True)\n self.z_optim = torch.optim.Adam([self.z], lr=self.args.z_lrs[0], betas=(0,0.99))\n\n # load weights\n checkpoint = torch.load(args.ckpt_load, map_location=self.args.device) \n self.G.load_state_dict(checkpoint['G_state_dict'])\n self.D.load_state_dict(checkpoint['D_state_dict'])\n\n self.G.eval()\n if self.D is not None:\n self.D.eval()\n self.G_weight = deepcopy(self.G.state_dict())\n \n # prepare latent variable and optimizer\n self.G_scheduler = LRScheduler(self.G.optim, self.args.warm_up)\n self.z_scheduler = LRScheduler(self.z_optim, self.args.warm_up)\n\n # loss functions\n self.ftr_net = self.D\n self.criterion = DiscriminatorLoss()\n\n if self.args.directed_hausdorff:\n self.directed_hausdorff = DirectedHausdorff()\n\n # for visualization\n self.checkpoint_pcd = [] # to save the staged checkpoints\n self.checkpoint_flags = [] # plot subtitle\n\n \n if len(args.w_D_loss) == 1:\n self.w_D_loss = args.w_D_loss * len(args.G_lrs)\n else:\n self.w_D_loss = args.w_D_loss\n\n def reset_G(self,pcd_id=None): \n \"\"\"\n to call in every new fine_tuning\n before the 1st one also okay\n \"\"\"\n self.G.load_state_dict(self.G_weight, strict=False) \n if self.args.random_G:\n self.G.train()\n else:\n self.G.eval()\n self.checkpoint_pcd = [] # to save the staged checkpoints\n self.checkpoint_flags = []\n self.pcd_id = pcd_id # for \n if self.mask_type == 'voxel_mask':\n self.to_reset_mask = True # reset hole center for each shape \n \n def set_target(self, gt=None, partial=None):\n '''\n set target \n '''\n if gt is not None:\n self.gt = gt.unsqueeze(0)\n # for visualization\n self.checkpoint_flags.append('GT')\n self.checkpoint_pcd.append(self.gt)\n else:\n self.gt = None\n \n if partial is not None:\n if self.args.target_downsample_method.lower() == 'fps':\n target_size = self.args.target_downsample_size\n self.target = self.downsample(partial.unsqueeze(0), target_size)\n else:\n self.target = partial.unsqueeze(0)\n else:\n self.target = self.pre_process(self.gt, stage=-1)\n # for visualization\n 
self.checkpoint_flags.append('target') \n self.checkpoint_pcd.append(self.target)\n \n def run(self, ith=-1):\n loss_dict = {}\n curr_step = 0\n count = 0\n for stage, iteration in enumerate(self.iterations):\n\n for i in range(iteration):\n curr_step += 1\n # setup learning rate\n self.G_scheduler.update(curr_step, self.args.G_lrs[stage])\n self.z_scheduler.update(curr_step, self.args.z_lrs[stage])\n\n # forward\n self.z_optim.zero_grad()\n \n if self.update_G_stages[stage]:\n self.G.optim.zero_grad()\n \n tree = [self.z]\n x = self.G(tree)\n \n # masking\n x_map = self.pre_process(x,stage=stage)\n\n ### compute losses\n ftr_loss = self.criterion(self.ftr_net, x_map, self.target)\n\n dist1, dist2 , _, _ = distChamfer(x_map, self.target)\n cd_loss = dist1.mean() + dist2.mean()\n # optional early stopping\n if self.args.early_stopping:\n if cd_loss.item() < self.args.stop_cd:\n break\n\n # nll corresponds to a negative log-likelihood loss\n nll = self.z**2 / 2\n nll = nll.mean()\n \n ### loss\n loss = ftr_loss * self.w_D_loss[stage] + nll * self.args.w_nll \\\n + cd_loss * 1\n \n # optional to use directed_hausdorff\n if self.args.directed_hausdorff:\n directed_hausdorff_loss = self.directed_hausdorff(self.target, x)\n loss += directed_hausdorff_loss*self.args.w_directed_hausdorff_loss\n \n # backward\n loss.backward()\n self.z_optim.step()\n if self.update_G_stages[stage]:\n self.G.optim.step()\n\n # save checkpoint for each stage\n self.checkpoint_flags.append('s_'+str(stage)+' x')\n self.checkpoint_pcd.append(x)\n self.checkpoint_flags.append('s_'+str(stage)+' x_map')\n self.checkpoint_pcd.append(x_map)\n\n # test only for each stage\n if self.gt is not None:\n dist1, dist2 , _, _ = distChamfer(x,self.gt)\n test_cd = dist1.mean() + dist2.mean()\n with open(self.args.log_pathname, \"a\") as file_object:\n msg = str(self.pcd_id) + ',' + 'stage'+str(stage) + ',' + 'cd' +',' + '{:6.5f}'.format(test_cd.item())\n file_object.write(msg+'\\n')\n \n if self.gt is not None:\n loss_dict = {\n 'ftr_loss': np.asscalar(ftr_loss.detach().cpu().numpy()),\n 'nll': np.asscalar(nll.detach().cpu().numpy()),\n 'cd': np.asscalar(test_cd.detach().cpu().numpy()),\n }\n self.loss_log.append(loss_dict)\n \n ### save point clouds\n self.x = x\n if not osp.isdir(self.args.save_inversion_path):\n os.mkdir(self.args.save_inversion_path)\n x_np = x[0].detach().cpu().numpy()\n x_map_np = x_map[0].detach().cpu().numpy()\n target_np = self.target[0].detach().cpu().numpy()\n if ith == -1:\n basename = str(self.pcd_id)\n else:\n basename = str(self.pcd_id)+'_'+str(ith)\n if self.gt is not None:\n gt_np = self.gt[0].detach().cpu().numpy()\n np.savetxt(osp.join(self.args.save_inversion_path,basename+'_gt.txt'), gt_np, fmt = \"%f;%f;%f\") \n np.savetxt(osp.join(self.args.save_inversion_path,basename+'_x.txt'), x_np, fmt = \"%f;%f;%f\") \n np.savetxt(osp.join(self.args.save_inversion_path,basename+'_xmap.txt'), x_map_np, fmt = \"%f;%f;%f\") \n np.savetxt(osp.join(self.args.save_inversion_path,basename+'_target.txt'), target_np, fmt = \"%f;%f;%f\") \n\n # jittering mode\n if self.args.inversion_mode == 'jittering':\n self.jitter(self.target)\n \n \n def diversity_search(self, select_y=False):\n \"\"\"\n produce batch by batch\n search by 2pf and partial\n but constrainted to z dimension are large\n \"\"\"\n batch_size = 50\n\n num_batch = int(self.select_num/batch_size)\n x_ls = []\n z_ls = []\n cd_ls = []\n tic = time.time()\n with torch.no_grad():\n for i in range(num_batch):\n z = torch.randn(batch_size, 1, 96).cuda()\n tree 
= [z]\n x = self.G(tree)\n dist1, dist2 , _, _ = distChamfer(self.target.repeat(batch_size,1,1),x)\n cd = dist1.mean(1) # single directional CD\n\n x_ls.append(x)\n z_ls.append(z)\n cd_ls.append(cd)\n \n x_full = torch.cat(x_ls)\n cd_full = torch.cat(cd_ls)\n z_full = torch.cat(z_ls)\n\n toc = time.time()\n \n cd_candidates, idx = torch.topk(cd_full,self.args.n_z_candidates,largest=False)\n z_t = z_full[idx].transpose(0,1)\n seeds = farthest_point_sample(z_t, self.args.n_outputs).squeeze(0) \n z_ten = z_full[idx][seeds]\n\n self.zs = [itm.unsqueeze(0) for itm in z_ten]\n self.xs = []\n\n def select_z(self, select_y=False):\n tic = time.time()\n with torch.no_grad():\n if self.select_num == 0:\n self.z.zero_()\n return\n elif self.select_num == 1:\n self.z.normal_()\n return\n z_all, y_all, loss_all = [], [], []\n for i in range(self.select_num):\n z = torch.randn(1, 1, 96).cuda()\n tree = [z]\n with torch.no_grad():\n x = self.G(tree)\n ftr_loss = self.criterion(self.ftr_net, x, self.target) \n z_all.append(z)\n loss_all.append(ftr_loss.detach().cpu().numpy())\n \n toc = time.time()\n loss_all = np.array(loss_all)\n idx = np.argmin(loss_all)\n \n self.z.copy_(z_all[idx])\n if select_y:\n self.y.copy_(y_all[idx])\n \n x = self.G([self.z])\n\n # visualization\n if self.gt is not None:\n x_map = self.pre_process(x, stage=-1)\n dist1, dist2 , _, _ = distChamfer(x,self.gt)\n cd_loss = dist1.mean() + dist2.mean()\n \n with open(self.args.log_pathname, \"a\") as file_object:\n msg = str(self.pcd_id) + ',' + 'init' + ',' + 'cd' +',' + '{:6.5f}'.format(cd_loss.item())\n # print(msg)\n file_object.write(msg+'\\n')\n self.checkpoint_flags.append('init x')\n self.checkpoint_pcd.append(x)\n self.checkpoint_flags.append('init x_map')\n self.checkpoint_pcd.append(x_map)\n return z_all[idx]\n\n \n def pre_process(self,pcd,stage=-1):\n \"\"\"\n transfer a pcd in the observation space:\n with the following mask_type:\n none: for ['reconstruction', 'jittering', 'morphing']\n ball_hole, knn_hole: randomly create the holes from complete pcd, similar to PF-Net\n voxel_mask: baseline in ShapeInversion\n tau_mask: baseline in ShapeInversion\n k_mask: proposed component by ShapeInversion\n \"\"\"\n \n if self.mask_type == 'none':\n return pcd\n elif self.mask_type in ['ball_hole', 'knn_hole']:\n ### set static mask for each new partial pcd\n if self.to_reset_mask:\n # either ball hole or knn_hole, hence there might be unused configs\n self.hole_k = self.args.hole_k\n self.hole_radius = self.args.hole_radius\n self.hole_n = self.args.hole_n\n seeds = farthest_point_sample(pcd, self.hole_n) # shape (B,hole_n)\n self.hole_centers = torch.stack([img[seed] for img, seed in zip(pcd,seeds)]) # (B, hole_n, 3)\n # turn off mask after set mask, until next partial pcd\n self.to_reset_mask = False\n \n ### preprocess\n flag_map = torch.ones(1,2048,1).cuda()\n pcd_new = pcd.unsqueeze(2).repeat(1,1,self.hole_n,1)\n seeds_new = self.hole_centers.unsqueeze(1).repeat(1,2048,1,1)\n delta = pcd_new.add(-seeds_new) # (B, 2048, hole_n, 3)\n dist_mat = torch.norm(delta,dim=3)\n dist_new = dist_mat.transpose(1,2) # (B, hole_n, 2048)\n\n if self.mask_type == 'knn_hole':\n # idx (B, hole_n, hole_k), dist (B, hole_n, hole_k)\n dist, idx = torch.topk(dist_new,self.hole_k,largest=False) \n \n for i in range(self.hole_n):\n dist_per_hole = dist_new[:,i,:].unsqueeze(2)\n if self.mask_type == 'knn_hole':\n threshold_dist = dist[:,i, -1]\n if self.mask_type == 'ball_hole': \n threshold_dist = self.hole_radius\n flag_map[dist_per_hole <= 
threshold_dist] = 0\n\n            target = torch.mul(pcd, flag_map)\n            return target\n        elif self.mask_type == 'voxel_mask':\n            \"\"\"\n            voxels in the partial and optionally surroundings are 1, the rest are 0.\n            \"\"\"\n            ### set static mask for each new partial pcd\n            if self.to_reset_mask:\n                mask_partial = self.voxelize(self.target, n_bins=self.args.voxel_bins, pcd_limit=0.5, threshold=0)\n                # optional to add surrounding to the mask partial\n                surrounding = self.args.surrounding\n                self.mask_dict = {}\n                for key_gt in mask_partial:\n                    x,y,z = key_gt\n                    surrounding_ls = []\n                    surrounding_ls.append((x,y,z))\n                    for x_s in range(x-surrounding+1, x+surrounding):\n                        for y_s in range(y-surrounding+1, y+surrounding):\n                            for z_s in range(z-surrounding+1, z+surrounding):\n                                surrounding_ls.append((x_s,y_s,z_s))\n                    for xyz in surrounding_ls:\n                        self.mask_dict[xyz] = 1\n                # turn off mask after set mask, until next partial pcd\n                self.to_reset_mask = False\n\n            ### preprocess\n            n_bins = self.args.voxel_bins\n            mask_tensor = torch.zeros(2048,1)\n            pcd_new = pcd*n_bins + n_bins * 0.5\n            pcd_new = pcd_new.type(torch.int64)\n            ls_voxels = pcd_new.squeeze(0).tolist() # 2048 sublists\n            tuple_voxels = [tuple(itm) for itm in ls_voxels]\n            for i in range(2048):\n                tuple_voxel = tuple_voxels[i]\n                if tuple_voxel in self.mask_dict:\n                    mask_tensor[i] = 1\n\n            mask_tensor = mask_tensor.unsqueeze(0).cuda()\n            pcd_map = torch.mul(pcd, mask_tensor)\n            return pcd_map\n        elif self.mask_type == 'k_mask':\n            pcd_map = self.k_mask(self.target, pcd,stage)\n            return pcd_map\n        elif self.mask_type == 'tau_mask':\n            pcd_map = self.tau_mask(self.target, pcd,stage)\n            return pcd_map\n        else:\n            raise NotImplementedError\n\n    def voxelize(self, pcd, n_bins=32, pcd_limit=0.5, threshold=0):\n        \"\"\"\n        given a partial/GT pcd\n        return {0,1} masks with resolution n_bins^3\n        pcd_limit in case the pcd is very small, but still assume it is symmetric\n        threshold is needed, in case we would need to handle noise\n        the form of output is a dict, key (x,y,z) , value: count\n        \"\"\"\n        pcd_new = pcd * n_bins + n_bins * 0.5\n        pcd_new = pcd_new.type(torch.int64)\n        ls_voxels = pcd_new.squeeze(0).tolist() # 2048 sublists\n        tuple_voxels = [tuple(itm) for itm in ls_voxels]\n        mask_dict = {}\n        for tuple_voxel in tuple_voxels:\n            if tuple_voxel not in mask_dict:\n                mask_dict[tuple_voxel] = 1\n            else:\n                mask_dict[tuple_voxel] += 1\n        # iterate over a copy of the items so entries can be deleted safely\n        for voxel, cnt in list(mask_dict.items()):\n            if cnt <= threshold:\n                del mask_dict[voxel]\n        return mask_dict\n\n    def tau_mask(self, target, x, stage=-1):\n        \"\"\"\n        tau mask\n        \"\"\"\n        # dist_mat shape (B, N_target, N_output), where B = 1\n        stage = max(0, stage)\n        dist_tau = self.args.tau_mask_dist[stage]\n        dist_mat = distChamfer_raw(target, x)\n        idx0, idx1, idx2 = torch.where(dist_mat<dist_tau)\n        idx = torch.unique(idx2).type(torch.long)\n        x_map = x[:, idx]\n        return x_map\n\n    def k_mask(self, target, x, stage=-1):\n        \"\"\"\n        masking based on CD.\n        target: (1, N, 3), partial, can be < 2048, 2048, > 2048\n        x: (1, 2048, 3)\n        x_map: (1, N', 3), N' < 2048\n        x_map: v1: 2048, 0 masked points\n        \"\"\"\n        stage = max(0, stage)\n        knn = self.args.k_mask_k[stage]\n        if knn == 1:\n            cd1, cd2, argmin1, argmin2 = distChamfer(target, x)\n            idx = torch.unique(argmin1).type(torch.long)\n        elif knn > 1:\n            # dist_mat shape (B, 2048, 2048), where B = 1\n            dist_mat = distChamfer_raw(target, x)\n            # indices (B, 2048, k)\n            val, indices = torch.topk(dist_mat, k=knn, dim=2,largest=False)\n            # union of all the indices\n            idx = torch.unique(indices).type(torch.long)\n\n        if self.args.masking_option == 
'element_product': \n mask_tensor = torch.zeros(2048,1)\n mask_tensor[idx] = 1\n mask_tensor = mask_tensor.cuda().unsqueeze(0)\n x_map = torch.mul(x, mask_tensor) \n elif self.args.masking_option == 'indexing': \n x_map = x[:, idx]\n\n return x_map\n\n def jitter(self, x):\n z_rand = self.z.clone()\n\n stds = [0.3, 0.5, 0.7]\n n_jitters = 12\n\n flag_list = ['gt', 'recon']\n pcd_list = [self.gt, self.x]\n with torch.no_grad():\n for std in stds:\n \n for i in range(n_jitters):\n z_rand.normal_()\n z = self.z + std * z_rand\n x_jitter = self.G([z])\n x_np = x_jitter.squeeze(0).detach().cpu().numpy()\n basename = '{}_std{:3.2f}_{}.txt'.format(self.pcd_id,std,i)\n pathname = osp.join(self.args.save_inversion_path,basename)\n np.savetxt(pathname, x_np, fmt = \"%f;%f;%f\") \n flag_list.append(basename)\n pcd_list.append(x_jitter)\n self.checkpoint_pcd = pcd_list\n self.checkpoint_flags = flag_list \n\n def downsample(self, dense_pcd, n=2048):\n \"\"\"\n input pcd cpu tensor\n return downsampled cpu tensor\n \"\"\"\n idx = farthest_point_sample(dense_pcd,n)\n sparse_pcd = dense_pcd[0,idx]\n return sparse_pcd\n\n\n\n"
] |
[
[
"torch.optim.Adam",
"torch.norm",
"torch.ones",
"numpy.savetxt",
"torch.load",
"torch.cat",
"torch.zeros",
"torch.randn",
"torch.mul",
"torch.no_grad",
"numpy.argmin",
"torch.where",
"torch.unique",
"torch.topk",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"numpy.array",
"torch.autograd.Variable"
]
] |
SciPioneer/pipeline_experiments
|
[
"7e0fe6f884edfab026379cce1b5ae03b5c2489cd"
] |
[
"BERT/main.py"
] |
[
"import argparse\nimport torch.multiprocessing as mp\nimport math\nimport sys\nimport time\nimport os\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom torch.distributed.nn import RemoteModule\nfrom torch.utils.data import DataLoader\nimport torch.distributed.rpc as rpc\nfrom torch.distributed.optim import DistributedOptimizer\nimport torch.distributed.autograd as dist_autograd\n\nfrom .model import MLMTask, MLMTask2, MLMTaskEmbedding, MLMTaskEncoder, MLMTaskHead\nfrom .utils import run_demo, run_ddp, wrap_up\nfrom .sharder import MLMTaskSharder\nfrom .cpu_rpc import DistributedCPURPCSequential, WorkerModule, layer_on_device, pipeline_on_devices\n\nfrom fairscale.experimental.nn.distributed_pipeline import DistributedLoss, DistributedPipeline, PipelineModulesGraph\nfrom fairscale.experimental.nn.distributed_pipeline.trace import make_graph\n\n\ndef collate_batch(batch_data, args, mask_id, cls_id):\n batch_data = torch.tensor(batch_data).long().view(args.batch_size, -1).t().contiguous()\n # Generate masks with args.mask_frac\n data_len = batch_data.size(0)\n ones_num = int(data_len * args.mask_frac)\n zeros_num = data_len - ones_num\n lm_mask = torch.cat([torch.zeros(zeros_num), torch.ones(ones_num)])\n lm_mask = lm_mask[torch.randperm(data_len)]\n batch_data = torch.cat((torch.tensor([[cls_id] * batch_data.size(1)]).long(), batch_data))\n lm_mask = torch.cat((torch.tensor([0.0]), lm_mask))\n\n #targets = torch.stack([batch_data[i] for i in range(lm_mask.size(0)) if lm_mask[i]]).view(-1)\n targets = torch.stack([batch_data[i] for i in range(lm_mask.size(0))]).view(-1)\n batch_data = batch_data.masked_fill(lm_mask.bool().unsqueeze(1), mask_id)\n return batch_data, lm_mask, targets\n\n\ndef process_raw_data(raw_data, args):\n _num = raw_data.size(0) // (args.batch_size * args.bptt)\n raw_data = raw_data[:(_num * args.batch_size * args.bptt)]\n return raw_data\n\n\nclass Loss(nn.Module):\n def __init__(self, criterion, ntokens):\n super().__init__()\n self.ntokens = ntokens\n self.criterion = criterion\n #self.criterion = nn.CrossEntropyLoss()\n\n def forward(self, input, target):\n #print(\"INPUT:\", input.sum().item())\n return self.criterion(input.view(-1, self.ntokens), target.to(input.device))\n\n\ndef run_batch(optimizer, model, loss_module, data, lm_mask, targets):\n with dist_autograd.context() as context_id:\n #data = data.to(0)\n data = data.transpose(0, 1)\n output = model(data)\n #print(\"OUTPUT:\", output.sum().item())\n #output = rpc.RRef(output)\n loss = loss_module(output, rpc.RRef(targets)).to_here()\n #return loss.item()\n dist_autograd.backward(context_id, [loss])\n # torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)\n optimizer.step(context_id)\n\n return loss.item()\n\ndef train(model, vocab, train_loss_log, train_data,\n optimizer, criterion, ntokens, epoch, args):\n #model.train()\n total_loss = 0.\n start_time = time.time()\n mask_id = vocab.stoi['<MASK>']\n cls_id = vocab.stoi['<cls>']\n train_loss_log.append(0.0)\n dataloader = DataLoader(train_data, batch_size=args.batch_size * args.bptt,\n shuffle=False, collate_fn=lambda b: collate_batch(b, args, mask_id, cls_id))\n\n loss_module = DistributedLoss(Loss, criterion, ntokens)\n\n for batch, (data, lm_mask, targets) in enumerate(dataloader):\n try:\n loss = run_batch(optimizer, model, loss_module, data, lm_mask, targets)\n except:\n #print(rpc.rpc_sync(\"w3\", torch.cuda.memory_stats, args=(3,)))\n #time.sleep(60)\n raise\n #rpc.rpc_sync(f\"w3\", torch.cuda.empty_cache)\n 
print(\"LOSS:\", \"%0.3f\" % (loss,))\n total_loss += loss\n\n if batch % args.log_interval == 0 and batch > 0:\n cur_loss = total_loss / args.log_interval\n elapsed = time.time() - start_time\n train_loss_log[-1] = cur_loss\n print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:05.5f} | ms/batch {:5.2f} | '\n 'loss {:5.2f} | ppl {:8.2f}'.format(epoch, batch,\n len(train_data) // (args.bptt * args.batch_size),\n args.lr,\n elapsed * 1000 / args.log_interval,\n cur_loss, math.exp(cur_loss)))\n total_loss = 0\n start_time = time.time()\n\n\nclass NoOp(nn.Module):\n def forward(self, input):\n #import math; print(input.shape,\"=\",math.prod(input.shape))\n return input\n\ndef run_main(args):\n import torchtext\n if args.dataset == 'WikiText103':\n from torchtext.experimental.datasets import WikiText103 as WLMDataset\n elif args.dataset == 'WikiText2':\n from torchtext.experimental.datasets import WikiText2 as WLMDataset\n elif args.dataset == 'WMTNewsCrawl':\n from torchtext.experimental.datasets import WMTNewsCrawl as WLMDataset\n elif args.dataset == 'EnWik9':\n from torchtext.datasets import EnWik9\n elif args.dataset == 'BookCorpus':\n from data import BookCorpus\n else:\n print(\"dataset for MLM task is not supported\")\n\n try:\n vocab = torch.load(args.save_vocab)\n except:\n print(f\"WLMDataset = {WLMDataset}\")\n train_dataset, valid_dataset, test_dataset = WLMDataset()\n old_vocab = train_dataset.vocab\n print(f\"len(old_vocab) = {len(old_vocab)}\")\n vocab = torchtext.vocab.Vocab(counter=old_vocab.freqs,\n specials=['<unk>', '<pad>', '<MASK>'])\n with open(args.save_vocab, 'wb') as f:\n torch.save(vocab, f)\n\n if args.dataset == 'WikiText103' or args.dataset == 'WikiText2':\n train_dataset, valid_dataset, test_dataset = WLMDataset(vocab=vocab)\n train_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))\n valid_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))\n test_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))\n elif args.dataset == 'WMTNewsCrawl':\n from torchtext.experimental.datasets import WikiText2\n test_dataset, valid_dataset = WikiText2(vocab=vocab, split=('test', 'valid'))\n valid_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))\n test_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))\n train_dataset = WLMDataset(vocab=vocab, split='train')\n train_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))\n elif args.dataset == 'EnWik9':\n enwik9 = EnWik9()\n idx1, idx2 = int(len(enwik9) * 0.8), int(len(enwik9) * 0.9)\n train_data = torch.tensor([vocab.stoi[_id]\n for _id in enwik9[0:idx1]]).long()\n val_data = torch.tensor([vocab.stoi[_id]\n for _id in enwik9[idx1:idx2]]).long()\n test_data = torch.tensor([vocab.stoi[_id]\n for _id in enwik9[idx2:]]).long()\n from torchtext.experimental.datasets import LanguageModelingDataset\n train_dataset = LanguageModelingDataset(train_data, vocab, lambda x: x)\n valid_dataset = LanguageModelingDataset(val_data, vocab, lambda x: x)\n test_dataset = LanguageModelingDataset(test_data, vocab, lambda x: x)\n elif args.dataset == 'BookCorpus':\n train_dataset, valid_dataset, test_dataset = BookCorpus(vocab)\n\n train_data = process_raw_data(train_dataset.data, args)\n val_data = process_raw_data(valid_dataset.data, args)\n test_data = process_raw_data(test_dataset.data, args)\n\n ntokens = len(train_dataset.get_vocab())\n print(f\"Vocabulary size = {ntokens}\")\n\n # 
model = DistributedCPURPCSequential(\n # WorkerModule(\"worker1\", layer_on_device(\"cuda:0\"), MLMTask, ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout)\n # )\n\n # model = DistributedCPURPCSequential(\n # WorkerModule(\"worker1\", layer_on_device(\"cuda:0\"), MLMTaskEmbedding, ntokens, args.emsize),\n # WorkerModule(\"worker2\", layer_on_device(\"cuda:1\"), MLMTaskEncoder, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout),\n # WorkerModule(\"worker3\", layer_on_device(\"cuda:2\"), MLMTaskHead, ntokens, args.emsize),\n # )\n\n # model = DistributedCPURPCSequential(\n # WorkerModule(\"worker1\", pipeline_on_devices(0, 1, 2, 3, 4, 5, 6, 7, include_embeddings=True, n_encoders=args.nlayers, include_head=True), MLMTaskSharder, ntokens, args.emsize, args.nhead, args.nhid, args.dropout),\n # )\n\n # model = DistributedCPURPCSequential(\n # WorkerModule(\"worker1\", pipeline_on_devices(6, 4, 2, 0, include_embeddings=True, n_encoders=args.nlayers // 2), MLMTaskSharder, ntokens, args.emsize, args.nhead, args.nhid, args.dropout),\n # WorkerModule(\"worker2\", pipeline_on_devices(1, 3, 5, 7, n_encoders=args.nlayers // 2, include_head=True), MLMTaskSharder, ntokens, args.emsize, args.nhead, args.nhid, args.dropout),\n # )\n\n layers = [RemoteModule(\"w0/cuda:0\", MLMTaskEmbedding, (ntokens, args.emsize))]\n n_encoders = args.nlayers\n if not False:\n for i, device in enumerate([f\"w{i}/cuda:{i}\" for i in (0, 1, 2, 3, 4, 5, 6)]):\n this_encoders = n_encoders // (7-i)\n layers.append(RemoteModule(device, MLMTaskEncoder, (args.emsize, args.nhead, args.nhid, this_encoders, args.dropout)))\n n_encoders -= this_encoders\n layers.append(RemoteModule(\"w7/cuda:7\", MLMTaskHead, (ntokens, args.emsize)))\n #layers.append(RemoteModule(\"w3/cuda:3\", NoOp, ()))\n org_model = nn.Sequential(*layers)\n\n #org_model = nn.Sequential(*(\n # MLMTaskSharder([\"w5/cuda:5\", \"w2/cuda:2\"], dict(include_embeddings=True, n_encoders=args.nlayers // 6, n_encoders_on_last_gpu=args.nlayers // 6), ntokens, args.emsize, args.nhead, args.nhid, args.dropout)\n # +MLMTaskSharder([\"w0/cuda:0\", \"w7/cuda:7\"], dict(n_encoders=args.nlayers // 3), ntokens, args.emsize, args.nhead, args.nhid, args.dropout)\n # +MLMTaskSharder([\"w1/cuda:1\", \"w4/cuda:4\"], dict(n_encoders=args.nlayers // 3), ntokens, args.emsize, args.nhead, args.nhid, args.dropout)\n # +MLMTaskSharder([\"w6/cuda:6\", \"w3/cuda:3\"], dict(n_encoders=args.nlayers // 6, n_encoders_on_first_gpu=args.nlayers // 6, include_head=True), ntokens, args.emsize, args.nhead, args.nhid, args.dropout)\n # ))\n\n graph = make_graph(org_model)\n #for node in graph.nodes: print(node.module.on, node.get_name())\n model = DistributedPipeline(graph, chunks=8)\n\n params = sum([torch.prod(torch.tensor(p.rpc_sync().size())) for p in model.parameter_rrefs()])\n print(f'Total parameters = {int(params.item() // 1e6)}M')\n\n criterion = nn.CrossEntropyLoss()\n optimizer = DistributedOptimizer(\n torch.optim.SGD,\n model.parameter_rrefs(),\n lr=args.lr,\n )\n best_val_loss = None\n train_loss_log, val_loss_log = [], []\n\n for epoch in range(1, args.epochs + 1):\n epoch_start_time = time.time()\n train(model, train_dataset.vocab, train_loss_log, train_data,\n optimizer, criterion, ntokens, epoch, args)\n\n\ndef run_worker(rank, args):\n first_rank = (args.world_size // args.num_workers) * int(os.environ.get('SLURM_PROCID', '0'))\n rank += first_rank\n\n print(\"rank:\", rank)\n torch.cuda.set_per_process_memory_fraction(0.9, rank - first_rank)\n 
torch.manual_seed(args.seed)\n    os.environ['MASTER_ADDR'] = args.master_addr\n    os.environ['MASTER_PORT'] = args.master_port\n    options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=256)\n    for i in range(args.world_size):\n        options.set_device_map(f\"w{i}\", {rank - first_rank: i % (args.world_size // args.num_workers)})\n    rpc.init_rpc(\n        f\"w{rank}\",\n        rank=rank,\n        world_size=args.world_size,\n        rpc_backend_options=options\n    )\n\n    if rank == first_rank:\n        run_main(args)\n\n    rpc.shutdown()\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='Pipeline experiments')\n    parser.add_argument('--emsize', type=int, default=768,\n                        help='size of word embeddings')\n    parser.add_argument('--nhid', type=int, default=3072,\n                        help='number of hidden units per layer')\n    parser.add_argument('--nlayers', type=int, default=12,\n                        help='number of layers')\n    parser.add_argument('--nhead', type=int, default=12,\n                        help='the number of heads in the encoder/decoder of the transformer model')\n    parser.add_argument('--lr', type=float, default=0.1,\n                        help='initial learning rate')\n    parser.add_argument('--clip', type=float, default=0.1,\n                        help='gradient clipping')\n    parser.add_argument('--epochs', type=int, default=8,\n                        help='upper epoch limit')\n    parser.add_argument('--batch_size', type=int, default=32, metavar='N',\n                        help='batch size')\n    parser.add_argument('--bptt', type=int, default=128,\n                        help='sequence length')\n    parser.add_argument('--dropout', type=float, default=0.2,\n                        help='dropout applied to layers (0 = no dropout)')\n    parser.add_argument('--seed', type=int, default=5431916812,\n                        help='random seed')\n    parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n                        help='report interval')\n    parser.add_argument('--checkpoint', type=str, default='None',\n                        help='path to load the checkpoint')\n    # parser.add_argument('--save', type=str, default='mlm_bert.pt',\n    #                     help='path to save the final model')\n    parser.add_argument('--save-vocab', type=str, default='torchtext_bert_vocab.pt',\n                        help='path to save the vocab')\n    parser.add_argument('--mask_frac', type=float, default=0.15,\n                        help='the fraction of masked tokens')\n    parser.add_argument('--dataset', type=str, default='WikiText2',\n                        help='dataset used for MLM task')\n    # parser.add_argument('--parallel', type=str, default='None',\n    #                     help='Use DataParallel to train model')\n    parser.add_argument('--world_size', type=int, default=8,\n                        help='the world size to initiate DDP')\n    parser.add_argument('--rank', type=int, default=None,\n                        help=\"Global rank of this process. Pass in 0 for master.\")\n    parser.add_argument('--master_addr', type=str, default='localhost',\n                        help=\"\"\"Address of master, will default to localhost if not provided. Master must be able to accept network traffic on the address + port.\"\"\")\n    parser.add_argument('--master_port', type=str, default='29500',\n                        help=\"\"\"Port that master is listening on, will default to 29500 if not provided. Master must be able to accept network traffic on the host and port.\"\"\")\n    parser.add_argument('--gpus', type=int, default=1,\n                        help='number of GPUs per worker node to use')\n    parser.add_argument('--num_workers', type=int, default=1,\n                        help='number of worker nodes to use')\n    parser.add_argument('--rpc', type=str, default='cpu',\n                        help='pipeline mode, `cpu` for CPU RPC, `cuda` for CUDA RPC')\n    args = parser.parse_args()\n\n    assert args.world_size % args.num_workers == 0\n\n    #run_worker(args.rank, args.world_size, args)\n    mp.spawn(run_worker, args=(args,), nprocs=args.world_size // args.num_workers)\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.CrossEntropyLoss",
"torch.distributed.nn.RemoteModule",
"torch.ones",
"torch.multiprocessing.spawn",
"torch.load",
"torch.distributed.autograd.backward",
"torch.manual_seed",
"torch.zeros",
"torch.randperm",
"torch.distributed.rpc.init_rpc",
"torch.tensor",
"torch.distributed.rpc.TensorPipeRpcBackendOptions",
"torch.distributed.autograd.context",
"torch.distributed.rpc.shutdown",
"torch.cuda.set_per_process_memory_fraction",
"torch.distributed.rpc.RRef",
"torch.save"
]
] |
ib-da-ncirl/airport_codes
|
[
"4b00b1be786cfa373b7e8d520553493c6f8fb0d6"
] |
[
"load_cvs_node.py"
] |
[
"# The MIT License (MIT)\n# Copyright (c) 2019 Ian Buttimer\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport re\nfrom zipfile import ZipFile\nimport pandas as pd\nimport os.path as path\n\nfrom dagster import solid\n\n\n@solid\ndef load_csv_from_zip(context, zip_path, pattern, encoding, header):\n \"\"\"\n Load csv files from a zip file into a dictionary of panda DataFrames,\n where the filename matches the specified pattern\n :param context: execution context\n :param zip_path: path to zip file\n :param pattern: regex pattern to match csv files in zip file\n :param encoding: encoding to use when reading csv files\n :param header: header to use\n :return: dictionary of panda DataFrames\n :rtype: dict\n \"\"\"\n # verify zip path\n if not path.exists(zip_path):\n raise ValueError(f'Invalid zip file path: {zip_path}')\n\n regex = re.compile(pattern)\n\n # use dictionary comprehension to load all the csv files in the zip file into a pandas data frame\n zip_file = ZipFile(zip_path)\n df = {\n file.filename: pd.read_csv(\n # Namibia has the country code 'NA' which is treated as Nan by default, so disable default\n zip_file.open(file.filename), encoding=encoding, names=header, keep_default_na=False)\n for file in zip_file.infolist()\n if regex.match(file.filename)\n }\n\n context.log.info(f'Loaded {len(df)} files')\n\n return df\n\n\n@solid\ndef combine_csv_from_dict(context, df_dict):\n \"\"\"\n Combine a dictionary of panda DataFrames into a single DataFrame\n :param context: execution context\n :param df_dict: dictionary of panda DataFrames\n :return: panda DataFrame or None\n :rtype: dict\n \"\"\"\n df_count = len(df_dict)\n if df_count == 0:\n df = None\n else:\n df = pd.DataFrame()\n for key in df_dict.keys():\n df = df.append(df_dict[key])\n\n context.log.info(f'Merged {df_count} DataFrames')\n\n return df\n"
] |
[
[
"pandas.DataFrame"
]
] |
scottthomas586/web-scraping-challenge
|
[
"a88691d9c2b241b41b84660f9be61f98322ce28f"
] |
[
"Mission_to_Mars/scrape_mars.py"
] |
[
"# import dependancies\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\nimport pandas as pd \nimport pymongo \nimport requests \n\ndef init_browser():\n executable_path = {\"executable_path\": \"chromedriver\"}\n return Browser(\"chrome\", **executable_path, headless=False)\n\ndef scrape():\n browser = init_browser()\n\n #NASA Mars News\n\n url1 = \"https://mars.nasa.gov/news\"\n browser.visit(url1)\n time.sleep(5)\n html = browser.html\n soup = bs(html, 'html.parser')\n\n #scrape site to collect latest title\n news_title = soup.find_all(class_ = \"content_title\")\n top_news_title = news_title[1]\n #scrape site to collect the paragraph text\n news_p = soup.find_all(class_ = \"article_teaser_body\")\n top_news_p = news_p[0]\n\n #JSL Mars Space Image\n\n url2 = 'https://www.jpl.nasa.gov/images?search=&category=Mars'\n browser.visit(url2)\n time.sleep(5)\n html2 = browser.html\n soup = bs(html2, 'html.parser')\n\n #find the image source\n feature_image_url = soup.find(class_ = \"BaseImage\")['src']\n\n #visit the Mars facts url\n\n url3 = 'https://space-facts.com/mars/'\n browser.visit(url3)\n time.sleep(5)\n html3 = browser.html\n soup = bs(html3, 'html.parser')\n\n facts = pd.read_html(url3)\n facts_df = facts[0]\n mars_facts = facts_df.rename(columns={0 : \"Features\", 1 : \"Value\"}).set_index([\"Features\"])\n mars_table = mars_facts.to_html()\n mars_table.replace('\\n','')\n\n #visit the USGS astrogeology url\n url4 = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(url4)\n time.sleep(5)\n html4 = browser.html\n soup = bs(html4, 'html.parser')\n\n products = soup.find('div', class_ = 'collapsible results')\n hemispheres=products.find_all('a')\n\n hemisphere_image_urls = []\n\n for hemisphere in hemispheres:\n if hemisphere.h3:\n title=hemisphere.h3.text\n link=hemisphere[\"href\"]\n main_url=\"https://astrogeology.usgs.gov/\"\n next_url=main_url+link\n browser.visit(next_url)\n time.sleep(5)\n html = browser.html\n soup = bs(html, 'html.parser')\n hemisphere2=soup.find(\"div\",class_= \"downloads\")\n img=hemisphere2.ul.a[\"href\"]\n hemisphere_dict={}\n hemisphere_dict[\"Title\"]=title\n hemisphere_dict[\"Image_URL\"]=img\n hemisphere_image_urls.append(hemisphere_dict)\n browser.back()\n\n Mars={\n \"Mars_news_title\": top_news_title,\n \"Mars_news_p\": top_news_p,\n \"Featured_mars_image\": feature_image_url,\n \"Mars_facts\": mars_table,\n \"Mars_hemispheres\": hemisphere_image_urls\n }\n browser.quit()\n\n return Mars"
] |
[
[
"pandas.read_html"
]
] |
emptymalei/statistical-tests
|
[
"8b5d4d4965c431be458271b25cca56fa8e2bf01f"
] |
[
"dietbox/visual/eda.py"
] |
[
"import matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef count_plot_with_percentage(dataframe, column, ax=None):\n if ax is None:\n fig, ax = plt.subplots()\n\n total_counts = len(dataframe)\n vc = dataframe[column].value_counts()\n vc_fraction = vc / total_counts\n\n sns.countplot(x=dataframe[column], ax=ax, order=vc.index)\n\n for p in ax.patches:\n x = p.get_bbox().get_points()[:, 0]\n y = p.get_bbox().get_points()[1, 1]\n ax.annotate(\n f\"{y:0.0f} ({100.*y/total_counts:.1f}%)\",\n (x.mean(), y),\n ha=\"center\",\n va=\"bottom\",\n )\n\n return ax\n"
] |
[
[
"matplotlib.pyplot.subplots"
]
] |
bioinsilico/BIPSPI
|
[
"d4316235e94f19c92b627ad31045e24c5403fbeb"
] |
[
"trainAndTest/processOneFold.py"
] |
[
"from __future__ import print_function\nimport itertools\nimport sys, os\nimport inspect\nimport numpy as np\nfrom joblib import load as joblib_load\n\nfrom .resultsManager import ResultsManager\n#from .classifiers.randomForest import trainMethod, predictMethod\nfrom .classifiers.xgBoost import trainMethod, predictMethod\n\ndef getDataForTestFromPrefix( testPrefix, testPath ):\n '''\n Load a data file whose name startswith testPrefix and it is contained in testPath.\n Returns a tuple with all data needed to perform predictions and testing\n\n @param prefix: str. The prefix of the filename to be loaded. E.g. \"1A2K\"\n @param filesPath: str. The path where data files are contained\n @return (data_d, data_t, ppiComplex.getLabels(), ppiComplex.getIds())\n data_d: np.array (n,m). A np.array that can be feed to the classifier. Each row represents\n a pair of amino acids in direct form (first ligand aa second receptor aa)\n data_l: np.array (n,m). A np.array that can be feed to the classifier. Each row represents\n a pair of amino acids in transpose form (first receptor aa second ligand aa)\n ppiComplex.getLabels(): np.array which contains the labels (-1, 1 ) of each row (pair of amino acids)\n ppiComplex.getIds(): pandas.DataFrame whose columns are:\n chainIdL structResIdL resNameL chainIdR structResIdR resNameR categ \n '''\n for fname in sorted(os.listdir(testPath)):\n if fname.startswith(testPrefix):\n ppiComplex= joblib_load(os.path.join(testPath, fname) )\n data_d,data_t= ppiComplex.getData() \n return (data_d, data_t, ppiComplex.getLabels(), ppiComplex.getIds())\n \ndef getDataForClassifierFromComplexes(listOfComplexes): \n '''\n Extracts the needed information to train a classifier from a list of codified complexes\n (codifyComplexes.ComplexCodified.ComplexCodified).\n\n @param listOfComplexes: [codifyComplexes.ComplexCodified.ComplexCodified]. The complex codified that will be used for\n training\n \n @return (dataDir,dataTrans, labels)\n dataDir: np.array (n,m). A np.array that can be feed to the classifier. Each row represents\n a pair of amino acids in direct form (first ligand aa second receptor aa)\n dataTrans: np.array (n,m). A np.array that can be feed to the classifier. Each row represents\n a pair of amino acids in transpose form (first receptor aa second ligand aa)\n labels: np.array which contains the labels (-1, 1 ) of each row (pair of amino acids)\n '''\n dataDir= []\n dataTrans= []\n labels= []\n prefixes= []\n complexesNumId=[]\n \n if not isinstance(listOfComplexes, list) and not isinstance(listOfComplexes, tuple):\n listOfComplexes= [listOfComplexes]\n for complexNum, ppiComplex in enumerate(listOfComplexes):\n if not inspect.isgenerator(ppiComplex):\n ppiComplex= [ppiComplex]\n for dataBatch in ppiComplex: #In case ppiComplex is an iterator of chunks\n data_d,data_t= dataBatch.getData()\n dataDir.append( data_d)\n dataTrans.append( data_t)\n labels.append( dataBatch.getLabels())\n prefixes.append(dataBatch.getPrefix())\n complexesNumId+= [complexNum]* data_d.shape[0]\n# print(dataBatch.prefix, np.max(data_d),np.max(data_t))\n dataDir= np.concatenate(dataDir)\n dataTrans= np.concatenate(dataTrans)\n labels= np.concatenate(labels) \n return dataDir,dataTrans, labels, complexesNumId\n \ndef trainAndTestOneFold(trainComplexes, testPrefixes, testPath, outputPath, verbose, ncpu= 1):\n '''\n Trains and tests one fold\n \n @param trainComplexes: [codifyComplexes.ComplexCodified.ComplexCodified]. 
The complex codified that will be used for\n training\n @param testPrefixes: str[]. A list that contains prefixes for all complexes to be tested\n @param testPath: str. Path to a dir where testing data files are stored\n @param outputPath: str. Path to a dir where predictions will be stored\n @param verbose: boolean. Whether or not print to stdout info\n @param ncpu: int. Number of cpu's to use in parallel\n ''' \n resultsForEvaluation_list= []\n testPrefixesNotEvaluated=[]\n finalResults=[]\n for testPrefix in testPrefixes:\n if outputPath is not None:\n outName= os.path.join( outputPath, testPrefix+\".res.tab\") \n if verbose and os.path.isfile(outName):\n print(\"Complex already computed: %s\"%(outName))\n resultsForEvaluation_list.append( (testPrefix, ResultsManager.loadExistingResults(outName) ) )\n else:\n testPrefixesNotEvaluated.append( testPrefix )\n else:\n testPrefixesNotEvaluated.append( testPrefix )\n modelo=None\n if len(testPrefixesNotEvaluated)> 0 or len(testPrefixes)==0:\n if verbose: \n print(\"Testing:\", testPrefixesNotEvaluated)\n print(\"Training classifier\")\n verboseLevel=1\n else: \n verboseLevel=0\n \n dataDir,dataTrans, labels, __ = getDataForClassifierFromComplexes(trainComplexes)\n trainData= np.concatenate([dataDir,dataTrans])\n trainLabels= np.concatenate([labels,labels])\n dataDir,dataTrans, labels = (None, None, None)\n modelo= trainMethod(trainData, trainLabels, verboseLevel= verboseLevel, ncpu= ncpu)\n if verbose==True: print (\"Classifier fitted.\")\n \n\n for testPrefix in testPrefixesNotEvaluated:\n prob_predictionsDir_list= []\n prob_predictionsTrans_list=[]\n testlabels_list=[]\n testPairsIds_list=[]\n if verbose==True: print(\"Computing predictions for %s\"%(testPrefix))\n testDataDirect, testDataTrans, testlabels, testPairsIds= getDataForTestFromPrefix( testPrefix, testPath )\n prob_predictionsDir= predictMethod(modelo, testDataDirect)\n prob_predictionsTrans= predictMethod(modelo,testDataTrans)\n resultEval= ResultsManager(testPrefix, prob_predictionsDir, prob_predictionsTrans, testPairsIds)\n if verbose==True: print(\"Evaluating predictions of %s\"%(testPrefix))\n resultEval.getFullEvaluation()\n if verbose==True: print(resultEval)\n## raw_input(\"press enter\")\n finalResults.append( resultEval )\n if not outputPath is None:\n outName= os.path.join(outputPath, testPrefix+\".res.tab\") \n if not os.path.isfile(outName):\n if verbose==True: print(\"Saving results at %s\"%(outName))\n resultEval.writeResults(outName)\n \n for testPrefix, resultEval in resultsForEvaluation_list:\n if verbose==True: print(\"Evaluating predictions for %s\"%(testPrefix))\n resultEval.getFullEvaluation()\n if verbose==True: print(resultEval)\n finalResults.append( resultEval )\n\n return finalResults, modelo\n \n \n"
] |
[
[
"numpy.concatenate"
]
] |
ashiqks/Object-Segmentation-Multi-Class-Detection
|
[
"448db17644512c44b51481e893c7903449d0a470"
] |
[
"food.py"
] |
[
"\"\"\"\nMask R-CNN\nTrain on the toy Balloon dataset and implement color splash effect.\n\nCopyright (c) 2018 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\n------------------------------------------------------------\n\nUsage: import the module (see Jupyter notebooks for examples), or run from\n the command line as such:\n\n # Train a new model starting from pre-trained COCO weights\n python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=coco\n\n # Resume training a model that you had trained earlier\n python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=last\n\n # Train a new model starting from ImageNet weights\n python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=imagenet\n\n # Apply color splash to an image\n python3 balloon.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>\n\n # Apply color splash to video using the last weights you trained\n python3 balloon.py splash --weights=last --video=<URL or path to file>\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.draw\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\n# Path to trained weights file\nCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\n# Directory to save logs and model checkpoints, if not provided\n# through the command line argument --logs\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n############################################################\n# Configurations\n############################################################\n\n\nclass FoodConfig(Config):\n \"\"\"Configuration for training on the toy dataset.\n Derives from the base Config class and overrides some values.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"food\"\n\n # We use a GPU with 12GB memory, which can fit two images.\n # Adjust down if you use a smaller GPU.\n IMAGES_PER_GPU = 2\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 4 # Background + balloon\n\n # Number of training steps per epoch\n STEPS_PER_EPOCH = 100\n\n # Skip detections with < 90% confidence\n DETECTION_MIN_CONFIDENCE = 0.9\n\n\n############################################################\n# Dataset\n############################################################\n\nclass FoodDataset(utils.Dataset):\n\n def load_food(self, dataset_dir, subset):\n \"\"\"Load a subset of the Balloon dataset.\n dataset_dir: Root directory of the dataset.\n subset: Subset to load: train or val\n \"\"\"\n # Add classes. We have only one class to add.\n self.add_class(\"food\", 1, \"Chilli_Chicken\")\n self.add_class(\"food\", 2, \"Tandoori_Chicken\")\n self.add_class(\"food\", 3, \"Gulab_Jamun\")\n self.add_class(\"food\", 4, \"Ice_Cream\")\n\n # Train or validation dataset?\n assert subset in [\"train\", \"val\"]\n dataset_dir = os.path.join(dataset_dir, subset)\n\n # Load annotations\n # VGG Image Annotator (up to version 1.6) saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... 
more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n # Note: In VIA 2.0, regions was changed from a dict to a list.\n annotations = json.load(open(os.path.join(dataset_dir, \"via_region_data.json\")))\n annotations = list(annotations.values()) # don't need the dict keys\n\n # The VIA tool saves images in the JSON even if they don't have any\n # annotations. Skip unannotated images.\n annotations = [a for a in annotations if a['regions']]\n\n # Add images\n for a in annotations:\n # Get the x, y coordinaets of points of the polygons that make up\n # the outline of each object instance. These are stores in the\n # shape_attributes (see json format above)\n # The if condition is needed to support VIA versions 1.x and 2.x.\n polygons = [r['shape_attributes'] for r in a['regions'].values()]\n objects = [s['region_attributes'] for s in a['regions'].values()]\n class_ids = [int(n['food']) for n in objects]\n\t\t\t# load_mask() needs the image size to convert polygons to masks.\n # Unfortunately, VIA doesn't include it in JSON, so we must read\n # the image. This is only managable since the dataset is tiny.\n # print(\"multi_numbers=\", multi_numbers)\n # num_ids = [n for n in multi_numbers['number'].values()]\n # for n in multi_numbers:\n \n image_path = os.path.join(dataset_dir, a['filename'])\n image = skimage.io.imread(image_path)\n height, width = image.shape[:2]\n\n self.add_image(\n \"food\",\n image_id=a['filename'], # use file name as a unique image id\n path=image_path,\n width=width, height=height,\n polygons=polygons,\n\t\t\t\tclass_ids=class_ids)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"food\":\n return super(self.__class__, self).load_mask(image_id)\n class_ids = image_info['class_ids']\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n #class_ids=np.array([self.class_names.index(shapes[0])])\n print(\"info['class_ids']=\", info['class_ids'])\n class_ids = np.array(class_ids, dtype=np.int32)\n return mask, class_ids#[mask.shape[-1]] #np.ones([mask.shape[-1]], dtype=np.int32)#class_ids.astype(np.int32)\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"food\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n\n\ndef train(model):\n \"\"\"Train the model.\"\"\"\n # Training dataset.\n dataset_train = FoodDataset()\n dataset_train.load_food(args.dataset, \"train\")\n dataset_train.prepare()\n\n # Validation dataset\n dataset_val = FoodDataset()\n dataset_val.load_food(args.dataset, \"val\")\n dataset_val.prepare()\n\n # *** This training schedule is an example. 
Update to your needs ***\n # Since we're using a very small dataset, and starting from\n # COCO trained weights, we don't need to train too long. Also,\n # no need to train all layers, just the heads should do it.\n print(\"Training network heads\")\n model.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=30,\n layers='heads')\n\n\ndef color_splash(image, mask):\n \"\"\"Apply color splash effect.\n image: RGB image [height, width, 3]\n mask: instance segmentation mask [height, width, instance count]\n\n Returns result image.\n \"\"\"\n # Make a grayscale copy of the image. The grayscale copy still\n # has 3 RGB channels, though.\n gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255\n # Copy color pixels from the original color image where mask is set\n if mask.shape[-1] > 0:\n # We're treating all instances as one, so collapse the mask into one layer\n mask = (np.sum(mask, -1, keepdims=True) >= 1)\n splash = np.where(mask, image, gray).astype(np.uint8)\n else:\n splash = gray.astype(np.uint8)\n return splash\n\n\ndef detect_and_color_splash(model, image_path=None, video_path=None):\n assert image_path or video_path\n\n # Image or video?\n if image_path:\n # Run model detection and generate the color splash effect\n print(\"Running on {}\".format(args.image))\n # Read image\n image = skimage.io.imread(args.image)\n # Detect objects\n r = model.detect([image], verbose=1)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # Save output\n file_name = \"splash_{:%Y%m%dT%H%M%S}.png\".format(datetime.datetime.now())\n skimage.io.imsave(file_name, splash)\n elif video_path:\n import cv2\n # Video capture\n vcapture = cv2.VideoCapture(video_path)\n width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = vcapture.get(cv2.CAP_PROP_FPS)\n\n # Define codec and create video writer\n file_name = \"splash_{:%Y%m%dT%H%M%S}.avi\".format(datetime.datetime.now())\n vwriter = cv2.VideoWriter(file_name,\n cv2.VideoWriter_fourcc(*'MJPG'),\n fps, (width, height))\n\n count = 0\n success = True\n while success:\n print(\"frame: \", count)\n # Read next image\n success, image = vcapture.read()\n if success:\n # OpenCV returns images as BGR, convert to RGB\n image = image[..., ::-1]\n # Detect objects\n r = model.detect([image], verbose=0)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # RGB -> BGR to save image to video\n splash = splash[..., ::-1]\n # Add image to video writer\n vwriter.write(splash)\n count += 1\n vwriter.release()\n print(\"Saved to \", file_name)\n\n\n############################################################\n# Training\n############################################################\n\nif __name__ == '__main__':\n import argparse\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description='Train Mask R-CNN to detect balloons.')\n parser.add_argument(\"command\",\n metavar=\"<command>\",\n help=\"'train' or 'splash'\")\n parser.add_argument('--dataset', required=False,\n metavar=\"/path/to/balloon/dataset/\",\n help='Directory of the Balloon dataset')\n parser.add_argument('--weights', required=True,\n metavar=\"/path/to/weights.h5\",\n help=\"Path to weights .h5 file or 'coco'\")\n parser.add_argument('--logs', required=False,\n default=DEFAULT_LOGS_DIR,\n metavar=\"/path/to/logs/\",\n help='Logs and checkpoints directory (default=logs/)')\n parser.add_argument('--image', required=False,\n metavar=\"path or URL to image\",\n 
help='Image to apply the color splash effect on')\n parser.add_argument('--video', required=False,\n metavar=\"path or URL to video\",\n help='Video to apply the color splash effect on')\n args = parser.parse_args()\n\n # Validate arguments\n if args.command == \"train\":\n assert args.dataset, \"Argument --dataset is required for training\"\n elif args.command == \"splash\":\n assert args.image or args.video,\\\n \"Provide --image or --video to apply color splash\"\n\n print(\"Weights: \", args.weights)\n print(\"Dataset: \", args.dataset)\n print(\"Logs: \", args.logs)\n\n # Configurations\n if args.command == \"train\":\n config = FoodConfig()\n else:\n class InferenceConfig(FoodConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n config = InferenceConfig()\n config.display()\n\n # Create model\n if args.command == \"train\":\n model = modellib.MaskRCNN(mode=\"training\", config=config,\n model_dir=args.logs)\n else:\n model = modellib.MaskRCNN(mode=\"inference\", config=config,\n model_dir=args.logs)\n\n # Select weights file to load\n if args.weights.lower() == \"coco\":\n weights_path = COCO_WEIGHTS_PATH\n # Download weights file\n if not os.path.exists(weights_path):\n utils.download_trained_weights(weights_path)\n elif args.weights.lower() == \"last\":\n # Find last trained weights\n weights_path = model.find_last()\n elif args.weights.lower() == \"imagenet\":\n # Start from ImageNet trained weights\n weights_path = model.get_imagenet_weights()\n else:\n weights_path = args.weights\n\n # Load weights\n print(\"Loading weights \", weights_path)\n if args.weights.lower() == \"coco\":\n # Exclude the last layers because they require a matching\n # number of classes\n model.load_weights(weights_path, by_name=True, exclude=[\n \"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\n else:\n model.load_weights(weights_path, by_name=True)\n\n # Train or evaluate\n if args.command == \"train\":\n train(model)\n elif args.command == \"splash\":\n detect_and_color_splash(model, image_path=args.image,\n video_path=args.video)\n else:\n print(\"'{}' is not recognized. \"\n \"Use 'train' or 'splash'\".format(args.command))\n"
] |
[
[
"numpy.array",
"numpy.where",
"numpy.sum"
]
] |
pandyakaa/modified-adaptdl-sched
|
[
"753af5e71f4a814a138902d718d0648362d3d11d"
] |
[
"adaptdl/adaptdl/torch/gradient_noise_scale.py"
] |
[
"import functools\nimport logging\nimport numpy as np\nimport torch.distributed\nimport torch.optim\n\nfrom torch.autograd import Variable\n\n__all__ = [\"GradientNoiseScale\"]\n\nlogging.basicConfig(level=logging.INFO)\nLOG = logging.getLogger(__name__)\nLOG.setLevel(logging.INFO)\n\n\ndef _average_groups(grads1, grads2):\n ret = []\n for group1, group2 in zip(grads1, grads2):\n ret.append([])\n for g1, g2 in zip(group1, group2):\n if g1 is None:\n ret[-1].append(g2)\n elif g2 is None:\n ret[-1].append(g1)\n else:\n ret[-1].append((g1 + g2) / 2)\n return ret\n\n\ndef _normsqr_groups(grads):\n ret = []\n for group in grads:\n normsqr = [g.pow(2).sum(dtype=torch.float64)\n for g in group if g is not None]\n ret.append(sum(normsqr).item() if normsqr else 0.0)\n return np.array(ret)\n\n\nclass GradientNoiseScale(object):\n \"\"\"This class tracks gradient related stats and takes care of gradient\n accumulation.\"\"\"\n def __init__(self, adp, optimizer,\n mp_scaler=None,\n num_replicas=None,\n accum_scale=None):\n self._adp = adp\n self._optimizer = optimizer\n self._orig_optimizer_zero_grad = optimizer.zero_grad\n self._should_zero_grad = True\n self._mp_scaler = mp_scaler\n self._local_sqr = None\n self._num_replicas = (num_replicas if num_replicas is not None\n else torch.distributed.get_world_size())\n self._accum_scale = accum_scale or self._num_replicas\n self._prev_grads = None\n\n self.reset_accumulation()\n\n self._optimizer.state.setdefault(\"gns\", {\n \"progress\": 0.0,\n \"prev_scale\": 0.0,\n # Averages of n and v\n \"sqr_avg\": np.ones(len(optimizer.param_groups)),\n \"var_avg\": np.zeros(len(optimizer.param_groups)),\n # Whether estimates are biased (using differenced estimator).\n \"biased\": False,\n })\n\n for idx, param_group in enumerate(self._optimizer.param_groups):\n for param in param_group[\"params\"]:\n param.register_hook(\n functools.partial(self._backward_hook, idx, param))\n self._callback_queued = False\n self._smoothing = 0.999\n\n @property\n def _state(self):\n return self._optimizer.state[\"gns\"]\n\n def reset_accumulation(self):\n \"\"\"reset accumulation calculations and gradients.\"\"\"\n self._orig_optimizer_zero_grad()\n self._local_sqr = None\n self._accum_count = 0\n\n @property\n def should_zero_grad(self):\n return self._should_zero_grad\n\n @property\n def accum_scale(self):\n return self._accum_scale\n\n @property\n def accum_count(self):\n return self._accum_count\n\n def set_accum_scale(self, accum_scale):\n if not np.isclose(self._accum_scale, accum_scale):\n self.reset_accumulation()\n self._accum_scale = accum_scale\n\n @property\n def raw_sqr_avg(self):\n view = self._state[\"sqr_avg\"].view()\n view.flags.writeable = False\n return view\n\n def sqr_avg(self):\n \"\"\"\n Current estimate of the squared l2-norm of the true gradient (sigma\n squared).\n\n Returns (float): Estimate of squared l2-norm.\n \"\"\"\n return float(np.sum(np.maximum(self._state[\"sqr_avg\"], 0.0)))\n\n @property\n def raw_var_avg(self):\n view = self._state[\"var_avg\"].view()\n view.flags.writeable = False\n return view\n\n def var_avg(self):\n \"\"\"\n Current estimate of the trace of the covariance of the true gradient\n (mu squared).\n\n Returns (float): Estimate of trace of the covariance.\n \"\"\"\n return float(np.sum(np.maximum(self._state[\"var_avg\"], 1e-6)))\n\n def get_progress(self):\n return self._state[\"progress\"]\n\n def set_progress(self, progress):\n self._state[\"progress\"] = progress\n\n def gain(self, scale):\n \"\"\"\n Current estimate of 
the GradientNoiseScale gain ratio.\n\n Arguments:\n scale (float): The total scale to estimate the gain ratio for.\n\n Returns (float): Estimate of gain ratio.\n \"\"\"\n var = self.var_avg()\n norm = self.sqr_avg()\n return (var + norm) / (var / scale + norm)\n\n def _update_avg(self, param_name, value, factor):\n biased = self._state.get(param_name + \"_biased\", 0.0)\n unbias = self._state.get(param_name + \"_unbias\", 0.0)\n biased = factor * biased + (1.0 - factor) * value\n unbias = factor * unbias + (1.0 - factor)\n self._state[param_name + \"_biased\"] = biased\n self._state[param_name + \"_unbias\"] = unbias\n self._state[param_name] = biased / unbias\n\n def _reset_avg(self, param_name):\n self._state.pop(param_name + \"_biased\", None)\n self._state.pop(param_name + \"_unbias\", None)\n\n def _backward_hook(self, idx, param, grad):\n # This method should be invoked once for each parameter during the\n # backward pass, before gradients are synchronized between replicas.\n if self._local_sqr is None:\n self._local_sqr = torch.zeros(len(self._optimizer.param_groups),\n device=grad.device,\n dtype=torch.float64)\n # Update the local gradient square sum\n self._local_sqr[idx] += grad.detach().pow(2).sum(dtype=torch.float64)\n if not self._callback_queued:\n Variable._execution_engine.queue_callback(self._queue_callback)\n self._callback_queued = True\n\n def _queue_callback(self):\n # This method should be invoked after the entire backward pass. We want\n # to make sure self._final_callback is invoked once, only after all\n # gradients have been synchronized between each replica. However, the\n # synchronization code in DistributedDataParallel is also done in a\n # callback, which might not yet be executed. Therefore, we enqueue\n # self._final_callback from this method, which should ensure it is\n # invoked after the gradient synchronization callback.\n self._callback_queued = False\n self._accum_count += 1\n if self._adp.require_backward_grad_sync:\n # Asynchronously sum the local squared-gradient statistics. The\n # actual gradient averaging should also be happening at the same\n # time, until self._final_callback is invoked.\n if self._num_replicas > 1:\n self._async_op = torch.distributed.all_reduce(self._local_sqr,\n async_op=True)\n Variable._execution_engine.queue_callback(self._final_callback)\n self._should_zero_grad = True\n else:\n # Keep on accumulating gradients, should not zero grad.\n self._should_zero_grad = False\n\n def _final_callback(self):\n # This method should be invoked once the gradients have been\n # synchronized between all replicas and accumulation steps.\n if self._num_replicas > 1:\n self._async_op.wait()\n\n grads = []\n if self._mp_scaler is not None:\n mixed_precision_scale = self._mp_scaler.get_scale()\n else:\n mixed_precision_scale = 1.0\n for group in self._optimizer.param_groups:\n grads.append([])\n for param in group[\"params\"]:\n if param.grad is None:\n grads[-1].append(None)\n continue\n grad = param.grad.detach().float()\n grads[-1].append(\n grad / mixed_precision_scale / self._accum_count)\n # Note: mixed precision can result in nan/inf gradients,\n # which propogate into our norm and variance estimates.\n # Mixed precision autoscaling skips the skip where\n # there are nan/inf, so we also skip the update here\n grads_normsqr = _normsqr_groups(grads)\n if not np.all(np.isfinite(grads_normsqr)):\n LOG.warning(\"GradientNoiseScale detected invalid gradient! 
\"\n \"Skipping step.\")\n return\n count = self._num_replicas * self._accum_count\n scale = self._accum_scale * self._accum_count\n if count > 1:\n # Average local squared-norm samples.\n local_sqr = self._local_sqr.cpu().numpy() / count\n # Gradient is squared in local_sqr, so need to square the\n # mixed precision scale as well\n local_sqr = (local_sqr / mixed_precision_scale ** 2)\n total_sqr = grads_normsqr\n if self._state[\"biased\"]:\n self._reset_avg(\"sqr_avg\")\n self._reset_avg(\"var_avg\")\n self._state[\"biased\"] = False\n self._prev_grads = None\n else:\n # Single gradient datapoint, use difference estimation.\n if self._prev_grads is not None:\n local_sqr = (_normsqr_groups(self._prev_grads) +\n grads_normsqr) / 2\n avg_grads = _average_groups(grads, self._prev_grads)\n total_sqr = _normsqr_groups(avg_grads)\n count = 2\n scale = 2 * self._accum_scale\n self._state[\"biased\"] = True\n self._prev_grads = [[g.clone() if g is not None else None\n for g in group] for group in grads]\n if count > 1:\n grad_sqr = (count * total_sqr - local_sqr) / (count - 1)\n grad_var = (local_sqr - total_sqr) * scale / (count - 1)\n theta = self._smoothing ** scale\n self._update_avg('sqr_avg', grad_sqr, theta)\n self._update_avg('var_avg', grad_var, theta)\n"
] |
[
[
"numpy.maximum",
"numpy.isfinite",
"torch.autograd.Variable._execution_engine.queue_callback",
"numpy.array",
"numpy.isclose"
]
] |
songzijiang/FasterSeg
|
[
"1a14ef6dd665afd229a16ab43b532b5a406512f8"
] |
[
"tools/utils/visualize.py"
] |
[
"import numpy as np\nimport cv2\nimport scipy.io as sio\n\n\ndef set_img_color(colors, background, img, gt, show255=False, weight_foreground=0.55):\n origin = np.array(img)\n for i in range(len(colors)):\n if i != background:\n img[np.where(gt == i)] = colors[i]\n if show255:\n img[np.where(gt == 255)] = 0\n cv2.addWeighted(img, weight_foreground, origin, (1 - weight_foreground), 0, img)\n return img\n\n\ndef show_prediction(colors, background, img, pred, weight_foreground=1):\n im = np.array(img, np.uint8)\n set_img_color(colors, background, im, pred, weight_foreground=weight_foreground)\n final = np.array(im)\n return final\n\n\ndef show_img(colors, background, img, clean, gt, *pds):\n im1 = np.array(img, np.uint8)\n # set_img_color(colors, background, im1, clean)\n final = np.array(im1)\n # the pivot black bar\n pivot = np.zeros((im1.shape[0], 15, 3), dtype=np.uint8)\n for pd in pds:\n im = np.array(img, np.uint8)\n # pd[np.where(gt == 255)] = 255\n set_img_color(colors, background, im, pd)\n final = np.column_stack((final, pivot))\n final = np.column_stack((final, im))\n\n im = np.array(img, np.uint8)\n set_img_color(colors, background, im, gt, True)\n final = np.column_stack((final, pivot))\n final = np.column_stack((final, im))\n return final\n\n\ndef get_colors(class_num):\n colors = []\n for i in range(class_num):\n colors.append((np.random.random((1, 3)) * 255).tolist()[0])\n\n return colors\n\n\ndef get_ade_colors():\n colors = sio.loadmat('./color150.mat')['colors']\n colors = colors[:, ::-1, ]\n colors = np.array(colors).astype(int).tolist()\n colors.insert(0, [0, 0, 0])\n\n return colors\n\n\ndef print_iou(iu, mean_pixel_acc, class_names=None, show_no_back=False,\n no_print=False):\n n = iu.size\n lines = []\n for i in range(n):\n if class_names is None:\n cls = 'Class %d:' % (i + 1)\n else:\n cls = '%d %s' % (i + 1, class_names[i])\n lines.append('%-8s\\t%.3f%%' % (cls, iu[i] * 100))\n mean_IU = np.nanmean(iu)\n # mean_IU_no_back = np.nanmean(iu[1:])\n mean_IU_no_back = np.nanmean(iu[:-1])\n if show_no_back:\n lines.append(\n '---------------------------- %-8s\\t%.3f%%\\t%-8s\\t%.3f%%\\t%-8s\\t%.3f%%' % (\n 'mean_IU', mean_IU * 100, 'mean_IU_no_back',\n mean_IU_no_back * 100,\n 'mean_pixel_ACC', mean_pixel_acc * 100))\n else:\n print(mean_pixel_acc)\n lines.append(\n '---------------------------- %-8s\\t%.3f%%\\t%-8s\\t%.3f%%' % (\n 'mean_IU', mean_IU * 100, 'mean_pixel_ACC',\n mean_pixel_acc * 100))\n line = \"\\n\".join(lines)\n if not no_print:\n print(line)\n return line\n"
] |
[
[
"numpy.random.random",
"scipy.io.loadmat",
"numpy.nanmean",
"numpy.column_stack",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
thudzj/BayesAdapter.github.io
|
[
"243b8b8686e2c9f1ea0bcda5ede317ab98405845"
] |
[
"reproduction/finetune_face.py"
] |
[
"from __future__ import division\nimport os, sys, shutil, time, random, math\nimport argparse\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\n\nimport torch\nfrom torch.optim import SGD\nimport torch.backends.cudnn as cudnn\n\nimport torch.nn.parallel\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.utils.data.distributed\n\nfrom scalablebdl.mean_field import PsiSGD, to_bayesian, to_deterministic\nfrom scalablebdl.bnn_utils import freeze, unfreeze, disable_dropout\n\nfrom utils import AverageMeter, RecorderMeter, time_string, \\\n convert_secs2time, _ECELoss, plot_mi, plot_ens, ent, accuracy, \\\n reduce_tensor, dist_collect, print_log, save_checkpoint, verify\nfrom dataset.face import load_dataset_ft\nimport models.mobilenet as models\n\nparser = argparse.ArgumentParser(description='Training script for Face', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n# Data / Model\nparser.add_argument('--data_path', metavar='DPATH', type=str,\n default='/data/xiaoyang/data/faces_emore/')\nparser.add_argument('--data_path_fake', metavar='DPATH', type=str,\n default='/data/zhijie/autobayes/deepfake_samples/face/')\nparser.add_argument('--dataset', metavar='DSET', type=str, default='face')\nparser.add_argument('--arch', metavar='ARCH', default='mobilenet_v2')\n\n# Optimization\nparser.add_argument('--epochs', metavar='N', type=int, default=16)\nparser.add_argument('--batch_size', type=int, default=256)\nparser.add_argument('--learning_rate', type=float, default=0.1)\nparser.add_argument('--momentum', type=float, default=0.9)\nparser.add_argument('--schedule', type=int, nargs='+', default=[4, 8, 12],\n help='Decrease learning rate at these epochs.')\nparser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1, 0.1],\n help='LR for psi is multiplied by gamma on schedule')\n\n#Regularization\nparser.add_argument('--decay', type=float, default=5e-4,\n help='Weight decay')\n\n# Checkpoints\nparser.add_argument('--save_path', type=str, default='/data/zhijie/snapshots_ba/',\n help='Folder to save checkpoints and log.')\nparser.add_argument('--job-id', type=str, default='bayesadapter-face')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='Path to latest checkpoint (default: none)')\nparser.add_argument('--start_epoch', default=0, type=int, metavar='N')\nparser.add_argument('--evaluate', dest='evaluate', action='store_true',\n help='Evaluate model on test set')\n\n# Acceleration\nparser.add_argument('--workers', type=int, default=4,\n help='number of data loading workers (default: 4)')\n\n# Random seed\nparser.add_argument('--manualSeed', type=int, default=0, help='manual seed')\n\n# Bayesian\nparser.add_argument('--psi_init_range', type=float, nargs='+', default=[-6, -5])\nparser.add_argument('--num_fake', type=int, default=1000)\nparser.add_argument('--uncertainty_threshold', type=float, default=0.75)\n\n# Fake generated data augmentation\nparser.add_argument('--blur_prob', type=float, default=0.5)\nparser.add_argument('--blur_sig', type=float, nargs='+', default=[0., 3.])\nparser.add_argument('--jpg_prob', type=float, default=0.5)\nparser.add_argument('--jpg_method', type=str, nargs='+', default=['cv2', 'pil'])\nparser.add_argument('--jpg_qual', type=int, nargs='+', default=[30, 100])\n\n# Attack settings\nparser.add_argument('--epsilon', default=16./255., type=float,\n help='perturbation')\nparser.add_argument('--num-steps', default=20, type=int,\n help='perturb number of 
steps')\nparser.add_argument('--step-size', default=1./255., type=float,\n help='perturb step size')\nparser.add_argument('--random', default=True,\n help='random initialization for PGD')\n\n# Dist\nparser.add_argument('--world-size', default=1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=0, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://127.0.0.1', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-port', default='1234', type=str,\n help='port used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\nbest_acc = 0\n\ndef main():\n args = parser.parse_args()\n if not os.path.isdir(args.data_path): os.makedirs(args.data_path)\n job_id = args.job_id\n args.save_path = args.save_path + job_id\n if not os.path.isdir(args.save_path): os.makedirs(args.save_path)\n\n args.use_cuda = torch.cuda.is_available()\n if args.manualSeed is None: args.manualSeed = random.randint(1, 10000)\n random.seed(args.manualSeed)\n np.random.seed(args.manualSeed)\n torch.manual_seed(args.manualSeed)\n if args.use_cuda: torch.cuda.manual_seed_all(args.manualSeed)\n cudnn.deterministic = True\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n else:\n args.multiprocessing_distributed = True\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n args.world_size = ngpus_per_node * args.world_size\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n main_worker(args.gpu, ngpus_per_node, args)\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc\n args.gpu = gpu\n assert args.gpu is not None\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n log = open(os.path.join(args.save_path, 'log_seed{}{}.txt'.format(\n args.manualSeed, '_eval' if args.evaluate else '')), 'w')\n log = (log, args.gpu)\n\n net = models.__dict__[args.arch](pretrained=True, num_classes=10341)\n disable_dropout(net)\n net = to_bayesian(net, args.psi_init_range)\n unfreeze(net)\n\n print_log(\"Python version : {}\".format(sys.version.replace('\\n', ' ')), log)\n print_log(\"PyTorch version : {}\".format(torch.__version__), log)\n print_log(\"CuDNN version : {}\".format(torch.backends.cudnn.version()), log)\n print_log(\"Number of parameters: {}\".format(sum([p.numel() for p in net.parameters()])), log)\n print_log(str(args), log)\n\n if args.distributed:\n if args.multiprocessing_distributed:\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend,\n init_method=args.dist_url+\":\"+args.dist_port,\n world_size=args.world_size, rank=args.rank)\n torch.cuda.set_device(args.gpu)\n net.cuda(args.gpu)\n args.batch_size = int(args.batch_size / ngpus_per_node)\n net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[args.gpu])\n else:\n torch.cuda.set_device(args.gpu)\n net = 
net.cuda(args.gpu)\n\n criterion = torch.nn.CrossEntropyLoss().cuda(args.gpu)\n\n mus, psis = [], []\n for name, param in net.named_parameters():\n if 'psi' in name: psis.append(param)\n else: mus.append(param)\n mu_optimizer = SGD(mus, args.learning_rate, args.momentum,\n weight_decay=args.decay)\n\n psi_optimizer = PsiSGD(psis, args.learning_rate, args.momentum,\n weight_decay=args.decay)\n\n recorder = RecorderMeter(args.epochs)\n if args.resume:\n if args.resume == 'auto':\n args.resume = os.path.join(args.save_path, 'checkpoint.pth.tar')\n if os.path.isfile(args.resume):\n print_log(\"=> loading checkpoint '{}'\".format(args.resume), log)\n checkpoint = torch.load(args.resume, map_location='cuda:{}'.format(args.gpu))\n recorder = checkpoint['recorder']\n recorder.refresh(args.epochs)\n args.start_epoch = checkpoint['epoch']\n net.load_state_dict(checkpoint['state_dict'] if args.distributed\n else {k.replace('module.', ''): v for k,v in checkpoint['state_dict'].items()})\n mu_optimizer.load_state_dict(checkpoint['mu_optimizer'])\n psi_optimizer.load_state_dict(checkpoint['psi_optimizer'])\n best_acc = recorder.max_accuracy(False)\n print_log(\"=> loaded checkpoint '{}' accuracy={} (epoch {})\".format(\n args.resume, best_acc, checkpoint['epoch']), log)\n else:\n print_log(\"=> no checkpoint found at '{}'\".format(args.resume), log)\n else:\n print_log(\"=> do not use any checkpoint for the model\", log)\n\n cudnn.benchmark = True\n\n train_loader, ood_train_loader, val_loaders, fake_loader = load_dataset_ft(args)\n psi_optimizer.num_data = len(train_loader.dataset)\n\n if args.evaluate:\n evaluate(val_loaders, fake_loader, net, criterion, args, log, 20, 100)\n return\n\n start_time = time.time()\n epoch_time = AverageMeter()\n train_los = -1\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_loader.sampler.set_epoch(epoch)\n ood_train_loader.sampler.set_epoch(epoch)\n cur_lr, cur_slr = adjust_learning_rate(mu_optimizer, psi_optimizer, epoch, args)\n\n need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (args.epochs-epoch))\n need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)\n\n print_log('\\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} [learning_rate={:6.4f} {:6.4f}]'.format(\n time_string(), epoch, args.epochs, need_time, cur_lr, cur_slr) \\\n + ' [Best : Accuracy={:.2f}, Error={:.2f}]'.format(recorder.max_accuracy(False), 100-recorder.max_accuracy(False)), log)\n\n train_acc, train_los = train(train_loader, ood_train_loader, net,\n criterion, mu_optimizer, psi_optimizer,\n epoch, args, log)\n val_acc, val_los = 0, 0\n recorder.update(epoch, train_los, train_acc, val_acc, val_los)\n\n is_best = False\n if val_acc > best_acc:\n is_best = True\n best_acc = val_acc\n\n if args.gpu == 0:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': net.state_dict(),\n 'recorder': recorder,\n 'mu_optimizer' : mu_optimizer.state_dict(),\n 'psi_optimizer' : psi_optimizer.state_dict(),\n }, False, args.save_path, 'checkpoint.pth.tar')\n\n epoch_time.update(time.time() - start_time)\n start_time = time.time()\n recorder.plot_curve(os.path.join(args.save_path, 'log.png'))\n evaluate(val_loaders, fake_loader, net, criterion, args, log, 20, 100)\n\n log[0].close()\n\ndef train(train_loader, ood_train_loader, model, criterion,\n mu_optimizer, psi_optimizer, epoch, args, log):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n ur_losses = AverageMeter()\n top1 = AverageMeter()\n top5 = 
AverageMeter()\n\n ood_train_loader_iter = iter(ood_train_loader)\n\n model.train()\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n data_time.update(time.time() - end)\n\n input = input.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n input1 = next(ood_train_loader_iter)\n input1 = input1.cuda(args.gpu, non_blocking=True)\n\n bs = input.shape[0]\n bs1 = input1.shape[0]\n\n output = model(torch.cat([input, input1.repeat(2, 1, 1, 1)]))\n loss = criterion(output[:bs], target)\n\n out1_0 = output[bs:bs+bs1].softmax(-1)\n out1_1 = output[bs+bs1:].softmax(-1)\n mi1 = ent((out1_0 + out1_1)/2.) - (ent(out1_0) + ent(out1_1))/2.\n ur_loss = torch.nn.functional.relu(args.uncertainty_threshold - mi1).mean()\n\n prec1, prec5 = accuracy(output[:bs], target, topk=(1, 5))\n losses.update(loss.detach().item(), bs)\n ur_losses.update(ur_loss.detach().item(), bs1)\n top1.update(prec1.item(), bs)\n top5.update(prec5.item(), bs)\n\n mu_optimizer.zero_grad()\n psi_optimizer.zero_grad()\n (loss+ur_loss).backward()\n mu_optimizer.step()\n psi_optimizer.step()\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i == len(train_loader) - 1:\n print_log(' Epoch: [{:03d}][{:03d}/{:03d}] '\n 'Time {batch_time.avg:.3f} '\n 'Data {data_time.avg:.3f} '\n 'Loss {loss.avg:.4f} '\n 'UR Loss {ur_loss.avg:.4f} '\n 'Prec@1 {top1.avg:.3f} '\n 'Prec@5 {top5.avg:.3f} '.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n ur_loss=ur_losses, data_time=data_time, loss=losses,\n top1=top1, top5=top5) + time_string(), log)\n return top1.avg, losses.avg\n\ndef evaluate(val_loaders, fake_loader, net,\n criterion, args, log, num_mc_samples, num_mc_samples2):\n freeze(net)\n if args.gpu == 0:\n print(\"-----------------deterministic-----------------\")\n deter_rets = ens_validate(val_loaders, net, criterion, args, log, 1)\n unfreeze(net)\n\n if args.gpu == 0:\n print(\"-----------------ensemble {} times-----------------\".format(num_mc_samples2))\n rets = ens_validate(val_loaders, net, criterion, args, log, num_mc_samples2)\n\n ens_attack(val_loaders, net, criterion, args, log, num_mc_samples, min(num_mc_samples, 8))\n if args.gpu == 0:\n for k in val_loaders:\n print_log('{} vs. adversarial: AP {}'.format(k[0],\n plot_mi(args.save_path, 'adv_'+k[0], k[0])), log)\n\n ens_validate(fake_loader, net, criterion, args, log, num_mc_samples, suffix='fake')\n if args.gpu == 0:\n for k in val_loaders:\n print_log('{} vs. 
DeepFake: AP {}'.format(k[0],\n plot_mi(args.save_path, 'fake', k[0])), log)\n\ndef ens_validate(val_loaders, model, criterion, args, log, num_ens=100, suffix=''):\n model.eval()\n\n if isinstance(val_loaders, list):\n name, val_loader, issame = val_loaders[args.gpu % len(val_loaders)]\n else:\n name, val_loader, issame = suffix, val_loaders, None\n with torch.no_grad():\n with model.no_sync():\n embeddings = []\n mis = [0 for _ in range(len(val_loader))]\n preds = [0 for _ in range(len(val_loader))]\n for i, input in enumerate(val_loader):\n if isinstance(input, tuple) or isinstance(input, list): input = input[0]\n input = input.cuda(args.gpu, non_blocking=True)\n\n embedding_b = 0\n for ens in range(num_ens):\n output, output_logits = model(input, return_both=True)\n embedding_b += output/num_ens\n mis[i] = (mis[i] * ens + (-output_logits.softmax(-1)\n * output_logits.log_softmax(-1)).sum(1)) / (ens + 1)\n preds[i] = (preds[i] * ens + output_logits.softmax(-1)) / (ens + 1)\n\n norm = torch.norm(embedding_b, 2, 1, True)\n embedding = torch.div(embedding_b, norm)\n embeddings.append(embedding)\n\n embeddings = torch.cat(embeddings).data.cpu().numpy()\n preds = torch.cat(preds, 0)\n mis = (- preds * preds.log()).sum(1) - (0 if num_ens == 1 else torch.cat(mis, 0))\n if (isinstance(val_loaders, list) and args.gpu < len(val_loaders)) or \\\n ((not isinstance(val_loaders, list)) and args.gpu == 0):\n np.save(os.path.join(args.save_path, 'mis_{}.npy'.format(name)), mis.data.cpu().numpy())\n if issame is not None:\n tpr, fpr, accuracy, best_thresholds = verify(embeddings, issame, 10)\n print_log(' **Test** {}: {:.3f}'.format(name, accuracy.mean()), log, True)\n torch.distributed.barrier()\n\ndef ens_attack(val_loaders, model, criterion, args, log, num_ens=20, num_ens_a=8):\n def _grad(X, y, mean, std):\n with model.no_sync():\n with torch.enable_grad():\n X.requires_grad_()\n output = model(X.sub(mean).div(std).repeat(num_ens_a, 1, 1, 1), True)\n output = output.reshape(num_ens_a, X.size(0)//2, 2, output.size(-1))\n loss = ((output[:, :, 0, :].mean(0) - y[:, 1, :].detach())**2).sum(1) \\\n + ((output[:, :, 1, :].mean(0) - y[:, 0, :].detach())**2).sum(1)\n grad_ = torch.autograd.grad(\n [loss], [X], grad_outputs=torch.ones_like(loss), retain_graph=False)[0].detach()\n return grad_\n\n def _pgd_whitebox(X, mean, std):\n freeze(model)\n y = model(X.sub(mean).div(std), True).reshape(X.size(0)//2, 2, -1)\n unfreeze(model)\n\n X_pgd = X.clone()\n if args.random:\n X_pgd += torch.cuda.FloatTensor(*X_pgd.shape).uniform_(-args.epsilon, args.epsilon)\n\n for _ in range(args.num_steps):\n grad_ = _grad(X_pgd, y, mean, std)\n X_pgd += args.step_size * grad_.sign()\n eta = torch.clamp(X_pgd - X, -args.epsilon, args.epsilon)\n X_pgd = torch.clamp(X + eta, 0, 1.0)\n\n mis = 0\n preds = 0\n embedding_b = 0\n for ens in range(num_ens):\n output, output_logits = model(X_pgd.sub(mean).div(std), return_both=True)\n embedding_b += output/num_ens\n mis = (mis * ens + (-output_logits.softmax(-1) * (output_logits).log_softmax(-1)).sum(1)) / (ens + 1)\n preds = (preds * ens + output_logits.softmax(-1)) / (ens + 1)\n\n norm = torch.norm(embedding_b, 2, 1, True)\n embedding = torch.div(embedding_b, norm)\n mis = (- preds * (preds+1e-8).log()).sum(1) - (0 if num_ens == 1 else mis)\n return embedding, mis\n\n mean = torch.from_numpy(np.array([0.5, 0.5, 0.5])).view(1,3,1,1).cuda(args.gpu).float()\n std = torch.from_numpy(np.array([0.5, 0.5, 0.5])).view(1,3,1,1).cuda(args.gpu).float()\n\n model.eval()\n name, val_loader, 
issame = val_loaders[args.gpu % len(val_loaders)]\n with torch.no_grad():\n with model.no_sync():\n mis = []\n embeddings = []\n for i, input in enumerate(val_loader):\n is_pair = issame[i*args.batch_size//2:min(len(issame), i*args.batch_size//2+args.batch_size//2)]\n if np.all(is_pair == False): continue\n\n input = input.cuda(args.gpu, non_blocking=True).mul_(std).add_(mean)\n input = input.reshape(args.batch_size//2, 2, 3, 112, 112)\n assert len(is_pair) == input.shape[0], (len(is_pair), input.shape[0])\n\n mask = torch.from_numpy(is_pair).cuda(args.gpu, non_blocking=True) == True\n input = input[mask, :, :, :, :].view(-1, 3, 112, 112)\n\n embedding, mis_ = _pgd_whitebox(input, mean, std)\n mis.append(mis_)\n embeddings.append(embedding)\n\n mis = torch.cat(mis, 0)\n\n torch.distributed.barrier()\n if args.gpu < len(val_loaders): np.save(os.path.join(args.save_path, 'mis_adv_{}.npy'.format(name)), mis.data.cpu().numpy())\n\ndef adjust_learning_rate(mu_optimizer, psi_optimizer, epoch, args):\n lr = args.learning_rate\n slr = args.learning_rate\n assert len(args.gammas) == len(args.schedule), \\\n \"length of gammas and schedule should be equal\"\n for (gamma, step) in zip(args.gammas, args.schedule):\n if (epoch >= step): slr = slr * gamma\n else: break\n lr = lr * np.prod(args.gammas)\n for param_group in mu_optimizer.param_groups: param_group['lr'] = lr\n for param_group in psi_optimizer.param_groups: param_group['lr'] = slr\n return lr, slr\n\nif __name__ == '__main__': main()\n"
] |
[
[
"torch.multiprocessing.spawn",
"torch.cat",
"numpy.all",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.nn.CrossEntropyLoss",
"torch.norm",
"torch.distributed.init_process_group",
"torch.backends.cudnn.version",
"torch.from_numpy",
"torch.distributed.barrier",
"torch.nn.functional.relu",
"torch.optim.SGD",
"torch.ones_like",
"torch.div",
"torch.enable_grad",
"torch.cuda.FloatTensor",
"torch.cuda.device_count",
"numpy.array",
"torch.nn.parallel.DistributedDataParallel",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"numpy.prod",
"torch.clamp"
]
] |
fpetitzon/effective_dimension
|
[
"1f6384422cfc9e4d05be9c678fd6e7cf65b3bd4f"
] |
[
"Sensitivity_plots/generate_data/cnn_sensitivity_lower_depth.py"
] |
[
"import numpy as np\nfrom effective_dimension import Model, EffectiveDimension, ClassicalNeuralNetwork\n\n# This code file generates data to test the sensitivity of the effective dimension to different number of samples used \\\n# for the Monte Carlo estimates for the integrals in the effective dimension formula. m = number of theta samples \\\n# and we fix n here to a sufficiently large value. In particular, this file looks at models with lower d (lower \\\n# number of parameters).\n# We fix the model architecture for different input sizes to the classical models that produce the highest effective \\\n# dimension on average with 100 theta samples and 100 data samples.\n\nn = [1000, 2000, 8000, 10000, 40000, 60000, 100000, 150000, 200000, 500000, 1000000, 10000000, 10000000000, 10000000000000]\n\nsize4 = [4,1,1,1,2]\n\nsize6 = [6,1,1,1,1,1,2]\n\nsize8 = [8,1,1,1,1,1,1,1,2]\n\nsize10 = [10, 1, 4, 1, 2]\n\nseeds = [1,200,300,400,500,600,700,800,900,1000]\n\nfor k in seeds:\n for i in range(10, 110, 10):\n cnet = ClassicalNeuralNetwork(size=size4, samples=i)\n ed = EffectiveDimension(cnet, num_thetas=i, num_inputs=i, seed=k)\n f, trace, fishers = ed.get_fhat()\n effdims = ed.eff_dim(f, n)\n np.save(\"4in_1layer_effective_dimension_samples_%i_seed_%i.npy\" %(i,k), effdims)\n np.save(\"4in_1layer_f_hats_samples_%i_seed_%i.npy\" %(i,k), f)\n np.save(\"4in_1layer_fishers_samples_%i_seed_%i.npy\" %(i,k), fishers)\n np.save(\"4in_1layer_trace_samples_%i_seed_%i.npy\" %(i,k), trace)\n\nfor k in seeds:\n for i in range(10, 110, 10):\n cnet = ClassicalNeuralNetwork(size=size6, samples=i)\n ed = EffectiveDimension(cnet, num_thetas=i, num_inputs=i, seed=k)\n f, trace, fishers = ed.get_fhat()\n effdims = ed.eff_dim(f, n)\n np.save(\"6in_1layer_effective_dimension_samples_%i_seed_%i.npy\" %(i,k), effdims)\n np.save(\"6in_1layer_f_hats_samples_%i_seed_%i.npy\" %(i,k), f)\n np.save(\"6in_1layer_fishers_samples_%i_seed_%i.npy\" %(i,k), fishers)\n np.save(\"6in_1layer_trace_samples_%i_seed_%i.npy\" %(i,k), trace)\n\nfor k in seeds:\n for i in range(10, 110, 10):\n cnet = ClassicalNeuralNetwork(size=size8, samples=i)\n ed = EffectiveDimension(cnet, num_thetas=i, num_inputs=i, seed=k)\n f, trace, fishers = ed.get_fhat()\n effdims = ed.eff_dim(f, n)\n np.save(\"8in_1layer_effective_dimension_samples_%i_seed_%i.npy\" %(i,k), effdims)\n np.save(\"8in_1layer_f_hats_samples_%i_seed_%i.npy\" %(i,k), f)\n np.save(\"8in_1layer_fishers_samples_%i_seed_%i.npy\" %(i,k), fishers)\n np.save(\"8in_1layer_trace_samples_%i_seed_%i.npy\" %(i,k), trace)\n\nfor k in seeds:\n for i in range(10, 110, 10):\n cnet = ClassicalNeuralNetwork(size=size10, samples=i)\n ed = EffectiveDimension(cnet, num_thetas=i, num_inputs=i, seed=k)\n f, trace, fishers = ed.get_fhat()\n effdims = ed.eff_dim(f, n)\n np.save(\"10in_1layer_effective_dimension_samples_%i_seed_%i.npy\" %(i,k), effdims)\n np.save(\"10in_1layer_f_hats_samples_%i_seed_%i.npy\" %(i,k), f)\n np.save(\"10in_1layer_fishers_samples_%i_seed_%i.npy\" %(i,k), fishers)\n np.save(\"10in_1layer_trace_samples_%i_seed_%i.npy\" %(i,k), trace)"
] |
[
[
"numpy.save"
]
] |
jirheee/CS492-Team-Project
|
[
"99b75d40743bb54b8561cd4a3a5e369e8d15128e"
] |
[
"Server/src/ml/AlphaZero_Gomoku/translate_model_from_old_to_new.py"
] |
[
"import torch\n\nimport json\n\nfrom nn_architecture import PolicyValueNet\nimport argparse\nimport numpy as np\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-y\",\"--force_yes\",action=\"store_true\",default = False)\nargs = parser.parse_args()\n\ntrained_path = r\"D:\\Dropbox\\Workspace\\03 Python\\11 AI_Battle\\CS492-Team-Project\\Server\\src\\ml\\models\\1aaa41fa-526e-47c6-916c-07906127df3c\\curr.model\"\n\ntrained = torch.load(trained_path)\n\nmodel_config = f\"../Server/src/ml/models/1aaa41fa-526e-47c6-916c-07906127df3c/model.json\"\nwith open(model_config, encoding='utf-8') as f:\n model_config = json.loads(f.read())\n\nboard_width = model_config[\"board\"][\"board_width\"]\nboard_height = model_config[\"board\"][\"board_height\"]\nname = model_config[\"name\"]\n\npolicy_value_net = PolicyValueNet(board_width, board_height, model_config[\"nn_type\"], model_config[\"layers\"])\n\nnew_format = policy_value_net.get_policy_param()\n\n\nprint(\"trained keys\\n\", trained.keys())\nprint(\"new keys\\n\", new_format.keys(),end=\"\\n\\n\\n\")\n\n\nprint(\"------------------------------trained--------------------------------\")\nfor ii in [(ii,np.array2string(jj.numpy(),precision=2, separator=\",\",threshold=5)) for (ii,jj) in trained.items()]:\n print(ii[0],ii[1])\nprint(\"------------------------------trained--------------------------------\\n\")\nprint(\"------------------------------new_format--------------------------------\")\nfor ii in [(ii,np.array2string(jj.to('cpu').numpy(),precision=2, separator=\",\",threshold=5)) for (ii,jj) in new_format.items()]:\n print(ii[0],ii[1])\nprint(\"------------------------------new_format--------------------------------\\n\\n\")\n\n\nprint([trained[ii].shape for ii in trained.keys()])\nprint([new_format[ii].shape for ii in new_format.keys()])\nnum = len(trained.keys())\n\nfor ii in range(num):\n new_key = list(new_format.keys())[ii]\n old_key = list(trained.keys())[ii]\n print(f\"mapping {old_key:20s} from trained ===> to {new_key:20s} in new_format\")\n new_format[new_key]=trained[old_key]\n\nprint();print();\nprint([trained[ii].shape for ii in trained.keys()])\nprint([new_format[ii].shape for ii in new_format.keys()])\nprint();print()\n\nprint(\"------------------------------trained--------------------------------\")\nfor ii in [(ii,np.array2string(jj.numpy(),precision=2, separator=\",\",threshold=5)) for (ii,jj) in trained.items()]:\n print(ii[0],ii[1])\nprint(\"------------------------------trained--------------------------------\\n\")\nprint(\"------------------------------new_format--------------------------------\")\nfor ii in [(ii,np.array2string(jj.to('cpu').numpy(),precision=2, separator=\",\",threshold=5)) for (ii,jj) in new_format.items()]:\n print(ii[0],ii[1])\nprint(\"------------------------------new_format--------------------------------\\n\")\n\nif args.force_yes or input(\"Save and overwrite the model with the new_format?\").lower() in [\"y\",\"yes\"]:\n torch.save(new_format,trained_path)\n print(\"saved\")\nelse:\n print(\"exiting without saving\")\n"
] |
[
[
"torch.save",
"torch.load"
]
] |
simard-landscape-lab/orinoco
|
[
"03f0e6b19985873dfdf8d4b9438e9ffcdb706099"
] |
[
"orinoco/nd_tools.py"
] |
[
"import numpy as np\nimport scipy.ndimage as nd\nfrom scipy.ndimage import find_objects\nfrom typing import Callable\n\n\ndef get_array_from_features(label_array: np.ndarray,\n features: np.ndarray) -> np.ndarray:\n \"\"\"\n Using p x q segmentation labels (2d) and feature array with dimension (m x\n n) where m is the number of unique labels and n is the number of features,\n obtain a p x q x m channel array in which each spatial segment is labeled\n according to n-features.\n\n See `find_objects` found\n [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.find_objects.html)\n for the crucial scipy function used.\n\n Parameters\n ----------\n label_array : np.array\n p x q integer array of labels corresponding to superpixels\n features : np.array\n m x n array of features - M corresponds to number of distinct items to\n be classified and N number of features for each item.\n\n Returns\n -------\n out : np.array\n p x q (x n) array where we drop the dimension if n == 1.\n\n Notes\n ------\n Inverse of get_features_from_array with fixed labels, namely if `f` are\n features and `l` labels, then:\n\n get_features_from_array(l, get_array_from_features(l, f)) == f\n\n And similarly, if `f_array` is an array of populated segments, then\n\n get_array_from_features(l, get_features_from_array(l, f)) == f\n \"\"\"\n # Assume labels are 0, 1, 2, ..., n\n if len(features.shape) != 2:\n raise ValueError('features must be 2d array')\n elif features.shape[1] == 1:\n out = np.zeros(label_array.shape, dtype=features.dtype)\n else:\n m, n = label_array.shape\n out = np.zeros((m, n, features.shape[1]), dtype=features.dtype)\n\n labels_p1 = label_array + 1\n indices = find_objects(labels_p1)\n labels_unique = np.unique(labels_p1)\n # ensures that (number of features) == (number of unique superpixel labels)\n assert(len(labels_unique) == features.shape[0])\n for k, label in enumerate(labels_unique):\n indices_temp = indices[label-1]\n # if features is m x 1, then do not need extra dimension when indexing\n label_slice = labels_p1[indices_temp] == label\n if features.shape[1] == 1:\n out[indices_temp][label_slice] = features[k, 0]\n # if features is m x n with n > 1, then requires extra dimension when\n # indexing\n else:\n out[indices_temp + (np.s_[:], )][label_slice] = features[k, ...]\n return out\n\n\ndef get_features_from_array(label_array: np.ndarray,\n data_array: np.ndarray) -> np.ndarray:\n \"\"\"\n Assuming that each segment area from `label_array` (p x q) has a\n homogeneous value, obtain the corresonding feautre vector of size (m x n),\n where m is the number of segment labels and n is the number of channels in\n `data_array` (p x q x n). We also allow data array to be (p x q) if there\n is only one channel.\n\n\n See `find_objects` found\n [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.find_objects.html)\n for the crucial scipy function used.\n\n Parameters\n ----------\n label_array : np.ndarray\n p x q integer array of labels corresponding to superpixels\n data_array : np.ndarray\n p x q x n (or p x q) array of data assumed that each segment label has\n the same value.\n\n Returns\n -------\n np.ndarray:\n m x n array where m is `len(np.unique(label_array))` and n is the number\n of channels. 
If `data_array` has shape p x q, then n = 1.\n\n Notes\n ------\n Inverse of get_features_from_array with fixed labels, namely if `f` are\n features and `l` labels, then\n\n get_features_from_array(l, get_array_from_features(l, f)) == f\n\n And similarly, if `f_array` is an array of populated segments, then\n\n get_array_from_features(l, get_features_from_array(l, f)) == f\n \"\"\"\n # Ensure that 2d label_array has the same 1st two dimensions as data_array\n assert(label_array.shape == (data_array.shape[0], data_array.shape[1]))\n labels_p1 = label_array + 1\n indices = find_objects(labels_p1)\n labels_unique = np.unique(labels_p1)\n\n m = len(labels_unique)\n if len(data_array.shape) == 2:\n features = np.zeros((m, 1))\n elif len(data_array.shape) == 3:\n features = np.zeros((m, data_array.shape[2])).astype(bool)\n else:\n raise ValueError('data_array must be 2d or 3d')\n\n for k, label in enumerate(labels_unique):\n indices_temp = indices[label-1]\n # if features is m x 1, then do not need extra dimension when indexing\n label_slice = labels_p1[indices_temp] == label\n if features.shape[1] == 1:\n features[k, 0] = data_array[indices_temp][label_slice][0]\n # if features is m x n with n > 1, then requires extra dimension when\n # indexing\n else:\n temp = data_array[indices_temp + (np.s_[:], )]\n features[k, ...] = temp[label_slice][0, ...]\n return features\n\n\ndef apply_func_to_superpixels(func: Callable,\n labels: np.ndarray,\n array: np.ndarray,\n dtype: type = float) -> np.ndarray:\n \"\"\"\n This is a wrapper for `scipy.ndimage.labeled_comprehension`.\n\n See this\n [link](https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.ndimage.measurements.labeled_comprehension.html).\n\n Parameters\n ----------\n func : Callable\n Function to call on each flattened segment\n labels : np.ndarray\n p x q label array\n array : np.ndarray\n p x q data array\n dtype : type\n The return type of the array of features. 
Defaults to float.\n\n Returns\n -------\n np.ndarray:\n A populated (float) array in which each segment i is filled with value\n func(array[array = i]).\n \"\"\"\n if len(array.shape) != 2:\n raise ValueError('The array must be a 2d array')\n labels_ = labels + 1\n labels_unique = np.unique(labels_)\n features = nd.labeled_comprehension(array,\n labels_,\n labels_unique,\n func, dtype, np.nan)\n return features.reshape((-1, 1))\n\n\ndef get_superpixel_area_as_features(labels: np.array) -> np.array:\n \"\"\"\n Obtain a feature array in which features are size of corresponding\n features.\n\n Parameters\n ----------\n labels : np.array\n Label array (p x q)\n\n Returns\n -------\n np.array:\n Size features (m x 1), where m is number of unique labels.\n \"\"\"\n return apply_func_to_superpixels(np.size, labels, labels).astype(int)\n\n\ndef filter_binary_array_by_min_size(binary_array: np.ndarray,\n min_size: int,\n structure: np.ndarray = np.ones((3, 3)),\n ) -> np.ndarray:\n \"\"\"\n Look at contigious areas of 1's in binary array and remove those areas with\n less than `min_size`, remove it.\n\n Parameters\n ----------\n binary_array : np.ndarray\n Array of 0's and 1's.\n min_size : int\n Minimum size\n structure : np.ndarray\n How pixel connectivity is determined e.g.\n + 4-connectivity is np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]).\n + 8-connectivity is np.ones((3, 3)), which is the default.\n\n Returns\n -------\n np.ndarray:\n binary array in which small continguous areas 1's of size less than\n min_size have been removed.\n \"\"\"\n\n binary_array_temp = binary_array[~np.isnan(binary_array)]\n if ~((binary_array_temp == 0) | (binary_array_temp == 1)).all():\n raise ValueError('Array must be binary!')\n connected_component_labels, _ = nd.measurements.label(binary_array,\n structure=structure)\n size_features = get_superpixel_area_as_features(connected_component_labels)\n binary_features = get_features_from_array(connected_component_labels,\n binary_array)\n\n # Only want 1s of certain size\n filtered_size_features = ((size_features >= min_size).astype(int) *\n binary_features)\n\n binary_array_filtered = get_array_from_features(connected_component_labels,\n filtered_size_features)\n return binary_array_filtered\n\n\ndef scale_img(img: np.ndarray,\n new_min: int = 0,\n new_max: int = 1) -> np.ndarray:\n \"\"\"\n Scale an image by the absolute max and min in the array to have dynamic\n range new_min to new_max. Useful for visualization.\n\n Parameters\n ----------\n img : np.ndarray\n new_min : int\n new_max : int\n\n Returns\n -------\n np.ndarray:\n New image with shape equal to img, scaled to [new_min, new_max]\n \"\"\"\n i_min = np.nanmin(img)\n i_max = np.nanmax(img)\n if i_min == i_max:\n # then image is constant image and clip between new_min and new_max\n return np.clip(img, new_min, new_max)\n img_scaled = (img - i_min) / (i_max - i_min) * (new_max - new_min)\n img_scaled += new_min\n return img_scaled\n"
] |
[
[
"numpy.nanmax",
"numpy.unique",
"numpy.clip",
"numpy.isnan",
"numpy.nanmin",
"scipy.ndimage.measurements.label",
"numpy.ones",
"scipy.ndimage.find_objects",
"scipy.ndimage.labeled_comprehension",
"numpy.zeros"
]
] |
AxelHenningsson/contomo
|
[
"215c5723c5f04542eee096c4f3de11b5698cb2c9"
] |
[
"contomo/projected_advection_pde.py"
] |
[
"\nimport numpy as np\nimport dill as pickle\nimport matplotlib.pyplot as plt\nimport os\nfrom contomo import utils\nfrom contomo import velocity_solver\n\nclass ProjectedAdvectionPDE(object):\n \"\"\"Advection partial differential equation (PDE) projected into sinogram space.\n\n This object defines the partial differential equation to be solved as an \n inital value problem. Mathematically, the system of equations to solve are\n\n .. math::\n \\\\dfrac{\\\\partial \\\\rho}{\\\\partial t} = \\\\mathcal{F}[ \\\\rho, v ] \\\\quad\\\\quad (1)\n .. math::\n \\\\mathcal{P}[ \\\\mathcal{F}[ \\\\rho, v ] ] = \\\\dfrac{\\\\partial g}{\\\\partial t} \\\\quad\\\\quad (2)\n\n where :math:`\\\\partial \\\\rho/\\\\partial t` is the temporal derivative of a density field :math:`\\\\rho(x,t)`, \n :math:`\\\\mathcal{P}[\\\\cdot]` defines a projection operator, and :math:`\\\\partial g/\\\\partial t` is the temporal\n derivative of some measured projections, :math:`g(s,t)`. Here :math:`x` is a real space coordinate and :math:`s` a\n sinogram space coordinate, :math:`t` denotes time. The operator :math:`\\\\mathcal{F}[\\\\cdot]` is a\n flow model approximation to :math:`\\\\partial \\\\rho/\\\\partial t`, driven by and underlying velocity field\n :math:`v(x,t)`, which in turn is decomposed on a finite basis.\n\n Given the inital value of the field :math:`\\\\rho(x,t=0)=\\\\rho_0` and a set of measured\n projections :math:`g(s, t)`, this class progagates equation (1) in time using some\n provided ray model for :math:`\\\\mathcal{P}[\\\\cdot]` and flow model :math:`\\\\mathcal{F}[\\\\cdot]`.\n\n Args:\n flow_model (:obj:`FlowModel`): Object defining the derivatives of density w.r.t time.\n ray_model (:obj:`RayModel`): Object defining the transformation from real space to sinogram space.\n sinogram_interpolator (:obj:`RayModel`): Object defining the continous derivatives in time of measured sinograms.\n\n Attributes:\n flow_model (:obj:`FlowModel`): Object defining the derivatives of density w.r.t time.\n ray_model (:obj:`RayModel`): Object defining the transformation from real space to sinogram space.\n sinogram_interpolator (:obj:`SinogramInterpolator`): Object defining the continous derivatives in time of measured sinograms.\n\n \"\"\"\n\n def __init__( self, \n flow_model, \n ray_model,\n sinogram_interpolator ):\n self.flow_model = flow_model\n self.ray_model = ray_model\n self.sinogram_interpolator = sinogram_interpolator\n\n def get_density_derivative(self, time, rho):\n \"\"\"Compute right hand side of advection PDE (1) by solving for v(x,t) through (2).\n\n The procedure to approximate the density field temporal derivative can be described\n in two steps. 
First the velocities are recovered by solving, P[ F[ rho, v ] ] = dgdt,\n next, F[ rho, v ] can be computed based on the retrived v.\n\n Args:\n time (float): time.\n rho (:obj:`numpy array`): real space density field.\n\n Returns:\n drhodt (:obj:`numpy array`): density field temporal derivative at time ``time``, ``shape=rho.shape``.\n\n \"\"\"\n\n # self.sinogram_interpolator.soft_spline_reset()\n # self.sinogram_interpolator.add_sinograms( [time], \n # [self.ray_model.forward_project( rho )], \n # resolution = self.stepsize/1000.,\n # update_splines=False )\n # bc_indx = np.argmin( np.abs( self.sinogram_interpolator.sample_times - time ) ) + 1\n # self.sinogram_interpolator.set_bc_index(bc_indx)\n # self.sinogram_interpolator._set_splines(local=time)\n\n ##DEBUG:\n if 0:\n t0 = np.max([0,time - 2.1*self.stepsize])\n fig,ax = self.sinogram_interpolator.show_fit( [32], [0], [32], np.arange(t0, time + 2.1*self.stepsize, self.stepsize/30.) )\n plt.show()\n ##\n\n dgdt = self.sinogram_interpolator( time, derivative=1 )[:,:,:]\n \n self._velocity_solver.flow_model.fixate_density_field( rho )\n\n self._velocity_solver.second_member = dgdt\n\n # TODO: Probably slightly faster to have the inital guess only be redefined at the measurement points\n # and not for each RK integration point ...\n if self._velocity_solver.optimal_coefficents is not None:\n inital_guess = self._velocity_solver.optimal_coefficents \n else:\n inital_guess = np.zeros( self.flow_model.velocity_basis.coefficents.shape )\n\n self._velocity_solver.solve( dgdt, \n inital_guess,\n maxiter=self.maxiter, \n verbose=self.verbose, \n print_frequency=1 )\n\n self._velocity_solver.initial_guess_coefficents = self._velocity_solver.optimal_coefficents\n self.flow_model.velocity_basis.coefficents = self._velocity_solver.optimal_coefficents\n drhodt = self.flow_model.get_temporal_density_derivatives()\n\n return drhodt\n\n def propagate_from_initial_value( self, \n initial_volume,\n start_time,\n end_time,\n stepsize,\n velocity_recovery_iterations = 10,\n reinterpolation=\"mutated moving bc\",\n verbose = True,\n save_path = None ):\n \"\"\"Propagate the target advection equation in time.\n\n Args:\n initial_volume (:obj:`numpy array`): Density field at starting time.\n start_time (:obj:`float`): Time at which the initial density field exists.\n end_time (:obj:`float`): Time at which to stop time integration.\n stepsize (:obj:`float`): Duration of time between two integration steps. \n number_of_timesteps (:obj:`int`): Number of integration steps to execute.\n velocity_recovery_iterations (:obj:`numpy array`): Number of allowed iterations for recovering velocities \n in the projected sub-problem.\n reinterpolation (:obj:`str`): One of ```mutated moving bc```, ```mutated static bc``` or\n ```original static bc```. Defines the strategy for computing projected derivatives during \n RK time integration Defaults to ```mutated moving bc```.\n verbose (:obj:`bool`, optional): Print progress and convergence metrics. Defaults to True.\n save_path (:obj:`string`, optional): Save reconstructed density fields and sinograms to the given\n absolute path ending with desired folder name. 
Defaults to None.\n\n \"\"\"\n self.stepsize = stepsize\n\n self._velocity_solver = velocity_solver.VelocitySolver( self.flow_model, \n self.ray_model,\n dt = stepsize )\n self._velocity_solver.x0 = np.zeros(self._velocity_solver.flow_model.velocity_basis.coefficents.shape)\n\n self.maxiter = velocity_recovery_iterations\n self.verbose = verbose\n\n current_time = start_time\n self.current_volume = initial_volume.copy()\n\n if save_path is not None:\n self._instantiate_save_folders( save_path )\n\n # Compute inherent erros due to ray model etc..\n interpolated_sinogram = self.sinogram_interpolator( start_time, original=True )[:,:,:]\n reconstructed_sinogram = self.ray_model.forward_project( initial_volume )\n residual = interpolated_sinogram - reconstructed_sinogram\n iL2 = np.linalg.norm( interpolated_sinogram - reconstructed_sinogram )\n iMaxAbs = np.max(np.abs(residual))\n iRMSE = np.sqrt( np.sum( residual**2 ) / len(residual.flatten()) )\n iMAE = np.sum( np.abs(residual) / len(residual.flatten()) )\n\n if verbose:\n print(\"##############################################################################\")\n print(\" R A Y M O D E L E R R O R \")\n print( \"L2 Norm: \", iL2 )\n print( \"Max.Abs: \", iMaxAbs )\n print( \"RMSE: \", iRMSE )\n print( \"MAE: \", iMAE )\n\n print(\"##############################################################################\")\n print(\" \")\n print(\"Starting propagation of density volume in time\")\n\n print(end_time, start_time, stepsize)\n number_of_timesteps = int( np.ceil( (end_time - start_time)/stepsize ) )\n for step in range(number_of_timesteps - 2): # we cannot integrate for the last step as the spline ends\n\n if self.verbose:\n print(\" \")\n print(\"@ time = \", current_time, \"s ,i.e, @timestep = \", step, \" integrating for timestep = \", step+1)\n print(\"The total number of temporal data points is \", number_of_timesteps)\n\n # Reinterpolation strategy\n if reinterpolation==\"original static bc\":\n pass\n elif reinterpolation==\"mutated moving bc\":\n self.sinogram_interpolator.soft_spline_reset()\n self.sinogram_interpolator.add_sinograms( [current_time], \n [self.ray_model.forward_project( self.current_volume )], \n resolution = self.stepsize/1000.,\n update_splines=False )\n bc_indx = np.argmin( np.abs( self.sinogram_interpolator.sample_times - current_time ) ) + 1\n self.sinogram_interpolator.set_bc_index(bc_indx)\n self.sinogram_interpolator._set_splines(local=current_time)\n elif reinterpolation==\"mutated static bc\":\n self.sinogram_interpolator.soft_spline_reset()\n self.sinogram_interpolator.add_sinograms( [current_time], \n [self.ray_model.forward_project( self.current_volume )], \n resolution = self.stepsize/1000.,\n update_splines=False )\n self.sinogram_interpolator.set_bc_index(0)\n self.sinogram_interpolator._set_splines()\n\n previous_volume = self.current_volume.copy()\n\n adapted_stepsize = np.min([ stepsize, end_time - current_time] ) # To handle the last step\n\n self.current_volume = utils.TVD_RK3_step( self.get_density_derivative, \n current_time, \n self.current_volume.copy(), \n adapted_stepsize )\n\n current_time += adapted_stepsize\n\n if verbose:\n original_sinogram = self.sinogram_interpolator( current_time, original=True )[:,:,:]\n starting_reconstructed_sinogram = self.ray_model.forward_project( previous_volume )\n reconstructed_sinogram = self.ray_model.forward_project( self.current_volume )\n residual = reconstructed_sinogram - original_sinogram\n print(\"Original default sinogram L2 error to current time 
(no motion): \", np.linalg.norm( original_sinogram - starting_reconstructed_sinogram) )\n print(\"Resulting sinogram L2 error to current time (by applying the motion): \", np.linalg.norm( original_sinogram - reconstructed_sinogram), \" (inherent L2\", iL2, \")\" )\n print(\"Sinogram residual Max.Abs error: \", np.max(np.abs(residual)), \" (inherent Max.Abs\", iMaxAbs, \")\" )\n print(\"Sinogram residual RMSE error: \", np.sqrt( np.sum( residual**2 ) / len(residual.flatten()) ), \" (inherent RMSE\", iRMSE, \")\" )\n print(\"Sinogram residual MAE error: \", np.sum( np.abs(residual) / len(residual.flatten()) ), \" (inherent MAE\", iMAE, \")\" )\n\n if save_path is not None:\n self._save_integration_step( save_path, self.current_volume, current_time, step )\n\n\n def _instantiate_save_folders(self, save_path):\n \"\"\"Setup a folder structure to save reconstruction progress.\n\n \"\"\"\n os.mkdir(save_path)\n for folder in [\"volumes\", \"projections\", \"velocity\"]:\n if folder not in os.listdir(save_path):\n os.mkdir(save_path + \"/\" + folder)\n\n np.save(save_path+\"/times\", np.array([]))\n\n meta_data = { \"detector dimension\" : self.ray_model.number_of_detector_pixels,\n \"angles\" : self.ray_model.angles,\n \"number of velocity basis functions\" : self.flow_model.velocity_basis.coefficents.shape[0],\n \"Finite volume cell size\" : self.flow_model.dx }\n\n np.save(save_path+\"/meta_data.npy\", meta_data)\n\n def _save_integration_step(self, save_path, current_volume, current_time, step ):\n \"\"\"Save volume and projections to disc in save_path directory.\n\n \"\"\"\n interpolated_sinogram = self.sinogram_interpolator( current_time, original=True )[:,:,:]\n reconstructed_sinogram = self.ray_model.forward_project( current_volume )\n times = np.load(save_path+\"/times.npy\")\n\n np.save( save_path + \"/volumes/volume_\"+str(step).zfill(4)+\".npy\", current_volume )\n\n utils.save_as_vtk_voxel_volume(save_path + \"/volumes/volume_\"+str(step).zfill(4) , current_volume, 3*[self.flow_model.dx], self.flow_model.origin)\n\n np.save( save_path + \"/projections/reconstructed_sinogram_\"+str(step).zfill(4)+\".npy\", reconstructed_sinogram )\n np.save( save_path + \"/projections/interpolated_sinogram_\"+str(step).zfill(4)+\".npy\", interpolated_sinogram )\n\n np.save( save_path + \"/times\", np.concatenate( [times, np.array([current_time]) ]) )\n\n np.save(save_path + \"/velocity/basis_coefficents_\"+str(step).zfill(4)+\".npy\", self._velocity_solver.optimal_coefficents)\n\n def save(self, file):\n \"\"\"Save the projected advection pde problem by pickling it to disc.\n\n Args:\n file (str): Absolute file path ending with the desired filename and no extensions.\n\n \"\"\"\n with open(file+\".papde\", 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n @classmethod\n def load(cls, file):\n \"\"\"Load a projected advection pde problem from a pickled file. \n\n Args:\n file (str): Absolute file path ending with the full filename. The extension\n should be \".papde\".\n\n \"\"\"\n with open(file, 'rb') as output:\n return pickle.load(output)"
] |
[
[
"numpy.abs",
"numpy.min",
"numpy.arange",
"numpy.linalg.norm",
"numpy.save",
"numpy.max",
"numpy.ceil",
"numpy.load",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum"
]
] |
byungsook/neural-flow-style
|
[
"7c7ac504474621685577ea43b8b805f22809ef3a"
] |
[
"styler_2p.py"
] |
[
"#############################################################\n# MIT License, Copyright © 2020, ETH Zurich, Byungsoo Kim\n#############################################################\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom tqdm import trange\nfrom util import *\nfrom transform import p2g\nimport vgg\nfrom styler_base import StylerBase\n\nclass Styler(StylerBase):\n def __init__(self, self_dict):\n StylerBase.__init__(self, self_dict)\n\n # particle position \n # shape: [N,2], scale: [0,1]\n p = []\n p_shp = [None,2]\n self.p = []\n \n # particle density shape\n r_shp = [None,1]\n self.r = []\n\n # particle color shape\n c_shp = [None,3]\n self.c = []\n\n # output and density field\n d = []\n d_gray = []\n\n self.opt_init = []\n self.opt_ph = []\n self.opt = []\n\n self.res = tf.compat.v1.placeholder(tf.int32, [2], name='resolution')\n\n for i in range(self.batch_size):\n # particle position, [N,2]\n p_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=p_shp, name='p%d' % i)\n self.p.append(p_)\n p_ = tf.expand_dims(p_, axis=0) # [1,N,2]\n p.append(p_[0])\n\n # particle density, [N,1]\n r_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=r_shp, name='r%d' % i)\n self.r.append(r_)\n r_ = tf.expand_dims(r_, axis=0) # [1,N,1]\n \n # position-based (SPH) density field estimation\n d_gray_ = p2g(p_, self.domain, self.res, self.radius, self.rest_density, self.nsize, support=self.support, clip=self.clip) # [B,N,2] -> [B,H,W,1]\n d_gray_ /= self.rest_density # normalize density\n d_gray.append(d_gray_)\n\n # particle color, [N,3]\n opt_ph = tf.compat.v1.placeholder(dtype=tf.float32, shape=c_shp, name='c_opt_ph%d' % i)\n self.opt_ph.append(opt_ph)\n opt_var = tf.Variable(opt_ph, validate_shape=False, name='c_opt%d' % i)\n self.opt.append(opt_var)\n opt_var_ = tf.reshape(opt_var, tf.shape(opt_ph))\n opt_var_ = tf.expand_dims(opt_var_, axis=0)\n \n # clip particle color\n c_ = tf.clip_by_value(opt_var_, 0, 1)\n\n # mask color\n self.c.append(c_[0]*tf.clip_by_value(r_[0]/self.rest_density, 0, 1))\n\n # position-based (SPH) color field estimation\n d_ = p2g(p_, self.domain, self.res, self.radius, self.rest_density, self.nsize, support=self.support, clip=self.clip,\n pc=c_, pd=r_) # [B,N,2] -> [B,H,W,3]\n \n d.append(d_)\n\n self.opt_init = tf.compat.v1.initializers.variables(self.opt)\n\n # particle position\n self.p_out = p # [N,2]*B\n \n # estimated color fields\n d = tf.concat(d, axis=0) # [B,H,W,3]\n\n # value clipping for rendering\n d = tf.clip_by_value(d, 0, 1)\n\n # estimated density fields for masking\n d_gray = tf.concat(d_gray, axis=0) # [B,H,W,1]\n\n # clamp density field [0,1]\n d_gray = tf.clip_by_value(d_gray, 0, 1)\n\n # mask for style features\n self.d_gray = d_gray\n\n # stylized result\n self.d_out = d*d_gray # [B,H,W,3]\n\n self._plugin_to_loss_net(d)\n\n def render_test(self, params):\n feed = {}\n feed[self.res] = self.resolution\n \n for i in range(self.batch_size):\n feed[self.p[i]] = params['p'][i]\n feed[self.r[i]] = params['r'][i]\n n = params['p'][i].shape[0]\n\n # feed[self.opt_ph[i]] = np.ones([n,3])\n c_init_shp = [n,3]\n c_init = self.rng.uniform(-5,5, c_init_shp).astype(np.float32)\n c_init += np.array([vgg._R_MEAN, vgg._G_MEAN, vgg._B_MEAN])\n feed[self.opt_ph[i]] = c_init/255\n\n self.sess.run(self.opt_init, feed)\n p_out, d_out, d_gray = self.sess.run([self.p_out, self.d_out, self.d_gray], feed)\n plt.subplot(121)\n plt.imshow(d_out[0])\n plt.subplot(122)\n plt.imshow(d_gray[0,...,0])\n plt.show()\n\n for i, 
p in enumerate(p_out):\n p[:,0] = p[:,0]*self.domain[0]\n p[:,1] = p[:,1]*self.domain[1]\n p_out[i] = np.stack([p[:,1],p[:,0]], axis=-1)\n v_ = None\n bbox = [\n [0,0,-1],\n [self.domain[1],self.domain[0],1],\n ]\n draw_pt(p_out, v_, bbox=bbox)\n return\n\n # save to image\n for t in trange(0,self.num_frames,self.batch_size):\n if t == 0:\n n = params['p'][0].shape[0]\n from matplotlib import cm\n c = cm.plasma(np.linspace(0,1,n))[...,:-1]\n \n for i in range(self.batch_size):\n feed[self.p[i]] = params['p'][t+i]\n feed[self.r[i]] = params['r'][t+i]\n if 'p' in self.target_field:\n feed[self.opt_ph[i]] = np.zeros([n,2])\n if 'c' in self.target_field:\n feed[self.opt_ph[i]] = c\n \n self.sess.run(self.opt_init, feed) \n d_out = self.sess.run(self.d_out, feed)\n if d_out.shape[-1] == 1:\n d_out = d_out[...,0] # [B,H,W]\n # plt.imshow(d_out[0])\n # plt.show()\n for i in range(self.batch_size): \n im = Image.fromarray((d_out[i]*255).astype(np.uint8))\n d_path = os.path.join(self.log_dir, '%03d.png' % (t+i))\n im.save(d_path)\n\n def run(self, params):\n # loss\n self._loss(params)\n\n # optimizer\n self.opt_lr = tf.compat.v1.placeholder(tf.float32)\n\n # settings for octave process\n oct_size = []\n hw = np.array(self.resolution)\n for _ in range(self.octave_n):\n oct_size.append(hw)\n hw = (hw//self.octave_scale).astype(np.int)\n oct_size.reverse()\n print('input size for each octave', oct_size)\n\n p = params['p']\n r = params['r']\n\n g_opt = []\n n = p[0].shape[0] # n is fixed\n # # same noise\n # c_opt_shp = [n, 3]\n # different noise\n c_opt_shp = [self.num_frames, n, 3]\n c_opt = self.rng.uniform(-5,5, c_opt_shp).astype(np.float32)\n c_opt += np.array([vgg._R_MEAN, vgg._G_MEAN, vgg._B_MEAN])\n c_opt /= 255 # [0,1]\n for i in range(self.num_frames):\n # # same noise\n # c_opt.append(c_opt)\n # different noise\n g_opt.append(c_opt[i])\n\n # optimize\n loss_history = []\n d_intm = []\n opt_ = {}\n for octave in trange(self.octave_n, desc='octave'):\n loss_history_o = []\n d_intm_o = []\n\n feed = {}\n feed[self.res] = oct_size[octave]\n if self.content_img is not None:\n feed[self.content_feature] = self._content_feature(\n self.content_img, oct_size[octave])\n\n if self.style_img is not None:\n style_features = self._style_feature(\n self.style_img, oct_size[octave])\n \n for i in range(len(self.style_features)):\n feed[self.style_features[i]] = style_features[i]\n\n if self.w_hist > 0:\n hist_features = self._hist_feature(\n self.style_img, oct_size[octave])\n \n for i in range(len(self.hist_features)):\n feed[self.hist_features[i]] = hist_features[i]\n\n if type(self.lr) == list:\n lr = self.lr[octave]\n else:\n lr = self.lr\n\n # optimizer list for each batch\n for step in trange(self.iter,desc='iter'):\n g_tmp = [None]*self.num_frames\n\n for t in range(0,self.num_frames,self.batch_size):\n for i in range(self.batch_size):\n feed[self.p[i]] = p[t+i]\n feed[self.r[i]] = r[t+i]\n feed[self.opt_ph[i]] = g_opt[t+i]\n \n # assign g_opt to self.opt through self.opt_ph\n self.sess.run(self.opt_init, feed)\n\n feed[self.opt_lr] = lr\n opt_id = t//self.frames_per_opt\n # opt_id = self.rng.randint(num_opt)\n if opt_id in opt_:\n train_op = opt_[opt_id]\n else:\n opt = tf.compat.v1.train.AdamOptimizer(learning_rate=self.opt_lr)\n train_op = opt.minimize(self.total_loss, var_list=self.opt)\n self.sess.run(tf.compat.v1.variables_initializer(opt.variables()), feed)\n opt_[opt_id] = train_op\n\n # optimize\n _, l_ = self.sess.run([train_op, self.total_loss], feed)\n loss_history_o.append(l_)\n\n 
g_opt_ = self.sess.run(self.opt, feed)\n for i in range(self.batch_size):\n g_tmp[t+i] = np.nan_to_num(g_opt_[i]) - g_opt[t+i]\n\n if step == self.iter-1 and octave < self.octave_n-1: # True or \n d_intm_ = self.sess.run(self.d_out, feed)\n d_intm_o.append((d_intm_*255).astype(np.uint8))\n\n # ## debug\n # d_gray = self.sess.run(self.d_gray, feed)\n # plt.subplot(121)\n # plt.imshow(d_intm_[0,...])\n # plt.subplot(122)\n # plt.imshow(d_gray[0,...,0])\n # plt.show()\n\n #########\n # gradient alignment\n if self.window_sigma > 0 and self.num_frames > 1:\n g_tmp = denoise(g_tmp, sigma=(self.window_sigma,0,0))\n\n for t in range(self.num_frames):\n g_opt[t] += g_tmp[t]\n\n loss_history.append(loss_history_o)\n if octave < self.octave_n-1:\n d_intm.append(np.concatenate(d_intm_o, axis=0))\n\n # gather outputs\n result = {\n 'l': loss_history, 'd_intm': d_intm,\n }\n\n # final inference\n c_sty = [None]*self.num_frames\n d_sty = [None]*self.num_frames\n for t in range(0,self.num_frames,self.batch_size):\n for i in range(self.batch_size):\n feed[self.p[i]] = p[t+i]\n feed[self.r[i]] = r[t+i]\n feed[self.opt_ph[i]] = g_opt[t+i]\n\n self.sess.run(self.opt_init, feed) \n p_, d_ = self.sess.run([self.p_out, self.d_out], feed)\n c_ = self.sess.run(self.c, feed)\n \n for i in range(self.batch_size):\n c_sty[t+i] = c_[i]\n\n d_ = (d_*255).astype(np.uint8)\n d_sty[t:t+self.batch_size] = d_\n\n result['c'] = c_sty\n result['d'] = np.array(d_sty)\n\n return result"
] |
[
[
"tensorflow.clip_by_value",
"matplotlib.pyplot.imshow",
"tensorflow.concat",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.Variable",
"tensorflow.shape",
"numpy.linspace",
"tensorflow.expand_dims",
"numpy.stack",
"numpy.nan_to_num",
"numpy.concatenate",
"tensorflow.compat.v1.placeholder",
"matplotlib.pyplot.subplot",
"tensorflow.compat.v1.initializers.variables",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros"
]
] |
20chase/cartpole_rl
|
[
"687fc30f7e69f4850c545dce74f4e844d75fd732",
"687fc30f7e69f4850c545dce74f4e844d75fd732"
] |
[
"progress_plotter.py",
"sac/play_gym.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef df_plot(dfs, x, ys, ylim=None, legend_loc='best'):\n plt.rcParams[\"figure.figsize\"] = (10, 5)\n plt.style.use('ggplot')\n if ylim:\n plt.ylim(ylim)\n\n plt.plot(dfs[x]/3600, dfs[ys], linewidth=1, label=ys)\n plt.xlabel(x)\n plt.legend(loc=legend_loc)\n plt.show()\n\ndef main():\n filepath='./ppo_dm/log/progress.csv'\n dataframes = []\n\n data = pd.read_csv(filepath)\n df_plot(data, 'time_elapsed', 'policy_entropy')\n df_plot(data, 'time_elapsed', 'eprewmean')\n df_plot(data, 'time_elapsed', 'policy_loss')\n df_plot(data, 'time_elapsed', 'explained_variance', ylim=(-1, 1))\n df_plot(data, 'time_elapsed', 'lr')\n df_plot(data, 'time_elapsed', 'value_loss')\n df_plot(data, 'time_elapsed', 'approxkl')\n\nif __name__ == '__main__':\n main()\n",
"import argparse\nimport gym\nimport time\nimport os, sys\n\nimport numpy as np\nimport tensorflow as tf\nfrom spinup.utils.logx import EpochLogger\n\nfrom sac import SAC \n\nparser = argparse.ArgumentParser(description='prioritized deep deterministic policy gradient algorithm')\n\nparser.add_argument(\n '--train', action='store_true')\n\nparser.add_argument(\n '--lr', default=1e-3, type=float, help='learning rate')\n\nparser.add_argument(\n '--gamma', default=.99, type=float, help='gamma')\n\nparser.add_argument(\n '--nenvs', default=5, type=int, help='the number of processes')\n\nparser.add_argument(\n '--batch_size', default=1024, type=int, help='training batch size')\n\nparser.add_argument(\n '--replay_size', default=1000000, type=int, help='the size of replay buffer')\n\nparser.add_argument(\n '--save', default=False, type=bool, help='whether to save network')\n\nparser.add_argument(\n '--load', action=\"store_true\")\n\nparser.add_argument(\n '--gym_id', default='Humanoid-v2', type=str, help='gym id')\n\nparser.add_argument(\n '--seed', default=0, type=int)\n\nargs = parser.parse_args()\n\nclass PlayGym(object):\n def __init__(self, \n args, \n train_env, test_env, \n agent):\n\n self.args = args\n self.train_env = train_env\n self.test_env = test_env\n self.agent = agent\n\n self.nsteps = 200\n\n def learn(self, \n start_steps=10000, \n steps_per_epoch=5000, \n epochs=1000,\n max_ep_len=1000):\n\n ob = self.train_env.reset()\n ep_len = 0\n ep_ret = 0\n\n total_steps = steps_per_epoch * epochs\n for t in range(total_steps):\n if t > start_steps:\n act = self.agent.action([ob])[0]\n else:\n act = self.train_env.action_space.sample()\n\n new_ob, rew, done, _ = self.train_env.step(act)\n ep_len += 1\n ep_ret += rew\n\n done = False if ep_len==max_ep_len else done\n\n self.agent.replay_buffer.store(ob, act, rew, new_ob, done)\n\n ob = new_ob\n\n if done or (ep_len==max_ep_len):\n for _ in range(int(ep_len / 10)):\n self.agent.train()\n\n print(\"time_step {}: {}\".format(t, ep_ret))\n\n ob = self.train_env.reset()\n ep_len = 0\n ep_ret = 0\n\n if t % 100000 == 0:\n self.agent.save_net(\"./log/{}\".format(int(t / 10000)))\n\n def play(self):\n ob = self.test_env.reset()\n done = False\n total_rew = 0\n while not done:\n act = self.agent.action([ob], test=True)[0]\n self.test_env.render()\n ob, rew, done, _ = self.test_env.step(act)\n total_rew += rew\n \n print(\"reward: {}\".format(total_rew))\n\nclass MakeEnv(object):\n def __init__(self):\n pass\n\n def make(self):\n return gym.make(args.gym_id)\n\nif __name__ == '__main__':\n curr_path = sys.path[0]\n graph = tf.get_default_graph()\n config = tf.ConfigProto()\n session = tf.Session(graph=graph, config=config)\n\n maker = MakeEnv() \n train_env, test_env = maker.make(), maker.make()\n \n ob_space = train_env.observation_space\n ac_space = train_env.action_space\n print(ac_space.high[0])\n\n agent = SAC(session, args, \n ob_space.shape[0], ac_space.shape[0], \n ac_space.high[0])\n\n player = PlayGym(\n args, train_env, test_env, agent\n )\n\n session.run(tf.global_variables_initializer())\n session.run(agent.target_init)\n if args.load:\n agent.load_net(\"./log/290\")\n if args.train:\n player.learn(epochs=3000)\n else:\n for _ in range(100):\n player.play()\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use"
],
[
"tensorflow.ConfigProto",
"tensorflow.get_default_graph",
"tensorflow.global_variables_initializer",
"tensorflow.Session"
]
] |
Skyblueballykid/Draftkings-2016-2017
|
[
"ef41be4d98c004bf6b2eb9f4aa2779e665981086"
] |
[
"XGBoost/XGBClassifier.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn as sk\nimport sklearn.multiclass as mk\nimport patsy as pt\nimport xgboost as xgboost\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import gradient_boosting\nfrom patsy import dmatrix, dmatrices, demo_data, ContrastMatrix, Poly\nfrom numpy import loadtxt\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nNBA = loadtxt('2016_2017_NBA_Binned_5.csv', delimiter=\",\")\n\n# split into input (X) and output (Y) variables\nX = NBA[:,0:17]\nY = NBA[:,18]\n\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.75, random_state=0)\n\n\n# fit model no training data\nmodel = xgboost. XGBClassifier(base_score=0.5, colsample_bylevel=1, colsample_bytree=1,\n gamma=1.005, learning_rate=0.1, max_delta_step=0, max_depth=3,\n min_child_weight=1, missing=None, n_estimators=100, nthread=-1,\n objective='binary:logistic', reg_alpha=0, reg_lambda=1,\n scale_pos_weight=1, seed=0, silent=True, subsample=1)\nmodel.fit(X_train, y_train)\n\n# make predictions for test data\ny_pred = model.predict(X_test)\npredictions = [round(value) for value in y_pred]\n\n# evaluate predictions\naccuracy = accuracy_score(y_test, predictions)\nprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"numpy.loadtxt",
"sklearn.metrics.accuracy_score"
]
] |
ThibaultLatrille/MutationSelectionDrift
|
[
"7b9e4fe5b181413823ddba9b637af553f977836c"
] |
[
"DataEmpirical/OrthoMam/orthomam_filter.py"
] |
[
"#!python3\nimport pandas as pd\nfrom collections import Counter\nimport os\n\n\ndef save_df(name, col):\n col.to_csv(\"./\" + name, index=False, header=None)\n print(\"{0} CDS saved into '{1}'\".format(len(col), name))\n\n\ndf_orthomam_cds = pd.read_csv(\"./orthomam.tsv\", sep=\"\\t\")\ncol_orthomam_cds = df_orthomam_cds[\"gene\"]\nprint(\"{0} CDS with mutsel inference.\".format(len(col_orthomam_cds)))\n\ncol_pursel_cds = df_orthomam_cds.query(\"pp_m2a < 0.1 and pp_mutsel < 0.1 and pp_mutselfix < 0.1\")[\"gene\"]\nprint(\"{0} CDS with mutsel inference not under positive selection\".format(len(col_pursel_cds)))\n\nlist_folder_cds = [i.replace(\".ali\", \"\") for i in os.listdir('./singlegene_alignments')]\nprint(\"{0} CDS in the folder 'singlegene_alignments'.\".format(len(list_folder_cds)))\n\ncol_filtered_cds = col_pursel_cds[col_pursel_cds.isin(list_folder_cds)]\nprint(\"{0} CDS in the folder 'singlegene_alignments' and not under positive selection.\".format(len(col_filtered_cds)))\nsave_df(\"cds.filtered.list\", col_filtered_cds)\n\n\ndef coverage(filepath):\n cov = 0.0\n nbr_seqs = 0\n with open(filepath, 'r') as ali_file:\n next(ali_file)\n for line in ali_file:\n if line != \"\\n\":\n name, seq = line.replace(\" \", \" \").replace(\"\\n\", \"\").split(\" \")\n count = Counter(seq.upper())\n cov += (count[\"A\"] + count[\"C\"] + count[\"G\"] + count[\"T\"]) / len(seq)\n nbr_seqs += 1\n return cov / nbr_seqs\n\n\nthreshold = 0.99\nlist_high_coverage_cds = [i for i in list_folder_cds if coverage('./singlegene_alignments/{0}.ali'.format(i)) >= threshold]\ncol_filtered_high_coverage_cds = col_filtered_cds[col_filtered_cds.isin(list_high_coverage_cds)]\nprint(\"{0} CDS are not under positive selection and with a coverage >{1}.\".format(len(col_filtered_high_coverage_cds), threshold))\nsave_df(\"cds.filtered.highcoverage.list\", col_filtered_high_coverage_cds)\n\ncol_high_coverage_cds = col_orthomam_cds[col_orthomam_cds.isin(list_high_coverage_cds)]\nprint(\"{0} CDS are with a coverage >{1}.\".format(len(col_high_coverage_cds), threshold))\nsave_df(\"cds.highcoverage.list\", col_high_coverage_cds)\n"
] |
[
[
"pandas.read_csv"
]
] |
kimjungwow/onnxruntime-riscv
|
[
"1ab8a95eb6675afb6d0ad9d93600ef0022e2ddb5",
"3c21abef03190648fe68a6633ac026725e6dfc58"
] |
[
"onnxruntime/python/tools/quantization/E2E_example_model/object_detection/trt/yolov3/preprocessing.py",
"systolic_runner/rcnn_runner/ort_native.py"
] |
[
"import os\nimport sys\nimport numpy as np\nimport re\nfrom PIL import Image\nimport cv2\nimport pdb\n\ndef yolov3_preprocess_func(images_folder, height, width, start_index=0, size_limit=0):\n '''\n Loads a batch of images and preprocess them\n parameter images_folder: path to folder storing images\n parameter height: image height in pixels\n parameter width: image width in pixels\n parameter size_limit: number of images to load. Default is 0 which means all images are picked.\n return: list of matrices characterizing multiple images\n '''\n # this function is from yolo3.utils.letterbox_image\n # https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/utils.py\n def letterbox_image(image, size):\n '''resize image with unchanged aspect ratio using padding'''\n iw, ih = image.size\n w, h = size\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n\n image = image.resize((nw,nh), Image.BICUBIC)\n new_image = Image.new('RGB', size, (128,128,128))\n new_image.paste(image, ((w-nw)//2, (h-nh)//2))\n return new_image\n\n image_names = os.listdir(images_folder)\n if start_index >= len(image_names):\n return np.asanyarray([]), np.asanyarray([]), np.asanyarray([])\n elif size_limit > 0 and len(image_names) >= size_limit:\n end_index = start_index + size_limit\n if end_index > len(image_names):\n end_index = len(image_names)\n\n batch_filenames = [image_names[i] for i in range(start_index, end_index)]\n else:\n batch_filenames = image_names\n\n\n unconcatenated_batch_data = []\n image_size_list = []\n\n print(batch_filenames)\n print(\"size: %s\" % str(len(batch_filenames)))\n\n for image_name in batch_filenames:\n image_filepath = images_folder + '/' + image_name\n img = Image.open(image_filepath) \n model_image_size = (height, width)\n boxed_image = letterbox_image(img, tuple(reversed(model_image_size)))\n image_data = np.array(boxed_image, dtype='float32')\n image_data /= 255.\n image_data = np.transpose(image_data, [2, 0, 1])\n image_data = np.expand_dims(image_data, 0)\n unconcatenated_batch_data.append(image_data)\n image_size_list.append(np.array([img.size[1], img.size[0]], dtype=np.float32).reshape(1, 2))\n\n batch_data = np.concatenate(np.expand_dims(unconcatenated_batch_data, axis=0), axis=0)\n return batch_data, batch_filenames, image_size_list\n\ndef yolov3_preprocess_func_2(images_folder, height, width, start_index=0, size_limit=0):\n '''\n Loads a batch of images and preprocess them\n parameter images_folder: path to folder storing images\n parameter height: image height in pixels\n parameter width: image width in pixels\n parameter size_limit: number of images to load. 
Default is 0 which means all images are picked.\n return: list of matrices characterizing multiple images\n '''\n\n # reference from here:\n # https://github.com/jkjung-avt/tensorrt_demos/blob/3fb15c908b155d5edc1bf098c6b8c31886cd8e8d/utils/yolo.py#L60\n def _preprocess_yolo(img, input_shape):\n \"\"\"Preprocess an image before TRT YOLO inferencing.\n # Args\n img: int8 numpy array of shape (img_h, img_w, 3)\n input_shape: a tuple of (H, W)\n # Returns\n preprocessed img: float32 numpy array of shape (3, H, W)\n \"\"\"\n img = cv2.resize(img, (input_shape[1], input_shape[0]))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.transpose((2, 0, 1)).astype(np.float32)\n img /= 255.0\n return img\n\n image_names = os.listdir(images_folder)\n if start_index >= len(image_names):\n return np.asanyarray([]), np.asanyarray([]), np.asanyarray([])\n elif size_limit > 0 and len(image_names) >= size_limit:\n end_index = start_index + size_limit\n if end_index > len(image_names):\n end_index = len(image_names)\n\n batch_filenames = [image_names[i] for i in range(start_index, end_index)]\n else:\n batch_filenames = image_names\n\n unconcatenated_batch_data = []\n image_size_list = []\n\n print(batch_filenames)\n print(\"size: %s\" % str(len(batch_filenames)))\n\n for image_name in batch_filenames:\n image_filepath = images_folder + '/' + image_name\n model_image_size = (height, width)\n\n img = cv2.imread(image_filepath)\n image_data = _preprocess_yolo(img, tuple(model_image_size)) \n image_data = np.ascontiguousarray(image_data)\n image_data = np.expand_dims(image_data, 0)\n unconcatenated_batch_data.append(image_data)\n _height, _width, _ = img.shape\n # image_size_list.append(img.shape[0:2]) # img.shape is h, w, c\n image_size_list.append(np.array([img.shape[0], img.shape[1]], dtype=np.float32).reshape(1, 2))\n\n batch_data = np.concatenate(np.expand_dims(unconcatenated_batch_data, axis=0), axis=0)\n return batch_data, batch_filenames, image_size_list\n\ndef yolov3_variant_preprocess_func(images_folder, height, width, start_index=0, size_limit=0):\n '''\n Loads a batch of images and preprocess them\n parameter images_folder: path to folder storing images\n parameter height: image height in pixels\n parameter width: image width in pixels\n parameter size_limit: number of images to load. 
Default is 0 which means all images are picked.\n return: list of matrices characterizing multiple images\n '''\n\n # reference from here:\n # https://github.com/jkjung-avt/tensorrt_demos/blob/3fb15c908b155d5edc1bf098c6b8c31886cd8e8d/utils/yolo.py#L60\n def _preprocess_yolo(img, input_shape):\n \"\"\"Preprocess an image before TRT YOLO inferencing.\n # Args\n img: int8 numpy array of shape (img_h, img_w, 3)\n input_shape: a tuple of (H, W)\n # Returns\n preprocessed img: float32 numpy array of shape (3, H, W)\n \"\"\"\n img = cv2.resize(img, (input_shape[1], input_shape[0]))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.transpose((2, 0, 1)).astype(np.float32)\n img /= 255.0\n return img\n\n image_names = os.listdir(images_folder)\n if start_index >= len(image_names):\n return np.asanyarray([]), np.asanyarray([]), np.asanyarray([])\n elif size_limit > 0 and len(image_names) >= size_limit:\n end_index = start_index + size_limit\n if end_index > len(image_names):\n end_index = len(image_names)\n\n batch_filenames = [image_names[i] for i in range(start_index, end_index)]\n else:\n batch_filenames = image_names\n\n unconcatenated_batch_data = []\n image_size_list = []\n\n print(batch_filenames)\n print(\"size: %s\" % str(len(batch_filenames)))\n\n for image_name in batch_filenames:\n image_filepath = images_folder + '/' + image_name\n model_image_size = (height, width)\n\n img = cv2.imread(image_filepath)\n image_data = _preprocess_yolo(img, tuple(model_image_size))\n image_data = np.ascontiguousarray(image_data)\n image_data = np.expand_dims(image_data, 0)\n unconcatenated_batch_data.append(image_data)\n _height, _width, _ = img.shape\n image_size_list.append(img.shape[0:2]) # img.shape is h, w, c\n\n batch_data = np.concatenate(np.expand_dims(unconcatenated_batch_data, axis=0), axis=0)\n return batch_data, batch_filenames, image_size_list\n\n\n# This is for special tuned yolov3 model\ndef yolov3_variant_preprocess_func_2(images_folder, height, width, start_index=0, size_limit=0):\n def letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):\n # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232\n shape = img.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = new_shape\n ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return img, ratio, (dw, dh)\n\n image_names = os.listdir(images_folder)\n if start_index >= len(image_names):\n return np.asanyarray([]), np.asanyarray([]), np.asanyarray([])\n elif 
size_limit > 0 and len(image_names) >= size_limit:\n end_index = start_index + size_limit\n if end_index > len(image_names):\n end_index = len(image_names)\n\n batch_filenames = [image_names[i] for i in range(start_index, end_index)]\n else:\n batch_filenames = image_names\n\n unconcatenated_batch_data = []\n image_size_list = []\n\n print(batch_filenames)\n print(\"size: %s\" % str(len(batch_filenames)))\n\n for image_name in batch_filenames:\n image_filepath = images_folder + '/' + image_name\n img0 = cv2.imread(image_filepath)\n img = letterbox(img0, new_shape=(height, width), auto=False)[0]\n img = img[:, :, ::-1].transpose(2, 0, 1)\n img = np.expand_dims(img, axis=0)\n img = np.repeat(img, 1, axis=0)\n\n img = img.astype('float32') / 255.0\n\n unconcatenated_batch_data.append(img)\n image_size_list.append(img0.shape[0:2]) # img.shape is h, w, c\n\n batch_data = np.concatenate(np.expand_dims(unconcatenated_batch_data, axis=0), axis=0)\n return batch_data, batch_filenames, image_size_list\n",
"import onnxruntime as rt\nimport numpy\nimport argparse\nimport numpy as np\nimport cv2\n\nparser = argparse.ArgumentParser(description='ImageNet native ORT')\nparser.add_argument('--model', type=str, required=True)\nparser.add_argument('--image', type=str, required=True)\nargs, args_other = parser.parse_known_args()\n\n# sess_options = rt.SessionOptions()\n# sess_options.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED\n\n\nsess = rt.InferenceSession(args.model)\ninput_name = sess.get_inputs()[0].name\n\n\ndef get_expected(path):\n return int(path.split('/')[-2])\n\n# This assumes a preprocessing for model exported by pytorch\n# Model from model zoo is different\ndef load_image(img_path):\n # CV loads in BGR, and rcnn expects rgb\n loaded = cv2.imread(img_path)\n loaded = cv2.cvtColor(loaded, cv2.COLOR_BGR2RGB)\n img_data = loaded.transpose(2, 0, 1)\n\n # The mean values provided are in RGB format\n mean_vec = np.array([0.485, 0.456, 0.406])\n stddev_vec = np.array([0.229, 0.224, 0.225])\n\n norm_img_data = np.zeros(img_data.shape).astype('float32')\n for i in range(img_data.shape[0]): \n norm_img_data[i,:,:] = (img_data[i,:,:]/255 - mean_vec[i]) / stddev_vec[i]\n norm_img_data = np.expand_dims(norm_img_data, axis=0)\n return norm_img_data\n\ndef perform_inference(file):\n preprocessed = load_image(file)\n print(file)\n result = sess.run(None, {input_name: preprocessed})\n import pdb; pdb.set_trace()\n\n\nperform_inference(args.image)\n"
] |
[
[
"numpy.expand_dims",
"numpy.ascontiguousarray",
"numpy.asanyarray",
"numpy.transpose",
"numpy.mod",
"numpy.repeat",
"numpy.array"
],
[
"numpy.array",
"numpy.expand_dims",
"numpy.zeros"
]
] |
hhsecond/mlflow-redisai-pytorch-demo
|
[
"b1bd6b670f1df6f92ec72773bccd08d13f6f5906"
] |
[
"save_classifier.py"
] |
[
"import torchvision.models as models\nimport mlflow.pytorch\nimport torch\nfrom torchvision import transforms\nimport PIL\n\n\nwith mlflow.start_run() as run:\n # Actual training loop would have come here\n normalize = transforms.Compose( # noqa\n [transforms.ToTensor(), # noqa\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])] # noqa\n )\n image = normalize(PIL.Image.open('fish.jpg'))\n model = models.resnet18(pretrained=True).eval()\n out = model(image.unsqueeze(0))\n scripted_model = torch.jit.script(model)\n print(\"Saving TorchScript Model ..\")\n mlflow.pytorch.log_model(scripted_model, artifact_path='classifier')\n"
] |
[
[
"torch.jit.script"
]
] |
mhsekhavat/neuralet
|
[
"c44dc8f11a250524caa1ac77a7cf65dc393f2a8e"
] |
[
"coral-dev-board/inception_v4_299_quant/src/server-example.py"
] |
[
"import time\nfrom multiprocessing.managers import BaseManager\nfrom queue import Queue\nimport numpy as np\nimport sys\nimport os\nimport wget \n\nfrom tflite_runtime.interpreter import load_delegate\nfrom tflite_runtime.interpreter import Interpreter\n\nHOST = '127.0.0.1'\nINPUT_PORT = 50000\nINPUT_AUTH = b'inputpass'\nOUTPUT_PORT = 50001\nOUTPUT_AUTH = b'outpass'\ninput_queue = Queue()\noutput_queue = Queue()\n\nclass QueueManager(BaseManager): pass\n\nQueueManager.register('get_input_queue', callable=lambda:input_queue)\ninput_manager = QueueManager(address=(HOST, INPUT_PORT), authkey=INPUT_AUTH)\ninput_manager.start()\n\nQueueManager.register('get_output_queue', callable=lambda:output_queue)\noutput_manager = QueueManager(address=(HOST, OUTPUT_PORT), authkey=OUTPUT_AUTH)\noutput_manager.start()\n\nmodel_file = 'inception_v4_299_quant_edgetpu.tflite'\nmodel_name = 'inception_v4_299_quant'\nmodel_path = 'data/models/' + model_name + '/' + model_file\n\nbase_url = 'https://raw.githubusercontent.com/neuralet/neuralet-models/master/edge-tpu/'\nurl = base_url + model_name + '/' + model_file\n\nif not os.path.isfile(model_path):\n print('model does not exist, downloading from ', url)\n wget.download(url, model_path)\n\n\ndef main():\n\n interpreter = Interpreter(model_path, experimental_delegates=[load_delegate(\"libedgetpu.so.1\")])\n\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n input_queue = input_manager.get_input_queue()\n output_queue = output_manager.get_output_queue()\n\n print('------------------------------------------------------------------------------------------')\n print('Started Inference server, waiting for incoming requests ... (send \\'stop\\' to kill server)')\n print('------------------------------------------------------------------------------------------')\n\n while True:\n data = input_queue.get()\n print('recieved data with type ', type(data))\n\n if type(data) == str and data == \"stop\": break\n \n if type(data) == np.ndarray:\n input_image = np.expand_dims(data, axis=0)\n interpreter.set_tensor(input_details[0][\"index\"], input_image)\n t_begin = time.perf_counter()\n interpreter.invoke()\n inference_time = time.perf_counter() - t_begin\n net_output = interpreter.get_tensor(output_details[0][\"index\"])\n print('inference output: ', net_output , ', done in ', inference_time, ' seconds' )\n output_queue.put(net_output)\n\n # End while\n\n input_manager.shutdown()\n output_manager.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.expand_dims"
]
] |
franckfotso/cbir_binary_code
|
[
"90475b3b92285851cc8da2c38029a43aaa28f3d0"
] |
[
"libs/cvprw15/feat_tools.py"
] |
[
"# Project: cbir_binary_code\n# File: feat_tools\n# Written by: Franck FOTSO\n# Licensed: MIT License\n# Copyright (c) 2017\n\nimport caffe\nimport numpy as np\nimport progressbar\nimport datetime\nimport math\n\ndef pycaffe_init_feat(use_gpu, model_prototxt, model_file):\n print ('model_prototxt: {}'.format(model_prototxt))\n print ('model_file: {}'.format(model_file))\n\n if use_gpu == 0:\n caffe.set_mode_cpu()\n print ('Using CPU Mode')\n else:\n caffe.set_mode_gpu()\n caffe.set_device(0)\n print ('Using GPU Mode')\n\n net = caffe.Net(str(model_prototxt), str(model_file), caffe.TEST)\n return net\n\n\ndef prepare_batch(image_files, transformer):\n l_im_transf = []\n for im_pn in image_files:\n try:\n im = caffe.io.load_image(im_pn)\n im_transf = transformer.preprocess('data', im)\n l_im_transf.append(im_transf)\n except:\n print ('prepare_batch> IOError: cannot identify image file - {}'.format(im_pn))\n\n return l_im_transf\n\n\ndef pycaffe_batch_feat(list_im, net, feat_len, ilsvrc_2012_mean_pn, layer_n):\n batch_size = 10 # RFM: init_val=10\n dim = feat_len\n for im_pn in list_im:\n #print im_pn\n pass\n\n # Adjust the batch size and dim to match with models/bvlc_reference_caffenet/deploy.prototxt\n if (len(list_im) % batch_size):\n print ('Assuming batches of {} images rest will be filled with zeros').format(batch_size)\n\n # init caffe network (spews logging info)\n # net = pycaffe_init_feat(use_gpu, model_prototxt, model_file)\n\n # load the mean ImageNet image (as distributed with Caffe) for subtraction\n mu = np.load(ilsvrc_2012_mean_pn)\n mu = mu.mean(1).mean(1) # average over pixels to obtain the mean (BGR) pixel values\n # print 'mean-subtracted values:', zip('BGR', mu)\n\n # create transformer for the input called 'data'\n transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\n transformer.set_transpose('data', (2, 0, 1)) # move image channels to outermost dimension\n transformer.set_mean('data', mu) # subtract the dataset-mean value in each channel\n transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]\n transformer.set_channel_swap('data', (2, 1, 0)) # swap channels from RGB to BGR\n\n # prepare input\n num_images = len(list_im)\n scores = np.zeros((num_images, dim), dtype=\"float32\")\n num_batches = int(math.ceil(float(len(list_im)) / float(batch_size)))\n # print num_batches\n\n # monitor tasks\n step_title = layer_n + ', pycaffe_batch_feat: '\n widgets = [step_title, progressbar.Percentage(),\n \" \", progressbar.Bar(), \" \", progressbar.ETA()]\n pbar = progressbar.ProgressBar(maxval=num_batches, widgets=widgets).start()\n\n # loop over all images\n for bb in range(0, num_batches):\n im_range = range(batch_size * (bb), min(num_images, batch_size * (bb + 1)))\n # print im_range\n startTime = datetime.datetime.now()\n sub_list_im = []\n for v in im_range:\n sub_list_im.append(list_im[v])\n # print sub_list_im\n l_im_transf = prepare_batch(sub_list_im, transformer)\n # print ('Batch {} out of {} => {}% Done on {} seconds').format((bb+1),num_batches,\\\n # (float(bb+1)/float(num_batches))*100,(datetime.datetime.now() - startTime).total_seconds())\n\n # with the default; we can also change it later, e.g., for different batch sizes)\n net.blobs['data'].reshape(len(im_range), # batch size default(50)\n 3, # 3-channel (BGR) images\n 227, 227) # image size is 227x227\n net.blobs['data'].data[...] 
= l_im_transf\n\n # foward propagation\n output = net.forward()\n # the output probability vector for the batch\n output_prob = output[layer_n]\n scores[im_range] = output_prob\n pbar.update(bb)\n\n pbar.finish()\n\n return scores\n"
] |
[
[
"numpy.load",
"numpy.zeros"
]
] |
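The record above collapses the ILSVRC mean image with two chained `mean(1)` calls and preallocates the feature matrix with `numpy.zeros`. A minimal, self-contained sketch of that pattern; the file name and shapes are illustrative stand-ins, not the real mean file:

```python
import numpy as np

# stand-in for the ILSVRC 2012 mean file (real one ships with Caffe)
np.save("ilsvrc_2012_mean.npy", np.random.rand(3, 256, 256))

mu = np.load("ilsvrc_2012_mean.npy")
mu = mu.mean(1).mean(1)                           # (3, H, W) -> (3,) per-channel BGR means
scores = np.zeros((100, 4096), dtype="float32")   # num_images x feat_len output buffer
print(mu.shape, scores.shape)                     # (3,) (100, 4096)
```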
RMeli/spyrmsd
|
[
"3c11da5f33892e260055041df1b6b5179790c5f8"
] |
[
"tests/test_molecule.py"
] |
[
"import copy\nimport os\nfrom collections import defaultdict\nfrom typing import DefaultDict, List, Tuple\n\nimport numpy as np\nimport pytest\n\nfrom spyrmsd import constants, graph, io, molecule, utils\nfrom tests import molecules\n\n\n# atoms is a list of atomic numbers and atom counts\[email protected](\n \"mol, atoms\",\n [\n (molecules.benzene, [(1, 6), (6, 6)]),\n (molecules.ethanol, [(1, 6), (6, 2), (8, 1)]),\n (molecules.dialanine, [(1, 12), (6, 6), (7, 2), (8, 3)]),\n ],\n)\ndef test_load(mol: molecule.Molecule, atoms: List[Tuple[int, int]]) -> None:\n\n n = sum([n_atoms for _, n_atoms in atoms])\n\n assert len(mol) == n\n assert mol.atomicnums.shape == (n,)\n assert mol.coordinates.shape == (n, 3)\n\n # Count number of atoms of different elements\n atomcount: DefaultDict[int, int] = defaultdict(int)\n for atomicnum in mol.atomicnums:\n atomcount[atomicnum] += 1\n\n assert len(atomcount) == len(atoms)\n\n for Z, n_atoms in atoms:\n assert atomcount[Z] == n_atoms\n\n\ndef test_loadall() -> None:\n\n path = os.path.join(molecules.molpath, \"1cbr_docking.sdf\")\n\n mols = io.loadall(path)\n\n assert len(mols) == 10\n\n\[email protected](\"mol\", molecules.allmolecules)\ndef test_molecule_translate(mol: molecule.Molecule) -> None:\n\n mt = copy.deepcopy(mol)\n\n t = np.array([0.5, 1.1, -0.1])\n mt.translate(t)\n\n for tcoord, coord in zip(mt.coordinates, mol.coordinates):\n assert np.allclose(tcoord - t, coord)\n\n\[email protected](\"mol\", molecules.allmolecules)\ndef test_molecule_rotate_z(mol: molecule.Molecule) -> None:\n\n z_axis = np.array([0, 0, 1])\n\n for angle in [0, 45, 90]:\n\n rotated = np.zeros((len(mol), 3))\n for i, coord in enumerate(mol.coordinates):\n rotated[i] = utils.rotate(coord, angle, z_axis, units=\"deg\")\n\n mol.rotate(angle, z_axis, units=\"deg\")\n\n assert np.allclose(mol.coordinates, rotated)\n\n # Reset\n mol.rotate(-angle, z_axis, units=\"deg\")\n\n\[email protected](\"mol\", molecules.allmolecules)\ndef test_molecule_rotate(mol: molecule.Molecule) -> None:\n\n axis = np.random.rand(3)\n\n for angle in np.random.rand(10) * 180:\n\n rotated = np.zeros((len(mol), 3))\n for i, coord in enumerate(mol.coordinates):\n rotated[i] = utils.rotate(coord, angle, axis, units=\"deg\")\n\n mol.rotate(angle, axis, units=\"deg\")\n\n assert np.allclose(mol.coordinates, rotated)\n\n # Reset\n mol.rotate(-angle, axis, units=\"deg\")\n\n\ndef test_molecule_center_of_geometry_benzene() -> None:\n\n mol = molecules.benzene\n\n assert np.allclose(mol.center_of_geometry(), np.zeros(3))\n\n\ndef test_molecule_center_of_mass_benzene() -> None:\n\n mol = molecules.benzene\n\n assert np.allclose(mol.center_of_mass(), np.zeros(3))\n\n\ndef test_molecule_center_of_mass_H2() -> None:\n\n atomicnums = [1, 1]\n coordinates = [[0.0, 0.0, -1.0], [0.0, 0.0, 1.0]]\n\n mol = molecule.Molecule(atomicnums, coordinates)\n\n assert np.allclose(mol.center_of_mass(), np.zeros(3))\n\n\ndef test_molecule_center_of_mass_HF() -> None:\n\n atomicnums = [1, 9]\n coordinates = [[0.0, 0.0, -1.0], [0.0, 0.0, 1.0]]\n\n H_mass = constants.anum_to_mass[1]\n F_mass = constants.anum_to_mass[9]\n\n z_com = (-H_mass + F_mass) / (H_mass + F_mass)\n\n mol = molecule.Molecule(atomicnums, coordinates)\n\n assert np.allclose(mol.center_of_mass(), np.array([0, 0, z_com]))\n\n\[email protected](\n \"mol, n_atoms, stripped\",\n [\n (molecules.benzene, 12, 6),\n (molecules.ethanol, 9, 6),\n (molecules.dialanine, 23, 12),\n ],\n)\ndef test_molecule_strip(mol: molecule.Molecule, n_atoms: int, stripped: int) -> 
None:\n\n    m = copy.deepcopy(mol)\n\n    assert len(m) == n_atoms\n\n    m.strip()\n\n    assert len(m) == n_atoms - stripped\n\n\[email protected](\n    \"mol, n_bonds\",\n    [(molecules.benzene, 12), (molecules.ethanol, 8), (molecules.dialanine, 22)],\n)\ndef test_graph_from_adjacency_matrix(mol: molecule.Molecule, n_bonds: int) -> None:\n\n    G = mol.to_graph()\n\n    assert graph.num_vertices(G) == len(mol)\n    assert graph.num_edges(G) == n_bonds\n\n    for idx, atomicnum in enumerate(mol.atomicnums):\n        assert graph.vertex_property(G, \"atomicnum\", idx) == atomicnum\n\n\[email protected](\n    \"mol, n_bonds\",\n    [(molecules.benzene, 12), (molecules.ethanol, 8), (molecules.dialanine, 22)],\n)\ndef test_graph_from_atomic_coordinates_perception(\n    mol: molecule.Molecule, n_bonds: int\n) -> None:\n\n    m = copy.deepcopy(mol)\n\n    delattr(m, \"adjacency_matrix\")\n    m.G = None\n\n    with pytest.warns(UserWarning):\n\n        # Uses automatic bond perception\n        G = m.to_graph()\n\n        assert graph.num_vertices(G) == len(m)\n        assert graph.num_edges(G) == n_bonds\n\n    for idx, atomicnum in enumerate(mol.atomicnums):\n        assert graph.vertex_property(G, \"atomicnum\", idx) == atomicnum\n\n\[email protected](\n    \"adjacency\",\n    [True, False],\n)\ndef test_from_obmol(adjacency):\n    pytest.importorskip(\"openbabel\")\n\n    from spyrmsd.optional import obabel as ob\n\n    # Load molecules with OpenBabel\n    path = os.path.join(molecules.molpath, \"1cbr_docking.sdf\")\n    mols = ob.loadall(path)\n\n    # Convert OpenBabel molecules to spyrmsd molecules\n    mols = [molecule.Molecule.from_obabel(mol, adjacency) for mol in mols]\n\n    assert len(mols) == 10\n\n    for mol in mols:\n        assert isinstance(mol, molecule.Molecule)\n\n        if adjacency:\n            assert mol.adjacency_matrix is not None\n        else:\n            with pytest.raises(AttributeError):\n                # No adjacency_matrix attribute\n                mol.adjacency_matrix\n\n\[email protected](\n    \"adjacency\",\n    [True, False],\n)\ndef test_from_rdmol(adjacency):\n    pytest.importorskip(\"rdkit\")\n\n    from spyrmsd.optional import rdkit as rd\n\n    # Load molecules with RDKit\n    path = os.path.join(molecules.molpath, \"1cbr_docking.sdf\")\n    mols = rd.loadall(path)\n\n    # Convert RDKit molecules to spyrmsd molecules\n    mols = [molecule.Molecule.from_rdkit(mol, adjacency) for mol in mols]\n\n    assert len(mols) == 10\n\n    for mol in mols:\n        assert isinstance(mol, molecule.Molecule)\n\n        if adjacency:\n            assert mol.adjacency_matrix is not None\n        else:\n            with pytest.raises(AttributeError):\n                # No adjacency_matrix attribute\n                mol.adjacency_matrix\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.random.rand",
"numpy.allclose"
]
] |
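The rotation tests above rotate every coordinate, compare with `numpy.allclose`, then rotate back to reset. A self-contained sketch of that rotate-then-undo test pattern, using a plain z-axis rotation matrix in place of `spyrmsd.utils.rotate` (the helper name and shapes here are illustrative):

```python
import numpy as np

def rotate_z(coords, angle_deg):
    # rotate row-vector coordinates about the z axis
    t = np.deg2rad(angle_deg)
    R = np.array([[np.cos(t), -np.sin(t), 0.0],
                  [np.sin(t),  np.cos(t), 0.0],
                  [0.0,        0.0,       1.0]])
    return coords @ R.T

coords = np.random.rand(12, 3)
for angle in [0, 45, 90]:
    rotated = rotate_z(coords, angle)
    # rotating back by -angle must restore the original coordinates
    assert np.allclose(rotate_z(rotated, -angle), coords)
```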
dicastro/tfm
|
[
"af875dd099288494a1f02a831e42d900d6cac29b"
] |
[
"gen_anchors.py"
] |
[
"import random\nimport argparse\nimport numpy as np\n\nfrom annotations import parse_voc_annotation, parse_txt_annotation\nimport json\n\ndef IOU(ann, centroids):\n w, h = ann\n similarities = []\n\n for centroid in centroids:\n c_w, c_h = centroid\n\n if c_w >= w and c_h >= h:\n similarity = w*h/(c_w*c_h)\n elif c_w >= w and c_h <= h:\n similarity = w*c_h/(w*h + (c_w-w)*c_h)\n elif c_w <= w and c_h >= h:\n similarity = c_w*h/(w*h + c_w*(c_h-h))\n else: #means both w,h are bigger than c_w and c_h respectively\n similarity = (c_w*c_h)/(w*h)\n similarities.append(similarity) # will become (k,) shape\n\n return np.array(similarities)\n\ndef avg_IOU(anns, centroids):\n n,d = anns.shape\n sum = 0.\n\n for i in range(anns.shape[0]):\n sum+= max(IOU(anns[i], centroids))\n\n return sum/n\n\ndef print_anchors(centroids):\n out_string = ''\n\n anchors = centroids.copy()\n\n widths = anchors[:, 0]\n sorted_indices = np.argsort(widths)\n\n r = \"anchors: [\"\n for i in sorted_indices:\n out_string += str(int(anchors[i,0]*416)) + ',' + str(int(anchors[i,1]*416)) + ', '\n \n print(out_string[:-2])\n\ndef run_kmeans(ann_dims, anchor_num):\n ann_num = ann_dims.shape[0]\n iterations = 0\n prev_assignments = np.ones(ann_num)*(-1)\n iteration = 0\n old_distances = np.zeros((ann_num, anchor_num))\n\n indices = [random.randrange(ann_dims.shape[0]) for i in range(anchor_num)]\n centroids = ann_dims[indices]\n anchor_dim = ann_dims.shape[1]\n\n while True:\n distances = []\n iteration += 1\n for i in range(ann_num):\n d = 1 - IOU(ann_dims[i], centroids)\n distances.append(d)\n distances = np.array(distances) # distances.shape = (ann_num, anchor_num)\n\n print(\"iteration {}: dists = {}\".format(iteration, np.sum(np.abs(old_distances-distances))))\n\n #assign samples to centroids\n assignments = np.argmin(distances,axis=1)\n\n if (assignments == prev_assignments).all() :\n return centroids\n\n #calculate new centroids\n centroid_sums=np.zeros((anchor_num, anchor_dim), np.float)\n for i in range(ann_num):\n centroid_sums[assignments[i]]+=ann_dims[i]\n for j in range(anchor_num):\n centroids[j] = centroid_sums[j]/(np.sum(assignments==j) + 1e-6)\n\n prev_assignments = assignments.copy()\n old_distances = distances.copy()\n\ndef _main_(args):\n config_path = args.conf\n num_anchors = args.anchors\n\n with open(config_path) as config_buffer:\n config = json.loads(config_buffer.read())\n\n if config['model']['data_load_method'] == 'voc':\n train_imgs, train_labels = parse_voc_annotation(\n config['train']['train_annot'],\n config['train']['train_image_folder'],\n config['train']['cache_name'],\n config['model']['labels']\n )\n elif config['model']['data_load_method'] == 'txt':\n train_imgs, train_labels = parse_txt_annotation(\n config['train']['train_annot'],\n config['train']['train_image_folder'],\n config['train']['cache_name'],\n config['model']['labels']\n )\n else:\n raise Exception('Unsupported data_load_method: \\'{}\\''.format(config['model']['data_load_method']))\n\n # run k_mean to find the anchors\n annotation_dims = []\n for image in train_imgs:\n #print(image['filename'])\n for obj in image['object']:\n relative_w = (float(obj['xmax']) - float(obj['xmin']))/image['width']\n relatice_h = (float(obj[\"ymax\"]) - float(obj['ymin']))/image['height']\n annotation_dims.append(tuple(map(float, (relative_w,relatice_h))))\n\n annotation_dims = np.array(annotation_dims)\n centroids = run_kmeans(annotation_dims, num_anchors)\n\n # write anchors to file\n print('\\naverage IOU for', num_anchors, 'anchors:', '%0.2f' 
% avg_IOU(annotation_dims, centroids))\n print_anchors(centroids)\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser()\n\n argparser.add_argument(\n '-c',\n '--conf',\n default='config.json',\n help='path to configuration file')\n argparser.add_argument(\n '-a',\n '--anchors',\n default=9,\n type=int,\n help='number of anchors to use')\n\n args = argparser.parse_args()\n _main_(args)\n"
] |
[
[
"numpy.abs",
"numpy.ones",
"numpy.argmin",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
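The piecewise `IOU()` above scores width/height pairs as if the boxes shared a corner; it is algebraically equivalent to this compact intersection-over-union, shown here as a vectorized sketch (names and sample values are illustrative):

```python
import numpy as np

def iou_wh(box, centroids):
    # IoU of corner-anchored boxes given only (width, height) pairs
    w, h = box
    cw, ch = centroids[:, 0], centroids[:, 1]
    inter = np.minimum(w, cw) * np.minimum(h, ch)
    union = w * h + cw * ch - inter
    return inter / union

centroids = np.array([[0.1, 0.2], [0.3, 0.3], [0.6, 0.5]])
print(iou_wh((0.25, 0.25), centroids))
```

For example, when the centroid encloses the box (`c_w >= w` and `c_h >= h`), the intersection is `w*h` and the union is `c_w*c_h`, which reproduces the first branch of the piecewise version.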
RTHMaK/RPGOne
|
[
"3f3ada7db1762781668bfb2377154fdc00e17212"
] |
[
"deep_qa-master/tests/layers/backend/batch_dot_test.py"
] |
[
"# pylint: disable=no-self-use,invalid-name\nimport numpy\nfrom numpy.testing import assert_almost_equal\nimport keras.backend as K\nfrom keras.layers import Input, Masking\nfrom keras.models import Model\n\nfrom deep_qa.layers.backend.batch_dot import BatchDot\nfrom deep_qa.layers.wrappers.output_mask import OutputMask\nfrom ...common.test_case import DeepQaTestCase\n\n\nclass TestBatchDotLayer(DeepQaTestCase):\n def test_compute_mask_basic(self):\n batch_size = 2\n # test the case where the tensors are even\n # tensor_a has shape (2, 3, 2), so mask_a has shape (2, 3)\n tensor_a = K.variable(numpy.random.randint(7, size=(batch_size, 3, 2)))\n mask_a = K.variable(numpy.array([[1, 0, 1], [1, 1, 0]]))\n # tensor_b has shape (2, 4, 2), so mask_b has shape (2, 4)\n tensor_b = K.variable(numpy.random.randint(7, size=(batch_size, 4, 2)))\n mask_b = K.variable(numpy.array([[0, 1, 1, 1], [1, 0, 1, 1]]))\n # a_dot_b would have shape (2, 3, 4), so mask of a_dot_b has shape (2, 3, 4)\n calculated_mask = K.eval(BatchDot().compute_mask([tensor_a, tensor_b],\n [mask_a, mask_b]))\n assert_almost_equal(calculated_mask, numpy.array([[[0.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 1.0, 1.0]],\n [[1.0, 0.0, 1.0, 1.0],\n [1.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0]]]))\n\n # test the case where tensor_a has less dimensions than tensor_b\n # tensor_a has shape (2, 4, 2), so mask_a has shape (2, 4)\n tensor_a = K.variable(numpy.random.randint(7, size=(batch_size, 4, 2)))\n mask_a = K.variable(numpy.array([[1, 0, 1, 0], [1, 1, 0, 0]]))\n # tensor_b has shape (2, 4, 3, 2), so mask_b has shape (2, 4, 3)\n tensor_b = K.variable(numpy.random.randint(7, size=(batch_size, 4, 3, 2)))\n mask_b = K.variable(numpy.array([[[1, 1, 1],\n [1, 1, 1],\n [1, 1, 0],\n [1, 0, 0]],\n [[1, 1, 1],\n [1, 1, 0],\n [1, 0, 0],\n [0, 0, 0]]]))\n # a_dot_b would have shape (2, 4, 3), so mask of a_dot_b has shape (2, 4, 3)\n calculated_mask = K.eval(BatchDot().compute_mask([tensor_a, tensor_b],\n [mask_a, mask_b]))\n assert calculated_mask.shape == (batch_size, 4, 3)\n assert_almost_equal(calculated_mask, numpy.array([[[1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0],\n [1.0, 1.0, 0.0],\n [0.0, 0.0, 0.0]],\n [[1.0, 1.0, 1.0],\n [1.0, 1.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]]]))\n\n # test the case where tensor_a has more dimensions than tensor_b\n # tensor_a has shape (2, 3, 4, 2), so mask_a has shape (2, 3, 4)\n tensor_a = K.variable(numpy.random.randint(7, size=(batch_size, 3, 4, 2)))\n mask_a = K.variable(numpy.array([[[1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 0, 1]],\n [[1, 1, 1, 1],\n [1, 1, 0, 1],\n [1, 0, 0, 1]]]))\n # tensor_b has shape (2, 3, 2), so mask_b has shape (2, 3)\n tensor_b = K.variable(numpy.random.randint(7, size=(batch_size, 3, 2)))\n mask_b = K.variable(numpy.array([[1, 0, 1], [1, 1, 0]]))\n # a_dot_b would have shape (2, 3, 4), so mask of a_dot_b has shape (2, 3, 4)\n calculated_mask = K.eval(BatchDot().compute_mask([tensor_a, tensor_b],\n [mask_a, mask_b]))\n assert calculated_mask.shape == (batch_size, 3, 4)\n assert_almost_equal(calculated_mask, numpy.array([[[1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 0.0, 1.0]],\n [[1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 0.0, 1.0],\n [0.0, 0.0, 0.0, 0.0]]]))\n\n def test_a_smaller_than_b(self):\n batch_size = 3\n tensor_a = numpy.random.randint(7, size=(batch_size, 5))\n tensor_b = numpy.random.randint(7, size=(batch_size, 2, 5))\n\n # Manually set some values to 1 here, which will be masked later\n # (1 and not 0 so that masked values are still non-zero in the 
output)\n tensor_a[0] = 0\n tensor_b[0][1] = 0\n\n input_tensor_a = Input(shape=(5,))\n masked_tensor_a = Masking(mask_value=0)(input_tensor_a)\n input_tensor_b = Input(shape=(2, 5))\n masked_tensor_b = Masking(mask_value=0)(input_tensor_b)\n\n a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])\n\n a_dot_b_mask = OutputMask()(a_dot_b)\n model = Model(inputs=[input_tensor_a, input_tensor_b],\n outputs=[a_dot_b, a_dot_b_mask])\n # a_dot_b and mask_tensor are of shape (3, 2).\n a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])\n # Test that the dot happened like we expected.\n for i in range(batch_size):\n # each dot product should be of shape (2,)\n assert_almost_equal(a_dot_b_tensor[i],\n numpy.einsum(\"i,mi->m\", tensor_a[i], tensor_b[i]))\n # Check that the values in the output mask are 0 where the\n # values were set to 1 above.\n assert mask_tensor[0][0] == 0\n assert mask_tensor[0][1] == 0\n\n def test_a_larger_than_b(self):\n batch_size = 3\n tensor_a = numpy.random.randint(7, size=(batch_size, 2, 5))\n tensor_b = numpy.random.randint(7, size=(batch_size, 5))\n\n # Manually set some values to 1 here, which will be masked later\n # (1 and not 0 so that masked values are still non-zero in the output)\n tensor_a[0][1] = 0\n tensor_b[0] = 0\n\n input_tensor_a = Input(shape=(2, 5))\n masked_tensor_a = Masking(mask_value=0)(input_tensor_a)\n input_tensor_b = Input(shape=(5,))\n masked_tensor_b = Masking(mask_value=0)(input_tensor_b)\n\n a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])\n\n a_dot_b_mask = OutputMask()(a_dot_b)\n model = Model(inputs=[input_tensor_a, input_tensor_b],\n outputs=[a_dot_b, a_dot_b_mask])\n # a_dot_b and mask_tensor are of shape (3, 2).\n a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])\n # Test that the dot happened like we expected.\n for i in range(batch_size):\n # each dot product should be of shape (2,)\n assert_almost_equal(a_dot_b_tensor[i],\n numpy.einsum(\"mi,i->m\", tensor_a[i], tensor_b[i]))\n # Check that the values in the output mask are 0 where the\n # values were set to 1 above.\n assert mask_tensor[0][0] == 0\n assert mask_tensor[0][1] == 0\n\n def test_a_smaller_than_b_higher_dimension(self):\n batch_size = 3\n tensor_a = numpy.random.randint(7, size=(batch_size, 4, 5))\n tensor_b = numpy.random.randint(7, size=(batch_size, 4, 2, 5))\n\n # Manually set some values to 1 here, which will be masked later\n # (1 and not 0 so that masked values are still non-zero in the output)\n tensor_a[0][1] = 0\n tensor_a[1][3] = 0\n tensor_b[0][1][1] = 0\n tensor_b[0][2][1] = 0\n\n input_tensor_a = Input(shape=(4, 5))\n masked_tensor_a = Masking(mask_value=0)(input_tensor_a)\n input_tensor_b = Input(shape=(4, 2, 5))\n masked_tensor_b = Masking(mask_value=0)(input_tensor_b)\n\n if K.backend() == \"theano\":\n self.assertRaises(RuntimeError, BatchDot(),\n [masked_tensor_a, masked_tensor_b])\n return\n else:\n a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])\n\n a_dot_b_mask = OutputMask()(a_dot_b)\n model = Model(inputs=[input_tensor_a, input_tensor_b],\n outputs=[a_dot_b, a_dot_b_mask])\n # a_dot_b and mask_tensor are of shape (3, 4, 2).\n a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])\n # Test that the dot happened like we expected.\n for i in range(batch_size):\n # each dot product should be of shape (4, 2)\n assert_almost_equal(a_dot_b_tensor[i],\n numpy.einsum(\"ij,imj->im\", tensor_a[i], tensor_b[i]))\n # Check that the values in the output mask are 0 where the\n # values were set to 1 
above.\n assert mask_tensor[0][1][0] == 0\n assert mask_tensor[0][1][1] == 0\n assert mask_tensor[0][2][1] == 0\n assert mask_tensor[1][3][0] == 0\n assert mask_tensor[1][3][1] == 0\n\n def test_a_larger_than_b_higher_dimension(self):\n batch_size = 3\n tensor_a = numpy.random.randint(7, size=(batch_size, 4, 2, 5))\n tensor_b = numpy.random.randint(7, size=(batch_size, 4, 5))\n\n # Manually set some values to 1 here, which will be masked later\n # (1 and not 0 so that masked values are still non-zero in the output)\n tensor_a[0][1][1] = 0\n tensor_a[0][2][1] = 0\n tensor_b[0][1] = 0\n tensor_b[1][3] = 0\n\n input_tensor_a = Input(shape=(4, 2, 5))\n masked_tensor_a = Masking(mask_value=0)(input_tensor_a)\n input_tensor_b = Input(shape=(4, 5))\n masked_tensor_b = Masking(mask_value=0)(input_tensor_b)\n\n if K.backend() == \"theano\":\n self.assertRaises(RuntimeError, BatchDot(),\n [masked_tensor_a, masked_tensor_b])\n return\n else:\n a_dot_b = BatchDot()([masked_tensor_a, masked_tensor_b])\n a_dot_b_mask = OutputMask()(a_dot_b)\n model = Model(inputs=[input_tensor_a, input_tensor_b],\n outputs=[a_dot_b, a_dot_b_mask])\n # a_dot_b and mask_tensor are of shape (3, 4, 2).\n a_dot_b_tensor, mask_tensor = model.predict([tensor_a, tensor_b])\n # Test that the dot happened like we expected.\n for i in range(batch_size):\n # each dot product should be of shape (4, 2)\n assert_almost_equal(a_dot_b_tensor[i],\n numpy.einsum(\"imj,ij->im\", tensor_a[i], tensor_b[i]))\n # Check that the values in the output mask are 0 where the\n # values were set to 1 above.\n assert mask_tensor[0][1][0] == 0\n assert mask_tensor[0][1][1] == 0\n assert mask_tensor[0][2][1] == 0\n assert mask_tensor[1][3][0] == 0\n assert mask_tensor[1][3][1] == 0\n\n def test_output_shapes(self):\n bd = BatchDot()\n a_shapes = [(5, 10), (1, 1, 1), (1, 5, 3), (1, 5, 4, 3), (1, 5, 3)]\n b_shapes = [(5, 10), (1, 1, 1), (1, 2, 3), (1, 5, 3), (1, 5, 4, 3)]\n expected_shapes = [(5, 1), (1, 1, 1), (1, 5, 2), (1, 5, 4), (1, 5, 4)]\n for a_shape, b_shape, expected_shape in zip(a_shapes, b_shapes, expected_shapes):\n if (len(a_shape) > 3 or len(b_shape) > 3) and K.backend() == \"theano\":\n # this breaks in theano, so check that an error is raised\n self.assertRaises(RuntimeError, bd.call,\n [K.ones(shape=a_shape), K.ones(shape=b_shape)])\n else:\n assert K.eval(bd([K.ones(shape=a_shape), K.ones(shape=b_shape)])).shape == expected_shape\n assert bd.compute_output_shape([a_shape, b_shape]) == expected_shape\n"
] |
[
[
"numpy.array",
"numpy.einsum",
"numpy.random.randint"
]
] |
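The tests above validate `BatchDot` against per-sample `numpy.einsum` calls. The same batched dot products can be expressed as a single einsum contraction over the whole batch; a minimal sketch with illustrative shapes:

```python
import numpy as np

a = np.random.randint(7, size=(3, 5))      # (batch, dim)
b = np.random.randint(7, size=(3, 2, 5))   # (batch, m, dim)

# one (m,)-shaped dot product per batch element -> (batch, m)
out = np.einsum("bi,bmi->bm", a, b)
for i in range(3):
    assert np.allclose(out[i], np.einsum("i,mi->m", a[i], b[i]))
```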
jackson-code/Delta
|
[
"ff4c1df4dc75d9ff88025d37d5cd3216a5f353ff"
] |
[
"belt - 1104/isolation_forest.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 21 20:35:15 2021\n\n@author: user\n\"\"\"\nfrom sklearn.ensemble import IsolationForest\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import ConfusionMatrixDisplay\nfrom sklearn.metrics import classification_report\nimport pandas as pd\n\nclass MyIsolationForest():\n def __init__(self, abnormal_ratio, n_estimators, max_samples, max_features, X_train, random_state=None):\n self.IF = IsolationForest(\n contamination=abnormal_ratio, random_state=random_state, max_samples=max_samples, n_estimators=n_estimators, max_features=max_features)\n self.IF.fit(X_train)\n \n def PlotScoreHist(self, normal_bins, anomaly_bins):\n plt.figure(figsize=(7,5))\n plt.title('')\n \n # 畫正常資料的長條圖\n nums,bins,patches = plt.hist(self.normal_score, bins=normal_bins, \n density=False, alpha=0.3, rwidth=0.5,\n label='normal testing dataset')\n #plt.xticks(bins,bins) \n for num,bin in zip(nums,bins):\n plt.annotate(num,xy=(bin,num))\n \n # 畫異常資料的長條圖\n nums,bins,patches = plt.hist(self.abnormal_score, bins=anomaly_bins, \n density=False, alpha=0.3, rwidth=0.5,\n label='abnormal testing dataset')\n #plt.xticks(bins,bins) \n for num,bin in zip(nums,bins):\n plt.annotate(num,xy=(bin,num))\n \n plt.legend(loc='upper left')\n plt.xlabel(\"anomaly score\")\n plt.ylabel(\"data quantity \")\n \n def PlotAnomalyScore(self, title): \n axis_x = np.arange(0, len(self.all_score))\n \n plt.figure(figsize=(7,5))\n \n plt.title(title, fontsize=15, y=1.05, fontweight='bold')\n plt.ylabel('Anomaly Score', fontsize=15) \n plt.xlabel('Data Points', fontsize=15) \n plt.grid()\n \n plt.plot(axis_x, self.all_score)\n \n def ScoreDifference(self):\n return self.abnormal_score.min() - self.normal_score.max()\n \n def Predict(self, X_test, y_test_bi, y_true_mul, labels): \n self.X_test = X_test\n \n # 先資料和label合併,才能將正常、異常資料分開\n Xy_test = pd.concat([X_test, y_test_bi], axis=1)\n \n self.X_test_bi_normal = Xy_test[Xy_test['Span'] == 1]\n self.X_test_bi_normal.drop(labels='Span', axis=1, inplace=True)\n \n self.X_test_bi_abnormal = Xy_test[Xy_test['Span'] == -1]\n self.X_test_bi_abnormal.drop(labels='Span', axis=1, inplace=True)\n \n self.y_test_bi = y_test_bi\n \n # 將正常、異常分開預測,為了畫圖時兩者顏色分開\n self.y_test_bi_normal = self.IF.predict(self.X_test_bi_normal)\n self.y_test_bi_abnormal = self.IF.predict(self.X_test_bi_abnormal)\n \n # binary classification (原本IF的結果)\n self.y_pred_bi = np.r_[self.y_test_bi_normal, self.y_test_bi_abnormal]\n \n # 正常資料的anomaly score\n score = self.IF.score_samples(self.X_test_bi_normal)\n self.normal_score = pd.Series(score * -1, index=self.X_test_bi_normal.index, name='Anomaly_score')\n \n # 異常資料的anomaly score\n score = self.IF.score_samples(self.X_test_bi_abnormal)\n self.abnormal_score = pd.Series(score * -1, index=self.X_test_bi_abnormal.index, name='Anomaly_score')\n \n self.all_score = pd.concat([self.normal_score, self.abnormal_score], axis=0)\n \n # multiply classification\n self.y_true_mul = y_true_mul\n self.y_pred_mul = self.MultiClassify(y_true_mul, labels)\n\n \n def MultiClassify(self, y_true, labels):\n df = pd.concat([y_true, self.all_score], axis=1, ignore_index=True)\n df.columns = ['Span', \"Anomaly_score\"]\n \n # 計算每種span的異常分數的平均\n mean = []\n for i in labels:\n mean.append(df[df.Span == i].Anomaly_score.mean())\n self.means = mean\n \n # 計算平均之間的中點,當作multi-classify的threshold \n mean_middle = []\n for i in range(len(labels)):\n # 避免超出idx範圍\n if i == len(labels)-1:\n break\n 
current = mean[i]\n next = mean[i+1]\n mean_middle.append((current + next) / 2)\n self.multi_threshold = mean_middle\n \n\n score = pd.Series.to_list(self.all_score)\n y_pred = []\n # 多元分類\n # 將每個異常分數與threshold合併並排序,異常分數的idx+1即為prediction\n for i in range(len(score)):\n temp = [score[i]]\n temp.extend(mean_middle)\n temp.sort()\n y_pred.append(temp.index(score[i]) + 1)\n y_pred = pd.Series(y_pred, index=y_true.index)\n return y_pred\n \n def ConfusionMatrixBinary(self, display_labels, title): \n # binary classification (原本IF的結果) \n cm = confusion_matrix(self.y_test_bi, self.y_pred_bi)\n disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels)\n disp = disp.plot(include_values=True, cmap='Blues')\n plt.title(title)\n \n \n def ClassificationReportBinary(self):\n print(classification_report(self.y_test_bi, self.y_pred_bi)) \n \n def ConfusionMatrixMulti(self, display_labels, title): \n # multi classification \n plt.title(title)\n cm = confusion_matrix(self.y_true_mul, self.y_pred_mul)\n disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels)\n disp = disp.plot(include_values=True, cmap='Blues') \n \n def ClassificationReportMulti(self):\n print(classification_report(self.y_true_mul, self.y_pred_mul)) \n \n def ConfusionMatrixThree(self, display_labels, title): \n # 把span1, 2合併,3 4 合併,5 6 合併\n y_pred_3 = self.y_pred_mul.copy()\n y_pred_3[self.y_pred_mul==1] = 12\n y_pred_3[self.y_pred_mul==2] = 12\n y_pred_3[self.y_pred_mul==3] = 34\n y_pred_3[self.y_pred_mul==4] = 34\n y_pred_3[self.y_pred_mul==5] = 56\n y_pred_3[self.y_pred_mul==6] = 56\n self.y_pred_3 = y_pred_3\n y_true_3 = self.y_true_mul.copy()\n y_true_3[self.y_true_mul==1] = 12\n y_true_3[self.y_true_mul==2] = 12\n y_true_3[self.y_true_mul==3] = 34\n y_true_3[self.y_true_mul==4] = 34\n y_true_3[self.y_true_mul==5] = 56\n y_true_3[self.y_true_mul==6] = 56\n self.y_true_3 = y_true_3\n \n plt.title(title)\n cm = confusion_matrix(self.y_true_3, self.y_pred_3)\n disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels)\n disp = disp.plot(include_values=True, cmap='Blues') \n \n def ClassificationReportThree(self):\n print(classification_report(self.y_true_3, self.y_pred_3)) \n \n \n "
] |
[
[
"matplotlib.pyplot.legend",
"pandas.concat",
"pandas.Series",
"matplotlib.pyplot.title",
"sklearn.metrics.ConfusionMatrixDisplay",
"matplotlib.pyplot.annotate",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"pandas.Series.to_list",
"sklearn.ensemble.IsolationForest",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.figure"
]
] |
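The class above negates `score_samples` because scikit-learn returns values where lower means more anomalous. A minimal, runnable sketch of that scoring convention (the data here is synthetic and illustrative):

```python
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(0)
X_train = rng.normal(size=(200, 2))
X_test = np.vstack([rng.normal(size=(5, 2)), [[6.0, 6.0]]])  # last row is an obvious outlier

clf = IsolationForest(contamination=0.1, random_state=0).fit(X_train)
anomaly_score = clf.score_samples(X_test) * -1   # after negation: higher = more anomalous
print(anomaly_score.round(3))                    # the last entry should be the largest
```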
cerad95/job-scraper
|
[
"3841c34d0ead57089fce192bd285faccfcf8f5da"
] |
[
"src/job_searcher.py"
] |
[
"import asyncio\nimport os\nfrom sys import platform as _platform\n\nimport pandas\nfrom graduateland import graduateland\nfrom gspread import spreadsheet\nfrom jindex import jindex\nfrom pip._vendor.colorama import Fore\nfrom tqdm import tqdm\n\n\nasync def fetch(session, url):\n async with session.get(url) as response:\n return await response.read()\n\n\nclass job_searcher:\n def __init__(self):\n self.websites = []\n self.websites.append(jindex(\"Jobindex\", \"https://www.jobindex.dk/jobsoegning?geoareaid=15182&geoareaid=15187&geoareaid=4&geoareaid=3&geoareaid=2&geoareaid=15180&geoareaid=16149&subid=1&subid=2&subid=3&subid=4&subid=6&subid=7&subid=93&subid=116&subid=127\"))\n self.websites.append(graduateland(\"Graduateland\", \"https://graduateland.com/da/jobs?types%5B%5D=1&types%5B%5D=3&positions%5B%5D=15&languages%5B%5D=1&languages%5B%5D=23&limit=10\"))\n \n\n self.printstring = \"{desc:<20}{percentage:3.0f}%%|%s{bar}%s{r_bar}\" % (Fore.LIGHTBLUE_EX, Fore.RESET)\n\n def write_to_csv(self):\n\n df = pandas.DataFrame\n frames = []\n\n for site in self.websites:\n df = pandas.DataFrame.from_records([s.as_dict() for s in site.jobs])\n df.reset_index(drop=True, inplace=True)\n frames.append(df)\n\n concat_df = pandas.concat(frames, ignore_index=True)\n\n if _platform == \"linux\" or _platform == \"linux2\":\n concat_df.to_csv(r'data.csv', header=True, sep='|', index=False)\n elif _platform == \"darwin\":\n print(\"mac is trash\")\n elif _platform == \"win32\":\n concat_df.to_csv(r'C:\\users\\alex\\desktop\\info.csv', header=True, sep='|', index=False)\n elif _platform == \"win64\":\n concat_df.to_csv(r'C:\\users\\alex\\desktop\\info.csv',headeshr=True, sep='|', index=False)\n\n def jobs_unique_and_sorted(self):\n for website in self.websites:\n print(website.name)\n print(len(website.jobs))\n website.jobs = list(set(website.jobs))\n website.jobs.sort(key=lambda x: (x.location, x.title), reverse=False)\n print(len(website.jobs))\n\nif __name__ == '__main__':\n js = job_searcher()\n\n loop = asyncio.get_event_loop()\n\n for website in js.websites:\n urls = website.generate_urls_for_pages()\n loop.run_until_complete(website.fetch_all_urls(urls))\n website.scrape_all_pages()\n\n while True:\n decision = input(\"\\nStore local or cloud: \")\n if decision == \"local\":\n with tqdm(iterable=False, total=2, bar_format=js.printstring) as pbar:\n pbar.set_description(\"Sorting Jobs\")\n js.jobs_unique_and_sorted()\n pbar.update(1)\n pbar.set_description(\"Writing CSV\")\n js.write_to_csv()\n pbar.update(1)\n break\n elif decision == \"cloud\":\n with tqdm(iterable=False, total=5, bar_format=js.printstring) as pbar:\n gspread = spreadsheet()\n pbar.set_description(\"Sorting Jobs\")\n js.jobs_unique_and_sorted()\n pbar.update(1)\n pbar.set_description(\"Writing CSV\")\n js.write_to_csv()\n pbar.update(1)\n pbar.set_description(\"Creating Worksheet\")\n gspread.create_new_worksheet()\n pbar.update(1)\n pbar.set_description(\"Uploading CSV\")\n gspread.insert_csv_file()\n pbar.update(1)\n pbar.set_description(\"Deleting leftover CSV file\")\n os.remove(\"data.csv\")\n pbar.update(1)\n break\n else:\n print(\"Only \\'local\\' or \\'cloud\\' as input is accepted.\")\n"
] |
[
[
"pandas.concat"
]
] |
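The `write_to_csv` method above builds one DataFrame per source from record dicts and joins them with `pandas.concat`. A minimal sketch of that pattern with made-up sample records:

```python
import pandas as pd

site_a = [{"title": "Developer", "location": "CPH"}]
site_b = [{"title": "Data Engineer", "location": "AAR"}]

frames = [pd.DataFrame.from_records(site) for site in (site_a, site_b)]
concat_df = pd.concat(frames, ignore_index=True)   # ignore_index gives one clean 0..n-1 index
concat_df.to_csv("data.csv", header=True, sep="|", index=False)
```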
sweb/pandas
|
[
"db051b94d50cec2a5f2a160b36d67716c47839c7"
] |
[
"pandas/tests/arrays/test_timedeltas.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas.core.arrays import TimedeltaArrayMixin as TimedeltaArray\nimport pandas.util.testing as tm\n\n\nclass TestTimedeltaArrayConstructor(object):\n def test_non_array_raises(self):\n with pytest.raises(ValueError, match='list'):\n TimedeltaArray([1, 2, 3])\n\n def test_other_type_raises(self):\n with pytest.raises(TypeError,\n match=\"dtype bool cannot be converted\"):\n TimedeltaArray(np.array([1, 2, 3], dtype='bool'))\n\n def test_incorrect_dtype_raises(self):\n # TODO: why TypeError for 'category' but ValueError for i8?\n with pytest.raises(TypeError,\n match=r'category cannot be converted '\n r'to timedelta64\\[ns\\]'):\n TimedeltaArray(np.array([1, 2, 3], dtype='i8'), dtype='category')\n\n with pytest.raises(TypeError,\n match=r\"dtype int64 cannot be converted \"\n r\"to timedelta64\\[ns\\]\"):\n TimedeltaArray(np.array([1, 2, 3], dtype='i8'),\n dtype=np.dtype(\"int64\"))\n\n def test_copy(self):\n data = np.array([1, 2, 3], dtype='m8[ns]')\n arr = TimedeltaArray(data, copy=False)\n assert arr._data is data\n\n arr = TimedeltaArray(data, copy=True)\n assert arr._data is not data\n assert arr._data.base is not data\n\n\nclass TestTimedeltaArray(object):\n def test_from_sequence_dtype(self):\n msg = r\"Only timedelta64\\[ns\\] dtype is valid\"\n with pytest.raises(ValueError, match=msg):\n TimedeltaArray._from_sequence([], dtype=object)\n\n def test_abs(self):\n vals = np.array([-3600 * 10**9, 'NaT', 7200 * 10**9], dtype='m8[ns]')\n arr = TimedeltaArray(vals)\n\n evals = np.array([3600 * 10**9, 'NaT', 7200 * 10**9], dtype='m8[ns]')\n expected = TimedeltaArray(evals)\n\n result = abs(arr)\n tm.assert_timedelta_array_equal(result, expected)\n\n def test_neg(self):\n vals = np.array([-3600 * 10**9, 'NaT', 7200 * 10**9], dtype='m8[ns]')\n arr = TimedeltaArray(vals)\n\n evals = np.array([3600 * 10**9, 'NaT', -7200 * 10**9], dtype='m8[ns]')\n expected = TimedeltaArray(evals)\n\n result = -arr\n tm.assert_timedelta_array_equal(result, expected)\n\n def test_neg_freq(self):\n tdi = pd.timedelta_range('2 Days', periods=4, freq='H')\n arr = TimedeltaArray(tdi, freq=tdi.freq)\n\n expected = TimedeltaArray(-tdi._data, freq=-tdi.freq)\n\n result = -arr\n tm.assert_timedelta_array_equal(result, expected)\n\n @pytest.mark.parametrize(\"dtype\", [\n int, np.int32, np.int64, 'uint32', 'uint64',\n ])\n def test_astype_int(self, dtype):\n arr = TimedeltaArray._from_sequence([pd.Timedelta('1H'),\n pd.Timedelta('2H')])\n result = arr.astype(dtype)\n\n if np.dtype(dtype).kind == 'u':\n expected_dtype = np.dtype('uint64')\n else:\n expected_dtype = np.dtype('int64')\n expected = arr.astype(expected_dtype)\n\n assert result.dtype == expected_dtype\n tm.assert_numpy_array_equal(result, expected)\n\n def test_setitem_clears_freq(self):\n a = TimedeltaArray(pd.timedelta_range('1H', periods=2, freq='H'))\n a[0] = pd.Timedelta(\"1H\")\n assert a.freq is None\n\n\nclass TestReductions(object):\n\n def test_min_max(self):\n arr = TimedeltaArray._from_sequence([\n '3H', '3H', 'NaT', '2H', '5H', '4H',\n ])\n\n result = arr.min()\n expected = pd.Timedelta('2H')\n assert result == expected\n\n result = arr.max()\n expected = pd.Timedelta('5H')\n assert result == expected\n\n result = arr.min(skipna=False)\n assert result is pd.NaT\n\n result = arr.max(skipna=False)\n assert result is pd.NaT\n\n @pytest.mark.parametrize('skipna', [True, False])\n def test_min_max_empty(self, skipna):\n arr = TimedeltaArray._from_sequence([])\n 
result = arr.min(skipna=skipna)\n assert result is pd.NaT\n\n result = arr.max(skipna=skipna)\n assert result is pd.NaT\n"
] |
[
[
"pandas.timedelta_range",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.util.testing.assert_timedelta_array_equal",
"pandas.core.arrays.TimedeltaArrayMixin",
"pandas.Timedelta",
"numpy.dtype",
"pandas.core.arrays.TimedeltaArrayMixin._from_sequence",
"numpy.array"
]
] |
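Several tests above build `timedelta64[ns]` arrays in which the string `'NaT'` becomes the missing-value sentinel. A small standalone sketch of that construction and how to detect the NaT slot:

```python
import numpy as np

# -1 hour, NaT, +2 hours, stored as nanosecond timedeltas
vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
print(vals)
print(np.isnat(vals))   # [False  True False]
```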
eakgun/LSTM-TimeSeries-Forecasting
|
[
"177a5134df87ccf08d057a65b46dfcf87c0dec76"
] |
[
"TSMethods.py"
] |
[
"# Time Series\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef create_series(df, xcol, datecol):\r\n # Create a dataframe with the features and the date time as the index\r\n features_considered = [xcol]\r\n features = df[features_considered]\r\n features.index = df[datecol]\r\n # features.head()\r\n # features.plot(subplots=True)\r\n return features\r\n\r\n\r\n# X is the series to test\r\n# log_x asks whether to log X prior to testing or not\r\ndef stationarity_test(X, log_x = \"Y\", return_p = False, print_res = True):\r\n \r\n # If X isn't logged, we need to log it for better results\r\n if log_x == \"Y\":\r\n X = np.log(X[X>0])\r\n \r\n # Once we have the series as needed we can do the ADF test\r\n from statsmodels.tsa.stattools import adfuller\r\n dickey_fuller = adfuller(X)\r\n \r\n if print_res:\r\n # If ADF statistic is < our 1% critical value (sig level) we can conclude it's not a fluke (ie low P val / reject H(0))\r\n print('ADF Stat is: {}.'.format(dickey_fuller[0]))\r\n # A lower p val means we can reject the H(0) that our data is NOT stationary\r\n print('P Val is: {}.'.format(dickey_fuller[1]))\r\n print('Critical Values (Significance Levels): ')\r\n for key,val in dickey_fuller[4].items():\r\n print(key,\":\",round(val,3))\r\n \r\n if return_p:\r\n return dickey_fuller[1]\r\n \r\n# Differencing the data \r\ndef difference(X):\r\n diff = X.diff()\r\n plt.plot(diff)\r\n plt.show()\r\n return diff"
] |
[
[
"matplotlib.pyplot.plot",
"numpy.log",
"matplotlib.pyplot.show"
]
] |
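A usage sketch for the `stationarity_test`/`difference` pair above: an upward-trending series should fail the ADF test before differencing and pass after it. The synthetic series and seed here are illustrative:

```python
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import adfuller

np.random.seed(0)
trend = pd.Series(np.arange(200, dtype=float) + np.random.normal(0, 1, 200))

print("raw p-value: ", adfuller(trend)[1])                   # high -> cannot reject unit root
print("diff p-value:", adfuller(trend.diff().dropna())[1])   # near 0 -> stationary after differencing
```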
JasonZK/Soccer_OCR
|
[
"b26ca9068f1a10f00210d66fa4ae27dad89e71b2"
] |
[
"get_goal.py"
] |
[
"import os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nimport easyocr\nimport matplotlib.pyplot as plt # plt 用于显示图片\nimport re\nfrom paddleocr import PaddleOCR, draw_ocr\nimport cv2 as cv\nimport numpy as np\nimport time\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nkeyword1 = \"goal\"\n\n\n# print(BASE_DIR)\n\n\ndef makedir(new_dir):\n if not os.path.exists(new_dir):\n os.makedirs(new_dir)\n\n\nEVENT_DIR = \"D:/dataset/event\"\nVIDEO_DIR = \"D:/dataset/video_1\"\n# FRAMES_DIR = os.path.join(BASE_DIR, \"goal_frames\")\n\n\n# 遍历event文件夹\nVID1 = ['0003', '0029', '0056', '0072', '1003', '1010', '1133', '1158', '1208',\n '1230']\n\n\n\n\ndef get_frame_dic(base_dir, classes, keyword):\n frame_dic = {}\n num = 0\n for root, _, files in os.walk(base_dir):\n # 遍历每个txt\n for event_index in files:\n # 只处理event.txt\n if classes in event_index:\n # event_index[:4]获取视频序号\n with open(base_dir + \"\\\\\" + event_index, \"r\") as f:\n for line in f.readlines():\n line1 = line.strip('\\n') # 去掉列表中每一个元素的换行符\n line = line.split()\n if keyword in line1 and line[3] == '0':\n frame_dic.setdefault(event_index[:4], []).append(line[1] + \" \" + line[2])\n num += 1\n\n return frame_dic, num\n\n\ndef get_frames(video_dir, frame_dic, n):\n get = 0\n miss = 0\n num = 0\n\n ocr = PaddleOCR(lang=\"en\", gpu_mem=5000, det=False,\n rec_model_dir=\"./inference/en_ppocr_mobile_v2.0_rec_infer/\") # 首次执行会自动下载模型文件\n\n for root, _, files in os.walk(video_dir):\n for video_index in files:\n video_index1 = video_index.split('_')\n # print(video_index1)\n if video_index1[1] in frame_dic.keys():\n videoCap = cv.VideoCapture(video_dir + \"\\\\\" + video_index)\n print(\"-----------------------------\")\n print(\"video:{}\".format(video_index1[1]))\n for index in frame_dic[video_index1[1]]:\n num += 1\n start = int(index.split()[0])\n end = int(index.split()[1])\n print(\"start:{} end:{}\".format(start, end))\n i = start\n flag = False\n team1 = 0\n team2 = 0\n miss_str = []\n # init_candidate = {}\n while i <= start + 50:\n videoCap.set(cv.CAP_PROP_POS_FRAMES, i)\n # print(\"I'm finding video:{} frame:{}\".format(video_index1[1], i))\n boolFrame, matFrame = videoCap.read()\n if boolFrame:\n temp_jpgframe = np.asarray(matFrame)\n # 截取上面0-90区域进行OCR检测\n jpgframe = temp_jpgframe[0:120]\n # 得到OCR识别结果\n time1 = time.time()\n temp_result = ocr(jpgframe)\n time2 = time.time()\n # print(\" time for one:{}\".format(time2 - time1))\n result = ''\n for str, ration in temp_result[1]:\n result += str\n result += ' '\n # result = ''.join(re_str)\n # print(result)\n # 检索比赛时间\n if result:\n score2 = re.findall(\"\\d-\\d\", result)\n if score2:\n num_num = score2[0]\n num1, num2 = num_num.split(\"-\")\n team1 = int(num1)\n team2 = int(num2)\n i += 1\n break\n game_time = re.findall(\"\\d\\d:\\d\\d\", result)\n # print(game_time)\n # 获取删除比赛时间之后的字符串\n if not game_time:\n i += 1\n continue\n temp = result.replace(game_time[0], '')\n if temp:\n # 如果不为空,则检索比分\n score1 = re.findall(\"\\d:\\d\", temp)\n if score1:\n num_num = score1[0]\n num1, num2 = num_num.split(\":\")\n team1 = int(num1)\n team2 = int(num2)\n i += 1\n break\n else:\n i += 1\n continue\n else:\n print(\"can not get frames!\")\n i += 1\n print(\" find init: {}-{} i:{} str:{}\".format(team1, team2, i, result))\n while i <= end + 3500:\n videoCap.set(cv.CAP_PROP_POS_FRAMES, i)\n # print(\"I'm working with video:{} frame:{}\".format(video_index1[1], i))\n boolFrame, matFrame = videoCap.read()\n if boolFrame:\n temp_jpgframe = np.asarray(matFrame)\n # 截取上面0-90区域进行OCR检测\n jpgframe 
= temp_jpgframe[0:120]\n # 得到OCR识别结果\n time1 = time.time()\n temp_result = ocr(jpgframe)\n time2 = time.time()\n # print(\" time for one:{}\".format(time2 - time1))\n result = ''\n for str, ration in temp_result[1]:\n result += str\n result += ' '\n # result = ''.join(re_str)\n # print(result)\n # 检索比赛时间\n miss_str.append(result)\n if result:\n score2 = re.findall(\"\\d-\\d\", result)\n if score2:\n num_num = score2[0]\n num1, num2 = num_num.split(\"-\")\n num1 = int(num1)\n num2 = int(num2)\n i += 100\n if team1 != num1 or team2 != num2:\n print(\" score change! before: {}-{} after: {}-{} str:{}\".format(team1, team2, num1, num2, result))\n get += 1\n flag = True\n break\n else:\n i += 100\n continue\n game_time = re.findall(\"\\d\\d:\\d\\d\", result)\n # print(game_time)\n # 获取删除比赛时间之后的字符串\n if not game_time:\n i += 100\n continue\n temp = result.replace(game_time[0], '')\n if temp:\n # 如果不为空,则检索比分\n score1 = re.findall(\"\\d:\\d\", temp)\n if score1:\n num_num = score1[0]\n num1, num2 = num_num.split(\":\")\n num1 = int(num1)\n num2 = int(num2)\n else:\n i += 100\n continue\n if team1 != num1 or team2 != num2:\n print(\" score change! before: {}-{} after: {}-{} str:{}\".format(team1, team2, num1, num2, result))\n get += 1\n flag = True\n break\n else:\n i += 100\n continue\n else:\n print(\"can not get frames!\")\n i += 100\n if not flag:\n miss += 1\n print(\" miss! goal frame:{} {}\".format(start, end))\n for str_mis in miss_str:\n print(\" {}\".format(str_mis))\n print(\"--------------------------------------------\")\n print(\"total goal:{} get:{} miss:{}\".format(num, get, miss))\n\n\n\nFRAMES_DIC, NUM = get_frame_dic(base_dir=EVENT_DIR, classes='event', keyword=keyword1)\nprint(FRAMES_DIC)\nprint(NUM)\nget_frames(video_dir=VIDEO_DIR, frame_dic=FRAMES_DIC, n=NUM)\n"
] |
[
[
"numpy.asarray"
]
] |
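The parsing step above has to strip the match clock before looking for a score, otherwise a score like `1:0` is swallowed by the `mm:ss` pattern. A minimal sketch of that two-stage regex, with a made-up OCR string:

```python
import re

result = "12:34 FCB 1:0 RMA"                     # illustrative OCR output
game_time = re.findall(r"\d\d:\d\d", result)     # ['12:34']
remainder = result.replace(game_time[0], "")
score = re.findall(r"\d:\d", remainder)          # ['1:0']
team1, team2 = map(int, score[0].split(":"))
print(team1, team2)                              # 1 0
```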
charmichaniyara/ga-learner-dsmp-repo
|
[
"b807ae5e9373e8b95426ff888e6ca6f6b3bbe2c1"
] |
[
"Numpy/code.py"
] |
[
"# --------------\n# Importing header files\r\nimport numpy as np\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\n#New record\r\nnew_record=[[50, 9, 4, 1, 0, 0, 40, 0]]\r\n\r\n#Reading file\r\n#Step1\r\ndata = np.genfromtxt(path, delimiter=\",\", skip_header=1)\r\n\r\n#Code starts here\r\n\r\ncensus = np.concatenate((data,np.array(new_record)),axis=0)\r\n\r\n#Step2\r\nage = census[:,0]\r\n#print(age)\r\n\r\nmax_age = age.max()\r\nprint(\"Maximum Age: \",max_age)\r\n\r\nmin_age = age.min()\r\nprint(\"Minimum Age: \",min_age)\r\n\r\nage_mean = age.mean()\r\nprint(\"Mean Age: \",age_mean)\r\n\r\nage_std = np.std(age)\r\nprint(\"Standard Deviation of Age: \",age_std)\r\n\r\n#Step3\r\nr = census[:,2]\r\nb0 = (r==0)\r\nrace_0 = r[b0]\r\n\r\nb1 = (r==1)\r\nrace_1 = r[b1]\r\n\r\nb2 = (r==2)\r\nrace_2 = r[b2]\r\n\r\nb3 = (r==3)\r\nrace_3 = r[b3]\r\n\r\nb4 = (r==4)\r\nrace_4 = r[b4]\r\n\r\nlen_0 = len(race_0)\r\n\r\nlen_1 = len(race_1)\r\n\r\nlen_2 = len(race_2)\r\n\r\nlen_3 = len(race_3)\r\n\r\nlen_4 = len(race_4)\r\n\r\nm = [len_0,len_1,len_2,len_3,len_4]\r\nmarray= np.array(m)\r\nminor = marray.min()\r\n\r\nif len_0 == minor:\r\n minority_race = 0\r\nelif len_1 == minor:\r\n minority_race = 1\r\nelif len_2 == minor:\r\n minority_race = 2\r\nelif len_3 == minor:\r\n minority_race = 3\r\nelif len_4 == minor:\r\n minority_race = 4\r\n \r\nprint(\"Minority Race: \",minority_race)\r\n\r\n#Step4\r\n\r\na = census[:,0]\r\nag = (a > 60)\r\nsenior_citizens = a[ag]\r\n\r\nwh = census[:,6]\r\na = census[:,0]\r\nswh = (a > 60)\r\nsenior_wh = wh[swh]\r\n\r\nworking_hours_sum = senior_wh.sum()\r\nprint(\"Working hours sum: \", working_hours_sum)\r\n\r\nsenior_citizens_len = len(senior_citizens)\r\n\r\nworking_hours_sum = senior_wh.sum()\r\n\r\navg_working_hours = working_hours_sum / senior_citizens_len\r\nprint(\"Average Working Hours: \",avg_working_hours)\r\n\r\n#Step5\r\nh = census[:,1]\r\nh1 = (h > 10)\r\nhigh = h[h1]\r\n#high\r\n\r\nl = census[:,1]\r\nl1 = (l <= 10)\r\nlow = l[l1]\r\n#low\r\n\r\nmh = census[:,7]\r\nh = census[:,1]\r\nh1 = (h > 10)\r\nhigh_pay = mh[h1]\r\n#high_pay\r\navg_pay_high = high_pay.mean()\r\nprint(\"Avg High Pay: \",avg_pay_high)\r\n\r\nml = census[:,7]\r\nl = census[:,1]\r\nl1 = (l <= 10)\r\nlow_pay = ml[l1]\r\n#low_pay\r\navg_pay_low = low_pay.mean()\r\nprint(\"Avg Low Pay: \",avg_pay_low)\r\n\r\n\r\n\n\n\n"
] |
[
[
"numpy.std",
"numpy.array",
"numpy.genfromtxt"
]
] |
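Every step in the record above follows one boolean-mask pattern: build a mask on one column, then use it to index another. A compact sketch with a tiny made-up census array (column 0 is age, column 6 is weekly hours):

```python
import numpy as np

census = np.array([[39, 13, 4, 1, 0, 0, 40, 0],
                   [65,  9, 2, 1, 0, 0, 20, 1],
                   [70, 16, 0, 1, 0, 0, 10, 1]], dtype=float)

senior = census[:, 0] > 60            # mask built from the age column
avg_hours = census[senior, 6].mean()  # mask applied to the hours column
print(avg_hours)                      # (20 + 10) / 2 = 15.0
```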
geffy/retailhero-recommender-solution
|
[
"9e94f313146acd87bd09fe8ab63e4f58f22b9a3e"
] |
[
"src/featurizer/feature_extractor.py"
] |
[
"import copy\nimport pickle\nfrom collections import defaultdict\nfrom typing import Any, List, Set, Tuple\n\nimport implicit\nfrom scipy.stats import rankdata\n\nfrom src.featurizer.client_profile import ClientProfile\nfrom src.featurizer.daily import DailyScorer, split_date\nfrom src.featurizer.nn import WrapperNnModel\nfrom src.featurizer.product_info import ProductInfoMapType\nfrom src.utils import ProductEncoder\nfrom src.utils_mini import ProductEncoderMini, make_coo_row_mini\n\n\nclass ImplicitWrapperMini:\n def __init__(self, product_encoder, model_root):\n self.pe = product_encoder\n self.model = pickle.load(open(model_root + \"/model.pkl\", \"rb\"))\n\n def predict(self, actual_row, N=30):\n raw_recs = self.model.recommend(\n userid=0, user_items=actual_row, N=N, filter_already_liked_items=False, recalculate_user=True\n )\n return [(self.pe.toPid(int(idx)), score) for (idx, score) in raw_recs]\n\n\nclass FeatureExtractor:\n USE_KEYS = [\"product_id\", \"level_1\", \"level_2\", \"level_3\", \"level_4\", \"segment_id\", \"vendor_id\"]\n\n def __init__(\n self,\n product_info_map: ProductInfoMapType,\n product_encoder: ProductEncoder,\n global_top: Set[str],\n tagged_models: List[Tuple[str, Any]],\n mini_models: List[Tuple[str, ImplicitWrapperMini]],\n lvl4_model: Any,\n nn2_2: WrapperNnModel,\n ):\n self._pi = product_info_map\n self._pe = product_encoder\n self._global_top = set(global_top)\n self._tagged_models = tagged_models\n self._mini_models = mini_models\n self._lvl4_model = lvl4_model\n self._nn2_2 = nn2_2\n\n def build_precalc(self, profile: ClientProfile):\n precalc = {\"pairs\": {}, \"map\": {}}\n\n # batched calculation of models scores\n for model, tag in self._tagged_models:\n pairs = model.recommend(\n userid=0,\n user_items=profile.row_product,\n N=1000,\n recalculate_user=True,\n filter_already_liked_items=False,\n )\n precalc[\"pairs\"][tag] = [(self._pe.toPid(int(idx)), score) for (idx, score) in pairs]\n\n # batched calculation of models scores\n for model, tag in self._mini_models:\n pairs = model.predict(profile._sparse_actual_products_row, N=1000)\n precalc[\"pairs\"][tag] = pairs\n\n # nn models\n precalc[\"map\"][\"nn22\"] = self._nn2_2.predict(profile._js[\"transaction_history\"])\n\n return precalc\n\n def build_features(self, profile: ClientProfile, precalc, candidates: List[str], raw_date: str):\n rows = []\n\n lvl4_relevances = {\n idx: score\n for (idx, score) in self._lvl4_model.recommend(\n userid=0,\n user_items=profile.row_lvl4,\n N=1000,\n recalculate_user=True,\n filter_already_liked_items=False,\n )\n }\n\n iterate_over_models = [tag for _, tag in self._tagged_models]\n iterate_over_models.extend([tag for _, tag in self._mini_models])\n iterate_over_models.extend([\"nn22\", \"fm22\"])\n\n # fill with default values\n indexed_candidates = self._pe.toIdx(candidates)\n scores = defaultdict(dict)\n for idx in indexed_candidates:\n for tag in iterate_over_models:\n scores[idx][\"model_{}_score\".format(tag)] = -1\n\n # batched calculation of models scores\n for _, tag in self._tagged_models + self._mini_models:\n for (pid, score) in precalc[\"pairs\"][tag]:\n scores[self._pe.toIdx(pid)][\"model_{}_score\".format(tag)] = score\n\n splitted_date = split_date(raw_date)\n\n # nn model + daily_scores\n for idx in indexed_candidates:\n pid = self._pe.toPid(idx)\n scores[idx][\"model_nn22_score\"] = precalc[\"map\"][\"nn22\"].get(pid, -100)\n scores[idx][\"model_fm22_score\"] = -100 # rudiment\n\n # model ranks\n for tag in iterate_over_models:\n _rels = 
[scores[idx][\"model_{}_score\".format(tag)] for idx in indexed_candidates]\n _ranks = rankdata(_rels) / len(_rels)\n for idx, rank in zip(indexed_candidates, _ranks):\n scores[idx][\"model_{}_rank\".format(tag)] = rank\n\n static_user_features = profile.get_user_features()\n\n for product_id in candidates:\n pi = self._pi[product_id]\n\n # user features\n row = copy.copy(static_user_features)\n row[\"time_hour\"] = splitted_date.hour\n row[\"time_weekday\"] = splitted_date.weekday\n\n # product features\n row[\"product_truncated_id\"] = product_id if product_id in self._global_top else \"_RAREID\"\n row[\"product_level4_id\"] = str(pi.level_4)\n row[\"product_level3_id\"] = str(pi.level_3)\n row[\"product_level2_id\"] = str(pi.level_2)\n row[\"product_level1_id\"] = str(pi.level_1)\n row[\"product_segment_id\"] = str(pi.segment_id)\n row[\"product_vendor_id\"] = str(pi.vendor_id)\n row[\"product_brand_id\"] = str(pi.brand_id)\n\n for feature in ClientProfile.FLOAT_KEYS:\n row[\"product_\" + feature] = pi.__getattribute__(feature)\n\n # pairwise part\n profile.set_pairwise_features(pi, FeatureExtractor.USE_KEYS, row)\n\n # model scores\n row.update(scores[self._pe.toIdx(product_id)])\n row[\"model_lvl4_score\"] = lvl4_relevances.get(self._pe.lvlToIdx(product_id, lvl=\"level_4\"), -1)\n rows.append(row)\n return rows\n"
] |
[
[
"scipy.stats.rankdata"
]
] |
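The rank features above come from `scipy.stats.rankdata` divided by the candidate count, which maps raw model scores onto ranks in (0, 1]. A minimal sketch with illustrative scores:

```python
from scipy.stats import rankdata

rels = [-1, 0.7, 0.2, 0.9]
ranks = rankdata(rels) / len(rels)   # smallest score -> 0.25, largest -> 1.0
print(ranks)                         # [0.25 0.75 0.5  1.  ]
```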
chandlerbing65nm/PVT
|
[
"e171519b2a1a44e36ebdf0732f274a190b50ce29"
] |
[
"detection/get_flops.py"
] |
[
"import argparse\n\nimport torch\nfrom mmcv import Config, DictAction\n\nfrom mmdet.models import build_detector\n\ntry:\n from mmcv.cnn import get_model_complexity_info\nexcept ImportError:\n raise ImportError('Please upgrade mmcv to >0.6.2')\n#import pvt\n#import pvt_v2\nfrom mmcv.cnn.utils.flops_counter import get_model_complexity_info, flops_to_string, params_to_string\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a detector')\n parser.add_argument('config', help='train config file path')\n parser.add_argument(\n '--shape',\n type=int,\n nargs='+',\n default=[512, 512],\n help='input image size')\n parser.add_argument(\n '--cfg-options',\n nargs='+',\n action=DictAction,\n help='override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into config file. If the value to '\n 'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n 'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n 'Note that the quotation marks are necessary and that no white space '\n 'is allowed.')\n args = parser.parse_args()\n return args\n\n\ndef sra_flops(h, w, r, dim):\n return 2 * h * w * (h // r) * (w // r) * dim\n\n\ndef li_sra_flops(h, w, dim):\n return 2 * h * w * 7 * 7 * dim\n\n\ndef get_flops(model, input_shape):\n flops, params = get_model_complexity_info(model, input_shape, as_strings=False)\n\n backbone = model.backbone\n backbone_name = type(backbone).__name__\n\n if 'pvt' in backbone_name:\n _, H, W = input_shape\n if 'li' in backbone_name: # calculate flops of PVTv2_li\n stage1 = li_sra_flops(H // 4, W // 4,\n backbone.block1[0].attn.dim) * len(backbone.block1)\n stage2 = li_sra_flops(H // 8, W // 8,\n backbone.block2[0].attn.dim) * len(backbone.block2)\n stage3 = li_sra_flops(H // 16, W // 16,\n backbone.block3[0].attn.dim) * len(backbone.block3)\n stage4 = li_sra_flops(H // 32, W // 32,\n backbone.block4[0].attn.dim) * len(backbone.block4)\n else: # calculate flops of PVT/PVTv2\n stage1 = sra_flops(H // 4, W // 4,\n backbone.block1[0].attn.sr_ratio,\n backbone.block1[0].attn.dim) * len(backbone.block1)\n stage2 = sra_flops(H // 8, W // 8,\n backbone.block2[0].attn.sr_ratio,\n backbone.block2[0].attn.dim) * len(backbone.block2)\n stage3 = sra_flops(H // 16, W // 16,\n backbone.block3[0].attn.sr_ratio,\n backbone.block3[0].attn.dim) * len(backbone.block3)\n stage4 = sra_flops(H // 32, W // 32,\n backbone.block4[0].attn.sr_ratio,\n backbone.block4[0].attn.dim) * len(backbone.block4)\n flops += stage1 + stage2 + stage3 + stage4\n return flops_to_string(flops), params_to_string(params)\n\n\ndef main():\n args = parse_args()\n\n if len(args.shape) == 1:\n input_shape = (3, args.shape[0], args.shape[0])\n elif len(args.shape) == 2:\n input_shape = (3,) + tuple(args.shape)\n else:\n raise ValueError('invalid input shape')\n\n cfg = Config.fromfile(args.config)\n if args.cfg_options is not None:\n cfg.merge_from_dict(args.cfg_options)\n # import modules from string list.\n if cfg.get('custom_imports', None):\n from mmcv.utils import import_modules_from_strings\n import_modules_from_strings(**cfg['custom_imports'])\n\n model = build_detector(\n cfg.model,\n train_cfg=cfg.get('train_cfg'),\n test_cfg=cfg.get('test_cfg'))\n if torch.cuda.is_available():\n model.cuda()\n model.eval()\n\n if hasattr(model, 'forward_dummy'):\n model.forward = model.forward_dummy\n else:\n raise NotImplementedError(\n 'FLOPs counter is currently not currently supported with {}'.\n format(model.__class__.__name__))\n\n 
flops, params = get_flops(model, input_shape)\n\n split_line = '=' * 30\n print(f'{split_line}\\nInput shape: {input_shape}\\n'\n f'Flops: {flops}\\nParams: {params}\\n{split_line}')\n print('!!!Please be cautious if you use the results in papers. '\n 'You may need to check if all ops are supported and verify that the '\n 'flops computation is correct.')\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.cuda.is_available"
]
] |
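A worked example of the `sra_flops` correction above: the extra attention FLOPs for one block of a PVT stage at a 512x512 input, stride-4 features, `sr_ratio` 8, and embedding dim 64 (these values are illustrative, not taken from a specific config):

```python
def sra_flops(h, w, r, dim):
    # spatial-reduction attention: queries attend to an (h//r, w//r) key/value grid
    return 2 * h * w * (h // r) * (w // r) * dim

H = W = 512
stage1_block = sra_flops(H // 4, W // 4, 8, 64)
print(f"{stage1_block / 1e9:.2f} GFLOPs")   # ~0.54 GFLOPs per block
```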
bsautermeister/tensorflow-handwriting-demo
|
[
"73292f3bfc4fd9dcdad0d6e77d72c82c87e15d7e"
] |
[
"tensorflow/utils/tensor.py"
] |
[
"\"\"\" Tensorflow related utility functions. \"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\n\n\ndef get_num_trainable_params():\n total_parameters = 0\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n shape = variable.get_shape()\n variable_parametes = 1\n for dim in shape:\n variable_parametes *= dim.value\n total_parameters += variable_parametes\n return total_parameters"
] |
[
[
"tensorflow.trainable_variables"
]
] |
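The helper above multiplies out each variable's dimensions by hand, which is the TF1 idiom. Under TF2/Keras the same count can be computed per model with a product over `model.trainable_variables`; a small sketch with a toy model (layer sizes are illustrative):

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(3,)),
    tf.keras.layers.Dense(4),
])

# sum of prod(shape) over every trainable variable
total = sum(int(np.prod(v.shape)) for v in model.trainable_variables)
print(total)   # 3*4 weights + 4 biases = 16
```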
StevenGrove/LearnableTreeFilterV2
|
[
"3814a5a84c0a5c33d6538749eaf5aed4827366de"
] |
[
"cvpods/evaluation/crowdhuman_evaluation.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport copy\nimport itertools\nimport json\nimport logging\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pycocotools.mask as mask_util\n\nimport torch\n\nfrom cvpods.data.datasets.coco import convert_to_coco_json\nfrom cvpods.structures import BoxMode\nfrom cvpods.utils import PathManager, comm, create_small_table\n\nfrom .crowdhumantools import Database\nfrom .evaluator import DatasetEvaluator\nfrom .registry import EVALUATOR\n\n\[email protected]()\nclass CrowdHumanEvaluator(DatasetEvaluator):\n \"\"\"\n Evaluate object proposal, instance detection/segmentation, keypoint detection\n outputs using COCO's metrics and APIs.\n \"\"\"\n\n def __init__(\n self,\n dataset_name,\n meta,\n cfg,\n distributed,\n output_dir=None,\n dump=False):\n \"\"\"\n Args:\n dataset_name (str): name of the dataset to be evaluated.\n It must have either the following corresponding metadata:\n\n\n \"json_file\": the path to the COCO format annotation\n Or it must be in cvpods's standard dataset format\n so it can be converted to COCO format automatically.\n cfg (CfgNode): config instance\n distributed (True): if True, will collect results from all ranks for evaluation.\n Otherwise, will evaluate the results in the current process.\n output_dir (str): optional, an output directory to dump results.\n \"\"\"\n self._dump = dump\n self._tasks = self._tasks_from_config(cfg)\n self._distributed = distributed\n self._output_dir = output_dir\n\n self._cpu_device = torch.device(\"cpu\")\n self._logger = logging.getLogger(__name__)\n\n self._metadata = meta\n if not hasattr(self._metadata, \"json_file\"):\n self._logger.warning(\n f\"json_file was not found in MetaDataCatalog for '{dataset_name}'\")\n\n cache_path = convert_to_coco_json(dataset_name, output_dir)\n self._metadata.json_file = cache_path\n\n self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS\n # Test set json files do not contain annotations (evaluation must be\n # performed using the COCO evaluation server).\n\n def reset(self):\n self._predictions = []\n self._coco_results = []\n\n def _tasks_from_config(self, cfg):\n \"\"\"\n Returns:\n tuple[str]: tasks that can be evaluated under the given configuration.\n \"\"\"\n # @[email protected]: next 4 line to a func\n if self._dump:\n with open(\"README.md\", \"w\") as f:\n name = cfg.OUTPUT_DIR.split(\"/\")[-1]\n f.write(\"# {} \\n\".format(name))\n\n tasks = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n tasks = tasks + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n tasks = tasks + (\"keypoints\",)\n return tasks\n\n def boxes_dump(self, boxes, is_gt=False):\n result = []\n boxes = boxes.tolist()\n for box in boxes:\n if is_gt:\n box_dict = {}\n box_dict['box'] = [box[0], box[1], box[2] - box[0],\n box[3] - box[1]]\n box_dict['tag'] = box[-1]\n result.append(box_dict)\n else:\n box_dict = {}\n box_dict['box'] = [box[0], box[1], box[2] - box[0],\n box[3] - box[1]]\n box_dict['tag'] = 1\n box_dict['score'] = box[-1]\n result.append(box_dict)\n return result\n\n def process(self, inputs, outputs):\n \"\"\"\n Args:\n inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).\n It is a list of dict. Each dict corresponds to an image and\n contains keys like \"height\", \"width\", \"file_name\", \"image_id\".\n outputs: the outputs of a COCO model. 
It is a list of dicts with key\n \"instances\" that contains :class:`Instances`.\n \"\"\"\n for input, output in zip(inputs, outputs):\n prediction = {\"image_id\": input[\"image_id\"]}\n\n # TODO this is ugly\n if \"instances\" in output:\n instances = output[\"instances\"].to(self._cpu_device)\n prediction[\"instances\"] = instances_to_coco_json(\n instances, input[\"image_id\"])\n if \"proposals\" in output:\n prediction[\"proposals\"] = output[\"proposals\"].to(\n self._cpu_device)\n\n gt_boxes = input['instances'].gt_boxes.tensor.cpu().numpy()\n gt_classes = input['instances'].gt_classes.cpu().numpy()[:, np.newaxis]\n gt_boxes = np.concatenate([gt_boxes, gt_classes], axis=1)\n\n pred_boxes = output['instances'].pred_boxes.tensor.cpu().numpy()\n pred_score = output['instances'].scores.cpu().numpy()[:, np.newaxis]\n pred_boxes = np.concatenate([pred_boxes, pred_score], axis=1)\n\n result_dict = dict(\n ID=input['image_id'],\n height=int(input['height']),\n width=int(input['width']),\n dtboxes=self.boxes_dump(pred_boxes),\n gtboxes=self.boxes_dump(gt_boxes, is_gt=True)\n )\n # rois=misc_utils.boxes_dump(rois[:, 1:], True))\n self._predictions.append(result_dict)\n\n def evaluate(self):\n if self._distributed:\n comm.synchronize()\n self._predictions = comm.gather(self._predictions, dst=0)\n self._predictions = list(itertools.chain(*self._predictions))\n\n if not comm.is_main_process():\n return {}\n\n if len(self._predictions) == 0:\n self._logger.warning(\n \"[COCOEvaluator] Did not receive valid predictions.\")\n return {}\n\n if self._output_dir:\n PathManager.mkdirs(self._output_dir)\n file_path = os.path.join(\n self._output_dir, \"instances_predictions.pth\")\n with PathManager.open(file_path, \"wb\") as f:\n torch.save(self._predictions, f)\n\n self._results = OrderedDict()\n self._eval_predictions(set(self._tasks))\n # Copy so the caller can do whatever with results\n return copy.deepcopy(self._results)\n\n def _eval_predictions(self, tasks):\n \"\"\"\n Evaluate self._predictions on the given tasks.\n Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for CrowdHuman format ...\")\n self._coco_results = self._predictions\n\n if self._output_dir:\n file_path = os.path.join(\n self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n\n with PathManager.open(file_path, \"w\") as f:\n for db in self._coco_results:\n line = json.dumps(db) + '\\n'\n f.write(line)\n\n self._logger.info(\"Evaluating predictions ...\")\n for task in sorted(tasks):\n coco_eval = (\n _evaluate_predictions_on_crowdhuman(\n self._metadata.json_file, file_path)\n if len(self._coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n res = self._derive_coco_results(coco_eval, task)\n self._results[task] = res\n\n def _derive_coco_results(self, coco_eval, iou_type):\n \"\"\"\n Derive the desired score numbers from summarized COCOeval.\n\n Args:\n coco_eval (None or COCOEval): None represents no predictions from model.\n iou_type (str):\n class_names (None or list[str]): if provided, will use it to predict\n per-category AP.\n\n Returns:\n a dict of {metric name: score}\n \"\"\"\n\n metrics = [\"AP\", \"mMR\", \"Recall\"]\n\n if coco_eval is None:\n self._logger.warn(\n \"No predictions from the model! 
Set scores to -1\")\n return {metric: -1 for metric in metrics}\n\n # the standard metrics\n results = {metric: coco_eval[idx]\n for idx, metric in enumerate(metrics)}\n small_table = create_small_table(results)\n self._logger.info(\n \"Evaluation results for {}: \\n\".format(iou_type) + small_table\n )\n\n # if class_names is None or len(class_names) <= 1:\n return results\n\n\ndef instances_to_coco_json(instances, img_id):\n \"\"\"\n Dump an \"Instances\" object to a COCO-format json that's used for evaluation.\n Args:\n instances (Instances):\n img_id (int): the image id\n Returns:\n list[dict]: list of json annotations in COCO format.\n \"\"\"\n num_instance = len(instances)\n if num_instance == 0:\n return []\n\n boxes = instances.pred_boxes.tensor.numpy()\n boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)\n boxes = boxes.tolist()\n scores = instances.scores.tolist()\n classes = instances.pred_classes.tolist()\n\n has_mask = instances.has(\"pred_masks\")\n if has_mask:\n # use RLE to encode the masks, because they are too large and takes memory\n # since this evaluator stores outputs of the entire dataset\n rles = [\n mask_util.encode(np.array(mask[:, :, None], order=\"F\", dtype=\"uint8\"))[0]\n for mask in instances.pred_masks\n ]\n for rle in rles:\n # \"counts\" is an array encoded by mask_util as a byte-stream. Python3's\n # json writer which always produces strings cannot serialize a bytestream\n # unless you decode it. Thankfully, utf-8 works out (which is also what\n # the pycocotools/_mask.pyx does).\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\")\n\n has_keypoints = instances.has(\"pred_keypoints\")\n if has_keypoints:\n keypoints = instances.pred_keypoints\n\n results = []\n for k in range(num_instance):\n result = {\n \"image_id\": img_id,\n \"category_id\": classes[k],\n \"bbox\": boxes[k],\n \"height\": boxes[k][3],\n \"score\": scores[k],\n }\n if has_mask:\n result[\"segmentation\"] = rles[k]\n if has_keypoints:\n # In COCO annotations,\n # keypoints coordinates are pixel indices.\n # However our predictions are floating point coordinates.\n # Therefore we subtract 0.5 to be consistent with the annotation format.\n # This is the inverse of data loading logic in `datasets/coco.py`.\n keypoints[k][:, :2] -= 0.5\n result[\"keypoints\"] = keypoints[k].flatten().tolist()\n results.append(result)\n return results\n\n\ndef _evaluate_predictions_on_crowdhuman(gt_path, dt_path, target_key=\"box\", mode=0):\n \"\"\"\n Evaluate the coco results using COCOEval API.\n \"\"\"\n database = Database(gt_path, dt_path, target_key, None, mode)\n database.compare()\n AP, recall, _ = database.eval_AP()\n mMR, _ = database.eval_MR()\n return AP, mMR, recall\n"
] |
[
[
"torch.device",
"numpy.array",
"numpy.concatenate",
"torch.save"
]
] |
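Condensing the CrowdHuman evaluator row above: the four listed torch/numpy calls are its bookkeeping for moving results to CPU, appending a score column to XYXY boxes, and dumping gathered predictions. A minimal sketch with an invented detection (not the repo's data):

```python
import numpy as np
import torch

cpu_device = torch.device("cpu")  # evaluate() gathers all results on CPU

# one illustrative detection: an XYXY box plus its confidence score
pred_boxes = np.array([[10.0, 20.0, 50.0, 80.0]])
pred_score = np.array([[0.9]])
dtboxes = np.concatenate([pred_boxes, pred_score], axis=1)  # [x1, y1, x2, y2, score]

predictions = [{"ID": 0, "dtboxes": dtboxes.tolist()}]
torch.save(predictions, "instances_predictions.pth")  # same dump step as evaluate()
```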
BGTCapital/hummingbot
|
[
"2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242",
"2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242"
] |
[
"hummingbot/connector/exchange/okex/okex_api_order_book_data_source.py",
"hummingbot/strategy/perpetual_market_making/perpetual_market_making.py"
] |
[
"#!/usr/bin/env python\n\nimport aiohttp\nimport asyncio\n\nimport json\nimport logging\nimport pandas as pd\nimport time\nfrom typing import (\n Any,\n AsyncIterable,\n Dict,\n List,\n Optional,\n)\nimport websockets\nfrom websockets.exceptions import ConnectionClosed\n\nfrom hummingbot.core.data_type.order_book_message import OrderBookMessage\nfrom hummingbot.core.data_type.order_book import OrderBook\nfrom hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource\nfrom hummingbot.logger import HummingbotLogger\nfrom hummingbot.connector.exchange.okex.okex_order_book import OkexOrderBook\nfrom hummingbot.connector.exchange.okex.constants import (\n OKEX_INSTRUMENTS_URL,\n OKEX_DEPTH_URL,\n OKEX_TICKERS_URL,\n OKEX_WS_URI_PUBLIC,\n)\n\nfrom dateutil.parser import parse as dataparse\n\n\nclass OkexAPIOrderBookDataSource(OrderBookTrackerDataSource):\n\n MESSAGE_TIMEOUT = 30.0\n PING_TIMEOUT = 10.0\n\n _okexaobds_logger: Optional[HummingbotLogger] = None\n\n @classmethod\n def logger(cls) -> HummingbotLogger:\n if cls._okexaobds_logger is None:\n cls._okexaobds_logger = logging.getLogger(__name__)\n return cls._okexaobds_logger\n\n def __init__(self, trading_pairs: List[str]):\n super().__init__(trading_pairs)\n self._trading_pairs: List[str] = trading_pairs\n\n @staticmethod\n async def fetch_trading_pairs() -> List[str]:\n # Returns a List of str, representing each active trading pair on the exchange.\n async with aiohttp.ClientSession() as client:\n async with client.get(OKEX_INSTRUMENTS_URL) as products_response:\n\n products_response: aiohttp.ClientResponse = products_response\n if products_response.status != 200:\n raise IOError(f\"Error fetching active OKEx markets. HTTP status is {products_response.status}.\")\n\n data = await products_response.json()\n data = data['data']\n\n trading_pairs = []\n for item in data:\n # I couldn't find where to check if it's online in OKEx API doc\n if item['state'] == 'live':\n trading_pairs.append(item['instId'])\n\n return trading_pairs\n\n async def get_new_order_book(self, trading_pair: str) -> OrderBook:\n async with aiohttp.ClientSession() as client:\n snapshot: Dict[str, Any] = await self.get_snapshot(client, trading_pair)\n\n snapshot_msg: OrderBookMessage = OkexOrderBook.snapshot_message_from_exchange(\n snapshot,\n trading_pair,\n timestamp=snapshot['ts'],\n metadata={\"trading_pair\": trading_pair})\n order_book: OrderBook = self.order_book_create_function()\n order_book.apply_snapshot(snapshot_msg.bids, snapshot_msg.asks, snapshot_msg.update_id)\n return order_book\n\n # Move this to OrderBookTrackerDataSource or this needs a whole refactor?\n @classmethod\n async def get_last_traded_prices(cls, trading_pairs: List[str]) -> Dict[str, float]:\n async with aiohttp.ClientSession() as client:\n async with client.get(OKEX_TICKERS_URL) as products_response:\n\n products_response: aiohttp.ClientResponse = products_response\n if products_response.status != 200:\n raise IOError(f\"Error fetching active OKEx markets. 
HTTP status is {products_response.status}.\")\n\n data = await products_response.json()\n data = data['data']\n all_markets: pd.DataFrame = pd.DataFrame.from_records(data=data)\n all_markets.set_index('instId', inplace=True)\n\n out: Dict[str, float] = {}\n\n for trading_pair in trading_pairs:\n out[trading_pair] = float(all_markets['last'][trading_pair])\n\n return out\n\n async def get_trading_pairs(self) -> List[str]:\n if not self._trading_pairs:\n try:\n self._trading_pairs = await self.fetch_trading_pairs()\n except Exception:\n self._trading_pairs = []\n self.logger().network(\n \"Error getting active exchange information.\",\n exc_info=True,\n app_warning_msg=\"Error getting active exchange information. Check network connection.\"\n )\n return self._trading_pairs\n\n @staticmethod\n async def get_snapshot(client: aiohttp.ClientSession, trading_pair: str) -> Dict[str, Any]:\n \"\"\"Fetches order book snapshot for a particular trading pair from the exchange REST API.\"\"\"\n params = {}\n async with client.get(OKEX_DEPTH_URL.format(trading_pair=trading_pair), params=params) as response:\n response: aiohttp.ClientResponse = response\n if response.status != 200:\n raise IOError(f\"Error fetching OKEX market snapshot for {trading_pair}. \"\n f\"HTTP status is {response.status}.\")\n api_data = await response.read()\n data: Dict[str, Any] = json.loads(api_data)['data'][0]\n data['ts'] = int(data['ts'])\n\n return data\n\n @classmethod\n def iso_to_timestamp(cls, date: str):\n return dataparse(date).timestamp()\n\n async def listen_for_trades(self, ev_loop: Optional[asyncio.BaseEventLoop], output: asyncio.Queue):\n \"\"\"Subscribes to the trade channel of the exchange. Adds incoming messages(of filled orders) to the output queue, to be processed by\"\"\"\n\n while True:\n try:\n trading_pairs: List[str] = self._trading_pairs\n async with websockets.connect(OKEX_WS_URI_PUBLIC) as ws:\n ws: websockets.WebSocketClientProtocol = ws\n\n for trading_pair in trading_pairs:\n subscribe_request: Dict[str, Any] = {\n \"op\": \"subscribe\",\n \"args\": [\n {\n \"channel\": \"trades\",\n \"instType\": \"SPOT\",\n \"instId\": trading_pair,\n }\n ]\n }\n await ws.send(json.dumps(subscribe_request))\n\n async for raw_msg in self._inner_messages(ws):\n decoded_msg: str = raw_msg\n\n self.logger().debug(\"decode menssage:\" + decoded_msg)\n\n if '\"event\":\"subscribe\"' in decoded_msg:\n self.logger().debug(f\"Subscribed to channel, full message: {decoded_msg}\")\n elif '\"channel\": \"orders\"' in decoded_msg:\n self.logger().debug(f\"Received new trade: {decoded_msg}\")\n\n for data in json.loads(decoded_msg)['data']:\n trading_pair = data['instId']\n trade_message: OrderBookMessage = OkexOrderBook.trade_message_from_exchange(\n data, data['uTime'], metadata={\"trading_pair\": trading_pair}\n )\n self.logger().debug(f\"Putting msg in queue: {str(trade_message)}\")\n output.put_nowait(trade_message)\n else:\n self.logger().debug(f\"Unrecognized message received from OKEx websocket: {decoded_msg}\")\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error with WebSocket connection. 
Retrying after 30 seconds...\",\n exc_info=True)\n await asyncio.sleep(30.0)\n\n async def _inner_messages(self,\n ws: websockets.WebSocketClientProtocol) -> AsyncIterable[str]:\n # Terminate the recv() loop as soon as the next message timed out, so the outer loop can reconnect.\n try:\n while True:\n try:\n msg: str = await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)\n yield msg\n except asyncio.TimeoutError:\n pong_waiter = await ws.ping()\n await asyncio.wait_for(pong_waiter, timeout=self.PING_TIMEOUT)\n except asyncio.TimeoutError:\n self.logger().warning(\"WebSocket ping timed out. Going to reconnect...\")\n return\n except ConnectionClosed:\n return\n finally:\n await ws.close()\n\n async def listen_for_order_book_diffs(self, ev_loop: Optional[asyncio.BaseEventLoop], output: asyncio.Queue):\n \"\"\"Fetches or Subscribes to the order book snapshots for each trading pair. Additionally, parses the incoming message into a OrderBookMessage and appends it into the output Queue.\"\"\"\n while True:\n try:\n trading_pairs: List[str] = await self.get_trading_pairs()\n async with websockets.connect(OKEX_WS_URI_PUBLIC) as ws:\n ws: websockets.WebSocketClientProtocol = ws\n\n for trading_pair in trading_pairs:\n subscribe_request: Dict[str, Any] = {\n \"op\": \"subscribe\",\n \"args\": [\n {\n \"channel\": \"books\",\n \"instId\": trading_pair\n }\n ]\n }\n await ws.send(json.dumps(subscribe_request))\n\n async for raw_msg in self._inner_messages(ws):\n decoded_msg: str = raw_msg\n\n if '\"event\":\"subscribe\"' in decoded_msg:\n self.logger().debug(f\"Subscribed to channel, full message: {decoded_msg}\")\n elif '\"action\":\"update\"' in decoded_msg:\n msg = json.loads(decoded_msg)\n for data in msg['data']:\n order_book_message: OrderBookMessage = OkexOrderBook.diff_message_from_exchange(data, int(data['ts']), msg['arg'])\n output.put_nowait(order_book_message)\n else:\n self.logger().debug(f\"Unrecognized message received from OKEx websocket: {decoded_msg}\")\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error with WebSocket connection. Retrying after 30 seconds...\",\n exc_info=True)\n await asyncio.sleep(30.0)\n\n async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):\n \"\"\"Fetches or Subscribes to the order book deltas(diffs) for each trading pair. 
Additionally, parses the incoming message into a OrderBookMessage and appends it into the output Queue.\"\"\"\n while True:\n try:\n trading_pairs: List[str] = await self.get_trading_pairs()\n async with aiohttp.ClientSession() as client:\n for trading_pair in trading_pairs:\n try:\n snapshot: Dict[str, Any] = await self.get_snapshot(client, trading_pair)\n snapshot_msg: OrderBookMessage = OkexOrderBook.snapshot_message_from_exchange(\n snapshot,\n trading_pair,\n timestamp=snapshot['ts'],\n metadata={\"trading_pair\": trading_pair}\n )\n output.put_nowait(snapshot_msg)\n self.logger().debug(f\"Saved order book snapshot for {trading_pair}\")\n await asyncio.sleep(5.0)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error.\", exc_info=True)\n await asyncio.sleep(5.0)\n this_hour: pd.Timestamp = pd.Timestamp.utcnow().replace(minute=0, second=0, microsecond=0)\n next_hour: pd.Timestamp = this_hour + pd.Timedelta(hours=1)\n delta: float = next_hour.timestamp() - time.time()\n await asyncio.sleep(delta)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unexpected error.\", exc_info=True)\n await asyncio.sleep(5.0)\n",
"import logging\nimport time\nfrom decimal import Decimal\nfrom itertools import chain\nfrom math import ceil, floor\nfrom typing import Dict, List\n\nimport numpy as np\nimport pandas as pd\n\nfrom hummingbot.connector.derivative.position import Position\nfrom hummingbot.connector.exchange_base import ExchangeBase\nfrom hummingbot.core.clock import Clock\nfrom hummingbot.core.data_type.limit_order import LimitOrder\nfrom hummingbot.core.data_type.order_candidate import PerpetualOrderCandidate\nfrom hummingbot.core.event.events import (\n BuyOrderCompletedEvent,\n OrderFilledEvent,\n OrderType,\n PositionAction,\n PositionMode,\n PriceType,\n SellOrderCompletedEvent,\n TradeType\n)\nfrom hummingbot.core.network_iterator import NetworkStatus\nfrom hummingbot.core.utils import map_df_to_str\nfrom hummingbot.strategy.asset_price_delegate import AssetPriceDelegate\nfrom hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple\nfrom hummingbot.strategy.order_book_asset_price_delegate import OrderBookAssetPriceDelegate\nfrom hummingbot.strategy.perpetual_market_making.data_types import PriceSize, Proposal\nfrom hummingbot.strategy.perpetual_market_making.perpetual_market_making_order_tracker import (\n PerpetualMarketMakingOrderTracker\n)\nfrom hummingbot.strategy.strategy_py_base import StrategyPyBase\n\nNaN = float(\"nan\")\ns_decimal_zero = Decimal(0)\ns_decimal_neg_one = Decimal(-1)\n\n\nclass PerpetualMarketMakingStrategy(StrategyPyBase):\n OPTION_LOG_CREATE_ORDER = 1 << 3\n OPTION_LOG_MAKER_ORDER_FILLED = 1 << 4\n OPTION_LOG_STATUS_REPORT = 1 << 5\n OPTION_LOG_ALL = 0x7fffffffffffffff\n _logger = None\n\n @classmethod\n def logger(cls):\n if cls._logger is None:\n cls._logger = logging.getLogger(__name__)\n return cls._logger\n\n def init_params(self,\n market_info: MarketTradingPairTuple,\n leverage: int,\n position_mode: str,\n bid_spread: Decimal,\n ask_spread: Decimal,\n order_amount: Decimal,\n long_profit_taking_spread: Decimal,\n short_profit_taking_spread: Decimal,\n stop_loss_spread: Decimal,\n time_between_stop_loss_orders: float,\n stop_loss_slippage_buffer: Decimal,\n order_levels: int = 1,\n order_level_spread: Decimal = s_decimal_zero,\n order_level_amount: Decimal = s_decimal_zero,\n order_refresh_time: float = 30.0,\n order_refresh_tolerance_pct: Decimal = s_decimal_neg_one,\n filled_order_delay: float = 60.0,\n order_optimization_enabled: bool = False,\n ask_order_optimization_depth: Decimal = s_decimal_zero,\n bid_order_optimization_depth: Decimal = s_decimal_zero,\n asset_price_delegate: AssetPriceDelegate = None,\n price_type: str = \"mid_price\",\n price_ceiling: Decimal = s_decimal_neg_one,\n price_floor: Decimal = s_decimal_neg_one,\n logging_options: int = OPTION_LOG_ALL,\n status_report_interval: float = 900,\n minimum_spread: Decimal = Decimal(0),\n hb_app_notification: bool = False,\n order_override: Dict[str, List[str]] = {},\n ):\n\n if price_ceiling != s_decimal_neg_one and price_ceiling < price_floor:\n raise ValueError(\"Parameter price_ceiling cannot be lower than price_floor.\")\n\n self._sb_order_tracker = PerpetualMarketMakingOrderTracker()\n self._market_info = market_info\n self._leverage = leverage\n self._position_mode = PositionMode.HEDGE if position_mode == \"Hedge\" else PositionMode.ONEWAY\n self._bid_spread = bid_spread\n self._ask_spread = ask_spread\n self._minimum_spread = minimum_spread\n self._order_amount = order_amount\n self._long_profit_taking_spread = long_profit_taking_spread\n self._short_profit_taking_spread = 
short_profit_taking_spread\n self._stop_loss_spread = stop_loss_spread\n self._order_levels = order_levels\n self._buy_levels = order_levels\n self._sell_levels = order_levels\n self._order_level_spread = order_level_spread\n self._order_level_amount = order_level_amount\n self._order_refresh_time = order_refresh_time\n self._order_refresh_tolerance_pct = order_refresh_tolerance_pct\n self._filled_order_delay = filled_order_delay\n self._order_optimization_enabled = order_optimization_enabled\n self._ask_order_optimization_depth = ask_order_optimization_depth\n self._bid_order_optimization_depth = bid_order_optimization_depth\n self._asset_price_delegate = asset_price_delegate\n self._price_type = self.get_price_type(price_type)\n self._price_ceiling = price_ceiling\n self._price_floor = price_floor\n self._hb_app_notification = hb_app_notification\n self._order_override = order_override\n\n self._cancel_timestamp = 0\n self._create_timestamp = 0\n self._all_markets_ready = False\n self._logging_options = logging_options\n self._last_timestamp = 0\n self._status_report_interval = status_report_interval\n self._last_own_trade_price = Decimal('nan')\n self._ts_peak_bid_price = Decimal('0')\n self._ts_peak_ask_price = Decimal('0')\n self._exit_orders = dict()\n self._next_buy_exit_order_timestamp = 0\n self._next_sell_exit_order_timestamp = 0\n\n self.add_markets([market_info.market])\n\n self._close_order_type = OrderType.LIMIT\n self._time_between_stop_loss_orders = time_between_stop_loss_orders\n self._stop_loss_slippage_buffer = stop_loss_slippage_buffer\n\n def all_markets_ready(self):\n return all([market.ready for market in self.active_markets])\n\n @property\n def order_refresh_tolerance_pct(self) -> Decimal:\n return self._order_refresh_tolerance_pct\n\n @order_refresh_tolerance_pct.setter\n def order_refresh_tolerance_pct(self, value: Decimal):\n self._order_refresh_tolerance_pct = value\n\n @property\n def order_amount(self) -> Decimal:\n return self._order_amount\n\n @order_amount.setter\n def order_amount(self, value: Decimal):\n self._order_amount = value\n\n @property\n def order_levels(self) -> int:\n return self._order_levels\n\n @order_levels.setter\n def order_levels(self, value: int):\n self._order_levels = value\n self._buy_levels = value\n self._sell_levels = value\n\n @property\n def buy_levels(self) -> int:\n return self._buy_levels\n\n @buy_levels.setter\n def buy_levels(self, value: int):\n self._buy_levels = value\n\n @property\n def sell_levels(self) -> int:\n return self._sell_levels\n\n @sell_levels.setter\n def sell_levels(self, value: int):\n self._sell_levels = value\n\n @property\n def order_level_amount(self) -> Decimal:\n return self._order_level_amount\n\n @order_level_amount.setter\n def order_level_amount(self, value: Decimal):\n self._order_level_amount = value\n\n @property\n def order_level_spread(self) -> Decimal:\n return self._order_level_spread\n\n @order_level_spread.setter\n def order_level_spread(self, value: Decimal):\n self._order_level_spread = value\n\n @property\n def bid_spread(self) -> Decimal:\n return self._bid_spread\n\n @bid_spread.setter\n def bid_spread(self, value: Decimal):\n self._bid_spread = value\n\n @property\n def ask_spread(self) -> Decimal:\n return self._ask_spread\n\n @ask_spread.setter\n def ask_spread(self, value: Decimal):\n self._ask_spread = value\n\n @property\n def order_optimization_enabled(self) -> bool:\n return self._order_optimization_enabled\n\n @order_optimization_enabled.setter\n def 
order_optimization_enabled(self, value: bool):\n self._order_optimization_enabled = value\n\n @property\n def order_refresh_time(self) -> float:\n return self._order_refresh_time\n\n @order_refresh_time.setter\n def order_refresh_time(self, value: float):\n self._order_refresh_time = value\n\n @property\n def filled_order_delay(self) -> float:\n return self._filled_order_delay\n\n @filled_order_delay.setter\n def filled_order_delay(self, value: float):\n self._filled_order_delay = value\n\n @property\n def price_ceiling(self) -> Decimal:\n return self._price_ceiling\n\n @price_ceiling.setter\n def price_ceiling(self, value: Decimal):\n self._price_ceiling = value\n\n @property\n def price_floor(self) -> Decimal:\n return self._price_floor\n\n @price_floor.setter\n def price_floor(self, value: Decimal):\n self._price_floor = value\n\n @property\n def base_asset(self):\n return self._market_info.base_asset\n\n @property\n def quote_asset(self):\n return self._market_info.quote_asset\n\n @property\n def trading_pair(self):\n return self._market_info.trading_pair\n\n def get_price(self) -> float:\n if self._asset_price_delegate is not None:\n price_provider = self._asset_price_delegate\n else:\n price_provider = self._market_info\n if self._price_type is PriceType.LastOwnTrade:\n price = self._last_own_trade_price\n else:\n price = price_provider.get_price_by_type(self._price_type)\n if price.is_nan():\n price = price_provider.get_price_by_type(PriceType.MidPrice)\n return price\n\n def get_last_price(self) -> float:\n return self._market_info.get_last_price()\n\n def get_mid_price(self) -> Decimal:\n delegate: AssetPriceDelegate = self._asset_price_delegate\n if delegate is not None:\n mid_price = delegate.get_mid_price()\n else:\n mid_price = self._market_info.get_mid_price()\n return mid_price\n\n @property\n def active_orders(self) -> List[LimitOrder]:\n if self._market_info not in self._sb_order_tracker.market_pair_to_active_orders:\n return []\n return self._sb_order_tracker.market_pair_to_active_orders[self._market_info]\n\n @property\n def active_positions(self) -> Dict[str, Position]:\n return self._market_info.market.account_positions\n\n @property\n def active_buys(self) -> List[LimitOrder]:\n return [o for o in self.active_orders if o.is_buy]\n\n @property\n def active_sells(self) -> List[LimitOrder]:\n return [o for o in self.active_orders if not o.is_buy]\n\n @property\n def logging_options(self) -> int:\n return self._logging_options\n\n @logging_options.setter\n def logging_options(self, logging_options: int):\n self._logging_options = logging_options\n\n @property\n def asset_price_delegate(self) -> AssetPriceDelegate:\n return self._asset_price_delegate\n\n @asset_price_delegate.setter\n def asset_price_delegate(self, value):\n self._asset_price_delegate = value\n\n def perpetual_mm_assets_df(self) -> pd.DataFrame:\n market, trading_pair, base_asset, quote_asset = self._market_info\n quote_balance = float(market.get_balance(quote_asset))\n available_quote_balance = float(market.get_available_balance(quote_asset))\n data = [\n [\"\", quote_asset],\n [\"Total Balance\", round(quote_balance, 4)],\n [\"Available Balance\", round(available_quote_balance, 4)]\n ]\n df = pd.DataFrame(data=data)\n return df\n\n def active_orders_df(self) -> pd.DataFrame:\n price = self.get_price()\n active_orders = self.active_orders\n no_sells = len([o for o in active_orders if not o.is_buy])\n active_orders.sort(key=lambda x: x.price, reverse=True)\n columns = [\"Level\", \"Type\", \"Price\", 
\"Spread\", \"Amount (Orig)\", \"Amount (Adj)\", \"Age\"]\n data = []\n lvl_buy, lvl_sell = 0, 0\n for idx in range(0, len(active_orders)):\n order = active_orders[idx]\n level = None\n if order.is_buy:\n level = lvl_buy + 1\n lvl_buy += 1\n else:\n level = no_sells - lvl_sell\n lvl_sell += 1\n spread = 0 if price == 0 else abs(order.price - price) / price\n age = \"n/a\"\n # // indicates order is a paper order so 'n/a'. For real orders, calculate age.\n if \"//\" not in order.client_order_id:\n age = pd.Timestamp(int(time.time()) - int(order.client_order_id[-16:]) / 1e6,\n unit='s').strftime('%H:%M:%S')\n amount_orig = \"\" if level is None else self._order_amount + ((level - 1) * self._order_level_amount)\n data.append([\n level,\n \"buy\" if order.is_buy else \"sell\",\n float(order.price),\n f\"{spread:.2%}\",\n amount_orig,\n float(order.quantity),\n age\n ])\n\n return pd.DataFrame(data=data, columns=columns)\n\n def active_positions_df(self) -> pd.DataFrame:\n columns = [\"Symbol\", \"Type\", \"Entry Price\", \"Amount\", \"Leverage\", \"Unrealized PnL\"]\n data = []\n market, trading_pair = self._market_info.market, self._market_info.trading_pair\n for idx in self.active_positions.values():\n is_buy = True if idx.amount > 0 else False\n unrealized_profit = ((market.get_price(trading_pair, is_buy) - idx.entry_price) * idx.amount)\n data.append([\n idx.trading_pair,\n idx.position_side.name,\n idx.entry_price,\n idx.amount,\n idx.leverage,\n unrealized_profit\n ])\n\n return pd.DataFrame(data=data, columns=columns)\n\n def market_status_data_frame(self) -> pd.DataFrame:\n markets_data = []\n markets_columns = [\"Exchange\", \"Market\", \"Best Bid\", \"Best Ask\", f\"Ref Price ({self._price_type.name})\"]\n if self._price_type is PriceType.LastOwnTrade and self._last_own_trade_price.is_nan():\n markets_columns[-1] = \"Ref Price (MidPrice)\"\n market_books = [(self._market_info.market, self._market_info.trading_pair)]\n if type(self._asset_price_delegate) is OrderBookAssetPriceDelegate:\n market_books.append((self._asset_price_delegate.market, self._asset_price_delegate.trading_pair))\n for market, trading_pair in market_books:\n bid_price = market.get_price(trading_pair, False)\n ask_price = market.get_price(trading_pair, True)\n ref_price = float(\"nan\")\n if market == self._market_info.market and self._asset_price_delegate is None:\n ref_price = self.get_price()\n elif market == self._asset_price_delegate.market and self._price_type is not PriceType.LastOwnTrade:\n ref_price = self._asset_price_delegate.get_price_by_type(self._price_type)\n markets_data.append([\n market.display_name,\n trading_pair,\n float(bid_price),\n float(ask_price),\n float(ref_price)\n ])\n return pd.DataFrame(data=markets_data, columns=markets_columns).replace(np.nan, '', regex=True)\n\n def format_status(self) -> str:\n if not self._all_markets_ready:\n return \"Market connectors are not ready.\"\n lines = []\n warning_lines = []\n\n markets_df = self.market_status_data_frame()\n lines.extend([\"\", \" Markets:\"] + [\" \" + line for line in markets_df.to_string(index=False).split(\"\\n\")])\n\n assets_df = map_df_to_str(self.perpetual_mm_assets_df())\n\n first_col_length = max(*assets_df[0].apply(len))\n df_lines = assets_df.to_string(index=False, header=False,\n formatters={0: (\"{:<\" + str(first_col_length) + \"}\").format}).split(\"\\n\")\n lines.extend([\"\", \" Assets:\"] + [\" \" + line for line in df_lines])\n\n # See if there're any open orders.\n if len(self.active_orders) > 0:\n df = 
self.active_orders_df()\n lines.extend([\"\", \" Orders:\"] + [\" \" + line for line in df.to_string(index=False).split(\"\\n\")])\n else:\n lines.extend([\"\", \" No active maker orders.\"])\n\n # See if there're any active positions.\n if len(self.active_positions) > 0:\n df = self.active_positions_df()\n lines.extend([\"\", \" Positions:\"] + [\" \" + line for line in df.to_string(index=False).split(\"\\n\")])\n else:\n lines.extend([\"\", \" No active positions.\"])\n\n if len(warning_lines) > 0:\n lines.extend([\"\", \"*** WARNINGS ***\"] + warning_lines)\n\n return \"\\n\".join(lines)\n\n def start(self, clock: Clock, timestamp: float):\n super().start(clock, timestamp)\n self._last_timestamp = timestamp\n self.apply_initial_settings(self.trading_pair, self._position_mode, self._leverage)\n\n def apply_initial_settings(self, trading_pair: str, position: Position, leverage: int):\n market: ExchangeBase = self._market_info.market\n market.set_leverage(trading_pair, leverage)\n market.set_position_mode(position)\n\n def tick(self, timestamp: float):\n market: ExchangeBase = self._market_info.market\n session_positions = [s for s in self.active_positions.values() if s.trading_pair == self.trading_pair]\n current_tick = timestamp // self._status_report_interval\n last_tick = self._last_timestamp // self._status_report_interval\n should_report_warnings = ((current_tick > last_tick) and\n (self._logging_options & self.OPTION_LOG_STATUS_REPORT))\n try:\n if not self._all_markets_ready:\n self._all_markets_ready = all([market.ready for market in self.active_markets])\n if self._asset_price_delegate is not None and self._all_markets_ready:\n self._all_markets_ready = self._asset_price_delegate.ready\n if not self._all_markets_ready:\n # M({self.trading_pair}) Maker sell order {order_id}arkets not ready yet. Don't do anything.\n if should_report_warnings:\n self.logger().warning(\"Markets are not ready. No market making trades are permitted.\")\n return\n\n if should_report_warnings:\n if not all([market.network_status is NetworkStatus.CONNECTED for market in self.active_markets]):\n self.logger().warning(\"WARNING: Some markets are not connected or are down at the moment. Market \"\n \"making may be dangerous when markets or networks are unstable.\")\n\n if len(session_positions) == 0:\n self._exit_orders = dict() # Empty list of exit order at this point to reduce size\n proposal = None\n if self._create_timestamp <= self.current_timestamp:\n # 1. Create base order proposals\n proposal = self.create_base_proposal()\n # 2. Apply functions that limit numbers of buys and sells proposal\n self.apply_order_levels_modifiers(proposal)\n # 3. Apply functions that modify orders price\n self.apply_order_price_modifiers(proposal)\n # 4. Apply budget constraint, i.e. 
can't buy/sell more than what you have.\n self.apply_budget_constraint(proposal)\n\n self.filter_out_takers(proposal)\n\n self.cancel_active_orders(proposal)\n self.cancel_orders_below_min_spread()\n if self.to_create_orders(proposal):\n self.execute_orders_proposal(proposal, PositionAction.OPEN)\n # Reset peak ask and bid prices\n self._ts_peak_ask_price = market.get_price(self.trading_pair, False)\n self._ts_peak_bid_price = market.get_price(self.trading_pair, True)\n else:\n self.manage_positions(session_positions)\n finally:\n self._last_timestamp = timestamp\n\n def manage_positions(self, session_positions: List[Position]):\n mode = self._position_mode\n\n proposals = self.profit_taking_proposal(mode, session_positions)\n if proposals is not None:\n self.execute_orders_proposal(proposals, PositionAction.CLOSE)\n\n # check if stop loss needs to be placed\n proposals = self.stop_loss_proposal(mode, session_positions)\n if proposals is not None:\n self.execute_orders_proposal(proposals, PositionAction.CLOSE)\n\n def profit_taking_proposal(self, mode: PositionMode, active_positions: List) -> Proposal:\n\n market: ExchangeBase = self._market_info.market\n unwanted_exit_orders = [o for o in self.active_orders\n if o.client_order_id not in self._exit_orders.keys()]\n ask_price = market.get_price(self.trading_pair, True)\n bid_price = market.get_price(self.trading_pair, False)\n buys = []\n sells = []\n\n if mode == PositionMode.ONEWAY:\n # in one-way mode, only one active position is expected per time\n if len(active_positions) > 1:\n self.logger().error(f\"More than one open position in {mode.name} position mode. \"\n \"Kindly ensure you do not interact with the exchange through \"\n \"other platforms and restart this strategy.\")\n else:\n # Cancel open order that could potentially close position before reaching take_profit_limit\n for order in unwanted_exit_orders:\n if ((active_positions[0].amount < 0 and order.is_buy)\n or (active_positions[0].amount > 0 and not order.is_buy)):\n self.cancel_order(self._market_info, order.client_order_id)\n self.logger().info(f\"Initiated cancellation of {'buy' if order.is_buy else 'sell'} order \"\n f\"{order.client_order_id} in favour of take profit order.\")\n\n for position in active_positions:\n if (ask_price > position.entry_price and position.amount > 0) or (\n bid_price < position.entry_price and position.amount < 0):\n # check if there is an active order to take profit, and create if none exists\n profit_spread = self._long_profit_taking_spread if position.amount > 0 else self._short_profit_taking_spread\n take_profit_price = position.entry_price * (Decimal(\"1\") + profit_spread) if position.amount > 0 \\\n else position.entry_price * (Decimal(\"1\") - profit_spread)\n price = market.quantize_order_price(self.trading_pair, take_profit_price)\n size = market.quantize_order_amount(self.trading_pair, abs(position.amount))\n old_exit_orders = [\n o for o in self.active_orders\n if ((o.price != price or o.quantity != size)\n and o.client_order_id in self._exit_orders.keys()\n and ((position.amount < 0 and o.is_buy) or (position.amount > 0 and not o.is_buy)))]\n for old_order in old_exit_orders:\n self.cancel_order(self._market_info, old_order.client_order_id)\n self.logger().info(\n f\"Initiated cancellation of previous take profit order {old_order.client_order_id} in favour of new take profit order.\")\n exit_order_exists = [o for o in self.active_orders if o.price == price]\n if len(exit_order_exists) == 0:\n if size > 0 and price > 0:\n if 
position.amount < 0:\n buys.append(PriceSize(price, size))\n else:\n sells.append(PriceSize(price, size))\n return Proposal(buys, sells)\n\n def _should_renew_stop_loss(self, stop_loss_order: LimitOrder) -> bool:\n stop_loss_creation_timestamp = self._exit_orders.get(stop_loss_order.client_order_id)\n time_since_stop_loss = self.current_timestamp - stop_loss_creation_timestamp\n return time_since_stop_loss >= self._time_between_stop_loss_orders\n\n def stop_loss_proposal(self, mode: PositionMode, active_positions: List[Position]) -> Proposal:\n market: ExchangeBase = self._market_info.market\n top_ask = market.get_price(self.trading_pair, False)\n top_bid = market.get_price(self.trading_pair, True)\n buys = []\n sells = []\n\n for position in active_positions:\n # check if stop loss order needs to be placed\n stop_loss_price = position.entry_price * (Decimal(\"1\") + self._stop_loss_spread) if position.amount < 0 \\\n else position.entry_price * (Decimal(\"1\") - self._stop_loss_spread)\n existent_stop_loss_orders = [order for order in self.active_orders\n if order.client_order_id in self._exit_orders.keys()\n and ((position.amount > 0 and not order.is_buy)\n or (position.amount < 0 and order.is_buy))]\n if (not existent_stop_loss_orders\n or (self._should_renew_stop_loss(existent_stop_loss_orders[0]))):\n previous_stop_loss_price = None\n for order in existent_stop_loss_orders:\n previous_stop_loss_price = order.price\n self.cancel_order(self._market_info, order.client_order_id)\n new_price = previous_stop_loss_price or stop_loss_price\n if (top_ask <= stop_loss_price and position.amount > 0):\n price = market.quantize_order_price(\n self.trading_pair,\n new_price * (Decimal(1) - self._stop_loss_slippage_buffer))\n take_profit_orders = [o for o in self.active_orders\n if (not o.is_buy and o.price > price\n and o.client_order_id in self._exit_orders.keys())]\n # cancel take profit orders if they exist\n for old_order in take_profit_orders:\n self.cancel_order(self._market_info, old_order.client_order_id)\n size = market.quantize_order_amount(self.trading_pair, abs(position.amount))\n if size > 0 and price > 0:\n self.logger().info(\"Creating stop loss sell order to close long position.\")\n sells.append(PriceSize(price, size))\n elif (top_bid >= stop_loss_price and position.amount < 0):\n price = market.quantize_order_price(\n self.trading_pair,\n new_price * (Decimal(1) + self._stop_loss_slippage_buffer))\n take_profit_orders = [o for o in self.active_orders\n if (o.is_buy and o.price < price\n and o.client_order_id in self._exit_orders.keys())]\n # cancel take profit orders if they exist\n for old_order in take_profit_orders:\n self.cancel_order(self._market_info, old_order.client_order_id)\n size = market.quantize_order_amount(self.trading_pair, abs(position.amount))\n if size > 0 and price > 0:\n self.logger().info(\"Creating stop loss buy order to close short position.\")\n buys.append(PriceSize(price, size))\n return Proposal(buys, sells)\n\n def create_base_proposal(self):\n market: ExchangeBase = self._market_info.market\n buys = []\n sells = []\n\n # First to check if a customized order override is configured, otherwise the proposal will be created according\n # to order spread, amount, and levels setting.\n order_override = self._order_override\n if order_override is not None and len(order_override) > 0:\n for key, value in order_override.items():\n if str(value[0]) in [\"buy\", \"sell\"]:\n if str(value[0]) == \"buy\":\n price = self.get_price() * (Decimal(\"1\") - 
Decimal(str(value[1])) / Decimal(\"100\"))\n price = market.quantize_order_price(self.trading_pair, price)\n size = Decimal(str(value[2]))\n size = market.quantize_order_amount(self.trading_pair, size)\n if size > 0 and price > 0:\n buys.append(PriceSize(price, size))\n elif str(value[0]) == \"sell\":\n price = self.get_price() * (Decimal(\"1\") + Decimal(str(value[1])) / Decimal(\"100\"))\n price = market.quantize_order_price(self.trading_pair, price)\n size = Decimal(str(value[2]))\n size = market.quantize_order_amount(self.trading_pair, size)\n if size > 0 and price > 0:\n sells.append(PriceSize(price, size))\n else:\n for level in range(0, self._buy_levels):\n price = self.get_price() * (Decimal(\"1\") - self._bid_spread - (level * self._order_level_spread))\n price = market.quantize_order_price(self.trading_pair, price)\n size = self._order_amount + (self._order_level_amount * level)\n size = market.quantize_order_amount(self.trading_pair, size)\n if size > 0:\n buys.append(PriceSize(price, size))\n for level in range(0, self._sell_levels):\n price = self.get_price() * (Decimal(\"1\") + self._ask_spread + (level * self._order_level_spread))\n price = market.quantize_order_price(self.trading_pair, price)\n size = self._order_amount + (self._order_level_amount * level)\n size = market.quantize_order_amount(self.trading_pair, size)\n if size > 0:\n sells.append(PriceSize(price, size))\n\n return Proposal(buys, sells)\n\n def apply_order_levels_modifiers(self, proposal: Proposal):\n self.apply_price_band(proposal)\n\n def apply_price_band(self, proposal: Proposal):\n if self._price_ceiling > 0 and self.get_price() >= self._price_ceiling:\n proposal.buys = []\n if self._price_floor > 0 and self.get_price() <= self._price_floor:\n proposal.sells = []\n\n def apply_order_price_modifiers(self, proposal: Proposal):\n if self._order_optimization_enabled:\n self.apply_order_optimization(proposal)\n\n def apply_budget_constraint(self, proposal: Proposal):\n checker = self._market_info.market.budget_checker\n\n order_candidates = self.create_order_candidates_for_budget_check(proposal)\n adjusted_candidates = checker.adjust_candidates(order_candidates, all_or_none=True)\n self.apply_adjusted_order_candidates_to_proposal(adjusted_candidates, proposal)\n\n def create_order_candidates_for_budget_check(self, proposal: Proposal):\n order_candidates = []\n\n is_maker = True\n order_candidates.extend(\n [\n PerpetualOrderCandidate(\n self.trading_pair,\n is_maker,\n OrderType.LIMIT,\n TradeType.BUY,\n buy.size,\n buy.price,\n leverage=Decimal(self._leverage),\n )\n for buy in proposal.buys\n ]\n )\n order_candidates.extend(\n [\n PerpetualOrderCandidate(\n self.trading_pair,\n is_maker,\n OrderType.LIMIT,\n TradeType.SELL,\n sell.size,\n sell.price,\n leverage=Decimal(self._leverage),\n )\n for sell in proposal.sells\n ]\n )\n return order_candidates\n\n def apply_adjusted_order_candidates_to_proposal(self,\n adjusted_candidates: List[PerpetualOrderCandidate],\n proposal: Proposal):\n for order in chain(proposal.buys, proposal.sells):\n adjusted_candidate = adjusted_candidates.pop(0)\n if adjusted_candidate.amount == s_decimal_zero:\n self.logger().info(\n f\"Insufficient balance: {adjusted_candidate.order_side.name} order (price: {order.price},\"\n f\" size: {order.size}) is omitted.\"\n )\n self.logger().warning(\n \"You are also at a possible risk of being liquidated if there happens to be an open loss.\")\n order.size = s_decimal_zero\n proposal.buys = [o for o in proposal.buys if o.size > 0]\n 
proposal.sells = [o for o in proposal.sells if o.size > 0]\n\n def filter_out_takers(self, proposal: Proposal):\n market: ExchangeBase = self._market_info.market\n top_ask = market.get_price(self.trading_pair, True)\n if not top_ask.is_nan():\n proposal.buys = [buy for buy in proposal.buys if buy.price < top_ask]\n top_bid = market.get_price(self.trading_pair, False)\n if not top_bid.is_nan():\n proposal.sells = [sell for sell in proposal.sells if sell.price > top_bid]\n\n # Compare the market price with the top bid and top ask price\n def apply_order_optimization(self, proposal: Proposal):\n market: ExchangeBase = self._market_info.market\n own_buy_size = s_decimal_zero\n own_sell_size = s_decimal_zero\n\n # If there are multiple orders, do not jump prices\n if self._order_levels > 1:\n return\n\n for order in self.active_orders:\n if order.is_buy:\n own_buy_size = order.quantity\n else:\n own_sell_size = order.quantity\n\n if len(proposal.buys) == 1:\n # Get the top bid price in the market using order_optimization_depth and your buy order volume\n top_bid_price = self._market_info.get_price_for_volume(\n False, self._bid_order_optimization_depth + own_buy_size).result_price\n price_quantum = market.get_order_price_quantum(\n self.trading_pair,\n top_bid_price\n )\n # Get the price above the top bid\n price_above_bid = (ceil(top_bid_price / price_quantum) + 1) * price_quantum\n\n # If the price_above_bid is lower than the price suggested by the pricing proposal,\n # lower your price to this\n lower_buy_price = min(proposal.buys[0].price, price_above_bid)\n proposal.buys[0].price = market.quantize_order_price(self.trading_pair, lower_buy_price)\n\n if len(proposal.sells) == 1:\n # Get the top ask price in the market using order_optimization_depth and your sell order volume\n top_ask_price = self._market_info.get_price_for_volume(\n True, self._ask_order_optimization_depth + own_sell_size).result_price\n price_quantum = market.get_order_price_quantum(\n self.trading_pair,\n top_ask_price\n )\n # Get the price below the top ask\n price_below_ask = (floor(top_ask_price / price_quantum) - 1) * price_quantum\n\n # If the price_below_ask is higher than the price suggested by the pricing proposal,\n # increase your price to this\n higher_sell_price = max(proposal.sells[0].price, price_below_ask)\n proposal.sells[0].price = market.quantize_order_price(self.trading_pair, higher_sell_price)\n\n def did_fill_order(self, order_filled_event: OrderFilledEvent):\n order_id = order_filled_event.order_id\n market_info = self._sb_order_tracker.get_shadow_market_pair_from_order_id(order_id)\n\n if market_info is not None:\n if self._logging_options & self.OPTION_LOG_MAKER_ORDER_FILLED:\n self.log_with_clock(\n logging.INFO,\n f\"({market_info.trading_pair}) Maker \"\n f\"{'buy' if order_filled_event.trade_type is TradeType.BUY else 'sell'} order of \"\n f\"{order_filled_event.amount} {market_info.base_asset} filled.\"\n )\n\n def did_complete_buy_order(self, order_completed_event: BuyOrderCompletedEvent):\n order_id = order_completed_event.order_id\n limit_order_record = self._sb_order_tracker.get_limit_order(self._market_info, order_id)\n if limit_order_record is None:\n return\n\n # delay order creation by filled_order_delay (in seconds)\n self._create_timestamp = self.current_timestamp + self._filled_order_delay\n self._cancel_timestamp = min(self._cancel_timestamp, self._create_timestamp)\n\n self._last_own_trade_price = limit_order_record.price\n\n self.log_with_clock(\n logging.INFO,\n 
f\"({self.trading_pair}) Maker buy order {order_id} \"\n f\"({limit_order_record.quantity} {limit_order_record.base_currency} @ \"\n f\"{limit_order_record.price} {limit_order_record.quote_currency}) has been completely filled.\"\n )\n self.notify_hb_app_with_timestamp(\n f\"Maker BUY order {limit_order_record.quantity} {limit_order_record.base_currency} @ \"\n f\"{limit_order_record.price} {limit_order_record.quote_currency} is filled.\"\n )\n\n def did_complete_sell_order(self, order_completed_event: SellOrderCompletedEvent):\n order_id = order_completed_event.order_id\n limit_order_record: LimitOrder = self._sb_order_tracker.get_limit_order(self._market_info, order_id)\n if limit_order_record is None:\n return\n\n # delay order creation by filled_order_delay (in seconds)\n self._create_timestamp = self.current_timestamp + self._filled_order_delay\n self._cancel_timestamp = min(self._cancel_timestamp, self._create_timestamp)\n\n self._last_own_trade_price = limit_order_record.price\n\n self.log_with_clock(\n logging.INFO,\n f\"({self.trading_pair}) Maker sell order {order_id} \"\n f\"({limit_order_record.quantity} {limit_order_record.base_currency} @ \"\n f\"{limit_order_record.price} {limit_order_record.quote_currency}) has been completely filled.\"\n )\n self.notify_hb_app_with_timestamp(\n f\"Maker SELL order {limit_order_record.quantity} {limit_order_record.base_currency} @ \"\n f\"{limit_order_record.price} {limit_order_record.quote_currency} is filled.\"\n )\n\n def is_within_tolerance(self, current_prices: List[Decimal], proposal_prices: List[Decimal]) -> bool:\n if len(current_prices) != len(proposal_prices):\n return False\n current_prices = sorted(current_prices)\n proposal_prices = sorted(proposal_prices)\n for current, proposal in zip(current_prices, proposal_prices):\n # if spread diff is more than the tolerance or order quantities are different, return false.\n if abs(proposal - current) / current > self._order_refresh_tolerance_pct:\n return False\n return True\n\n # Return value: whether order cancellation is deferred.\n def cancel_active_orders(self, proposal: Proposal):\n if self._cancel_timestamp > self.current_timestamp:\n return\n\n to_defer_canceling = False\n if len(self.active_orders) == 0:\n return\n if proposal is not None and self._order_refresh_tolerance_pct >= 0:\n\n active_buy_prices = [Decimal(str(o.price)) for o in self.active_orders if o.is_buy]\n active_sell_prices = [Decimal(str(o.price)) for o in self.active_orders if not o.is_buy]\n proposal_buys = [buy.price for buy in proposal.buys]\n proposal_sells = [sell.price for sell in proposal.sells]\n if self.is_within_tolerance(active_buy_prices, proposal_buys) and \\\n self.is_within_tolerance(active_sell_prices, proposal_sells):\n to_defer_canceling = True\n\n if not to_defer_canceling:\n for order in self.active_orders:\n self.cancel_order(self._market_info, order.client_order_id)\n else:\n self.logger().info(f\"Not cancelling active orders since difference between new order prices \"\n f\"and current order prices is within \"\n f\"{self._order_refresh_tolerance_pct:.2%} order_refresh_tolerance_pct\")\n self.set_timers()\n\n def cancel_orders_below_min_spread(self):\n price = self.get_price()\n for order in self.active_orders:\n negation = -1 if order.is_buy else 1\n if (negation * (order.price - price) / price) < self._minimum_spread:\n self.logger().info(f\"Order is below minimum spread ({self._minimum_spread}).\"\n f\" Cancelling Order: ({'Buy' if order.is_buy else 'Sell'}) \"\n f\"ID - 
{order.client_order_id}\")\n self.cancel_order(self._market_info, order.client_order_id)\n\n def to_create_orders(self, proposal: Proposal) -> bool:\n return (self._create_timestamp < self.current_timestamp and\n proposal is not None and\n len(self.active_orders) == 0)\n\n def execute_orders_proposal(self, proposal: Proposal, position_action: PositionAction):\n orders_created = False\n\n if len(proposal.buys) > 0:\n if position_action == PositionAction.CLOSE:\n if self.current_timestamp < self._next_buy_exit_order_timestamp:\n return\n else:\n self._next_buy_exit_order_timestamp = self.current_timestamp + self.filled_order_delay\n if self._logging_options & self.OPTION_LOG_CREATE_ORDER:\n price_quote_str = [f\"{buy.size.normalize()} {self.base_asset}, \"\n f\"{buy.price.normalize()} {self.quote_asset}\"\n for buy in proposal.buys]\n self.logger().info(\n f\"({self.trading_pair}) Creating {len(proposal.buys)} {self._close_order_type.name} bid orders \"\n f\"at (Size, Price): {price_quote_str} to {position_action.name} position.\"\n )\n for buy in proposal.buys:\n bid_order_id = self.buy_with_specific_market(\n self._market_info,\n buy.size,\n order_type=self._close_order_type,\n price=buy.price,\n position_action=position_action\n )\n if position_action == PositionAction.CLOSE:\n self._exit_orders[bid_order_id] = self.current_timestamp\n orders_created = True\n if len(proposal.sells) > 0:\n if position_action == PositionAction.CLOSE:\n if self.current_timestamp < self._next_sell_exit_order_timestamp:\n return\n else:\n self._next_sell_exit_order_timestamp = self.current_timestamp + self.filled_order_delay\n if self._logging_options & self.OPTION_LOG_CREATE_ORDER:\n price_quote_str = [f\"{sell.size.normalize()} {self.base_asset}, \"\n f\"{sell.price.normalize()} {self.quote_asset}\"\n for sell in proposal.sells]\n self.logger().info(\n f\"({self.trading_pair}) Creating {len(proposal.sells)} {self._close_order_type.name} ask \"\n f\"orders at (Size, Price): {price_quote_str} to {position_action.name} position.\"\n )\n for sell in proposal.sells:\n ask_order_id = self.sell_with_specific_market(\n self._market_info,\n sell.size,\n order_type=self._close_order_type,\n price=sell.price,\n position_action=position_action\n )\n if position_action == PositionAction.CLOSE:\n self._exit_orders[ask_order_id] = self.current_timestamp\n orders_created = True\n if orders_created:\n self.set_timers()\n\n def set_timers(self):\n next_cycle = self.current_timestamp + self._order_refresh_time\n if self._create_timestamp <= self.current_timestamp:\n self._create_timestamp = next_cycle\n if self._cancel_timestamp <= self.current_timestamp:\n self._cancel_timestamp = min(self._create_timestamp, next_cycle)\n\n def notify_hb_app(self, msg: str):\n if self._hb_app_notification:\n super().notify_hb_app(msg)\n\n def get_price_type(self, price_type_str: str) -> PriceType:\n if price_type_str == \"mid_price\":\n return PriceType.MidPrice\n elif price_type_str == \"best_bid\":\n return PriceType.BestBid\n elif price_type_str == \"best_ask\":\n return PriceType.BestAsk\n elif price_type_str == \"last_price\":\n return PriceType.LastTrade\n elif price_type_str == 'last_own_trade_price':\n return PriceType.LastOwnTrade\n elif price_type_str == \"custom\":\n return PriceType.Custom\n else:\n raise ValueError(f\"Unrecognized price type string {price_type_str}.\")\n"
] |
[
[
"pandas.DataFrame.from_records",
"pandas.Timedelta",
"pandas.Timestamp.utcnow"
],
[
"pandas.DataFrame"
]
] |
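The pandas usage flagged for the OKEx data source above reduces to two small patterns: a ticker lookup table built from JSON records (`get_last_traded_prices`) and a sleep until the next hour boundary (`listen_for_order_book_snapshots`). A sketch with placeholder ticker data:

```python
import time
import pandas as pd

# ticker lookup table, as in get_last_traded_prices (records are placeholders)
records = [{"instId": "BTC-USDT", "last": "30000.1"}]
all_markets = pd.DataFrame.from_records(data=records)
all_markets.set_index("instId", inplace=True)
last_price = float(all_markets["last"]["BTC-USDT"])

# seconds until the top of the next hour, as in the snapshot loop
this_hour = pd.Timestamp.utcnow().replace(minute=0, second=0, microsecond=0)
next_hour = this_hour + pd.Timedelta(hours=1)
delta = next_hour.timestamp() - time.time()
```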
Zarand3r/corona_light
|
[
"96482cfb6fff8959772ccd48c10ce0588b191f18"
] |
[
"models/submissions/epidemiological/version3_0/generate_submission3_0_2.py"
] |
[
"############################################\n# Adapted from code written by August Chen #\n############################################\nimport pandas as pd\nimport numpy as np\nimport os\nimport csv\nimport datetime\nimport git\nimport sys\nrepo = git.Repo(\"./\", search_parent_directories=True)\nhomedir = repo.working_dir\nsys.path.insert(0, f\"{homedir}\" + '/models/submissions/processing/')\nsys.path.insert(1, f\"{homedir}\" + '/models/epidemiological/production')\nimport fit_counties3_0\nimport formatter2\n\n# hashtable with month and number of days in the month\nmaxMonth = {1:31, 2:29, 3:31, 4:30, 5:31, 6:30, 7:31, 8:31, 9:30, 10:31, 11:30, 12:31}\n\n# gets next day, needed because the current day could be the last of the month\ndef next_day(current_day):\n\t# assumes that everything is in 2020\n\tif current_day.day < maxMonth[current_day.month]:\n\t\treturn datetime.datetime(2020, current_day.month, current_day.day + 1)\n\telse:\n\t\treturn datetime.datetime(2020, current_day.month + 1, 1)\n\ndef previous_day(current_day):\n\t# assumes that everything is in 2020\n\tif current_day.day >= 1:\n\t\treturn datetime.datetime(2020, current_day.month, current_day.day-1)\n\telse:\n\t\tprevious_month = current_day.month - 1\n\t\treturn datetime.datetime(2020, previous_month, maxMonth[previous_month])\n\n# we want formatting in the form 2020-04-01, with 0s before months, days < 10\ndef formatter(numb):\n\tif numb < 10:\n\t\treturn \"0\" + str(numb)\n\telse:\n\t\treturn str(numb)\n\ndef format_submission(dates, death_errors, fips, start, transpose=False):\n\tdates = dates.tolist()\n\t\n\tif transpose:\n\t\t# swap columns and rows for death_errors\n\t\tdeath_errors = np.array(death_errors)\n\t\tdeath_errors = death_errors.T\n\t\tdeath_errors = death_errors.tolist()\n\n\tdeath_errors = death_errors.tolist()\n\t\n\t# trim both lists so they begin with date represented by start\n\t# assumes the lists begin originally at the same place\n\tstart_index = -1\n\tfor i in range(0, len(dates)):\n\t\tcurrent_day = dates[i]\n\t\tif current_day.month == start.month and current_day.day == start.day:\n\t\t\tstart_index = i\n\t\t\tbreak\n\n\tif start_index == -1: # start doesn't exist in dates\n\t\tinitial_date = dates[0]\n\t\tdifference = initial_date.day - start.day\n\t\tfor i in range(difference):\n\t\t\tdates.insert(0, previous_day(initial_date))\n\t\t\tinitial_date = dates[0]\n\t\t\tdeath_errors = [[0,0,0,0,0,0,0,0,0]]+death_errors\n\t\tstart_index = 0\n\n\t# adding to dates so lengths match up\n\tfinal_date = dates[-1]\n\twhile len(dates) < len(death_errors):\n\t\tdates.append(next_day(final_date))\n\t\tfinal_date = dates[-1]\n\t\n\t\t\n\tdeath_errors = death_errors[start_index:]\n\tdates = dates[start_index:]\n\t# convert dates from datetime to string, add fips code\n\tfor i in range(len(dates)):\n\t\tday = dates[i]\n\t\tday_format = '{year}-{month}-{day}-{fips}'.format(year = day.year,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t month = formatter(day.month), \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t day = formatter(day.day), \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t fips = fips)\n\t\tdates[i] = day_format\n\t\tif i < len(death_errors):\n\t\t\tdeath_errors[i].insert(0, dates[i])\n\t\t\n\treturn death_errors\n\n\nif __name__ == '__main__':\n\tstart = datetime.datetime(2020, 4, 1)\n\tend = datetime.datetime(2020, 6, 30)\n\tsubmission = []\n\tguesses = [1.41578513e-01, 1.61248129e-01, 2.48362028e-01, 3.42978127e-01, 5.79023652e-01, 4.64392758e-02, \\\n\t9.86745420e-06, 4.83700388e-02, 4.85290835e-01, 3.72688900e-02, 4.92398129e-04, 
5.20319673e-02, \\\n\t4.16822944e-02, 2.93718207e-02, 2.37765976e-01, 6.38313283e-04, 1.00539865e-04, 7.86113867e-01, \\\n\t3.26287443e-01, 8.18317732e-06, 5.43511913e-10, 1.30387168e-04, 3.58953133e-03, 1.57388153e-05]\n\toutput_dict = fit_counties3_0.multi_submission(end, bias=True, policy_regime=False, tail_regime=False, weight=True, guesses=guesses, error_start=-14, quick=True, tail=-14, fitQ=False, death_metric=\"deaths\", adaptive=True, fix_nonconvergent=False) #do regime next but not ready for fitQ\n\t# output_dict = fit_counties3_0.multi_submission(end, bias=True, regime=False, weight=True, guesses=guesses, start=-1, quick=True, fitQ=False, death_metric=\"avg_deaths\", adaptive=False, fix_nonconvergent=False) #do regime next but not ready for fitQ\n\tcounties_dates = output_dict[\"counties_dates\"]\n\tcounties_death_errors = output_dict[\"counties_death_errors\"]\n\tcounties_fips = output_dict[\"counties_fips\"]\n\tnonconvergent = output_dict[\"nonconvergent\"]\n\tparameters= output_dict[\"parameters\"]\n\tfor i in range(len(counties_fips)):\n\t\tcounty_prediction = format_submission(counties_dates[i], counties_death_errors[i], counties_fips[i], start)\n\t\tsubmission = submission + county_prediction\n\t# header = \"{},{},{},{},{},{},{},{},{},{}\\n\".format(\"id\", \"10\", \"20\", \"30\", \"40\", \"50\", \"60\", \"70\", \"80\", \"90\")\n\toutput_file = f'{homedir}/models/submissions/epidemiological/version3_0/new_submissions/predictions3_0_2.csv'\n\theader = [\"id\", \"10\", \"20\", \"30\", \"40\", \"50\", \"60\", \"70\", \"80\", \"90\"]\n\twith open(output_file, 'w') as submission_file:\n\t\twriter = csv.writer(submission_file, delimiter=',')\n\t\twriter.writerow(header)\n\t\twriter.writerows(submission)\n\n\tformatter2.reformat(output_file, save=True, fix=False, id=\"3_0_2\")\n\n\timport json\n\tparameter_file = f\"{homedir}/models/epidemiological/parameters/parameters3_0_2.csv\"\n\twith open(parameter_file, 'w') as file:\n\t\tfile.write(json.dumps(parameters))\n\n\n\n\n\n\n"
] |
[
[
"numpy.array"
]
] |
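A minimal, self-contained sketch (not part of the dataset row above) of the "YYYY-MM-DD-<fips>" id convention that format_submission builds; using datetime.timedelta sidesteps the manual month arithmetic in next_day/previous_day. The make_id helper and the 36061 fips code are illustrative only.

import datetime

def make_id(day: datetime.datetime, fips: str) -> str:
    # strftime zero-pads month and day, matching the formatter() helper above
    return "{}-{}".format(day.strftime("%Y-%m-%d"), fips)

start = datetime.datetime(2020, 4, 1)
ids = [make_id(start + datetime.timedelta(days=i), "36061") for i in range(3)]
print(ids)  # ['2020-04-01-36061', '2020-04-02-36061', '2020-04-03-36061']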
puchapu/transferlearning
|
[
"1f14c192706db52e3982daae623b5bbabb31ab57"
] |
[
"code/DeepDA/models.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom transfer_losses import TransferLoss\nimport backbones\n\n\nclass TransferNet(nn.Module):\n def __init__(self, num_class, base_net='resnet50', transfer_loss='mmd', use_bottleneck=True, bottleneck_width=256, max_iter=1000, **kwargs):\n super(TransferNet, self).__init__()\n self.num_class = num_class\n self.base_network = backbones.get_backbone(base_net)\n self.use_bottleneck = use_bottleneck\n self.transfer_loss = transfer_loss\n if self.use_bottleneck:\n bottleneck_list = [\n nn.Linear(self.base_network.output_num(), bottleneck_width),\n nn.ReLU()\n ]\n self.bottleneck_layer = nn.Sequential(*bottleneck_list)\n feature_dim = bottleneck_width\n else:\n feature_dim = self.base_network.output_num()\n \n self.classifier_layer = nn.Linear(feature_dim, num_class)\n transfer_loss_args = {\n \"loss_type\": self.transfer_loss,\n \"max_iter\": max_iter,\n \"num_class\": num_class\n }\n self.adapt_loss = TransferLoss(**transfer_loss_args)\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def forward(self, source, target, source_label):\n source = self.base_network(source)\n target = self.base_network(target)\n if self.use_bottleneck:\n source = self.bottleneck_layer(source)\n target = self.bottleneck_layer(target)\n # classification\n source_clf = self.classifier_layer(source)\n clf_loss = self.criterion(source_clf, source_label)\n # transfer\n kwargs = {}\n if self.transfer_loss == \"lmmd\":\n kwargs['source_label'] = source_label\n target_clf = self.classifier_layer(target)\n kwargs['target_logits'] = torch.nn.functional.softmax(target_clf, dim=1)\n elif self.transfer_loss == \"daan\":\n source_clf = self.classifier_layer(source)\n kwargs['source_logits'] = torch.nn.functional.softmax(source_clf, dim=1)\n target_clf = self.classifier_layer(target)\n kwargs['target_logits'] = torch.nn.functional.softmax(target_clf, dim=1)\n \n transfer_loss = self.adapt_loss(source, target, **kwargs)\n return clf_loss, transfer_loss\n \n def get_parameters(self, initial_lr=1.0):\n params = [\n {'params': self.base_network.parameters(), 'lr': 0.1 * initial_lr},\n {'params': self.classifier_layer.parameters(), 'lr': 1.0 * initial_lr},\n ]\n if self.use_bottleneck:\n params.append(\n {'params': self.bottleneck_layer.parameters(), 'lr': 1.0 * initial_lr}\n )\n # Loss-dependent\n if self.transfer_loss == \"adv\":\n params.append(\n {'params': self.adapt_loss.loss_func.domain_classifier.parameters(), 'lr': 1.0 * initial_lr}\n )\n elif self.transfer_loss == \"daan\":\n params.append(\n {'params': self.adapt_loss.loss_func.domain_classifier.parameters(), 'lr': 1.0 * initial_lr}\n )\n params.append(\n {'params': self.adapt_loss.loss_func.local_classifiers.parameters(), 'lr': 1.0 * initial_lr}\n )\n return params\n\n def predict(self, x):\n features = self.base_network(x)\n x = self.bottleneck_layer(features)\n clf = self.classifier_layer(x)\n return clf\n\n def epoch_based_processing(self, *args, **kwargs):\n if self.transfer_loss == \"daan\":\n self.adapt_loss.loss_func.update_dynamic_factor(*args, **kwargs)\n else:\n pass"
] |
[
[
"torch.nn.Sequential",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"torch.nn.Linear",
"torch.nn.ReLU"
]
] |
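A toy, self-contained sketch of the backbone -> bottleneck -> classifier wiring and the per-group learning rates returned by TransferNet.get_parameters above. The two-layer nn.Sequential stands in for backbones.get_backbone('resnet50'), and the layer sizes are illustrative, not the repo's defaults.

import torch
import torch.nn as nn

backbone = nn.Sequential(nn.Linear(32, 64), nn.ReLU())    # stand-in feature extractor
bottleneck = nn.Sequential(nn.Linear(64, 16), nn.ReLU())  # bottleneck_width=16
classifier = nn.Linear(16, 10)                            # num_class=10

# Mirrors get_parameters(): the backbone trains at a tenth of the base lr.
optimizer = torch.optim.SGD([
    {'params': backbone.parameters(), 'lr': 0.1 * 1e-2},
    {'params': bottleneck.parameters(), 'lr': 1.0 * 1e-2},
    {'params': classifier.parameters(), 'lr': 1.0 * 1e-2},
], lr=1e-2, momentum=0.9)

logits = classifier(bottleneck(backbone(torch.randn(4, 32))))
print(logits.shape)  # torch.Size([4, 10])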
jmenashe/keras-rl
|
[
"e94ef887f5fb45d800d458f732e707a82c8b9b7d"
] |
[
"rl/policy.py"
] |
[
"from __future__ import division\nimport numpy as np\n\nfrom rl.util import *\n\n\nclass Policy(object):\n \"\"\"Abstract base class for all implemented policies.\n\n Each policy helps with selection of action to take on an environment.\n\n Do not use this abstract base class directly but instead use one of the concrete policies implemented.\n To implement your own policy, you have to implement the following methods:\n\n - `select_action`\n\n # Arguments\n agent (rl.core.Agent): Agent used\n \"\"\"\n def _set_agent(self, agent):\n self.agent = agent\n\n @property\n def metrics_names(self):\n return []\n\n @property\n def metrics(self):\n return []\n\n def select_action(self, **kwargs):\n raise NotImplementedError()\n\n def get_config(self):\n \"\"\"Return configuration of the policy\n\n # Returns\n Configuration as dict\n \"\"\"\n return {}\n\n\nclass LinearAnnealedPolicy(Policy):\n \"\"\"Implement the linear annealing policy\n\n Linear Annealing Policy computes a current threshold value and\n transfers it to an inner policy which chooses the action. The threshold\n value is following a linear function decreasing over time.\"\"\"\n def __init__(self, inner_policy, attr, value_max, value_min, value_test, nb_steps):\n if not hasattr(inner_policy, attr):\n raise ValueError('Policy does not have attribute \"{}\".'.format(attr))\n\n super(LinearAnnealedPolicy, self).__init__()\n\n self.inner_policy = inner_policy\n self.attr = attr\n self.value_max = value_max\n self.value_min = value_min\n self.value_test = value_test\n self.nb_steps = nb_steps\n\n def get_current_value(self):\n \"\"\"Return current annealing value\n\n # Returns\n Value to use in annealing\n \"\"\"\n if self.agent.training:\n # Linear annealed: f(x) = ax + b.\n a = -float(self.value_max - self.value_min) / float(self.nb_steps)\n b = float(self.value_max)\n value = max(self.value_min, a * float(self.agent.step) + b)\n else:\n value = self.value_test\n return value\n\n def select_action(self, **kwargs):\n \"\"\"Choose an action to perform\n\n # Returns\n Action to take (int)\n \"\"\"\n setattr(self.inner_policy, self.attr, self.get_current_value())\n return self.inner_policy.select_action(**kwargs)\n\n @property\n def metrics_names(self):\n \"\"\"Return names of metrics\n\n # Returns\n List of metric names\n \"\"\"\n return ['mean_{}'.format(self.attr)]\n\n @property\n def metrics(self):\n \"\"\"Return metrics values\n\n # Returns\n List of metric values\n \"\"\"\n\n return [getattr(self.inner_policy, self.attr)]\n\n def get_config(self):\n \"\"\"Return configurations of LinearAnnealedPolicy\n\n # Returns\n Dict of config\n \"\"\"\n config = super(LinearAnnealedPolicy, self).get_config()\n config['attr'] = self.attr\n config['value_max'] = self.value_max\n config['value_min'] = self.value_min\n config['value_test'] = self.value_test\n config['nb_steps'] = self.nb_steps\n config['inner_policy'] = get_object_config(self.inner_policy)\n return config\n\nclass SoftmaxPolicy(Policy):\n \"\"\" Implement softmax policy for multinimial distribution\n\n Simple Policy\n\n - takes action according to the pobability distribution\n\n \"\"\"\n def select_action(self, nb_actions, probs):\n \"\"\"Return the selected action\n\n # Arguments\n probs (np.ndarray) : Probabilty for each action\n\n # Returns\n action\n\n \"\"\"\n action = np.random.choice(range(nb_actions), p=probs)\n return action\n\nclass EpsGreedyQPolicy(Policy):\n \"\"\"Implement the epsilon greedy policy\n\n Eps Greedy policy either:\n\n - takes a random action with probability 
epsilon\n - takes current best action with prob (1 - epsilon)\n \"\"\"\n def __init__(self, eps=.1):\n super(EpsGreedyQPolicy, self).__init__()\n self.eps = eps\n\n def select_action(self, q_values):\n \"\"\"Return the selected action\n\n # Arguments\n q_values (np.ndarray): List of the estimations of Q for each action\n\n # Returns\n Selection action\n \"\"\"\n assert q_values.ndim == 1\n nb_actions = q_values.shape[0]\n\n if np.random.uniform() < self.eps:\n action = np.random.random_integers(0, nb_actions-1)\n else:\n action = np.argmax(q_values)\n return action\n\n def get_config(self):\n \"\"\"Return configurations of EpsGreedyQPolicy\n\n # Returns\n Dict of config\n \"\"\"\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config\n\n\nclass GreedyQPolicy(Policy):\n \"\"\"Implement the greedy policy\n\n Greedy policy returns the current best action according to q_values\n \"\"\"\n def select_action(self, q_values):\n \"\"\"Return the selected action\n\n # Arguments\n q_values (np.ndarray): List of the estimations of Q for each action\n\n # Returns\n Selection action\n \"\"\"\n assert q_values.ndim == 1\n action = np.argmax(q_values)\n return action\n\n\nclass BoltzmannQPolicy(Policy):\n \"\"\"Implement the Boltzmann Q Policy\n\n Boltzmann Q Policy builds a probability law on q values and returns\n an action selected randomly according to this law.\n \"\"\"\n def __init__(self, tau=1., clip=(-500., 500.)):\n super(BoltzmannQPolicy, self).__init__()\n self.tau = tau\n self.clip = clip\n\n def select_action(self, q_values):\n \"\"\"Return the selected action\n\n # Arguments\n q_values (np.ndarray): List of the estimations of Q for each action\n\n # Returns\n Selection action\n \"\"\"\n assert q_values.ndim == 1\n q_values = q_values.astype('float64')\n nb_actions = q_values.shape[0]\n\n exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))\n probs = exp_values / np.sum(exp_values)\n action = np.random.choice(range(nb_actions), p=probs)\n return action\n\n def get_config(self):\n \"\"\"Return configurations of BoltzmannQPolicy\n\n # Returns\n Dict of config\n \"\"\"\n config = super(BoltzmannQPolicy, self).get_config()\n config['tau'] = self.tau\n config['clip'] = self.clip\n return config\n\n\nclass MaxBoltzmannQPolicy(Policy):\n \"\"\"\n A combination of the eps-greedy and Boltzman q-policy.\n\n Wiering, M.: Explorations in Efficient Reinforcement Learning.\n PhD thesis, University of Amsterdam, Amsterdam (1999)\n\n https://pure.uva.nl/ws/files/3153478/8461_UBA003000033.pdf\n \"\"\"\n def __init__(self, eps=.1, tau=1., clip=(-500., 500.)):\n super(MaxBoltzmannQPolicy, self).__init__()\n self.eps = eps\n self.tau = tau\n self.clip = clip\n\n def select_action(self, q_values):\n \"\"\"Return the selected action\n The selected action follows the BoltzmannQPolicy with probability epsilon\n or return the Greedy Policy with probability (1 - epsilon)\n\n # Arguments\n q_values (np.ndarray): List of the estimations of Q for each action\n\n # Returns\n Selection action\n \"\"\"\n assert q_values.ndim == 1\n q_values = q_values.astype('float64')\n nb_actions = q_values.shape[0]\n\n if np.random.uniform() < self.eps:\n exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))\n probs = exp_values / np.sum(exp_values)\n action = np.random.choice(range(nb_actions), p=probs)\n else:\n action = np.argmax(q_values)\n return action\n\n def get_config(self):\n \"\"\"Return configurations of MaxBoltzmannQPolicy\n\n # 
Returns\n Dict of config\n \"\"\"\n config = super(MaxBoltzmannQPolicy, self).get_config()\n config['eps'] = self.eps\n config['tau'] = self.tau\n config['clip'] = self.clip\n return config\n\n\nclass BoltzmannGumbelQPolicy(Policy):\n \"\"\"Implements Boltzmann-Gumbel exploration (BGE) adapted for Q learning\n based on the paper Boltzmann Exploration Done Right\n (https://arxiv.org/pdf/1705.10257.pdf).\n\n BGE is invariant with respect to the mean of the rewards but not their\n variance. The parameter C, which defaults to 1, can be used to correct for\n this, and should be set to the least upper bound on the standard deviation\n of the rewards.\n\n BGE is only available for training, not testing. For testing purposes, you\n can achieve approximately the same result as BGE after training for N steps\n on K actions with parameter C by using the BoltzmannQPolicy and setting\n tau = C/sqrt(N/K).\"\"\"\n\n def __init__(self, C=1.0):\n assert C > 0, \"BoltzmannGumbelQPolicy C parameter must be > 0, not \" + repr(C)\n super(BoltzmannGumbelQPolicy, self).__init__()\n self.C = C\n self.action_counts = None\n\n def select_action(self, q_values):\n \"\"\"Return the selected action\n\n # Arguments\n q_values (np.ndarray): List of the estimations of Q for each action\n\n # Returns\n Selection action\n \"\"\"\n # We can't use BGE during testing, since we don't have access to the\n # action_counts at the end of training.\n assert self.agent.training, \"BoltzmannGumbelQPolicy should only be used for training, not testing\"\n\n assert q_values.ndim == 1, q_values.ndim\n q_values = q_values.astype('float64')\n\n # If we are starting training, we should reset the action_counts.\n # Otherwise, action_counts should already be initialized, since we\n # always do so when we begin training.\n if self.agent.step == 0:\n self.action_counts = np.ones(q_values.shape)\n assert self.action_counts is not None, self.agent.step\n assert self.action_counts.shape == q_values.shape, (self.action_counts.shape, q_values.shape)\n\n beta = self.C/np.sqrt(self.action_counts)\n Z = np.random.gumbel(size=q_values.shape)\n\n perturbation = beta * Z\n perturbed_q_values = q_values + perturbation\n action = np.argmax(perturbed_q_values)\n\n self.action_counts[action] += 1\n return action\n\n def get_config(self):\n \"\"\"Return configurations of BoltzmannGumbelQPolicy\n\n # Returns\n Dict of config\n \"\"\"\n config = super(BoltzmannGumbelQPolicy, self).get_config()\n config['C'] = self.C\n return config\n"
] |
[
[
"numpy.sqrt",
"numpy.clip",
"numpy.random.gumbel",
"numpy.ones",
"numpy.argmax",
"numpy.random.random_integers",
"numpy.random.uniform",
"numpy.sum"
]
] |
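A self-contained sketch of the EpsGreedyQPolicy.select_action logic above, with np.random.randint substituted for the long-deprecated np.random.random_integers(0, nb_actions - 1); the eps_greedy name is ours, not keras-rl's.

import numpy as np

def eps_greedy(q_values: np.ndarray, eps: float = 0.1) -> int:
    assert q_values.ndim == 1
    if np.random.uniform() < eps:
        return int(np.random.randint(0, q_values.shape[0]))  # explore: random action
    return int(np.argmax(q_values))                          # exploit: greedy action

print(eps_greedy(np.array([0.1, 0.5, 0.2])))  # 1 with probability ~0.93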
JaiWillems/celest
|
[
"5074b94feecc39127e5b0b0b1a683f636b725ca4"
] |
[
"celest/encounter/groundposition.py"
] |
[
"\n\nimport numpy as np\n\n\nclass GroundPosition(object):\n \"\"\"GroundPosition(latitude, longitude)\n\n Localize Earth surface location information.\n\n Parameters\n ----------\n latitude : float\n Lattitude of the location in decimal degrees.\n longitude : float\n Longitude of the location in decimal degrees.\n\n Attributes\n ----------\n lat, lon : float\n Latitude and longitude of the location in decimal degrees.\n radius : float\n Earth radius at (`latitude`, `longitude`).\n \"\"\"\n\n def __init__(self, latitude: float, longitude: float) -> None:\n\n self.lat = latitude\n self.lon = longitude\n self.radius = self._radius(latitude)\n\n def __str__(self) -> str:\n\n return f\"coor={(self.lat, self.lon)}, radius={round(self.radius, 5)}\"\n\n def _radius(self, latitude: float) -> float:\n \"\"\"Calculate geocentric radius using WGS84.\n\n Parameters\n ----------\n latitude : float\n Location's latitude in decimal degrees.\n\n Returns\n -------\n float\n Earth's geocentric radius in decimal kilometers.\n\n Notes\n -----\n The WGS84 Earth ellipsoid model is used as discussed in \"Earth Radius\n by Latitude (WGS 84)\" by Timur. [1]_\n\n References\n ----------\n .. [1] Timur. Earth Radius by Latitude (WGS 84). 2018. url:\n https://planetcalc.com/7721/.\n \"\"\"\n\n # Get lattidue parameter.\n phi = np.radians(latitude)\n\n # Define WGS84 Parameters.\n semi_major = 6378.137\n semi_minor = 6356.752314245\n\n c_phi, s_phi = np.cos(phi), np.sin(phi)\n\n num = (semi_major ** 2 * c_phi) ** 2 + (semi_minor ** 2 * s_phi) ** 2\n denom = (semi_major * c_phi) ** 2 + (semi_minor * s_phi) ** 2\n radius = np.sqrt(num / denom)\n\n return radius\n"
] |
[
[
"numpy.cos",
"numpy.radians",
"numpy.sqrt",
"numpy.sin"
]
] |
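A quick numerical check of the WGS84 geocentric-radius formula that GroundPosition._radius implements above: at the equator it must return the semi-major axis and at the poles the semi-minor axis. The wgs84_radius function is a rewrite for illustration, not celest's API.

import numpy as np

def wgs84_radius(latitude_deg: float) -> float:
    a, b = 6378.137, 6356.752314245  # WGS84 semi-major / semi-minor axes, km
    phi = np.radians(latitude_deg)
    c, s = np.cos(phi), np.sin(phi)
    return float(np.sqrt(((a**2 * c)**2 + (b**2 * s)**2) / ((a * c)**2 + (b * s)**2)))

print(wgs84_radius(0.0))   # 6378.137  (equator, semi-major axis)
print(wgs84_radius(90.0))  # ~6356.75  (pole, semi-minor axis)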
KenzoDezotti/cursoemvideo
|
[
"6eba03e67192f7384092192ed2cc1a8e59efd9b9"
] |
[
"data science Izero/regressao.py"
] |
[
"from matplotlib import pyplot as plt\nfrom collections import Counter\nfrom typing import List, Callable\n\n\nnum_friends = [100.0,49,41,40,25,21,21,19,19,18,18,16,15,15,15,15,14,14,13,13,13,13,12,12,11,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,8,8,8,8,8,8,8,8,8,8,8,8,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]\n\n\n\n\nfriend_counts = Counter(num_friends)\nxs = range(101) # largest value is 100\nys = [friend_counts[x] for x in xs] # height is just # of friends\nplt.bar(xs, ys)\nplt.axis([0, 101, 0, 25])\nplt.title(\"Histogram of Friend Counts\")\nplt.xlabel(\"# of friends\")\nplt.ylabel(\"# of people\")\nplt.show()"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
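The same Counter-to-bar-chart pattern as in regressao.py above, factored into a reusable helper; plot_histogram is a hypothetical name, not part of the original script.

from collections import Counter
from matplotlib import pyplot as plt

def plot_histogram(values, max_x, title, xlabel, ylabel):
    counts = Counter(values)              # value -> frequency
    xs = range(max_x + 1)
    plt.bar(xs, [counts[x] for x in xs])  # missing keys count as 0
    plt.axis([0, max_x + 1, 0, max(counts.values()) + 1])
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.show()

plot_histogram([1, 2, 2, 3, 3, 3], 5, "Toy histogram", "value", "count")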
vitaut/pytorch
|
[
"7bc5962329f2b4c976ce01c0601255721cc5a2e0"
] |
[
"torch/ao/quantization/fx/quantization_patterns.py"
] |
[
"import torch\nfrom torch.fx import GraphModule\nfrom torch.fx.graph import (\n Node,\n Graph,\n)\nfrom ..observer import (\n default_affine_fixed_qparams_observer,\n default_symmetric_fixed_qparams_observer,\n)\n\nfrom ..quantization_mappings import (\n get_static_quant_module_class,\n get_dynamic_quant_module_class,\n get_quantized_operator,\n)\nfrom ..utils import (\n get_swapped_custom_module_class,\n activation_is_statically_quantized,\n activation_is_int8_quantized,\n weight_is_statically_quantized,\n get_qconfig_dtypes,\n activation_dtype,\n get_qparam_dict,\n check_node,\n)\n\nfrom torch.ao.quantization.quantize import (\n is_activation_post_process,\n)\n\nfrom .pattern_utils import (\n register_quant_pattern,\n get_default_output_activation_post_process_map,\n Pattern,\n)\nfrom ..utils import _parent_name\nfrom .utils import (\n all_node_args_have_no_tensors,\n quantize_node,\n get_per_tensor_qparams,\n get_linear_prepack_op_for_dtype,\n create_qparam_nodes,\n get_qconv_prepack_op,\n get_qconv_op,\n create_node_from_old_node_preserve_meta,\n)\n\nfrom ..qconfig import QConfigAny\n\nfrom abc import ABC\nimport operator\nimport warnings\n\nfrom typing import Any, Callable, Dict, Union, Optional, Tuple, List\n\n# -------------------------\n# Pattern Registrations\n# -------------------------\n\n# 1. Post Training Static Quantization and Quantization Aware Training Patterns\n\n# Base Pattern Handler\nclass QuantizeHandler(ABC):\n \"\"\" Base handler class for the quantizer patterns\n \"\"\"\n def __init__(self, node: Node, modules: Dict[str, torch.nn.Module]):\n \"\"\" Records pattern information in __init__, which will be used\n in convert\n \"\"\"\n # this is an indicator of whether all the inputs are Node or not\n # since some op might be quantized differently depending on whether\n # all inputs are tensors or not, e.g. add/mul\n self.num_tensor_args = len(node.args)\n self.all_node_args_are_tensors = True\n # the last node of the matched pattern\n self.last_node = node\n\n def _maybe_get_last_node_only_observer(\n self,\n modules: Dict[str, torch.nn.Module]\n ) -> Optional[torch.nn.Module]:\n \"\"\"\n If the last node of the pattern is observed, return the observer\n instance. 
Otherwise, return None.\n \"\"\"\n for maybe_obs_node, _ in self.last_node.users.items():\n if maybe_obs_node.op == 'call_module':\n maybe_obs = modules[str(maybe_obs_node.target)]\n if is_activation_post_process(maybe_obs):\n return maybe_obs\n return None\n\n def input_output_observed(self) -> bool:\n \"\"\"\n Returns True if the pattern matched to this qhandler could be\n be observed, and False it it should not be observed.\n \"\"\"\n return True\n\n def is_general_tensor_value_op(self) -> bool:\n \"\"\"\n Returns True if the operator works for both floating point and\n quantized input, and does some computation based on the input Tensor,\n so we need to insert observer/fake_quant for the output of the\n operator since the distribution of values is different for input and output\n Tensors (for HistogramObserver)\n while they share the same quantization parameters\n Example: avgpool2d\n \"\"\"\n return False\n\n def is_general_tensor_shape_op(self) -> bool:\n \"\"\" Similar to is_general_tensor_value_op, this is a check\n for ops that works for both floating point and quantized input,\n that only re-arranges the Tensor values or query some metadata about the Tensor\n We don't insert observer/fake_quant for the output of these operators\n Example: reshape, transpose, maxpool2d\n \"\"\"\n return False\n\n def should_insert_observer_for_output(\n self,\n qconfig: Any,\n model_is_training: bool,\n ) -> bool:\n \"\"\"\n Returns true if an observer should be inserted for the output of\n the pattern matched to this QuantizeHandler instance during the\n prepare step.\n \"\"\"\n # TODO(future PR): potentially clean up and deduplicate these\n # mappings.\n return self.all_node_args_are_tensors and self.input_output_observed()\n\n def should_mark_output_quantized_from_input_quantized_status(\n self,\n qconfig: QConfigAny\n ) -> bool:\n \"\"\"\n Returns true if after convert, the output of the matched pattern is\n quantized iff the first input is also quantized.\n \"\"\"\n return False\n\n def get_activation_ctr(\n self,\n qconfig: Any,\n pattern: Pattern,\n is_training: bool,\n ) -> Optional[Callable]:\n \"\"\"\n Returns the constructor for the activation observer which should be\n used for the pattern matched to this handler. 
Some handlers override\n this to a different value than what is specified in the qconfig.\n \"\"\"\n return qconfig.activation\n\n def is_output_quantized(self, qconfig):\n \"\"\" Returns true if the output node of convert is quantized\n when is_reference is False, we would return float node when a certain dtype\n combination is not supported (since fbgemm/qnnpack only support certain dtype\n combinations), so the output may be float, but when is_reference is True,\n we support all dtype combinations so the output will always be quantized.\n\n TODO: This is fragile, whether output is quantized should not depend on `is_reference` since\n we want to make sure whether a Tensor is quantized\n should be the same in prepare and convert and is_reference\n is only available in convert currently\n\n \"\"\"\n return True\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n \"\"\" Convert the given node to a quantized node and insert\n it to the quantized graph\n \"\"\"\n return NotImplemented\n\n\n# Binary op configs\n\n# Supported combinations are:\n# quant_type | activation (compute_type) | weight\n# static quint8 qint8\n\n# tuple (activation_dtype, weight_dtype, compute_dtype)\n# these are supported types for common binary ops like add/mul etc.\nall_dtypes = [\n (torch.qint8, torch.qint8, None),\n (torch.quint8, torch.qint8, None),\n (torch.float16, torch.float16, None),\n]\nfp16_dtypes = [\n (torch.float16, torch.float16, None)\n]\nint8_dtypes = [\n (torch.qint8, torch.qint8, None),\n (torch.quint8, torch.qint8, None),\n]\nbinary_op_supported_dtypes : Dict[Union[Callable, str], List[Tuple[torch.dtype, torch.dtype, None]]] = {\n operator.add: all_dtypes,\n torch.add: all_dtypes,\n operator.mul: all_dtypes,\n torch.mul: all_dtypes,\n torch.bmm: fp16_dtypes,\n torch.sub: fp16_dtypes,\n operator.sub: fp16_dtypes,\n torch.div: fp16_dtypes,\n operator.truediv: fp16_dtypes,\n}\n\ndefault_op_supported_dtypes = {\n torch.nn.ConvTranspose1d: int8_dtypes,\n torch.nn.ConvTranspose2d: int8_dtypes,\n torch.nn.ELU: int8_dtypes,\n torch.nn.LeakyReLU: int8_dtypes,\n torch.nn.Hardswish: int8_dtypes,\n torch.nn.InstanceNorm1d: int8_dtypes,\n torch.nn.InstanceNorm2d: int8_dtypes,\n torch.nn.InstanceNorm3d: int8_dtypes,\n torch.nn.LayerNorm: all_dtypes,\n torch.nn.SiLU: fp16_dtypes,\n torch.nn.Mish: fp16_dtypes,\n torch.nn.GELU: int8_dtypes,\n torch.nn.Dropout: int8_dtypes,\n torch.nn.Softmax: int8_dtypes,\n torch.nn.functional.elu: int8_dtypes,\n torch.nn.functional.hardswish: int8_dtypes,\n torch.nn.functional.instance_norm: int8_dtypes,\n torch.nn.functional.layer_norm: all_dtypes,\n torch.nn.functional.leaky_relu: int8_dtypes,\n torch.nn.functional.silu: fp16_dtypes,\n torch.nn.functional.mish: fp16_dtypes,\n torch.nn.functional.gelu: int8_dtypes,\n torch.nn.functional.softmax: int8_dtypes,\n torch.nn.functional.dropout: int8_dtypes,\n torch.sum: fp16_dtypes,\n}\n\nQAT_CONV_MODULE_CLASSES = \\\n (torch.nn.qat.Conv2d,\n torch.nn.qat.Conv3d,\n torch.nn.intrinsic.qat.ConvBn2d,\n torch.nn.intrinsic.qat.ConvBnReLU2d,\n torch.nn.intrinsic.qat.ConvReLU2d,\n torch.nn.intrinsic.qat.ConvBn3d,\n torch.nn.intrinsic.qat.ConvBnReLU3d,\n torch.nn.intrinsic.qat.ConvReLU3d)\n\n\n##########################\n# Helper Functions\n##########################\n\ndef _load_weight_qparams(\n self, 
state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n key = prefix + \"_weight_qparams\"\n if key in state_dict:\n self._weight_qparams = state_dict[key]\n state_dict.pop(key)\n\ndef _save_weight_qparams(self, destination, prefix, keep_vars):\n for attr_name in dir(self):\n if \"_weight_qparams\" == attr_name and \\\n isinstance(getattr(self, attr_name), dict):\n weight_qparams = getattr(self, attr_name)\n destination[prefix + attr_name] = weight_qparams\n\n\ndef _to_reference(float_module, weight_qparams):\n \"\"\" Make a weighted float module (e.g. conv and linear )a reference module by\n attaching _weight_qparams that records the qparams for weight\n and change the name for the module so that it's recognized\n when people print the model\n \"\"\"\n float_module._weight_qparams = weight_qparams\n float_module._register_state_dict_hook(_save_weight_qparams)\n float_module._register_load_state_dict_pre_hook(_load_weight_qparams, with_module=True)\n\n float_module_name = float_module._get_name()\n\n def _get_name():\n return float_module_name + \"(Reference)\"\n\n float_module._get_name = _get_name\n\n@register_quant_pattern(operator.add)\n@register_quant_pattern(operator.sub)\n@register_quant_pattern(operator.mul)\n@register_quant_pattern(operator.truediv)\n@register_quant_pattern(torch.add)\n@register_quant_pattern(torch.sub)\n@register_quant_pattern(torch.mul)\n@register_quant_pattern(torch.div)\n@register_quant_pattern(torch.bmm)\n@register_quant_pattern((torch.nn.ReLU, operator.add))\n@register_quant_pattern((torch.nn.ReLU, operator.mul))\n@register_quant_pattern((torch.nn.ReLU, torch.add))\n@register_quant_pattern((torch.nn.ReLU, torch.mul))\n@register_quant_pattern((torch.nn.functional.relu, operator.add))\n@register_quant_pattern((torch.nn.functional.relu, operator.mul))\n@register_quant_pattern((torch.nn.functional.relu, torch.add))\n@register_quant_pattern((torch.nn.functional.relu, torch.mul))\nclass BinaryOpQuantizeHandler(QuantizeHandler):\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n self.relu_node = None\n if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \\\n (node.op == 'call_module' and isinstance(modules[str(node.target)], torch.nn.ReLU)):\n self.relu_node = node\n node = node.args[0] # type: ignore[assignment]\n self.binary_op_node = node\n self.binary_op = node.target\n\n # determine how many of the first two args are Tensors (versus scalars)\n # this distinguishes things like \"x + y\" from \"x + 2\" or \"2 + x\"\n self.num_tensor_args = 0\n cache_for_no_tensor_check: Dict[Node, bool] = dict()\n for arg_idx in range(len(self.binary_op_node.args)):\n arg = self.binary_op_node.args[arg_idx]\n if isinstance(arg, Node) and (not all_node_args_have_no_tensors(arg, modules, cache_for_no_tensor_check)):\n self.num_tensor_args += 1\n self.all_node_args_are_tensors = \\\n (self.num_tensor_args == len(self.binary_op_node.args))\n\n qbin_op_mapping: Dict[Union[Callable, str], Callable] = {\n operator.add: torch.ops.quantized.add,\n torch.add: torch.ops.quantized.add,\n operator.mul: torch.ops.quantized.mul,\n torch.mul: torch.ops.quantized.mul,\n }\n qbin_relu_op_mapping: Dict[Union[Callable, str], Callable] = {\n operator.add: torch.ops.quantized.add_relu,\n torch.add: torch.ops.quantized.add_relu,\n operator.mul: torch.ops.quantized.mul_relu,\n torch.mul: torch.ops.quantized.mul_relu,\n }\n # corresponding quantized op\n self.quantized_binary_op: 
Optional[Callable] = None\n if self.binary_op in qbin_op_mapping:\n self.quantized_binary_op = qbin_relu_op_mapping[self.binary_op] \\\n if self.relu_node is not None \\\n else qbin_op_mapping[self.binary_op]\n\n def should_insert_observer_for_output(\n self,\n qconfig: Any,\n model_is_training: bool,\n ) -> bool:\n \"\"\"\n Returns true if an observer should be inserted for the output of\n the pattern matched to this QuantizeHandler instance during the\n prepare step.\n \"\"\"\n dtypes = get_qconfig_dtypes(qconfig)\n if not (self.binary_op in binary_op_supported_dtypes and dtypes in binary_op_supported_dtypes[self.binary_op]):\n return False\n if self.num_tensor_args == 1:\n return True\n elif self.all_node_args_are_tensors and self.input_output_observed():\n return True\n else:\n return False\n\n def is_general_tensor_value_op(self) -> bool:\n return self.num_tensor_args == 1\n\n def input_output_observed(self):\n # for x + y where x and y are scalars, we do not observe anything\n return self.num_tensor_args > 0\n\n def is_output_quantized(self, qconfig):\n dtypes = get_qconfig_dtypes(qconfig)\n return self.binary_op in binary_op_supported_dtypes and \\\n dtypes in binary_op_supported_dtypes[self.binary_op]\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n\n if self.num_tensor_args == 0:\n # example: x + y, when x and y are scalars\n return quantized_graph.node_copy(\n node, load_arg(quantized=None))\n\n dtypes = get_qconfig_dtypes(qconfig)\n\n if is_reference:\n act_dtype = activation_dtype(qconfig)\n dtypes = get_qconfig_dtypes(qconfig)\n if act_dtype == torch.float or \\\n not (self.binary_op in binary_op_supported_dtypes and dtypes in binary_op_supported_dtypes[self.binary_op]):\n return quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n else:\n if self.num_tensor_args == 2:\n # make sure both inputs are quantized to act_dtype\n load_arg(quantized={0: act_dtype, 1: act_dtype})(self.binary_op_node.args)\n args = load_arg(quantized=torch.float)(self.binary_op_node.args)\n kwargs = load_arg(quantized=torch.float)(self.binary_op_node.kwargs)\n op_out = quantized_graph.node_copy(self.binary_op_node, load_arg(quantized=torch.float))\n\n def modified_load_arg(n: Node):\n if n.name == self.binary_op_node.name:\n return op_out\n else:\n return load_arg(quantized=torch.float)(n)\n\n if self.relu_node:\n op_out = quantized_graph.node_copy(self.relu_node, modified_load_arg)\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n return quantize_node(\n op_out, activation_post_process,\n node, modules, quantized_graph, node_name_to_scope, is_input=False)\n elif not is_reference and self.binary_op in binary_op_supported_dtypes and \\\n dtypes in binary_op_supported_dtypes[self.binary_op]:\n if dtypes in [(torch.quint8, torch.qint8, None)]:\n assert self.quantized_binary_op is not None\n if self.num_tensor_args == 1:\n # add/mul scalar\n first_arg = self.binary_op_node.args[0]\n cache_for_no_tensor_check: Dict[Node, bool] = dict()\n if isinstance(first_arg, Node) and (\n not all_node_args_have_no_tensors(\n first_arg, modules, cache_for_no_tensor_check)):\n quantized_index = 0\n else:\n quantized_index = 1\n\n return create_node_from_old_node_preserve_meta(\n 
quantized_graph,\n (\n 'call_function', self.quantized_binary_op,\n load_arg(quantized=[quantized_index])(self.binary_op_node.args),\n self.binary_op_node.kwargs\n ),\n self.binary_op_node)\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[operator]\n scale = float(scale)\n zero_point = int(zero_point)\n scale_arg, zero_point_arg = \\\n create_qparam_nodes(\n node.name, scale, zero_point, modules,\n quantized_graph, node_name_to_scope)\n kwargs = {**self.binary_op_node.kwargs}\n add_args = (*load_arg(quantized=activation_dtype(qconfig))(self.binary_op_node.args), scale_arg, zero_point_arg)\n op = create_node_from_old_node_preserve_meta(\n quantized_graph,\n ('call_function', self.quantized_binary_op, add_args, kwargs),\n self.binary_op_node)\n return op\n else:\n assert dtypes == (torch.float16, torch.float16, None)\n # TODO (refactor) this is duplicated, maybe have a helper function\n if self.relu_node:\n op_out = quantized_graph.node_copy(self.binary_op_node, load_arg(quantized=torch.float))\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n else:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantized_graph.create_node(\n \"call_method\", \"to\", (op_out, torch.float16,), {}\n )\n else:\n # leave the op unquantized if the dtype,reference combination is not supported\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by {} for is_reference={}. 
\"\n \"Supported non-reference dtype combinations are: {} \"\n \"\".format(dtypes,\n self.binary_op,\n is_reference,\n binary_op_supported_dtypes[self.binary_op]\n )\n )\n if self.relu_node:\n op_out = quantized_graph.node_copy(self.binary_op_node, load_arg(quantized=torch.float))\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n\n\n@register_quant_pattern(torch.cat)\nclass CatQuantizeHandler(QuantizeHandler):\n def is_general_tensor_value_op(self) -> bool:\n return True\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n if not self.all_node_args_are_tensors:\n return NotImplemented\n if is_reference:\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.float:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return op_out\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n # make sure the first argument is quantized to act_dtype\n load_arg(quantized={0: act_dtype})(node.args)\n args = list(load_arg(quantized=torch.float)(node.args))\n kwargs = load_arg(quantized=torch.float)(node.kwargs)\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantize_node(\n op_out,\n activation_post_process,\n node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=torch.quint8))\n\n# handle conv, maybe followed by relu\n# NB: matching order is reversed, that is we match from the bottom of this list to the beginning\n@register_quant_pattern(torch.nn.Conv1d)\n@register_quant_pattern(torch.nn.Conv2d)\n@register_quant_pattern(torch.nn.Conv3d)\n@register_quant_pattern(torch.nn.functional.conv1d)\n@register_quant_pattern(torch.nn.functional.conv2d)\n@register_quant_pattern(torch.nn.functional.conv3d)\n# TODO: add qat.Conv1d\n@register_quant_pattern(torch.nn.qat.Conv2d)\n@register_quant_pattern(torch.nn.qat.Conv3d)\n@register_quant_pattern(torch.nn.intrinsic.ConvReLU1d)\n@register_quant_pattern(torch.nn.intrinsic.ConvReLU2d)\n@register_quant_pattern(torch.nn.intrinsic.ConvReLU3d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBn1d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBn2d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBn3d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBnReLU1d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBnReLU2d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBnReLU3d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvReLU2d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvReLU3d)\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.conv1d))\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.conv2d))\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.conv3d))\n@register_quant_pattern((torch.nn.ReLU, 
torch.nn.functional.conv1d))\n@register_quant_pattern((torch.nn.ReLU, torch.nn.functional.conv2d))\n@register_quant_pattern((torch.nn.ReLU, torch.nn.functional.conv3d))\n# just for error checks\n@register_quant_pattern((torch.nn.ReLU, torch.nn.Conv1d))\n@register_quant_pattern((torch.nn.ReLU, torch.nn.Conv2d))\n@register_quant_pattern((torch.nn.ReLU, torch.nn.Conv3d))\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.Conv2d))\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.Conv3d))\n# TODO: rename Relu -> ReLU to be more consistent with other classes\nclass ConvReluQuantizeHandler(QuantizeHandler):\n def __init__(self, node: Node, modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n self.relu_node = None\n if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \\\n (node.op == 'call_module' and isinstance(modules[str(node.target)], torch.nn.ReLU)):\n self.relu_node = node\n node = node.args[0] # type: ignore[assignment]\n self.conv_node = node\n if node.op == \"call_module\":\n self.conv = modules[str(self.conv_node.target)]\n elif node.op == \"call_function\":\n self.conv = node.target # type: ignore[assignment]\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n # Supported combinations are:\n # quant_type | activation (compute_type) | weight\n # static quint8 qint8\n\n # tuple (activation_dtype, weight_dtype, compute_dtype)\n supported_dtypes = [\n (torch.quint8, torch.qint8, None),\n ]\n\n # TODO: is_reference option for conv module\n dtypes = get_qconfig_dtypes(qconfig)\n # leave the op unquantized if the dtype combination is not supported\n if not is_reference and dtypes not in supported_dtypes:\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by Conv \"\n \"supported dtype combinations are: {}\".format(dtypes, supported_dtypes))\n if self.relu_node:\n conv_out = quantized_graph.node_copy(self.conv_node, load_arg(quantized=torch.float))\n relu_args = [conv_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n\n activation_int8_quantized = activation_is_int8_quantized(qconfig)\n\n if self.conv_node.op == 'call_module':\n # note that relu should already be fused into conv module in the fusion step\n assert self.relu_node is None, 'conv module and relu fusion is not executed, ' \\\n 'please make sure to run fusion before prepare'\n output_activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert output_activation_post_process is not None\n\n # We'll always produce reference pattern for torch.nn.Conv*d,\n # will remove the else branch after we migrated all use cases\n if is_reference or \\\n type(self.conv) in [torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d] and \\\n dtypes in [(torch.quint8, torch.qint8, None)]:\n # produce dequant - float_op - quant pattern\n dtype = torch.float\n if activation_int8_quantized:\n dtype = activation_dtype(qconfig)\n activation = 
load_arg(quantized=dtype)(self.conv_node.args[0])\n args = load_arg(quantized=torch.float)(self.conv_node.args)\n # Get the float conv and attach quantization scheme and quantization\n # parameters of weight to the module\n # and qparam is a dictionary of\n # {\"qscheme\": ..., \"scale\": ..., \"zero_point\": ...} for per tensor quantization or\n # {\"qscheme\": ..., \"scale\": ..., \"zero_point\": ..., \"axis\": ...} for per channel quantization\n float_conv = self.conv\n fused_conv = None\n if isinstance(\n float_conv,\n QAT_CONV_MODULE_CLASSES):\n # case 1. converting qat conv module to\n # a float conv module, we need to attch\n # weight fake_quant to the conv module,\n # weight fake_quant is assumed to be run during\n # QAT so we don't need to run it again here\n float_conv = self.conv.to_float() # type: ignore[operator]\n # change qat conv to conv\n parent_name, name = _parent_name(self.conv_node.target)\n setattr(modules[parent_name], name, float_conv)\n if isinstance(float_conv, torch.nn.intrinsic._FusedModule):\n fused_conv = float_conv\n float_conv = float_conv[0]\n weight_post_process = self.conv.weight_fake_quant\n else:\n # case 2. converting a conv module/fused conv module\n # to float conv module, we need to attach\n # weight observer to the conv module and run it\n # with conv weight\n if isinstance(float_conv, torch.nn.intrinsic._FusedModule):\n fused_conv = float_conv\n float_conv = float_conv[0] # type: ignore[index]\n assert qconfig is not None\n weight_post_process = qconfig.weight()\n # run weight observer\n weight_post_process(float_conv.weight) # type: ignore[operator]\n weight_qparams = get_qparam_dict(weight_post_process)\n # hardcoded for now, TODO: expose the api to user,\n # we can have a map from module to reference module\n # and allow user to register new ones\n qconv_cls = get_static_quant_module_class(\n type(float_conv), is_reference=True)\n ref_conv = qconv_cls.from_float(float_conv, weight_qparams) # type: ignore[attr-defined]\n # if the parent is a fused conv (Sequential), we can replace the first\n # item to ref conv, otherwise we can update\n # the conv instance in the module tree\n if fused_conv is not None:\n fused_conv[0] = ref_conv\n else:\n parent_name, name = _parent_name(self.conv_node.target)\n setattr(modules[parent_name], name, ref_conv)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n ('call_module', self.conv_node.target, args, {}),\n self.conv_node)\n if output_activation_post_process:\n op_out = quantize_node(\n op_out,\n output_activation_post_process,\n node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n return op_out\n else:\n if convert_custom_config_dict is None:\n convert_custom_config_dict = {}\n additional_static_quant_mapping = convert_custom_config_dict.get(\"static\", {})\n # 1. attach activation post process to module\n self.conv.activation_post_process = output_activation_post_process\n # 2. 
select quantized class\n qconv_cls = get_static_quant_module_class(\n type(self.conv), additional_static_quant_mapping, is_reference=is_reference)\n quantized = qconv_cls.from_float(self.conv)\n parent_name, name = _parent_name(self.conv_node.target)\n setattr(modules[parent_name], name, quantized)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n self.conv_node.target,\n (load_arg(quantized=torch.quint8)(self.conv_node.args[0]),),\n {},\n ),\n self.conv_node)\n else: # call_function\n assert self.conv_node.op == \"call_function\"\n if is_reference:\n # make sure the input and weight are quantized to torch.quint8, torch.qint8, respectively\n load_arg(quantized={0: torch.quint8, 1: torch.qint8})(self.conv_node.args)\n args = load_arg(quantized=torch.float)(self.conv_node.args)\n kwargs = load_arg(quantized=torch.float)(self.conv_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", self.conv, args, kwargs),\n self.conv_node)\n if self.relu_node:\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n\n if activation_int8_quantized:\n root_module = modules['']\n act_post_process_name = self.relu_node.name if self.relu_node else self.conv_node.name\n act_post_process_node = self.relu_node if self.relu_node else self.conv_node\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n return quantize_node(\n op_out,\n activation_post_process,\n act_post_process_node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n else:\n # output for dynamically quantized conv op is not quantized\n return op_out\n else:\n assert len(self.conv_node.args) >= 7, \\\n \"only conv2d calls with all arguments specified is supported right now in is_reference=False option\"\n # make sure the input and weight are quantized to torch.quint8, torch.qint8, respectively\n args = load_arg(quantized={0: torch.quint8, 1: torch.qint8})(self.conv_node.args)\n # pack weight\n weight = load_arg(quantized=torch.qint8)(self.conv_node.args[1])\n other_args = load_arg(quantized=torch.float)(self.conv_node.args[2:])\n bias, stride, padding, dilation, groups = other_args\n if self.conv == torch.nn.functional.conv1d:\n # F.conv1d can take `int` as well as `list[int]` for stride,\n # padding, dilation, but the prepack op cannot. 
Convert\n # these to lists if needed.\n stride = [stride] if isinstance(stride, int) else stride\n padding = [padding] if isinstance(padding, int) else padding\n dilation = [dilation] if isinstance(dilation, int) else dilation\n prepack_args = (weight, bias, stride, padding, dilation, groups)\n prepack_op = get_qconv_prepack_op(self.conv)\n packed_weight = quantized_graph.create_node(\n \"call_function\", prepack_op, prepack_args, {})\n assert activation_int8_quantized, \\\n \"currently only static quantization is supported for conv\"\n # construct conv input\n if activation_int8_quantized:\n qconv_op = get_qconv_op(self.conv, self.relu_node is not None)\n conv_input = load_arg(quantized=torch.quint8)(self.conv_node.args[0])\n\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n\n scale, zero_point, _ = get_per_tensor_qparams(activation_post_process)\n scale_node, zero_point_node = \\\n create_qparam_nodes(\n self.conv_node.name, scale, zero_point, modules,\n quantized_graph, node_name_to_scope)\n qconv_args = (conv_input, packed_weight, scale_node, zero_point_node)\n kwargs = load_arg(quantized=torch.float)(self.conv_node.kwargs)\n op = create_node_from_old_node_preserve_meta(\n quantized_graph,\n ('call_function', qconv_op, qconv_args, kwargs),\n self.conv_node)\n # Store the name of the fused op to get the path of node after fusion as well.\n # TODO: may need to change the key to Node regenerate the map in each transformation,\n # since we might not be able to rely on the name\n node_name_to_scope[op.name] = node_name_to_scope[self.conv_node.name]\n return op\n else:\n # conv2d_dyanmic branch\n raise Exception(\"Only static quant is supported for conv\")\n\n@register_quant_pattern(torch.nn.Linear)\n@register_quant_pattern(torch.nn.functional.linear)\n@register_quant_pattern(torch.nn.qat.Linear)\n@register_quant_pattern(torch.nn.intrinsic.LinearReLU)\n@register_quant_pattern(torch.nn.intrinsic.qat.LinearReLU)\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.linear))\n@register_quant_pattern((torch.nn.ReLU, torch.nn.functional.linear))\n# for error checks\n@register_quant_pattern((torch.nn.ReLU, torch.nn.Linear))\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.Linear))\nclass LinearReLUQuantizeHandler(QuantizeHandler):\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n self.relu_node = None\n if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \\\n (node.op == 'call_module' and isinstance(modules[str(node.target)], torch.nn.ReLU)):\n self.relu_node = node\n node = node.args[0] # type: ignore[assignment]\n self.linear_node = node\n if node.op == 'call_module':\n self.linear = modules[str(self.linear_node.target)]\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n if convert_custom_config_dict is None:\n convert_custom_config_dict = {}\n # Supported combinations are:\n # quant_type | activation (compute_type) | weight\n # static quint8 qint8\n # dynamic float32 (quint8) qint8\n # weight_only float32 float16\n # tuple (activation_dtype, weight_dtype, compute_dtype)\n supported_dtypes = [\n (torch.quint8, torch.qint8, None),\n (torch.float32, torch.qint8, 
torch.quint8),\n (torch.float32, torch.float16, None),\n # static float16 quantization\n (torch.float16, torch.float16, None),\n ]\n dtypes = get_qconfig_dtypes(qconfig)\n # leave the op unquantized if the dtype combination is not supported\n if not is_reference and dtypes not in supported_dtypes:\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by Linear \"\n \"supported dtype combinations are: {}\".format(dtypes, supported_dtypes))\n if self.relu_node:\n op_out = quantized_graph.node_copy(self.linear_node, load_arg(quantized=torch.float))\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n\n activation_int8_quantized = activation_is_int8_quantized(qconfig)\n activation_statically_quantized = activation_is_statically_quantized(qconfig)\n weight_dtype = dtypes[1]\n if self.linear_node.op == 'call_module':\n\n output_activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n\n # note that relu should already be fused into linear modul in the fusion step\n assert self.relu_node is None, 'linear module and relu fusion is not executed, ' \\\n 'please make sure to run fusion before prepare'\n # we'll always produce reference pattern for torch.nn.Linear,\n # will remove the else branch after we migrated all use cases\n if is_reference or \\\n type(self.linear) in [torch.nn.Linear] and dtypes in [(torch.quint8, torch.qint8, None)]:\n # produce dequant - float_op - quant pattern\n dtype = torch.float\n if activation_int8_quantized:\n dtype = activation_dtype(qconfig)\n activation = load_arg(quantized=dtype)(self.linear_node.args[0])\n args = load_arg(quantized=torch.float)(self.linear_node.args)\n # Get the float linear and attach qscheme and qparams\n # the the module\n float_linear = self.linear\n fused_linear = None\n if isinstance(float_linear, (torch.nn.qat.Linear, torch.nn.intrinsic.qat.LinearReLU)):\n float_linear = float_linear.to_float()\n # change qat linear to linear\n parent_name, name = _parent_name(self.linear_node.target)\n setattr(modules[parent_name], name, float_linear)\n # Attach weight fake quant to the linear module\n if isinstance(float_linear, torch.nn.intrinsic.LinearReLU):\n fused_linear = float_linear\n float_linear = float_linear[0]\n weight_post_process = self.linear.weight_fake_quant\n else:\n if isinstance(float_linear, torch.nn.intrinsic.LinearReLU):\n fused_linear = float_linear\n float_linear = self.linear[0] # type: ignore[index]\n # Attach the weight observer to the module\n weight_post_process = qconfig.weight() # type: ignore[union-attr]\n # Run weight observer\n weight_post_process(float_linear.weight) # type: ignore[operator]\n\n weight_qparams = get_qparam_dict(weight_post_process)\n # TODO: include the configuration in backend_config_dict\n # we can have a map from module to reference module\n # and allow user to register new ones\n qlinear_cls = get_static_quant_module_class(\n type(float_linear), is_reference=True)\n ref_linear = qlinear_cls.from_float(float_linear, weight_qparams)\n\n # if the parent is a fused linear (Sequential), we can replace the first\n # item to ref linear, otherwise we can update\n # the linear instance in the module tree\n if 
fused_linear is not None:\n fused_linear[0] = ref_linear\n else:\n parent_name, name = _parent_name(self.linear_node.target)\n setattr(modules[parent_name], name, ref_linear)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n ('call_module', self.linear_node.target, args, {}),\n self.linear_node)\n if output_activation_post_process:\n op_out = quantize_node(\n op_out,\n output_activation_post_process,\n node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n return op_out\n # non-reference option\n else:\n # 1. attach output activation post process to linear module\n if output_activation_post_process:\n self.linear.activation_post_process = output_activation_post_process\n\n # 2. select corresponding quantized linear class for the float linear class\n if activation_int8_quantized:\n additional_static_quant_mapping = convert_custom_config_dict.get(\"static\", {})\n qlinear = get_static_quant_module_class(\n type(self.linear), additional_static_quant_mapping)\n else:\n assert dtypes in [\n (torch.float32, torch.qint8, torch.quint8),\n (torch.float32, torch.float16, None),\n ], f\"dtype {dtypes} not supported yet\"\n additional_dynamic_quant_mapping = convert_custom_config_dict.get(\"dynamic\", {})\n qlinear = get_dynamic_quant_module_class(type(self.linear), additional_dynamic_quant_mapping)\n\n quantized = qlinear.from_float(self.linear)\n parent_name, name = _parent_name(self.linear_node.target)\n setattr(modules[parent_name], name, quantized)\n # activation needs to be quantized for static quantization\n dtype = torch.float\n if activation_int8_quantized:\n dtype = activation_dtype(qconfig)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n self.linear_node.target,\n (load_arg(quantized=dtype)(self.linear_node.args[0]),), {},\n ),\n self.linear_node)\n else: # call_function\n assert self.linear_node.op == 'call_function'\n if is_reference:\n quantized_input_dtypes = [torch.float, torch.float]\n if activation_int8_quantized:\n quantized_input_dtypes[0] = torch.quint8\n if weight_is_statically_quantized(qconfig):\n quantized_input_dtypes[1] = torch.qint8\n args = load_arg(quantized=quantized_input_dtypes)(self.linear_node.args)\n args = load_arg(quantized=torch.float)(self.linear_node.args)\n kwargs = load_arg(quantized=torch.float)(self.linear_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.linear, args, kwargs),\n self.linear_node)\n if self.relu_node:\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n\n if activation_statically_quantized:\n # quantize output for statically quantized linear op\n root_module = modules['']\n act_post_process_name = self.relu_node.name if self.relu_node else self.linear_node.name\n act_post_process_node = self.relu_node if self.relu_node else self.linear_node\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n return quantize_node(\n op_out,\n activation_post_process,\n act_post_process_node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n else:\n # output for dynamically quantized linear op is not quantized\n 
return op_out\n else: # non-reference option\n # prepacking weights for static int8 quant and dynamic quant\n if dtypes != (torch.float16, torch.float16, None):\n # linear args\n # (x, weight, bias, ...)\n # TODO: the name should be weight is int8 quantized\n weight_quantized = weight_is_statically_quantized(qconfig)\n dtype = weight_dtype if weight_quantized else torch.float\n linear_weight = load_arg(quantized=dtype)(self.linear_node.args[1])\n\n # get other arguments\n kwargs = {**load_arg(quantized=torch.float)(self.linear_node.kwargs)}\n # all args after bias, including bias\n other_args = load_arg(quantized=torch.float)(self.linear_node.args[2:])\n # bias might be either positional, or a keyword argument\n if len(self.linear_node.args) > 2:\n bias = load_arg(quantized=torch.float)(self.linear_node.args[2])\n other_args = other_args[1:] # remove the bias argument\n else:\n bias = kwargs.pop('bias', None)\n\n prepack_args = (linear_weight, bias)\n prepack_op = get_linear_prepack_op_for_dtype(weight_dtype)\n packed_weight = quantized_graph.create_node(\n 'call_function', prepack_op, prepack_args, {})\n # construct linear input\n if activation_int8_quantized:\n qlinear_op = torch.ops.quantized.linear_relu if self.relu_node else torch.ops.quantized.linear\n linear_input = load_arg(quantized=torch.quint8)(self.linear_node.args[0])\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n scale, zero_point, _ = get_per_tensor_qparams(activation_post_process)\n scale_node, zero_point_node = \\\n create_qparam_nodes(\n self.linear_node.name, scale, zero_point, modules,\n quantized_graph, node_name_to_scope)\n\n qlinear_args = (linear_input, packed_weight, scale_node, zero_point_node)\n op = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", qlinear_op, qlinear_args, kwargs),\n self.linear_node)\n # Store the name of the fused op to get the path of node after fusion as well.\n # TODO: may need to change the key to Node regenerate the map in each transformation,\n # since we might not be able to rely on the name\n node_name_to_scope[op.name] = node_name_to_scope[self.linear_node.name]\n return op\n elif dtypes in [(torch.float32, torch.qint8, torch.quint8),\n (torch.float32, torch.float16, None)]:\n # choose linear dynamic or linear dynamic fp16 op based on weight dtype\n if weight_dtype == torch.qint8:\n if self.relu_node:\n qlinear_op = torch.ops.quantized.linear_relu_dynamic\n else:\n qlinear_op = torch.ops.quantized.linear_dynamic\n else:\n if self.relu_node:\n qlinear_op = torch.ops.quantized.linear_relu_dynamic_fp16\n else:\n qlinear_op = torch.ops.quantized.linear_dynamic_fp16\n\n linear_input = load_arg(quantized=torch.float)(self.linear_node.args[0])\n qlinear_args = (linear_input, packed_weight) # type: ignore[assignment]\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", qlinear_op, qlinear_args, kwargs),\n self.linear_node)\n # Store the name of the dynamic op to get the path of node after replacement as well.\n # TODO: may need to change the key to Node regenerate the map in each transformation,\n # since we might not be able to rely on the name\n node_name_to_scope[op_out.name] = node_name_to_scope[self.linear_node.name]\n return op_out\n else:\n assert dtypes == (torch.float16, torch.float16, None)\n # TODO (refactor) this is duplicated, maybe have a helper function\n if self.relu_node:\n op_out = 
quantized_graph.node_copy(self.linear_node, load_arg(quantized=torch.float))\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n else:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantized_graph.create_node(\n \"call_method\", \"to\", (op_out, torch.float16), {})\n\n@register_quant_pattern(torch.nn.BatchNorm2d)\n@register_quant_pattern(torch.nn.BatchNorm3d)\n@register_quant_pattern(torch.nn.intrinsic.BNReLU2d)\n@register_quant_pattern(torch.nn.intrinsic.BNReLU3d)\nclass BatchNormQuantizeHandler(QuantizeHandler):\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n assert node.op == 'call_module'\n self.bn_node = node\n self.bn = modules[str(self.bn_node.target)]\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n if convert_custom_config_dict is None:\n convert_custom_config_dict = {}\n additional_static_quant_mapping = convert_custom_config_dict.get(\"static\", {})\n # 1. attach activation post process to module\n output_activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert output_activation_post_process is not None\n if is_reference:\n # produce dequant - float_op - quant pattern\n dtype = activation_dtype(qconfig)\n activation = load_arg(quantized=dtype)(self.bn_node.args[0])\n args = load_arg(quantized=torch.float)(self.bn_node.args)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_module\", self.bn_node.target, args, {}),\n self.bn_node)\n if output_activation_post_process:\n op_out = quantize_node(\n op_out,\n output_activation_post_process,\n node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n return op_out\n else:\n self.bn.activation_post_process = output_activation_post_process\n qbn_cls = get_static_quant_module_class(type(self.bn), additional_static_quant_mapping)\n quantized = qbn_cls.from_float(self.bn)\n parent_name, name = _parent_name(self.bn_node.target)\n setattr(modules[parent_name], name, quantized)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n self.bn_node.target,\n load_arg(quantized=[0])(self.bn_node.args),\n load_arg(quantized=torch.float)(self.bn_node.kwargs),\n ),\n self.bn_node)\n\n@register_quant_pattern(torch.nn.qat.Embedding)\n@register_quant_pattern(torch.nn.qat.EmbeddingBag)\n@register_quant_pattern(torch.nn.Embedding)\n@register_quant_pattern(torch.nn.EmbeddingBag)\nclass EmbeddingQuantizeHandler(QuantizeHandler):\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n\n def input_output_observed(self) -> bool:\n return False\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n # Supported combinations are:\n # 
quant_type | activation | weight | activation_compute_type\n # weight_only | float32 | quint8 | None\n # weight_only | float32 | quint4x2 | None\n # tuple (activation_dtype, weight_dtype, compute_dtype)\n supported_dtypes = [\n (torch.float32, torch.quint8, None),\n (torch.float32, torch.quint4x2, None),\n ]\n assert node.op == 'call_module'\n emb_node = node\n dtypes = get_qconfig_dtypes(qconfig)\n # leave the op unquantized if the dtype combination is not supported\n if dtypes not in supported_dtypes:\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by Embedding/EmbeddingBag, \"\n \"supported dtype combinations are: {}\".format(dtypes, supported_dtypes))\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n\n emb = modules[str(emb_node.target)]\n qemb = get_static_quant_module_class(type(emb))\n quantized = qemb.from_float(emb)\n parent_name, name = _parent_name(emb_node.target)\n setattr(modules[parent_name], name, quantized)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n emb_node.target,\n load_arg(quantized=torch.float)(emb_node.args),\n load_arg(quantized=torch.float)(emb_node.kwargs),\n ),\n emb_node)\n\n# TODO (maybe): merge with embedding quantize handler\n@register_quant_pattern(torch.nn.GRUCell)\n@register_quant_pattern(torch.nn.LSTMCell)\n@register_quant_pattern(torch.nn.RNNCell)\n@register_quant_pattern(torch.nn.LSTM)\nclass RNNDynamicQuantizeHandler(QuantizeHandler):\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n\n def input_output_observed(self) -> bool:\n return False\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n # Supported combinations are:\n # quant_type | activation | weight | activation_compute_type\n # dynamic | float32 | qint8 | quint8\n # dynamic | float32 | float16 | None\n # tuple (activation_dtype, weight_dtype, compute_dtype)\n supported_dtypes = [\n (torch.float32, torch.qint8, torch.quint8),\n (torch.float32, torch.float16, None),\n ]\n assert node.op == 'call_module'\n dtypes = get_qconfig_dtypes(qconfig)\n # leave the op unquantized if the dtype combination is not supported\n if dtypes not in supported_dtypes:\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by Embedding/EmbeddingBag, \"\n \"supported dtype combinations are: {}\".format(dtypes, supported_dtypes))\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n\n module = modules[str(node.target)]\n qmodule_cls = get_dynamic_quant_module_class(type(module))\n qmodule = qmodule_cls.from_float(module)\n parent_name, name = _parent_name(node.target)\n setattr(modules[parent_name], name, qmodule)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n node.target,\n load_arg(quantized=torch.float)(node.args),\n load_arg(quantized=torch.float)(node.kwargs),\n ),\n node)\n\nARGS_TO_SKIP = {\n torch._ops.ops.quantized.hardswish: ['inplace'],\n torch._ops.ops.quantized.elu: ['inplace'],\n torch._ops.ops.quantized.dropout: ['inplace'],\n torch._ops.ops.quantized.instance_norm:\n ['running_mean', 'running_var', 'use_input_stats', 
'momentum'],\n}\n@register_quant_pattern(torch.nn.ConvTranspose1d)\n@register_quant_pattern(torch.nn.ConvTranspose2d)\n@register_quant_pattern(torch.nn.ELU)\n@register_quant_pattern(torch.nn.LeakyReLU)\n@register_quant_pattern(torch.nn.Hardswish)\n@register_quant_pattern(torch.nn.InstanceNorm1d)\n@register_quant_pattern(torch.nn.InstanceNorm2d)\n@register_quant_pattern(torch.nn.InstanceNorm3d)\n@register_quant_pattern(torch.nn.LayerNorm)\n@register_quant_pattern(torch.nn.SiLU)\n@register_quant_pattern(torch.nn.Mish)\n@register_quant_pattern(torch.nn.Dropout)\n# we currently only support reference patterns for these ops so they have been removed\n# until they receive a proper fp16 kernel. To use the reference pattern, use a custom qconfig\n# @register_quant_pattern(torch.nn.GELU)\n# @register_quant_pattern(torch.nn.Softmax)\n@register_quant_pattern(torch.nn.functional.elu)\n@register_quant_pattern(torch.nn.functional.hardswish)\n@register_quant_pattern(torch.nn.functional.instance_norm)\n@register_quant_pattern(torch.nn.functional.layer_norm)\n@register_quant_pattern(torch.nn.functional.leaky_relu)\n@register_quant_pattern(torch.nn.functional.silu)\n@register_quant_pattern(torch.nn.functional.mish)\n@register_quant_pattern(torch.nn.functional.dropout)\n# we currently only support reference patterns for these ops so they have been removed\n# until they receive a proper fp16 kernel. To use the reference pattern, use a custom qconfig\n# @register_quant_pattern(torch.nn.functional.gelu)\n# @register_quant_pattern(torch.nn.functional.softmax)\n@register_quant_pattern(torch.sum)\nclass DefaultNodeQuantizeHandler(QuantizeHandler):\n \"\"\" Common quantized op, first input and first output will be quantized\n \"\"\"\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n if node.op == \"call_function\" or node.op == \"call_method\":\n self.op = node.target\n elif node.op == \"call_module\":\n self.op = type(modules[str(node.target)])\n\n def is_output_quantized(self, qconfig):\n dtypes = get_qconfig_dtypes(qconfig)\n return self.op in default_op_supported_dtypes and \\\n dtypes in default_op_supported_dtypes[self.op]\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n if not self.all_node_args_are_tensors:\n return NotImplemented\n assert node.op in ['call_module', 'call_function'], 'Only call_module and ' + \\\n 'call_function are handled in DefaultNode'\n if convert_custom_config_dict is None:\n convert_custom_config_dict = {}\n additional_static_quant_mapping = convert_custom_config_dict.get(\"static\", {})\n\n dtypes = get_qconfig_dtypes(qconfig)\n if not is_reference and dtypes not in default_op_supported_dtypes[self.op]:\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by {} \"\n \"supported dtype combinations are: {}\".format(dtypes, self.op, default_op_supported_dtypes[self.op]))\n return quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n # TODO: make helper functions for (torch.quint8, torch.qint8, None)\n if not is_reference:\n if dtypes in [(torch.quint8, torch.qint8, None)]:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n if node.op == 'call_module':\n module = 
modules[str(node.target)]\n module.activation_post_process = activation_post_process\n quantized_module_cls = get_static_quant_module_class(\n type(module), additional_static_quant_mapping)\n quantized_module = quantized_module_cls.from_float(module)\n parent_name, name = _parent_name(node.target)\n setattr(modules[parent_name], name, quantized_module)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n node.target,\n load_arg(quantized=[0])(node.args),\n load_arg(quantized=torch.float)(node.kwargs),\n ),\n node)\n else:\n assert node.op == \"call_function\"\n # call_function\n scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[operator]\n scale = float(scale)\n zero_point = int(zero_point)\n scale_arg, zero_point_arg = \\\n create_qparam_nodes(\n node.name, scale, zero_point, modules,\n quantized_graph, node_name_to_scope)\n\n assert not isinstance(node.target, str), \"Expecting node.target for \"\n \"call_function to be a function instead of a string\"\n quantized_op = get_quantized_operator(node.target)\n args = load_arg(quantized=[0])(node.args)\n kwargs = {**load_arg(quantized=torch.float)(node.kwargs), \"output_scale\": scale_arg,\n \"output_zero_point\": zero_point_arg}\n if quantized_op in ARGS_TO_SKIP:\n args_to_skip = ARGS_TO_SKIP[quantized_op]\n for arg in args_to_skip:\n if arg in kwargs:\n kwargs.pop(arg)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", quantized_op, args, kwargs), # type: ignore[arg-type]\n node)\n else:\n assert dtypes in [(torch.float16, torch.float16, None)]\n # Generally fp16 kernels don't exist for fp16 ops\n warnings.warn(\n \"Only reference patterns are currently supported for {dtype} dtype with {op} op\"\n \"\".format(dtype=dtypes, op=self.op))\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantized_graph.create_node(\n \"call_method\", \"to\", (op_out, torch.float16), {})\n else:\n assert is_reference\n # We can produce reference for a dtypes including\n # (torch.quint8, torch.qint8, torch.qint32, torch.float16)\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.float:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return op_out\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n # make sure the input is quantized to act_dtype\n load_arg(quantized={0: act_dtype})(node.args)\n args = load_arg(quantized=torch.float)(node.args)\n kwargs = load_arg(quantized=torch.float)(node.kwargs)\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantize_node(\n op_out, activation_post_process,\n node, modules, quantized_graph, node_name_to_scope, is_input=False)\n\n@register_quant_pattern(torch.nn.Hardsigmoid, default_affine_fixed_qparams_observer)\n@register_quant_pattern(torch.nn.functional.hardsigmoid, default_affine_fixed_qparams_observer)\n@register_quant_pattern('hardsigmoid', default_affine_fixed_qparams_observer)\n@register_quant_pattern('hardsigmoid_', default_affine_fixed_qparams_observer)\n@register_quant_pattern(torch.nn.Sigmoid, default_affine_fixed_qparams_observer)\n@register_quant_pattern(torch.sigmoid, default_affine_fixed_qparams_observer)\n@register_quant_pattern('sigmoid', default_affine_fixed_qparams_observer)\n@register_quant_pattern('sigmoid_', default_affine_fixed_qparams_observer)\n@register_quant_pattern(torch.nn.Tanh, 
default_symmetric_fixed_qparams_observer)\n@register_quant_pattern(torch.tanh, default_symmetric_fixed_qparams_observer)\n@register_quant_pattern('tanh', default_symmetric_fixed_qparams_observer)\n@register_quant_pattern('tanh_', default_symmetric_fixed_qparams_observer)\nclass FixedQParamsOpQuantizeHandler(QuantizeHandler):\n def __init__(self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n self.node = node\n\n def should_mark_output_quantized_from_input_quantized_status(\n self,\n qconfig: QConfigAny\n ) -> bool:\n # FixQParamOps are the same as CopyNode in int8 quantization\n return activation_dtype(qconfig) in [torch.quint8, torch.qint8]\n\n # some qhandlers override the activations constructor\n def get_activation_ctr(self, qconfig, pattern, is_training) -> Optional[Callable]:\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.quint8:\n return get_default_output_activation_post_process_map(is_training).get(\n pattern, qconfig.activation)\n else:\n return qconfig.activation\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n if not is_reference:\n dtypes = get_qconfig_dtypes(qconfig)\n if dtypes == (torch.float16, torch.float16, None):\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantized_graph.create_node(\n \"call_method\", \"to\", (op_out, torch.float16,), {}\n )\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n else:\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.float:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return op_out\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n # make sure the input is quantized to act_dtype\n load_arg(quantized={0: act_dtype})(node.args)\n args = load_arg(quantized=torch.float)(node.args)\n kwargs = load_arg(quantized=torch.float)(node.kwargs)\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantize_node(\n op_out, activation_post_process,\n node, modules, quantized_graph, node_name_to_scope, 
is_input=False)\n\n@register_quant_pattern(torch.nn.AdaptiveAvgPool1d)\n@register_quant_pattern(torch.nn.AdaptiveAvgPool2d)\n@register_quant_pattern(torch.nn.AdaptiveAvgPool3d)\n@register_quant_pattern(torch.nn.AvgPool1d)\n@register_quant_pattern(torch.nn.AvgPool2d)\n@register_quant_pattern(torch.nn.AvgPool3d)\n@register_quant_pattern(torch.nn.Hardtanh)\n@register_quant_pattern(torch.nn.MaxPool1d)\n@register_quant_pattern(torch.nn.MaxPool2d)\n@register_quant_pattern(torch.nn.MaxPool3d)\n@register_quant_pattern(torch.nn.ReLU)\n@register_quant_pattern(torch.nn.ReLU6)\n@register_quant_pattern(torch.adaptive_avg_pool1d)\n@register_quant_pattern(torch.nn.functional.adaptive_avg_pool2d)\n@register_quant_pattern(torch.nn.functional.adaptive_avg_pool3d)\n@register_quant_pattern(torch.nn.functional.hardtanh)\n@register_quant_pattern(torch.nn.functional.hardtanh_)\n@register_quant_pattern(torch.nn.functional.interpolate)\n@register_quant_pattern(torch.nn.functional.max_pool1d)\n@register_quant_pattern(torch.nn.functional.max_pool2d)\n@register_quant_pattern(torch.nn.functional.max_pool3d)\n@register_quant_pattern(torch.nn.functional.relu)\n@register_quant_pattern(torch.nn.functional.relu6)\n@register_quant_pattern(torch.avg_pool1d)\n@register_quant_pattern(torch._C._nn.avg_pool2d)\n@register_quant_pattern(torch._C._nn.avg_pool3d)\n@register_quant_pattern(torch.clamp)\n@register_quant_pattern(torch.flatten)\n@register_quant_pattern(torch.mean)\n@register_quant_pattern(operator.floordiv)\n@register_quant_pattern('clamp')\n@register_quant_pattern('mean')\n@register_quant_pattern('relu')\n@register_quant_pattern('relu_')\nclass CopyNodeQuantizeHandler(QuantizeHandler):\n \"\"\" Operators that works on both float and quantized input\n if input is quantized, the output Tensor shares\n the same quantization parameter with input.\n These ops will do computation on the input Tensor, e.g. average pool, so we will\n insert extra observer/fake_quant for the output of these operators.\n TODO: maybe rename this to TensorValueOpQuantizeHandler\n \"\"\"\n def should_mark_output_quantized_from_input_quantized_status(\n self,\n qconfig: QConfigAny\n ) -> bool:\n return True\n\n def is_general_tensor_value_op(self) -> bool:\n return True\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n\n is_call_function, is_call_method, is_call_module = check_node(node, modules)\n if is_reference or (is_call_function or is_call_method or is_call_module):\n # when activation dtype is torch.float, the node does not require\n # observation\n # e.g. 
dynamic quantization or weight_only quantization\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.float:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return op_out\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n # make sure the input is quantized to act_dtype\n load_arg(quantized={0: act_dtype})(node.args)\n args = list(load_arg(quantized=torch.float)(node.args))\n kwargs = load_arg(quantized=torch.float)(node.kwargs)\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantize_node(\n op_out,\n activation_post_process,\n node, modules, quantized_graph, node_name_to_scope, is_input=False)\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n\nclass CustomModuleQuantizeHandler(QuantizeHandler):\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n \"\"\" Convert a float custom module to quantized custom module\n \"\"\"\n assert node.op == 'call_module'\n assert convert_custom_config_dict is not None\n custom_module_class_mapping = convert_custom_config_dict.get(\"observed_to_quantized_custom_module_class\", None)\n assert custom_module_class_mapping is not None\n observed_custom_module = modules[str(node.target)]\n if activation_is_statically_quantized(qconfig):\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n observed_custom_module.activation_post_process = activation_post_process\n quantized_custom_module_class = get_swapped_custom_module_class(\n observed_custom_module, custom_module_class_mapping, qconfig)\n quantized_custom_module = \\\n quantized_custom_module_class.from_observed(observed_custom_module)\n parent_name, name = _parent_name(node.target)\n setattr(modules[parent_name], name, quantized_custom_module)\n # hardcoded the quntized input to be None (take whatever is in the environemnt),\n # we can extend this\n # if there is a need, e.g. 
get the indexes of quantized inputs from some\n # module attribute like module._QUANTIZED_INPUT_INDEXES\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n\n@register_quant_pattern(torch.nn.Identity)\n@register_quant_pattern(torch.transpose)\n@register_quant_pattern(torch.repeat_interleave)\n@register_quant_pattern(torch.squeeze)\n@register_quant_pattern(torch.stack)\n@register_quant_pattern(torch.unsqueeze)\n@register_quant_pattern('contiguous')\n@register_quant_pattern('detach')\n@register_quant_pattern('detach_')\n@register_quant_pattern('permute')\n@register_quant_pattern('repeat')\n@register_quant_pattern('repeat_interleave')\n@register_quant_pattern('reshape')\n@register_quant_pattern('resize_')\n@register_quant_pattern('shape')\n@register_quant_pattern('size')\n@register_quant_pattern('squeeze')\n@register_quant_pattern('squeeze_')\n@register_quant_pattern('transpose')\n@register_quant_pattern('unsqueeze')\n@register_quant_pattern('unsqueeze_')\n@register_quant_pattern('view')\nclass GeneralTensorShapeOpQuantizeHandler(QuantizeHandler):\n \"\"\" Operators that works on both float and quantized input\n if input is quantized, the output Tensor shares\n the same quantization parameter with input.\n These ops only do rearrangement of Tensor values, for\n example reshape, or just query the information about Tensor\n e.g. size, and we do not insert extra observer/fake_quant\n for the output of the operator.\n \"\"\"\n def is_general_tensor_shape_op(self) -> bool:\n return True\n\n def should_mark_output_quantized_from_input_quantized_status(\n self,\n qconfig: QConfigAny\n ) -> bool:\n return True\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n if is_reference:\n # when activation dtype is torch.float, the node does not require\n # observation\n # e.g. 
dynamic quantization or weight_only quantization\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.float:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return op_out\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n # TODO: remove special case for operator.getitem\n # make sure the input is quantized to act_dtype if it's not operator.getitem\n if node.target != operator.getitem:\n load_arg(quantized={0: act_dtype})(node.args)\n args = list(load_arg(quantized=torch.float)(node.args))\n kwargs = load_arg(quantized=torch.float)(node.kwargs)\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantize_node(\n op_out,\n activation_post_process,\n node, modules, quantized_graph, node_name_to_scope, is_input=False)\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n\nclass StandaloneModuleQuantizeHandler(QuantizeHandler):\n \"\"\" Converts an observed standalone module to quantized standalone module\n by calling convert_fx on the observed standalone module.\n \"\"\"\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n assert node.op == 'call_module'\n convert = torch.ao.quantization.quantize_fx._convert_standalone_module_fx # type: ignore[attr-defined]\n # We know that observed standalone module is a GraphModule since\n # it's produced by us\n observed_standalone_module : GraphModule = modules[str(node.target)] # type: ignore[assignment]\n input_quantized_idxs = observed_standalone_module._standalone_module_input_quantized_idxs.tolist() # type: ignore[operator]\n quantized_standalone_module = convert(observed_standalone_module, is_reference=is_reference)\n parent_name, name = _parent_name(node.target)\n # update the modules dict\n setattr(modules[parent_name], name, quantized_standalone_module)\n modules[str(node.target)] = quantized_standalone_module\n return quantized_graph.node_copy(node, load_arg(quantized=input_quantized_idxs))\n\n\nclass ConvReLUQuantizeHandlerNew(QuantizeHandler):\n \"\"\" This is to unblock perf testing for TensorRT, this will be\n changed in the future so don't depend on this.\n \"\"\"\n def __init__(self, node: Node, modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n self.relu_node = None\n if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \\\n (node.op == 'call_module' and isinstance(modules[str(node.target)], torch.nn.ReLU)):\n self.relu_node = node\n node = node.args[0] # type: ignore[assignment]\n self.conv_node = node\n if node.op == \"call_module\":\n self.conv = modules[str(self.conv_node.target)]\n elif node.op == \"call_function\":\n self.conv = node.target # type: ignore[assignment]\n\n def is_output_quantized(self, qconfig):\n return False\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n assert is_reference, \"ConvReLUQuantizeHandlerNew only works for the case when is_reference=True\"\n activation_int8_quantized = 
activation_is_int8_quantized(qconfig)\n if self.conv_node.op == 'call_module':\n output_activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n # note that relu should already be fused into conv module in the fusion step\n assert self.relu_node is None, 'conv module and relu fusion is not executed, ' \\\n 'please make sure to run fusion before prepare'\n # produce dequant - float_op - quant pattern\n dtype = torch.float\n if activation_int8_quantized:\n dtype = activation_dtype(qconfig)\n activation = load_arg(quantized=dtype)(self.conv_node.args[0])\n args = load_arg(quantized=torch.float)(self.conv_node.args)\n # Get the float conv and attach quantization scheme and quantization\n # parameters of weight to the module\n # and qparam is a dictionary of\n # {\"qscheme\": ..., \"scale\": ..., \"zero_point\": ...} for per tensor quantization or\n # {\"qscheme\": ..., \"scale\": ..., \"zero_point\": ..., \"axis\": ...} for per channel quantization\n float_conv = self.conv\n fused_conv = None\n if isinstance(\n float_conv,\n QAT_CONV_MODULE_CLASSES):\n # case 1. converting qat conv module to\n # a float conv module, we need to attch\n # weight fake_quant to the conv module,\n # weight fake_quant is assumed to be run during\n # QAT so we don't need to run it again here\n float_conv = self.conv.to_float() # type: ignore[operator]\n # change qat conv to conv\n parent_name, name = _parent_name(self.conv_node.target)\n setattr(modules[parent_name], name, float_conv)\n if isinstance(float_conv, torch.nn.intrinsic._FusedModule):\n fused_conv = float_conv\n float_conv = float_conv[0]\n weight_post_process = self.conv.weight_fake_quant\n else:\n # case 2. converting a conv module/fused conv module\n # to float conv module, we need to attach\n # weight observer to the conv module and run it\n # with conv weight\n if isinstance(float_conv, torch.nn.intrinsic._FusedModule):\n fused_conv = float_conv\n float_conv = float_conv[0] # type: ignore[index]\n assert qconfig is not None\n weight_post_process = qconfig.weight()\n # run weight observer\n weight_post_process(float_conv.weight) # type: ignore[operator]\n weight_qparams = get_qparam_dict(weight_post_process)\n # hardcoded for now, TODO: expose the api to user,\n # we can have a map from module to reference module\n # and allow user to register new ones\n qconv_cls = get_static_quant_module_class(\n type(float_conv), is_reference=is_reference)\n ref_conv = qconv_cls.from_float(float_conv, weight_qparams) # type: ignore[attr-defined]\n # if the parent is a fused conv (Sequential), we can replace the first\n # item to ref conv, otherwise we can update\n # the conv instance in the module tree\n if fused_conv is not None:\n fused_conv[0] = ref_conv\n else:\n parent_name, name = _parent_name(self.conv_node.target)\n setattr(modules[parent_name], name, ref_conv)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n ('call_module', self.conv_node.target, args, {}),\n self.conv_node)\n # disabling quantize node for output for now, this will be controlled by the\n # backend_config_dict in the final design\n if output_activation_post_process:\n op_out = quantize_node(\n op_out,\n output_activation_post_process,\n node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n return op_out\n else:\n assert self.conv_node.op == \"call_function\"\n # make sure the input and weight are quantized to torch.quint8, torch.qint8, respectively\n load_arg(quantized={0: torch.quint8, 1: 
torch.qint8})(self.conv_node.args)\n args = load_arg(quantized=torch.float)(self.conv_node.args)\n kwargs = load_arg(quantized=torch.float)(self.conv_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", self.conv, args, kwargs),\n self.conv_node)\n if self.relu_node:\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n\n # disabling quantize node for output for now, this will be controlled by the\n # backend_config_dict in the final design\n # if activation_int8_quantized:\n # root_module = modules['']\n # act_post_process_name = self.relu_node.name if self.relu_node else self.conv_node.name\n # act_post_process_node = self.relu_node if self.relu_node else self.conv_node\n # activation_post_process = \\\n # self._maybe_get_last_node_only_observer(modules)\n # assert activation_post_process is not None\n # return quantize_node(\n # op_out,\n # activation_post_process,\n # act_post_process_node,\n # modules,\n # quantized_graph,\n # node_name_to_scope,\n # is_input=False)\n # else:\n # # output for dynamically quantized conv op is not quantized\n # return op_out\n return op_out\n\nclass LinearReLUQuantizeHandlerNew(QuantizeHandler):\n \"\"\" This is to unblock perf testing for TensorRT, this will be\n changed in the future so don't depend on this.\n \"\"\"\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n self.relu_node = None\n if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \\\n (node.op == 'call_module' and isinstance(modules[str(node.target)], torch.nn.ReLU)):\n self.relu_node = node\n node = node.args[0] # type: ignore[assignment]\n self.linear_node = node\n if node.op == 'call_module':\n self.linear = modules[str(self.linear_node.target)]\n\n def is_output_quantized(self, qconfig):\n return False\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n assert is_reference, \"LinearReLUQuantizeHandlerNew only works for the case when is_reference=True\"\n if convert_custom_config_dict is None:\n convert_custom_config_dict = {}\n dtypes = get_qconfig_dtypes(qconfig)\n activation_int8_quantized = activation_is_int8_quantized(qconfig)\n activation_statically_quantized = activation_is_statically_quantized(qconfig)\n weight_dtype = dtypes[1]\n if self.linear_node.op == 'call_module':\n\n output_activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n\n # note that relu should already be fused into linear modul in the fusion step\n assert self.relu_node is None, 'linear module and relu fusion is not executed, ' \\\n 'please make sure to run fusion before prepare'\n # produce dequant - float_op - quant pattern\n dtype = torch.float\n if activation_int8_quantized:\n dtype = activation_dtype(qconfig)\n activation = load_arg(quantized=dtype)(self.linear_node.args[0])\n args = load_arg(quantized=torch.float)(self.linear_node.args)\n # Get the float linear and attach qscheme and qparams\n # the the module\n 
float_linear = self.linear\n fused_linear = None\n if isinstance(float_linear, (torch.nn.qat.Linear, torch.nn.intrinsic.qat.LinearReLU)):\n float_linear = float_linear.to_float()\n # change qat linear to linear\n parent_name, name = _parent_name(self.linear_node.target)\n setattr(modules[parent_name], name, float_linear)\n # Attach weight fake quant to the linear module\n if isinstance(float_linear, torch.nn.intrinsic.LinearReLU):\n fused_linear = float_linear\n float_linear = float_linear[0]\n weight_post_process = self.linear.weight_fake_quant\n else:\n if isinstance(float_linear, torch.nn.intrinsic.LinearReLU):\n fused_linear = float_linear\n float_linear = self.linear[0] # type: ignore[index]\n # Attach the weight observer to the module\n weight_post_process = qconfig.weight() # type: ignore[union-attr]\n # Run weight observer\n weight_post_process(float_linear.weight) # type: ignore[operator]\n\n weight_qparams = get_qparam_dict(weight_post_process)\n # TODO: include the configuration in backend_config_dict\n # we can have a map from module to reference module\n # and allow user to register new ones\n qlinear_cls = get_static_quant_module_class(\n type(float_linear), is_reference=is_reference)\n ref_linear = qlinear_cls.from_float(float_linear, weight_qparams)\n\n # if the parent is a fused linear (Sequential), we can replace the first\n # item to ref linear, otherwise we can update\n # the linear instance in the module tree\n if fused_linear is not None:\n fused_linear[0] = ref_linear\n else:\n parent_name, name = _parent_name(self.linear_node.target)\n setattr(modules[parent_name], name, ref_linear)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n ('call_module', self.linear_node.target, args, {}),\n self.linear_node)\n if output_activation_post_process:\n op_out = quantize_node(\n op_out,\n output_activation_post_process,\n node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n return op_out\n else: # call_function\n assert self.linear_node.op == 'call_function'\n quantized_input_dtypes = [torch.float, torch.float]\n if activation_int8_quantized:\n quantized_input_dtypes[0] = torch.quint8\n if weight_is_statically_quantized(qconfig):\n quantized_input_dtypes[1] = torch.qint8\n args = load_arg(quantized=quantized_input_dtypes)(self.linear_node.args)\n args = load_arg(quantized=torch.float)(self.linear_node.args)\n kwargs = load_arg(quantized=torch.float)(self.linear_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.linear, args, kwargs),\n self.linear_node)\n if self.relu_node:\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n\n return op_out\n # TODO: enable later\n # if activation_statically_quantized:\n # # quantize output for statically quantized linear op\n # root_module = modules['']\n # act_post_process_name = self.relu_node.name if self.relu_node else self.linear_node.name\n # act_post_process_node = self.relu_node if self.relu_node else self.linear_node\n # activation_post_process = \\\n # self._maybe_get_last_node_only_observer(modules)\n # assert activation_post_process is not None\n # return quantize_node(\n # op_out,\n # activation_post_process,\n # 
act_post_process_node,\n # modules,\n # quantized_graph,\n # node_name_to_scope,\n # is_input=False)\n # else:\n # # output for dynamically quantized linear op is not quantized\n # return op_out\n"
] |
[
[
"torch.ao.quantization.quantize.is_activation_post_process"
]
] |
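The handler classes in this record implement the convert step of FX graph mode quantization. A minimal sketch of how they get exercised through the public entry points (prepare_fx/convert_fx; the qconfig-dict calling convention shown matches the PyTorch generation this file comes from, and TinyModel is a made-up module for illustration):

import torch
from torch.ao.quantization import get_default_qconfig
from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 4)  # matched by the linear pattern above
        self.relu = torch.nn.ReLU()          # matched by the relu/copy-node patterns

    def forward(self, x):
        return self.relu(self.linear(x))

model = TinyModel().eval()
qconfig_dict = {"": get_default_qconfig("fbgemm")}
prepared = prepare_fx(model, qconfig_dict)   # inserts observers
prepared(torch.randn(1, 8))                  # calibration pass
quantized = convert_fx(prepared)             # dispatches into the convert() handlers above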
Kyubyong/neurobind
|
[
"f872dbe76ea76f8dfa2423d99ccc44031a41591a"
] |
[
"validation_check.py"
] |
[
"# -*- coding: utf-8 -*-\n# /usr/bin/python2\n'''\nBy kyubyong park. [email protected].\nhttps://www.github.com/kyubyong/neurobind.\n'''\n\nfrom __future__ import print_function\n\nimport os\n\nfrom scipy.stats import spearmanr\n\nfrom data_load import get_batch_data, load_data\nfrom hyperparams import Hyperparams as hp\nimport tensorflow as tf\nfrom train import Graph\n\n\ndef validation_check():\n # Load graph\n g = Graph(is_training=False); print(\"Graph loaded\")\n\n # Load data\n X, Y = load_data(mode=\"val\")\n\n with g.graph.as_default():\n sv = tf.train.Supervisor()\n with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n # Restore parameters\n sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)); print(\"Restored!\")\n\n # Get model\n mname = open(hp.logdir + '/checkpoint', 'r').read().split('\"')[1] # model name\n\n # Inference\n if not os.path.exists(hp.results): os.mkdir(hp.results)\n with open(os.path.join(hp.results, \"validation_results.txt\"), 'a') as fout:\n expected, predicted = [], []\n for step in range(len(X) // hp.batch_size):\n x = X[step * hp.batch_size: (step + 1) * hp.batch_size]\n y = Y[step * hp.batch_size: (step + 1) * hp.batch_size]\n\n # predict intensities\n logits = sess.run(g.logits, {g.x: x})\n\n expected.extend(list(y))\n predicted.extend(list(logits))\n\n # Get spearman coefficients\n score, _ = spearmanr(expected, predicted)\n fout.write(\"{}\\t{}\\n\".format(mname, score))\n\nif __name__ == '__main__':\n validation_check()\n print(\"Done\")\n\n"
] |
[
[
"tensorflow.ConfigProto",
"scipy.stats.spearmanr",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.Supervisor"
]
] |
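The script above boils down to one statistic: the Spearman rank correlation between measured and predicted binding intensities. A self-contained illustration of the scipy call it relies on (the values are made up):

from scipy.stats import spearmanr

expected = [0.10, 0.40, 0.35, 0.80]   # hypothetical ground-truth intensities
predicted = [0.20, 0.30, 0.45, 0.90]  # hypothetical model outputs
score, pvalue = spearmanr(expected, predicted)
print(score)  # rank correlation in [-1, 1]; 0.8 for these ranks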
pysepty/finvizfinance
|
[
"ca270e7bd07da4d4c88b152fff936024713a09ef"
] |
[
"finvizfinance/screener/custom.py"
] |
[
"import pandas as pd\nfrom finvizfinance.screener.overview import Overview\nfrom finvizfinance.util import webScrap, progressBar, NUMBER_COL\n\"\"\"\n.. module:: screen.custom\n :synopsis: screen custom table.\n\n.. moduleauthor:: Tianning Li <[email protected]>\n\"\"\"\n\ncolumns = {\n 0: 'No.',\n 1: 'Ticker',\n 2: 'Company',\n 3: 'Sector',\n 4: 'Industry',\n 5: 'Country',\n 6: 'Market Cap.',\n 7: 'P/E',\n 8: 'Forward P/E',\n 9: 'PEG',\n 10: 'P/S',\n 11: 'P/B',\n 12: 'P/Cash',\n 13: 'P/Free Cash Flow',\n 14: 'Dividend Yield',\n 15: 'Payout Ratio',\n 16: 'EPS',\n 17: 'EPS growth this year',\n 18: 'EPS growth next year',\n 19: 'EPS growth past 5 years',\n 20: 'EPS growth next 5 years',\n 21: 'Sales growth past 5 years',\n 22: 'EPS growth qtr over qtr',\n 23: 'Sales growth qtr over qtr',\n 24: 'Shares Outstanding',\n 25: 'Shares Float',\n 26: 'Insider Ownership',\n 27: 'Insider Transactions',\n 28: 'Institutional Ownership',\n 29: 'Institutional Transactions',\n 30: 'Float Short',\n 31: 'Short Ratio',\n 32: 'Return on Assets',\n 33: 'Return on Equity',\n 34: 'Return on Investments',\n 35: 'Current Ratio',\n 36: 'Quick Ratio',\n 37: 'Long Term Debt/Equity',\n 38: 'Total Debt/Equity',\n 39: 'Gross Margin',\n 40: 'Operating Margin',\n 41: 'Net Profit Margin',\n 42: 'Performance (Week)',\n 43: 'Performance (Month)',\n 44: 'Performance (Quarter)',\n 45: 'Performance (Half Year)',\n 46: 'Performance (Year)',\n 47: 'Performance (YearToDate)',\n 48: 'Beta',\n 49: 'Average True Range',\n 50: 'Volatility (Week)',\n 51: 'Volatility (Month)',\n 52: '20-Day Simple Moving Average',\n 53: '50-Day Simple Moving Average',\n 54: '200-Day Simple Moving Average',\n 55: '50-Day High',\n 56: '50-Day Low',\n 57: '52-Week High',\n 58: '52-Week Low',\n 59: 'RSI',\n 60: 'Change from Open',\n 61: 'Gap',\n 62: 'Analyst Recom.',\n 63: 'Average Volume',\n 64: 'Relative Volume',\n 65: 'Price',\n 66: 'Change',\n 67: 'Volume',\n 68: 'Earnings Date',\n 69: 'Target Price',\n 70: 'IPO Date'\n}\n\n\nclass Custom(Overview):\n \"\"\"Custom inherit from overview module.\n Getting information from the finviz screener custom page.\n \"\"\"\n def __init__(self):\n \"\"\"initiate module\n \"\"\"\n self.BASE_URL = 'https://finviz.com/screener.ashx?v=151{signal}{filter}&ft=4{ticker}'\n self.url = self.BASE_URL.format(signal='', filter='', ticker='')\n Overview._loadSetting(self)\n\n def getColumns(self):\n \"\"\"Get information about the columns\n\n Returns:\n columns(dict): return the index and column name.\n \"\"\"\n return columns\n\n def _screener_helper(self, i, page, rows, df, num_col_index, table_header, limit):\n \"\"\"Get screener table helper function.\n\n Returns:\n df(pandas.DataFrame): screener information table\n \"\"\"\n if i == page - 1:\n df = self._get_table(rows, df, num_col_index, table_header, limit=((limit - 1) % 20 + 1))\n else:\n df = self._get_table(rows, df, num_col_index, table_header)\n return df\n\n def ScreenerView(self,\n order='ticker',\n limit=-1,\n verbose=1,\n ascend=True,\n columns=[0, 1, 2, 3, 4, 5, 6, 7, 65, 66, 67]):\n \"\"\"Get screener table.\n\n Args:\n order(str): sort the table by the choice of order.\n limit(int): set the top k rows of the screener.\n verbose(int): choice of visual the progress. 1 for visualize progress.\n ascend(bool): if True, the order is ascending.\n columns(list): columns of your choice. 
Default index: 0,1,2,3,4,5,6,7,65,66,67.\n Returns:\n df(pandas.DataFrame): screener information table\n \"\"\"\n url = self.url\n if order != 'ticker':\n if order not in self.order_dict:\n order_keys = list(self.order_dict.keys())\n raise ValueError(\"Invalid order '{}'. Possible order: {}\".format(order, order_keys))\n url = self.url+'&'+self.order_dict[order]\n if not ascend:\n url = url.replace('o=', 'o=-')\n columns = [str(i) for i in columns]\n url += '&c=' + ','.join(columns)\n soup = webScrap(url)\n\n page = self._get_page(soup)\n if page == 0:\n print('No ticker found.')\n return None\n\n if limit != -1:\n if page > (limit-1)//20+1:\n page = (limit-1)//20+1\n\n if verbose == 1:\n progressBar(1, page)\n table = soup.findAll('table')[18]\n rows = table.findAll('tr')\n table_header = [i.text for i in rows[0].findAll('td')][1:]\n num_col_index = [table_header.index(i) for i in table_header if i in NUMBER_COL]\n df = pd.DataFrame([], columns=table_header)\n df = self._screener_helper(0, page, rows, df, num_col_index, table_header, limit)\n\n for i in range(1, page):\n if verbose == 1:\n progressBar(i+1, page)\n\n url = self.url\n if order == 'ticker':\n url += '&r={}'.format(i * 20 + 1)\n else:\n url += '&r={}'.format(i * 20 + 1)+'&'+self.order_dict[order]\n if not ascend:\n url = url.replace('o=', 'o=-')\n url += '&c=' + ','.join(columns)\n soup = webScrap(url)\n table = soup.findAll('table')[18]\n rows = table.findAll('tr')\n df = self._screener_helper(i, page, rows, df, num_col_index, table_header, limit)\n return df\n"
] |
[
[
"pandas.DataFrame"
]
] |
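Typical driver code for the Custom screener defined above (a sketch; it assumes the finviz page layout the scraper expects is still in place):

from finvizfinance.screener.custom import Custom

screener = Custom()
print(screener.getColumns())   # index -> column-name map, e.g. 65: 'Price'
df = screener.ScreenerView(order='ticker',
                           limit=40,   # top 40 rows, i.e. two result pages
                           columns=[0, 1, 2, 3, 4, 5, 6, 7, 65, 66, 67])
print(df.head())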
StephenSpicer/Unit03_Sprint02_Clone
|
[
"be64a7f1ce6f94baa13b9f690d2e0b61a0df126b"
] |
[
"module2-sql-for-analysis/titanic_to_sql.py"
] |
[
"\"\"\" lets send that titanic csv on over to an sqlite db \"\"\"\n\nimport pandas as pd\nimport sqlite3\n\ndf_titanic = pd.read_csv('titanic.csv')\n\ndf_titanic = df_titanic.replace(\"'\", \" \", regex=True)\n\n\nconn = sqlite3.connect('titanic_to_sql.sqlite3')\nconn\n\ncurs = conn.cursor()\ncurs\n\ndf_titanic.to_sql('titanic', conn)"
] |
[
[
"pandas.read_csv"
]
] |
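A quick round-trip check for the loader above (sketch; it reuses the database file the script creates):

import sqlite3
import pandas as pd

conn = sqlite3.connect('titanic_to_sql.sqlite3')
print(pd.read_sql_query('SELECT * FROM titanic LIMIT 5;', conn))
conn.close()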
danzelenak-usgs/Science_Validation
|
[
"b437f1c90772227f2b509bd66ea57ba27d6461e6"
] |
[
"espa_validation/validate_data/file_io.py"
] |
[
"\"\"\"Various methods for interacting with files\"\"\"\n\nimport sys\nimport os\nimport logging\nimport tarfile\nimport time\nimport fnmatch\nimport shutil\nimport itertools\n\n\nclass Extract:\n @staticmethod\n def unzip_gz_files(tests: list, masts: list) -> None:\n \"\"\"\n Extract files from archives in sorted order\n :param tests: List of paths to the test .gz archives\n :param masts: List of paths to the master .gz archives\n :return:\n \"\"\"\n print('Warning: decompressing files. Make sure you have the necessary '\n 'disk space to complete this operation...\\n')\n\n time.sleep(5)\n\n # Make sure the lists are sorted\n masts.sort()\n\n tests.sort()\n\n for mast, test in zip(masts, tests):\n try:\n tar_mast = tarfile.open(mast, 'r:gz')\n\n tar_test = tarfile.open(test, 'r:gz')\n\n logging.info(\"{0} is {1} MB...\\n\".format(mast, os.path.getsize(mast) * 0.000001))\n\n logging.info(\"{0} is {1} MB...\\n\".format(test, os.path.getsize(test) * 0.000001))\n\n if os.path.getsize(mast) == 0:\n logging.critical(\"Archive {0} is of zero size!\".format(mast))\n\n sys.exit(1)\n\n elif os.path.getsize(test) == 0:\n logging.critical(\"Archive {0} is of zero size!\".format(test))\n\n sys.exit(1)\n\n except:\n logging.critical(\"Problem with .tar.gz file(s): {0} and {1}.\".format(mast, test))\n\n sys.exit(1)\n\n try:\n tar_mast.extractall(path=os.path.dirname(mast))\n\n tar_test.extractall(path=os.path.dirname(test))\n\n except:\n logging.critical(\"Problem extracting contents from .tar.gz. file:\"\n \"{0} and {1}.\".format(mast, test))\n\n return None\n\n\nclass Find:\n @staticmethod\n def find_files(target_dir: str, ext: str) -> list:\n \"\"\"\n Recursively find files by extension\n :param target_dir: The full path to the target directory\n :param ext: The file type to look for\n :return:\n \"\"\"\n out_files = list()\n\n for root, dirnames, filenames in os.walk(target_dir):\n for filename in fnmatch.filter(filenames, \"*{}\".format(ext)):\n out_files.append(os.path.join(root, filename))\n\n if len(out_files) == 0:\n logging.critical(\"No files found in dir {0}\".format(target_dir))\n\n return out_files\n\n @staticmethod\n def get_ext(*args):\n \"\"\"Get unique extensions for all extracted files. 
Ignore .gz files.\n\n Args:\n *args <str>: string(s) of file extensions\n \"\"\"\n\n exts = []\n for i in args:\n exts += [os.path.splitext(j)[1] for j in i if '.gz' not in j]\n\n logging.info(\"All extensions: {0}\".format(exts))\n logging.info(\"Unique extensions: {0}\".format(list(set(exts))))\n\n return list(set(exts))\n\n @staticmethod\n def count(fn_test, test, fn_mast, mast, ext):\n \"\"\"Count number of bands inside file to decide how to iterate through\n file.\n\n Args:\n fn_test <str>: file name of test raster.\n test <osgeo.gdal.Dataset>: test raster\n fn_mast <str>: file name of master raster.\n mast <osgeo.gdal.Dataset>: master raster\n ext <str>: file extension of raster\n \"\"\"\n\n def count_bands(r_name, raster):\n \"\"\"Count number of bands inside raster\n\n Args:\n r_name <str>: file name of raster\n raster <osgeo.gdal.Dataset>: raster\n \"\"\"\n try:\n from osgeo import gdal\n except ImportError:\n import gdal\n\n d_r = raster.RasterCount\n\n logging.info(\"Number of bands in {0}: {1}\".format(r_name, d_r))\n\n return d_r\n\n def count_sds(r_name, raster):\n \"\"\"Count number of SDS inside raster.\n\n Args:\n r_name <str>: file name of raster\n raster <osgeo.gdal.Dataset>: raster\n \"\"\"\n try:\n from osgeo import gdal\n except ImportError:\n import gdal\n\n d_r = len(raster.GetSubDatasets())\n\n logging.info(\"Number of SDS in {0}: {1}\".format(r_name, d_r))\n\n return d_r\n\n # count bands in each raster. if > 1, then handle differently\n if ext == \".img\":\n # count_bands returns a 0 if there's <= 1 band in data\n d_range_test = count_bands(fn_test, test)\n d_range_mast = count_bands(fn_mast, mast)\n\n elif ext == \".hdf\" or ext == \".nc\":\n d_range_test = count_sds(fn_test, test)\n d_range_mast = count_sds(fn_mast, mast)\n\n else:\n d_range_test = 1\n d_range_mast = 1\n\n if d_range_test == 1:\n logging.info(\"File {0} is a singleband raster.\".format(fn_test))\n else:\n logging.info(\"File {0} is a multiband raster.\".format(fn_test))\n\n if d_range_mast == 1:\n logging.info(\"File {0} is a singleband raster.\".format(fn_mast))\n else:\n logging.info(\"File {0} is a multiband raster.\".format(fn_mast))\n\n if int(d_range_test) != int(d_range_mast):\n logging.critical(\"Number of sub-bands inside raster do not match. \"\n \"Test: {0} | Master: {1}.\".\n format(d_range_test, d_range_mast))\n d_range = None\n\n else:\n d_range = d_range_test\n\n return d_range\n\n\nclass ImWrite:\n @staticmethod\n def plot_diff_image(test, mast, diff_raster, fn_out, fn_type, dir_out,\n do_abs=False):\n \"\"\"Take difference array and plot as image.\n\n Args:\n test <str>: name of test file\n mast <str>: name of mast file\n diff_raster <numpy.ndarray>: numpy array of values\n fn_out <str>: basename for file\n fn_type <str>: defines title of plot - \"diff\" or \"pct_diff\"\n dir_out <str>: directory where output data are being stored\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n\n # mask pixels that did not differ\n diff_raster = np.ma.masked_where(diff_raster == 0, diff_raster)\n\n # make output file\n im_out = dir_out + os.sep + fn_out + \"_\" + fn_type + \".png\"\n\n # plot diff figure\n if do_abs:\n plt.imshow(np.abs(diff_raster), cmap='gist_gray')\n plt.colorbar(label=\"Abs. 
Difference\")\n else:\n plt.imshow(diff_raster, cmap='PuOr')\n plt.colorbar(label=\"Difference\")\n\n # annotate plot with file names\n plt.annotate(str(mast) + \"\\n\" +\n str(test) + \"\\n\",\n fontsize=5,\n xy=(0.01, 0.94),\n xycoords='axes fraction')\n\n plt.title(fn_out, y=1.05)\n plt.savefig(im_out, dpi=250)\n plt.close()\n\n logging.warning(\"{0} raster written to {1}.\".format(fn_type, im_out))\n\n @staticmethod\n def plot_hist(test, mast, diff_raster, fn_out, fn_type, dir_out,\n bins=False):\n \"\"\"Take difference array and plot as histogram.\n\n Args:\n test <str>: name of test file\n mast <str>: name of master file\n diff_raster <numpy.ndarray>: numpy array of values\n fn_out <str>: basename for file\n fn_type <str>: defines title of plot - \"diff\" or \"pct_diff\"\n dir_out <str>: directory where output data are being stored\n bins <int>: number of bins for histogram (default=255)\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n\n def bin_size(rast):\n \"\"\"Determine bin size based upon data type.\n\n Args:\n rast <numpy.ndarray>: numpy array of values\n \"\"\"\n dt = rast.dtype\n\n if '64' or '32' in dt.name:\n return 2000\n elif '16' in dt.name:\n return 1000\n elif '8' in dt.name:\n return 256\n else:\n return 50\n\n # mask pixels that did not differ\n diff_raster = np.ma.masked_where(diff_raster == 0, diff_raster)\n\n # make output file\n im_out = dir_out + os.sep + fn_out + \"_\" + fn_type + \"_hist.png\"\n\n # get array of values that are actually different\n diff_valid = diff_raster.compressed()\n\n # determine bin size\n if not bins:\n bins = bin_size(diff_raster)\n\n # do histogram\n try:\n plt.hist(diff_valid, bins)\n except AttributeError:\n logging.warning(\"Difference values from diff_valid variable could\"\n \" not be plotted.\")\n return\n\n # do basic stats\n diff_mean = np.mean(diff_raster)\n diff_sd = np.std(diff_raster)\n diff_abs_mean = np.mean(np.abs(diff_raster))\n diff_pix = len(diff_valid)\n diff_pct = (np.float(diff_pix) / np.product(np.shape(diff_raster))) \\\n * 100.0\n\n # annotate plot with file names\n plt.annotate(str(mast) + \"\\n\" +\n str(test) + \"\\n\",\n fontsize=5,\n xy=(0.01, 0.94),\n xycoords='axes fraction')\n\n # annotate plot with basic stats\n plt.annotate(\"mean diff: \" + str(round(diff_mean, 3)) + \"\\n\" +\n \"std. dev.: \" + str(round(diff_sd, 3)) + \"\\n\" +\n \"abs. 
mean diff: \" + str(round(diff_abs_mean, 3)) + \"\\n\" +\n \"# diff pixels: \" + str(diff_pix) + \"\\n\" +\n \"% diff: \" + str(round(diff_pct, 3)) + \"\\n\" +\n \"# bins: \" + str(bins) + \"\\n\",\n xy=(0.68, 0.72),\n xycoords='axes fraction')\n\n # write figure out to PNG\n plt.savefig(im_out, bbox_inches=\"tight\", dpi=350)\n\n plt.close()\n\n logging.warning(\"Difference histogram written to {0}.\".format(im_out))\n\n\nclass Cleanup:\n @staticmethod\n def remove_nonmatching_files(test_fnames, mast_fnames):\n \"\"\"Get rid of files that do not match so files are not incorrectly\n compared.\n\n Args:\n test_fnames <str>: test file\n mast_fnames <str>: master file\n \"\"\"\n def rm_fn(fns):\n \"\"\"Grab just the filename\n\n Args:\n fns <list>: list of paths to files\n \"\"\"\n split_fnames = []\n for i in fns:\n split_fnames.append(i.split(os.sep)[-1])\n return split_fnames\n\n def compare_and_rm(t_names, m_names):\n \"\"\"\n Compare just the file names, remove non-matches and return a list\n :param t_names: test file names\n :param m_names: master file names\n :return:\n \"\"\"\n fn_diffs = sorted(list(set(rm_fn(t_names))\n .difference(set(rm_fn(m_names)))))\n\n if len(fn_diffs) > 0:\n logging.warning(\"Files to be removed: {0}\".format(fn_diffs))\n\n if len(fn_diffs) == 0:\n return t_names\n\n # get only file name\n test_fn = rm_fn(t_names)\n\n rm = []\n for ii in test_fn:\n if ii in fn_diffs:\n rm.append(False)\n else:\n rm.append(True)\n\n logging.debug(\"remove boolean: {0}\".format(rm))\n logging.debug(\"test_fn: {0}\".format(test_fn))\n logging.debug(\"final list: {0}\".format(list(\n itertools.compress(t_names, rm))))\n\n return list(itertools.compress(t_names, rm))\n\n test_output = compare_and_rm(test_fnames, mast_fnames)\n mast_output = compare_and_rm(mast_fnames, test_fnames)\n\n # if str, convert to list (other processes expect lists)\n if type(test_output) is str:\n test_output = [test_output]\n if type(mast_output) is str:\n mast_output = [mast_output]\n\n return test_output, mast_output\n\n @staticmethod\n def cleanup_files(indir: str):\n \"\"\"\n Clean up all unpacked files, leaving alone the .tar.gz archives\n :param indir: Full path to the target directory\n :return:\n \"\"\"\n print(\"Cleaning up files...\")\n\n all_files = [os.path.join(dirpath, f)\n for dirpath, dirnames, files in os.walk(indir)\n for f in fnmatch.filter(files, '*')]\n\n for f in all_files:\n if f.endswith(\".tar.gz\"):\n continue\n\n else:\n try:\n os.remove(f)\n\n except:\n continue\n\n logging.warning(\"Cleaned up all data files.\")\n\n # Clean up gap mask files\n gm = [os.path.join(dirpath, f)\n for dirpath, dirnames, files in os.walk(indir)\n for f in fnmatch.filter(dirnames, 'gap_mask')]\n\n st = [os.path.join(dirpath, f)\n for dirpath, dirnames, files in os.walk(indir)\n for f in fnmatch.filter(dirnames, 'stats')]\n\n [shutil.rmtree(i, ignore_errors=True) for i in gm]\n\n [shutil.rmtree(i, ignore_errors=True) for i in st]\n\n logging.warning(\"Removed all non-archive files.\")\n\n return None\n\n @staticmethod\n def rm_files(envi_files, ext):\n \"\"\"Remove files from list, by specific extension\n\n Args:\n envi_files <list>: file names to be checked\n ext <str>: file extension to be removed\"\"\"\n out_files = [i for i in envi_files if ext not in i]\n\n logging.info(\"Skipping analysis of {0} file {1}\".\n format(ext, [i for i in envi_files if ext in i]))\n\n return out_files\n\n\nclass Read:\n @staticmethod\n def open_xml(xml_in):\n \"\"\"\n Open XML and get band-specific information.\n\n 
Args:\n xml_in <str>: file path to XML\n \"\"\"\n from collections import defaultdict\n import xml.etree.ElementTree as ET\n\n '''source: http://stackoverflow.com/questions/7684333/converting-xml-\n to-dictionary-using-elementtree'''\n\n def etree_to_dict(t):\n d = {t.tag: {} if t.attrib else None}\n children = list(t)\n if children:\n dd = defaultdict(list)\n for dc in map(etree_to_dict, children):\n for k, v in dc.items():\n dd[k].append(v)\n d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in\n dd.items()}}\n if t.attrib:\n d[t.tag].update(('@' + k, v) for k, v in t.attrib.items())\n if t.text:\n text = t.text.strip()\n if children or t.attrib:\n if text:\n d[t.tag]['#text'] = text\n else:\n d[t.tag] = text\n return d\n\n root = ET.parse(xml_in).getroot()\n\n xml_dict = etree_to_dict(root)\n\n bands = []\n for band in xml_dict['{http://espa.cr.usgs.gov/v2}espa_metadata'] \\\n ['{http://espa.cr.usgs.gov/v2}bands'] \\\n ['{http://espa.cr.usgs.gov/v2}band']:\n bands.append(band['{http://espa.cr.usgs.gov/v2}file_name'])\n\n if len(bands) == 0:\n return\n\n else:\n return bands\n"
] |
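Read.open_xml above walks an ESPA metadata tree and returns the band file names it finds, or None when no bands are listed. A short usage sketch (the XML file name here is hypothetical):

    # hypothetical ESPA metadata file; open_xml returns a list of band
    # file names, or None if the document lists no bands
    bands = Read.open_xml("LE07_L1TP_046027_20170510_espa.xml")
    if bands is None:
        print("No bands listed in metadata.")
    else:
        for band_file in bands:
            print(band_file)

Note that open_xml only looks for elements under the http://espa.cr.usgs.gov/v2 namespace hard-coded in the lookup keys, so metadata written against a different schema version would yield nothing.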
[
[
"matplotlib.pyplot.imshow",
"numpy.abs",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"numpy.std",
"numpy.mean",
"numpy.shape",
"matplotlib.pyplot.close",
"numpy.ma.masked_where",
"matplotlib.pyplot.hist",
"numpy.float"
]
] |
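The summary statistics that plot_hist annotates onto the histogram (mean, standard deviation, absolute mean, differing-pixel count and percentage) follow directly from the masked difference raster. A minimal standalone sketch of the same computation on a toy array:

    import numpy as np

    # toy "difference" raster: two pixels differ, the rest are zero
    diff_raster = np.array([[0, 0, 3],
                            [0, -2, 0]])

    # mask pixels that did not differ, as plot_hist does
    masked = np.ma.masked_where(diff_raster == 0, diff_raster)
    diff_valid = masked.compressed()           # array([ 3, -2])

    diff_mean = np.mean(masked)                # 0.5
    diff_sd = np.std(masked)                   # 2.5
    diff_abs_mean = np.mean(np.abs(masked))    # 2.5
    diff_pix = len(diff_valid)                 # 2
    diff_pct = (float(diff_pix) / np.prod(np.shape(masked))) * 100.0  # ~33.3

Because the statistics are taken over the masked array, unchanged pixels are excluded from the mean and standard deviation rather than dragging them toward zero.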
piglaker/SpecialEdition
|
[
"172688ef111e1b5c62bdb1ba0a523a2654201b90",
"172688ef111e1b5c62bdb1ba0a523a2654201b90"
] |
[
"models/bert/modeling_bert_v4.py",
"utils/fastNLP_module.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model. \"\"\"\n\"\"\"For Contrastive Learning \"\"\"\n\n\nimport math\nimport os\nfrom timeit import repeat\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\nfrom fastNLP import BCELoss\n\nimport torch\nimport torch.utils.checkpoint\nfrom packaging import version\nfrom torch import binary_cross_entropy_with_logits, nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\nfrom transformers import AutoConfig\n\nfrom transformers.activations import ACT2FN\nfrom transformers.file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom transformers.modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n NextSentencePredictorOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom transformers.modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom transformers.utils import logging\nfrom transformers.models.bert.configuration_bert import BertConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"bert-base-uncased\"\n_CONFIG_FOR_DOC = \"BertConfig\"\n_TOKENIZER_FOR_DOC = \"BertTokenizer\"\n\nBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"bert-base-uncased\",\n \"bert-large-uncased\",\n \"bert-base-cased\",\n \"bert-large-cased\",\n \"bert-base-multilingual-uncased\",\n \"bert-base-multilingual-cased\",\n \"bert-base-chinese\",\n \"bert-base-german-cased\",\n \"bert-large-uncased-whole-word-masking\",\n \"bert-large-cased-whole-word-masking\",\n \"bert-large-uncased-whole-word-masking-finetuned-squad\",\n \"bert-large-cased-whole-word-masking-finetuned-squad\",\n \"bert-base-cased-finetuned-mrpc\",\n \"bert-base-german-dbmdz-cased\",\n \"bert-base-german-dbmdz-uncased\",\n \"cl-tohoku/bert-base-japanese\",\n \"cl-tohoku/bert-base-japanese-whole-word-masking\",\n \"cl-tohoku/bert-base-japanese-char\",\n \"cl-tohoku/bert-base-japanese-char-whole-word-masking\",\n \"TurkuNLP/bert-base-finnish-cased-v1\",\n \"TurkuNLP/bert-base-finnish-uncased-v1\",\n \"wietsedv/bert-base-dutch-cased\",\n # See all BERT models at https://huggingface.co/models?filter=bert\n]\n\n\ndef load_tf_weights_in_bert(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {name}\")\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n if version.parse(torch.__version__) > version.parse(\"1.6.0\"):\n self.register_buffer(\n \"token_type_ids\",\n torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),\n persistent=False,\n )\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if 
input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]\n\n # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs\n # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves\n # issue #5664\n if token_type_ids is None:\n if hasattr(self, \"token_type_ids\"):\n buffered_token_type_ids = self.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = 
self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + 
(self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = BertAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = 
config.add_cross_attention\n if self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n past_key_value=self_attn_past_key_value,\n )\n attention_output = self_attention_outputs[0]\n\n # if decoder, the last output is tuple of self-attn cache\n if self.is_decoder:\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n else:\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n cross_attn_present_key_value = None\n if self.is_decoder and encoder_hidden_states is not None:\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n\n # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n cross_attn_past_key_value,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n\n # add cross-attn cache to positions 3,4 of present_key_value tuple\n cross_attn_present_key_value = cross_attention_outputs[-1]\n present_key_value = present_key_value + cross_attn_present_key_value\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n\n # if decoder, return the attn key/values as the last output\n if self.is_decoder:\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n\n next_decoder_cache = () if use_cache else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = 
all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n #hidden_states = self.transform(hidden_states)\n hidden_states = 
self.decoder(hidden_states)\n return hidden_states\n\n\nclass BertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BertLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\nclass BertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BertLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass BertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = BertConfig\n load_tf_weights = load_tf_weights_in_bert\n base_model_prefix = \"bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\n@dataclass\nclass BertForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.BertForPreTraining`.\n\n Args:\n loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n Total loss as the sum of the masked language modeling loss and the next sequence prediction\n (classification) loss.\n prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation\n before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, 
sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n prediction_logits: torch.FloatTensor = None\n seq_relationship_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. 
See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.\",\n BERT_START_DOCSTRING,\n)\nclass BertModel(BertPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n \"\"\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n\n self.pooler = BertPooler(config) if add_pooling_layer else None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. 
Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we 
keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n \"\"\",\n BERT_START_DOCSTRING,\n)\nclass BertForPreTraining(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = BertModel(config)\n self.cls = BertPreTrainingHeads(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n next_sentence_label=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair\n (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:\n\n - 0 indicates sequence B is a continuation of sequence A,\n - 1 indicates sequence B is a random sequence.\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n\n Example::\n\n >>> from transformers import BertTokenizer, BertForPreTraining\n >>> import torch\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n >>> model = BertForPreTraining.from_pretrained('bert-base-uncased')\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> seq_relationship_logits = outputs.seq_relationship_logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output, pooled_output = outputs[:2]\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n total_loss = None\n if labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n\n if not return_dict:\n output = (prediction_scores, seq_relationship_score) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return BertForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=prediction_scores,\n seq_relationship_logits=seq_relationship_score,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a `language modeling` head on top for CLM fine-tuning. 
\"\"\", BERT_START_DOCSTRING\n)\nclass BertLMHeadModel(BertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`\")\n\n self.bert = BertModel(config, add_pooling_layer=False)\n self.cls = BertOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n Returns:\n\n Example::\n\n >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig\n >>> import torch\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')\n >>> config = BertConfig.from_pretrained(\"bert-base-cased\")\n >>> config.is_decoder = True\n >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"past_key_values\": past}\n\n def _reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n\n\n@add_start_docstrings(\"\"\"Bert Model with a `language modeling` head on top.\"\"\", BERT_START_DOCSTRING)\nclass BertForMaskedLM(BertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n 
super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.bert = BertModel(config, add_pooling_layer=False)\n self.cls = BertOnlyMLMHead(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the\n loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n effective_batch_size = input_shape[0]\n\n # add a dummy token\n if self.config.pad_token_id is None:\n raise ValueError(\"The PAD token should be defined for generation\")\n\n attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)\n dummy_token = torch.full(\n (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device\n )\n input_ids = torch.cat([input_ids, dummy_token], dim=1)\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n\n\n@add_start_docstrings(\"\"\"Bert Model with a `language modeling` head on top. 
\"\"\", BERT_START_DOCSTRING)\nclass BertForMaskedLM_CL(BertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.bert = BertModel(config, add_pooling_layer=False)\n \n self.cls = BertOnlyMLMHead(config)\n\n self.mlp = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.mlp.bias = self.bias\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n #prediction_scores = self.cls(sequence_output)\n prediction_scores = self.mlp(sequence_output)\n\n logits = prediction_scores\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n \n #print(prediction_scores.shape)\n #(labels.shape)\n\n prediction_scores = torch.softmax(prediction_scores, dim=2)\n\n # prediction_scores: [batch_size, max_length, vocab_size], labels: [batch_size, max_length]\n # get gold P\n \n #prediction_scores = prediction_scores.float()\n\n #labels = labels.long()\n\n #labels_tmp = torch.where( labels == -100, torch.tensor(-1, dtype=torch.long).cuda(), labels)\n\n contrastive_learning_positive = prediction_scores.view(-1, self.config.vocab_size)[ torch.tensor(range(prediction_scores.view(-1, 21128).shape[0])), labels.view(-1) ].view(labels.shape[0], labels.shape[1]) * attention_mask\n\n mask = torch.ones_like(prediction_scores)\n\n mask.view(-1, 
self.config.vocab_size)[ torch.tensor(range(mask.view(-1, self.config.vocab_size).shape[0])) , labels.view(-1) ] = 0\n\n prediction_scores_tmp = mask * prediction_scores\n\n contrastive_laerning_negative = prediction_scores_tmp.topk(5, dim=2)[0]\n\n l_cpo = -1 * (contrastive_learning_positive.sum() - contrastive_laerning_negative.sum() / 5) / prediction_scores.shape[0] \n\n masked_lm_loss += 1 * l_cpo\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=logits,#prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n effective_batch_size = input_shape[0]\n\n # add a dummy token\n assert self.config.pad_token_id is not None, \"The PAD token should be defined for generation\"\n attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)\n dummy_token = torch.full(\n (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device\n )\n input_ids = torch.cat([input_ids, dummy_token], dim=1)\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n\n\n@add_start_docstrings(\"\"\" Our Proto Bert Model with a `language modeling` head on top. \"\"\", BERT_START_DOCSTRING)\nclass ProtoBertForMaskedLM(BertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config, cl_weight, repeat_weight, copy_weight):\n print(\"Serious Warning: deprecated ! Please Use ModelZero\")\n exit()\n super().__init__(config)\n\n self.cl_weight = cl_weight\n\n self.repeat_weight = repeat_weight\n\n self.copy_weight = copy_weight\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.bert = BertModel(config, add_pooling_layer=False)\n \n self.cls = BertOnlyMLMHead(config)\n\n self.mlp = nn.Linear(config.hidden_size, config.vocab_size, bias=True)\n\n #self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n self.copy = nn.Linear(config.hidden_size, 1, bias=False)#self.config.vocab_size, bias=False)\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n #self.mlp.bias = self.bias\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n\n noneg_labels = torch.where(labels != -100, labels, 0)\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids, # noneg_labels, \n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hiddens = outputs[0] # 0\n\n prediction_scores = self.cls(hiddens)\n\n #prediction_scores = torch.softmax(torch.matmul(hiddens, self.bert.embeddings.word_embeddings.weight.T), dim=2)\n\n #prediction_scores = self.mlp(sequence_output)\n\n if self.copy_weight > 0:\n \n from torch.nn.functional import one_hot, binary_cross_entropy_with_logits, binary_cross_entropy, nll_loss\n\n copy_label = (noneg_labels == input_ids).long()\n\n # replace 0 with -100, may cause ddp device error here,\n copy_label_mask = torch.where(labels != -100, torch.tensor(1).cuda(), labels) - 1\n\n copy_label += copy_label_mask\n\n wcopy_h = self.copy(hiddens)\n\n copy_scores = torch.exp(wcopy_h) / (torch.exp(wcopy_h) + 1)\n\n prediction_scores = copy_scores.detach() * one_hot(input_ids, self.config.vocab_size) + (1-copy_scores).detach() * prediction_scores \n\n logits = prediction_scores\n\n if self.repeat_weight != 0 or self.cl_weight != 0:\n\n distill_outputs = self.bert(\n input_ids=noneg_labels,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n distill_hiddens = distill_outputs[0]\n\n distill_hiddens_detach = distill_hiddens.detach()\n distill_scores = self.cls(distill_hiddens)\n\n distill_hiddens_detach = torch.nn.functional.normalize(distill_hiddens_detach, dim=-1)\n \n hiddens = torch.nn.functional.normalize(hiddens, dim=-1)\n\n total_loss = None\n masked_lm_loss = None\n copy_loss = 0\n cl_loss = 0\n repeat_loss = 0\n\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n from torch.nn import BCELoss, BCEWithLogitsLoss\n loss_fct2 = BCEWithLogitsLoss()\n #loss_fct3 = nll_loss()\n if self.copy_weight > 0:\n\n #x_orig_eq_mlm = one_hot(copy_label, self.config.vocab_size)\n\n # total_copy_score = ( x_orig_eq_mlm * copy_scores + (1-x_orig_eq_mlm) * (1 - copy_scores))\n\n #copy_loss = loss_fct(copy_scores.view(-1, self.config.vocab_size), copy_label.view(-1))# + loss_fct((1-copy_scores).view(-1, self.config.vocab_size), (1-copy_label).view(-1))\n\n #copy_loss = loss_fct2(copy_scores.view(-1, self.config.vocab_size), one_hot(copy_label, self.config.vocab_size).float().view(-1, self.config.vocab_size))\n\n copy_loss = loss_fct2(copy_scores.view(-1), copy_label.view(-1).float())\n\n eq_label = (noneg_labels == input_ids).long()\n\n mask_mask = -100 * (noneg_labels != input_ids).long()\n\n masked_lm_loss = nll_loss(prediction_scores.view(-1, self.config.vocab_size), (eq_label*labels 
+ mask_mask).view(-1))\n else:\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if self.cl_weight > 0 :\n scores = torch.einsum('blh,bkh->blk', hiddens, distill_hiddens_detach) # bsz x len x len\n scores = scores.masked_fill(attention_mask.unsqueeze(1).repeat(1, attention_mask.size(1), 1).bool(), 0)\n scores = torch.exp(scores / 2)\n pos_scores = torch.diagonal(scores, dim1=1, dim2=2) # bsz x l\n neg_scores = scores.sum(dim=-1) # bsz x l \n cl_loss = -(pos_scores / neg_scores).log().masked_fill(attention_mask.eq(0), 0).sum() / attention_mask.sum()\n\n if self.repeat_weight > 0:\n no_target_mask = (input_ids != labels).bool()\n repeat_loss = torch.nn.functional.cross_entropy(distill_scores.view(-1, self.config.vocab_size), labels.masked_fill(no_target_mask, -100).view(-1), reduction='mean')\n\n total_loss = 1 * masked_lm_loss + \\\n self.cl_weight * cl_loss + \\\n self.repeat_weight * repeat_loss + \\\n self.copy_weight * copy_loss\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return MaskedLMOutput(\n loss=total_loss,\n logits=logits,#prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n effective_batch_size = input_shape[0]\n\n # add a dummy token\n assert self.config.pad_token_id is not None, \"The PAD token should be defined for generation\"\n attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)\n dummy_token = torch.full(\n (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device\n )\n input_ids = torch.cat([input_ids, dummy_token], dim=1)\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n\n\nclass ProtoModel(BertPreTrainedModel):\n def __init__(self, pretrained_model_name_or_path, cl_weight, repeat_weight, copy_weight):\n self.config = AutoConfig.from_pretrained(pretrained_model_name_or_path)\n\n super(ProtoModel, self).__init__(self.config)\n\n self.bert = BertModel.from_pretrained(pretrained_model_name_or_path)\n\n self.cl_weight = cl_weight\n\n self.repeat_weight = repeat_weight\n\n self.copy_weight = copy_weight\n\n self.cls = BertOnlyMLMHead(self.config)\n\n self.mlp = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=True)\n\n #self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n self.copy = nn.Linear(self.config.hidden_size, 1, bias=False)#self.config.vocab_size, bias=False)\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n #self.mlp.bias = self.bias\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n\n noneg_labels = torch.where(labels != -100, labels, 0)\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n noneg_labels,#input_ids, # noneg_labels, \n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hiddens = outputs[0] # 0\n\n prediction_scores = self.cls(hiddens)\n\n #prediction_scores = torch.softmax(torch.matmul(hiddens, self.bert.embeddings.word_embeddings.weight.T), dim=2)\n\n #prediction_scores = self.mlp(sequence_output)\n\n if self.copy_weight > 0:\n \n from torch.nn.functional import one_hot, binary_cross_entropy_with_logits, binary_cross_entropy, nll_loss\n\n copy_label = (noneg_labels == input_ids).long()\n\n # replace 0 with -100, may cause ddp device error here,\n copy_label_mask = torch.where(labels != -100, torch.tensor(1).cuda(), labels) - 1\n\n copy_label += copy_label_mask\n\n wcopy_h = self.copy(hiddens)\n\n copy_scores = torch.exp(wcopy_h) / (torch.exp(wcopy_h) + 1)\n\n prediction_scores = copy_scores.detach() * one_hot(input_ids, self.config.vocab_size) + (1-copy_scores).detach() * prediction_scores \n\n logits = prediction_scores\n\n if self.repeat_weight != 0 or self.cl_weight != 0:\n\n distill_outputs = self.bert(\n input_ids=noneg_labels,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n distill_hiddens = distill_outputs[0]\n\n distill_hiddens_detach = distill_hiddens.detach()\n distill_scores = self.cls(distill_hiddens)\n\n distill_hiddens_detach = torch.nn.functional.normalize(distill_hiddens_detach, dim=-1)\n \n hiddens = torch.nn.functional.normalize(hiddens, dim=-1)\n\n total_loss = None\n masked_lm_loss = None\n copy_loss = 0\n cl_loss = 0\n repeat_loss = 0\n\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n from torch.nn import BCELoss, BCEWithLogitsLoss\n loss_fct2 = BCEWithLogitsLoss()\n #loss_fct3 = nll_loss()\n if self.copy_weight > 0:\n\n #x_orig_eq_mlm = one_hot(copy_label, self.config.vocab_size)\n\n # total_copy_score = ( x_orig_eq_mlm * copy_scores + (1-x_orig_eq_mlm) * (1 - copy_scores))\n\n #copy_loss = loss_fct(copy_scores.view(-1, self.config.vocab_size), copy_label.view(-1))# + loss_fct((1-copy_scores).view(-1, self.config.vocab_size), (1-copy_label).view(-1))\n\n #copy_loss = loss_fct2(copy_scores.view(-1, self.config.vocab_size), one_hot(copy_label, self.config.vocab_size).float().view(-1, self.config.vocab_size))\n\n copy_loss = loss_fct2(copy_scores.view(-1), copy_label.view(-1).float())\n\n eq_label = (noneg_labels == input_ids).long()\n\n mask_mask = -100 * (noneg_labels != input_ids).long()\n\n masked_lm_loss = nll_loss(prediction_scores.view(-1, self.config.vocab_size), 
(eq_label*labels + mask_mask).view(-1))\n else:\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if self.cl_weight > 0 :\n scores = torch.einsum('blh,bkh->blk', hiddens, distill_hiddens_detach) # bsz x len x len\n scores = scores.masked_fill(attention_mask.unsqueeze(1).repeat(1, attention_mask.size(1), 1).bool(), 0)\n scores = torch.exp(scores / 2)\n pos_scores = torch.diagonal(scores, dim1=1, dim2=2) # bsz x l\n neg_scores = scores.sum(dim=-1) # bsz x l \n cl_loss = -(pos_scores / neg_scores).log().masked_fill(attention_mask.eq(0), 0).sum() / attention_mask.sum()\n\n if self.repeat_weight > 0:\n no_target_mask = (input_ids != labels).bool()\n repeat_loss = torch.nn.functional.cross_entropy(distill_scores.view(-1, self.config.vocab_size), labels.masked_fill(no_target_mask, -100).view(-1), reduction='mean')\n\n total_loss = 1 * masked_lm_loss + \\\n self.cl_weight * cl_loss + \\\n self.repeat_weight * repeat_loss + \\\n self.copy_weight * copy_loss\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return MaskedLMOutput(\n loss=total_loss,\n logits=logits,#prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n effective_batch_size = input_shape[0]\n\n # add a dummy token\n assert self.config.pad_token_id is not None, \"The PAD token should be defined for generation\"\n attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)\n dummy_token = torch.full(\n (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device\n )\n input_ids = torch.cat([input_ids, dummy_token], dim=1)\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n",
"#copy from https://github1s.com/LeeSureman/Flat-Lattice-Transformer/blob/HEAD/fastNLP_module.py#L1-L423\n\nfrom fastNLP.embeddings.embedding import TokenEmbedding\nfrom fastNLP.core import Vocabulary\nfrom fastNLP.io.file_utils import PRETRAIN_STATIC_FILES, _get_embedding_url, cached_path\nimport os\nimport warnings\nfrom collections import defaultdict\nfrom copy import deepcopy\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom fastNLP.core import logger\n#from fastNLP.modules.utils import _get_file_name_base_on_postfix\n#from utils import MyDropout\nfrom fastNLP.embeddings.contextual_embedding import ContextualEmbedding\nfrom fastNLP.embeddings.bert_embedding import _WordBertModel\nfrom fastNLP.io.file_utils import PRETRAINED_BERT_MODEL_DIR\n\nclass MyDropout(nn.Module):\n def __init__(self, p):\n super().__init__()\n assert 0<=p<=1\n self.p = p\n\n def forward(self, x):\n if self.training and self.p>0.001:\n # print('mydropout!')\n mask = torch.rand(x.size())\n # print(mask.device)\n mask = mask.to(x)\n # print(mask.device)\n mask = mask.lt(self.p)\n x = x.masked_fill(mask, 0)/(1-self.p)\n return x\n\n\nclass StaticEmbedding(TokenEmbedding):\n \"\"\"\n StaticEmbedding组件. 给定预训练embedding的名称或路径,根据vocab从embedding中抽取相应的数据(只会将出现在vocab中的词抽取出来,\n 如果没有找到,则会随机初始化一个值(但如果该word是被标记为no_create_entry的话,则不会单独创建一个值,而是会被指向unk的index))。\n 当前支持自动下载的预训练vector有以下的几种(待补充);\n\n Example::\n\n >>> from fastNLP import Vocabulary\n >>> from fastNLP.embeddings import StaticEmbedding\n >>> vocab = Vocabulary().add_word_lst(\"The whether is good .\".split())\n >>> embed = StaticEmbedding(vocab, model_dir_or_name='en-glove-50d')\n\n >>> vocab = Vocabulary().add_word_lst([\"The\", 'the', \"THE\"])\n >>> embed = StaticEmbedding(vocab, model_dir_or_name=\"en-glove-50d\", lower=True)\n >>> # \"the\", \"The\", \"THE\"它们共用一个vector,且将使用\"the\"在预训练词表中寻找它们的初始化表示。\n\n >>> vocab = Vocabulary().add_word_lst([\"The\", \"the\", \"THE\"])\n >>> embed = StaticEmbedding(vocab, model_dir_or_name=None, embedding_dim=5, lower=True)\n >>> words = torch.LongTensor([[vocab.to_index(word) for word in [\"The\", \"the\", \"THE\"]]])\n >>> embed(words)\n >>> tensor([[[ 0.5773, 0.7251, -0.3104, 0.0777, 0.4849],\n [ 0.5773, 0.7251, -0.3104, 0.0777, 0.4849],\n [ 0.5773, 0.7251, -0.3104, 0.0777, 0.4849]]],\n grad_fn=<EmbeddingBackward>) # 每种word的输出是一致的。\n\n \"\"\"\n\n def __init__(self, vocab: Vocabulary, model_dir_or_name: str = 'en', embedding_dim=-1, requires_grad: bool = True,\n init_method=None, lower=False, dropout=0, word_dropout=0, normalize=False, min_freq=1, **kwargs):\n \"\"\"\n\n :param vocab: Vocabulary. 若该项为None则会读取所有的embedding。\n :param model_dir_or_name: 可以有两种方式调用预训练好的static embedding:第一种是传入embedding文件夹(文件夹下应该只有一个\n 以.txt作为后缀的文件)或文件路径;第二种是传入embedding的名称,第二种情况将自动查看缓存中是否存在该模型,没有的话将自动下载。\n 如果输入为None则使用embedding_dim的维度随机初始化一个embedding。\n :param int embedding_dim: 随机初始化的embedding的维度,当该值为大于0的值时,将忽略model_dir_or_name。\n :param bool requires_grad: 是否需要gradient. 
默认为True\n :param callable init_method: 如何初始化没有找到的值。可以使用torch.nn.init.*中各种方法, 传入的方法应该接受一个tensor,并\n inplace地修改其值。\n :param bool lower: 是否将vocab中的词语小写后再和预训练的词表进行匹配。如果你的词表中包含大写的词语,或者就是需要单独\n 为大写的词语开辟一个vector表示,则将lower设置为False。\n :param float dropout: 以多大的概率对embedding的表示进行Dropout。0.1即随机将10%的值置为0。\n :param float word_dropout: 以多大的概率将一个词替换为unk。这样既可以训练unk也是一定的regularize。\n :param bool normalize: 是否对vector进行normalize,使得每个vector的norm为1。\n :param int min_freq: Vocabulary词频数小于这个数量的word将被指向unk。\n :param dict kwarngs: only_train_min_freq, 仅对train中的词语使用min_freq筛选; only_norm_found_vector是否仅对在预训练中找到的词语使用normalize。\n \"\"\"\n super(StaticEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout)\n if embedding_dim > 0:\n model_dir_or_name = None\n\n # 得到cache_path\n if model_dir_or_name is None:\n assert embedding_dim >= 1, \"The dimension of embedding should be larger than 1.\"\n embedding_dim = int(embedding_dim)\n model_path = None\n elif model_dir_or_name.lower() in PRETRAIN_STATIC_FILES:\n model_url = _get_embedding_url('static', model_dir_or_name.lower())\n model_path = cached_path(model_url, name='embedding')\n # 检查是否存在\n elif os.path.isfile(os.path.abspath(os.path.expanduser(model_dir_or_name))):\n model_path = os.path.abspath(os.path.expanduser(model_dir_or_name))\n elif os.path.isdir(os.path.abspath(os.path.expanduser(model_dir_or_name))):\n pass\n #model_path = _get_file_name_base_on_postfix(os.path.abspath(os.path.expanduser(model_dir_or_name)), '.txt')\n else:\n raise ValueError(f\"Cannot recognize {model_dir_or_name}.\")\n\n # 根据min_freq缩小vocab\n truncate_vocab = (vocab.min_freq is None and min_freq > 1) or (vocab.min_freq and vocab.min_freq < min_freq)\n if truncate_vocab:\n truncated_vocab = deepcopy(vocab)\n truncated_vocab.min_freq = min_freq\n truncated_vocab.word2idx = None\n if lower: # 如果有lower,将大小写的的freq需要同时考虑到\n lowered_word_count = defaultdict(int)\n for word, count in truncated_vocab.word_count.items():\n lowered_word_count[word.lower()] += count\n for word in truncated_vocab.word_count.keys():\n word_count = truncated_vocab.word_count[word]\n if lowered_word_count[word.lower()] >= min_freq and word_count < min_freq:\n truncated_vocab.add_word_lst([word] * (min_freq - word_count),\n no_create_entry=truncated_vocab._is_word_no_create_entry(word))\n\n # 只限制在train里面的词语使用min_freq筛选\n if kwargs.get('only_train_min_freq', False) and model_dir_or_name is not None:\n for word in truncated_vocab.word_count.keys():\n if truncated_vocab._is_word_no_create_entry(word) and truncated_vocab.word_count[word] < min_freq:\n truncated_vocab.add_word_lst([word] * (min_freq - truncated_vocab.word_count[word]),\n no_create_entry=True)\n truncated_vocab.build_vocab()\n truncated_words_to_words = torch.arange(len(vocab)).long()\n for word, index in vocab:\n truncated_words_to_words[index] = truncated_vocab.to_index(word)\n logger.info(\n f\"{len(vocab) - len(truncated_vocab)} out of {len(vocab)} words have frequency less than {min_freq}.\")\n vocab = truncated_vocab\n\n self.only_norm_found_vector = kwargs.get('only_norm_found_vector', False)\n # 读取embedding\n if lower:\n lowered_vocab = Vocabulary(padding=vocab.padding, unknown=vocab.unknown)\n for word, index in vocab:\n if vocab._is_word_no_create_entry(word):\n lowered_vocab.add_word(word.lower(), no_create_entry=True)\n else:\n lowered_vocab.add_word(word.lower()) # 先加入需要创建entry的\n logger.info(f\"All word in the vocab have been lowered. 
There are {len(vocab)} words, {len(lowered_vocab)} \"\n f\"unique lowered words.\")\n if model_path:\n embedding = self._load_with_vocab(model_path, vocab=lowered_vocab, init_method=init_method)\n else:\n embedding = self._randomly_init_embed(len(vocab), embedding_dim, init_method)\n self.register_buffer('words_to_words', torch.arange(len(vocab)).long())\n if lowered_vocab.unknown:\n unknown_idx = lowered_vocab.unknown_idx\n else:\n unknown_idx = embedding.size(0) - 1 # 否则是最后一个为unknow\n self.register_buffer('words_to_words', torch.arange(len(vocab)).long())\n words_to_words = torch.full((len(vocab),), fill_value=unknown_idx).long()\n for word, index in vocab:\n if word not in lowered_vocab:\n word = word.lower()\n if word not in lowered_vocab and lowered_vocab._is_word_no_create_entry(word):\n continue # 如果不需要创建entry,已经默认unknown了\n words_to_words[index] = self.words_to_words[lowered_vocab.to_index(word)]\n self.register_buffer('words_to_words', words_to_words)\n self._word_unk_index = lowered_vocab.unknown_idx # 替换一下unknown的index\n else:\n if model_path:\n embedding = self._load_with_vocab(model_path, vocab=vocab, init_method=init_method)\n else:\n embedding = self._randomly_init_embed(len(vocab), embedding_dim, init_method)\n self.register_buffer('words_to_words', torch.arange(len(vocab)).long())\n if not self.only_norm_found_vector and normalize:\n embedding /= (torch.norm(embedding, dim=1, keepdim=True) + 1e-12)\n\n if truncate_vocab:\n for i in range(len(truncated_words_to_words)):\n index_in_truncated_vocab = truncated_words_to_words[i]\n truncated_words_to_words[i] = self.words_to_words[index_in_truncated_vocab]\n del self.words_to_words\n self.register_buffer('words_to_words', truncated_words_to_words)\n self.embedding = nn.Embedding(num_embeddings=embedding.shape[0], embedding_dim=embedding.shape[1],\n padding_idx=vocab.padding_idx,\n max_norm=None, norm_type=2, scale_grad_by_freq=False,\n sparse=False, _weight=embedding)\n self._embed_size = self.embedding.weight.size(1)\n self.requires_grad = requires_grad\n self.dropout = MyDropout(dropout)\n\n def _randomly_init_embed(self, num_embedding, embedding_dim, init_embed=None):\n \"\"\"\n\n :param int num_embedding: embedding的entry的数量\n :param int embedding_dim: embedding的维度大小\n :param callable init_embed: 初始化方法\n :return: torch.FloatTensor\n \"\"\"\n embed = torch.zeros(num_embedding, embedding_dim)\n\n if init_embed is None:\n nn.init.uniform_(embed, -np.sqrt(3 / embedding_dim), np.sqrt(3 / embedding_dim))\n else:\n init_embed(embed)\n\n return embed\n\n def _load_with_vocab(self, embed_filepath, vocab, dtype=np.float32, padding='<pad>', unknown='<unk>',\n error='ignore', init_method=None):\n \"\"\"\n 从embed_filepath这个预训练的词向量中抽取出vocab这个词表的词的embedding。EmbedLoader将自动判断embed_filepath是\n word2vec(第一行只有两个元素)还是glove格式的数据。\n\n :param str embed_filepath: 预训练的embedding的路径。\n :param vocab: 词表 :class:`~fastNLP.Vocabulary` 类型,读取出现在vocab中的词的embedding。\n 没有出现在vocab中的词的embedding将通过找到的词的embedding的正态分布采样出来,以使得整个Embedding是同分布的。\n :param dtype: 读出的embedding的类型\n :param str padding: 词表中padding的token\n :param str unknown: 词表中unknown的token\n :param str error: `ignore` , `strict` ; 如果 `ignore` ,错误将自动跳过; 如果 `strict` , 错误将抛出。\n 这里主要可能出错的地方在于词表有空行或者词表出现了维度不一致。\n :param init_method: 如何初始化没有找到的值。可以使用torch.nn.init.*中各种方法。默认使用torch.nn.init.zeros_\n :return torch.tensor: shape为 [len(vocab), dimension], dimension由pretrain的embedding决定。\n \"\"\"\n assert isinstance(vocab, Vocabulary), \"Only fastNLP.Vocabulary is supported.\"\n if not 
os.path.exists(embed_filepath):\n raise FileNotFoundError(\"`{}` does not exist.\".format(embed_filepath))\n with open(embed_filepath, 'r', encoding='utf-8') as f:\n line = f.readline().strip()\n parts = line.split()\n start_idx = 0\n if len(parts) == 2:\n dim = int(parts[1])\n start_idx += 1\n else:\n dim = len(parts) - 1\n f.seek(0)\n matrix = {}\n if vocab.padding:\n matrix[vocab.padding_idx] = torch.zeros(dim)\n if vocab.unknown:\n matrix[vocab.unknown_idx] = torch.zeros(dim)\n found_count = 0\n found_unknown = False\n for idx, line in enumerate(f, start_idx):\n try:\n parts = line.strip().split()\n word = ''.join(parts[:-dim])\n nums = parts[-dim:]\n # 对齐unk与pad\n if word == padding and vocab.padding is not None:\n word = vocab.padding\n elif word == unknown and vocab.unknown is not None:\n word = vocab.unknown\n found_unknown = True\n if word in vocab:\n index = vocab.to_index(word)\n matrix[index] = torch.from_numpy(np.fromstring(' '.join(nums), sep=' ', dtype=dtype, count=dim))\n if self.only_norm_found_vector:\n matrix[index] = matrix[index] / np.linalg.norm(matrix[index])\n found_count += 1\n except Exception as e:\n if error == 'ignore':\n warnings.warn(\"Error occurred at the {} line.\".format(idx))\n else:\n logger.error(\"Error occurred at the {} line.\".format(idx))\n raise e\n logger.info(\"Found {} out of {} words in the pre-training embedding.\".format(found_count, len(vocab)))\n for word, index in vocab:\n if index not in matrix and not vocab._is_word_no_create_entry(word):\n if found_unknown: # 如果有unkonwn,用unknown初始化\n matrix[index] = matrix[vocab.unknown_idx]\n else:\n matrix[index] = None\n # matrix中代表是需要建立entry的词\n vectors = self._randomly_init_embed(len(matrix), dim, init_method)\n\n if vocab.unknown is None: # 创建一个专门的unknown\n unknown_idx = len(matrix)\n vectors = torch.cat((vectors, torch.zeros(1, dim)), dim=0).contiguous()\n else:\n unknown_idx = vocab.unknown_idx\n self.register_buffer('words_to_words', torch.full((len(vocab),), fill_value=unknown_idx).long())\n for index, (index_in_vocab, vec) in enumerate(matrix.items()):\n if vec is not None:\n vectors[index] = vec\n self.words_to_words[index_in_vocab] = index\n\n return vectors\n\n def drop_word(self, words):\n \"\"\"\n 按照设定随机将words设置为unknown_index。\n\n :param torch.LongTensor words: batch_size x max_len\n :return:\n \"\"\"\n if self.word_dropout > 0 and self.training:\n mask = torch.rand(words.size())\n mask = mask.to(words.device)\n mask = mask.lt(self.word_dropout)\n # mask = torch.full_like(words, fill_value=self.word_dropout, dtype=torch.float)\n # # mask = torch.bernoulli(mask).eq(1) # dropout_word越大,越多位置为1\n # # mask = mask.to(words.device)\n pad_mask = words.ne(self._word_pad_index)\n mask = mask.__and__(pad_mask)\n words = words.masked_fill(mask, self._word_unk_index)\n return words\n\n def forward(self, words):\n \"\"\"\n 传入words的index\n\n :param words: torch.LongTensor, [batch_size, max_len]\n :return: torch.FloatTensor, [batch_size, max_len, embed_size]\n \"\"\"\n if hasattr(self, 'words_to_words'):\n words = self.words_to_words[words]\n words = self.drop_word(words)\n words = self.embedding(words)\n words = self.dropout(words)\n return words\n\n\nclass BertEmbedding(ContextualEmbedding):\n \"\"\"\n 使用BERT对words进行编码的Embedding。建议将输入的words长度限制在430以内,而不要使用512(根据预训练模型参数,可能有变化)。这是由于\n 预训练的bert模型长度限制为512个token,而因为输入的word是未进行word piece分割的(word piece的分割有BertEmbedding在输入word\n 时切分),在分割之后长度可能会超过最大长度限制。\n\n BertEmbedding可以支持自动下载权重,当前支持的模型有以下的几种(待补充):\n\n Example::\n\n >>> import torch\n >>> from fastNLP 
import Vocabulary\n >>> from fastNLP.embeddings import BertEmbedding\n >>> vocab = Vocabulary().add_word_lst(\"The whether is good .\".split())\n >>> embed = BertEmbedding(vocab, model_dir_or_name='en-base-uncased', requires_grad=False, layers='4,-2,-1')\n >>> words = torch.LongTensor([[vocab.to_index(word) for word in \"The whether is good .\".split()]])\n >>> outputs = embed(words)\n >>> outputs.size()\n >>> # torch.Size([1, 5, 2304])\n \"\"\"\n\n def __init__(self, vocab: Vocabulary, model_dir_or_name: str = 'en-base-uncased', layers: str = '-1',\n pool_method: str = 'first', word_dropout=0, dropout=0, include_cls_sep: bool = False,\n pooled_cls=True, requires_grad: bool = True, auto_truncate: bool = False):\n \"\"\"\n\n :param ~fastNLP.Vocabulary vocab: 词表\n :param str model_dir_or_name: 模型所在目录或者模型的名称。当传入模型所在目录时,目录中应该包含一个词表文件(以.txt作为后缀名),\n 权重文件(以.bin作为文件后缀名), 配置文件(以.json作为后缀名)。\n :param str layers: 输出embedding表示来自于哪些层,不同层的结果按照layers中的顺序在最后一维concat起来。以','隔开层数,层的序号是\n 从0开始,可以以负数去索引倒数几层。\n :param str pool_method: 因为在bert中,每个word会被表示为多个word pieces, 当获取一个word的表示的时候,怎样从它的word pieces\n 中计算得到它对应的表示。支持 ``last`` , ``first`` , ``avg`` , ``max``。\n :param float word_dropout: 以多大的概率将一个词替换为unk。这样既可以训练unk也是一定的regularize。\n :param float dropout: 以多大的概率对embedding的表示进行Dropout。0.1即随机将10%的值置为0。\n :param bool include_cls_sep: bool,在bert计算句子的表示的时候,需要在前面加上[CLS]和[SEP], 是否在结果中保留这两个内容。 这样\n 会使得word embedding的结果比输入的结果长两个token。如果该值为True,则在使用 :class::StackEmbedding 可能会与其它类型的\n embedding长度不匹配。\n :param bool pooled_cls: 返回的[CLS]是否使用预训练中的BertPool映射一下,仅在include_cls_sep时有效。如果下游任务只取[CLS]做预测,\n 一般该值为True。\n :param bool requires_grad: 是否需要gradient以更新Bert的权重。\n :param bool auto_truncate: 当句子words拆分为word pieces长度超过bert最大允许长度(一般为512), 自动截掉拆分后的超过510个\n word pieces后的内容,并将第512个word piece置为[SEP]。超过长度的部分的encode结果直接全部置零。一般仅有只使用[CLS]\n 来进行分类的任务将auto_truncate置为True。\n \"\"\"\n super(BertEmbedding, self).__init__(vocab, word_dropout=word_dropout, dropout=dropout)\n self.device_cpu = torch.device('cpu')\n if model_dir_or_name.lower() in PRETRAINED_BERT_MODEL_DIR:\n if 'cn' in model_dir_or_name.lower() and pool_method not in ('first', 'last'):\n logger.warning(\"For Chinese bert, pooled_method should choose from 'first', 'last' in order to achieve\"\n \" faster speed.\")\n warnings.warn(\"For Chinese bert, pooled_method should choose from 'first', 'last' in order to achieve\"\n \" faster speed.\")\n self.dropout_p = dropout\n self._word_sep_index = None\n if '[SEP]' in vocab:\n self._word_sep_index = vocab['[SEP]']\n\n self.model = _WordBertModel(model_dir_or_name=model_dir_or_name, vocab=vocab, layers=layers,\n pool_method=pool_method, include_cls_sep=include_cls_sep,\n pooled_cls=pooled_cls, auto_truncate=auto_truncate, min_freq=2)\n\n self.requires_grad = requires_grad\n self._embed_size = len(self.model.layers) * self.model.encoder.hidden_size\n\n def _delete_model_weights(self):\n del self.model\n\n def forward(self, words):\n \"\"\"\n 计算words的bert embedding表示。计算之前会在每句话的开始增加[CLS]在结束增加[SEP], 并根据include_cls_sep判断要不要\n 删除这两个token的表示。\n\n :param torch.LongTensor words: [batch_size, max_len]\n :return: torch.FloatTensor. 
batch_size x max_len x (768*len(self.layers))\n \"\"\"\n words = self.drop_word(words)\n outputs = self._get_sent_reprs(words)\n if outputs is not None:\n if self.dropout_p >1e-5:\n return self.dropout(outputs)\n else:\n return outputs\n outputs = self.model(words)\n # print(outputs.size())\n\n outputs = torch.cat([*outputs], dim=-1)\n # print(outputs.size())\n # exit()\n if self.dropout_p > 1e-5:\n return self.dropout(outputs)\n else:\n return outputs\n\n def drop_word(self, words):\n \"\"\"\n 按照设定随机将words设置为unknown_index。\n\n :param torch.LongTensor words: batch_size x max_len\n :return:\n \"\"\"\n if self.word_dropout > 0 and self.training:\n with torch.no_grad():\n if self._word_sep_index: # 不能drop sep\n sep_mask = words.eq(self._word_sep_index)\n\n mask = torch.full(words.size(), fill_value=self.word_dropout, dtype=torch.float)\n # print(mask.device)\n # print(mask)\n # print(mask.device)\n # exit()\n # mask = mask.to(self.device_cpu)\n mask = torch.bernoulli(mask).eq(1) # dropout_word越大,越多位置为1\n mask = mask.to(words.device)\n pad_mask = words.ne(0)\n mask = pad_mask.__and__(mask) # pad的位置不为unk\n words = words.masked_fill(mask, self._word_unk_index)\n if self._word_sep_index:\n words.masked_fill_(sep_mask, self._word_sep_index)\n return words"
] |
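The `l_cpo` term in `BertForMaskedLM_CL` above takes the softmax probability of the gold token as the positive score and the top-5 probabilities assigned to other tokens as negatives, but it hard-codes the vocabulary size `21128` and indexes with the raw labels, so `-100` entries wrap around to arbitrary vocabulary rows before the attention mask zeroes them. Below is a minimal sketch of that term as I read it; the function name, argument names, and the clamp-based handling of `-100` are my own illustration, not part of this dataset entry:

```python
import torch

def contrastive_mlm_penalty(logits, labels, attention_mask, k=5):
    # logits: [batch, seq_len, vocab]; labels use -100 for ignored positions.
    probs = torch.softmax(logits, dim=-1)
    safe_labels = labels.clamp(min=0)                         # keep gather indices in range
    pos = probs.gather(-1, safe_labels.unsqueeze(-1)).squeeze(-1)
    pos = pos * attention_mask                                # zero padded positions, as above
    negs = probs.scatter(-1, safe_labels.unsqueeze(-1), 0.0)  # remove the gold probability
    neg_topk = negs.topk(k, dim=-1).values                    # hardest k wrong tokens per position
    return -(pos.sum() - neg_topk.sum() / k) / logits.size(0)
```

Reading the vocabulary size off the tensor shape (or `config.vocab_size`), rather than the literal `21128`, keeps the penalty correct for any checkpoint.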
[
[
"torch.nn.Softmax",
"torch.cat",
"torch.zeros",
"torch.nn.Embedding",
"torch.nn.BCEWithLogitsLoss",
"torch.where",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.softmax",
"torch.einsum",
"torch.from_numpy",
"torch.tensor",
"tensorflow.train.list_variables",
"torch.arange",
"torch.ones_like",
"torch.full",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"torch.exp",
"numpy.transpose",
"torch.nn.functional.normalize",
"torch.diagonal",
"torch.nn.LayerNorm",
"torch.nn.Tanh",
"torch.matmul",
"torch.nn.functional.one_hot"
],
[
"torch.norm",
"numpy.sqrt",
"torch.cat",
"torch.zeros",
"numpy.linalg.norm",
"torch.nn.Embedding",
"torch.no_grad",
"torch.bernoulli",
"torch.device"
]
] |
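The fastNLP `StaticEmbedding` above funnels its `lower=True`, `min_freq`, and unknown-word handling through a single `words_to_words` buffer that redirects vocabulary indices to shared embedding rows at lookup time. A toy version of just that mechanism; the class name and the example mapping are assumptions for illustration:

```python
import torch
import torch.nn as nn

class RemappedEmbedding(nn.Module):
    # Several vocab ids can point at one embedding row, e.g. "The"/"the"/"THE"
    # when lowercasing, or rare words redirected to the unknown row.
    def __init__(self, mapping, num_rows, dim, padding_idx=0):
        super().__init__()
        # Buffer, not Parameter: saved in state_dict and moved by .to(device),
        # but never updated by the optimizer.
        self.register_buffer('words_to_words', torch.as_tensor(mapping, dtype=torch.long))
        self.embedding = nn.Embedding(num_rows, dim, padding_idx=padding_idx)

    def forward(self, words):
        return self.embedding(self.words_to_words[words])

# vocab ids 3, 4 and 5 all share row 3, so the three lookups return one vector
embed = RemappedEmbedding([0, 1, 2, 3, 3, 3], num_rows=4, dim=5)
out = embed(torch.tensor([[3, 4, 5]]))
```

Keeping the mapping in a registered buffer is why the original calls `register_buffer` repeatedly instead of storing a plain attribute: the remap table travels with the module's weights across devices and checkpoints.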
sailfish009/ReAgent
|
[
"cb425ca6a161b687c81a74a0cbad849cf43803f8"
] |
[
"ml/rl/test/gym/world_model/test_state_embed_gym.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport json\nimport logging\nimport random\nimport unittest\nfrom typing import List\n\nimport numpy as np\nimport torch\nfrom ml.rl.json_serialize import json_to_object\nfrom ml.rl.test.base.horizon_test_base import HorizonTestBase\nfrom ml.rl.test.gym.run_gym import OpenAiGymParameters\nfrom ml.rl.test.gym.world_model.state_embed_gym import (\n create_mdnrnn_trainer_and_embed_dataset,\n run_gym,\n)\n\n\nlogger = logging.getLogger(__name__)\n\nMDNRNN_STRING_GAME_JSON = \"ml/rl/test/configs/mdnrnn_string_game_v0.json\"\nDQN_STRING_GAME_JSON = \"ml/rl/test/configs/discrete_dqn_string_game_v0.json\"\n\n\nclass TestStateEmbedGym(HorizonTestBase):\n def setUp(self):\n logging.getLogger().setLevel(logging.INFO)\n torch.manual_seed(0)\n np.random.seed(0)\n random.seed(0)\n super().setUp()\n\n @staticmethod\n def verify_result(reward_history: List[float], expected_reward: float):\n assert reward_history[-1] >= expected_reward\n\n @unittest.skipIf(not torch.cuda.is_available(), \"CUDA not available\")\n def test_string_game_gpu(self):\n with open(MDNRNN_STRING_GAME_JSON, \"r\") as f:\n mdnrnn_params = json_to_object(f.read(), OpenAiGymParameters)\n mdnrnn_params = mdnrnn_params._replace(use_gpu=True)\n with open(DQN_STRING_GAME_JSON, \"r\") as f:\n rl_params = json_to_object(f.read(), OpenAiGymParameters)\n rl_params = rl_params._replace(use_gpu=True)\n avg_reward_history = self._test_state_embed(mdnrnn_params, rl_params)\n self.verify_result(avg_reward_history, 10)\n\n @staticmethod\n def _test_state_embed(\n mdnrnn_params: OpenAiGymParameters, rl_params: OpenAiGymParameters\n ):\n env, mdnrnn_trainer, embed_rl_dataset = create_mdnrnn_trainer_and_embed_dataset(\n mdnrnn_params, rl_params.use_gpu\n )\n max_embed_seq_len = mdnrnn_params.run_details.seq_len\n avg_reward_history, _, _, _, _ = run_gym(\n rl_params,\n None, # score bar\n embed_rl_dataset,\n env.env,\n mdnrnn_trainer.mdnrnn,\n max_embed_seq_len,\n )\n return avg_reward_history\n"
] |
[
[
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.random.seed"
]
] |
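The ReAgent test above combines two small idioms: seeding `torch`, `numpy`, and `random` together in `setUp`, and gating the GPU variant behind `unittest.skipIf`. A self-contained sketch of the same pattern; the helper and test names are mine:

```python
import random
import unittest

import numpy as np
import torch

def seed_everything(seed: int = 0) -> None:
    # Pin all three RNGs the rollout code might draw from.
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

class GpuOnlyTest(unittest.TestCase):
    def setUp(self):
        seed_everything(0)

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def test_runs_on_gpu(self):
        x = torch.ones(1, device="cuda")
        self.assertEqual(x.item(), 1.0)

if __name__ == "__main__":
    unittest.main()
```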
dnanexus/dnanexus-example-applets
|
[
"04a6abebfef3039416cede58f39a8c3542468d3b"
] |
[
"Tutorials/python/dash-web-app/resources/home/dnanexus/my_app.py"
] |
[
"# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.graph_objs as go\n\ndef create_app():\n app = dash.Dash(__name__)\n\n df = pd.read_csv('gdp-life-exp-2007.csv')\n app.layout = html.Div(children=[\n html.H1(children='Dash works on DNAnexus!'),\n\n dcc.Graph(\n id='life-exp-vs-gdp',\n figure={\n 'data': [\n go.Scatter(\n x=df[df['continent'] == i]['gdp per capita'],\n y=df[df['continent'] == i]['life expectancy'],\n text=df[df['continent'] == i]['country'],\n mode='markers',\n opacity=0.7,\n marker={\n 'size': 15,\n 'line': {'width': 0.5, 'color': 'white'}\n },\n name=i\n ) for i in df.continent.unique()\n ],\n 'layout': go.Layout(\n xaxis={'type': 'log', 'title': 'GDP Per Capita'},\n yaxis={'title': 'Life Expectancy'},\n legend={'x': 0, 'y': 1},\n hovermode='closest',\n title=\"As proof, here is an interactive Gapminder-style scatter plot\"\n )\n }\n ),\n dcc.Markdown('''\nYou can also write in Markdown, so we can easily write documentation straight into the interface. This is how you make an applet open up HTTPS by the way. Just add this to the dxapp.json:\n```\n\"httpsApp\": {\"ports\":[443], \"shared_access\": \"VIEW\"},\n```\nAnd then your web app should output on port 443.\n '''),\n dcc.Markdown('''\nFor more information on what you can build with Dash, see the Dash [tutorial](https://dash.plot.ly/).\n ''')\n ])\n return app\n"
] |
[
[
"pandas.read_csv"
]
] |
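`my_app.py` above only builds and returns the Dash app; its own markdown notes that the `httpsApp` stanza in dxapp.json opens port 443, so something still has to serve the app there. A plausible runner, assuming the Dash API contemporary with `dash_core_components` (this runner file is my assumption, not part of the entry):

```python
from my_app import create_app  # create_app is defined in the entry above

app = create_app()

if __name__ == "__main__":
    # Bind to all interfaces so the DNAnexus HTTPS proxy can reach the app;
    # port 443 matches "httpsApp": {"ports": [443], "shared_access": "VIEW"}.
    app.run_server(host="0.0.0.0", port=443)
```

Binding a port below 1024 normally requires elevated privileges; DNAnexus jobs typically execute as root inside the worker, so this is usually not an obstacle.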
logic-and-learning/AdvisoRL
|
[
"3bbd741e681e6ea72562fec142d54e9d781d097d"
] |
[
"src/tester/livetester.py"
] |
[
"import os, copy\nfrom datetime import datetime\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nclass LiveTester:\n \"\"\"\n Plotter for one experiment.\n \"\"\"\n def __init__(self, curriculum, *,\n label=None, filebasename=None,\n show=True, keep_open=True,\n ):\n # super(LiveTester, self).__init__()\n\n self.curriculum = curriculum\n\n self.label = label\n self.filebasename = filebasename\n self.show = show\n self.keep_open = keep_open\n\n self.episode_steps = np.array([], dtype=int)\n self.episode_rewards = np.array([], dtype=float)\n self.episode_perfs = np.array([], dtype=float)\n\n # self.sample_steps = np.array([], dtype=int)\n # self.sample_positive = np.array([], dtype=int)\n # self.sample_negative = np.array([], dtype=int)\n default_traces = {\n 'steps': np.array([], dtype=int),\n 'positive': np.array([], dtype=int),\n 'negative': np.array([], dtype=int),\n }\n self.traces = {\n var: copy.deepcopy(default_traces)\n for var in [\n 'all_traces',\n 'new_traces',\n 'traces_numerical',\n ]\n }\n\n self.default_bool_data = {\n 'steps': [],\n 'values': [],\n 'color': None,\n }\n self.bool_datas = {\n var: copy.deepcopy(self.default_bool_data)\n for var in [] # filled when adding vars\n }\n\n self.current_step = 0\n\n def start(self):\n if not self.show: return self\n\n # self.fig, self.axes = plt.subplots()\n # self.fig, self.axes = plt.subplots(3,1, sharex=True)\n self.fig, self.axes = plt.subplots(3,1, sharex=True, gridspec_kw={'height_ratios': [2, 2, 1]})\n self.axes = np.array(self.axes).flatten()\n\n if self.label is not None:\n self.fig.canvas.set_window_title(self.label)\n self.fig.canvas.get_default_filename = lambda : 'livefigure_{}.{}'.format(\n self.filebasename or 1,\n self.fig.canvas.get_default_filetype(),\n )\n self.old_savefig_directory = mpl.rcParams[\"savefig.directory\"]\n mpl.rcParams[\"savefig.directory\"] = os.path.abspath(\"../plotdata/\")\n self.fig.set_figheight(8)\n\n self.axes_handles = [[] for ax in self.axes]\n\n self.axes[0].set_xlim(0, self.curriculum.total_steps)\n\n self.plottmp = []\n\n\n a = 0 #\n self.axes[a].grid()\n self.axes[a].set(xlabel='number of steps', ylabel='reward')\n self.axes[a].set_ylim(-0.1, 1.1)\n\n self.plot_perfs, = self.axes[a].plot(self.episode_steps, self.episode_perfs,\n color='black',\n drawstyle='steps-post',\n label='performance',\n )\n self.axes_handles[a].append(self.plot_perfs)\n\n\n self.traces_plotparam = [\n ('all_traces', 1),\n # ('new_traces', 1),\n # ('traces_numerical', 1),\n ]\n for _,a in self.traces_plotparam: # traces count\n self.axes[a].grid()\n self.axes[a].set(xlabel='number of steps', ylabel='count')\n\n self.plot_pos = {}\n self.plot_tot = {}\n for var,a in self.traces_plotparam:\n self.plot_pos[var], = self.axes[a].plot(0,0,\n color='#00aa00',\n drawstyle='steps-post',\n label=var+'.positive',\n )\n self.plot_tot[var], = self.axes[a].plot(0,0,\n color='black',\n drawstyle='steps-post',\n label=var+'.total',\n )\n self.axes_handles[a].append(self.plot_tot[var])\n self.axes_handles[a].append(self.plot_pos[var])\n\n\n a = 2 # bool variables\n self.axes[a].set(xlabel='number of steps')\n self.axes[a].get_yaxis().set_visible(False)\n\n\n self.markers = {}\n self.markers['rm_update'] = mpl.lines.Line2D([], [],\n label='RM update', color='#cc66aa', alpha=0.5,\n marker='|', linestyle='None', markersize=10, markeredgewidth=1.5,\n )\n self.markers['rm_learn'] = mpl.lines.Line2D([], [],\n label='RM relearn', color='#ff6666', alpha=0.5,\n marker='|', linestyle='None', markersize=10, 
markeredgewidth=1.5,\n )\n # self.markers['rm_learn_failed'] = mpl.lines.Line2D([], [],\n # label='RM relearn attempt', color='#0000ff', alpha=0.2,\n # marker='|', linestyle='None', markersize=10, markeredgewidth=1.5,\n # )\n self.markers['rm_refresh'] = mpl.lines.Line2D([], [],\n label='RM refresh', color='#00dd99', alpha=0.2,\n marker='|', linestyle='None', markersize=10, markeredgewidth=1.5,\n )\n for handles in self.axes_handles:\n for marker in self.markers.values():\n handles.append(marker)\n break # just the first one\n\n for a,handles in enumerate(self.axes_handles):\n self.axes[a].legend(handles=handles)\n\n plt.ion()\n\n # self.fig.canvas.draw_idle()\n self.last_update = datetime.now()\n self.last_update_duration = 0\n self.__update(force=True)\n plt.pause(0.0001)\n\n return self\n\n def __update(self, force=False):\n if not self.show: return\n\n start_update_time = datetime.now()\n if not force:\n elapsed_time = (start_update_time-self.last_update).total_seconds()\n max_elapsed_time = max(0.1, min(2, self.last_update_duration*5)) # take only 20% of compute time\n if elapsed_time < max_elapsed_time: return\n\n for elem in self.plottmp:\n elem.remove()\n self.plottmp.clear()\n\n for ax in self.axes:\n self.plottmp.append(ax.axvspan(self.current_step, self.curriculum.total_steps,\n facecolor=\"black\", alpha=0.1, zorder=-128,\n ))\n\n if len(self.episode_steps):\n steps = np.append(self.episode_steps, self.current_step)\n perfs = np.append(self.episode_perfs, self.episode_perfs[-1])\n self.plot_perfs.set_data(steps, perfs)\n\n for var,a in self.traces_plotparam:\n if len(self.traces[var]['steps']):\n total = self.traces[var]['positive'] + self.traces[var]['negative']\n\n steps = np.append(self.traces[var]['steps'], self.current_step)\n pos = np.append(self.traces[var]['positive'], self.traces[var]['positive'][-1])\n tot = np.append(total, total[-1])\n\n self.plot_pos[var].set_data(steps, pos)\n self.plot_tot[var].set_data(steps, tot)\n self.plottmp.append(self.axes[a].fill_between(steps, 0, pos,\n facecolor=self.plot_pos[var].get_color(),\n alpha=0.5,\n step=\"post\",\n ))\n self.plottmp.append(self.axes[a].fill_between(steps, pos, tot,\n facecolor=\"#dd0000\",\n alpha=0.5,\n step=\"post\",\n ))\n\n a=2\n for v,(var,bool_data) in enumerate(self.bool_datas.items()):\n if not len(bool_data['steps']): continue\n steps = np.append(bool_data['steps'], self.current_step)\n values = np.append(bool_data['values'], bool_data['values'][-1])\n args = {}\n if bool_data['color'] is not None: args['facecolor'] = bool_data['color']\n fill = self.axes[a].fill_between(steps, v, v+values,\n alpha=0.5, step=\"post\",\n label=var,\n **args,\n )\n if bool_data['color'] is None: # not already set\n bool_data['color'] = fill.get_facecolor()\n self.axes_handles[a].insert(0, fill)\n self.plottmp.append(fill)\n self.axes[a].set_ylim(0, len(self.bool_datas))\n\n for a,handles in enumerate(self.axes_handles):\n self.axes[a].legend(handles=handles)\n\n self.fig.canvas.draw_idle()\n self.fig.canvas.start_event_loop(0.0001)\n\n self.last_update = datetime.now()\n self.last_update_duration = (self.last_update-start_update_time).total_seconds()\n\n def save(self, filename=None):\n if not self.show: raise RuntimeError(\"Graph not active.\")\n if filename is None:\n filename = os.path.join(mpl.rcParams[\"savefig.directory\"], self.fig.canvas.get_default_filename())\n self.fig.savefig(filename)\n\n def close(self):\n self.current_step = self.curriculum.total_steps\n if not self.show: return\n\n plt.ioff()\n if 
self.keep_open:\n self.__update(True)\n plt.show()\n else:\n plt.close(self.fig) # Figure objects are closed via pyplot, not a method\n\n mpl.rcParams[\"savefig.directory\"] = self.old_savefig_directory\n\n def add_reward(self, step, reward, *, force_update=False):\n self.current_step = step\n self.episode_steps = np.append(self.episode_steps, step)\n self.episode_rewards = np.append(self.episode_rewards, reward)\n perf = np.average(self.episode_rewards[-10:])\n self.episode_perfs = np.append(self.episode_perfs, perf)\n self.__update(force_update)\n\n def add_event(self, step, event='rm_update', *, force_update=False):\n self.current_step = step\n if self.show and event in self.markers.keys():\n marker = self.markers[event]\n for ax in self.axes[:-1]:\n ax.axvline(x=step, color=marker.get_color(), alpha=marker.get_alpha(), zorder=-32)\n self.__update(force_update)\n\n def add_traces_size(self, step, traces, var='all_traces', *, force_update=False):\n self.current_step = step\n if not var in self.traces.keys(): return\n traces_data = self.traces[var]\n traces_data['steps'] = np.append(traces_data['steps'], step)\n traces_data['positive'] = np.append(traces_data['positive'], len(traces.positive))\n traces_data['negative'] = np.append(traces_data['negative'], len(traces.negative))\n self.__update(force_update)\n\n def add_bool(self, step, var, val, *, force_update=False):\n self.current_step = step\n if not var in self.bool_datas.keys():\n self.bool_datas[var] = copy.deepcopy(self.default_bool_data)\n self.bool_datas[var]['steps'].append(step)\n self.bool_datas[var]['values'].append(bool(val))\n self.__update(force_update)\n\n def __enter__(self):\n self.start()\n return self\n def __exit__(self, type, value, traceback):\n self.close()\n"
] |
[
[
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ioff",
"numpy.append",
"numpy.average",
"numpy.array",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ion"
]
] |
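Each panel in `LiveTester` above leans on one matplotlib idiom: append the current step so the last value is held until "now", draw with `drawstyle='steps-post'`, and shade bands with `fill_between(..., step='post')`. A standalone sketch of that idiom with invented numbers:

```python
import numpy as np
import matplotlib.pyplot as plt

steps = np.array([0, 10, 25, 40])   # steps at which the counts changed
pos = np.array([1, 3, 4, 6])        # positive traces seen so far
tot = np.array([2, 5, 7, 9])        # positive + negative
current_step = 50

xs = np.append(steps, current_step)      # hold the last value until "now"
pos_ext = np.append(pos, pos[-1])
tot_ext = np.append(tot, tot[-1])

fig, ax = plt.subplots()
ax.plot(xs, tot_ext, color='black', drawstyle='steps-post', label='total')
ax.plot(xs, pos_ext, color='#00aa00', drawstyle='steps-post', label='positive')
ax.fill_between(xs, 0, pos_ext, step='post', alpha=0.5, facecolor='#00aa00')
ax.fill_between(xs, pos_ext, tot_ext, step='post', alpha=0.5, facecolor='#dd0000')
ax.legend()
plt.show()
```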
alexnowakvila/pystruct
|
[
"fda06786999e3cfc168046cfce2ac976f0adfd70"
] |
[
"pystruct/learners/generalized_frankwolfe_ssvm.py"
] |
[
"######################\n# Authors:\n# Xianghang Liu <[email protected]>\n# Andreas Mueller <[email protected]>\n#\n# License: BSD 3-clause\n#\n# Implements structured SVM as described in Joachims et. al.\n# Cutting-Plane Training of Structural SVMs\n\nimport warnings\nfrom time import time\nimport numpy as np\nimport pdb;\nfrom sklearn.utils import check_random_state\n\nfrom pystruct.learners.ssvm import BaseSSVM\nfrom pystruct.utils import generalized_find_constraint\n\n\nclass GeneralizedFrankWolfeSSVM(BaseSSVM):\n \"\"\"Structured SVM solver using Block-coordinate Frank-Wolfe.\n\n This implementation is somewhat experimental. Use with care.\n\n References\n ----------\n * Lacoste-Julien, Jaggi, Schmidt, Pletscher:\n Block-Coordinate Frank-Wolfe Optimization for Structural SVMs,xi\n JMLR 2013\n\n With batch_mode=False, this implements the online (block-coordinate)\n version of the algorithm (BCFW)\n BCFW is an attractive alternative to subgradient methods, as no\n learning rate is needed and a duality gap guarantee is given.\n\n Parameters\n ----------\n model : StructuredModel\n Object containing the model structure. Has to implement\n `loss`, `inference` and `loss_augmented_inference`.\n\n max_iter : int, default=1000\n Maximum number of passes over dataset to find constraints.\n\n C : float, default=1\n Regularization parameter. Corresponds to 1 / (lambda * n_samples).\n\n verbose : int\n Verbosity.\n\n n_jobs : int, default=1\n Number of parallel processes. Currently only n_jobs=1 is supported.\n\n show_loss_every : int, default=0\n How often the training set loss should be computed.\n Zero corresponds to never.\n\n tol : float, default=1e-3\n Convergence tolerance on the duality gap.\n\n logger : logger object, default=None\n Pystruct logger for storing the model or extracting additional\n information.\n\n batch_mode : boolean, default=False\n Whether to use batch updates. Will slow down learning enormously.\n\n line_search : boolean, default=True\n Whether to compute the optimum step size in each step.\n The line-search is done in closed form and cheap.\n There is usually no reason to turn this off.\n\n check_dual_every : int, default=10\n How often the stopping criterion should be checked. 
Computing\n the stopping criterion is as costly as doing one pass over the dataset,\n so check_dual_every=1 will make learning twice as slow.\n\n do_averaging : bool, default=True\n Whether to use weight averaging as described in the reference paper.\n Currently this is only supported in the block-coordinate version.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n\n Attributes\n ----------\n w : nd-array, shape=(model.size_joint_feature,)\n The learned weights of the SVM.\n\n ``loss_curve_`` : list of float\n List of loss values if show_loss_every > 0.\n\n ``objective_curve_`` : list of float\n Cutting plane objective after each pass through the dataset.\n\n ``primal_objective_curve_`` : list of float\n Primal objective after each pass through the dataset.\n\n ``timestamps_`` : list of int\n Total training time stored before each iteration.\n \"\"\"\n def __init__(self, model, max_iter=1000, C=1.0, verbose=0, n_jobs=1,\n show_loss_every=0, logger=None, batch_mode=False,\n line_search=True, check_dual_every=10, tol=.001,\n do_averaging=True, sample_method='perm', random_state=None, X_test=None, Y_test=None):\n\n if n_jobs != 1:\n warnings.warn(\"FrankWolfeSSVM does not support multiprocessing\"\n \" yet. Ignoring n_jobs != 1.\")\n\n if sample_method not in ['perm', 'rnd', 'seq']:\n raise ValueError(\"sample_method can only be perm, rnd, or seq\")\n\n BaseSSVM.__init__(self, model, max_iter, C, verbose=verbose,\n n_jobs=n_jobs, show_loss_every=show_loss_every,\n logger=logger)\n self.tol = tol\n self.batch_mode = batch_mode\n self.line_search = line_search\n self.check_dual_every = check_dual_every\n self.do_averaging = do_averaging\n self.sample_method = sample_method\n self.random_state = random_state\n self.X_test = X_test\n self.Y_test = Y_test\n self.oracle_errs = []\n\n def _calc_dual_gap(self, X, Y, mu, mu_hats, iteration):\n n_samples = len(X)\n # FIXME don't calculate this again\n joint_feature_gt = self.model.batch_joint_feature(X, Y)\n \n # in order to compute the dual gap we need a full pass over the data\n Q_hat = self.model.batch_loss_augmented_inference(X, mu_hats, self.w,\n relaxed=True)\n djoint_feature = joint_feature_gt - self.model.batch_mean_joint_feature(X, Q_hat)\n ls = np.sum(self.model.batch_Bayes_risk(Q_hat))\n ws = djoint_feature * self.C\n # l = np.sum(self.model.batch_Bayes_risk(mu)) / n_samples\n l_rescaled = self.l * n_samples * self.C\n # l_rescaled = l * n_samples * self.C\n dual_val = -0.5 * np.sum(self.w ** 2) + l_rescaled\n w_diff = self.w - ws\n dual_gap = w_diff.T.dot(self.w) - l_rescaled + ls * self.C\n # both must be the same\n primal_val = dual_val + dual_gap\n primal_val = 0.5 * np.sum(self.w ** 2) + self.C * ls - ws.T.dot(self.w)\n # import pdb; pdb.set_trace()\n return dual_val, dual_gap, primal_val\n\n def _gfrank_wolfe_bc(self, X, Y):\n \"\"\"Generalized Block-Coordinate Frank-Wolfe learning.\n \"\"\"\n n_samples = len(X)\n \n w = self.w.copy()\n w_mat = np.zeros((n_samples, self.model.size_joint_feature))\n mu = self.model.output_embedding(X, Y)\n mu_hats = mu\n l_mat = np.zeros(n_samples)\n l = 0.0\n k = 0\n\n rng = check_random_state(self.random_state)\n for iteration in range(self.max_iter):\n # print(\"Full pass: \", iteration)\n perm = np.arange(n_samples)\n if 
self.sample_method == 'perm':\n rng.shuffle(perm)\n elif self.sample_method == 'rnd':\n perm = rng.randint(low=0, high=n_samples, size=n_samples)\n oracle_err_avg = 0\n for j in range(n_samples):\n i = perm[j]\n x, y = X[i], Y[i]\n ###############################################################\n # Find constraint\n ###############################################################\n mu_hat, oracle_err = self.model.loss_augmented_inference(x, mu_hats[i], w, relaxed=True)\n delta_joint_feature = self.model.joint_feature(x, y) - self.model.mean_joint_feature(x, mu_hat)\n loss = self.model.Bayes_risk(mu_hat)\n # mu_hat, delta_joint_feature, slack, loss, oracle_err = generalized_find_constraint(self.model, x, y, mu_hats[i], w)\n oracle_err_avg += oracle_err\n ###############################################################\n # Compute ws and ls\n ###############################################################\n ws = delta_joint_feature * self.C\n ls = loss / n_samples\n\n # no line search\n gamma = 2.0 * n_samples / (k + 2.0 * n_samples)\n\n w -= w_mat[i]\n w_mat[i] = (1.0 - gamma) * w_mat[i] + gamma * ws\n w += w_mat[i]\n # if iteration > 10:\n # pdb.set_trace()\n mu_hats[i] = mu_hat\n mu[i] = (1.0 - gamma) * mu[i] + gamma * mu_hat\n l_mat_i = self.model.Bayes_risk(mu[i]) / n_samples\n l = l + l_mat_i - l_mat[i] \n l_mat[i] = l_mat_i\n\n if self.do_averaging:\n rho = 2. / (k + 2.)\n self.w = (1. - rho) * self.w + rho * w\n self.l = (1. - rho) * self.l + rho * l\n else:\n self.w = w\n self.l = l\n k += 1\n self.oracle_errs.append(oracle_err_avg / n_samples)\n # if (self.check_dual_every != 0) and (iteration % self.check_dual_every == 0):\n # score_train = self.score(X, Y)\n # score_test = self.score(self.X_test, self.Y_test)\n # print(\"Error oracles %f\", self.oracle_errs[-1])\n # print(\"SCORES %f\", score_train, score_test)\n if (self.check_dual_every != 0) and (iteration % self.check_dual_every == 0):\n dual_val, dual_gap, primal_val = self._calc_dual_gap(X, Y, mu, mu_hats, iteration)\n self.primal_objective_curve_.append(primal_val)\n self.objective_curve_.append(dual_val)\n self.timestamps_.append(time() - self.timestamps_[0])\n if self.verbose > 0:\n print(\"dual: %f, dual_gap: %f, primal: %f, Iteration: %d\"\n % (dual_val, dual_gap, primal_val, iteration))\n if self.logger is not None:\n self.logger(self, iteration)\n\n def fit(self, X, Y, constraints=None, initialize=True):\n \"\"\"Learn parameters using (block-coordinate) Frank-Wolfe learning.\n\n Parameters\n ----------\n X : iterable\n Traing instances. Contains the structured input objects.\n No requirement on the particular form of entries of X is made.\n\n Y : iterable\n Training labels. 
Contains the structured labels for inputs in X.\n Needs to have the same length as X.\n\n constraints : ignored\n\n initialize : boolean, default=True\n Whether to initialize the model for the data.\n Leave this true except if you really know what you are doing.\n \"\"\"\n if initialize:\n self.model.initialize(X, Y)\n self.objective_curve_, self.primal_objective_curve_ = [], []\n self.timestamps_ = [time()]\n self.w = getattr(self, \"w\", np.zeros(self.model.size_joint_feature))\n self.l = getattr(self, \"l\", 0)\n try:\n if self.batch_mode:\n self._frank_wolfe_batch(X, Y)\n else:\n self._gfrank_wolfe_bc(X, Y)\n except KeyboardInterrupt:\n pass\n if self.verbose:\n print(\"Calculating final objective.\")\n self.timestamps_.append(time() - self.timestamps_[0])\n # self.primal_objective_curve_.append(self._objective(X, Y))\n # self.objective_curve_.append(self.objective_curve_[-1])\n if self.logger is not None:\n self.logger(self, 'final')\n return self\n"
] |
[
[
"numpy.sum",
"numpy.arange",
"numpy.zeros",
"sklearn.utils.check_random_state"
]
] |
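A minimal standalone sketch (all names and data are illustrative, not from the repo) of the block-coordinate Frank-Wolfe bookkeeping used in `_gfrank_wolfe_bc` above: each training sample owns one block of weights, the step size follows gamma = 2n / (k + 2n), and the global weight vector is kept equal to the sum of the per-sample blocks.

import numpy as np

rng = np.random.RandomState(0)
n_samples, dim = 4, 3
w = np.zeros(dim)                    # global weights; invariant: w == w_mat.sum(axis=0)
w_mat = np.zeros((n_samples, dim))   # one weight block per training sample
for k in range(20):
    i = rng.randint(n_samples)       # pick a block, as in the 'rnd' sample method
    ws = rng.randn(dim)              # stand-in for the loss-augmented oracle direction
    gamma = 2.0 * n_samples / (k + 2.0 * n_samples)
    w -= w_mat[i]                                     # remove the old block...
    w_mat[i] = (1.0 - gamma) * w_mat[i] + gamma * ws  # ...move it toward the oracle...
    w += w_mat[i]                                     # ...and add the updated block back
assert np.allclose(w, w_mat.sum(axis=0))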
Eddie-Hwang/Co-Eye_Motion_Generation
|
[
"8e244680115fb63bc26018cb6b53bcfbd04e9683"
] |
[
"Transformer/model.py"
] |
[
"from Transformer.layers import EncoderLayer, DecoderLayer\nfrom Transformer.modules import PositionalEncoding\n\nimport torch.nn as nn\nimport torch\n\n\ndef get_pad_mask(seq, pad_idx):\n return (seq == pad_idx).unsqueeze(-2)\n\ndef get_subsequent_mask(seq):\n sz_b, len_s, sz_d = seq.size()\n subsequent_mask = torch.triu(torch.ones((1, len_s, len_s), device=seq.device), diagonal=1).bool()\n return subsequent_mask\n\nclass Encoder(nn.Module):\n # d_k, d_v = d_model / n_head\n # d_model == d_word_vec\n def __init__(self, n_src_vocab, d_word_vec, n_layers, n_head, d_k, d_v, d_model, d_inner, pad_idx, dropout=0.1, n_position=200):\n super().__init__()\n self.emb = nn.Embedding(n_src_vocab, d_word_vec, padding_idx=pad_idx)\n self.postion_enc = PositionalEncoding(d_word_vec, n_position=n_position)\n self.dropout = nn.Dropout(p=dropout)\n self.layer_stack = nn.ModuleList([\n EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n for _ in range(n_layers)])\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n def forward(self, src_seq, src_mask, return_attns=False):\n enc_intput = self.dropout(self.postion_enc(self.emb(src_seq)))\n enc_output = enc_intput\n # to store self attention\n enc_slf_attn_list = []\n for enc_layer in self.layer_stack:\n enc_output, enc_slf_attn = enc_layer(enc_output, slf_attn_mask=src_mask)\n enc_slf_attn_list += [enc_slf_attn] if return_attns else []\n enc_output = self.layer_norm(enc_output)\n \n if return_attns:\n return enc_output, enc_slf_attn_list\n else:\n return enc_output\n\n\nclass Decoder(nn.Module):\n\n def __init__(self, n_component, n_layers, n_head, d_model, d_k, d_v, d_inner, n_position=200, dropout=0.1):\n super().__init__()\n\n self.postion_enc = PositionalEncoding(n_component, n_position=n_position)\n self.dropout = nn.Dropout(p=dropout)\n self.layer_stack = nn.ModuleList([\n DecoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n for _ in range(n_layers)])\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n def forward(self, trg_seq, trg_mask, enc_output, src_mask, return_attns=False):\n dec_intput = self.dropout(self.postion_enc(trg_seq))\n dec_output = dec_intput\n\n dec_slf_attn_list, dec_enc_attn_list = [], []\n for dec_layer in self.layer_stack:\n dec_output, dec_slf_attn, dec_enc_attn = dec_layer(\n dec_output, enc_output, \n slf_attn_mask=trg_mask, \n dec_enc_attn_mask=src_mask)\n dec_slf_attn_list += [dec_slf_attn] if return_attns else []\n dec_enc_attn_list += [dec_enc_attn] if return_attns else []\n dec_output = self.layer_norm(dec_output)\n \n if return_attns:\n return dec_output, dec_slf_attn_list, dec_enc_attn_list\n return dec_output\n\n\nclass Transformer(nn.Module):\n\n def __init__(self, n_src_vocab, src_pad_idx,\n d_word_vec, d_model, d_inner, n_layers,\n n_head, d_k, d_v, dropout, \n src_n_position, n_component, trg_n_position):\n super().__init__()\n self.src_pad_idx = src_pad_idx\n\n self.encoder = Encoder(n_src_vocab=n_src_vocab, d_word_vec=d_word_vec,\n n_layers=n_layers, n_head=n_head,\n d_model=d_model, d_k=d_k, d_v=d_v, d_inner=d_inner,\n pad_idx=src_pad_idx, n_position=src_n_position)\n\n self.decoder = Decoder(n_component=n_component, n_layers=n_layers,\n n_head=n_head, d_model=d_model, d_k=d_k, d_v=d_v,\n d_inner=d_inner, n_position=trg_n_position)\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n \n assert d_model == d_word_vec, \\\n 'To facilitate the residual connections, \\\n the dimensions of all module outputs shall be the same.'\n\n def forward(self, 
src_seq, trg_seq):\n        src_mask = get_pad_mask(src_seq, self.src_pad_idx)\n        trg_mask = get_subsequent_mask(trg_seq)\n\n        # Encoder and Decoder return plain tensors when return_attns is False,\n        # so no tuple unpacking is needed here.\n        enc_output = self.encoder(src_seq, src_mask)\n        dec_output = self.decoder(trg_seq, trg_mask, enc_output, src_mask) \n\n        return dec_output"
] |
[
[
"torch.nn.Dropout",
"torch.ones",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.init.xavier_uniform_"
]
] |
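A small, hypothetical check of the two mask helpers defined in Transformer/model.py above (the tensor shapes are made up):

import torch

seq = torch.tensor([[5, 7, 0]])                # one sequence, pad_idx = 0
print(get_pad_mask(seq, pad_idx=0).shape)      # (1, 1, 3); True where the token is padding
trg = torch.zeros(1, 4, 16)                    # (batch, length, feature) target sequence
print(get_subsequent_mask(trg).shape)          # (1, 4, 4); True strictly above the diagonal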
bgerofi/mlperf-deepcam
|
[
"5e6524b30372d916a224ef826988128638264723"
] |
[
"src/deepCam/utils/parsing_helpers.py"
] |
[
"# The MIT License (MIT)\n#\n# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport re\nimport numpy as np\nimport torch\nimport torch.optim as optim\n\ndef get_lr_schedule(start_lr, scheduler_arg, optimizer, last_step = -1):\n #add the initial_lr to the optimizer\n optimizer.param_groups[0][\"initial_lr\"] = start_lr\n\n #now check\n if scheduler_arg[\"type\"] == \"multistep\":\n milestones = [ int(x) for x in scheduler_arg[\"milestones\"].split() ]\n gamma = float(scheduler_arg[\"decay_rate\"])\n return optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma, last_epoch = last_step)\n else:\n raise ValueError(\"Error, scheduler type {} not supported.\".format(scheduler_arg[\"type\"]))\n"
] |
[
[
"torch.optim.lr_scheduler.MultiStepLR"
]
] |
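A hypothetical usage of the get_lr_schedule helper above; the parameter list and the scheduler_arg values are illustrative:

import torch
import torch.optim as optim

opt = optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
scheduler = get_lr_schedule(0.1, {"type": "multistep",
                                  "milestones": "10 20",
                                  "decay_rate": "0.1"}, opt)
for step in range(25):
    opt.step()
    scheduler.step()
print(opt.param_groups[0]["lr"])  # 0.1 until step 10, 0.01 until step 20, then 0.001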
ma-compbio/Phylo-HMRF
|
[
"47b1d0eb11dc1674b8cd1a0710a29ff4232c733e"
] |
[
"base.py"
] |
[
"from __future__ import print_function\n\nimport string\nimport sys\nimport os\nfrom collections import deque\n\nimport numpy as np\nfrom scipy.misc import logsumexp\nfrom sklearn.base import BaseEstimator, _pprint\nfrom sklearn.utils import check_array, check_random_state\nfrom sklearn.utils.validation import check_is_fitted\n\nimport matplotlib.pyplot as plt\n\nimport scipy.misc\nimport scipy.io\nimport multiprocessing as mp\n\nimport time\n\nclass ConvergenceMonitor(object):\n\t\"\"\"Monitors and reports convergence to :data:`sys.stderr`.\n\n\tParameters\n\t----------\n\ttol : double\n\t\tConvergence threshold. EM has converged either if the maximum\n\t\tnumber of iterations is reached or the log probability\n\t\timprovement between the two consecutive iterations is less\n\t\tthan threshold.\n\n\tn_iter : int\n\t\tMaximum number of iterations to perform.\n\n\tverbose : bool\n\t\tIf ``True`` then per-iteration convergence reports are printed,\n\t\totherwise the monitor is mute.\n\n\tAttributes\n\t----------\n\thistory : deque\n\t\tThe log probability of the data for the last two training\n\t\titerations. If the values are not strictly increasing, the\n\t\tmodel did not converge.\n\n\titer : int\n\t\tNumber of iterations performed while training the model.\n\t\"\"\"\n\t_template = \"{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}\"\n\n\tdef __init__(self, tol, n_iter, verbose):\n\t\tself.tol = tol\n\t\tself.n_iter = n_iter\n\t\tself.verbose = verbose\n\t\tself.history = deque(maxlen=2)\n\t\tself.iter = 0\n\n\tdef __repr__(self):\n\t\tclass_name = self.__class__.__name__\n\t\tparams = dict(vars(self), history=list(self.history))\n\t\treturn \"{0}({1})\".format(\n\t\t\tclass_name, _pprint(params, offset=len(class_name)))\n\n\tdef report(self, logprob):\n\t\t\"\"\"Reports convergence to :data:`sys.stderr`.\n\n\t\tThe output consists of three columns: iteration number, log\n\t\tprobability of the data at the current iteration and convergence\n\t\trate. 
At the first iteration convergence rate is unknown and\n\t\tis thus denoted by NaN.\n\n\t\tParameters\n\t\t----------\n\t\tlogprob : float\n\t\t\tThe log probability of the data as computed by EM algorithm\n\t\t\tin the current iteration.\n\t\t\"\"\"\n\t\tif self.verbose:\n\t\t\tdelta = logprob - self.history[-1] if self.history else np.nan\n\t\t\tmessage = self._template.format(\n\t\t\t\titer=self.iter + 1, logprob=logprob, delta=delta)\n\t\t\tprint(message, file=sys.stderr)\n\n\t\tself.history.append(logprob)\n\t\tself.iter += 1\n\n\t@property\n\tdef converged(self):\n\t\t\"\"\"``True`` if the EM algorithm converged and ``False`` otherwise.\"\"\"\n\t\t# XXX we might want to check that ``logprob`` is non-decreasing.\n\t\treturn (self.iter == self.n_iter or\n\t\t\t\t(len(self.history) == 2 and\n\t\t\t\t self.history[1] - self.history[0] < self.tol))\n\nclass _BaseGraph(BaseEstimator):\n\t\"\"\"Base class for Markov Random Field Models.\n\t\"\"\"\n\tdef __init__(self, n_components=1, run_id=0, estimate_type=0, weight_type=0,\n\t\t\t\t startprob_prior=1.0, transmat_prior=1.0,\n\t\t\t\t algorithm=\"viterbi\", random_state=None,\n\t\t\t\t n_iter=10, tol=1e-2, verbose=False,\n\t\t\t\t params=string.ascii_letters,\n\t\t\t\t init_params=string.ascii_letters):\n\t\tself.n_components = n_components\n\t\tself.params = params\n\t\tself.init_params = init_params\n\t\tself.startprob_prior = startprob_prior\n\t\tself.transmat_prior = transmat_prior\n\t\tself.algorithm = algorithm\n\t\tself.random_state = random_state\n\t\tself.n_iter = n_iter\n\t\tself.tol = tol\n\t\tself.verbose = verbose\n\t\tself.run_id = run_id\n\t\tself.estimate_type = estimate_type\n\t\tself.weight_type = weight_type\n\n\tdef score_samples(self, X, lengths=None):\n\t\t\"\"\"Compute the log probability under the model and compute posteriors.\n\t\t\"\"\"\n\t\t# check_is_fitted(self, \"startprob_\")\n\t\t# self._check()\n\n\tdef predict(self, X, lengths=None):\n\t\t\"\"\"Find most likely state sequence corresponding to ``X``.\n\n\t\tParameters\n\t\t----------\n\t\tX : array-like, shape (n_samples, n_features)\n\t\t\tFeature matrix of individual samples.\n\n\t\tlengths : array-like of integers, shape (n_sequences, ), optional\n\t\t\tLengths of the individual sequences in ``X``. The sum of\n\t\t\tthese should be ``n_samples``.\n\n\t\tReturns\n\t\t-------\n\t\tstate_sequence : array, shape (n_samples, )\n\t\t\tLabels for each sample from ``X``.\n\t\t\"\"\"\n\t\t# _, state_sequence = self.decode(X, lengths)\n\t\t#state = self._estimate_state_graphcuts(X)\n\t\t#return state\n\n\tdef predict_proba(self, X, lengths=None):\n\t\t\"\"\"Compute the posterior probability for each state in the model.\n\n\t\tX : array-like, shape (n_samples, n_features)\n\t\t\tFeature matrix of individual samples.\n\n\t\tlengths : array-like of integers, shape (n_sequences, ), optional\n\t\t\tLengths of the individual sequences in ``X``. 
The sum of\n\t\t\tthese should be ``n_samples``.\n\n\t\tReturns\n\t\t-------\n\t\tposteriors : array, shape (n_samples, n_components)\n\t\t\tState-membership probabilities for each sample from ``X``.\n\t\t\"\"\"\n\t\t_, posteriors = self.score_samples(X, lengths)\n\t\treturn posteriors\n\n\tdef fit_accumulate(self, X, len_vec, threshold, lengths=None):\n\n\t\tprint(\"Initialization...\")\n\t\t# X = check_array(X)\n\t\tself._init(X, lengths=lengths)\n\t\tprint(\"return from initialization...\")\n\t\tprint(\"starting...\")\n\t\tself._check()\n\n\t\tprint(\"model fitting...\")\n\t\tself.monitor_ = ConvergenceMonitor(self.tol, self.n_iter, self.verbose)\n\t\t# self.n_iter = 100\n\t\tmax_iter = int(self.max_iter)\n\t\tmax_iter1 = 20\t# iterations after the previous minimum\n\t\ttype_id = 0\n\t\tpairwise_cost_pre, unary_cost_pre, cost1_pre = 0.001, 0.001, 0.001\n\t\tthreshold1, threshold2 = threshold, threshold\n\t\t# threshold1, threshold2 = 1e-03, 1e-03\n\t\tcost_vec = []\n\t\tmin_cost = [0,1000]\n\t\tmin_cost1 = [0,1000]\n\t\tparams_vec = self.params_vec1.copy()\n\t\tparams_vec1 = self.params_vec1.copy()\n\t\tnum_region = len(len_vec)\n\n\t\tratio_vec = np.zeros(num_region)\n\t\tfor i in range(0,num_region):\n\t\t\tratio_vec[i] = len_vec[i][0]\n\n\t\tn_samples = int(sum(ratio_vec))\n\t\tratio_vec = ratio_vec*1.0/n_samples\n\t\tprint(n_samples)\n\t\t\n\t\tparams_vecList = []\n\t\tstate_vecList = []\n\t\t\n\t\tfor iter in range(self.n_iter):\n\t\t\tprint(iter)\n\t\t\tstats = self._initialize_sufficient_statistics()\n\t\t\tcurr_logprob = 0\n\n\t\t\tself.queue = mp.Queue()\n\n\t\t\tprint(\"processes\")\n\t\t\tstart = time.time()\n\t\t\t# processes = [mp.Process(target=self._compute_posteriors_graph_test, args=(len_vec, X, region_id,self.posteriors_test,self.posteriors_test1,self.queue)) for region_id in range(0,num_region)]\n\t\t\tprocesses = [mp.Process(target=self._predict_posteriors, \n\t\t\t\t\t\targs=(X, len_vec, region_id, self.queue)) for region_id in range(0,num_region)]\n\n\t\t\t# Run processes\n\t\t\tfor p in processes:\n\t\t\t\tp.start()\n\n\t\t\t# m_queue.put((region_id, labels, posteriors, t_pairwise_cost1, t_pairwise_cost, t_unary_cost, t_cost1))\n\t\t\tprint(\"query\")\n\t\t\tresults = [self.queue.get() for p in processes]\n\t\t\tprint(len(results))\t\t\t\n\n\t\t\t# Exit the completed processes\n\t\t\tprint(\"join\")\n\t\t\tfor p in processes:\n\t\t\t\tp.join()\n\n\t\t\tend = time.time()\n\t\t\tprint(\"use time %d:\"%(iter))\n\t\t\tprint(end - start)\n\n\t\t\tpairwise_cost1, pairwise_cost, unary_cost, cost1 = 0, 0, 0, 0\n\t\t\t\n\t\t\tid1 = 3\n\t\t\tlabels = np.zeros(n_samples)\n\t\t\tfor i in range(0, num_region):\t\t\n\t\t\t\tvec1 = results[i]\n\t\t\t\t# print(vec1[1])\n\t\t\t\tregion_id = vec1[0]\n\t\t\t\tpairwise_cost1 += vec1[id1]*ratio_vec[region_id]\n\t\t\t\tpairwise_cost += vec1[id1+1]*ratio_vec[region_id]\n\t\t\t\tunary_cost += vec1[id1+2]*ratio_vec[region_id]\n\t\t\t\tcost1 += vec1[id1+3]*ratio_vec[region_id]\n\t\t\t\ts1, s2 = len_vec[region_id][1], len_vec[region_id][2]\n\t\t\t\tstats = self._accumulate_sufficient_statistics_1(stats, vec1[1])\n\t\t\t\tlabels[s1:s2] = vec1[2]\n\n\t\t\t\tprint(vec1[id1:id1+4])\n\n\t\t\tt_difference1 = abs((pairwise_cost-pairwise_cost_pre)*1.0/pairwise_cost_pre)\n\t\t\tt_difference2 = abs((unary_cost-unary_cost_pre)*1.0/unary_cost_pre)\n\t\t\tt_difference3 = abs((cost1-cost1_pre)*1.0/cost1_pre)\n\t\t\t\n\t\t\t# 
print(\"Maximization...\")\n\t\t\tprint(pairwise_cost_pre,pairwise_cost,unary_cost_pre,unary_cost,cost1_pre,cost1)\n\t\t\tprint(t_difference1,t_difference2,t_difference3)\n\t\t\tpairwise_cost_pre, unary_cost_pre, cost1_pre = pairwise_cost, unary_cost, cost1\n\t\t\tcost_vec.append([iter, pairwise_cost, unary_cost, cost1])\n\n\t\t\tparams_vecList.append(self.params_vec1.copy())\n\t\t\tstate_vecList.append(labels)\n\t\t\tself.labels = labels.copy()\n\n\t\t\tif cost1<min_cost[1]:\n\t\t\t\tmin_cost = [iter,cost1]\n\t\t\t\tparams_vec = self.params_vec1.copy()\n\t\t\t\tself.labels_local = self.labels.copy()\t# current local optimal state estimate\n\t\t\t\t\n\t\t\t\tprint(\"another temp min\")\n\n\t\t\tif cost1<min_cost1[1] and iter>=3:\n\t\t\t\tmin_cost1 = [iter,cost1]\n\t\t\t\tparams_vec1 = self.params_vec1.copy()\n\t\t\t\tprint(\"another temp min from iteration 3\")\n\n\t\t\tif (t_difference1<threshold1 and t_difference2<threshold2) or (t_difference3<threshold1):\n\t\t\t\tbreak\n\n\t\t\tif iter>max_iter:\n\t\t\t\tbreak\n\n\t\t\tif iter-min_cost1[0]>max_iter1:\n\t\t\t\tbreak\n\n\t\t\tprint(\"Maximization...\")\n\n\t\t\tself._do_mstep(stats)\n\n\t\tself.params_vec1 = params_vec1.copy()\n\t\tself._ou_param_varied_constraint(params_vec)\n\n\t\tprint(min_cost)\n\t\tprint(params_vec)\n\t\tcost_vec = np.asarray(cost_vec)\n\t\tprint(cost_vec)\n\n\t\tparams_vecList = np.asarray(params_vecList)\n\t\tstate_vecList = np.asarray(state_vecList)\n\n\t\treturn params_vec, params_vec1, params_vecList, state_vecList, min_cost[0], min_cost1[0], cost_vec\n\n\tdef fit_accumulate_test(self, X, len_vec, threshold, annotation, m_iter, lengths=None):\n\t\t\"\"\"Estimate model parameters.\n\t\t\"\"\"\n\t\tprint(\"Initialization...\")\n\t\t# X = check_array(X)\n\t\tstart = time.time()\n\t\tself._init(X, lengths=lengths)\n\t\tprint(\"return from initialization...\")\n\t\tend = time.time()\n\t\tprint(\"use time %s:\"%(end-start))\n\n\t\tprint(\"starting...\")\n\t\tself._check()\n\n\t\tprint(\"model fitting...\")\n\t\tself.monitor_ = ConvergenceMonitor(self.tol, self.n_iter, self.verbose)\n\t\t# self.n_iter = 100\n\t\tmax_iter = m_iter\n\t\tmax_iter1 = 50\t# iterations after the previous minimum\n\t\ttype_id = 0\n\t\tpairwise_cost_pre, unary_cost_pre, cost1_pre = 0.001, 0.001, 0.001\n\n\t\tthreshold1, threshold2 = threshold, threshold\n\t\t# threshold1, threshold2 = 1e-03, 1e-03\n\t\tcost_vec = []\n\t\tmin_cost = [0,1000]\n\t\tmin_cost1 = [0,1000]\n\t\tparams_vec = self.params_vec1.copy()\n\t\tparams_vec1 = self.params_vec1.copy()\n\t\tnum_region = len(len_vec)\n\n\t\tratio_vec = np.zeros(num_region)\n\t\tfor i in range(0,num_region):\n\t\t\tratio_vec[i] = len_vec[i][0]\n\n\t\tn_samples = int(sum(ratio_vec))\n\t\tratio_vec = ratio_vec*1.0/n_samples\n\t\tprint(n_samples)\n\t\t\n\t\tparams_vecList = []\n\t\tstate_vecList = []\t\t\n\t\tt_labels = np.zeros(n_samples)\n\n\t\tprint(\"n_iter, m_iter: %d %d\"%(self.n_iter, max_iter))\n\t\t\n\t\t# for iter in range(self.n_iter):\n\t\tfor iter in range(max_iter):\n\t\t\tprint(iter)\n\t\t\tstats = self._initialize_sufficient_statistics()\n\t\t\tcurr_logprob = 0\n\n\t\t\tself.queue = mp.Queue()\n\n\t\t\tprint(\"processes\")\n\t\t\tstart = time.time()\n\t\t\t# processes = [mp.Process(target=self._compute_posteriors_graph_test, args=(len_vec, X, region_id,self.posteriors_test,self.posteriors_test1,self.queue)) for region_id in range(0,num_region)]\n\t\t\tprocesses = [mp.Process(target=self._predict_posteriors, \n\t\t\t\t\t\targs=(X, len_vec, region_id, self.queue)) for region_id in 
range(0,num_region)]\n\n\t\t\t# Run processes\n\t\t\tfor p in processes:\n\t\t\t\tp.start()\n\n\t\t\t# m_queue.put((region_id, labels, posteriors, t_pairwise_cost1, t_pairwise_cost, t_unary_cost, t_cost1))\n\t\t\tprint(\"query\")\n\t\t\tresults = [self.queue.get() for p in processes]\n\t\t\tprint(len(results))\t\t\t\n\n\t\t\t# Exit the completed processes\n\t\t\tprint(\"join\")\n\t\t\tfor p in processes:\n\t\t\t\tp.join()\n\n\t\t\tend = time.time()\n\t\t\tprint(\"use time %d:\"%(iter))\n\t\t\tprint(end - start)\n\n\t\t\tpairwise_cost1, pairwise_cost, unary_cost, cost1 = 0, 0, 0, 0\n\t\t\t\n\t\t\tid1 = 3\n\t\t\tlabels = np.zeros(n_samples)\n\n\t\t\tstart = time.time()\n\t\t\tfor i in range(0, num_region):\t\t\n\t\t\t\tvec1 = results[i]\n\t\t\t\t# print(vec1[1])\n\t\t\t\tregion_id = vec1[0]\n\t\t\t\tpairwise_cost1 += vec1[id1]*ratio_vec[region_id]\n\t\t\t\tpairwise_cost += vec1[id1+1]*ratio_vec[region_id]\n\t\t\t\tunary_cost += vec1[id1+2]*ratio_vec[region_id]\n\t\t\t\tcost1 += vec1[id1+3]*ratio_vec[region_id]\n\t\t\t\ts1, s2 = len_vec[region_id][1], len_vec[region_id][2]\n\t\t\t\tstats = self._accumulate_sufficient_statistics_1(stats, vec1[1])\n\t\t\t\tlabels[s1:s2] = vec1[2]\n\n\t\t\t\tprint(vec1[id1:id1+4])\n\n\t\t\tend = time.time()\n\t\t\tprint(\"accumulate use time %d:\"%(iter))\n\t\t\tprint(end - start)\n\n\t\t\tt_difference1 = abs((pairwise_cost-pairwise_cost_pre)*1.0/pairwise_cost_pre)\n\t\t\tt_difference2 = abs((unary_cost-unary_cost_pre)*1.0/unary_cost_pre)\n\t\t\tt_difference3 = abs((cost1-cost1_pre)*1.0/cost1_pre)\n\t\t\t\n\t\t\t# print(\"Maximization...\")\n\t\t\tprint(pairwise_cost_pre,pairwise_cost,unary_cost_pre,unary_cost,cost1_pre,cost1)\n\t\t\tprint(t_difference1,t_difference2,t_difference3)\n\t\t\tpairwise_cost_pre, unary_cost_pre, cost1_pre = pairwise_cost, unary_cost, cost1\n\t\t\tcost_vec.append([iter, pairwise_cost, unary_cost, cost1])\n\n\t\t\tparams_vecList.append(self.params_vec1.copy())\n\t\t\t# state_vecList.append(labels)\n\t\t\tself.labels = labels.copy()\n\n\t\t\tif cost1<min_cost[1]:\n\t\t\t\tmin_cost = [iter,cost1]\n\t\t\t\tparams_vec = self.params_vec1.copy()\n\t\t\t\tself.labels_local = self.labels.copy()\t# current local optimal state estimate\t\t\t\t\n\t\t\t\tprint(\"another temp min\")\n\n\t\t\tif cost1<min_cost1[1] and iter>=3:\n\t\t\t\tmin_cost1 = [iter,cost1]\n\t\t\t\tparams_vec1 = self.params_vec1.copy()\n\t\t\t\tprint(\"another temp min from iteration 3\")\n\t\t\t\tt_labels = self.labels.copy() # keep the estimated labels\n\n\t\t\tif ((t_difference1<threshold1 and t_difference2<threshold2) or (t_difference3<threshold1)) and (iter>5):\n\t\t\t\tbreak\n\n\t\t\tif iter>max_iter:\n\t\t\t\tbreak\n\n\t\t\tif iter-min_cost1[0]>max_iter1:\n\t\t\t\tbreak\n\n\t\t\tprint(\"Maximization...\")\n\n\t\t\tstart = time.time()\n\t\t\tself._do_mstep(stats)\n\t\t\tend = time.time()\n\t\t\tprint(\"maximization use time %d %s\"%(iter,end-start))\n\n\t\tself.params_vec1 = params_vec1.copy()\n\t\tself._ou_param_varied_constraint(params_vec)\n\n\t\tprint(min_cost)\n\t\tprint(params_vec)\n\t\tcost_vec = np.asarray(cost_vec)\n\t\tprint(cost_vec)\n\n\t\tparams_vecList = np.asarray(params_vecList)\n\t\t# state_vecList = np.asarray(state_vecList)\n\n\t\treturn params_vec, params_vec1, params_vecList, min_cost[0], min_cost1[0], cost_vec, t_labels\n\n\tdef _do_func(self, framelogprob):\n\t\t\"\"\"Estimate objective function value.\n\n\t\t\"\"\"\n\n\tdef _compute_posteriors_graph(self, X, label, logprob, region_id):\n\t\t\"\"\"Computes per-component posteriors under the 
model.\n\n\t\tParameters\n\t\t----------\n\t\tX : array-like, shape (n_samples in a synteny region, n_features)\n\t\t\tFeature matrix of individual samples.\n\n\t\tlabel : array-like, shape (n_samples in a synteny region)\n\t\t\tcurrent estimated states of individual samples.\n\n\t\tlogprob : array-like, shape (n_samples in a synteny region)\n\t\t\tlog probability of individual samples.\n\n\t\tregion_id: int, index of the synteny region\n\n\t\tReturns\n\t\t-------\n\t\tposteriors : array, shape (n_samples, n_components)\n\t\t\tPosterior probability of each sample in ``X`` for each of the\n\t\t\tmodel states.\n\t\t\"\"\"\n\n\tdef _estimate_state_graphcuts(self, X):\n\t\t\"\"\"Estimates states under the model.\n\n\t\tParameters\n\t\t----------\n\n\t\tReturns\n\t\t-------\n\t\tstates : array, shape (n_samples, 1)\n\t\t\tEstimated state of each sample in ``X``\n\t\t\"\"\"\n\n\tdef _init(self, X, lengths=None):\n\t\t\"\"\"Initializes model parameters prior to fitting.\n\n\t\tParameters\n\t\t----------\n\t\tX : array-like, shape (n_samples, n_features)\n\t\t\tFeature matrix of individual samples.\n\n\t\tlengths : array-like of integers, shape (n_sequences, )\n\t\t\tLengths of the individual sequences in ``X``. The sum of\n\t\t\tthese should be ``n_samples``.\n\t\t\"\"\"\n\t\tinit = 1. / self.n_components\n\t\tif 's' in self.init_params or not hasattr(self, \"startprob_\"):\n\t\t\tself.startprob_ = np.full(self.n_components, init)\n\t\tif 't' in self.init_params or not hasattr(self, \"transmat_\"):\n\t\t\tself.transmat_ = np.full((self.n_components, self.n_components),\n\t\t\t\t\t\t\t\t\t init)\n\n\tdef _check(self):\n\t\t\"\"\"Validates model parameters prior to fitting.\n\n\t\tRaises\n\t\t------\n\n\t\tValueError\n\t\t\tIf any of the parameters are invalid, e.g. if :attr:`startprob_`\n\t\t\tdoesn't sum to 1.\n\t\t\"\"\"\n\t\tself.startprob_ = np.asarray(self.startprob_)\n\t\tif len(self.startprob_) != self.n_components:\n\t\t\traise ValueError(\"startprob_ must have length n_components\")\n\t\tif not np.allclose(self.startprob_.sum(), 1.0):\n\t\t\traise ValueError(\"startprob_ must sum to 1.0 (got {0:.4f})\"\n\t\t\t\t\t\t\t .format(self.startprob_.sum()))\n\n\t\tself.transmat_ = np.asarray(self.transmat_)\n\t\tif self.transmat_.shape != (self.n_components, self.n_components):\n\t\t\traise ValueError(\n\t\t\t\t\"transmat_ must have shape (n_components, n_components)\")\n\t\tif not np.allclose(self.transmat_.sum(axis=1), 1.0):\n\t\t\traise ValueError(\"rows of transmat_ must sum to 1.0 (got {0})\"\n\t\t\t\t\t\t\t .format(self.transmat_.sum(axis=1)))\n\n\tdef _compute_log_likelihood(self, X):\n\t\t\"\"\"Computes per-component log probability under the model.\n\n\t\tParameters\n\t\t----------\n\t\tX : array-like, shape (n_samples, n_features)\n\t\t\tFeature matrix of individual samples.\n\n\t\tReturns\n\t\t-------\n\t\tlogprob : array, shape (n_samples, n_components)\n\t\t\tLog probability of each sample in ``X`` for each of the\n\t\t\tmodel states.\n\t\t\"\"\"\n\n\tdef _generate_sample_from_state(self, state, random_state=None):\n\t\t\"\"\"Generates a random sample from a given component.\n\n\t\t\"\"\"\n\n\t# Methods used by self.fit()\n\n\tdef _initialize_sufficient_statistics(self):\n\t\t\"\"\"Initializes sufficient statistics required for M-step.\n\n\t\t\"\"\"\n\t\tstats = {'nobs': 0,\n\t\t\t\t 'start': np.zeros(self.n_components),\n\t\t\t\t 'trans': np.zeros((self.n_components, self.n_components))}\n\t\treturn stats\n\n\tdef _accumulate_sufficient_statistics_1(self, stats, stats1):\n\t\t\"\"\"Updates sufficient 
statistics from a given sample.\n\n\t\t\"\"\"\n\t\tstats['post'] += stats1['post']\n\t\tstats['obs'] += stats1['obs']\n\t\t# stats['obs**2'] = np.zeros((self.n_components, self.n_features)\n\t\tstats['obs*obs.T'] += stats1['obs*obs.T']\n\n\t\treturn stats\n\n\tdef _accumulate_sufficient_statistics(self, stats, X, framelogprob,\n\t\t\t\t\t\t\t\t\t\t posteriors):\n\t\t\"\"\"Updates sufficient statistics from a given sample.\n\n\t\t\"\"\"\n\t\tstats['nobs'] += 1\n\n\tdef _do_mstep(self, stats):\n\t\t\"\"\"Performs the M-step of EM algorithm.\n\n\t\t\"\"\"\n\t\t# The ``np.where`` calls guard against updating forbidden states\n\t\t# or transitions in e.g. a left-right HMM.\n"
] |
[
[
"numpy.asarray",
"numpy.zeros",
"numpy.full"
]
] |
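An illustrative run (the log-probability values are made up) of the ConvergenceMonitor defined in base.py above:

monitor = ConvergenceMonitor(tol=1e-2, n_iter=10, verbose=True)
for logprob in [-120.0, -110.5, -110.499]:   # log likelihoods from successive EM iterations
    monitor.report(logprob)                  # prints iteration, logprob and delta to stderr
    if monitor.converged:                    # improvement below tol, or n_iter reached
        break
print(monitor.iter)                          # 3: converged on the third report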
ahalfpen727/Genomic-Analyses-w-Python
|
[
"7da10e4bb7d5ca0bdb2cf77870be8a55ad4fc082"
] |
[
"scripts/single_vcf_stats.py"
] |
[
"import argparse\nfrom collections import defaultdict\nimport gzip\n\nimport numpy as np\nfrom text_histogram import histogram\n\n\np = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\np.add_argument(\"vcf\", nargs=\"+\")\nargs = p.parse_args()\n\nvcf_paths = args.vcf\n\nc = defaultdict(int)\nfor vcf_path in vcf_paths:\n print(\"============================\")\n alt_allele_sizes = []\n with gzip.open(vcf_path) if vcf_path.endswith(\".gz\") else open(vcf_path) as f:\n for line in f:\n if line.startswith(\"#\"):\n continue\n fields = line.strip().split(\"\\t\")\n ref = fields[3]\n alt_alleles = fields[4]\n for alt in alt_alleles.split(\",\"):\n c['total alleles'] += 1\n if len(ref) < len(alt):\n c['insertions'] += 1\n alt_allele_sizes.append(len(alt))\n elif len(ref) > len(alt):\n c['deletions'] += 1\n elif len(ref) == 1 and len(alt) == 1:\n c['snps'] += 1\n\n for label, value in sorted(c.items(), key=lambda x: (x[1], x[0]), reverse=True):\n print(\"==> %15s : %9d\" % (label, value))\n\n print(\"log2(alt allele length) for insertions in \" + vcf_path)\n histogram(np.log2(alt_allele_sizes), custbuckets=\",\".join([\"%0.1f\" % (k/2.0) for k in range(1, 20)]))\n"
] |
[
[
"numpy.log2"
]
] |
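The script above is driven entirely from the command line; a hypothetical invocation (file names are illustrative) looks like:

# python single_vcf_stats.py sample1.vcf.gz sample2.vcf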
pschindler/Cirq
|
[
"b008e8373c2025ae28eaf3b7c21b7a0b307f860e"
] |
[
"cirq/contrib/quantum_volume/quantum_volume.py"
] |
[
"\"\"\"Utility functions to run the Quantum Volume benchmark defined by IBM in\nhttps://arxiv.org/abs/1811.12926.\n\"\"\"\n\nfrom typing import Optional, List, cast, Callable, Dict, Tuple, Set\nfrom dataclasses import dataclass\n\nimport numpy as np\nimport pandas as pd\n\nimport cirq\nimport cirq.contrib.routing as ccr\n\n\ndef generate_model_circuit(num_qubits: int,\n depth: int,\n *,\n random_state: cirq.value.RANDOM_STATE_LIKE = None\n ) -> cirq.Circuit:\n \"\"\"Generates a model circuit with the given number of qubits and depth.\n\n The generated circuit consists of `depth` layers of random qubit\n permutations followed by random two-qubit gates that are sampled from the\n Haar measure on SU(4).\n\n Args:\n num_qubits: The number of qubits in the generated circuit.\n depth: The number of layers in the circuit.\n random_state: Random state or random state seed.\n\n Returns:\n The generated circuit.\n \"\"\"\n # Setup the circuit and its qubits.\n qubits = cirq.LineQubit.range(num_qubits)\n circuit = cirq.Circuit()\n random_state = cirq.value.parse_random_state(random_state)\n\n # For each layer.\n for _ in range(depth):\n # Generate uniformly random permutation Pj of [0...n-1]\n perm = random_state.permutation(num_qubits)\n\n # For each consecutive pair in Pj, generate Haar random SU(4)\n # Decompose each SU(4) into CNOT + SU(2) and add to Ci\n for k in range(0, num_qubits - 1, 2):\n permuted_indices = [int(perm[k]), int(perm[k + 1])]\n special_unitary = cirq.testing.random_special_unitary(\n 4, random_state=random_state)\n\n # Convert the decomposed unitary to Cirq operations and add them to\n # the circuit.\n circuit.append(\n cirq.TwoQubitMatrixGate(special_unitary).on(\n qubits[permuted_indices[0]], qubits[permuted_indices[1]]))\n\n # Don't measure all of the qubits at the end of the circuit because we will\n # need to classically simulate it to compute its heavy set.\n return circuit\n\n\ndef compute_heavy_set(circuit: cirq.Circuit) -> List[int]:\n \"\"\"Classically compute the heavy set of the given circuit.\n\n The heavy set is defined as the output bit-strings that have a greater than\n median probability of being generated.\n\n Args:\n circuit: The circuit to classically simulate.\n\n Returns:\n A list containing all of the heavy bit-string results.\n \"\"\"\n # Classically compute the probabilities of each output bit-string through\n # simulation.\n simulator = cirq.Simulator()\n results = cast(cirq.WaveFunctionTrialResult,\n simulator.simulate(program=circuit))\n\n # Compute the median probability of the output bit-strings. Note that heavy\n # output is defined in terms of probabilities, where our wave function is in\n # terms of amplitudes. We convert it by using the Born rule: squaring each\n # amplitude and taking their absolute value\n median = np.median(np.abs(results.state_vector()**2))\n\n # The output wave function is a vector from the result value (big-endian) to\n # the probability of that bit-string. 
Return all of the bit-string\n # values that have a probability greater than the median.\n return [\n idx for idx, amp in enumerate(results.state_vector())\n if np.abs(amp**2) > median\n ]\n\n\n@dataclass\nclass CompilationResult:\n swap_network: ccr.SwapNetwork\n parity_map: Dict[cirq.Qid, cirq.Qid]\n\n\ndef sample_heavy_set(compilation_result: CompilationResult,\n heavy_set: List[int],\n *,\n repetitions=10_000,\n sampler: cirq.Sampler = cirq.Simulator()) -> float:\n \"\"\"Run a sampler over the given circuit and compute the percentage of its\n outputs that are in the heavy set.\n\n Args:\n compilation_result: All the information from the compilation.\n heavy_set: The previously-computed heavy set for the given circuit.\n repetitions: The number of times to sample the circuit.\n sampler: The sampler to run on the given circuit.\n\n Returns:\n A probability percentage, from 0 to 1, representing how many of the\n output bit-strings were in the heavy set.\n\n \"\"\"\n mapping = compilation_result.swap_network.final_mapping()\n circuit = compilation_result.swap_network.circuit\n\n # Add measure gates to the end of (a copy of) the circuit. Ensure that those\n # gates measure those in the given mapping, preserving this order.\n qubits = circuit.all_qubits()\n key = None\n if mapping:\n # Add any qubits that were not explicitly mapped, so they aren't lost in\n # the sorting.\n key = lambda q: mapping.get(q, q)\n qubits = frozenset(mapping.keys())\n\n # Don't do a single large measurement gate because then the key will be one\n # large string. Instead, do a bunch of single-qubit measurement gates so we\n # preserve the qubit keys.\n sorted_qubits = sorted(qubits, key=key)\n circuit_copy = circuit + [cirq.measure(q) for q in sorted_qubits]\n\n # Run the sampler to compare each output against the Heavy Set.\n trial_result = sampler.run(program=circuit_copy, repetitions=repetitions)\n\n # Post-process the results, e.g. 
to handle error corrections.\n    results = process_results(mapping, compilation_result.parity_map,\n                              trial_result)\n\n    # Aggregate the results into bit-strings (since we are using individual\n    # measurement gates).\n\n    results = results.agg(lambda meas: cirq.value.big_endian_bits_to_int(meas),\n                          axis=1)\n    # Compute the number of outputs that are in the heavy set.\n    num_in_heavy_set = np.sum(np.in1d(results, heavy_set))\n\n    # Return the number of Heavy outputs over the number of valid runs.\n    return num_in_heavy_set / len(results)\n\n\ndef process_results(mapping: Dict[cirq.Qid, cirq.Qid],\n                    parity_mapping: Dict[cirq.Qid, cirq.Qid],\n                    trial_result: cirq.TrialResult) -> pd.DataFrame:\n    \"\"\"Checks the given results for parity and throws away all of the runs that\n    don't pass the parity test.\n\n    Args:\n        mapping: The circuit's mapping from logical qubit to physical qubit.\n        parity_mapping: The mapping from result qubit to its parity qubit.\n        trial_result: The results to process.\n\n    Returns:\n        Returns the rows that passed the parity test, with the parity qubit\n        measurements removed.\n\n    \"\"\"\n    # The circuit's mapping from physical qubit to logical qubit.\n    inverse_mapping: Dict[cirq.Qid, cirq.Qid] = {\n        v: k for k, v in mapping.items()\n    }\n\n    # Calculate all the invalid parity pairs.\n    data = trial_result.data\n    bad_measurements: Set[int] = set()\n    for final_qubit, original_qubit in mapping.items():\n        if original_qubit in parity_mapping:\n            final_parity_qubit = inverse_mapping[parity_mapping[original_qubit]]\n            mismatches = np.nonzero(\n                data[str(final_qubit)] == data[str(final_parity_qubit)])\n            bad_measurements.update(*mismatches)\n\n    # Remove the parity qubits from the measurements.\n    for parity_qubit in parity_mapping.values():\n        data.drop(str(inverse_mapping[parity_qubit]), axis=1, inplace=True)\n\n    print(f\"Dropping {len(bad_measurements)} measurements\")\n    data.drop(bad_measurements, inplace=True)\n\n    return data\n\n\ndef compile_circuit(\n        circuit: cirq.Circuit,\n        *,\n        device: cirq.google.XmonDevice,\n        routing_attempts: int,\n        compiler: Callable[[cirq.Circuit], cirq.Circuit] = None,\n        routing_algo_name: Optional[str] = None,\n        router: Optional[Callable[..., ccr.SwapNetwork]] = None,\n        add_readout_error_correction=False,\n) -> CompilationResult:\n    \"\"\"Compile the given model circuit onto the given device. This uses a\n    different compilation method than described in\n    https://arxiv.org/pdf/1811.12926.pdf Appendix A. The latter goes through a\n    7-step process involving various decompositions, routing, and optimization\n    steps. We route the model circuit and then run a series of optimizers on it\n    (which can be passed into this function).\n\n    Args:\n        circuit: The model circuit to compile.\n        device: The device to compile onto.\n        routing_attempts: See doc for calculate_quantum_volume.\n        compiler: An optional function to deconstruct the model circuit's\n            gates down to the target device's gate set and then optimize it.\n        add_readout_error_correction: If true, add some parity bits that will\n            later be used to detect readout error.\n\n    Returns: A CompilationResult with the routed swap network and the map from\n    each qubit to its parity-check qubit. 
The swap network's final mapping is\n    necessary in order to preserve the measurement order.\n\n    \"\"\"\n    compiled_circuit = circuit.copy()\n\n    # Optionally add the parity check bits.\n    parity_map: Dict[cirq.Qid, cirq.Qid] = {}  # original -> parity\n    if add_readout_error_correction:\n        num_qubits = len(compiled_circuit.all_qubits())\n        # Sort just to make it deterministic.\n        for idx, qubit in enumerate(sorted(compiled_circuit.all_qubits())):\n            # For each qubit, create a new qubit that will serve as its parity\n            # check. This parity bit is flipped to 1 with an X gate and then\n            # CNOTed with the original qubit, so it ends up holding the\n            # complement of that qubit. Later, the two are compared - if they\n            # match, there was likely a readout error.\n            qubit_num = idx + num_qubits\n            parity_qubit = cirq.LineQubit(qubit_num)\n            compiled_circuit.append(cirq.X(parity_qubit))\n            compiled_circuit.append(cirq.CNOT(qubit, parity_qubit))\n            parity_map[qubit] = parity_qubit\n\n    # Swap Mapping (Routing). Ensure the gates can actually operate on the\n    # target qubits given our topology.\n    if router is None and routing_algo_name is None:\n        # TODO: The routing algorithm sometimes does a poor job with the parity\n        # qubits, adding SWAP gates that are unnecessary. This should be fixed,\n        # or we can add the parity qubits manually after routing.\n        routing_algo_name = 'greedy'\n\n    swap_networks: List[ccr.SwapNetwork] = []\n    for _ in range(routing_attempts):\n        swap_network = ccr.route_circuit(compiled_circuit,\n                                         ccr.xmon_device_to_graph(device),\n                                         router=router,\n                                         algo_name=routing_algo_name)\n        swap_networks.append(swap_network)\n    assert len(swap_networks) > 0, 'Unable to get routing for circuit'\n\n    swap_networks.sort(key=lambda swap_network: len(swap_network.circuit))\n\n    if not compiler:\n        return CompilationResult(swap_network=swap_networks[0],\n                                 parity_map=parity_map)\n\n    # Compile. This should decompose the routed circuit down to a gate set that\n    # our device supports, and then optimize. The paper uses various\n    # compiling techniques - because Quantum Volume is intended to test those\n    # as well, we allow this to be passed in. 
This compiler is not allowed to\n    # change the order of the qubits.\n    swap_networks[0].circuit = compiler(swap_networks[0].circuit)\n    return CompilationResult(swap_network=swap_networks[0],\n                             parity_map=parity_map)\n\n\n@dataclass\nclass QuantumVolumeResult:\n    \"\"\"Stores one run of the results and test information used when running the\n    quantum volume benchmark so it may be analyzed in detail afterwards.\n\n    \"\"\"\n    # The model circuit used.\n    model_circuit: cirq.Circuit\n    # The heavy set computed from the above model circuit.\n    heavy_set: List[int]\n    # The model circuit after being compiled.\n    compiled_circuit: cirq.Circuit\n    # The percentage of outputs that this sampler had that were in the heavy\n    # set.\n    sampler_result: float\n\n    def _json_dict_(self):\n        return cirq.protocols.obj_to_dict_helper(self, [\n            'model_circuit', 'heavy_set', 'compiled_circuit', 'sampler_result'\n        ])\n\n\ndef prepare_circuits(\n        *,\n        num_qubits: int,\n        depth: int,\n        num_circuits: int,\n        random_state: cirq.value.RANDOM_STATE_LIKE = None,\n) -> List[Tuple[cirq.Circuit, List[int]]]:\n    \"\"\"Generates circuits and computes their heavy set.\n\n    Args:\n        num_qubits: The number of qubits in the generated circuits.\n        depth: The number of layers in the circuits.\n        num_circuits: The number of circuits to create.\n        random_state: Random state or random state seed.\n\n    Returns:\n        A list of tuples where the first element is a generated model\n        circuit and the second element is the heavy set for that circuit.\n    \"\"\"\n    circuits = []\n    print(\"Computing heavy sets\")\n    for circuit_i in range(num_circuits):\n        model_circuit = generate_model_circuit(num_qubits,\n                                               depth,\n                                               random_state=random_state)\n        heavy_set = compute_heavy_set(model_circuit)\n        print(f\"  Circuit {circuit_i + 1} Heavy Set: {heavy_set}\")\n        circuits.append((model_circuit, heavy_set))\n    return circuits\n\ndef execute_circuits(\n        *,\n        device: cirq.google.XmonDevice,\n        samplers: List[cirq.Sampler],\n        circuits: List[Tuple[cirq.Circuit, List[int]]],\n        routing_attempts: int,\n        compiler: Callable[[cirq.Circuit], cirq.Circuit] = None,\n        repetitions: int = 10_000,\n        add_readout_error_correction=False,\n) -> List[QuantumVolumeResult]:\n    \"\"\"Executes the given circuits on the given samplers.\n\n    Args:\n        device: The device to run the compiled circuit on.\n        samplers: The samplers to run the algorithm on.\n        circuits: The circuits to sample from.\n        routing_attempts: See doc for calculate_quantum_volume.\n        compiler: An optional function to compile the model circuit's\n            gates down to the target device's gate set and then optimize it.\n        repetitions: The number of bitstrings to sample per circuit.\n        add_readout_error_correction: If true, add some parity bits that will\n            later be used to detect readout error.\n\n    Returns:\n        A list of QuantumVolumeResults that contains all of the information for\n        running the algorithm and its results.\n\n    \"\"\"\n    # First, compile all of the model circuits.\n    print(\"Compiling model circuits\")\n    compiled_circuits: List[CompilationResult] = []\n    for idx, (model_circuit, heavy_set) in enumerate(circuits):\n        print(f\"  Compiling model circuit #{idx + 1}\")\n        compiled_circuits.append(\n            compile_circuit(\n                model_circuit,\n                device=device,\n                compiler=compiler,\n                routing_attempts=routing_attempts,\n                add_readout_error_correction=add_readout_error_correction))\n\n    # Next, run the compiled circuits on each sampler.\n    results = []\n    print(\"Running samplers over compiled circuits\")\n    for sampler_i, sampler in enumerate(samplers):\n        print(f\"  Running 
sampler #{sampler_i + 1}\")\n        for circuit_i, compilation_result in enumerate(compiled_circuits):\n            model_circuit, heavy_set = circuits[circuit_i]\n            prob = sample_heavy_set(compilation_result,\n                                    heavy_set,\n                                    repetitions=repetitions,\n                                    sampler=sampler)\n            print(f\"  Compiled HOG probability #{circuit_i + 1}: {prob}\")\n            results.append(\n                QuantumVolumeResult(\n                    model_circuit=model_circuit,\n                    heavy_set=heavy_set,\n                    compiled_circuit=compilation_result.swap_network.circuit,\n                    sampler_result=prob))\n    return results\n\n\ndef calculate_quantum_volume(\n        *,\n        num_qubits: int,\n        depth: int,\n        num_circuits: int,\n        device: cirq.google.XmonDevice,\n        samplers: List[cirq.Sampler],\n        random_state: cirq.value.RANDOM_STATE_LIKE = None,\n        compiler: Callable[[cirq.Circuit], cirq.Circuit] = None,\n        repetitions=10_000,\n        routing_attempts=30,\n        add_readout_error_correction=False,\n) -> List[QuantumVolumeResult]:\n    \"\"\"Run the quantum volume algorithm.\n\n    This algorithm should compute the same values as Algorithm 1 in\n    https://arxiv.org/abs/1811.12926. To summarize, we generate a random model\n    circuit, compute its heavy set, then transpile an implementation onto our\n    architecture. This implementation is run a series of times and if the\n    percentage of outputs that are in the heavy set is greater than 2/3, we\n    consider the quantum volume test passed for that size.\n\n    Args:\n        num_qubits: The number of qubits for the circuit.\n        depth: The number of gate layers to generate.\n        num_circuits: The number of random circuits to run.\n        random_state: Random state or random state seed.\n        device: The device to run the compiled circuit on.\n        samplers: The samplers to run the algorithm on.\n        compiler: An optional function to compile the model circuit's\n            gates down to the target device's gate set and then optimize it.\n        repetitions: The number of bitstrings to sample per circuit.\n        routing_attempts: The number of times to route each model circuit onto\n            the device. Each attempt will be graded using an ideal simulator\n            and the best one will be used.\n        add_readout_error_correction: If true, add some parity bits that will\n            later be used to detect readout error. WARNING: This makes the\n            simulator run extremely slowly for any width/depth of 4 or more,\n            because it doubles the circuit size. In reality, the simulator\n            shouldn't need to use this larger circuit for the majority of\n            operations, since they only come into play at the end.\n\n    Returns: A list of QuantumVolumeResults that contains all of the information\n    for running the algorithm and its results.\n\n    \"\"\"\n    circuits = prepare_circuits(num_qubits=num_qubits,\n                                depth=depth,\n                                num_circuits=num_circuits,\n                                random_state=random_state)\n    return execute_circuits(\n        circuits=circuits,\n        device=device,\n        compiler=compiler,\n        samplers=samplers,\n        repetitions=repetitions,\n        routing_attempts=routing_attempts,\n        add_readout_error_correction=add_readout_error_correction,\n    )\n"
] |
[
[
"numpy.in1d",
"numpy.abs"
]
] |
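A hypothetical driver for the helpers above, assuming the older Cirq API (e.g. cirq.TwoQubitMatrixGate, cirq.WaveFunctionTrialResult) that this module targets:

import cirq

model_circuit = generate_model_circuit(4, 4, random_state=42)  # 4 qubits, depth 4
heavy_set = compute_heavy_set(model_circuit)                   # via classical simulation
print(f"{len(heavy_set)} heavy bit-strings out of {2 ** 4}")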
Helianus/Reinforcement-Learning-Specialization-Coursera
|
[
"fef680e97da195aaf65c74d933d8d4a7983ac561"
] |
[
"1-Fundamentals-of-Reinforcement-Learning/Bandits/ten_arm_env.py"
] |
[
"#!/usr/bin/env python\n\nfrom rlglue.environment import BaseEnvironment\n\nimport numpy as np\n\nclass Environment(BaseEnvironment):\n \"\"\"Implements the environment for an RLGlue environment\n\n Note:\n env_init, env_start, env_step, env_cleanup, and env_message are required\n methods.\n \"\"\"\n\n actions = [0]\n\n def __init__(self):\n reward = None\n observation = None\n termination = None\n self.reward_obs_term = (reward, observation, termination)\n self.count = 0\n self.arms = []\n self.seed = None\n\n def env_init(self, env_info={}):\n \"\"\"Setup for the environment called when the experiment first starts.\n\n Note:\n Initialize a tuple with the reward, first state observation, boolean\n indicating if it's terminal.\n \"\"\"\n\n self.arms = np.random.randn(10)#[np.random.normal(0.0, 1.0) for _ in range(10)]\n local_observation = 0 # An empty NumPy array\n\n self.reward_obs_term = (0.0, local_observation, False)\n\n\n def env_start(self):\n \"\"\"The first method called when the experiment starts, called before the\n agent starts.\n\n Returns:\n The first state observation from the environment.\n \"\"\"\n return self.reward_obs_term[1]\n\n def env_step(self, action):\n \"\"\"A step taken by the environment.\n\n Args:\n action: The action taken by the agent\n\n Returns:\n (float, state, Boolean): a tuple of the reward, state observation,\n and boolean indicating if it's terminal.\n \"\"\"\n\n # if action == 0:\n # if np.random.random() < 0.2:\n # reward = 14\n # else:\n # reward = 6\n\n # if action == 1:\n # reward = np.random.choice(range(10,14))\n\n # if action == 2:\n # if np.random.random() < 0.8:\n # reward = 174\n # else:\n # reward = 7\n\n # reward = np.random.normal(self.arms[action], 1.0)\n\n reward = self.arms[action] + np.random.randn()\n\n obs = self.reward_obs_term[1]\n\n self.reward_obs_term = (reward, obs, False)\n\n return self.reward_obs_term\n\n def env_cleanup(self):\n \"\"\"Cleanup done after the environment ends\"\"\"\n pass\n\n def env_message(self, message):\n \"\"\"A message asking the environment for information\n\n Args:\n message (string): the message passed to the environment\n\n Returns:\n string: the response (or answer) to the message\n \"\"\"\n if message == \"what is the current reward?\":\n return \"{}\".format(self.reward_obs_term[0])\n\n # else\n return \"I don't know how to respond to your message\"\n"
] |
[
[
"numpy.random.randn"
]
] |
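A minimal, illustrative interaction with the ten-armed bandit environment above:

env = Environment()
env.env_init()
obs = env.env_start()                     # initial observation (always 0 here)
reward, obs, terminal = env.env_step(3)   # pull arm 3
print(reward, terminal)                   # Gaussian reward centred on arms[3]; never terminal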
Pratheebhak/Digitize-Docs
|
[
"14521b2687011927b016f7eebef12f145b4fa894"
] |
[
"source/extract.py"
] |
[
"\"\"\" Extract entities from the handwritten OCR text \"\"\"\n\"\"\" Parts of code redacted due to copyright \"\"\"\n\n\nimport spacy\nimport pandas as pd\nimport re\nimport jellyfish\nfrom fuzzywuzzy import fuzz\n\n\nclass handwrittenText:\n\n nlp = spacy.load(\"en_core_web_sm\")\n\n # Load the input data into dataframes\n dfGenus = pd.read_csv(\"data/genus_combined.txt\", header=None)\n dfSpecies = pd.read_csv(\"data/plant_species.csv\")\n\n # Preprocess the textual information in the dataframes\n dfGenus.columns = [\"genus\"]\n dfGenus[\"genus\"] = dfGenus[\"genus\"].str.replace(r\"\\(.*\\)\", \"\")\n dfGenus[\"genus\"] = dfGenus[\"genus\"].str.lower()\n dfGenus[\"genus\"] = dfGenus[\"genus\"].str.rstrip()\n genusList = dfGenus[\"genus\"].to_list()\n\n dfSpecies[\"species\"] = dfSpecies[\"species\"].str.lower()\n speciesset = set(dfSpecies[\"species\"].dropna().unique().tolist())\n speciesList = list(speciesset)\n dfSpecies[\"genus\"] = dfSpecies[\"genus\"].str.lower()\n\n # Create a dictionary using the dataframes\n # key : genus\n # values: list of species associated with the genus\n namesDictionary = {key: [] for key in set(genusList)}\n\n for index in range(len(dfSpecies)):\n if dfSpecies.loc[index, \"genus\"] in namesDictionary:\n namesDictionary[dfSpecies.loc[index, \"genus\"]].append(\n dfSpecies.loc[index, \"species\"]\n )\n\n # Load and preprocess countries and states input text files\n dfCountries = pd.read_csv(\"data/countries.txt\", header=None)\n countries = dfCountries[0].to_list()\n countries = [x.lower() for x in countries]\n\n dfStates = pd.read_csv(\"data/states.csv\")\n states = dfStates[\"State\"].to_list()\n states = [x.lower() for x in states]\n\n def findBarcode(self, text):\n \"\"\"\n Extract barcode(s) from the processed text\n \"\"\"\n barcodes = []\n for string in text:\n if string.isdigit() and len(string) == 8:\n if len(set(string)) == 1:\n break\n barcodes.append(string)\n return barcodes\n\n def findYear(self, text):\n \"\"\"\n Extract year(s) from the processed text\n \"\"\"\n years = []\n for string in text:\n if string.isdigit() and len(string) == 4:\n if re.search(r\"1[8-9]\\d\\d\", string):\n years.append(string)\n if re.search(r\"20[0-2]\\d\", string):\n years.append(string)\n\n return list(set(years))\n\n def findScientificName(self, text):\n \"\"\"\n Extract scientific name(s) (genus + species) from the processed text\n Code Redacted\n \"\"\"\n\n def findCollector(self, text):\n \"\"\"\n Extract the collector's name from the OCR text using NER tags and pattern matching\n \"\"\"\n doc = self.nlp(str(text))\n collectors = []\n for ent in doc.ents:\n if ent.label_ == \"PERSON\":\n if re.search(r\"\\w\\.\\s\\w\\.\\s\\w+\", ent.text):\n collectors.append(ent.text)\n if re.search(r\"\\w\\.\\s\\w\\.\\s\\w\\.\\s\\w+\", ent.text):\n collectors.append(ent.text)\n if re.search(r\"\\w+\\w\\.\\s\\w+\", ent.text):\n collectors.append(ent.text)\n if re.search(r\"^\\w+\\s\\w\\.\\s\\w+\", ent.text):\n collectors.append(ent.text)\n if re.search(r\"^coll\", ent.text):\n collectors.append(ent.text)\n if re.search(r\"^Coll\", ent.text):\n collectors.append(ent.text)\n\n return collectors\n\n def findGeography(self, text):\n \"\"\"\n Extract the collector's name from the OCR text using NER tags and pattern matching\n \"\"\"\n text = \" \".join([x for x in text])\n doc = self.nlp(text)\n geography = []\n for ent in doc.ents:\n if ent.label_ == \"GPE\" or ent.label_ == \"LOC\":\n if ent.text.lower() in self.countries:\n geography.append(ent.text)\n if ent.text.lower() in 
self.states:\n geography.append(ent.text + \" US\")\n\n text1 = text.split()\n\n for token in text1:\n for country in self.countries:\n if fuzz.ratio(token, country) > 90:\n geography.append(token)\n for state in self.states:\n if fuzz.ratio(token, state) > 90:\n geography.append(token + \" US\")\n\n return list(set(geography))\n"
] |
[
[
"pandas.read_csv"
]
] |
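Hypothetical usage of the handwrittenText extractor above; the token list is made up, and note that the class loads its data/ files at class-definition time:

ht = handwrittenText()
tokens = ["12345678", "1932", "specimen"]
print(ht.findBarcode(tokens))   # ['12345678']
print(ht.findYear(tokens))      # ['1932']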
madtomy/udacity_image_classifier
|
[
"e5f6fa54ebe8f405b99905a12c29588e9aaf4d1d"
] |
[
"predict.py"
] |
[
"#!/usr/bin/env python3\n'''\nusage: python predict.py /path/to/image checkpoint\n\nOptions:\n- Return most likely classes: python predict.py input checkpoint --top_k 3\n- Use a mapping of categories to names: python predict.py input checkpoint --category_names cat_to_name.json\n- Use GPU for inference: python predict.py input checkpoint --gpu\n'''\n\nimport argparse\nimport numpy as np\nimport torch\nfrom PIL import Image\nimport json\n\nclass Predict_class :\n @staticmethod\n def process_image(img_path):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n img = Image.open(img_path)\n width,height = img.size\n aspect_ratio = width / height \n if aspect_ratio > 1 :\n img = img.resize((round(aspect_ratio*256),256))\n else:\n img = img.resize((256,round(256/aspect_ratio)))\n \n #Crop\n width, height =img.size\n n_width = 224\n n_height = 224\n top = (height - n_height)/2\n right = (width + n_width)/2\n bottom = (height + n_height) / 2\n left = (width - n_width)/2\n img = img.crop((round(left),round(top),round(right),round(bottom)))\n \n #convert channels, normalize, rorder dimmensions \n np_img = np.array(img) /225\n np_img = (np_img - np.array([0.485,0.456,0.406])/np.array([0.229,0.224,0.225]))\n np_img = np_img.transpose((2,0,1))\n return np_img\n @staticmethod\n def predict(np_image, model,gpu,topk=5):\n ''' Predict the class (or classes) of an image using a trained deep learning model.'''\n \n device = torch.device(\"cuda:0\" if gpu else \"cpu\")\n model.to(device)\n model.eval()\n \n with torch.no_grad():\n imgs = torch.from_numpy(np_image)\n imgs = imgs.unsqueeze(0)\n imgs = imgs.type(torch.FloatTensor)\n imgs = imgs.to(device)\n out = model.forward(imgs)\n ps = torch.exp(out)\n pbs, inds = torch.topk(ps,topk)\n pbs = [float(pb) for pb in pbs[0]]\n inv_map = {val:key for key, val in model.class_to_idx.items()}\n clss = [inv_map[int(idx)] for idx in inds[0]]\n return pbs, clss\n\n# Get the command line input\nparser = argparse.ArgumentParser()\n\n\nparser.add_argument('image_path', action='store',\n default = 'flowers/test/1/image_06743.jpg',\n help='Path to image, e.g., \"flowers/test/1/image_06743.jpg\"')\n\nparser.add_argument('checkpoint', action='store',\n default = '.',\n help='Directory of saved checkpoints, e.g., \"assets\"')\n\n# Return top KK most likely classes\nparser.add_argument('--top_k', action='store',\n default = 5,\n dest='top_k',\n help='Return top KK most likely classes, e.g., 5')\n\n# Use a mapping of categories to real names\nparser.add_argument('--category_names', action='store',\n default = 'cat_to_name.json',\n dest='category_names',\n help='File name of the mapping of flower categories to real names, e.g., \"cat_to_name.json\"')\n\n# Use GPU for inference\nparser.add_argument('--gpu', action='store_true',\n default=False,\n dest='gpu',\n help='Use GPU for inference, set a switch to true')\n\nparse_results = parser.parse_args()\n\nimage_path = parse_results.image_path\ncheckpoint = parse_results.checkpoint\ntop_k = int(parse_results.top_k)\ncategory_names = parse_results.category_names\ngpu = parse_results.gpu\n\n# Label mapping\nwith open(category_names, 'r') as f:\n cat_to_name = json.load(f)\n\n# Load the checkpoint\nfilepath = checkpoint + '/checkpoint.pth'\ncheckpoint = torch.load(filepath, map_location='cpu')\nmodel = checkpoint[\"model\"]\nmodel.load_state_dict(checkpoint['state_dict'])\n\n#Create an object of class predict\npred_obj = Predict_class()\n# Image preprocessing\nnp_image = 
pred_obj.process_image(image_path)\n\n# Predict class and probabilities\nprint(f\"Predicting top {top_k} most likely flower names from image {image_path}.\")\n\nprobs, classes = pred_obj.predict(np_image, model,gpu, top_k )\nclasses_name = [cat_to_name[class_i] for class_i in classes]\n\nprint(\"\\nFlower name (probability): \")\nprint(\"*********\")\nfor i in range(len(probs)):\n print(f\"{classes_name[i]} ({round(probs[i], 3)})\")\nprint(\"\")"
] |
[
[
"torch.load",
"torch.topk",
"torch.from_numpy",
"torch.exp",
"torch.no_grad",
"torch.device",
"numpy.array"
]
] |
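A hypothetical invocation of predict.py above (paths are illustrative):

# python predict.py flowers/test/1/image_06743.jpg assets --top_k 3 --category_names cat_to_name.json --gpu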
mlares/iate_audit
|
[
"3ae8aff2b3de853d43619c592ac8cedf0ff35591"
] |
[
"pinnacle/pub_dataviz.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# PUB_DATAVIZ: Visualization tools for PINNACLE\n# Copyright (c) 2020, Marcelo Lares\n#\n# MIT License:\n# https://github.com/IATE-CONICET-UNC/pinnacle/blob/master/LICENSE\n\nfrom matplotlib import pyplot as plt\nfrom pinnacle.plot_styles import cycling_attrs, aes_attrs\n\nimport numpy as np\nimport random\n\n\nclass pub_dataviz:\n\n def __init__(self, inst):\n '''\n Initialize an instance of a visualizerbecariosthods)\n ----------------\n\n - papers_histogram: histogram of the years of publications\n - cumulative_per_author: cumulative number of papers per author\n - authors_citations_years: scatter for number of authors and\n citations.\n - top_proceedings: relation between total number of\n publications and papers.\n - number_authors: distribution of the number of authors with\n time.\n '''\n\n self.inst = inst\n self.config = inst.config\n\n # def filter_quality(self):\n\n def papers_histogram(self, top=False, per_auth=False, quality=5):\n '''\n Papers_histogram: histogram of the years of publications\n\n Parameters\n ----------\n\n top: bool\n If True, paper in selected journals are used, otherwise,\n all papers.\n '''\n\n if top:\n y = self.inst.pub_inst_top.year.values\n else:\n\n # ACA HACER UNA FUNCION PARA FILTRAR CON EL Q\n\n y = self.inst.pub_inst_all.year.values\n\n if per_auth:\n y = list(self.inst.history.index)\n Ht = []\n for a in y:\n k = self.inst.history.loc[a][0]\n Ht.append(k)\n w = []\n for i in range(len(Ht)):\n w.append(1/(max(1, Ht[i])))\n sufix = '_norm'\n else:\n y = [int(a) for a in y]\n Ht = np.ones(len(y))\n w = np.ones(len(Ht))\n sufix = ''\n\n tbreaks = np.arange(int(min(y))-0.5, int(max(y)+1)+0.5, 1)\n\n fig = plt.figure(figsize=(8, 5))\n ax = fig.add_subplot()\n\n H = ax.hist(y, bins=tbreaks, weights=w)\n\n ymax = max(H[0])\n ax.set_ylim(0, ymax)\n ax.grid()\n\n ax.set_xlabel('year')\n\n if top:\n ax.set_ylabel('number of papers')\n ax.set_title('publications by IATE')\n fout = (f\"{self.config.dir_plot}/\"\n f\"papers_per_year_top{sufix}.png\")\n else:\n ax.set_ylabel('number of published works')\n ax.set_title('papers published by IATE')\n fout = (f\"{self.config.dir_plot}/\"\n f\"papers_per_year_all{sufix}.png\")\n\n fig.savefig(fout)\n plt.close()\n\n def papers_histogram2(self, top=False, per_auth=False):\n '''\n Papers_histogram: histogram of the years of publications\n\n Parameters\n ----------\n\n top: bool\n If True, paper in selected journals are used, otherwise,\n all papers.\n '''\n\n if per_auth:\n y = list(self.inst.history.index)\n npp = []\n for a in y:\n k = self.inst.history.loc[a]\n if top:\n npp.append(k[2]/max(1, k[0]))\n else:\n npp.append(k[1]/max(1, k[0]))\n sufix = '_norm'\n hist = npp\n else:\n y = list(self.inst.history.index)\n y = [int(a) for a in y]\n sufix = ''\n tbreaks = np.arange(int(min(y))-0.5, int(max(y)+1)+0.5, 1)\n H = np.histogram(y, bins=tbreaks)\n hist = H[0]\n\n fig = plt.figure(figsize=(8, 5))\n ax = fig.add_subplot()\n\n ax.step(y, hist)\n\n ymax = max(hist)*1.05\n ax.set_ylim(0, ymax)\n ax.grid()\n\n ax.set_xlabel('year')\n\n if top:\n ax.set_ylabel('number of papers')\n ax.set_title('publications by IATE')\n fout = (f\"{self.config.dir_plot}/\"\n f\"papers_per_year_top{sufix}.png\")\n else:\n ax.set_ylabel('number of published works')\n ax.set_title('papers published by IATE')\n fout = (f\"{self.config.dir_plot}/\"\n f\"papers_per_year_all{sufix}.png\")\n\n fig.savefig(fout)\n plt.close()\n\n def cumulative_per_author(self, top=False, 
normalize_first=False):\n '''\n Parameters\n ----------\n\n top: bool\n Use all works or papers from selected journals\n\n normalize_first: bool\n Normalize to the year of the first publication\n\n '''\n import datetime\n now = datetime.datetime.now()\n current_year = now.year\n\n if normalize_first:\n tedges = np.arange(-0.5, 20.5, 1)\n tmeans = np.arange(0, 20, 1)\n fout = (f\"{self.config.dir_plot}/papers_by_author_zero.png\")\n titlen = 'normalized to first'\n xlab = 'years from first publication'\n else:\n tedges = np.arange(1995, 2021, 1)\n tmeans = np.arange(1995, 2020, 1)\n fout = (f\"{self.config.dir_plot}/papers_by_author_year.png\")\n titlen = ''\n xlab = 'year'\n\n if top:\n df = self.inst.pub_auth_top\n titlet = 'papers'\n else:\n df = self.inst.pub_auth_all\n titlet = 'publications'\n\n fig = plt.figure(figsize=(14, 7))\n ax = fig.add_subplot()\n cycling_attrs()\n\n y_max = 0\n auth_names = list(df.author1.unique())\n for a in auth_names:\n\n d = df[df['author1'].isin([a])]\n\n y = [int(i) for i in d.year.values]\n if len(y) == 0:\n continue\n y = np.array(y)\n if normalize_first:\n active = current_year - min(y) + 1\n y = y - min(y)\n tedges = np.arange(-0.5, active + 0.5, 1)\n tmeans = np.arange(0, active, 1)\n\n H = np.histogram(y, bins=tedges)\n ac = H[0].cumsum()\n y_max = max(y_max, max(ac))\n\n aesthetics = aes_attrs()\n ax.plot(tmeans, ac, label=a, **aesthetics)\n\n title = f'Cumulative {titlet} by IATE researchers {titlen}'\n ax.set_title(title)\n ax.set_xlabel(xlab)\n ax.set_ylabel('cumulative number')\n ax.legend(loc=2, ncol=2, fontsize='small', frameon=False,\n handlelength=6)\n fig.savefig(fout)\n plt.close()\n\n def authors_citations_years(self, top=True):\n '''\n Plot a scatter of number of authors and number of citations\n\n Parameters\n ----------\n\n top: bool\n Use all works or papers from selected journals\n '''\n\n if top:\n df = self.inst.pub_inst_top\n else:\n df = self.inst.pub_inst_all\n\n npapers = df.shape[0]\n\n na = []\n nc = []\n ye = []\n for i in range(npapers):\n pprs = df.iloc[i]\n\n nauths = len(pprs.authors)\n ncitas = pprs.citation_count\n year = pprs.year\n\n r = random.random()*0.6 - 0.3\n na.append(nauths+r)\n r = random.random()*0.6 - 0.3\n nc.append(ncitas+1+r)\n ye.append(int(year))\n\n y = ((np.array(ye)-1980)*0.2)**2.6\n\n fig = plt.figure(figsize=(10, 5))\n ax = fig.add_subplot()\n ax.scatter(na, nc, s=y, color=(0, 0, 1, 0.3))\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlabel('Number of authors')\n ax.set_ylabel('Number of citations + 1')\n ax.legend(loc='center left', bbox_to_anchor=(1.1, 0.5), labelspacing=3)\n\n fout = (f\"{self.config.dir_plot}/nauth_ncitas_year.png\")\n fig.savefig(fout)\n plt.close()\n\n def top_proceedings(self):\n '''\n Plot a scatter of number of publications vs number of papers\n\n '''\n tod = []\n top = []\n\n auth_names = list(self.inst.pub_inst_all.author1.unique())\n for a in auth_names:\n\n df = self.inst.pub_inst_all\n dfa = df[df['author1'].isin([a])]\n df = self.inst.pub_inst_top\n dft = df[df['author1'].isin([a])]\n\n tod.append(dfa.shape[0])\n top.append(dft.shape[0])\n\n fig = plt.figure(figsize=(10, 5))\n ax = fig.add_subplot()\n ax.scatter(tod, top)\n m = max(tod)\n ax.plot([0, m], [0, m])\n\n ax.set_title('all works vs. 
top papers')\n ax.set_xlabel('all works')\n ax.set_ylabel('papers top')\n\n fout = (f\"{self.config.dir_plot}/top_vs_all.png\")\n fig.savefig(fout)\n plt.close()\n\n def number_authors(self, top=True):\n '''\n Plot a scatter for the number of authors as a function of time\n\n Parameters\n ----------\n\n top: bool\n Use all works or papers from selected journals\n '''\n\n if top:\n df = self.inst.pub_inst_top\n else:\n df = self.inst.pub_inst_all\n\n nauth = []\n for i, p in df.iterrows():\n nauth.append(len(p.authors))\n\n fig = plt.figure(figsize=(10, 5))\n ax = fig.add_subplot()\n\n years = [int(y) for y in df.year.values]\n\n ax.scatter(years, nauth)\n ax.set_yscale('log')\n\n ax.set_title('number of authors per year')\n ax.set_xlabel('year')\n ax.set_ylabel('N authors')\n\n fout = (f\"{self.config.dir_plot}/year_nauth.png\")\n fig.savefig(fout)\n plt.close()\n\n def nauth_npprs(self, top=True):\n fig = plt.figure(figsize=(10, 5))\n ax = fig.add_subplot()\n\n x = list(self.inst.history.index)\n y = self.inst.history['pop']\n\n if top:\n z = self.inst.history['npapers_top']\n else:\n z = self.inst.history['npapers_all']\n\n ax.plot(x, y, label='authors')\n ax.plot(x, z, label='papers')\n ax.legend()\n ax.set_title('number of authors per paper')\n ax.set_xlabel('year')\n ax.set_ylabel('N authors / paper')\n\n if top:\n ax.set_title('publications by IATE, top papers')\n fout = (f\"{self.config.dir_plot}/nauth_npprs_years_top.png\")\n else:\n ax.set_title('papers published by IATE, all works')\n fout = (f\"{self.config.dir_plot}/nauth_npprs_years_all.png\")\n\n fig.savefig(fout)\n plt.close()\n\n def plot_all(self):\n '''\n Make all the plots.\n\n '''\n self.papers_histogram2(top=True)\n self.papers_histogram2(top=False)\n self.papers_histogram2(top=True, per_auth=True)\n self.papers_histogram2(top=False, per_auth=True)\n\n self.cumulative_per_author(top=False, normalize_first=False)\n self.cumulative_per_author(top=False, normalize_first=True)\n self.cumulative_per_author(top=True, normalize_first=False)\n self.cumulative_per_author(top=True, normalize_first=True)\n\n self.authors_citations_years()\n self.top_proceedings()\n self.nauth_npprs()\n"
] |
[
[
"numpy.arange",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.histogram",
"matplotlib.pyplot.figure"
]
] |
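A minimal sketch of the per_auth weighting used in papers_histogram above, with made-up years and author counts: each paper contributes 1/max(1, n_authors) to its year's bar, which is exactly what the `w` list does in the dataset code.

import numpy as np
from matplotlib import pyplot as plt

years = np.array([2015, 2015, 2016, 2018, 2018, 2018, 2020])   # assumed data
n_authors = np.array([3, 1, 4, 2, 2, 5, 1])
weights = 1.0 / np.maximum(1, n_authors)   # spread each paper across its authors

tbreaks = np.arange(years.min() - 0.5, years.max() + 1.5, 1)   # one bin per year
fig, ax = plt.subplots(figsize=(8, 5))
ax.hist(years, bins=tbreaks, weights=weights)
ax.set_xlabel('year')
ax.set_ylabel('author-normalized paper count')
fig.savefig('papers_per_year_norm.png')
plt.close(fig)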
JaiWillems/SatPy
|
[
"a46d01d14976828eabc63b4a86c6cb97e2ae4c41"
] |
[
"celest/encounter/_encounter_math_utils.py"
] |
[
"\"\"\"Encounter indices utilities.\n\nThis module contains the geometric utilities necessary for analytical pass\nanalysis implementation.\n\"\"\"\n\n\nfrom typing import Literal\nimport numpy as np\n\n\ndef _cone_constraint(theta: np.array, U: np.array, X: np.array) -> np.array:\n \"\"\"Return an array of indices for `X` points that fall within a cone.\n\n This method defines a double-sided cone with an apex located at `U` and an\n aperture angle `theta` and returns the indices for `X` points that fall\n within this geometry.\n\n Parameters\n ----------\n theta : np.array\n Array of size (n,) containing cone aperture angles in degrees.\n U : np.array\n Array of shape (n, 3) containing the cartesian coordinates of the apex.\n X : np.array\n Array of shape (n, 3) containing the cartesian coordinate points to\n check.\n\n Returns\n -------\n np.array\n Array of indices corresponding to points that fall within the cone.\n \"\"\"\n\n U_norm = np.repeat(np.linalg.norm(U, axis=1).reshape((-1, 1)), 3, axis=1)\n D = np.divide(U, U_norm)\n Y = X - U\n\n d1, d2, d3 = D[:, 0], D[:, 1], D[:, 2]\n y1, y2, y3 = Y[:, 0], Y[:, 1], Y[:, 2]\n\n ct2 = np.cos(np.radians(theta)) ** 2\n\n r1 = (d1 ** 2 - ct2) * y1 + d1 * d2 * y2 + d1 * d3 * y3\n r2 = d2 * d1 * y1 + (d2 ** 2 - ct2) * y2 + d2 * d3 * y3\n r3 = d3 * d1 * y1 + d3 * d2 * y2 + (d3 ** 2 - ct2) * y3\n\n point = y1 * r1 + y2 * r2 + y3 * r3\n\n ind = np.where(point >= 0)[0]\n\n return ind\n\n\ndef _plane_constraint(U: np.array, X: np.array) -> np.array:\n \"\"\"Return an array of indices containing points that fall above a plane.\n\n Parameters\n ----------\n U, X : np.array\n Arrays of shape (n, 3) containing cartesian vectors defining a plane.\n\n Returns\n -------\n np.array\n Array of points that fall above the plane.\n\n Notes\n -----\n A point :math:`\\textbf{X}=\\langle x, y, z\\rangle^T` falls falls above the\n plane defined with :math:`\\textbf{U}=\\langle x_0, y_0, z_0\\rangle^T` if it\n satisfies the following inequality:\n\n .. math:: 0\\leq\\left(\\textbf{X}-\\textbf{U}\\right)^T\\cdot\\textbf{U}\n \"\"\"\n\n Y = X - U\n\n u1, u2, u3 = U[:, 0], U[:, 1], U[:, 2]\n y1, y2, y3 = Y[:, 0], Y[:, 1], Y[:, 2]\n\n point = y1 * u1 + y2 * u2 + y3 * u3\n\n ind = np.where(point >= 0)[0]\n\n return ind\n\n\ndef _aperature_theta(ang: float, form: Literal[0, 1], n: int=None, U:\n np.array=None, X: np.array=None) -> np.array:\n \"\"\"Calculate cone aperature angles from constraint angles.\n\n This method calculates the aperture angles for the cone that defines a\n quasi-valid encounter region based on the input angle type.\n\n Parameters\n ----------\n ang : float\n The constraint angle in degrees.\n form : {0, 1}\n Defines the angle as altitude if `angType=0` and off-nadir if\n `angType=1`.\n n : int, optional\n Length of returned theta array.\n U : np.array, optional\n Array of shape (n, 3) containing the cartesian coordinates of the apex.\n X : np.array, optional\n Array of shape (n, 3) containing the cartesian coordinate points to\n check.\n\n Returns\n -------\n np.array\n Array of shape (n,) containing theta angles in degrees.\n\n Notes\n -----\n The `n` parameter is necessary when `angType=0`. The `U` and `X` parameters\n are necessary when `angtype=1`.\n\n If the constraint angle type is an altitude angle, then :math:`\\theta` can\n be calculated from :math:`\\theta=90\\degree-a\\degree` where :math:`a` is the\n altitude constraint angle.\n\n If the constraint angle type is an off-nadir angle, then :math:`theta` can\n be calculated from:\n\n .. 
math::\n\n \\theta = \\gamma+\\sin^{-1}\\left(\\frac{\\left|\\textbf{X}-\\textbf{U}\n \\right|}{\\left|\\textbf{U}\\right|}\\sin{\\gamma}\\right)\n\n where :math:`\\gamma` is the off-nadir constraint angle.\n \"\"\"\n\n if form:\n ang = np.radians(ang)\n\n num = np.linalg.norm(X - U, axis=1)\n denom = np.linalg.norm(U, axis=1)\n\n theta = ang + np.arcsin(np.sin(ang) * num / denom)\n theta = np.degrees(theta)\n\n else:\n theta = np.full((n,), 90 - ang)\n\n return theta\n\n\ndef _analytical_encounter_ind(sat_position: np.array, gnd_position: np.array,\n ang: float, form: Literal[0, 1]) -> np.array:\n \"\"\"Return encounter indices.\n\n This method returns the encounter indices corresponding only to valid\n satellite positions. It does not consider the sun constraint angle or\n lighting conditions.\n\n Parameters\n ----------\n sat_position : np.array\n Array of shape (n, 3) containing satellite ECEF positions.\n gnd_position : np.array\n Array of shape (n, 3) containing ground location ECEF positions.\n ang : float\n Constraint angle in degrees.\n angType : {0, 1}\n Defines the angle as altitude if `angType=0` and off-nadir if\n `angType=1`.\n\n Returns\n -------\n np.array\n Array of indices defining valid encounter positions.\n\n Notes\n -----\n This method finds all indices that fall within a single-sided cone\n extending above the Earth's surface with the apex at a ground location of\n interest and its aperture angle defined by the constraint angle.\n \"\"\"\n\n n = sat_position.shape[0]\n U = gnd_position\n X = sat_position\n theta = _aperature_theta(ang, form, n, U, X)\n\n ind_1 = _cone_constraint(theta, U, X)\n ind_2 = _plane_constraint(U, X)\n\n ind_final = np.intersect1d(ind_1, ind_2)\n\n return ind_final\n"
] |
[
[
"numpy.radians",
"numpy.degrees",
"numpy.linalg.norm",
"numpy.full",
"numpy.sin",
"numpy.intersect1d",
"numpy.where",
"numpy.divide"
]
] |
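A small numpy check, with assumed coordinates, of the two tests implemented by _cone_constraint and _plane_constraint above: a point X passes the cone test when (X - U)^T (D D^T - cos^2(theta) I)(X - U) >= 0 with D = U/|U|, and the plane test when it lies above the tangent plane at U. The second point below is inside the downward lobe of the double cone, so only the plane constraint rejects it.

import numpy as np

U = np.array([[6371.0, 0.0, 0.0]])           # ground point on the x-axis (km, assumed)
X = np.array([[7000.0, 100.0, 0.0],          # nearly overhead
              [0.0, 7000.0, 0.0]])           # near the horizon
theta = np.array([80.0, 80.0])               # aperture angles in degrees

D = U / np.linalg.norm(U, axis=1, keepdims=True)
ct2 = np.cos(np.radians(theta)) ** 2

for i in range(X.shape[0]):
    Y = X[i] - U[0]
    cone = (D[0] @ Y) ** 2 - ct2[i] * (Y @ Y)   # quadratic form of the double cone
    plane = Y @ U[0]                             # above the tangent plane at U?
    print(i, bool(cone >= 0 and plane >= 0))     # True, then False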
amit17133129/pyMG-2016
|
[
"b82a60811bb0a8b91d8793c47177a240221f9176",
"b82a60811bb0a8b91d8793c47177a240221f9176"
] |
[
"project/mymultigrid.py",
"project/poisson1d.py"
] |
[
"import scipy.sparse.linalg as sLA\nimport numpy as np\n\nfrom pymg.multigrid_base import MultigridBase\n\n\nclass MyMultigrid(MultigridBase):\n \"\"\"Implementation of a multigrid solver with different cycle implementations\n \"\"\"\n\n def __init__(self, ndofs, nlevels):\n \"\"\"Initialization routine\n \"\"\"\n assert np.log2(ndofs+1) >= nlevels\n super(MyMultigrid, self).__init__(ndofs, nlevels)\n\n def do_v_cycle(self, v0, rhs, nu1, nu2, lstart):\n \"\"\"Straightforward implementation of a V-cycle\n\n This can also be used inside an FMG-cycle!\n\n Args:\n v0 (numpy.array): initial values on finest level\n rhs (numpy.array): right-hand side on finest level\n nu1 (int): number of downward smoothing steps\n nu2 (int): number of upward smoothing steps\n lstart (int): starting level\n\n Returns:\n numpy.array: solution vector on finest level\n \"\"\"\n\n assert self.nlevels >= lstart >= 0\n assert v0.size == self.vh[lstart].size\n\n # set intial conditions (note: resetting vectors here is important!)\n self.reset_vectors(lstart)\n self.vh[lstart] = v0\n self.fh[lstart] = rhs\n\n # downward cycle\n for l in range(lstart, self.nlevels-1):\n # print('V-down: %i -> %i' %(l,l+1))\n # pre-smoothing\n for i in range(nu1):\n self.vh[l] = self.smoo[l].smooth(self.fh[l], self.vh[l])\n\n # restrict\n self.fh[l+1] = self.trans[l].restrict(self.fh[l] - self.smoo[l].A.dot(self.vh[l]))\n\n # solve on coarsest level\n self.vh[-1] = sLA.spsolve(self.Acoarse, self.fh[-1])\n\n # upward cycle\n for l in reversed(range(lstart, self.nlevels-1)):\n # print('V-up: %i -> %i' %(l+1,l))\n # correct\n self.vh[l] += self.trans[l].prolong(self.vh[l+1])\n\n # post-smoothing\n for i in range(nu2):\n self.vh[l] = self.smoo[l].smooth(self.fh[l], self.vh[l])\n\n return self.vh[lstart]\n\n def do_v_cycle_recursive(self, v0, rhs, nu1, nu2, level):\n \"\"\"Recursive implementation of a V-cycle\n\n This can also be used inside an FMG-cycle!\n\n Args:\n v0 (numpy.array): initial values on finest level\n rhs (numpy.array): right-hand side on finest level\n nu1 (int): number of downward smoothing steps\n nu2 (int): number of upward smoothing steps\n level (int): current level\n\n Returns:\n numpy.array: solution vector on current level\n \"\"\"\n\n assert self.nlevels > level >= 0\n assert v0.size == self.vh[level].size\n\n # set intial conditions\n self.vh[level] = v0\n self.fh[level] = rhs\n\n # downward cycle\n if level < self.nlevels-1:\n\n # pre-smoothing\n for i in range(nu1):\n self.vh[level] = self.smoo[level].smooth(self.fh[level], self.vh[level])\n\n # restrict\n self.fh[level+1] = self.trans[level].restrict(self.fh[level] -\n self.smoo[level].A.dot(self.vh[level]))\n # recursive call to v-cycle\n self.vh[level+1] = self.do_v_cycle_recursive(np.zeros(self.vh[level+1].size),\n self.fh[level+1], nu1, nu2, level+1)\n # on coarsest level\n else:\n\n # solve on coarsest level\n self.vh[level] = sLA.spsolve(self.Acoarse, self.fh[level])\n\n return self.vh[level]\n\n # correct\n self.vh[level] += self.trans[level].prolong(self.vh[level+1])\n\n # post-smoothing\n for i in range(nu2):\n self.vh[level] = self.smoo[level].smooth(self.fh[level], self.vh[level])\n\n return self.vh[level]\n",
"# coding=utf-8\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom pymg.problem_base import ProblemBase\n\n\nclass Poisson1D(ProblemBase):\n \"\"\"Implementation of the 1D Poission problem.\n\n Here we define the 1D Poisson problem :math:`-\\Delta u = 0` with\n Dirichlet-Zero boundary conditions. This is the homogeneous problem,\n derive from this class if you want to play around with different RHS.\n\n Attributes:\n dx (float): mesh size\n \"\"\"\n\n def __init__(self, ndofs, *args, **kwargs):\n \"\"\"Initialization routine for the Poisson1D problem\n\n Args:\n ndofs (int): number of degrees of freedom (see\n :attr:`pymg.problem_base.ProblemBase.ndofs`)\n *args: Variable length argument list\n **kwargs: Arbitrary keyword arguments\n \"\"\"\n self.dx = 1.0 / (ndofs + 1)\n # compute system matrix A, scale by 1/dx^2\n A = 1.0 / (self.dx ** 2) * self.__get_system_matrix(ndofs)\n rhs = self.__get_rhs(ndofs)\n\n super(Poisson1D, self).__init__(ndofs, A, rhs, *args, **kwargs)\n\n @staticmethod\n def __get_system_matrix(ndofs):\n \"\"\"Helper routine to get the system matrix discretizing :math:`-Delta` with second order FD\n\n Args:\n ndofs (int): number of inner grid points (no boundaries!)\n Returns:\n scipy.sparse.csc_matrix: sparse system matrix A\n of size :attr:`ndofs` x :attr:`ndofs`\n \"\"\"\n data = np.array([[2] * ndofs, [-1] * ndofs, [-1] * ndofs])\n diags = np.array([0, -1, 1])\n return sp.spdiags(data, diags, ndofs, ndofs, format='csc')\n\n @staticmethod\n def __get_rhs(ndofs):\n \"\"\"Helper routine to set the right-hand side\n\n Args:\n ndofs (int): number of inner grid points (no boundaries!)\n Returns:\n numpy.ndarray: the right-hand side vector of size :attr:`ndofs`\n \"\"\"\n return np.zeros(ndofs)\n\n @property\n def u_exact(self):\n \"\"\"Routine to compute the exact solution\n\n Returns:\n numpy.ndarray: exact solution array of size :attr:`ndofs`\n \"\"\"\n return np.zeros(self.ndofs)\n\n @property\n def domain(self):\n return np.array([(i + 1) * self.dx for i in range(self.ndofs)])\n\n @ProblemBase.ndofs.setter\n def ndofs(self, val):\n\n ProblemBase.ndofs.fset(self, val)\n self.dx = 1.0 / (val + 1)\n # compute system matrix A, scale by 1/dx^2\n self.A = 1.0 / (self.dx ** 2) * self.__get_system_matrix(val)\n self.rhs = self.__get_rhs(self._ndofs)\n\n\nclass Poisson1DPeriodic(ProblemBase):\n \"\"\"Implementation of the 1D Poission problem.\n\n Here we define the 1D Poisson problem :math:`-\\Delta u = 0` with\n Dirichlet-Zero boundary conditions. 
This is the homogeneous problem,\n derive from this class if you want to play around with different RHS.\n\n Attributes:\n dx (float): mesh size\n \"\"\"\n\n def __init__(self, ndofs, sigma, *args, **kwargs):\n \"\"\"Initialization routine for the Poisson1D problem\n\n Args:\n ndofs (int): number of degrees of freedom (see\n :attr:`pymg.problem_base.ProblemBase.ndofs`)\n *args: Variable length argument list\n **kwargs: Arbitrary keyword arguments\n \"\"\"\n self.dx = 1.0 / ndofs\n self.sigma = sigma\n # compute system matrix A, scale by 1/dx^2\n A = self.__get_system_matrix(ndofs)\n A[0, -1] = A[0, 1]\n A[-1, 0] = A[1, 0]\n A = -sigma * 1.0 / (self.dx ** 2) * A\n rhs = self.__get_rhs(ndofs)\n\n super(Poisson1DPeriodic, self).__init__(ndofs, A, rhs, *args, **kwargs)\n\n @staticmethod\n def __get_system_matrix(ndofs):\n \"\"\"Helper routine to get the system matrix discretizing :math:`-Delta` with second order FD\n\n Args:\n ndofs (int): number of inner grid points (no boundaries!)\n Returns:\n scipy.sparse.csc_matrix: sparse system matrix A\n of size :attr:`ndofs` x :attr:`ndofs`\n \"\"\"\n data = np.array([[2] * ndofs, [-1] * ndofs, [-1] * ndofs])\n diags = np.array([0, -1, 1])\n return sp.spdiags(data, diags, ndofs, ndofs, format='csc')\n\n @staticmethod\n def __get_rhs(ndofs):\n \"\"\"Helper routine to set the right-hand side\n\n Args:\n ndofs (int): number of inner grid points (no boundaries!)\n Returns:\n numpy.ndarray: the right-hand side vector of size :attr:`ndofs`\n \"\"\"\n return np.zeros(ndofs)\n\n @property\n def u_exact(self):\n \"\"\"Routine to compute the exact solution\n\n Returns:\n numpy.ndarray: exact solution array of size :attr:`ndofs`\n \"\"\"\n return np.zeros(self.ndofs)\n\n @property\n def domain(self):\n return np.array([(i) * self.dx for i in range(self.ndofs)])\n\n @ProblemBase.ndofs.setter\n def ndofs(self, val):\n ProblemBase.ndofs.fset(self, val)\n self.dx = 1.0 / val\n # compute system matrix A, scale by 1/dx^2\n self.A = -self.sigma * 1.0 / (self.dx ** 2) * self.__get_system_matrix(val)\n self.A[0, -1] = self.A[0, 1]\n self.A[-1, 0] = self.A[1, 0]\n\n self.rhs = self.__get_rhs(self._ndofs)\n"
] |
[
[
"numpy.log2",
"numpy.zeros",
"scipy.sparse.linalg.spsolve"
],
[
"scipy.sparse.spdiags",
"numpy.array",
"numpy.zeros"
]
] |
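For reference, a self-contained sketch (assumed problem size) that assembles the same 1D Poisson matrix as __get_system_matrix above and solves it directly with spsolve, which is what the V-cycle does on the coarsest level; the manufactured solution sin(pi x) shows the expected second-order discretization error.

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as sLA

ndofs = 7                                  # assumed number of inner grid points
dx = 1.0 / (ndofs + 1)
data = np.array([[2] * ndofs, [-1] * ndofs, [-1] * ndofs])
A = 1.0 / dx ** 2 * sp.spdiags(data, np.array([0, -1, 1]), ndofs, ndofs, format='csc')

x = np.array([(i + 1) * dx for i in range(ndofs)])
rhs = np.pi ** 2 * np.sin(np.pi * x)       # -u'' = pi^2 sin(pi x), u(0)=u(1)=0
u = sLA.spsolve(A, rhs)
print(np.max(np.abs(u - np.sin(np.pi * x))))   # small O(dx^2) error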
Kait0/leaderboard
|
[
"e44c169d541832439f98c70b33cfe6f7d89dfa31"
] |
[
"team_code/transfuser_agent.py"
] |
[
"import os\nimport json\nimport datetime\nimport pathlib\nimport time\nimport cv2\nimport carla\nfrom collections import deque\n\nimport torch\nimport carla\nimport numpy as np\nfrom PIL import Image\n\nfrom leaderboard.autoagents import autonomous_agent\nfrom transfuser.model import TransFuser\nfrom transfuser.config import GlobalConfig\nfrom transfuser.data import scale_and_crop_image, lidar_to_histogram_features, transform_2d_points\nfrom team_code.planner import RoutePlanner\n\n\nSAVE_PATH = os.environ.get('SAVE_PATH', None)\n\n\ndef get_entry_point():\n\treturn 'TransFuserAgent'\n\n\nclass TransFuserAgent(autonomous_agent.AutonomousAgent):\n\tdef setup(self, path_to_conf_file):\n\t\tself.track = autonomous_agent.Track.SENSORS\n\t\tself.config_path = path_to_conf_file\n\t\tself.step = -1\n\t\tself.wall_start = time.time()\n\t\tself.initialized = False\n\n\t\tself.input_buffer = {'rgb': deque(), 'rgb_left': deque(), 'rgb_right': deque(), \n\t\t\t\t\t\t\t'rgb_rear': deque(), 'lidar': deque(), 'gps': deque(), 'thetas': deque()}\n\n\t\tself.config = GlobalConfig()\n\t\tself.net = TransFuser(self.config, 'cuda')\n\t\tself.net.load_state_dict(torch.load(os.path.join(path_to_conf_file, 'best_model.pth')))\n\t\tself.net.cuda()\n\t\tself.net.eval()\n\n\t\tself.save_path = None\n\t\tif SAVE_PATH is not None:\n\t\t\tnow = datetime.datetime.now()\n\t\t\tstring = pathlib.Path(os.environ['ROUTES']).stem + '_'\n\t\t\tstring += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second)))\n\n\t\t\tprint (string)\n\n\t\t\tself.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string\n\t\t\tself.save_path.mkdir(parents=True, exist_ok=False)\n\n\t\t\t(self.save_path / 'rgb').mkdir(parents=True, exist_ok=False)\n\t\t\t(self.save_path / 'meta').mkdir(parents=True, exist_ok=False)\n\n\tdef _init(self):\n\t\tself._route_planner = RoutePlanner(4.0, 50.0)\n\t\tself._route_planner.set_route(self._global_plan, True)\n\n\t\tself.initialized = True\n\n\tdef _get_position(self, tick_data):\n\t\tgps = tick_data['gps']\n\t\tgps = (gps - self._route_planner.mean) * self._route_planner.scale\n\n\t\treturn gps\n\n\tdef sensors(self):\n\t\treturn [\n\t\t\t\t{\n\t\t\t\t\t'type': 'sensor.camera.rgb',\n\t\t\t\t\t'x': 1.3, 'y': 0.0, 'z':2.3,\n\t\t\t\t\t'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n\t\t\t\t\t'width': 400, 'height': 300, 'fov': 100,\n\t\t\t\t\t'id': 'rgb'\n\t\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'type': 'sensor.camera.rgb',\n\t\t\t\t\t'x': 1.3, 'y': 0.0, 'z':2.3,\n\t\t\t\t\t'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,\n\t\t\t\t\t'width': 400, 'height': 300, 'fov': 100,\n\t\t\t\t\t'id': 'rgb_left'\n\t\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'type': 'sensor.camera.rgb',\n\t\t\t\t\t'x': 1.3, 'y': 0.0, 'z':2.3,\n\t\t\t\t\t'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,\n\t\t\t\t\t'width': 400, 'height': 300, 'fov': 100,\n\t\t\t\t\t'id': 'rgb_right'\n\t\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'type': 'sensor.camera.rgb',\n\t\t\t\t\t'x': -1.3, 'y': 0.0, 'z':2.3,\n\t\t\t\t\t'roll': 0.0, 'pitch': 0.0, 'yaw': -180.0,\n\t\t\t\t\t'width': 400, 'height': 300, 'fov': 100,\n\t\t\t\t\t'id': 'rgb_rear'\n\t\t\t\t\t},\n { \n 'type': 'sensor.lidar.ray_cast',\n 'x': 1.3, 'y': 0.0, 'z': 2.5,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0,\n 'id': 'lidar'\n },\n\t\t\t\t{\n\t\t\t\t\t'type': 'sensor.other.imu',\n\t\t\t\t\t'x': 0.0, 'y': 0.0, 'z': 0.0,\n\t\t\t\t\t'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n\t\t\t\t\t'sensor_tick': 0.05,\n\t\t\t\t\t'id': 'imu'\n\t\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'type': 'sensor.other.gnss',\n\t\t\t\t\t'x': 0.0, 'y': 
0.0, 'z': 0.0,\n\t\t\t\t\t'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n\t\t\t\t\t'sensor_tick': 0.01,\n\t\t\t\t\t'id': 'gps'\n\t\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t'type': 'sensor.speedometer',\n\t\t\t\t\t'reading_frequency': 20,\n\t\t\t\t\t'id': 'speed'\n\t\t\t\t\t}\n\t\t\t\t]\n\n\tdef tick(self, input_data):\n\t\tself.step += 1\n\n\t\trgb = cv2.cvtColor(input_data['rgb'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n\t\trgb_left = cv2.cvtColor(input_data['rgb_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n\t\trgb_right = cv2.cvtColor(input_data['rgb_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n\t\trgb_rear = cv2.cvtColor(input_data['rgb_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n\t\tgps = input_data['gps'][1][:2]\n\t\tspeed = input_data['speed'][1]['speed']\n\t\tcompass = input_data['imu'][1][-1]\n\t\tlidar = input_data['lidar'][1][:, :3]\n\n\t\tresult = {\n\t\t\t\t'rgb': rgb,\n\t\t\t\t'rgb_left': rgb_left,\n\t\t\t\t'rgb_right': rgb_right,\n\t\t\t\t'rgb_rear': rgb_rear,\n\t\t\t\t'lidar': lidar,\n\t\t\t\t'gps': gps,\n\t\t\t\t'speed': speed,\n\t\t\t\t'compass': compass,\n\t\t\t\t}\n\t\t\n\t\tpos = self._get_position(result)\n\t\tresult['gps'] = pos\n\t\tnext_wp, next_cmd = self._route_planner.run_step(pos)\n\t\tresult['next_command'] = next_cmd.value\n\n\t\ttheta = compass + np.pi/2\n\t\tR = np.array([\n\t\t\t[np.cos(theta), -np.sin(theta)],\n\t\t\t[np.sin(theta), np.cos(theta)]\n\t\t\t])\n\n\t\tlocal_command_point = np.array([next_wp[0]-pos[0], next_wp[1]-pos[1]])\n\t\tlocal_command_point = R.T.dot(local_command_point)\n\t\tresult['target_point'] = tuple(local_command_point)\n\n\t\treturn result\n\n\[email protected]_grad()\n\tdef run_step(self, input_data, timestamp):\n\t\tif not self.initialized:\n\t\t\tself._init()\n\n\t\ttick_data = self.tick(input_data)\n\n\t\tif self.step < self.config.seq_len:\n\t\t\trgb = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb']), crop=self.config.input_resolution)).unsqueeze(0)\n\t\t\tself.input_buffer['rgb'].append(rgb.to('cuda', dtype=torch.float32))\n\t\t\t\n\t\t\tif not self.config.ignore_sides:\n\t\t\t\trgb_left = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_left']), crop=self.config.input_resolution)).unsqueeze(0)\n\t\t\t\tself.input_buffer['rgb_left'].append(rgb_left.to('cuda', dtype=torch.float32))\n\t\t\t\t\n\t\t\t\trgb_right = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_right']), crop=self.config.input_resolution)).unsqueeze(0)\n\t\t\t\tself.input_buffer['rgb_right'].append(rgb_right.to('cuda', dtype=torch.float32))\n\n\t\t\tif not self.config.ignore_rear:\n\t\t\t\trgb_rear = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_rear']), crop=self.config.input_resolution)).unsqueeze(0)\n\t\t\t\tself.input_buffer['rgb_rear'].append(rgb_rear.to('cuda', dtype=torch.float32))\n\n\t\t\tself.input_buffer['lidar'].append(tick_data['lidar'])\n\t\t\tself.input_buffer['gps'].append(tick_data['gps'])\n\t\t\tself.input_buffer['thetas'].append(tick_data['compass'])\n\n\t\t\tcontrol = carla.VehicleControl()\n\t\t\tcontrol.steer = 0.0\n\t\t\tcontrol.throttle = 0.0\n\t\t\tcontrol.brake = 0.0\n\t\t\t\n\t\t\treturn control\n\n\t\tgt_velocity = torch.FloatTensor([tick_data['speed']]).to('cuda', dtype=torch.float32)\n\t\tcommand = torch.FloatTensor([tick_data['next_command']]).to('cuda', dtype=torch.float32)\n\n\t\ttick_data['target_point'] = [torch.FloatTensor([tick_data['target_point'][0]]),\n\t\t\t\t\t\t\t\t\t\t\ttorch.FloatTensor([tick_data['target_point'][1]])]\n\t\ttarget_point = 
torch.stack(tick_data['target_point'], dim=1).to('cuda', dtype=torch.float32)\n\n\t\tencoding = []\n\t\trgb = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb']), crop=self.config.input_resolution)).unsqueeze(0)\n\t\tself.input_buffer['rgb'].popleft()\n\t\tself.input_buffer['rgb'].append(rgb.to('cuda', dtype=torch.float32))\n\t\t\n\t\tif not self.config.ignore_sides:\n\t\t\trgb_left = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_left']), crop=self.config.input_resolution)).unsqueeze(0)\n\t\t\tself.input_buffer['rgb_left'].popleft()\n\t\t\tself.input_buffer['rgb_left'].append(rgb_left.to('cuda', dtype=torch.float32))\n\t\t\t\n\t\t\trgb_right = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_right']), crop=self.config.input_resolution)).unsqueeze(0)\n\t\t\tself.input_buffer['rgb_right'].popleft()\n\t\t\tself.input_buffer['rgb_right'].append(rgb_right.to('cuda', dtype=torch.float32))\n\n\t\tif not self.config.ignore_rear:\n\t\t\trgb_rear = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_rear']), crop=self.config.input_resolution)).unsqueeze(0)\n\t\t\tself.input_buffer['rgb_rear'].popleft()\n\t\t\tself.input_buffer['rgb_rear'].append(rgb_rear.to('cuda', dtype=torch.float32))\n\n\t\tself.input_buffer['lidar'].popleft()\n\t\tself.input_buffer['lidar'].append(tick_data['lidar'])\n\t\tself.input_buffer['gps'].popleft()\n\t\tself.input_buffer['gps'].append(tick_data['gps'])\n\t\tself.input_buffer['thetas'].popleft()\n\t\tself.input_buffer['thetas'].append(tick_data['compass'])\n\n\t\tlidar_processed = list()\n\t\t# transform the lidar point clouds to local coordinate frame\n\t\tego_theta = self.input_buffer['thetas'][-1]\n\t\tego_x, ego_y = self.input_buffer['gps'][-1]\n\t\tfor i, lidar_point_cloud in enumerate(self.input_buffer['lidar']):\n\t\t\tcurr_theta = self.input_buffer['thetas'][i]\n\t\t\tcurr_x, curr_y = self.input_buffer['gps'][i]\n\t\t\tlidar_point_cloud[:,1] *= -1 # inverts x, y\n\t\t\tlidar_transformed = transform_2d_points(lidar_point_cloud,\n\t\t\t\t\tnp.pi/2-curr_theta, -curr_x, -curr_y, np.pi/2-ego_theta, -ego_x, -ego_y)\n\t\t\tlidar_transformed = torch.from_numpy(lidar_to_histogram_features(lidar_transformed, crop=self.config.input_resolution)).unsqueeze(0)\n\t\t\tlidar_processed.append(lidar_transformed.to('cuda', dtype=torch.float32))\n\n\t\tpred_wp = self.net(self.input_buffer['rgb'] + self.input_buffer['rgb_left'] + \\\n\t\t\t\t\t\t self.input_buffer['rgb_right']+self.input_buffer['rgb_rear'], \\\n\t\t\t\t\t\t lidar_processed, target_point, gt_velocity)\n\t\tsteer, throttle, brake, metadata = self.net.control_pid(pred_wp, gt_velocity)\n\t\tself.pid_metadata = metadata\n\n\t\tif brake < 0.05: brake = 0.0\n\t\tif throttle > brake: brake = 0.0\n\n\t\tcontrol = carla.VehicleControl()\n\t\tcontrol.steer = float(steer)\n\t\tcontrol.throttle = float(throttle)\n\t\tcontrol.brake = float(brake)\n\n\t\tif SAVE_PATH is not None and self.step % 10 == 0:\n\t\t\tself.save(tick_data)\n\n\t\treturn control\n\n\tdef save(self, tick_data):\n\t\tframe = self.step // 10\n\n\t\tImage.fromarray(tick_data['rgb']).save(self.save_path / 'rgb' / ('%04d.png' % frame))\n\n\t\toutfile = open(self.save_path / 'meta' / ('%04d.json' % frame), 'w')\n\t\tjson.dump(self.pid_metadata, outfile, indent=4)\n\t\toutfile.close()\n\n\tdef destroy(self):\n\t\tdel self.net\n\n"
] |
[
[
"numpy.cos",
"numpy.sin",
"torch.FloatTensor",
"torch.no_grad",
"torch.stack",
"numpy.array"
]
] |
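The target-point computation in tick() above is a plain 2-D rotation of the waypoint offset into the ego frame. A sketch with made-up GPS and compass values, using the same R matrix construction:

import numpy as np

compass = 0.3                      # radians, from the IMU (assumed)
pos = np.array([10.0, 5.0])        # ego position (assumed)
next_wp = np.array([14.0, 8.0])    # next waypoint (assumed)

theta = compass + np.pi / 2
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])

# R.T rotates the world-frame offset into the ego frame
local_command_point = R.T.dot(next_wp - pos)
print(tuple(local_command_point))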
SiliconScientist/protein-surface-calculator
|
[
"ffc1243c2f65d359006a039fb2cdae92005916dd"
] |
[
"protein_surface_calculator.py"
] |
[
"import os\nimport freesasa\nimport numpy as np\n\ntaxa = ['psychro', 'thermo']\nnum_taxa = len(taxa)\n\n# Create empty arrays to contain the ratios or their averages\nratio = [[] for i in range(num_taxa)]\nratio_mean = []\n\n# Loop for each taxa\nfor i in range(num_taxa):\n # Locate folder for use use\n path = f'/Users/averyhill/MyDocuments/schoeffler_research_summer_2021/pdbs/{taxa[i]}philes/{taxa[i]}_gyra_pdb_folder'\n file_array = os.listdir(path)\n \n # Calculate the surface area of each pdb model\n for pdb in file_array:\n structure = freesasa.Structure(f'{path}/{pdb}')\n result = freesasa.calc(structure)\n area_classes = freesasa.classifyResults(result, structure)\n \n # Calculate the polar to apolar ratio and put them in an array\n ratio[i].append(area_classes['Polar']/area_classes['Apolar'])\n \n # Average the items in each ratio array\n ratio_mean.append(np.mean(ratio[i]))\n\n# Print out statistics\nprint('Mean ratio:')\nprint(f'Psychrophiles: {ratio_mean[0]}')\nprint(f'Thermophiles: {ratio_mean[1]}')\nprint()\nprint('Highest ratio:')\nprint(f'Psychrophiles: {np.max(ratio[0])}')\nprint(f'Thermophiles: {np.max(ratio[1])}')\nprint()\nprint('Lowest ratio:')\nprint(f'Psychrophiles {np.min(ratio[0])}')\nprint(f'Thermophiles {np.min(ratio[1])}')"
] |
[
[
"numpy.max",
"numpy.mean",
"numpy.min"
]
] |
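A hedged usage sketch of the freesasa calls from the script above for a single model; 'example.pdb' is a placeholder path, and the three calls (Structure, calc, classifyResults) are the same ones the script uses.

import freesasa

structure = freesasa.Structure('example.pdb')     # placeholder input file
result = freesasa.calc(structure)
area_classes = freesasa.classifyResults(result, structure)
print(area_classes['Polar'] / area_classes['Apolar'])   # polar-to-apolar ratio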
nontas/menpo3d
|
[
"f29324b12a147f5b716ae5c3048d2c6b7a298752"
] |
[
"menpo3d/io/output/mesh.py"
] |
[
"import numpy as np\n\nfrom menpo.io.output.base import _enforce_only_paths_supported\nfrom menpo.shape.mesh import TexturedTriMesh\n\n\ndef obj_exporter(mesh, file_handle, **kwargs):\n r\"\"\"\n Given a file handle to write in to (which should act like a Python `file`\n object), write out the mesh data. No value is returned.\n\n Note that this does not save out textures of textured images, and so should\n not be used in isolation.\n\n Parameters\n ----------\n file_handle : `str`\n The full path where the obj will be saved out.\n mesh : :map:`TriMesh`\n Any subclass of :map:`TriMesh`. If :map:`TexturedTriMesh` texture\n coordinates will be saved out. Note that :map:`ColouredTriMesh`\n will only have shape data saved out, as .OBJ doesn't robustly support\n per-vertex colour information.\n \"\"\"\n for v in mesh.points:\n file_handle.write('v {} {} {}\\n'.format(*v).encode('utf-8'))\n file_handle.write(b'\\n')\n if isinstance(mesh, TexturedTriMesh):\n for tc in mesh.tcoords.points:\n file_handle.write('vt {} {}\\n'.format(*tc).encode('utf-8'))\n file_handle.write(b'\\n')\n # triangulation of points and tcoords is identical\n for t in (mesh.trilist + 1):\n file_handle.write('f {0}/{0} {1}/{1} {2}/{2}\\n'.format(*t).encode('utf-8'))\n else:\n # no tcoords - so triangulation is straight forward\n for t in (mesh.trilist + 1):\n file_handle.write('f {} {} {}\\n'.format(*t).encode('utf-8'))\n\n\ndef ply_exporter(mesh, file_path, binary=False, **kwargs):\n r\"\"\"\n Given a file path to write in to write out the mesh data.\n No value is returned. Only file paths are supported and if a file handle\n is passed it will be ignored and a warning will be raised.\n\n Note that this does not save out textures of textured images, and so should\n not be used in isolation.\n\n Parameters\n ----------\n file_path : `str`\n The full path where the obj will be saved out.\n mesh : :map:`TriMesh`\n Any subclass of :map:`TriMesh`. If :map:`TexturedTriMesh` texture\n coordinates will be saved out. Note that :map:`ColouredTriMesh`\n will only have shape data saved out, as .PLY doesn't robustly support\n per-vertex colour information.\n binary: `bool`, optional\n Specify whether to format output in binary or ascii, defaults to False\n \"\"\"\n import vtk\n from vtk.util.numpy_support import numpy_to_vtk, numpy_to_vtkIdTypeArray\n\n file_path = _enforce_only_paths_supported(file_path, 'PLY')\n\n polydata = vtk.vtkPolyData()\n points = vtk.vtkPoints()\n points.SetData(numpy_to_vtk(mesh.points))\n polydata.SetPoints(points)\n\n cells = vtk.vtkCellArray()\n counts = np.empty((mesh.trilist.shape[0], 1), dtype=np.int)\n counts.fill(3)\n tris = np.concatenate((counts, mesh.trilist), axis=1)\n cells.SetCells(mesh.trilist.shape[0], numpy_to_vtkIdTypeArray(tris))\n polydata.SetPolys(cells)\n\n if isinstance(mesh, TexturedTriMesh):\n pointdata = polydata.GetPointData()\n pointdata.SetTCoords(numpy_to_vtk(mesh.tcoords.points))\n\n ply_writer = vtk.vtkPLYWriter()\n ply_writer.SetFileName(str(file_path))\n ply_writer.SetInputData(polydata)\n if not binary:\n ply_writer.SetFileTypeToASCII()\n else:\n ply_writer.SetFileTypeToBinary()\n ply_writer.Update()\n ply_writer.Write()\n"
] |
[
[
"numpy.concatenate",
"numpy.empty"
]
] |
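A numpy-only sketch of the cell packing that ply_exporter performs before numpy_to_vtkIdTypeArray: VTK stores each polygon as (count, i0, i1, ...), so a leading column of 3s is prepended to the triangle list. The sketch uses np.int64, since the np.int alias in the code above is deprecated and removed in recent NumPy releases.

import numpy as np

trilist = np.array([[0, 1, 2], [2, 1, 3]])          # two assumed triangles
counts = np.full((trilist.shape[0], 1), 3, dtype=np.int64)  # 3 vertices per cell
cells = np.concatenate((counts, trilist), axis=1)
print(cells.ravel())   # [3 0 1 2 3 2 1 3], the flat VTK cell layout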
m1m0r1/lh_calib
|
[
"ff16083e0250517ee7a18aab1931d2a576736bc8",
"ff16083e0250517ee7a18aab1931d2a576736bc8"
] |
[
"lh_calib/simulation/debias_effect.py",
"lh_calib/predictions.py"
] |
[
"import logging\nimport pandas as pd\nimport numpy as np\nimport scipy\nfrom argtools import command, argument\nfrom tqdm import tqdm\nimport sys\nsys.path.insert(0, __file__.rsplit('/', 3)[0])\nfrom lh_calib import evaluation\nfrom lh_calib import predictions\nimport itertools\n\ndef create_probs(N, alpha=1., K=2):\n p_mat = np.random.dirichlet(np.ones(K) * alpha, N)\n return p_mat\n\ndef create_counts(p_mat, n=2):\n count_mat = np.asarray([np.random.multinomial(n, pvals) for pvals in p_mat]) # (N, K)\n return count_mat\n\ndef scale_logit(p_mat, t=1.): # (N, K) -> (N, K)\n p_logit = np.log(p_mat) # (N, K)\n p_unnorm = np.exp(p_logit / t) # (N, K)\n p_mat = p_unnorm / p_unnorm.sum(axis=1, keepdims=True) # (N, K)\n return p_mat\n\ndef apply_blur(p_mat, rho=0.): # (N, K) -> (N, K)\n # rho: 0 - 1\n alpha0 = 1 / (1 - rho)\n return np.asarray([np.random.dirichlet(p_vec) for p_vec in p_mat * alpha0]) # (N, K) <- precision parameter\n\n\ndef _gen_and_eval_basic(N, n, K=2, alpha=1.):\n probs = create_probs(N, alpha=alpha, K=K)\n counts = create_counts(probs, n=n)\n ids = np.arange(len(probs))\n pred = predictions.ProbPrediction(probs, ids)\n label_hist = evaluation.LabelHist(counts)\n lh_eval = evaluation.LHEval(label_hist, pred)\n return lh_eval\n\n\[email protected]_sub\n@argument('-N', '--ninstance', nargs='+', required=True, type=int)\n@argument('-n', '--nrater', nargs='+', type=int, default=[2])\n@argument('-r', '--nrep', type=int, default=5)\n@argument('-s', '--start-seed', type=int, default=0)\n@argument('-o', '--output', default='/dev/stdout')\ndef gen_and_eval_basic(args):\n binning_scheme = evaluation.EquallySpacedBinningScheme(15)\n loop = list(itertools.product(range(args.nrep), args.nrater, args.ninstance))\n\n seed = args.start_seed\n K = 2\n recs = [] \n for rep, n, N in tqdm(loop):\n np.random.seed(seed)\n lh_eval = _gen_and_eval_basic(N=N, n=n, K=K)\n result_tab, binnings = lh_eval.get_cpe_decomp(binning_scheme)\n ps = lh_eval.get_prob_score()\n # Series.sum ignores nan by default\n recs.append({\n 'seed': seed, 'rep': rep, 'n': n, 'N': N, 'K': K,\n 'ps': ps,\n 'el_plugin': result_tab['el_plugin'].sum(),\n 'el_bias': result_tab['el_bias'].sum(),\n 'cl_plugin': result_tab['cl_plugin'].sum(),\n 'cl_bias': result_tab['cl_bias'].sum(),\n 'dl_plugin': result_tab['dl_plugin'].sum(),\n 'dl_bias': result_tab['dl_bias'].sum(),\n })\n seed += 1\n columns = 'seed rep n N K ps el_plugin el_bias cl_plugin cl_bias dl_plugin dl_bias'.split(' ')\n tab = pd.DataFrame.from_records(recs, columns=columns)\n tab.to_csv(args.output, index=False)\n\nif __name__ == '__main__':\n command.run()\n",
"import logging\nimport numpy as np\nimport pandas as pd\nimport scipy.special\nimport scipy.stats\n\n\ndef encode_array(vals, sep=',', fmt='{:.6g}'):\n return sep.join(map(fmt.format, vals))\n\ndef decode_array(vals, sep=','):\n return np.asarray(list(map(float, vals.split(','))))\n\ndef encode_matrix(vals, sep1=',', sep2=';', fmt='{:.6g}'):\n return sep2.join(encode_array(vals1, sep=sep1, fmt=fmt) for vals1 in vals)\n\ndef decode_matrix(vals, sep1=',', sep2=';'):\n return np.asarray([decode_array(vals1, sep=sep1) for vals1 in vals.split(';')])\n\n\ndef load(path):\n cands = [\n MCAlphaPrediction,\n AlphaPrediction,\n WMCProbPrediction,\n MCProbPrediction,\n ProbPrediction,\n ]\n errors = []\n for cls in cands:\n try:\n return cls.load(path)\n except KeyError as e:\n errors.append(e)\n for e in errors:\n logging.error(e)\n raise NotImplementedError\n\n\nclass Prediction:\n @property\n def ids(self):\n return self._ids\n\n def get_probs(self): # (N, K)\n return self._probs\n\n @classmethod\n def load(cls, path):\n raise NotImplementedError\n\n def save(self, path, ids):\n raise NotImplementedError\n\n def get_posterior(self, hists):\n raise NotImplementedError\n\n\ndef hist_likelihood(hists, probs): # (..., K), (..., K) -> (...,)\n return (probs ** hists).sum(axis=-1)\n\n\ndef get_posterior_dirichlet0(hists, alpha0=1.):\n K = hists.shape[1] # (N, K)\n alpha = alpha0 * np.ones(K) / K\n post_alpha = hists + alpha[:, None]\n return AlphaPrediction(post_alpha, pred.ids)\n\ndef get_posterior_dirichlet(pred, hists, alpha0=1.):\n probs = pred.get_probs()\n alpha = alpha0 * probs\n assert hists.shape == probs.shape # (N, K)\n post_alpha = hists + alpha\n return AlphaPrediction(post_alpha, pred.ids)\n\n\nclass ProbPrediction(Prediction):\n def __init__(self, probs, ids):\n self._probs = np.asarray(probs) # (N, K)\n assert len(self._probs.shape) == 2\n self._ids = ids\n\n def get_agreement_probs(self): # (N,)\n return (self._probs ** 2).sum(axis=1)\n\n @classmethod\n def load(cls, path):\n tab = pd.read_csv(path, sep='\\t')\n probs = np.asarray(list(map(decode_array, tab['prob'])))\n return cls(probs, tab['id'])\n\n def save(self, path):\n columns = ['id', 'prob']\n tab = pd.DataFrame({\n 'id': self._ids,\n 'prob': list(map(encode_array, self._probs)),\n }, columns=columns)\n tab.to_csv(path, sep='\\t', index=False)\n\n\nclass MCProbPrediction(Prediction):\n def __init__(self, mc_probs, ids):\n self._mc_probs = np.asarray(mc_probs) # (N, S, K)\n assert len(self._mc_probs.shape) == 3\n self._probs = self._mc_probs.mean(axis=1) # (N, K)\n self._ids = ids\n\n def get_agreement_probs(self): # (N,)\n mc_agree_probs = (self._mc_probs ** 2).sum(axis=2) # (N, S)\n return mc_agree_probs.mean(axis=1)\n\n @classmethod\n def load(cls, path):\n tab = pd.read_csv(path, sep='\\t')\n mc_probs = np.asarray(list(map(decode_matrix, tab['mc_prob'])))\n return cls(mc_probs, tab['id'])\n\n def save(self, path):\n columns = ['id', 'mc_prob']\n tab = pd.DataFrame({\n 'id': self._ids,\n 'mc_prob': list(map(encode_matrix, self._mc_probs)),\n }, columns=columns)\n tab.to_csv(path, sep='\\t', index=False)\n\n def get_posterior(self, hists):\n hl = hist_likelihood(hists[:, None, :], self._mc_probs) # (N, S, K) -> (N, S)\n weights = hl / hl.sum(axis=-1, keepdims=True) # normalized -> (N, S)\n logging.info(weights)\n\n wmc_pred = WMCProbPrediction(self._mc_probs, weights, ids=self.ids) # (N, S, K), (N, S)\n return wmc_pred\n\n\nclass WMCProbPrediction(Prediction):\n def __init__(self, mc_probs, mc_weights, ids):\n self._mc_probs 
= np.asarray(mc_probs) # (N, S, K)\n self._mc_weights = np.asarray(mc_weights) # (N, S) or (1, S)\n assert len(self._mc_probs.shape) == 3\n assert self._mc_weights.shape == self._mc_probs.shape[:2]\n self._probs = (self._mc_probs * self._mc_weights[:, :, None]).sum(axis=1) # (N, K)\n self._ids = ids\n\n @classmethod\n def load(cls, path):\n tab = pd.read_csv(path, sep='\\t')\n mc_probs = np.asarray(list(map(decode_matrix, tab['mc_prob'])))\n mc_weights = np.asarray(list(map(decode_array, tab['mc_weight'])))\n return cls(mc_probs, mc_weights, tab['id'])\n\n def save(self, path):\n columns = ['id', 'mc_prob', 'mc_weight']\n tab = pd.DataFrame({\n 'id': self._ids,\n 'mc_prob': list(map(encode_matrix, self._mc_probs)),\n 'mc_weight': list(map(encode_array, self._mc_weights)),\n }, columns=columns)\n tab.to_csv(path, sep='\\t', index=False)\n\n\nclass AlphaPrediction(Prediction):\n eps = clip_min = np.finfo(float).eps\n clip_max = 1./np.finfo(float).eps\n\n def __init__(self, alphas, ids):\n self._alphas = np.asarray(alphas) # (N, K)\n self._alphas[np.isnan(self._alphas)] = self.clip_min # Repair underflowed values\n self._alphas = np.clip(self._alphas, self.clip_min, self.clip_max)\n assert len(self._alphas.shape) == 2\n self._alpha0s = self._alphas.sum(axis=1)\n self._probs = self._alphas / self._alpha0s[:,None]\n self._ids = ids\n\n def get_alphas(self):\n return self._alphas\n\n def get_agreement_probs(self): # (N,)\n denom = self._alpha0s * (self._alpha0s + 1)\n square_moments = self._alphas * (self._alphas + 1) / denom[:, None] # (N, K)\n agree_probs = square_moments.sum(axis=1) # (N,)\n return agree_probs\n\n @classmethod\n def load(cls, path):\n tab = pd.read_csv(path, sep='\\t')\n alphas = np.asarray(list(map(decode_array, tab['alpha'])))\n return cls(alphas, tab['id'])\n\n def save(self, path):\n columns = ['id', 'alpha']\n tab = pd.DataFrame({\n 'id': self._ids,\n 'alpha': list(map(encode_array, self._alphas)),\n }, columns=columns)\n tab.to_csv(path, sep='\\t', index=False)\n\n def get_posterior(self, hists):\n alpha = self._alphas\n assert hists.shape == alpha.shape # (N, K)\n post_alpha = hists + alpha\n return AlphaPrediction(post_alpha, self.ids)\n\n\nclass MCAlphaPrediction(Prediction):\n eps = clip_min = np.finfo(float).eps\n clip_max = 1./np.finfo(float).eps\n\n def __init__(self, mc_alphas, ids):\n self._mc_alphas = np.asarray(mc_alphas) # (N, S, K)\n self._mc_alphas[np.isnan(self._mc_alphas)] = self.clip_min # repair underflowed values\n self._mc_alphas = np.clip(self._mc_alphas, self.clip_min, self.clip_max)\n assert len(self._mc_alphas.shape) == 3\n self._alphas = self._mc_alphas.mean(axis=1) # (N, K)\n self._mc_alpha0s = self._mc_alphas.sum(axis=2) # (N, S)\n self._mc_mean_probs = self._mc_alphas / self._mc_alpha0s[:, :, None] #(N, S, K)\n self._probs = self._mc_mean_probs.mean(axis=1) #(N, K)\n self._ids = ids\n\n def get_alphas(self):\n return self._alphas\n\n def get_agreement_probs(self): # (N,)\n mc_square_moments = self._mc_alphas * (self._mc_alphas + 1) / (self._mc_alpha0s * (self._mc_alpha0s + 1))[:, :, None] # (N, S, K)\n mc_agree_probs = mc_square_moments.sum(axis=2) # (N, S)\n return mc_agree_probs.mean(axis=1)\n\n @classmethod\n def load(cls, path):\n tab = pd.read_csv(path, sep='\\t')\n mc_alphas = np.asarray(list(map(decode_matrix, tab['mc_alpha'])))\n return cls(mc_alphas, tab['id'])\n\n def save(self, path):\n columns = ['id', 'mc_alpha']\n tab = pd.DataFrame({\n 'id': self._ids,\n 'mc_alpha': list(map(encode_matrix, self._mc_alphas)),\n }, 
columns=columns)\n tab.to_csv(path, sep='\\t', index=False)\n"
] |
[
[
"numpy.log",
"numpy.random.seed",
"numpy.ones",
"numpy.random.multinomial",
"pandas.DataFrame.from_records",
"numpy.exp",
"numpy.random.dirichlet"
],
[
"pandas.read_csv",
"numpy.clip",
"numpy.asarray",
"numpy.isnan",
"numpy.ones",
"numpy.finfo"
]
] |
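A worked sketch, with assumed numbers, of the conjugate update implemented by get_posterior_dirichlet above: the posterior Dirichlet parameters are simply the observed label histogram plus the prior alpha0 * probs.

import numpy as np

probs = np.array([[0.7, 0.3],
                  [0.5, 0.5]])       # predicted class probabilities (N, K), assumed
hists = np.array([[3, 1],
                  [0, 2]])           # observed label counts (N, K), assumed
alpha0 = 2.0

post_alpha = hists + alpha0 * probs  # (N, K) posterior Dirichlet parameters
post_mean = post_alpha / post_alpha.sum(axis=1, keepdims=True)
print(post_mean)                     # counts pull the prediction toward the labels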
Floozutter/buzzspike
|
[
"fadb44bcae234695e19cee9ecf27e8f7afe0515d"
] |
[
"bzsp/core.py"
] |
[
"from . import recog\nimport cv2\nimport numpy\nfrom numpy import ndarray\nfrom typing import Iterable\n\ndef handle_source(source: Iterable[ndarray], delay: int) -> None:\n maskname = \"\"\n show_green_chevrons = True\n show_green_boxes = True\n show_red_chevrons = True\n show_red_boxes = True\n for frame in source:\n # process frame\n _, work = recog.killfeed_with_work(frame)\n # show work\n demo = frame.copy()\n if maskname:\n demo[numpy.where(work[maskname] == 0)] //= 8\n if show_green_chevrons:\n for x, y, w, h in work[\"green_chevrons\"]:\n cv2.rectangle(demo, (x, y), (x + w, y + h), (0, 255, 0), 2)\n if show_green_boxes:\n for x, y, w, h in work[\"green_boxes\"]:\n cv2.rectangle(demo, (x, y), (x + w, y + h), (0, 122, 0), 2)\n if show_red_chevrons:\n for x, y, w, h in work[\"red_chevrons\"]:\n cv2.rectangle(demo, (x, y), (x + w, y + h), (0, 0, 255), 2)\n if show_red_boxes:\n for x, y, w, h in work[\"red_boxes\"]:\n cv2.rectangle(demo, (x, y), (x + w, y + h), (0, 0, 122), 2)\n cv2.imshow(\"bzst\", demo)\n # handle input\n keycode = cv2.waitKey(delay) & 0xFF\n if keycode == 27:\n cv2.destroyAllWindows()\n break\n elif keycode == ord(\"n\"):\n maskname = \"\"\n elif keycode == ord(\"w\"):\n maskname = \"white_segment\"\n elif keycode == ord(\"g\"):\n maskname = \"green_segment\"\n elif keycode == ord(\"h\"):\n show_green_chevrons = not show_green_chevrons\n elif keycode == ord(\"j\"):\n show_green_boxes = not show_green_boxes\n elif keycode == ord(\"r\"):\n maskname = \"red_segment\"\n elif keycode == ord(\"t\"):\n show_red_chevrons = not show_red_chevrons\n elif keycode == ord(\"y\"):\n show_red_boxes = not show_red_boxes"
] |
[
[
"numpy.where"
]
] |
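A tiny synthetic illustration of the numpy.where dimming trick in handle_source above: pixels where the mask is zero are integer-divided by 8, darkening everything outside the selected segment while leaving masked-in pixels untouched.

import numpy as np

demo = np.full((2, 3, 3), 200, dtype=np.uint8)   # fake 2x3 BGR frame (assumed)
mask = np.array([[0, 255, 0],
                 [255, 0, 0]], dtype=np.uint8)   # assumed segment mask

demo[np.where(mask == 0)] //= 8                  # dim pixels outside the mask
print(demo[:, :, 0])                             # 25 where masked out, 200 where kept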
artvalencio/coupledlogistic
|
[
"15015329c808e5faf606b350d8577e05a1c1b302"
] |
[
"coupledlogistic/coupledlogistic.py"
] |
[
"def coupledlogistic(tslength,r,A,sigma,couplingtype,verbose=False):\r\n '''Generates time-series dynamics for coupled logistic networks of parameter r\r\n --------------------------------\r\n Inputs:\r\n tslength: length of the time-series (number of points)\r\n r: logistic map free parameter. Can be a single number (all nodes\r\n are equal) or a vector (specifying different r for each node). \r\n A: adjacency matrix\r\n sigma: coupling strength\r\n couplingtype: one of the options: 'diffusive' or 'kaneko'\r\n verbose: whether to display progress bar or not\r\n --------------------------------\r\n Output:\r\n out: each column is time-series for a node \r\n described in the adjacency matrix\r\n --------------------------------\r\n Usage examples:\r\n - Simple X->Y system:\r\n A=[[0,1],[0,0]]\r\n x=coupledlogistic(1e5,4,A,0.2,'diffusive')\r\n \r\n \r\n - Serial:\r\n A=[[0,1,0,0],[0,0,1,0],[0,0,0,1],[0,0,0,0]]\r\n x=coupledlogistic(1e7,4,A,0.2,'diffusive')\r\n \r\n (This adjacency matrix A defines a system with nodes [i] connected\r\n as: \r\n [1]->[2]->[3]->[4]. \r\n The dynamics of each node is a r=4 logistic map.\r\n Each link is a linear diffusive coupling with strength 0.2.\r\n The time-series from each node has 1*10^7 points)\r\n \r\n - Parallel:\r\n A=[[0,1,1,0],[0,0,0,1],[0,0,0,1],[0,0,0,0]]\r\n x=coupledlogistic(1e7,4,A,0.1,'diffusive')\r\n \r\n (This adjacency matrix A defines a system with nodes [i] connected\r\n as: \r\n [1]->[2]\r\n | |\r\n V V\r\n [3]->[4]. \r\n The dynamics of each node is a r=4 logistic map.\r\n Each link is a linear diffusive coupling with strength 0.1.\r\n The time-series from each node has 1*10^7 points)\r\n \r\n - Wheatstone-bridge-like:\r\n A=[[0,1,1,0],[0,0,1,1],[0,0,0,1],[0,0,0,0]]\r\n x=coupledlogistic(1e7,4,A,0.15,'diffusive')\r\n \r\n (This adjacency matrix A defines a system with nodes [i] connected\r\n as: \r\n [1]->[2]\r\n | / |\r\n V V V\r\n [3]->[4]. 
\r\n The dynamics of each node is a r=4 logistic map.\r\n Each link is a linear diffusive coupling with strength 0.15.\r\n The time-series from each node has 1*10^7 points)\r\n --------------------------------\r\n LaTeX expression:\r\n \r\n If couplingtype='diffusive':\r\n $x_{n+1}^i=(1-\\sigma)f(x_n^i)+\\frac{\\sigma}{k_i}\\sum_j{A_{ji}(x_n^j-x_n^i)}$\r\n Or if couplingtype='kaneko':\r\n $x_{n+1}^i=(1-\\sigma)f(x_n^i)+\\frac{\\sigma}{k_i}\\sum_j{A_{ji}f(x_n^j)}$\r\n where $f(x)=r*x*(1-x)$\r\n \r\n --------------------------------\r\n (C) Arthur Valencio(1)* and Murilo Baptista(1), 15 December 2017 (Python translation: 16 Sep 2020)\r\n (1)ICSMB, University of Aberdeen,UK\r\n *Support: CNPq (Brazil)'''\r\n\r\n import numpy as np\r\n import pandas as pd\r\n A=np.array(A)\r\n tslength=int(tslength)\r\n if verbose==True:\r\n print('Generating time-series')\r\n nonodes=len(A[1,:])\r\n \r\n if type(r)==int or type(r)==float:\r\n r=np.ones(nonodes)*r\r\n elif len(r)==1:\r\n r=np.ones(nonodes)*r[0]\r\n\r\n\r\n def diffusivecalc(tslength,r,A,sigma,nonodes):\r\n #calculation when diffusive\r\n cond=1\r\n count=0\r\n while cond: \r\n temp=0\r\n count=count+1\r\n #initial cond\r\n out=np.full([tslength+10001,nonodes],np.nan)\r\n out[0,:]=np.random.random(nonodes)\r\n #calculate\r\n for n in range(tslength+10000):\r\n #progress bar\r\n if verbose==True:\r\n if n % np.floor(tslength/10)==0:\r\n print('.',end='')\r\n #actual calc\r\n deg=[]\r\n for k in range(nonodes):\r\n #calc coupling \r\n sumterm=0\r\n deg.append(0)\r\n for l in range(nonodes):\r\n if A[l,k]==1:\r\n sumterm=sumterm+out[n,l]-out[n,k]\r\n deg[k]=deg[k]+1\r\n #calc next step\r\n if deg[k]>0:\r\n sumterm=sumterm/deg[k]\r\n out[n+1,k]=(1-sigma)*r[k]*out[n,k]*(1-out[n,k])+sigma*sumterm\r\n else: #deg=0 means it's an input node, so calc only logistic dynamics\r\n out[n+1,k]=r[k]*out[n,k]*(1-out[n,k])\r\n \r\n #error: repeat for new initial cond\r\n if out[n+1,k]==np.nan:\r\n temp=1 \r\n break\r\n elif out[n+1,k]==0 and out[n,k]==0:\r\n temp=1\r\n break\r\n #error handling\r\n if temp==0:\r\n break\r\n else:\r\n print('recalculating...', end=' ')\r\n print(count)\r\n if count>10:\r\n break\r\n return out\r\n\r\n def kanekocalc(tslength,r,A,sigma,nonodes):\r\n #calculation when kaneko\r\n cond=1\r\n count=0\r\n while cond:\r\n count=count+1\r\n temp=0\r\n #initial cond\r\n out=np.full([tslength+10001,nonodes],np.nan)\r\n out[0,:]=np.random.random(nonodes)\r\n #calculate\r\n for n in range(tslength+10000):\r\n #progress bar\r\n if verbose==True:\r\n if n % np.floor(tslength/10)==0:\r\n print('.',end='')\r\n #actual calc\r\n deg=[]\r\n for k in range(nonodes):\r\n #calc coupling \r\n sumterm=0\r\n deg.append(0)\r\n for l in range(nonodes):\r\n if A[l,k]==1:\r\n sumterm=sumterm+r[k]*out[n,l]*(1-out[n,l])\r\n deg[k]=deg[k]+1\r\n #calc next step\r\n if deg[k]>0:\r\n sumterm=sumterm/deg[k]\r\n out[n+1,k]=(1-sigma)*r[k]*out[n,k]*(1-out[n,k])+sigma*sumterm\r\n else: #deg=0 means it's an input node, so calc only logistic dynamics\r\n out[n+1,k]=r[k]*out[n,k]*(1-out[n,k])\r\n \r\n #error: repeat for new initial cond\r\n if out[n+1,k]==np.nan:\r\n temp=1 \r\n break\r\n elif out[n+1,k]==0 and out[n,k]==0:\r\n temp=1\r\n break \r\n #error handling\r\n if temp==0:\r\n break\r\n else:\r\n print('recalculating...',end=' ')\r\n print(count)\r\n if count>10:\r\n break\r\n return out\r\n\r\n def normal(x):\r\n #NORMAL Quick 0 to 1 nomalization\r\n return (x-min(x))/(max(x)-min(x))\r\n \r\n if couplingtype=='diffusive':\r\n 
out=diffusivecalc(tslength,r,A,sigma,nonodes)\r\n elif couplingtype=='kaneko':\r\n out=kanekocalc(tslength,r,A,sigma,nonodes)\r\n \r\n #cut transient\r\n out=out[10002:,:]\r\n #normalize\r\n for i in range(nonodes):\r\n out[:,i]=normal(out[:,i])\r\n return out\r\n"
] |
[
[
"numpy.random.random",
"numpy.full",
"numpy.ones",
"numpy.floor",
"numpy.array"
]
] |
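A short sketch, under assumed parameters, of the diffusive update from the docstring for the simple X->Y case (A=[[0,1],[0,0]], r=4, sigma=0.2), omitting the transient cutting and retry logic of the full function. As an aside, the out[n+1,k]==np.nan checks in the function above always evaluate False, since NaN never compares equal; np.isnan is the standard test.

import numpy as np

np.random.seed(0)                      # assumed seed for reproducibility
r, sigma, steps = 4.0, 0.2, 1000
x = np.empty((steps + 1, 2))
x[0] = np.random.random(2)

for n in range(steps):
    fx = r * x[n] * (1 - x[n])                                 # logistic map f(x)
    x[n + 1, 0] = fx[0]                                        # input node: pure logistic
    x[n + 1, 1] = (1 - sigma) * fx[1] + sigma * (x[n, 0] - x[n, 1])  # diffusive coupling

print(x[-5:])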
AisahAlfiyatusR/Image_Retrieval_Heroku
|
[
"5db06ebe95926810601a563ebf45d6f5b2cfc89f",
"5db06ebe95926810601a563ebf45d6f5b2cfc89f"
] |
[
"venv/share/doc/networkx-2.6.3/examples/subclass/plot_printgraph.py",
"venv/share/doc/networkx-2.6.3/examples/drawing/plot_weighted_graph.py"
] |
[
"\"\"\"\n===========\nPrint Graph\n===========\n\nExample subclass of the Graph class.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom networkx import Graph\n\n\nclass PrintGraph(Graph):\n \"\"\"\n Example subclass of the Graph class.\n\n Prints activity log to file or standard output.\n \"\"\"\n\n def __init__(self, data=None, name=\"\", file=None, **attr):\n super().__init__(data=data, name=name, **attr)\n if file is None:\n import sys\n\n self.fh = sys.stdout\n else:\n self.fh = open(file, \"w\")\n\n def add_node(self, n, attr_dict=None, **attr):\n super().add_node(n, attr_dict=attr_dict, **attr)\n self.fh.write(f\"Add node: {n}\\n\")\n\n def add_nodes_from(self, nodes, **attr):\n for n in nodes:\n self.add_node(n, **attr)\n\n def remove_node(self, n):\n super().remove_node(n)\n self.fh.write(f\"Remove node: {n}\\n\")\n\n def remove_nodes_from(self, nodes):\n for n in nodes:\n self.remove_node(n)\n\n def add_edge(self, u, v, attr_dict=None, **attr):\n super().add_edge(u, v, attr_dict=attr_dict, **attr)\n self.fh.write(f\"Add edge: {u}-{v}\\n\")\n\n def add_edges_from(self, ebunch, attr_dict=None, **attr):\n for e in ebunch:\n u, v = e[0:2]\n self.add_edge(u, v, attr_dict=attr_dict, **attr)\n\n def remove_edge(self, u, v):\n super().remove_edge(u, v)\n self.fh.write(f\"Remove edge: {u}-{v}\\n\")\n\n def remove_edges_from(self, ebunch):\n for e in ebunch:\n u, v = e[0:2]\n self.remove_edge(u, v)\n\n def clear(self):\n super().clear()\n self.fh.write(\"Clear graph\\n\")\n\n\nG = PrintGraph()\nG.add_node(\"foo\")\nG.add_nodes_from(\"bar\", weight=8)\nG.remove_node(\"b\")\nG.remove_nodes_from(\"ar\")\nprint(\"Nodes in G: \", G.nodes(data=True))\nG.add_edge(0, 1, weight=10)\nprint(\"Edges in G: \", G.edges(data=True))\nG.remove_edge(0, 1)\nG.add_edges_from(zip(range(0, 3), range(1, 4)), weight=10)\nprint(\"Edges in G: \", G.edges(data=True))\nG.remove_edges_from(zip(range(0, 3), range(1, 4)))\nprint(\"Edges in G: \", G.edges(data=True))\n\nG = PrintGraph()\nnx.add_path(G, range(10))\nnx.add_star(G, range(9, 13))\npos = nx.spring_layout(G, seed=225) # Seed for reproducible layout\nnx.draw(G, pos)\nplt.show()\n",
"\"\"\"\n==============\nWeighted Graph\n==============\n\nAn example using Graph as a weighted network.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nG = nx.Graph()\n\nG.add_edge(\"a\", \"b\", weight=0.6)\nG.add_edge(\"a\", \"c\", weight=0.2)\nG.add_edge(\"c\", \"d\", weight=0.1)\nG.add_edge(\"c\", \"e\", weight=0.7)\nG.add_edge(\"c\", \"f\", weight=0.9)\nG.add_edge(\"a\", \"d\", weight=0.3)\n\nelarge = [(u, v) for (u, v, d) in G.edges(data=True) if d[\"weight\"] > 0.5]\nesmall = [(u, v) for (u, v, d) in G.edges(data=True) if d[\"weight\"] <= 0.5]\n\npos = nx.spring_layout(G, seed=7) # positions for all nodes - seed for reproducibility\n\n# nodes\nnx.draw_networkx_nodes(G, pos, node_size=700)\n\n# edges\nnx.draw_networkx_edges(G, pos, edgelist=elarge, width=6)\nnx.draw_networkx_edges(\n G, pos, edgelist=esmall, width=6, alpha=0.5, edge_color=\"b\", style=\"dashed\"\n)\n\n# labels\nnx.draw_networkx_labels(G, pos, font_size=20, font_family=\"sans-serif\")\n\nax = plt.gca()\nax.margins(0.08)\nplt.axis(\"off\")\nplt.tight_layout()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis"
]
] |
jinsanity07git/tmip-emat
|
[
"ff816cf50f141825078bb276d6da46d92c5028a9",
"ff816cf50f141825078bb276d6da46d92c5028a9",
"ff816cf50f141825078bb276d6da46d92c5028a9"
] |
[
"emat/analysis/explore_2/explore_visualizer.py",
"emat/database/sqlite/callback.py",
"tests/test_road_test.py"
] |
[
"import numpy\nimport pandas\nimport warnings\nimport functools\nfrom ...viz import colors\nfrom ...scope.box import GenericBox\nfrom ...database import Database\nfrom traitlets import TraitError\n\nfrom plotly import graph_objs as go\n\nfrom ipywidgets import Dropdown\nimport ipywidgets as widget\n\nimport logging\n_logger = logging.getLogger('EMAT.widget')\n\nfrom .explore_base import DataFrameExplorer\nfrom ..prim import PrimBox\nfrom ...exceptions import ScopeError\n\n\ndef _deselect_all_points(trace):\n\ttrace.selectedpoints = None\n\n\n# def _debugprint(s):\n# \tprint(s.replace(\"rgb(255, 127, 14)\", \"<ORANGE>\").replace(\"rgb(255, 46, 241)\",\"<PINK>\"))\n\n\nfrom .components import *\n\nrange_caption_css = (\n\t\"<style> \"\n\t\".emat-rangecaption > input \"\n\t\"{ border: solid 1px #eeeeee !important; text-align: center;} \"\n\t\".emat-rangecaption > input::placeholder \"\n\t\"{color:#dddddd}</style>\"\n)\n\n\nclass Visualizer(DataFrameExplorer):\n\t\"\"\"\n\tA data visualization framework.\n\n\tArgs:\n\t\tdata (pandas.DataFrame or str):\n\t\t\tThe base data to visualize. Give the data directly as a\n\t\t\tDataFrame, or give the name of a design that can be loaded\n\t\t\tfrom the `db` Database.\n\t\tselections (Mapping or pandas.DataFrame, optional):\n\t\t\tAny pre-existing selections. Each selection should be a\n\t\t\tboolean pandas.Series indexed the same as the data.\n\t\tscope (emat.Scope, optional):\n\t\t\tThe scope that describes the data.\n\t\tactive_selection_name (str, optional):\n\t\t\tThe name of the selection to activate.\n\t\treference_point (Mapping or pandas.DataFrame):\n\t\t\tAn optional reference point to visualize. Give as a simple\n\t\t\tmapping, or as a one-row DataFrame with the same columns as\n\t\t\t`data`, or give the name of a one-row design that can be loaded\n\t\t\tfrom the `db` Database.\n\t\tdb (emat.Database, optional): A database from which to read content.\n\t\"\"\"\n\n\tdef __init__(\n\t\t\tself,\n\t\t\tdata,\n\t\t\tselections=None,\n\t\t\tscope=None,\n\t\t\tactive_selection_name=None,\n\t\t\treference_point=None,\n\t\t\t*,\n\t\t\tdb=None,\n\t):\n\t\tif db is not None:\n\t\t\tif scope is None:\n\t\t\t\tscope = db.read_scope()\n\t\t\telif isinstance(scope, str):\n\t\t\t\tscope = db.read_scope(scope)\n\t\t\tif isinstance(data, str):\n\t\t\t\tdata = db.read_experiment_all(\n\t\t\t\t\tscope_name=scope.name,\n\t\t\t\t\tdesign_name=data,\n\t\t\t\t\tensure_dtypes=True,\n\t\t\t\t)\n\t\t\tif isinstance(reference_point, str):\n\t\t\t\treference_point = db.read_experiment_all(\n\t\t\t\t\tscope_name=scope.name,\n\t\t\t\t\tdesign_name=reference_point,\n\t\t\t\t\tensure_dtypes=True,\n\t\t\t\t)\n\n\t\tif selections is None:\n\t\t\tfrom ...scope.box import Box\n\t\t\tselections = {'Explore': Box(name='Explore', scope=scope)}\n\t\t\tif active_selection_name is None:\n\t\t\t\tactive_selection_name = 'Explore'\n\n\t\tsuper().__init__(\n\t\t\tdata,\n\t\t\tselections=selections,\n\t\t\tactive_selection_name=active_selection_name,\n\t\t\treference_point=reference_point,\n\t\t)\n\t\tself.scope = scope\n\t\tself._figures_hist = {}\n\t\tself._figures_freq = {}\n\t\tself._base_histogram = {}\n\t\tself._categorical_data = {}\n\t\tself._freeze = False\n\t\tself._two_way = {}\n\t\tself._three_way = {}\n\t\tself._splom = {}\n\t\tself._hmm = {}\n\t\tself._parcoords = {}\n\t\tself._selection_feature_score_fig = None\n\n\t\tself._status_txt = widget.HTML(\n\t\t\tvalue=\"<i>Explore Status Not Set</i>\",\n\t\t)\n\t\tself._status_pie = 
go.FigureWidget(\n\t\t\tgo.Pie(\n\t\t\t\tvalues=[75, 250],\n\t\t\t\tlabels=['Inside', 'Outside'],\n\t\t\t\thoverinfo='label+value',\n\t\t\t\ttextinfo='percent',\n\t\t\t\ttextfont_size=10,\n\t\t\t\tmarker=dict(\n\t\t\t\t\tcolors=[\n\t\t\t\t\t\tself.active_selection_color(),\n\t\t\t\t\t\tcolors.DEFAULT_BASE_COLOR,\n\t\t\t\t\t],\n\t\t\t\t\tline=dict(color='#FFF', width=0.25),\n\t\t\t\t)\n\t\t\t),\n\t\t\tlayout=dict(\n\t\t\t\twidth=100,\n\t\t\t\theight=100,\n\t\t\t\tshowlegend=False,\n\t\t\t\tmargin=dict(l=10, r=10, t=10, b=10),\n\t\t\t)\n\t\t)\n\t\tself._status = widget.HBox(\n\t\t\t[\n\t\t\t\twidget.VBox([self._active_selection_chooser, self._status_txt]),\n\t\t\t\tself._status_pie\n\t\t\t],\n\t\t\tlayout=dict(\n\t\t\t\tjustify_content = 'space-between',\n\t\t\t\talign_items = 'center',\n\t\t\t)\n\t\t)\n\t\tself._update_status()\n\n\tdef get_histogram_figure(self, col, bins=20, marker_line_width=None):\n\t\ttry:\n\t\t\tthis_type = self.scope.get_dtype(col)\n\t\texcept:\n\t\t\tthis_type = 'float'\n\t\tif this_type in ('cat','bool'):\n\t\t\treturn self.get_frequency_figure(col)\n\t\tif this_type in ('int',):\n\t\t\tparam = self.scope[col]\n\t\t\tif param.max - param.min + 1 <= bins * configuration.config.get(\"integer_bin_ratio\", 4):\n\t\t\t\tprint(\"OVERLOAD BINS\",bins, configuration.config.get(\"integer_bin_ratio\", 4), param.max - param.min + 1)\n\t\t\t\tbins = param.max - param.min + 1\n\t\t\t\tif marker_line_width is None:\n\t\t\t\t\tmarker_line_width = 0\n\t\tself._create_histogram_figure(col, bins=bins, marker_line_width=marker_line_width)\n\t\treturn self._figures_hist[col]\n\n\tdef get_frequency_figure(self, col):\n\t\tif self.scope.get_dtype(col) == 'cat':\n\t\t\tlabels = self.scope.get_cat_values(col)\n\t\telse:\n\t\t\tlabels = [False, True]\n\t\tself._create_frequencies_figure(col, labels=labels)\n\t\treturn self._figures_freq[col]\n\n\tdef __get_plain_box(self):\n\t\tif self.active_selection_deftype() == 'box':\n\t\t\tbox = self._selection_defs[self.active_selection_name()]\n\t\telif self.active_selection_deftype() == 'primbox':\n\t\t\tbox = self._selection_defs[self.active_selection_name()].to_emat_box()\n\t\telse:\n\t\t\tbox = None\n\t\treturn box\n\n\tdef _create_histogram_figure(self, col, bins=20, *, marker_line_width=None):\n\t\tif col in self._figures_hist:\n\t\t\tself._update_histogram_figure(col)\n\t\telse:\n\t\t\tselection = self.active_selection()\n\t\t\tbox = self.__get_plain_box()\n\t\t\tfig = new_histogram_figure(\n\t\t\t\tselection, self.data[col], bins,\n\t\t\t\tmarker_line_width=marker_line_width,\n\t\t\t\ton_deselect=lambda *a: self._on_deselect_from_histogram(*a,name=col),\n\t\t\t\ton_select=lambda *a: self._on_select_from_histogram(*a,name=col),\n\t\t\t\tbox=box,\n\t\t\t\ttitle_text=self.scope.shortname(col),\n\t\t\t\tref_point=self.reference_point(col),\n\t\t\t\tselected_color=self.active_selection_color(),\n\t\t\t)\n\t\t\tfig_rangecaption = widget.Text(\n\t\t\t\tvalue=\"\",\n\t\t\t\tplaceholder=\"any value\",\n\t\t\t\tcontinuous_update=False,\n\t\t\t\tlayout={'padding': '0px 25px 15px', },\n\t\t\t).add_class(\"emat-rangecaption\")\n\t\t\tfig_rangecaption.observe(\n\t\t\t\tlambda payload: self._on_select_from_rangestring(payload, name=col),\n\t\t\t\tnames='value',\n\t\t\t)\n\t\t\tself._figures_hist[col] = widget.VBox([fig, fig_rangecaption, widget.HTML(range_caption_css)])\n\n\tdef _create_frequencies_figure(self, col, labels=None, *, marker_line_width=None):\n\t\tif col in 
self._figures_freq:\n\t\t\tself._update_frequencies_figure(col)\n\t\telse:\n\t\t\tselection = self.active_selection()\n\t\t\tbox = self.__get_plain_box()\n\t\t\tfig = new_frequencies_figure(\n\t\t\t\tselection, self.data[col], labels,\n\t\t\t\tmarker_line_width=marker_line_width,\n\t\t\t\ton_deselect=functools.partial(self._on_deselect_from_histogram, name=col),\n\t\t\t\ton_select=functools.partial(self._on_select_from_freq, name=col),\n\t\t\t\t#on_click=functools.partial(self._on_click_from_frequencies, name=col), # not always stable\n\t\t\t\tbox=box,\n\t\t\t\ttitle_text=self.scope.shortname(col),\n\t\t\t\tref_point=self.reference_point(col),\n\t\t\t\tlabel_name_map=self.scope[col].abbrev,\n\t\t\t\tselected_color=self.active_selection_color(),\n\t\t\t)\n\t\t\tfig_rangecaption = widget.Text(\n\t\t\t\tvalue=\"\",\n\t\t\t\tplaceholder=\"any value\",\n\t\t\t\tcontinuous_update=False,\n\t\t\t\tlayout={'padding': '0px 25px 15px', },\n\t\t\t).add_class(\"emat-rangecaption\")\n\t\t\tfig_rangecaption.observe(\n\t\t\t\tlambda payload: self._on_select_from_setstring(payload, name=col),\n\t\t\t\tnames='value',\n\t\t\t)\n\t\t\tself._figures_freq[col] = widget.VBox([fig, fig_rangecaption, widget.HTML(range_caption_css)])\n\n\tdef _update_histogram_figure(self, col):\n\t\tif col in self._figures_hist:\n\t\t\tfig = self._figures_hist[col].children[0]\n\t\t\tbox = self.__get_plain_box()\n\t\t\twith fig.batch_update():\n\t\t\t\tupdate_histogram_figure(\n\t\t\t\t\tfig,\n\t\t\t\t\tself.active_selection(),\n\t\t\t\t\tself.data[col],\n\t\t\t\t\tbox=box,\n\t\t\t\t\tref_point=self.reference_point(col),\n\t\t\t\t)\n\t\t\trangestring_input = self._figures_hist[col].children[1]\n\t\t\tif box is not None:\n\t\t\t\tbounds = box.thresholds.get(col, None)\n\t\t\telse:\n\t\t\t\tbounds = None\n\t\t\trangestring_input.value = convert_bounds_to_rangestring(bounds)\n\n\tdef _update_frequencies_figure(self, col):\n\t\tif col in self._figures_freq:\n\t\t\tfig = self._figures_freq[col].children[0]\n\t\t\tbox = self.__get_plain_box()\n\t\t\twith fig.batch_update():\n\t\t\t\tupdate_frequencies_figure(\n\t\t\t\t\tfig,\n\t\t\t\t\tself.active_selection(),\n\t\t\t\t\tself.data[col],\n\t\t\t\t\tbox=box,\n\t\t\t\t\tref_point=self.reference_point(col),\n\t\t\t\t)\n\t\t\trangestring_input = self._figures_freq[col].children[1]\n\t\t\tif box is not None:\n\t\t\t\tallowedset = box.thresholds.get(col, None)\n\t\t\telse:\n\t\t\t\tallowedset = None\n\t\t\trangestring_input.value = convert_set_to_rangestring(allowedset)\n\n\tdef _compute_histogram(self, col, selection, bins=None):\n\t\tif col not in self._base_histogram:\n\t\t\tif bins is None:\n\t\t\t\tbins = 20\n\t\t\tbar_heights, bar_x = numpy.histogram(self.data[col], bins=bins)\n\t\t\tself._base_histogram[col] = bar_heights, bar_x\n\t\telse:\n\t\t\tbar_heights, bar_x = self._base_histogram[col]\n\t\tbins_left = bar_x[:-1]\n\t\tbins_width = bar_x[1:] - bar_x[:-1]\n\t\tbar_heights_select, bar_x = numpy.histogram(self.data[col][selection], bins=bar_x)\n\t\treturn bar_heights, bar_heights_select, bins_left, bins_width\n\n\tdef _compute_frequencies(self, col, selection, labels):\n\t\tif col in self._categorical_data:\n\t\t\tv = self._categorical_data[col]\n\t\telse:\n\t\t\tself._categorical_data[col] = v = self.data[col].astype(\n\t\t\t\tpandas.CategoricalDtype(categories=labels, ordered=False)\n\t\t\t).cat.codes\n\t\tif col not in self._base_histogram:\n\t\t\tbar_heights, bar_x = numpy.histogram(v, bins=numpy.arange(0, len(labels) + 1))\n\t\t\tself._base_histogram[col] = bar_heights, 
bar_x\n\t\telse:\n\t\t\tbar_heights, bar_x = self._base_histogram[col]\n\t\tbar_heights_select, _ = numpy.histogram(v[selection], bins=numpy.arange(0, len(labels) + 1))\n\t\treturn bar_heights, bar_heights_select, labels\n\n\tdef _on_select_from_histogram(self, *args, name=None):\n\t\tif self._freeze:\n\t\t\treturn\n\t\ttry:\n\t\t\tself._freeze = True\n\t\t\tselect_min, select_max = args[2].xrange\n\t\t\t_logger.debug(\"name: %s range: %f - %f\", name, select_min, select_max)\n\t\t\tself._figures_hist[name].children[0].for_each_trace(_deselect_all_points)\n\n\t\t\tif self.active_selection_deftype() == 'box':\n\t\t\t\tbox = self._selection_defs[self.active_selection_name()]\n\t\t\t\tbox = interpret_histogram_selection(name, args[2].xrange, box, self.data, self.scope)\n\t\t\t\tself.new_selection(box, name=self.active_selection_name())\n\t\t\t\tself._active_selection_changed()\n\t\texcept:\n\t\t\t_logger.exception(\"error in _on_select_from_histogram\")\n\t\t\traise\n\t\tfinally:\n\t\t\tself._freeze = False\n\n\tdef _on_select_from_rangestring(self, payload, name=None):\n\t\tif self._freeze:\n\t\t\treturn\n\t\ttry:\n\t\t\tself._freeze = True\n\t\t\tfrom .components import convert_rangestring_to_tuple\n\t\t\tselect_min, select_max = convert_rangestring_to_tuple(payload.get('new', None))\n\t\t\t_logger.debug(\"name: %s range: %f - %f\", name, select_min, select_max)\n\n\t\t\tif self.active_selection_deftype() == 'box':\n\t\t\t\tbox = self._selection_defs[self.active_selection_name()]\n\t\t\t\tbox = interpret_histogram_selection(name, (select_min, select_max), box, self.data, self.scope)\n\t\t\t\tself.new_selection(box, name=self.active_selection_name())\n\t\t\t\tself._active_selection_changed()\n\t\texcept:\n\t\t\t_logger.exception(\"error in _on_select_from_histogram\")\n\t\t\traise\n\t\tfinally:\n\t\t\tself._freeze = False\n\n\tdef _on_deselect_from_histogram(self, *args, name=None):\n\t\t_logger.debug(\"deselect %s\", name)\n\t\tif self.active_selection_deftype() == 'box':\n\t\t\tbox = self._selection_defs[self.active_selection_name()]\n\t\t\tif name in box:\n\t\t\t\tdel box[name]\n\t\t\t\tself.new_selection(box, name=self.active_selection_name())\n\t\t\t\tself._active_selection_changed()\n\n\n\tdef _on_select_from_freq(self, *args, name=None):\n\t\tselect_min, select_max = args[2].xrange\n\t\tselect_min = int(numpy.ceil(select_min))\n\t\tselect_max = int(numpy.ceil(select_max))\n\n\t\tfig = self.get_figure(name).children[0]\n\n\t\ttoggles = fig.layout['meta']['x_tick_values'][select_min:select_max]\n\t\tfig.for_each_trace(_deselect_all_points)\n\n\t\tif self.active_selection_deftype() == 'box':\n\t\t\tbox = self._selection_defs[self.active_selection_name()]\n\t\t\tbox.scope = self.scope\n\n\t\t\tif name not in box:\n\t\t\t\tfor x in toggles:\n\t\t\t\t\tbox.add_to_allowed_set(name, x)\n\t\t\telse:\n\t\t\t\tfor x in toggles:\n\t\t\t\t\tif name not in box or x in box[name]:\n\t\t\t\t\t\tbox.remove_from_allowed_set(name, x)\n\t\t\t\t\t\tif name in box and len(box[name]) == 0:\n\t\t\t\t\t\t\tdel box[name]\n\t\t\t\t\telse:\n\t\t\t\t\t\tbox.add_to_allowed_set(name, x)\n\t\t\tif toggles:\n\t\t\t\tself.new_selection(box, name=self.active_selection_name())\n\t\t\t\tself._active_selection_changed()\n\n\tdef _on_select_from_setstring(self, payload, name=None):\n\t\tif self._freeze:\n\t\t\treturn\n\t\ttry:\n\t\t\tself._freeze = True\n\t\t\tfrom .components import convert_rangestring_to_set\n\t\t\tallowed_set = convert_rangestring_to_set(payload.get('new', None))\n\n\t\t\tif 
self.active_selection_deftype() == 'box':\n\t\t\t\tbox = self._selection_defs[self.active_selection_name()]\n\t\t\t\ttry:\n\t\t\t\t\tif allowed_set is None:\n\t\t\t\t\t\tif name in box._thresholds:\n\t\t\t\t\t\t\tdel box._thresholds[name]\n\t\t\t\t\telse:\n\t\t\t\t\t\tbox.replace_allowed_set(name, allowed_set)\n\t\t\t\texcept ScopeError:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tself.new_selection(box, name=self.active_selection_name())\n\t\t\t\t\tself._active_selection_changed()\n\t\texcept:\n\t\t\t_logger.exception(\"error in _on_select_from_setstring\")\n\t\t\traise\n\t\tfinally:\n\t\t\tself._freeze = False\n\n\tdef _on_click_from_frequencies(self, *args, name=None):\n\t\tx = None\n\t\tif len(args) >= 2:\n\t\t\txs = getattr(args[1],'xs',None)\n\t\t\tif xs:\n\t\t\t\tx = xs[0]\n\t\tif x is not None:\n\t\t\tif self.active_selection_deftype() == 'box':\n\t\t\t\tbox = self._selection_defs[self.active_selection_name()]\n\t\t\t\tbox.scope = self.scope\n\t\t\t\tif name not in box or x in box[name]:\n\t\t\t\t\tbox.remove_from_allowed_set(name, x)\n\t\t\t\t\tif name in box and len(box[name]) == 0:\n\t\t\t\t\t\tdel box[name]\n\t\t\t\telse:\n\t\t\t\t\tbox.add_to_allowed_set(name, x)\n\t\t\t\tself.new_selection(box, name=self.active_selection_name())\n\t\t\t\tself._active_selection_changed()\n\n\tdef _active_selection_changed(self):\n\t\tif hasattr(self, '_active_selection_changing_'):\n\t\t\treturn # prevent recursive looping\n\t\ttry:\n\t\t\tself._active_selection_changing_ = True\n\t\t\twith self._status_pie.batch_update():\n\t\t\t\tsuper()._active_selection_changed()\n\t\t\t\tself._pre_update_selection_feature_score_figure()\n\t\t\t\tself._update_status()\n\t\t\t\tfor col in self._figures_hist:\n\t\t\t\t\tself._update_histogram_figure(col)\n\t\t\t\tfor col in self._figures_freq:\n\t\t\t\t\tself._update_frequencies_figure(col)\n\t\t\t\tfor key in self._two_way:\n\t\t\t\t\tself._two_way[key].refresh_selection_names()\n\t\t\t\t\tself._two_way[key]._on_change_selection_choose(payload={\n\t\t\t\t\t\t'new':self.active_selection_name(),\n\t\t\t\t\t})\n\t\t\t\tfor key in self._three_way:\n\t\t\t\t\tself._three_way[key].change_selection(\n\t\t\t\t\t\tself.active_selection(),\n\t\t\t\t\t\tself.active_selection_color(),\n\t\t\t\t\t)\n\t\t\t\tself._update_sploms()\n\t\t\t\tself._update_hmms()\n\t\t\t\tself._update_selection_feature_score_figure()\n\t\tfinally:\n\t\t\tdel self._active_selection_changing_\n\n\tdef status(self):\n\t\t\"\"\"Display the status widget.\"\"\"\n\t\treturn self._status\n\n\tdef _update_status(self):\n\t\ttext = '<span style=\"font-weight:bold;font-size:150%\">{:,d} Cases Selected out of {:,d} Total Cases</span>'\n\t\tselection = self.active_selection()\n\t\tvalues = (int(numpy.sum(selection)), int(selection.size))\n\t\tself._status_txt.value = text.format(*values)\n\t\tself._status_pie.data[0].values = [values[0], values[1]-values[0]]\n\n\n\n\tdef get_figure(self, col):\n\t\tif col in self._figures_hist:\n\t\t\treturn self._figures_hist[col]\n\t\tif col in self._figures_freq:\n\t\t\treturn self._figures_freq[col]\n\t\treturn None\n\n\tdef _clear_boxes_on_figure(self, col):\n\t\tfig = self.get_figure(col).children[0]\n\t\tif fig is None: return\n\n\t\tforeground_shapes = []\n\t\trefpoint = self.reference_point(col)\n\t\tif refpoint is not None:\n\t\t\tif refpoint in (True, False):\n\t\t\t\trefpoint = str(refpoint).lower()\n\t\t\t_y_max = sum(t.y for t in fig.select_traces()).max()\n\t\t\ty_range = (\n\t\t\t\t-_y_max * 0.02,\n\t\t\t\t_y_max * 
1.04,\n\t\t\t)\n\t\t\tforeground_shapes.append(\n\t\t\t\tgo.layout.Shape(\n\t\t\t\t\ttype=\"line\",\n\t\t\t\t\txref=\"x1\",\n\t\t\t\t\tyref=\"y1\",\n\t\t\t\t\tx0=refpoint,\n\t\t\t\t\ty0=y_range[0],\n\t\t\t\t\tx1=refpoint,\n\t\t\t\t\ty1=y_range[1],\n\t\t\t\t\t**colors.DEFAULT_REF_LINE_STYLE,\n\t\t\t\t)\n\t\t\t)\n\n\t\tfig.layout.shapes= foreground_shapes\n\t\tfig.layout.title.font.color = 'black'\n\t\tfig.layout.title.text = col\n\n\t# def _draw_boxes_on_figure(self, col):\n\t#\n\t# \tif self.active_selection_deftype() != 'box':\n\t# \t\tself._clear_boxes_on_figure(col)\n\t# \t\treturn\n\t#\n\t# \tfig = self.get_figure(col)\n\t# \tif fig is None: return\n\t# \tbox = self._selection_defs[self.active_selection_name()]\n\t# \tif box is None:\n\t# \t\tself._clear_boxes_on_figure(col)\n\t# \t\treturn\n\t#\n\t# \tfrom ...scope.box import Bounds\n\t#\n\t# \tif col in box.thresholds:\n\t# \t\tx_lo, x_hi = None, None\n\t# \t\tthresh = box.thresholds.get(col)\n\t# \t\tif isinstance(thresh, Bounds):\n\t# \t\t\tx_lo, x_hi = thresh\n\t# \t\tif isinstance(thresh, set):\n\t# \t\t\tx_lo, x_hi = [], []\n\t# \t\t\tfor tickval, ticktext in enumerate(fig.data[0].x):\n\t# \t\t\t\tif ticktext in thresh:\n\t# \t\t\t\t\tx_lo.append(tickval-0.45)\n\t# \t\t\t\t\tx_hi.append(tickval+0.45)\n\t#\n\t# \t\ttry:\n\t# \t\t\tx_range = (\n\t# \t\t\t\tfig.data[0].x[0] - (fig.data[0].width[0] / 2),\n\t# \t\t\t\tfig.data[0].x[-1] + (fig.data[0].width[-1] / 2),\n\t# \t\t\t)\n\t# \t\texcept TypeError:\n\t# \t\t\tx_range = (\n\t# \t\t\t\t-0.5,\n\t# \t\t\t\tlen(fig.data[0].x)+0.5\n\t# \t\t\t)\n\t# \t\tx_width = x_range[1] - x_range[0]\n\t# \t\tif x_lo is None:\n\t# \t\t\tx_lo = x_range[0]-x_width * 0.02\n\t# \t\tif x_hi is None:\n\t# \t\t\tx_hi = x_range[1]+x_width * 0.02\n\t# \t\tif not isinstance(x_lo, list):\n\t# \t\t\tx_lo = [x_lo]\n\t# \t\tif not isinstance(x_hi, list):\n\t# \t\t\tx_hi = [x_hi]\n\t#\n\t# \t\ty_lo, y_hi = None, None\n\t# \t\t_y_max = sum(t.y for t in fig.select_traces()).max()\n\t# \t\ty_range = (\n\t# \t\t\t-_y_max * 0.02,\n\t# \t\t\t_y_max * 1.04,\n\t# \t\t)\n\t# \t\ty_width = y_range[1] - y_range[0]\n\t# \t\tif y_lo is None:\n\t# \t\t\ty_lo = y_range[0]-y_width * 0\n\t# \t\tif y_hi is None:\n\t# \t\t\ty_hi = y_range[1]+y_width * 0\n\t# \t\tif not isinstance(y_lo, list):\n\t# \t\t\ty_lo = [y_lo]\n\t# \t\tif not isinstance(y_hi, list):\n\t# \t\t\ty_hi = [y_hi]\n\t#\n\t# \t\tx_pairs = list(zip(x_lo, x_hi))\n\t# \t\ty_pairs = list(zip(y_lo, y_hi))\n\t#\n\t# \t\tbackground_shapes = [\n\t# \t\t\t# Rectangle background color\n\t# \t\t\tgo.layout.Shape(\n\t# \t\t\t\ttype=\"rect\",\n\t# \t\t\t\txref=\"x1\",\n\t# \t\t\t\tyref=\"y1\",\n\t# \t\t\t\tx0=x_pair[0],\n\t# \t\t\t\ty0=y_pair[0],\n\t# \t\t\t\tx1=x_pair[1],\n\t# \t\t\t\ty1=y_pair[1],\n\t# \t\t\t\tline=dict(\n\t# \t\t\t\t\twidth=0,\n\t# \t\t\t\t),\n\t# \t\t\t\tfillcolor=colors.DEFAULT_BOX_BG_COLOR,\n\t# \t\t\t\topacity=0.2,\n\t# \t\t\t\tlayer=\"below\",\n\t# \t\t\t)\n\t# \t\t\tfor x_pair in x_pairs\n\t# \t\t\tfor y_pair in y_pairs\n\t# \t\t]\n\t#\n\t# \t\tforeground_shapes = [\n\t# \t\t\t# Rectangle reference to the axes\n\t# \t\t\tgo.layout.Shape(\n\t# \t\t\t\ttype=\"rect\",\n\t# \t\t\t\txref=\"x1\",\n\t# \t\t\t\tyref=\"y1\",\n\t# \t\t\t\tx0=x_pair[0],\n\t# \t\t\t\ty0=y_pair[0],\n\t# \t\t\t\tx1=x_pair[1],\n\t# \t\t\t\ty1=y_pair[1],\n\t# \t\t\t\tline=dict(\n\t# \t\t\t\t\twidth=2,\n\t# \t\t\t\t\tcolor=colors.DEFAULT_BOX_LINE_COLOR,\n\t# \t\t\t\t),\n\t# \t\t\t\tfillcolor='rgba(0,0,0,0)',\n\t# \t\t\t\topacity=1.0,\n\t# \t\t\t)\n\t# \t\t\tfor x_pair in x_pairs\n\t# 
\t\t\tfor y_pair in y_pairs\n\t# \t\t]\n\t#\n\t# \t\trefpoint = self.reference_point(col)\n\t# \t\tif refpoint is not None:\n\t# \t\t\tif refpoint in (True, False):\n\t# \t\t\t\trefpoint = str(refpoint).lower()\n\t# \t\t\tforeground_shapes.append(\n\t# \t\t\t\tgo.layout.Shape(\n\t# \t\t\t\t\ttype=\"line\",\n\t# \t\t\t\t\txref=\"x1\",\n\t# \t\t\t\t\tyref=\"y1\",\n\t# \t\t\t\t\tx0=refpoint,\n\t# \t\t\t\t\ty0=y_range[0],\n\t# \t\t\t\t\tx1=refpoint,\n\t# \t\t\t\t\ty1=y_range[1],\n\t# \t\t\t\t\t**colors.DEFAULT_REF_LINE_STYLE,\n\t# \t\t\t\t)\n\t# \t\t\t)\n\t#\n\t# \t\tfig.layout.shapes=background_shapes+foreground_shapes\n\t# \t\tfig.layout.title.font.color = colors.DEFAULT_BOX_LINE_COLOR\n\t# \t\tfig.layout.title.text = f'<b>{col}</b>'\n\t# \telse:\n\t# \t\tself._clear_boxes_on_figure(col)\n\n\n\tdef _get_widgets(self, *include):\n\n\t\tif self.scope is None:\n\t\t\traise ValueError('cannot create visualization with no scope')\n\n\t\tviz_widgets = []\n\t\tfor i in include:\n\t\t\tif i not in self.scope:\n\t\t\t\twarnings.warn(f'{i} not in scope')\n\t\t\telif i not in self.data.columns:\n\t\t\t\twarnings.warn(f'{i} not in data')\n\t\t\telse:\n\t\t\t\tfig = self.get_histogram_figure(i)\n\t\t\t\tif fig is not None:\n\t\t\t\t\tviz_widgets.append(fig)\n\n\t\treturn widget.Box(viz_widgets, layout=widget.Layout(flex_flow='row wrap'))\n\n\tdef selectors(self, names):\n\t\t\"\"\"\n\t\tDisplay selector widgets for certain dimensions.\n\n\t\tThis method returns an ipywidgets Box containing\n\t\tthe selector widgets.\n\n\t\tArgs:\n\t\t\tnames (Collection[str]):\n\t\t\t\tThese names will included in this set of\n\t\t\t\twidgets. If the name is not found in the\n\t\t\t\tscope or this visualizer's data, a warning\n\t\t\t\tis issued but the remaining valid widgets\n\t\t\t\tare still returned.\n\n\t\tReturns:\n\t\t\tipywidgets.Box\n\t\t\"\"\"\n\t\treturn self._get_widgets(*names)\n\n\tdef uncertainty_selectors(self):\n\t\t\"\"\"\n\t\tDisplay selector widgets for all uncertainties.\n\n\t\tReturns:\n\t\t\tipywidgets.Box\n\t\t\"\"\"\n\t\treturn self._get_widgets(*self.scope.get_uncertainty_names())\n\n\tdef lever_selectors(self):\n\t\t\"\"\"\n\t\tDisplay selector widgets for all policy levers.\n\n\t\tReturns:\n\t\t\tipywidgets.Box\n\t\t\"\"\"\n\t\treturn self._get_widgets(*self.scope.get_lever_names())\n\n\tdef measure_selectors(self):\n\t\t\"\"\"\n\t\tDisplay selector widgets for all performance measures.\n\n\t\tReturns:\n\t\t\tipywidgets.Box\n\t\t\"\"\"\n\t\treturn self._get_widgets(*self.scope.get_measure_names())\n\n\tdef complete(self, measures=None):\n\t\t\"\"\"\n\t\tDisplay status and selector widgets for all dimensions.\n\n\t\tReturns:\n\t\t\tipywidgets.Box\n\t\t\"\"\"\n\t\tcontent = [self.status()]\n\t\tlevers = self.lever_selectors()\n\t\tif levers.children:\n\t\t\tcontent += [\n\t\t\t\twidget.HTML(\"<h3>Policy Levers</h3>\"),\n\t\t\t\tlevers,\n\t\t\t]\n\t\tuncs = self.uncertainty_selectors()\n\t\tif uncs.children:\n\t\t\tcontent += [\n\t\t\t\twidget.HTML(\"<h3>Exogenous Uncertainties</h3>\"),\n\t\t\t\tuncs,\n\t\t\t]\n\t\tif measures is None:\n\t\t\tmeas = self.measure_selectors()\n\t\telse:\n\t\t\tmeas = self.selectors(measures)\n\t\tif meas.children:\n\t\t\tcontent += [\n\t\t\t\twidget.HTML(\"<h3>Performance Measures</h3>\"),\n\t\t\t\tmeas,\n\t\t\t]\n\t\treturn widget.VBox(content)\n\n\tdef set_active_selection_color(self, color):\n\t\tsuper().set_active_selection_color(color)\n\t\tfor col, fig in self._figures_freq.items():\n\t\t\tfig.children[0].data[0].marker.color = color\n\t\tfor col, fig in 
self._figures_hist.items():\n\t\t\tfig.children[0].data[0].marker.color = color\n\t\tc = self._status_pie.data[0].marker.colors\n\t\tself._status_pie.data[0].marker.colors = [color, c[1]]\n\t\tfor k, twoway in self._two_way.items():\n\t\t\t#_debugprint(f\"twoway[{self._active_selection_name}][{k}] to {color}\")\n\t\t\ttwoway.change_selection_color(color)\n\n\tdef refresh_selection_names(self):\n\t\tsuper().refresh_selection_names()\n\t\ttry:\n\t\t\t_two_way = self._two_way\n\t\texcept AttributeError:\n\t\t\tpass\n\t\telse:\n\t\t\tfor k, twoway in _two_way.items():\n\t\t\t\ttwoway.refresh_selection_names()\n\n\tdef two_way(\n\t\t\tself,\n\t\t\tkey=None,\n\t\t\treset=False,\n\t\t\t*,\n\t\t\tx=None,\n\t\t\ty=None,\n\t\t\tuse_gl=True,\n\t):\n\t\t\"\"\"\n\t\tCreate or display a two-way widget.\n\n\t\tArgs:\n\t\t\tkey (hashable, optional):\n\t\t\t\tA hashable key value (e.g. `str`) to identify\n\t\t\t\tthis two_way widget. Subsequent calls to\n\t\t\t\tthis command with he same key will return\n\t\t\t\treferences to the same widget, instead of\n\t\t\t\tcreating new widgets.\n\t\t\treset (bool, default False):\n\t\t\t\tWhether to reset the two_way widget for the\n\t\t\t\tgiven key. Doing so will create a new two_way\n\t\t\t\twidget, and will break any other existing references\n\t\t\t\tto the same keyed widget (they will no longer live\n\t\t\t\tupdate with this visualizer).\n\t\t\tx, y (str, optional):\n\t\t\t\tThe names of the initial x- and y-axis dimensions to\n\t\t\t\tdisplay. Because the resulting figure widget is\n\t\t\t\tinteractive, these dimensions may be changed later.\n\t\t\tuse_gl (bool, default True):\n\t\t\t\tUse Plotly's `Scattergl` instead of `Scatter`, which may\n\t\t\t\tprovide some performance benefit for large data sets.\n\n\t\tReturns:\n\t\t\tTwoWayFigure\n\t\t\"\"\"\n\t\tif key is None and (x is not None or y is not None):\n\t\t\tkey = (x,y)\n\n\t\tif key in self._two_way and not reset:\n\t\t\treturn self._two_way[key]\n\n\t\tfrom .twoway import TwoWayFigure\n\t\tself._two_way[key] = TwoWayFigure(self, use_gl=use_gl)\n\t\tself._two_way[key].selection_choose.value = self.active_selection_name()\n\n\t\tdef _try_set_value(where, value, describe):\n\t\t\tif value is not None:\n\t\t\t\ttry:\n\t\t\t\t\twhere.value = value\n\t\t\t\texcept TraitError:\n\t\t\t\t\twarnings.warn(f'\"{value}\" is not a valid value for {describe}')\n\n\t\t_try_set_value(self._two_way[key].x_axis_choose, x, 'the x axis dimension')\n\t\t_try_set_value(self._two_way[key].y_axis_choose, y, 'the y axis dimension')\n\t\treturn self._two_way[key]\n\n\tdef three_way(\n\t\t\tself,\n\t\t\tkey=None,\n\t\t\treset=False,\n\t\t\t*,\n\t\t\tx=None,\n\t\t\ty=None,\n\t\t\tz=None,\n\t\t\ts=None,\n\t):\n\t\t\"\"\"\n\t\tCreate or display a three-way widget.\n\t\t\"\"\"\n\t\tif key is None and (x is not None or y is not None or z is not None or s is not None):\n\t\t\tkey = (x,y,z,s)\n\n\t\tif key in self._three_way and not reset:\n\t\t\treturn self._three_way[key]\n\n\t\tfrom .threeway import ThreeWayFigure\n\t\tself._three_way[key] = ThreeWayFigure(self, x=x,y=y,z=z,s=s)\n\t\treturn self._three_way[key]\n\n\n\tdef splom(\n\t\t\tself,\n\t\t\tkey=None,\n\t\t\treset=False,\n\t\t\t*,\n\t\t\tcols='M',\n\t\t\trows='L',\n\t\t\tuse_gl=True,\n\t):\n\t\t\"\"\"\n\t\tCreate or display a scatter plot matrix widget.\n\n\t\tArgs:\n\t\t\tkey (hashable, optional):\n\t\t\t\tA hashable key value (e.g. `str`) to identify\n\t\t\t\tthis splom widget. 
Subsequent calls to\n\t\t\t\tthis command with he same key will return\n\t\t\t\treferences to the same widget, instead of\n\t\t\t\tcreating new widgets.\n\t\t\treset (bool, default False):\n\t\t\t\tWhether to reset the two_way widget for the\n\t\t\t\tgiven key. Doing so will create a new splom\n\t\t\t\twidget, and will break any other existing references\n\t\t\t\tto the same keyed widget (they will no longer live\n\t\t\t\tupdate with this visualizer).\n\t\t\tcols, rows (str or Collection[str]):\n\t\t\t\tThe dimensions to display across each of the\n\t\t\t\tcolumns (rows) of the scatter plot matrix.\n\t\t\t\tCan be given as a list of dimension names, or\n\t\t\t\ta single string that is some subset of 'XLM' to\n\t\t\t\tinclude all uncertainties, policy levers, and/or\n\t\t\t\tperformance measures respectively.\n\t\t\tuse_gl (bool, default True):\n\t\t\t\tUse Plotly's `Scattergl` instead of `Scatter`, which may\n\t\t\t\tprovide some performance benefit for large data sets.\n\n\t\tReturns:\n\t\t\tplotly.FigureWidget\n\t\t\"\"\"\n\t\tif not isinstance(rows, str):\n\t\t\trows = tuple(rows)\n\t\tif not isinstance(cols, str):\n\t\t\tcols = tuple(cols)\n\n\t\tif key is None and (cols is not None or rows is not None):\n\t\t\tkey = (cols,rows)\n\n\t\tif key in self._splom and not reset:\n\t\t\treturn self._splom[key]\n\n\t\tbox = None\n\t\tif self.active_selection_deftype() == 'box':\n\t\t\tname = self.active_selection_name()\n\t\t\tbox = self._selection_defs[name]\n\t\telif self.active_selection_deftype() == 'primbox':\n\t\t\tname = self.active_selection_name()\n\t\t\tbox = self._selection_defs[name].to_emat_box()\n\n\t\tself._splom[key] = new_splom_figure(\n\t\t\tself.scope,\n\t\t\tself.data,\n\t\t\trows=rows,\n\t\t\tcols=cols,\n\t\t\tuse_gl=use_gl,\n\t\t\tmass=250,\n\t\t\trow_titles='side',\n\t\t\tsize=150,\n\t\t\tselection=self.active_selection(),\n\t\t\tbox=box,\n\t\t\trefpoint=self._reference_point,\n\t\t\tfigure_class=go.FigureWidget,\n\t\t\ton_select=functools.partial(self._on_select_from_splom, name=key),\n\t\t\tselected_color=self.active_selection_color(),\n\t\t)\n\n\t\treturn self._splom[key]\n\n\tdef _on_select_from_splom(self, row, col, trace, points, selection, name=None):\n\t\t# if len(points.point_inds)==0:\n\t\t# \treturn\n\t\t# print(\"name=\",name)\n\t\t# print(row, col, \"->\", selection)\n\t\t# print( \"->\", selection.xrange)\n\t\t# print( \"->\", selection.yrange)\n\t\t# print( \"->\", type(selection.yrange))\n\t\t# trace.selectedpoints = None\n\t\tpass\n\n\tdef _update_sploms(self):\n\t\tbox = None\n\t\tif self.active_selection_deftype() == 'box':\n\t\t\tname = self.active_selection_name()\n\t\t\tbox = self._selection_defs[name]\n\t\telif self.active_selection_deftype() == 'primbox':\n\t\t\tname = self.active_selection_name()\n\t\t\tbox = self._selection_defs[name].to_emat_box()\n\t\tfor fig in self._splom.values():\n\t\t\twith fig.batch_update():\n\t\t\t\tupdate_splom_figure(\n\t\t\t\t\tself.scope,\n\t\t\t\t\tself.data,\n\t\t\t\t\tfig,\n\t\t\t\t\tself.active_selection(),\n\t\t\t\t\tbox,\n\t\t\t\t\tmass=None,\n\t\t\t\t\tselected_color=self.active_selection_color(),\n\t\t\t\t)\n\n\tdef hmm(\n\t\t\tself,\n\t\t\tkey=None,\n\t\t\treset=False,\n\t\t\t*,\n\t\t\tcols='M',\n\t\t\trows='L',\n\t\t\temph_selected=True,\n\t\t\tshow_points=30,\n\t\t\tsize=150,\n\t\t\twith_hover=True,\n\t):\n\t\t\"\"\"\n\t\tCreate or display a heat map matrix widget.\n\n\t\tArgs:\n\t\t\tkey (hashable, optional):\n\t\t\t\tA hashable key value (e.g. `str`) to identify\n\t\t\t\tthis hmm widget. 
Subsequent calls to\n\t\t\t\tthis command with he same key will return\n\t\t\t\treferences to the same widget, instead of\n\t\t\t\tcreating new widgets.\n\t\t\treset (bool, default False):\n\t\t\t\tWhether to reset the two_way widget for the\n\t\t\t\tgiven key. Doing so will create a new hmm\n\t\t\t\twidget, and will break any other existing references\n\t\t\t\tto the same keyed widget (they will no longer live\n\t\t\t\tupdate with this visualizer).\n\t\t\tcols, rows (str or Collection[str]):\n\t\t\t\tThe dimensions to display across each of the\n\t\t\t\tcolumns (rows) of the heat map matrix.\n\t\t\t\tCan be given as a list of dimension names, or\n\t\t\t\ta single string that is some subset of 'XLM' to\n\t\t\t\tinclude all uncertainties, policy levers, and/or\n\t\t\t\tperformance measures respectively.\n\t\t\temph_selected (bool, default True):\n\t\t\t\tEmphasize selected points, using a variety of\n\t\t\t\ttechniques to ensure that small sized selections\n\t\t\t\tremain visible. If disabled, when small sized\n\t\t\t\tselections are shown from large visualization\n\t\t\t\tdatasets, the selected points will typically\n\t\t\t\tbecome washed out and undetectable.\n\t\t\tshow_points (int, default 30):\n\t\t\t\tIf `emph_selected` is true and the number of\n\t\t\t\tselected points is less than this threshold,\n\t\t\t\tthe selection will be overlaid on the heatmap\n\t\t\t\tas a scatter plot instead of a heatmap colorization.\n\t\t\tsize (int, default 150):\n\t\t\t\tThe plot size for each heatmap.\n\n\t\tReturns:\n\t\t\tplotly.FigureWidget\n\t\t\"\"\"\n\t\tif not isinstance(rows, str):\n\t\t\trows = tuple(rows)\n\t\tif not isinstance(cols, str):\n\t\t\tcols = tuple(cols)\n\n\t\tif key is None and (cols is not None or rows is not None):\n\t\t\tkey = (cols,rows)\n\n\t\tif key in self._hmm and not reset:\n\t\t\treturn self._hmm[key]\n\n\t\tbox = None\n\t\tif self.active_selection_deftype() == 'box':\n\t\t\tname = self.active_selection_name()\n\t\t\tbox = self._selection_defs[name]\n\t\telif self.active_selection_deftype() == 'primbox':\n\t\t\tname = self.active_selection_name()\n\t\t\tbox = self._selection_defs[name].to_emat_box()\n\n\t\tself._hmm[key] = new_hmm_figure(\n\t\t\tself.scope,\n\t\t\tself.data,\n\t\t\trows=rows,\n\t\t\tcols=cols,\n\t\t\trow_titles='side',\n\t\t\tsize=size,\n\t\t\tselection=self.active_selection(),\n\t\t\tbox=box,\n\t\t\trefpoint=self._reference_point,\n\t\t\tfigure_class=go.FigureWidget,\n\t\t\temph_selected=emph_selected,\n\t\t\tshow_points=show_points,\n\t\t\tselected_color=self.active_selection_color(),\n\t\t\twith_hover=with_hover,\n\t\t)\n\n\t\treturn self._hmm[key]\n\n\tdef _update_hmms(self):\n\t\tbox = None\n\t\tif self.active_selection_deftype() == 'box':\n\t\t\tname = self.active_selection_name()\n\t\t\tbox = self._selection_defs[name]\n\t\telif self.active_selection_deftype() == 'primbox':\n\t\t\tname = self.active_selection_name()\n\t\t\tbox = self._selection_defs[name].to_emat_box()\n\t\tfor fig in self._hmm.values():\n\t\t\twith fig.batch_update():\n\t\t\t\tupdate_hmm_figure(\n\t\t\t\t\tself.scope,\n\t\t\t\t\tself.data,\n\t\t\t\t\tfig,\n\t\t\t\t\tself.active_selection(),\n\t\t\t\t\tbox,\n\t\t\t\t\tselected_color=self.active_selection_color(),\n\t\t\t\t)\n\n\tdef parcoords(\n\t\t\tself,\n\t\t\tkey=None,\n\t\t\treset=False,\n\t\t\t*,\n\t\t\tcoords='XLM',\n\t):\n\t\t\"\"\"\n\n\t\tArgs:\n\t\t\tkey (hashable, optional):\n\t\t\t\tA hashable key value (e.g. `str`) to identify\n\t\t\t\tthis parcoords widget. 
Subsequent calls to\n\t\t\t\tthis command with he same key will return\n\t\t\t\treferences to the same widget, instead of\n\t\t\t\tcreating new widgets.\n\t\t\treset (bool, default False):\n\t\t\t\tWhether to reset the parcoords widget for the\n\t\t\t\tgiven key. Doing so will create a new parcoords\n\t\t\t\twidget, and will break any other existing references\n\t\t\t\tto the same keyed widget (they will no longer live\n\t\t\t\tupdate with this visualizer).\n\t\t\tcoords (str or Collection[str]):\n\t\t\t\tNames of the visualizer dimensions to display\n\t\t\t\tin this parcoords widget. Give a list-like set\n\t\t\t\tof named dimensions, or a string that is some\n\t\t\t\tsubset of 'XLM' to include all uncertainties,\n\t\t\t\tpolicy levers, and/or performance measures\n\t\t\t\trespectively.\n\n\t\tReturns:\n\t\t\tplotly.FigureWidget: A parallel coordinates plot.\n\t\t\"\"\"\n\t\tif not isinstance(coords, str):\n\t\t\tcoords = tuple(coords)\n\n\t\tif key is None and coords is not None:\n\t\t\tkey = coords\n\n\t\tif key in self._parcoords and not reset:\n\t\t\treturn self._parcoords[key]\n\n\t\tself._parcoords[key] = new_parcoords_figure(\n\t\t\tself.scope,\n\t\t\tself.data,\n\t\t\tcoords=coords,\n\t\t\tselection=self.active_selection(),\n\t\t\tfigure_class=go.FigureWidget,\n\t\t\tselected_color=self.active_selection_color(),\n\t\t\t# on_select=functools.partial(self._on_select_from_splom, name=key),\n\t\t)\n\n\t\treturn self._parcoords[key]\n\n\tdef new_selection(self, value, name=None, color=None, activate=True):\n\t\t\"\"\"\n\t\tAdd a new selection set to the Visualizer.\n\n\t\tArgs:\n\t\t\tvalue (Box, PrimBox, str, or array-like):\n\t\t\t\tThe new selection. If given as an `emat.Box`,\n\t\t\t\tthe selection is defined entirely by the boundaries of the\n\t\t\t\tbox, as applied to the visualizer data.\n\t\t\t\tIf given as a `PrimBox`, the box boundaries are defined\n\t\t\t\tby the selected point on the peeling trajectory (and\n\t\t\t\tare immutable within the Visualizer interface), but the\n\t\t\t\tselection is taken from the Prim target.\n\t\t\t\tIf given as a `str`, a new immutable selection array is created\n\t\t\t\tby evaluating the string in the context of the visualizer data.\n\t\t\t\tIf given as an array-like, the array is used to explicitly\n\t\t\t\tdefine an immutable selection.\n\t\t\tname (str, optional):\n\t\t\t\tA name for this selection. 
If not given, the name is inferred\n\t\t\t\tfrom the `name` attribute of the `value` argument, if possible.\n\t\t\tcolor (str, optional):\n\t\t\t\tA color to use for this selection, in \"rgb(n,n,n)\" format.\n\t\t\t\tIf not provided, a default color is selected based on the\n\t\t\t\ttype of `value`.\n\t\t\tactivate (bool, default True):\n\t\t\t\tWhether to immediately make this new selection the \"active\"\n\t\t\t\tselection for this visualizer.\n\n\t\tRaises:\n\t\t\tTypeError: If `name` is not a string or cannot be inferred.\n\t\t\"\"\"\n\t\tif name is None and hasattr(value, 'name'):\n\t\t\tname = value.name\n\t\tif not isinstance(name, str):\n\t\t\traise TypeError(f'selection names must be str not {type(name)}')\n\t\tif value is None:\n\t\t\tfrom ...scope.box import Box\n\t\t\tvalue = Box(name=name, scope=self.scope)\n\t\t# only apply a type-based default color when the caller gave none\n\t\tif color is None:\n\t\t\tif isinstance(value, GenericBox):\n\t\t\t\tcolor = colors.DEFAULT_HIGHLIGHT_COLOR\n\t\t\telif isinstance(value, str):\n\t\t\t\tcolor = colors.DEFAULT_EXPRESSION_COLOR\n\t\t\telif isinstance(value, pandas.Series):\n\t\t\t\tcolor = colors.DEFAULT_LASSO_COLOR\n\t\t\telif isinstance(value, PrimBox):\n\t\t\t\tcolor = colors.DEFAULT_PRIMTARGET_COLOR\n\t\tsuper().new_selection(value, name=name, color=color, activate=activate)\n\n\tdef __setitem__(self, key, value):\n\t\tself.new_selection(value, name=key)\n\n\tdef __getitem__(self, item):\n\t\tif item not in self.selection_names():\n\t\t\traise KeyError(item)\n\t\treturn self._selection_defs.get(item, None)\n\n\tdef prim(self, data='parameters', target=None, **kwargs):\n\t\t\"\"\"\n\t\tCreate a new Prim search for this Visualizer.\n\n\t\tArgs:\n\t\t\tdata ({'parameters', 'levers', 'uncertainties', 'measures', 'all'}):\n\t\t\t\tLimit the restricted dimensions to only be drawn\n\t\t\t\tfrom this subset of possible dimensions from the scope.\n\t\t\t\tDefaults to 'parameters' (i.e. levers and uncertainties).\n\t\t\ttarget (str, optional):\n\t\t\t\tIf not given, the current active selection is used as the\n\t\t\t\ttarget for Prim. 
Otherwise, give the name of an existing\n\t\t\t\tselection, or an expression to be evaluated on the visualizer\n\t\t\t\tdata to create a new target.\n\t\t\t**kwargs:\n\t\t\t\tAll other keyword arguments are forwarded to the\n\t\t\t\t`emat.analysis.Prim` constructor.\n\n\t\tReturns:\n\t\t\temat.analysis.Prim\n\t\t\"\"\"\n\t\tfrom ..prim import Prim\n\n\t\tif target is None:\n\t\t\tof_interest = self.active_selection()\n\t\telif isinstance(target, str):\n\t\t\ttry:\n\t\t\t\tof_interest = self._selections[target]\n\t\t\texcept KeyError:\n\t\t\t\tself.new_selection(target, name=f\"PRIM Target: {target}\")\n\t\t\t\tof_interest = self.active_selection()\n\t\telse:\n\t\t\tself.new_selection(target, name=\"PRIM Target\")\n\t\t\tof_interest = self.active_selection()\n\n\t\tif data == 'parameters':\n\t\t\tdata_ = self.data[self.scope.get_parameter_names()]\n\t\telif data == 'levers':\n\t\t\tdata_ = self.data[self.scope.get_lever_names()]\n\t\telif data == 'uncertainties':\n\t\t\tdata_ = self.data[self.scope.get_uncertainty_names()]\n\t\telif data == 'measures':\n\t\t\tdata_ = self.data[self.scope.get_measure_names()]\n\t\telif data == 'all':\n\t\t\tdata_ = self.data\n\t\telse:\n\t\t\tdata_ = self.data[data]\n\n\t\tself._prim_target = of_interest\n\n\t\tif (of_interest).all():\n\t\t\traise ValueError(\"all points are in the target, cannot run PRIM\")\n\t\tif (~of_interest).all():\n\t\t\traise ValueError(\"no points are in the target, cannot run PRIM\")\n\n\t\tresult = Prim(\n\t\t\tdata_,\n\t\t\tof_interest,\n\t\t\t**kwargs,\n\t\t)\n\n\t\tresult._explorer = self\n\n\t\treturn result\n\n\n\tdef clear_box(self, name=None):\n\t\t\"\"\"\n\t\tClear the contents of an editable selection box.\n\n\t\tIf the selection to be cleared is not editable\n\t\t(i.e. if it is not based on an :class:`emat.Box`)\n\t\tthis method does nothing.\n\n\t\tArgs:\n\t\t\tname (str, optional):\n\t\t\t\tThe name of the box to clear. 
If not\n\t\t\t\tspecified, the currently active selection\n\t\t\t\tis cleared.\n\t\t\"\"\"\n\t\tif name is None:\n\t\t\tname = self.active_selection_name()\n\t\tif self.selection_deftype(name) == 'box':\n\t\t\tbox = self._selection_defs[name]\n\t\t\tif box.thresholds:\n\t\t\t\tbox.clear()\n\t\t\t\tself[name] = box\n\t\t\t\tself._active_selection_changed()\n\n\tdef new_box(self, name, **kwargs):\n\t\t\"\"\"\n\t\tCreate a new Box and add it to this Visualizer.\n\n\t\tArgs:\n\t\t\tname (str):\n\t\t\t\tThe name of the selection box to add.\n\t\t\t\tIf this name already exists in this\n\t\t\t\tVisualizer, it will be overwritten.\n\t\t\tactivate (bool, default True):\n\t\t\t\tImmediately make this new box the active\n\t\t\t\tselection in this Visualizer.\n\t\t\t**kwargs:\n\t\t\t\tAll other keyword arguments are\n\t\t\t\tforwarded to the :class:`emat.Box`\n\t\t\t\tconstructor.\n\n\t\tReturns:\n\t\t\temat.Box: The newly created box.\n\t\t\"\"\"\n\t\tfrom ...scope.box import Box\n\t\tscope = kwargs.pop('scope', self.scope)\n\t\tactivate = kwargs.pop('activate', True)\n\t\tself.new_selection(\n\t\t\tBox(name, scope=scope, **kwargs),\n\t\t\tname=name,\n\t\t\tcolor=colors.DEFAULT_HIGHLIGHT_COLOR,\n\t\t\tactivate=activate,\n\t\t)\n\t\treturn self[name]\n\n\tdef add_box(self, box, activate=True):\n\t\t\"\"\"\n\t\tAdd an existing Box to this Visualizer.\n\n\t\tArgs:\n\t\t\tbox (emat.Box): The box to add.\n\t\t\"\"\"\n\t\tself.new_selection(\n\t\t\tbox,\n\t\t\tname=box.name,\n\t\t\tactivate=activate,\n\t\t)\n\n\tdef _compute_selection_feature_scores(self, name=None):\n\t\tif name is None:\n\t\t\tname = self.active_selection_name()\n\t\tif self.selection_deftype(name) == 'box':\n\t\t\tbox = self._selection_defs[name]\n\t\t\tfrom ..feature_scoring import box_feature_scores\n\t\t\ttry:\n\t\t\t\treturn box_feature_scores(\n\t\t\t\t\tself.scope,\n\t\t\t\t\tbox,\n\t\t\t\t\tself.data,\n\t\t\t\t\treturn_type='styled',\n\t\t\t\t\tdb=None,\n\t\t\t\t\trandom_state=None,\n\t\t\t\t\tcmap='viridis',\n\t\t\t\t\texclude_measures=True,\n\t\t\t\t)\n\t\t\texcept ValueError:\n\t\t\t\treturn pandas.DataFrame(\n\t\t\t\t\tindex=['target'],\n\t\t\t\t\tcolumns=[],\n\t\t\t\t\tdata=None,\n\t\t\t\t)\n\t\telse:\n\t\t\tfrom ..feature_scoring import target_feature_scores\n\t\t\ttarget = self._selections[name]\n\t\t\treturn target_feature_scores(\n\t\t\t\tself.scope,\n\t\t\t\ttarget,\n\t\t\t\tself.data,\n\t\t\t\treturn_type='styled',\n\t\t\t\tdb=None,\n\t\t\t\trandom_state=None,\n\t\t\t\tcmap='viridis',\n\t\t\t\texclude_measures=True,\n\t\t\t)\n\n\n\tdef selection_feature_scores(self):\n\t\ttry:\n\t\t\tscores = self._compute_selection_feature_scores().data.iloc[0]\n\t\texcept KeyboardInterrupt:\n\t\t\traise\n\t\texcept:\n\t\t\tscores = {}\n\t\ty = self.scope.get_parameter_names(False)\n\t\tx = [scores.get(yi, numpy.nan) for yi in y]\n\t\tfmt = lambda x: x if isinstance(x, str) else \"{:.3f}\".format(x)\n\t\tt = [fmt(scores.get(yi, \"N/A\")) for yi in y]\n\t\tfig = go.FigureWidget(\n\t\t\tgo.Bar(\n\t\t\t\tx=x,\n\t\t\t\ty=y,\n\t\t\t\ttext=t,\n\t\t\t\torientation='h',\n\t\t\t\ttextposition='outside',\n\t\t\t\ttexttemplate='%{text}',\n\t\t\t\tmarker_color=colors.DEFAULT_HIGHLIGHT_COLOR,\n\t\t\t),\n\t\t\tlayout=dict(\n\t\t\t\tmargin=dict(t=0, b=0, l=0, r=0),\n\t\t\t\theight = len(x) * 22,\n\t\t\t\tyaxis_autorange=\"reversed\",\n\t\t\t)\n\t\t)\n\t\tself._selection_feature_score_fig = fig\n\t\treturn fig\n\n\tdef _pre_update_selection_feature_score_figure(self):\n\t\tif self._selection_feature_score_fig is None:\n\t\t\treturn\n\t\tfig = 
self._selection_feature_score_fig\n\t\tfig.data[0].marker.color = 'yellow'\n\n\tdef _update_selection_feature_score_figure(self):\n\t\tif self._selection_feature_score_fig is None:\n\t\t\treturn\n\t\tfig = self._selection_feature_score_fig\n\t\ttry:\n\t\t\tscores = self._compute_selection_feature_scores().data.iloc[0]\n\t\texcept KeyboardInterrupt:\n\t\t\traise\n\t\texcept:\n\t\t\tscores = {}\n\t\ty = self.scope.get_parameter_names(False)\n\t\tx = [scores.get(yi, numpy.nan) for yi in y]\n\t\tfmt = lambda x: x if isinstance(x, str) else \"{:.3f}\".format(x)\n\t\tt = [fmt(scores.get(yi, \"N/A\")) for yi in y]\n\t\twith fig.batch_update():\n\t\t\tfig.data[0].x = x\n\t\t\tfig.data[0].text = t\n\t\t\tfig.data[0].marker.color = colors.DEFAULT_HIGHLIGHT_COLOR\n\n\tdef subvisualize(self, query=None, iloc=None, copy=True):\n\t\tkwargs = dict(\n\t\t\treference_point=self._reference_point,\n\t\t\tscope=self.scope,\n\t\t)\n\t\tif isinstance(query, str):\n\t\t\tkwargs['data'] = self.data.query(query)\n\t\telif iloc is not None:\n\t\t\tkwargs['data'] = self.data.iloc[query]\n\t\telse:\n\t\t\tkwargs['data'] = self.data[query]\n\t\tif copy:\n\t\t\tkwargs['data'] = kwargs['data'].copy()\n\t\treturn type(self)(**kwargs)\n",
"\nimport pandas as pd\nimport numpy as np\n\nfrom ...workbench import RealParameter, IntegerParameter, BooleanParameter, CategoricalParameter\nfrom ...workbench.em_framework.callbacks import AbstractCallback\nfrom ...workbench.util.ema_exceptions import EMAError\n\nfrom ...util.loggers import get_module_logger\nfrom ..._pkg_constants import *\n\n_logger = get_module_logger(__name__)\n\nclass SQLiteCallback(AbstractCallback):\n \"\"\"\n default callback system\n callback can be used in perform_experiments as a means for\n specifying the way in which the results should be handled. If no\n callback is specified, this default implementation is used. This\n one can be overwritten or replaced with a callback of your own\n design. For example if you prefer to store the result in a database\n or write them to a text file\n \"\"\"\n i = 0\n cases = None\n results = {}\n\n shape_error_msg = \"can only save up to 2d arrays, this array is {}d\"\n constraint_error_msg = ('can only save 1d arrays for constraint, '\n 'this array is {}d')\n\n def __init__(self, uncs, levers, outcomes, nr_experiments,\n reporting_interval=100, reporting_frequency=10,\n scope_name=None, design_name=None, db=None,\n using_metamodel=False, metamodel_id=12345,\n ):\n '''\n\n Parameters\n ----------\n uncs : list\n a list of the parameters over which the experiments\n are being run.\n outcomes : list\n a list of outcomes\n nr_experiments : int\n the total number of experiments to be executed\n reporting_interval : int, optional\n the interval between progress logs\n reporting_frequency: int, optional\n the total number of progress logs\n\n '''\n super().__init__(uncs, levers, outcomes,\n nr_experiments, reporting_interval,\n reporting_frequency)\n self.i = 0\n self.cases = None\n self.results = {}\n\n self.outcomes = [outcome.name for outcome in outcomes]\n\n # determine data types of parameters\n columns = []\n dtypes = []\n self.parameters = []\n\n for parameter in uncs + levers:\n name = parameter.name\n self.parameters.append(name)\n dataType = 'float'\n\n if isinstance(parameter, CategoricalParameter):\n dataType = 'object'\n elif isinstance(parameter, BooleanParameter):\n dataType = 'bool'\n elif isinstance(parameter, IntegerParameter):\n dataType = 'int'\n columns.append(name)\n dtypes.append(dataType)\n\n for name in ['scenario', 'policy', 'model']:\n columns.append(name)\n dtypes.append('object')\n\n df = pd.DataFrame(index=np.arange(nr_experiments))\n\n for name, dtype in zip(columns, dtypes):\n df[name] = pd.Series(dtype=dtype)\n self.cases = df\n self.nr_experiments = nr_experiments\n\n self.scope_name = scope_name\n self.design_name = design_name\n self.db = db\n self.using_metamodel = using_metamodel\n self.metamodel_id = metamodel_id\n\n def _store_case(self, experiment):\n scenario = experiment.scenario\n policy = experiment.policy\n index = experiment.experiment_id\n\n self.cases.at[index, 'scenario'] = scenario.name\n self.cases.at[index, 'policy'] = policy.name\n self.cases.at[index, 'model'] = experiment.model_name\n\n for k, v in scenario.items():\n self.cases.at[index, k] = v\n\n for k, v in policy.items():\n self.cases.at[index, k] = v\n\n ex_ids = self.db.write_experiment_parameters(self.scope_name, self.design_name, self.cases.iloc[index:index+1, :-3])\n return ex_ids[0]\n\n def _store_outcomes(self, case_id, outcomes, ex_id):\n for outcome in self.outcomes:\n\n try:\n outcome_res = outcomes[outcome]\n except KeyError:\n message = \"%s not specified as outcome in msi\" % outcome\n 
_logger.debug(message)\n else:\n # outcome is found, store it\n try:\n self.results[outcome][case_id, ] = outcome_res\n except KeyError:\n # outcome is non-scalar\n shape = np.asarray(outcome_res).shape\n\n if len(shape) > 2:\n message = self.shape_error_msg.format(len(shape))\n raise EMAError(message)\n\n shape = list(shape)\n shape.insert(0, self.nr_experiments)\n\n self.results[outcome] = np.empty(shape)\n self.results[outcome][:] = np.NAN\n self.results[outcome][case_id, ] = outcome_res\n\n _logger.debug(\"stored {} = {}\".format(outcome, outcome_res))\n\n self.db.write_ex_m_1(self.scope_name,\n SOURCE_IS_CORE_MODEL if not self.using_metamodel else self.metamodel_id,\n ex_id,\n outcome,\n outcome_res,)\n\n def __call__(self, experiment, outcomes):\n '''\n Method responsible for storing results. This method calls\n :meth:`super` first, thus utilizing the logging provided there.\n\n Parameters\n ----------\n experiment: Experiment instance\n outcomes: dict\n the outcomes dict\n\n '''\n super().__call__(experiment, outcomes)\n\n # store the case\n ex_id = self._store_case(experiment)\n\n # store outcomes\n self._store_outcomes(experiment.experiment_id, outcomes, ex_id)\n\n def get_results(self):\n return self.cases, self.results\n\n\n\ndef SQLiteCallbackFactory(scope_name=None, design_name=None, db=None, using_metamodel=False):\n return lambda *a, **k: SQLiteCallback(*a,**k,\n scope_name=scope_name,\n design_name=design_name,\n db=db,\n using_metamodel=using_metamodel)\n\n",
"import numpy as np\nimport pandas as pd\nimport pytest\nfrom pytest import approx, raises\nfrom sklearn.utils import Bunch\nimport emat.examples\n\n\n\n\ndef test_basic_road_test_example(dataframe_regression):\n\tscope, db, model = emat.examples.road_test()\n\tdesign = model.design_experiments()\n\tresult = model.run_experiments(design)\n\tdataframe_regression.check(\n\t\tpd.DataFrame(result),\n\t\tbasename='test_basic_road_test_example_first_result',\n\t)\n\n\tref_result = model.run_reference_experiment()\n\tdataframe_regression.check(\n\t\tpd.DataFrame(ref_result),\n\t\tbasename='test_basic_road_test_example_ref_result',\n\t)\n\n\t# re-run reference model, should get same experiment number\n\tref_result2 = model.run_reference_experiment()\n\tpd.testing.assert_frame_equal(ref_result, ref_result2)\n\n\nif __name__ == '__main__':\n\n\tclass Noop:\n\t\tdef check(self, *args, **kwargs):\n\t\t\tpass\n\n\ttest_basic_road_test_example(Noop())"
] |
[
[
"pandas.CategoricalDtype",
"pandas.DataFrame",
"numpy.ceil",
"numpy.histogram",
"numpy.sum"
],
[
"numpy.asarray",
"numpy.arange",
"pandas.Series",
"numpy.empty"
],
[
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
]
] |
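Note: the numpy/pandas calls listed above for the Visualizer file center on its cached-histogram pattern (`_compute_histogram`). A minimal standalone sketch of that pattern follows, using synthetic data; it is an illustration of the technique, not part of the dataset's source files. The idea is to bin the full column once, then re-bin only the selected rows against the same bin edges so the two bar sets are directly comparable.

import numpy as np
import pandas as pd

# synthetic stand-in for one column of the visualizer's data (assumption)
data = pd.Series(np.random.default_rng(0).normal(size=1000))
selection = data > 0.5  # boolean selection mask, indexed like the data

# bin the full data once, creating the bin edges
bar_heights, bar_x = np.histogram(data, bins=20)
# re-bin only the selected rows against the SAME edges
bar_heights_sel, _ = np.histogram(data[selection], bins=bar_x)
bins_left = bar_x[:-1]
bins_width = bar_x[1:] - bar_x[:-1]

print(int(selection.sum()), "of", selection.size, "cases selected")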
ssmithTaylor/openpilot
|
[
"ae1083d700190eb050d1da1ce88ecd53a790a61d"
] |
[
"selfdrive/debug/toyota_eps_factor.py"
] |
[
"#!/usr/bin/env python3\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model # pylint: disable=import-error\nfrom selfdrive.car.toyota.values import STEER_THRESHOLD\n\nfrom tools.lib.route import Route\nfrom tools.lib.logreader import MultiLogIterator\n\nMIN_SAMPLES = 30 * 100\n\n\ndef to_signed(n, bits):\n if n >= (1 << max((bits - 1), 0)):\n n = n - (1 << max(bits, 0))\n return n\n\n\ndef get_eps_factor(lr, plot=False):\n engaged = False\n steering_pressed = False\n torque_cmd, eps_torque = None, None\n cmds, eps = [], []\n\n for msg in lr:\n if msg.which() != 'can':\n continue\n\n for m in msg.can:\n if m.address == 0x2e4 and m.src == 128:\n engaged = bool(m.dat[0] & 1)\n torque_cmd = to_signed((m.dat[1] << 8) | m.dat[2], 16)\n elif m.address == 0x260 and m.src == 0:\n eps_torque = to_signed((m.dat[5] << 8) | m.dat[6], 16)\n steering_pressed = abs(to_signed((m.dat[1] << 8) | m.dat[2], 16)) > STEER_THRESHOLD\n\n if engaged and torque_cmd is not None and eps_torque is not None and not steering_pressed:\n cmds.append(torque_cmd)\n eps.append(eps_torque)\n else:\n if len(cmds) > MIN_SAMPLES:\n break\n cmds, eps = [], []\n\n if len(cmds) < MIN_SAMPLES:\n raise Exception(\"too few samples found in route\")\n\n lm = linear_model.LinearRegression(fit_intercept=False)\n lm.fit(np.array(cmds).reshape(-1, 1), eps)\n scale_factor = 1. / lm.coef_[0]\n\n if plot:\n plt.plot(np.array(eps) * scale_factor)\n plt.plot(cmds)\n plt.show()\n return scale_factor\n\nif __name__ == \"__main__\":\n r = Route(sys.argv[1])\n lr = MultiLogIterator(r.log_paths(), wraparound=False)\n n = get_eps_factor(lr, plot=\"--plot\" in sys.argv)\n print(\"EPS torque factor: \", n)\n"
] |
[
[
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.show",
"sklearn.linear_model.LinearRegression"
]
] |
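The openpilot script above fits EPS torque against commanded torque with a zero-intercept linear regression and reports the scale factor as the reciprocal of the fitted slope. A minimal sketch of that estimation step follows, using synthetic data in place of a real drive log (the true factor of 3.0 here is an assumption for illustration):

import numpy as np
from sklearn import linear_model

rng = np.random.default_rng(0)
cmds = rng.uniform(-1500, 1500, size=5000)       # commanded torque samples
eps = cmds / 3.0 + rng.normal(0, 5, size=5000)   # noisy measured EPS torque

# no intercept: the relationship is assumed to pass through zero
lm = linear_model.LinearRegression(fit_intercept=False)
lm.fit(np.array(cmds).reshape(-1, 1), eps)
scale_factor = 1.0 / lm.coef_[0]
print("EPS torque factor:", scale_factor)        # recovers ~3.0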
ChenddatHKU/scanpy
|
[
"1b290be6fd297023a0bd705e66f69254c1626fc4"
] |
[
"scanpy/tools/_rank_genes_groups.py"
] |
[
"\"\"\"Rank genes according to differential expression.\n\"\"\"\nfrom math import floor\nfrom typing import Iterable, Union, Optional\n\nimport numpy as np\nimport pandas as pd\nfrom anndata import AnnData\nfrom scipy.sparse import issparse, vstack\n\nfrom .. import _utils\nfrom .. import logging as logg\nfrom ..preprocessing._simple import _get_mean_var\nfrom .._compat import Literal\n\n\n_Method = Optional[Literal['logreg', 't-test', 'wilcoxon', 't-test_overestim_var']]\n_CorrMethod = Literal['benjamini-hochberg', 'bonferroni']\n\n\ndef _select_top_n(scores, n_top):\n n_from = scores.shape[0]\n reference_indices = np.arange(n_from, dtype=int)\n partition = np.argpartition(scores, -n_top)[-n_top:]\n partial_indices = np.argsort(scores[partition])[::-1]\n global_indices = reference_indices[partition][partial_indices]\n\n return global_indices\n\n\ndef _ranks(X, mask=None, mask_rest=None):\n CONST_MAX_SIZE = 10000000\n\n n_genes = X.shape[1]\n\n if issparse(X):\n merge = lambda tpl: vstack(tpl).toarray()\n adapt = lambda X: X.toarray()\n else:\n merge = np.vstack\n adapt = lambda X: X\n\n masked = mask is not None and mask_rest is not None\n\n if masked:\n n_cells = np.count_nonzero(mask) + np.count_nonzero(mask_rest)\n get_chunk = lambda X, left, right: merge(\n (X[mask, left:right], X[mask_rest, left:right])\n )\n else:\n n_cells = X.shape[0]\n get_chunk = lambda X, left, right: adapt(X[:, left:right])\n\n # Calculate chunk frames\n max_chunk = floor(CONST_MAX_SIZE / n_cells)\n\n for left in range(0, n_genes, max_chunk):\n right = min(left + max_chunk, n_genes)\n\n df = pd.DataFrame(data=get_chunk(X, left, right))\n ranks = df.rank()\n yield ranks, left, right\n\n\ndef _tiecorrect(ranks):\n size = np.float64(ranks.shape[0])\n if size < 2:\n return np.repeat(ranks.shape[1], 1.0)\n\n arr = np.sort(ranks, axis=0)\n tf = np.insert(arr[1:] != arr[:-1], (0, arr.shape[0] - 1), True, axis=0)\n idx = np.where(tf, np.arange(tf.shape[0])[:, None], 0)\n idx = np.sort(idx, axis=0)\n cnt = np.diff(idx, axis=0).astype(np.float64)\n\n return 1.0 - (cnt ** 3 - cnt).sum(axis=0) / (size ** 3 - size)\n\n\nclass _RankGenes:\n def __init__(\n self,\n adata,\n groups,\n groupby,\n reference='rest',\n use_raw=True,\n layer=None,\n comp_pts=False,\n ):\n\n if 'log1p' in adata.uns_keys() and adata.uns['log1p']['base'] is not None:\n self.expm1_func = lambda x: np.expm1(x * np.log(adata.uns['log1p']['base']))\n else:\n self.expm1_func = np.expm1\n\n self.groups_order, self.groups_masks = _utils.select_groups(\n adata, groups, groupby\n )\n\n adata_comp = adata\n if layer is not None:\n if use_raw:\n raise ValueError(\"Cannot specify `layer` and have `use_raw=True`.\")\n X = adata_comp.layers[layer]\n else:\n if use_raw and adata.raw is not None:\n adata_comp = adata.raw\n X = adata_comp.X\n\n # for correct getnnz calculation\n if issparse(X):\n X.eliminate_zeros()\n\n self.X = X\n self.var_names = adata_comp.var_names\n\n self.ireference = None\n if reference != 'rest':\n self.ireference = np.where(self.groups_order == reference)[0][0]\n\n self.means = None\n self.vars = None\n\n self.means_rest = None\n self.vars_rest = None\n\n self.comp_pts = comp_pts\n self.pts = None\n self.pts_rest = None\n\n self.stats = None\n\n # for logreg only\n self.grouping_mask = adata.obs[groupby].isin(self.groups_order)\n self.grouping = adata.obs.loc[self.grouping_mask, groupby]\n\n def _basic_stats(self):\n n_genes = self.X.shape[1]\n n_groups = self.groups_masks.shape[0]\n\n self.means = np.zeros((n_groups, n_genes))\n self.vars 
= np.zeros((n_groups, n_genes))\n self.pts = np.zeros((n_groups, n_genes)) if self.comp_pts else None\n\n if self.ireference is None:\n self.means_rest = np.zeros((n_groups, n_genes))\n self.vars_rest = np.zeros((n_groups, n_genes))\n self.pts_rest = np.zeros((n_groups, n_genes)) if self.comp_pts else None\n else:\n mask_rest = self.groups_masks[self.ireference]\n X_rest = self.X[mask_rest]\n self.means[self.ireference], self.vars[self.ireference] = _get_mean_var(\n X_rest\n )\n # deleting the next line causes a memory leak for some reason\n del X_rest\n\n if issparse(self.X):\n get_nonzeros = lambda X: X.getnnz(axis=0)\n else:\n get_nonzeros = lambda X: np.count_nonzero(X, axis=0)\n\n for imask, mask in enumerate(self.groups_masks):\n X_mask = self.X[mask]\n\n if self.comp_pts:\n self.pts[imask] = get_nonzeros(X_mask) / X_mask.shape[0]\n\n if self.ireference is not None and imask == self.ireference:\n continue\n\n self.means[imask], self.vars[imask] = _get_mean_var(X_mask)\n\n if self.ireference is None:\n mask_rest = ~mask\n X_rest = self.X[mask_rest]\n self.means_rest[imask], self.vars_rest[imask] = _get_mean_var(X_rest)\n # this can be costly for sparse data\n if self.comp_pts:\n self.pts_rest[imask] = get_nonzeros(X_rest) / X_rest.shape[0]\n # deleting the next line causes a memory leak for some reason\n del X_rest\n\n def t_test(self, method):\n from scipy import stats\n\n self._basic_stats()\n\n for group_index, mask in enumerate(self.groups_masks):\n if self.ireference is not None and group_index == self.ireference:\n continue\n\n mean_group = self.means[group_index]\n var_group = self.vars[group_index]\n ns_group = np.count_nonzero(mask)\n\n if self.ireference is not None:\n mean_rest = self.means[self.ireference]\n var_rest = self.vars[self.ireference]\n ns_other = np.count_nonzero(self.groups_masks[self.ireference])\n else:\n mean_rest = self.means_rest[group_index]\n var_rest = self.vars_rest[group_index]\n ns_other = self.X.shape[0] - ns_group\n\n if method == 't-test':\n ns_rest = ns_other\n elif method == 't-test_overestim_var':\n # hack for overestimating the variance for small groups\n ns_rest = ns_group\n else:\n raise ValueError('Method does not exist.')\n\n # TODO: Come up with better solution. Mask unexpressed genes?\n # See https://github.com/scipy/scipy/issues/10269\n with np.errstate(invalid=\"ignore\"):\n scores, pvals = stats.ttest_ind_from_stats(\n mean1=mean_group,\n std1=np.sqrt(var_group),\n nobs1=ns_group,\n mean2=mean_rest,\n std2=np.sqrt(var_rest),\n nobs2=ns_rest,\n equal_var=False, # Welch's\n )\n\n # I think it's only nan when means are the same and vars are 0\n scores[np.isnan(scores)] = 0\n # This also has to happen for Benjamini Hochberg\n pvals[np.isnan(pvals)] = 1\n\n yield group_index, scores, pvals\n\n def wilcoxon(self, tie_correct):\n from scipy import stats\n\n self._basic_stats()\n\n n_genes = self.X.shape[1]\n # First loop: Loop over all genes\n if self.ireference is not None:\n # initialize space for z-scores\n scores = np.zeros(n_genes)\n # initialize space for tie correction coefficients\n if tie_correct:\n T = np.zeros(n_genes)\n else:\n T = 1\n\n for group_index, mask in enumerate(self.groups_masks):\n if group_index == self.ireference:\n continue\n\n mask_rest = self.groups_masks[self.ireference]\n\n n_active = np.count_nonzero(mask)\n m_active = np.count_nonzero(mask_rest)\n\n if n_active <= 25 or m_active <= 25:\n logg.hint(\n 'Few observations in a group for '\n 'normal approximation (<=25). 
Lower test accuracy.'\n )\n\n # Calculate rank sums for each chunk for the current mask\n for ranks, left, right in _ranks(self.X, mask, mask_rest):\n scores[left:right] = np.sum(ranks.iloc[0:n_active, :])\n if tie_correct:\n T[left:right] = _tiecorrect(ranks)\n\n std_dev = np.sqrt(\n T * n_active * m_active * (n_active + m_active + 1) / 12.0\n )\n\n scores = (\n scores - (n_active * ((n_active + m_active + 1) / 2.0))\n ) / std_dev\n scores[np.isnan(scores)] = 0\n pvals = 2 * stats.distributions.norm.sf(np.abs(scores))\n\n yield group_index, scores, pvals\n # If no reference group exists,\n # ranking needs only to be done once (full mask)\n else:\n n_groups = self.groups_masks.shape[0]\n scores = np.zeros((n_groups, n_genes))\n n_cells = self.X.shape[0]\n\n if tie_correct:\n T = np.zeros((n_groups, n_genes))\n\n for ranks, left, right in _ranks(self.X):\n # sum up adjusted_ranks to calculate W_m,n\n for imask, mask in enumerate(self.groups_masks):\n scores[imask, left:right] = np.sum(ranks.iloc[mask, :])\n if tie_correct:\n T[imask, left:right] = _tiecorrect(ranks)\n\n for group_index, mask in enumerate(self.groups_masks):\n n_active = np.count_nonzero(mask)\n\n if tie_correct:\n T_i = T[group_index]\n else:\n T_i = 1\n\n std_dev = np.sqrt(\n T_i * n_active * (n_cells - n_active) * (n_cells + 1) / 12.0\n )\n\n scores[group_index, :] = (\n scores[group_index, :] - (n_active * (n_cells + 1) / 2.0)\n ) / std_dev\n scores[np.isnan(scores)] = 0\n pvals = 2 * stats.distributions.norm.sf(np.abs(scores[group_index, :]))\n\n yield group_index, scores[group_index], pvals\n\n def logreg(self, **kwds):\n # if reference is not set, then the groups listed will be compared to the rest\n # if reference is set, then the groups listed will be compared only to the other groups listed\n from sklearn.linear_model import LogisticRegression\n\n # Indexing with a series causes issues, possibly segfault\n X = self.X[self.grouping_mask.values, :]\n\n if len(self.groups_order) == 1:\n raise ValueError('Cannot perform logistic regression on a single cluster.')\n\n clf = LogisticRegression(**kwds)\n clf.fit(X, self.grouping.cat.codes)\n scores_all = clf.coef_\n for igroup, _ in enumerate(self.groups_order):\n if len(self.groups_order) <= 2: # binary logistic regression\n scores = scores_all[0]\n else:\n scores = scores_all[igroup]\n\n yield igroup, scores, None\n\n if len(self.groups_order) <= 2:\n break\n\n def compute_statistics(\n self,\n method,\n corr_method='benjamini-hochberg',\n n_genes_user=None,\n rankby_abs=False,\n tie_correct=False,\n **kwds,\n ):\n\n if method in {'t-test', 't-test_overestim_var'}:\n generate_test_results = self.t_test(method)\n elif method == 'wilcoxon':\n generate_test_results = self.wilcoxon(tie_correct)\n elif method == 'logreg':\n generate_test_results = self.logreg(**kwds)\n\n self.stats = None\n\n n_genes = self.X.shape[1]\n\n for group_index, scores, pvals in generate_test_results:\n group_name = str(self.groups_order[group_index])\n\n if n_genes_user is not None:\n scores_sort = np.abs(scores) if rankby_abs else scores\n global_indices = _select_top_n(scores_sort, n_genes_user)\n first_col = 'names'\n else:\n global_indices = slice(None)\n first_col = 'scores'\n\n if self.stats is None:\n idx = pd.MultiIndex.from_tuples([(group_name, first_col)])\n self.stats = pd.DataFrame(columns=idx)\n\n if n_genes_user is not None:\n self.stats[group_name, 'names'] = self.var_names[global_indices]\n\n self.stats[group_name, 'scores'] = scores[global_indices]\n\n if pvals is not None:\n 
self.stats[group_name, 'pvals'] = pvals[global_indices]\n if corr_method == 'benjamini-hochberg':\n from statsmodels.stats.multitest import multipletests\n\n pvals[np.isnan(pvals)] = 1\n _, pvals_adj, _, _ = multipletests(\n pvals, alpha=0.05, method='fdr_bh'\n )\n elif corr_method == 'bonferroni':\n pvals_adj = np.minimum(pvals * n_genes, 1.0)\n self.stats[group_name, 'pvals_adj'] = pvals_adj[global_indices]\n\n if self.means is not None:\n mean_group = self.means[group_index]\n if self.ireference is None:\n mean_rest = self.means_rest[group_index]\n else:\n mean_rest = self.means[self.ireference]\n foldchanges = (self.expm1_func(mean_group) + 1e-9) / (\n self.expm1_func(mean_rest) + 1e-9\n ) # add small value to remove 0's\n self.stats[group_name, 'logfoldchanges'] = np.log2(\n foldchanges[global_indices]\n )\n\n if n_genes_user is None:\n self.stats.index = self.var_names\n\n\n# TODO: Make arguments after groupby keyword only\ndef rank_genes_groups(\n adata: AnnData,\n groupby: str,\n use_raw: bool = True,\n groups: Union[Literal['all'], Iterable[str]] = 'all',\n reference: str = 'rest',\n n_genes: Optional[int] = None,\n rankby_abs: bool = False,\n pts: bool = False,\n key_added: Optional[str] = None,\n copy: bool = False,\n method: _Method = None,\n corr_method: _CorrMethod = 'benjamini-hochberg',\n tie_correct: bool = False,\n layer: Optional[str] = None,\n **kwds,\n) -> Optional[AnnData]:\n \"\"\"\\\n Rank genes for characterizing groups.\n\n Parameters\n ----------\n adata\n Annotated data matrix.\n groupby\n The key of the observations grouping to consider.\n use_raw\n Use `raw` attribute of `adata` if present.\n layer\n Key from `adata.layers` whose value will be used to perform tests on.\n groups\n Subset of groups, e.g. [`'g1'`, `'g2'`, `'g3'`], to which comparison\n shall be restricted, or `'all'` (default), for all groups.\n reference\n If `'rest'`, compare each group to the union of the rest of the group.\n If a group identifier, compare with respect to this group.\n n_genes\n The number of genes that appear in the returned tables.\n Defaults to all genes.\n method\n The default method is `'t-test'`,\n `'t-test_overestim_var'` overestimates variance of each group,\n `'wilcoxon'` uses Wilcoxon rank-sum,\n `'logreg'` uses logistic regression. See [Ntranos18]_,\n `here <https://github.com/theislab/scanpy/issues/95>`__ and `here\n <http://www.nxn.se/valent/2018/3/5/actionable-scrna-seq-clusters>`__,\n for why this is meaningful.\n corr_method\n p-value correction method.\n Used only for `'t-test'`, `'t-test_overestim_var'`, and `'wilcoxon'`.\n tie_correct\n Use tie correction for `'wilcoxon'` scores.\n Used only for `'wilcoxon'`.\n rankby_abs\n Rank genes by the absolute value of the score, not by the\n score. The returned scores are never the absolute values.\n pts\n Compute the fraction of cells expressing the genes.\n key_added\n The key in `adata.uns` information is saved to.\n **kwds\n Are passed to test methods. Currently this affects only parameters that\n are passed to :class:`sklearn.linear_model.LogisticRegression`.\n For instance, you can pass `penalty='l1'` to try to come up with a\n minimal set of genes that are good predictors (sparse solution meaning\n few non-zero fitted coefficients).\n\n Returns\n -------\n **names** : structured `np.ndarray` (`.uns['rank_genes_groups']`)\n Structured array to be indexed by group id storing the gene\n names. 
Ordered according to scores.\n **scores** : structured `np.ndarray` (`.uns['rank_genes_groups']`)\n Structured array to be indexed by group id storing the z-score\n underlying the computation of a p-value for each gene for each\n group. Ordered according to scores.\n **logfoldchanges** : structured `np.ndarray` (`.uns['rank_genes_groups']`)\n Structured array to be indexed by group id storing the log2\n fold change for each gene for each group. Ordered according to\n scores. Only provided if method is 't-test' like.\n Note: this is an approximation calculated from mean-log values.\n **pvals** : structured `np.ndarray` (`.uns['rank_genes_groups']`)\n p-values.\n **pvals_adj** : structured `np.ndarray` (`.uns['rank_genes_groups']`)\n Corrected p-values.\n **pts** : `pandas.DataFrame` (`.uns['rank_genes_groups']`)\n Fraction of cells expressing the genes for each group.\n **pts_rest** : `pandas.DataFrame` (`.uns['rank_genes_groups']`)\n Only if `reference` is set to `'rest'`.\n Fraction of cells from the union of the rest of each group\n expressing the genes.\n\n Notes\n -----\n There are slight inconsistencies depending on whether sparse\n or dense data are passed. See `here <https://github.com/theislab/scanpy/blob/master/scanpy/tests/test_rank_genes_groups.py>`__.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> adata = sc.datasets.pbmc68k_reduced()\n >>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon')\n\n # to visualize the results\n >>> sc.pl.rank_genes_groups(adata)\n \"\"\"\n if method is None:\n logg.warning(\n \"Default of the method has been changed to 't-test' from 't-test_overestim_var'\"\n )\n method = 't-test'\n\n if 'only_positive' in kwds:\n rankby_abs = not kwds.pop('only_positive') # backwards compat\n\n start = logg.info('ranking genes')\n avail_methods = {'t-test', 't-test_overestim_var', 'wilcoxon', 'logreg'}\n if method not in avail_methods:\n raise ValueError(f'Method must be one of {avail_methods}.')\n\n avail_corr = {'benjamini-hochberg', 'bonferroni'}\n if corr_method not in avail_corr:\n raise ValueError(f'Correction method must be one of {avail_corr}.')\n\n adata = adata.copy() if copy else adata\n _utils.sanitize_anndata(adata)\n # for clarity, rename variable\n if groups == 'all':\n groups_order = 'all'\n elif isinstance(groups, (str, int)):\n raise ValueError('Specify a sequence of groups')\n else:\n groups_order = list(groups)\n if isinstance(groups_order[0], int):\n groups_order = [str(n) for n in groups_order]\n if reference != 'rest' and reference not in set(groups_order):\n groups_order += [reference]\n if reference != 'rest' and reference not in adata.obs[groupby].cat.categories:\n cats = adata.obs[groupby].cat.categories.tolist()\n raise ValueError(\n f'reference = {reference} needs to be one of groupby = {cats}.'\n )\n\n if key_added is None:\n key_added = 'rank_genes_groups'\n adata.uns[key_added] = {}\n adata.uns[key_added]['params'] = dict(\n groupby=groupby,\n reference=reference,\n method=method,\n use_raw=use_raw,\n layer=layer,\n corr_method=corr_method,\n )\n\n test_obj = _RankGenes(adata, groups_order, groupby, reference, use_raw, layer, pts)\n\n # for clarity, rename variable\n n_genes_user = n_genes\n # make sure indices are not OoB in case there are less genes than n_genes\n # defaults to all genes\n if n_genes_user is None or n_genes_user > test_obj.X.shape[1]:\n n_genes_user = test_obj.X.shape[1]\n\n logg.debug(f'consider {groupby!r} groups:')\n logg.debug(f'with sizes: {np.count_nonzero(test_obj.groups_masks, 
axis=1)}')\n\n test_obj.compute_statistics(\n method, corr_method, n_genes_user, rankby_abs, tie_correct, **kwds\n )\n\n if test_obj.pts is not None:\n groups_names = [str(name) for name in test_obj.groups_order]\n adata.uns[key_added]['pts'] = pd.DataFrame(\n test_obj.pts.T, index=test_obj.var_names, columns=groups_names\n )\n if test_obj.pts_rest is not None:\n adata.uns[key_added]['pts_rest'] = pd.DataFrame(\n test_obj.pts_rest.T, index=test_obj.var_names, columns=groups_names\n )\n\n test_obj.stats.columns = test_obj.stats.columns.swaplevel()\n\n dtypes = {\n 'names': 'O',\n 'scores': 'float32',\n 'logfoldchanges': 'float32',\n 'pvals': 'float64',\n 'pvals_adj': 'float64',\n }\n\n for col in test_obj.stats.columns.levels[0]:\n adata.uns[key_added][col] = test_obj.stats[col].to_records(\n index=False, column_dtypes=dtypes[col]\n )\n\n logg.info(\n ' finished',\n time=start,\n deep=(\n f'added to `.uns[{key_added!r}]`\\n'\n \" 'names', sorted np.recarray to be indexed by group ids\\n\"\n \" 'scores', sorted np.recarray to be indexed by group ids\\n\"\n + (\n \" 'logfoldchanges', sorted np.recarray to be indexed by group ids\\n\"\n \" 'pvals', sorted np.recarray to be indexed by group ids\\n\"\n \" 'pvals_adj', sorted np.recarray to be indexed by group ids\"\n if method in {'t-test', 't-test_overestim_var', 'wilcoxon'}\n else ''\n )\n ),\n )\n return adata if copy else None\n\n\ndef filter_rank_genes_groups(\n adata: AnnData,\n key=None,\n groupby=None,\n use_raw=True,\n log=True,\n key_added='rank_genes_groups_filtered',\n min_in_group_fraction=0.25,\n min_fold_change=2,\n max_out_group_fraction=0.5,\n) -> None:\n \"\"\"\\\n Filters out genes based on fold change and fraction of genes expressing the\n gene within and outside the `groupby` categories.\n\n See :func:`~scanpy.tl.rank_genes_groups`.\n\n Results are stored in `adata.uns[key_added]`\n (default: 'rank_genes_groups_filtered').\n\n To preserve the original structure of adata.uns['rank_genes_groups'],\n filtered genes are set to `NaN`.\n\n Parameters\n ----------\n adata\n key\n groupby\n use_raw\n log\n If true, it means that the values to work with are in log scale\n key_added\n min_in_group_fraction\n min_fold_change\n max_out_group_fraction\n\n Returns\n -------\n Same output as :func:`scanpy.tl.rank_genes_groups` but with filtered genes names set to\n `nan`\n\n Examples\n --------\n >>> import scanpy as sc\n >>> adata = sc.datasets.pbmc68k_reduced()\n >>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon')\n >>> sc.tl.filter_rank_genes_groups(adata, min_fold_change=3)\n >>> # visualize results\n >>> sc.pl.rank_genes_groups(adata, key='rank_genes_groups_filtered')\n >>> # visualize results using dotplot\n >>> sc.pl.rank_genes_groups_dotplot(adata, key='rank_genes_groups_filtered')\n \"\"\"\n if key is None:\n key = 'rank_genes_groups'\n\n if groupby is None:\n groupby = str(adata.uns[key]['params']['groupby'])\n\n # convert structured numpy array into DataFrame\n gene_names = pd.DataFrame(adata.uns[key]['names'])\n\n fraction_in_cluster_matrix = pd.DataFrame(\n np.zeros(gene_names.shape),\n columns=gene_names.columns,\n index=gene_names.index,\n )\n fold_change_matrix = pd.DataFrame(\n np.zeros(gene_names.shape),\n columns=gene_names.columns,\n index=gene_names.index,\n )\n fraction_out_cluster_matrix = pd.DataFrame(\n np.zeros(gene_names.shape),\n columns=gene_names.columns,\n index=gene_names.index,\n )\n logg.info(\n f\"Filtering genes using: \"\n f\"min_in_group_fraction: {min_in_group_fraction} \"\n 
f\"min_fold_change: {min_fold_change}, \"\n f\"max_out_group_fraction: {max_out_group_fraction}\"\n )\n from ..plotting._anndata import _prepare_dataframe\n for cluster in gene_names.columns:\n # iterate per column\n var_names = gene_names[cluster].values\n\n # add column to adata as __is_in_cluster__. This facilitates to measure\n # fold change of each gene with respect to all other clusters\n adata.obs['__is_in_cluster__'] = pd.Categorical(adata.obs[groupby] == cluster)\n\n # obs_tidy has rows=groupby, columns=var_names\n categories, obs_tidy = _prepare_dataframe(\n adata,\n var_names,\n groupby='__is_in_cluster__',\n use_raw=use_raw,\n )\n\n # for if category defined by groupby (if any) compute for each var_name\n # 1. the mean value over the category\n # 2. the fraction of cells in the category having a value > 0\n\n # 1. compute mean value\n mean_obs = obs_tidy.groupby(level=0).mean()\n\n # 2. compute fraction of cells having value >0\n # transform obs_tidy into boolean matrix\n obs_bool = obs_tidy.astype(bool)\n\n # compute the sum per group which in the boolean matrix this is the number\n # of values >0, and divide the result by the total number of values in the group\n # (given by `count()`)\n fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count()\n\n # Because the dataframe groupby is based on the '__is_in_cluster__' column,\n # in this context, [True] means __is_in_cluster__.\n # Also, in this context, fraction_obs.loc[True].values is the row of values\n # that is assigned *as column* to fraction_in_cluster_matrix to follow the\n # structure of the gene_names dataFrame\n fraction_in_cluster_matrix.loc[:, cluster] = fraction_obs.loc[True].values\n fraction_out_cluster_matrix.loc[:, cluster] = fraction_obs.loc[False].values\n\n # compute fold change.\n if log:\n fold_change_matrix.loc[:, cluster] = (np.exp(mean_obs.loc[True]) / np.exp(mean_obs.loc[False])).values\n else:\n fold_change_matrix.loc[:, cluster] = (mean_obs.loc[True] / mean_obs.loc[False]).values\n\n # remove temporary columns\n adata.obs.drop(columns='__is_in_cluster__')\n # filter original_matrix\n gene_names = gene_names[\n (fraction_in_cluster_matrix > min_in_group_fraction) &\n (fraction_out_cluster_matrix < max_out_group_fraction) &\n (fold_change_matrix > min_fold_change)\n ]\n # create new structured array using 'key_added'.\n adata.uns[key_added] = adata.uns[key].copy()\n adata.uns[key_added]['names'] = gene_names.to_records(index=False)\n"
] |
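The `wilcoxon` method in the code above standardizes per-group rank sums into z-scores. A minimal standalone sketch of that statistic on toy data (array names and values here are illustrative, not from the repo; the tie-correction factor is omitted, i.e. T = 1):

import numpy as np
from scipy import stats

group = np.array([5.0, 7.0, 9.0])      # expression of one gene in the group
rest = np.array([1.0, 2.0, 3.0, 4.0])  # expression in the reference group
n, m = len(group), len(rest)

# Rank the pooled observations, then sum the ranks belonging to the group
ranks = stats.rankdata(np.concatenate([group, rest]))
rank_sum = ranks[:n].sum()

# Standardize: E[W] = n(n+m+1)/2, Var[W] = nm(n+m+1)/12, as in the code above
z = (rank_sum - n * (n + m + 1) / 2.0) / np.sqrt(n * m * (n + m + 1) / 12.0)
pval = 2 * stats.distributions.norm.sf(abs(z))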
[
[
"numpy.minimum",
"numpy.sqrt",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"scipy.sparse.vstack",
"numpy.exp",
"numpy.where",
"scipy.sparse.issparse",
"numpy.arange",
"numpy.diff",
"numpy.insert",
"numpy.argpartition",
"numpy.count_nonzero",
"numpy.repeat",
"numpy.zeros",
"numpy.log",
"numpy.isnan",
"pandas.Categorical",
"numpy.errstate",
"numpy.argsort",
"numpy.sum",
"numpy.log2",
"sklearn.linear_model.LogisticRegression",
"numpy.abs",
"numpy.sort",
"numpy.float64"
]
] |
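The `numpy.argpartition` / `numpy.argsort` pair listed above is what `_select_top_n` uses to rank genes. A minimal sketch of the pattern (function and variable names are mine, not the repo's): argpartition isolates the n largest scores in linear time, and argsort then orders only that small partition.

import numpy as np

def select_top_n(scores, n_top):
    partition = np.argpartition(scores, -n_top)[-n_top:]  # indices of the n_top largest, unordered
    order = np.argsort(scores[partition])[::-1]           # sort just those n_top scores, descending
    return partition[order]

scores = np.array([0.1, 3.2, 1.5, 2.7, 0.4])
print(select_top_n(scores, 2))  # [1 3]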
shakedmanes/neural-net-circular-gol
|
[
"317a3ef2d52508d86c0118b235b794f1ac6d3717"
] |
[
"neural_network_circular_gol/optimizers.py"
] |
[
"import numpy as np\n\n\n# Adam optimizer\nclass OptimizerADAM:\n \"\"\"\n Adaptive Momentum Stochastic Gradient Decent Optimizer.\n \"\"\"\n\n def __init__(\n self,\n learning_rate=0.001,\n decay=0.,\n epsilon=1e-7,\n beta_1=0.9,\n beta_2=0.999\n ):\n \"\"\"\n Initialize ADAM optimizer parameters.\n\n :param learning_rate: The learning rate of the optimizer.\n :param decay: Decay value of the optimizer\n :param epsilon: Value for epsilon portion for the optimizer.\n :param beta_1: Beta1 value of the optimizer.\n :param beta_2: Beta2 value of the optimizer.\n \"\"\"\n self.learning_rate = learning_rate\n self.current_learning_rate = learning_rate\n self.decay = decay\n self.iterations = 0\n self.epsilon = epsilon\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n\n def pre_update_params(self):\n \"\"\"\n Configure the needed configurations before updating the parameters of the layers.\n Call once before any parameter updates, to update the current learning rate based on the decay and iterations.\n \"\"\"\n if self.decay:\n self.current_learning_rate = self.learning_rate * \\\n (1. / (1. + self.decay * self.iterations))\n\n def update_params(self, layer):\n \"\"\"\n Updates the parameters of a given layer.\n\n :param layer: Layer to update it's parameters.\n \"\"\"\n\n # If layer does not contain cache arrays,\n # create them filled with zeros\n if not hasattr(layer, 'weight_cache'):\n layer.weight_momentums = np.zeros_like(layer.weights)\n layer.weight_cache = np.zeros_like(layer.weights)\n layer.bias_momentums = np.zeros_like(layer.biases)\n layer.bias_cache = np.zeros_like(layer.biases)\n\n # Update momentum with current gradients\n layer.weight_momentums = \\\n self.beta_1 * \\\n layer.weight_momentums + \\\n (1 - self.beta_1) * layer.dweights\n layer.bias_momentums = \\\n self.beta_1 * \\\n layer.bias_momentums + \\\n (1 - self.beta_1) * layer.dbiases\n\n # Get corrected momentum\n # self.iteration is 0 at first pass\n # and we need to start with 1 here\n weight_momentums_corrected = \\\n layer.weight_momentums / \\\n (1 - self.beta_1 ** (self.iterations + 1))\n bias_momentums_corrected = \\\n layer.bias_momentums / \\\n (1 - self.beta_1 ** (self.iterations + 1))\n\n # Update cache with squared current gradients\n layer.weight_cache = \\\n self.beta_2 * layer.weight_cache + \\\n (1 - self.beta_2) * layer.dweights**2\n layer.bias_cache = \\\n self.beta_2 * layer.bias_cache + \\\n (1 - self.beta_2) * layer.dbiases**2\n\n # Get corrected cache\n weight_cache_corrected = \\\n layer.weight_cache / \\\n (1 - self.beta_2 ** (self.iterations + 1))\n bias_cache_corrected = \\\n layer.bias_cache / \\\n (1 - self.beta_2 ** (self.iterations + 1))\n\n # Vanilla SGD parameter update + normalization\n # with square rooted cache\n layer.weights += \\\n -self.current_learning_rate * \\\n weight_momentums_corrected / \\\n (np.sqrt(weight_cache_corrected) + self.epsilon)\n layer.biases += \\\n -self.current_learning_rate * \\\n bias_momentums_corrected / \\\n (np.sqrt(bias_cache_corrected) + self.epsilon)\n\n def post_update_params(self):\n \"\"\"\n Post update parameters configurations.\n Call once after any parameter updates, to update the iterations number after done updating parameters.\n \"\"\"\n self.iterations += 1\n"
] |
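`pre_update_params` in the code above applies inverse-time decay to the learning rate before each round of parameter updates. A tiny sketch of that schedule with made-up values:

learning_rate, decay = 0.001, 1e-3
for iterations in range(3):
    current = learning_rate * (1.0 / (1.0 + decay * iterations))
    print(iterations, current)  # rate shrinks as the iteration count grows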
[
[
"numpy.zeros_like",
"numpy.sqrt"
]
] |
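The `numpy.zeros_like` / `numpy.sqrt` calls listed above are the whole of Adam's state handling and normalization. A minimal single-step sketch of the same bias-corrected update on one parameter array (names and toy values are illustrative, not the repo's API):

import numpy as np

def adam_step(w, grad, m, v, t, lr=0.001, b1=0.9, b2=0.999, eps=1e-7):
    m = b1 * m + (1 - b1) * grad       # first-moment (momentum) estimate
    v = b2 * v + (1 - b2) * grad**2    # second-moment (cache) estimate
    m_hat = m / (1 - b1**t)            # bias correction; t starts at 1
    v_hat = v / (1 - b2**t)
    w = w - lr * m_hat / (np.sqrt(v_hat) + eps)
    return w, m, v

w = np.array([1.0, -2.0])
m, v = np.zeros_like(w), np.zeros_like(w)
w, m, v = adam_step(w, grad=np.array([0.5, -0.1]), m=m, v=v, t=1)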
ayzk/ft-caffe-public
|
[
"888399c2fcf90c227416576a5a265b218c6be5da"
] |
[
"examples/web_demo/app.py"
] |
[
"# \n# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation\n# \n# All contributions by the University of California:\n# Copyright (c) 2014, 2015, The Regents of the University of California (Regents)\n# All rights reserved.\n# \n# All other contributions:\n# Copyright (c) 2014, 2015, the respective contributors\n# All rights reserved.\n# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md\n# \n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Intel Corporation nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\nimport os\nimport time\nimport cPickle\nimport datetime\nimport logging\nimport flask\nimport werkzeug\nimport optparse\nimport tornado.wsgi\nimport tornado.httpserver\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport cStringIO as StringIO\nimport urllib\nimport exifutil\n\nimport caffe\n\nREPO_DIRNAME = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../..')\nUPLOAD_FOLDER = '/tmp/caffe_demos_uploads'\nALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])\n\n# Obtain the flask app object\napp = flask.Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return flask.render_template('index.html', has_result=False)\n\n\[email protected]('/classify_url', methods=['GET'])\ndef classify_url():\n imageurl = flask.request.args.get('imageurl', '')\n try:\n string_buffer = StringIO.StringIO(\n urllib.urlopen(imageurl).read())\n image = caffe.io.load_image(string_buffer)\n\n except Exception as err:\n # For any exception we encounter in reading the image, we will just\n # not continue.\n logging.info('URL Image open error: %s', err)\n return flask.render_template(\n 'index.html', has_result=True,\n result=(False, 'Cannot open image from URL.')\n )\n\n logging.info('Image: %s', imageurl)\n result = app.clf.classify_image(image)\n return flask.render_template(\n 'index.html', has_result=True, result=result, imagesrc=imageurl)\n\n\[email protected]('/classify_upload', methods=['POST'])\ndef classify_upload():\n try:\n # We will save the file to disk for possible data collection.\n imagefile = 
flask.request.files['imagefile']\n filename_ = str(datetime.datetime.now()).replace(' ', '_') + \\\n werkzeug.secure_filename(imagefile.filename)\n filename = os.path.join(UPLOAD_FOLDER, filename_)\n imagefile.save(filename)\n logging.info('Saving to %s.', filename)\n image = exifutil.open_oriented_im(filename)\n\n except Exception as err:\n logging.info('Uploaded image open error: %s', err)\n return flask.render_template(\n 'index.html', has_result=True,\n result=(False, 'Cannot open uploaded image.')\n )\n\n result = app.clf.classify_image(image)\n return flask.render_template(\n 'index.html', has_result=True, result=result,\n imagesrc=embed_image_html(image)\n )\n\n\ndef embed_image_html(image):\n \"\"\"Creates an image embedded in HTML base64 format.\"\"\"\n image_pil = Image.fromarray((255 * image).astype('uint8'))\n image_pil = image_pil.resize((256, 256))\n string_buf = StringIO.StringIO()\n image_pil.save(string_buf, format='png')\n data = string_buf.getvalue().encode('base64').replace('\\n', '')\n return 'data:image/png;base64,' + data\n\n\ndef allowed_file(filename):\n return (\n '.' in filename and\n filename.rsplit('.', 1)[1] in ALLOWED_IMAGE_EXTENSIONS\n )\n\n\nclass ImagenetClassifier(object):\n default_args = {\n 'model_def_file': (\n '{}/models/bvlc_reference_caffenet/deploy.prototxt'.format(REPO_DIRNAME)),\n 'pretrained_model_file': (\n '{}/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'.format(REPO_DIRNAME)),\n 'mean_file': (\n '{}/python/caffe/imagenet/ilsvrc_2012_mean.npy'.format(REPO_DIRNAME)),\n 'class_labels_file': (\n '{}/data/ilsvrc12/synset_words.txt'.format(REPO_DIRNAME)),\n 'bet_file': (\n '{}/data/ilsvrc12/imagenet.bet.pickle'.format(REPO_DIRNAME)),\n }\n for key, val in default_args.iteritems():\n if not os.path.exists(val):\n raise Exception(\n \"File for {} is missing. 
Should be at: {}\".format(key, val))\n default_args['image_dim'] = 256\n default_args['raw_scale'] = 255.\n\n def __init__(self, model_def_file, pretrained_model_file, mean_file,\n raw_scale, class_labels_file, bet_file, image_dim, gpu_mode):\n logging.info('Loading net and associated files...')\n if gpu_mode:\n caffe.set_mode_gpu()\n else:\n caffe.set_mode_cpu()\n self.net = caffe.Classifier(\n model_def_file, pretrained_model_file,\n image_dims=(image_dim, image_dim), raw_scale=raw_scale,\n mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0)\n )\n\n with open(class_labels_file) as f:\n labels_df = pd.DataFrame([\n {\n 'synset_id': l.strip().split(' ')[0],\n 'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]\n }\n for l in f.readlines()\n ])\n self.labels = labels_df.sort('synset_id')['name'].values\n\n self.bet = cPickle.load(open(bet_file))\n # A bias to prefer children nodes in single-chain paths\n # I am setting the value to 0.1 as a quick, simple model.\n # We could use better psychological models here...\n self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1\n\n def classify_image(self, image):\n try:\n starttime = time.time()\n scores = self.net.predict([image], oversample=True).flatten()\n endtime = time.time()\n\n indices = (-scores).argsort()[:5]\n predictions = self.labels[indices]\n\n # In addition to the prediction text, we will also produce\n # the length for the progress bar visualization.\n meta = [\n (p, '%.5f' % scores[i])\n for i, p in zip(indices, predictions)\n ]\n logging.info('result: %s', str(meta))\n\n # Compute expected information gain\n expected_infogain = np.dot(\n self.bet['probmat'], scores[self.bet['idmapping']])\n expected_infogain *= self.bet['infogain']\n\n # sort the scores\n infogain_sort = expected_infogain.argsort()[::-1]\n bet_result = [(self.bet['words'][v], '%.5f' % expected_infogain[v])\n for v in infogain_sort[:5]]\n logging.info('bet result: %s', str(bet_result))\n\n return (True, meta, bet_result, '%.3f' % (endtime - starttime))\n\n except Exception as err:\n logging.info('Classification error: %s', err)\n return (False, 'Something went wrong when classifying the '\n 'image. Maybe try another one?')\n\n\ndef start_tornado(app, port=5000):\n http_server = tornado.httpserver.HTTPServer(\n tornado.wsgi.WSGIContainer(app))\n http_server.listen(port)\n print(\"Tornado server starting on port {}\".format(port))\n tornado.ioloop.IOLoop.instance().start()\n\n\ndef start_from_terminal(app):\n \"\"\"\n Parse command line options and start the server.\n \"\"\"\n parser = optparse.OptionParser()\n parser.add_option(\n '-d', '--debug',\n help=\"enable debug mode\",\n action=\"store_true\", default=False)\n parser.add_option(\n '-p', '--port',\n help=\"which port to serve content on\",\n type='int', default=5000)\n parser.add_option(\n '-g', '--gpu',\n help=\"use gpu mode\",\n action='store_true', default=False)\n\n opts, args = parser.parse_args()\n ImagenetClassifier.default_args.update({'gpu_mode': opts.gpu})\n\n # Initialize classifier + warm start by forward for allocation\n app.clf = ImagenetClassifier(**ImagenetClassifier.default_args)\n app.clf.net.forward()\n\n if opts.debug:\n app.run(debug=True, host='0.0.0.0', port=opts.port)\n else:\n start_tornado(app, opts.port)\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n if not os.path.exists(UPLOAD_FOLDER):\n os.makedirs(UPLOAD_FOLDER)\n start_from_terminal(app)\n"
] |
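`embed_image_html` in the code above relies on Python 2's `str.encode('base64')` and `cStringIO`. A minimal Python 3 sketch of the same base64 `<img>` embedding (toy random image; the `base64`/`io` module choices are mine, not the repo's):

import base64, io
import numpy as np
from PIL import Image

image = np.random.rand(64, 64, 3)  # stand-in for the loaded image, floats in [0, 1]
image_pil = Image.fromarray((255 * image).astype('uint8')).resize((256, 256))
buf = io.BytesIO()
image_pil.save(buf, format='png')
data = base64.b64encode(buf.getvalue()).decode('ascii')
html_src = 'data:image/png;base64,' + data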
[
[
"numpy.dot",
"numpy.array",
"numpy.load"
]
] |
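The `numpy.dot` entry above corresponds to the expected-information-gain ranking in `classify_image`: leaf-class scores are projected onto a category hierarchy and the highest-gain nodes are reported. A toy sketch of that computation (all arrays are random stand-ins for the pickled `bet` data):

import numpy as np

scores = np.array([0.7, 0.2, 0.1])   # leaf-class probabilities
idmapping = np.array([0, 1, 2])      # leaf index -> score column
probmat = np.random.rand(4, 3)       # hierarchy-node x leaf probabilities
infogain = np.random.rand(4)         # per-node information gain

expected_infogain = np.dot(probmat, scores[idmapping]) * infogain
top_nodes = expected_infogain.argsort()[::-1][:2]  # highest-gain nodes first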
pkgw/neurosynchro
|
[
"f21d198e01146988944728231417ff601b706379"
] |
[
"neurosynchro/cli.py"
] |
[
"#! /usr/bin/env python\n# -*- mode: python; coding: utf-8 -*-\n# Copyright 2017-2018 Peter Williams and collaborators.\n# Licensed under the MIT License.\n\n\"\"\"Command-line access to neurosynchro functionality.\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse, sys\nimport numpy as np\nfrom pwkit.cli import die\nfrom pwkit.io import Path\nimport pytoml\n\nfrom . import basic_load\n\n\n\ndef _hack_pytoml():\n \"\"\"pytoml will stringify floats using repr, which is ugly and fails outright with\n very small values (i.e. 1e-30 becomes \"0.000....\"). Here we hack it to use\n exponential notation if needed.\n\n \"\"\"\n from pytoml import writer\n orig_format_value = writer._format_value\n\n if not getattr(orig_format_value, '_neurosynchro_hack_applied', False):\n def better_format_value(v):\n if isinstance(v, float):\n if not np.isfinite(v):\n raise ValueError(\"{0} is not a valid TOML value\".format(v))\n return '%.16g' % v\n return orig_format_value(v)\n\n better_format_value._neurosynchro_hack_applied = True\n writer._format_value = better_format_value\n\n_hack_pytoml()\n\n\n# The \"init-nndir\" subcommand\n\ndef make_nninit_parser(ap=None):\n if ap is None:\n ap = argparse.ArgumentParser()\n\n ap.add_argument('nndir', type=str, metavar='<nndir>',\n help='The name of the output neural-net directory to create')\n return ap\n\n\nNNINIT_DEFAULT_CONFIG = {\n 'params': [\n dict(\n name = 's',\n maptype = 'log',\n ),\n\n dict(\n name = 'theta',\n maptype = 'direct',\n phys_bounds_mode = 'theta',\n out_of_sample = 'clip',\n )\n ],\n\n 'results': [\n dict(\n name = 'j_I',\n maptype = 'log',\n trainer = 'generic',\n ),\n\n dict(\n name = 'alpha_I',\n maptype = 'log',\n trainer = 'generic',\n ),\n\n dict(\n name = 'rho_Q',\n maptype = 'abs_log',\n trainer = 'generic',\n ),\n\n dict(\n name = 'rho_V',\n maptype = 'log',\n trainer = 'generic',\n ),\n\n dict(\n name = 'j_frac_pol',\n maptype = 'logit',\n trainer = 'generic',\n ),\n\n dict(\n name = 'alpha_frac_pol',\n maptype = 'logit',\n trainer = 'generic',\n ),\n\n dict(\n name = 'j_V_share',\n maptype = 'logit',\n trainer = 'generic',\n ),\n\n dict(\n name = 'alpha_V_share',\n maptype = 'logit',\n trainer = 'generic',\n ),\n\n dict(\n name = 'rho_Q_sign',\n maptype = 'direct',\n normalization_mode = 'unit_interval',\n x_mean = -1,\n x_std = 2,\n phys_min = -1,\n phys_max = 1,\n norm_min = 0,\n norm_max = 1,\n ),\n ]\n}\n\ndef nninit_cli(settings):\n nndir = Path(settings.nndir)\n\n try:\n nndir.mkdir()\n except OSError as e:\n if e.errno == 17:\n die('directory \\\"%s\\\" already exists' % settings.nndir)\n raise\n\n cfg_path = nndir / 'nn_config.toml'\n with cfg_path.open('wt') as f:\n pytoml.dump(f, NNINIT_DEFAULT_CONFIG)\n\n\n# The \"lock-domain-range\" subcommand\n\ndef make_ldr_parser(ap=None):\n if ap is None:\n ap = argparse.ArgumentParser()\n\n ap.add_argument('datadir', type=str, metavar='<datadir>',\n help='The path to the input training data directory.')\n ap.add_argument('nndir', type=str, metavar='<nndir>',\n help='The path to the output neural-net directory.')\n return ap\n\n\ndef lock_domain_range_cli(settings):\n from . 
import DomainRange\n\n # Load samples\n df = basic_load(settings.datadir)\n\n # Load skeleton config\n cfg_path = Path(settings.nndir) / 'nn_config.toml'\n with cfg_path.open('rt') as f:\n info = pytoml.load(f)\n\n # Turn into processed DomainRange object\n dr = DomainRange.from_info_and_samples(info, df)\n\n # Update config and rewrite\n dr.into_info(info)\n\n with cfg_path.open('wt') as f:\n pytoml.dump(f, info)\n\n\n# The \"summarize\" subcommand\n\ndef summarize(datadir):\n df = basic_load(datadir)\n\n # Report stuff.\n\n print('Columns:', ' '.join(df.columns))\n print('Number of rows:', df.shape[0])\n print('Total number of NaNs:', np.isnan(df.values).sum())\n print('Number of rows with NaNs:', (np.isnan(df.values).sum(axis=1) > 0).sum())\n\n for c in df.columns:\n r = df[c]\n print()\n print('Column %s:' % c)\n print(' Number of NaNs:', np.isnan(r).sum())\n print(' Non-NaN max:', np.nanmax(r))\n print(' Non-NaN min:', np.nanmin(r))\n print(' Nonnegative:', (r >= 0).sum())\n print(' Nonpositive:', (r <= 0).sum())\n\n\ndef make_summarize_parser(ap=None):\n if ap is None:\n ap = argparse.ArgumentParser()\n\n ap.add_argument('datadir', type=str, metavar='<datadir>',\n help='The path to the sample data directory.')\n return ap\n\n\ndef summarize_cli(settings):\n summarize(settings.datadir)\n\n\n# The \"transform\" subcommand\n\ndef transform(datadir):\n \"\"\"This task takes the raw synchrotron coefficients output by rimphony and\n transforms them into a format that better respects the physical\n constraints of the problem.\n\n \"\"\"\n import pandas as pd\n\n df = basic_load(datadir)\n n = df.shape[0]\n\n df = df.dropna()\n print('Dropping due to NaNs:', n - df.shape[0], file=sys.stderr)\n\n bad = (df['j_I(res)'] <= 0)\n mask = bad\n print('Rows with bad J_I:', bad.sum(), file=sys.stderr)\n\n bad = (df['alpha_I(res)'] <= 0)\n mask |= bad\n print('Rows with bad a_I:', bad.sum(), file=sys.stderr)\n\n bad = (df['j_Q(res)'] >= 0)\n mask |= bad\n print('Rows with bad J_Q:', bad.sum(), file=sys.stderr)\n\n bad = (df['alpha_Q(res)'] >= 0)\n mask |= bad\n print('Rows with bad a_Q:', bad.sum(), file=sys.stderr)\n\n bad = (df['j_V(res)'] <= 0)\n mask |= bad\n print('Rows with bad J_V:', bad.sum(), file=sys.stderr)\n\n bad = (df['alpha_V(res)'] <= 0)\n mask |= bad\n print('Rows with bad a_V:', bad.sum(), file=sys.stderr)\n\n # This cut isn't physically motivated, but under the current rimphony\n # model, f_V is always positive.\n bad = (df['rho_V(res)'] <= 0)\n mask |= bad\n print('Rows with bad f_V:', bad.sum(), file=sys.stderr)\n\n n = df.shape[0]\n df = df[~mask]\n print('Dropped due to first-pass filters:', n - df.shape[0], file=sys.stderr)\n\n j_pol = np.sqrt(df['j_Q(res)']**2 + df['j_V(res)']**2)\n a_pol = np.sqrt(df['alpha_Q(res)']**2 + df['alpha_V(res)']**2)\n\n df['j_frac_pol(res)'] = j_pol / df['j_I(res)']\n bad = (df['j_frac_pol(res)'] < 0) | (df['j_frac_pol(res)'] > 1)\n mask = bad\n print('Rows with bad j_frac_pol:', bad.sum(), file=sys.stderr)\n\n df['alpha_frac_pol(res)'] = a_pol / df['alpha_I(res)']\n bad = (df['alpha_frac_pol(res)'] < 0) | (df['alpha_frac_pol(res)'] > 1)\n mask |= bad\n print('Rows with bad alpha_frac_pol:', bad.sum(), file=sys.stderr)\n\n n = df.shape[0]\n df = df[~mask]\n print('Dropped due to second-pass filters:', n - df.shape[0], file=sys.stderr)\n\n df['j_V_share(res)'] = df['j_V(res)'] / j_pol\n df['alpha_V_share(res)'] = df['alpha_V(res)'] / a_pol\n\n # I used to scale rho_{Q,V} by alpha_I, but these values are often\n # strongly different. 
(And, judging by the commentary in Heyvaerts, I\n # think this is probably OK and not a sign of a numerics problem.) So we\n # just pass those columns on through like {j,alpha}_I. However, in a bit\n # of a hack, we add a column giving the sign of the rho_Q column, since in\n # the \"pitchy kappa\" distribution we have a non-negligible number of\n # negative rho_Q values *plus* a large dynamic range on both sides of\n # zero. Adding this column lets us break the neural networking into two\n # pieces in a way that doesn't involve a bunch of complicated\n # rearchitecting of my parameter code.\n df['rho_Q_sign(res)'] = np.sign(df['rho_Q(res)'])\n\n print('Final row count:', df.shape[0], file=sys.stderr)\n\n for c in 'j_Q alpha_Q j_V alpha_V'.split():\n del df[c + '(res)']\n\n df.to_csv(\n sys.stdout,\n sep = '\\t',\n index = False\n )\n\n\ndef make_transform_parser(ap=None):\n if ap is None:\n ap = argparse.ArgumentParser()\n\n ap.add_argument('datadir', type=str, metavar='<datadir>',\n help='The path to the sample data directory.')\n return ap\n\n\ndef transform_cli(settings):\n transform(settings.datadir)\n\n\n# The entrypoint\n\ndef entrypoint(argv):\n ap = argparse.ArgumentParser(prog='neurosynchro')\n subparsers = ap.add_subparsers(\n dest = 'subcommand',\n metavar = '<command>',\n help = 'The sub-command to invoke'\n )\n\n make_nninit_parser(subparsers.add_parser(\n 'init-nndir',\n help = 'Initialize a directory to save the neural network training data'\n ))\n\n make_ldr_parser(subparsers.add_parser(\n 'lock-domain-range',\n help = 'Find the domain and range of the training set'\n ))\n\n make_summarize_parser(subparsers.add_parser(\n 'summarize',\n help = 'Print summary statistics about the training set'\n ))\n\n from .grtrans import make_parser as make_grtrans_parser\n make_grtrans_parser(subparsers.add_parser(\n 'test-grtrans',\n help = 'Do a test integration with grtrans'\n ))\n\n make_transform_parser(subparsers.add_parser(\n 'transform',\n help = 'Transform the training set into Neurosynchro\\'s internal parametrization',\n description = 'Transform the training set into Neurosynchro\\'s internal parametrization.',\n epilog = '''The training set can have arbitrary input parameters, but should have eight\noutput parameters named `j_I`, `j_Q`, `j_V`, `alpha_I`, `alpha_Q`, `alpha_V`,\n`rho_Q`, `rho_V` -- these are the standard Stokes-basis radiative transfer\ncoefficients. 
The transformed training set will be printed to standard output,\nso you almost surely want to redirect the output of this program to a file.'''\n ))\n\n from .train import make_parser as make_train_parser\n make_train_parser(subparsers.add_parser(\n 'train',\n help = 'Train one of the neural networks'\n ))\n\n settings = ap.parse_args(argv[1:])\n\n if settings.subcommand is None:\n die('you must supply a subcommand; run with \"--help\" for help')\n\n if settings.subcommand == 'init-nndir':\n nninit_cli(settings)\n elif settings.subcommand == 'lock-domain-range':\n lock_domain_range_cli(settings)\n elif settings.subcommand == 'summarize':\n summarize_cli(settings)\n elif settings.subcommand == 'test-grtrans':\n from .grtrans import grtrans_cli\n grtrans_cli(settings)\n elif settings.subcommand == 'train':\n from .train import train_cli\n train_cli(settings)\n elif settings.subcommand == 'transform':\n transform_cli(settings)\n else:\n # argparse will error out if it the user gives an unrecognized\n # subcommand, so if we get here it's an internal bug\n assert False, 'internal bug: forgot to handle subcommand!'\n\n\ndef main():\n import sys\n from pwkit import cli\n\n cli.unicode_stdio()\n cli.propagate_sigint()\n cli.backtrace_on_usr1()\n entrypoint(sys.argv)\n"
] |
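`_hack_pytoml` in the code above swaps pytoml's repr-based float stringification for `'%.16g'`, which keeps very small values in exponential notation. A standalone sketch of just that formatting rule (the function name is mine):

import numpy as np

def format_toml_float(v):
    if not np.isfinite(v):
        raise ValueError("{0} is not a valid TOML value".format(v))
    return '%.16g' % v

for v in (1e-30, 0.1, 12345.6789):
    print(format_toml_float(v))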
[
[
"numpy.nanmax",
"numpy.sqrt",
"numpy.isfinite",
"numpy.isnan",
"numpy.nanmin",
"numpy.sign"
]
] |
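Most of the numpy calls listed above come from the `summarize` subcommand's per-column report. A minimal sketch of the same NaN/extremum summary on a toy DataFrame (column names are illustrative):

import numpy as np
import pandas as pd

df = pd.DataFrame({'s': [1.0, 10.0, np.nan], 'theta': [0.1, 0.5, 1.2]})
print('Rows with NaNs:', (np.isnan(df.values).sum(axis=1) > 0).sum())
for c in df.columns:
    r = df[c]
    print(c, 'NaNs:', np.isnan(r).sum(), 'max:', np.nanmax(r), 'min:', np.nanmin(r))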
ibeauregard/tensorflow
|
[
"8e2be96fdc388049efe4bdbfa7a92e139ed0f4cb",
"20bc44d8fc2feee4c63dd90e49dbcdf34ed6564c"
] |
[
"tensorflow/python/distribute/coordinator/cluster_coordinator.py",
"tensorflow/lite/python/lite_v2_test.py"
] |
[
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Module for `ClusterCoordinator` and relevant cluster-worker related library.\n\nThis is currently under development and the API is subject to change.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport enum\nimport functools\nimport os\nimport re\nimport sys\nimport threading\nimport time\nimport weakref\nfrom six.moves import queue\n\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import parameter_server_strategy_v2\nfrom tensorflow.python.distribute.coordinator import metric_utils\nfrom tensorflow.python.eager import cancellation\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import executor\nfrom tensorflow.python.eager import function as tf_function\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Maximum time for failed worker to come back is 1 hour\n_WORKER_MAXIMUM_RECOVERY_SEC = 3600\n\n# Maximum size for queued closures, \"infinite\" if set to 0.\n# When the maximum queue size is reached, further schedule calls will become\n# blocking until some previously queued closures are executed on workers.\n# Note that using an \"infinite\" queue size can take a non-trivial portion of\n# memory, and even lead to coordinator OOM. Modify the size to a smaller value\n# for coordinator with constrained memory resource (only recommended for\n# advanced users). Also used in unit tests to ensure the correctness when the\n# queue is full.\n_CLOSURE_QUEUE_MAX_SIZE = 256 * 1024\n\n# RPC error message from PS\n_RPC_ERROR_FROM_PS = \"GRPC error information from remote target /job:ps\"\n\n# InvalidArgumentError (unknown device) will not have \"GRPC error...\" string.\n_JOB_WORKER_STRING_IDENTIFIER = \"/job:worker\"\n\n\nclass _RemoteValueStatus(enum.Enum):\n \"\"\"The status of a `RemoteValue` object.\n\n A `RemoteValue` object can have three states:\n 1) not ready: no value, no non-retryable error and not aborted;\n 2) aborted: i.e. the execution of function was aborted because of task\n failure, but can be retried;\n 3) ready: i.e. has value or has non-tryable error;\n\n The initial state of a `RemoteValue` is \"not ready\". When its corresponding\n closure has\n been executed at least once, it will become aborted or ready. 
The state\n transitions are:\n 1) not ready -> 2) aborted:\n when the corresponding closure is aborted due to worker failure, and the\n worker failure is not immediately handled.\n 1) not ready -> 3) ready:\n when the corresponding closure has been executed successfully.\n 2) aborted -> 3) ready:\n when the `RemoteValue` is rebuilt by rerunning the corresponding closure\n and the closure has been executed successfully.\n 3) ready -> 2) aborted:\n when the corresponding closure had been executed successfully but later\n the corresponding remote worker failed. This is currently only implemented\n for resource `RemoteValue` like iterators.\n \"\"\"\n NOT_READY = \"NOT_READY\"\n ABORTED = \"ABORTED\"\n READY = \"READY\"\n\n\n@tf_export(\"distribute.experimental.coordinator.RemoteValue\", v1=[])\nclass RemoteValue(object):\n \"\"\"An asynchronously available value of a scheduled function.\n\n This class is used as the return value of\n `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule` where\n the underlying value becomes available at a later time once the function has\n been executed.\n\n Using `tf.distribute.experimental.coordinator.RemoteValue` as an input to\n a subsequent function scheduled with\n `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule` is\n currently not supported.\n\n Example:\n\n ```python\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver=...)\n coordinator = (\n tf.distribute.experimental.coordinator.ClusterCoordinator(strategy))\n\n with strategy.scope():\n v1 = tf.Variable(initial_value=0.0)\n v2 = tf.Variable(initial_value=1.0)\n\n @tf.function\n def worker_fn():\n v1.assign_add(0.1)\n v2.assign_sub(0.2)\n return v1.read_value() / v2.read_value()\n\n result = coordinator.schedule(worker_fn)\n # Note that `fetch()` gives the actual result instead of a `tf.Tensor`.\n assert result.fetch() == 0.125\n\n for _ in range(10):\n # `worker_fn` will be run on arbitrary workers that are available. 
The\n # `result` value will be available later.\n result = coordinator.schedule(worker_fn)\n ```\n \"\"\"\n\n def fetch(self):\n \"\"\"Wait for the result of `RemoteValue` to be ready and return the result.\n\n This makes the value concrete by copying the remote value to local.\n\n Returns:\n The actual output of the `tf.function` associated with this `RemoteValue`,\n previously by a\n `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule` call.\n This can be a single value, or a structure of values, depending on the\n output of the `tf.function`.\n\n Raises:\n tf.errors.CancelledError: If the function that produces this `RemoteValue`\n is aborted or cancelled due to failure.\n \"\"\"\n raise NotImplementedError(\"Must be implemented in subclasses.\")\n\n\nclass RemoteValueImpl(RemoteValue):\n \"\"\"Implementation of `RemoteValue`.\"\"\"\n\n def __init__(self, closure, type_spec): # pylint: disable=super-init-not-called\n \"\"\"Initializes a `RemoteValueImpl`.\n\n Args:\n closure: The closure from which the `RemoteValue` is created.\n type_spec: The type spec for this `RemoteValue` which is used to trace\n functions that take this `RemoteValue` as input.\n \"\"\"\n self._closure = closure\n self._type_spec = type_spec\n self._value = None\n self._error = None\n self._status_available_event = threading.Event()\n self._status = _RemoteValueStatus.NOT_READY\n\n def _set_aborted(self):\n self._status = _RemoteValueStatus.ABORTED\n self._value = None\n self._error = None\n\n # Wake up any waiting thread and clear the event.\n self._status_available_event.set()\n\n def _rebuild_on(self, worker):\n self._status_available_event.clear()\n # TODO(yuefengz): we may need to rebuild its inputs as well.\n self._closure.execute_on(worker)\n\n def _set_value(self, value):\n self._status = _RemoteValueStatus.READY\n self._value = value\n self._error = None\n self._status_available_event.set()\n\n def _set_error(self, exception):\n self._status = _RemoteValueStatus.READY\n self._value = None\n self._error = exception\n self._status_available_event.set()\n\n def _get_value(self):\n self._status_available_event.wait()\n return self._value\n\n def _get_error(self):\n self._status_available_event.wait()\n return self._error\n\n def fetch(self):\n self._status_available_event.wait()\n if self._status is _RemoteValueStatus.ABORTED:\n raise errors.CancelledError(\n None, None,\n \"The corresponding function is aborted. Please reschedule the \"\n \"function.\")\n if self._error is not None:\n raise self._error # pylint: disable=raising-bad-type\n else:\n return nest.map_structure(\n lambda x: x.numpy() if hasattr(x, \"numpy\") else x, self._value)\n\n\nclass InputError(Exception):\n\n def __init__(self, original_exception):\n message = (\"Input has an error, the original exception is %r, \"\n \"error message is %s.\" %\n (original_exception, str(original_exception)))\n super().__init__(message)\n\n\ndef _maybe_rebuild_remote_values(worker, structure):\n \"\"\"Attempts to return errors from `RemoteValue`s. 
Rebuilds them if needed.\"\"\"\n errors_in_structure = []\n\n def _get_error(val):\n if isinstance(val, RemoteValue):\n if val._status is _RemoteValueStatus.ABORTED: # pylint: disable=protected-access\n try:\n with worker.failure_handler.wait_on_failure(\n on_recovery_fn=functools.partial(val._rebuild_on, worker), # pylint: disable=protected-access\n worker_device_name=worker.device_name):\n val._rebuild_on(worker) # pylint: disable=protected-access\n except Exception as e: # pylint: disable=broad-except\n val._set_error(e) # pylint: disable=protected-access\n\n error = val._get_error() # pylint: disable=protected-access\n if error:\n errors_in_structure.append(error)\n\n nest.map_structure(_get_error, structure)\n if errors_in_structure:\n return errors_in_structure[0]\n else:\n return None\n\n\ndef _maybe_get_remote_value(val):\n \"\"\"Gets the value of `val` if it is a `RemoteValue`.\"\"\"\n if isinstance(val, RemoteValue):\n error = val._get_error() # pylint: disable=protected-access\n if error:\n raise AssertionError(\n \"RemoteValue doesn't have a value because it has errors.\")\n else:\n return val._get_value() # pylint: disable=protected-access\n else:\n return val\n\n\ndef _maybe_as_type_spec(val):\n if isinstance(val, RemoteValue):\n if val._type_spec is None: # pylint: disable=protected-access\n raise ValueError(\"Output of a scheduled function that is not \"\n \"tf.function cannot be the input of another function.\")\n return val._type_spec # pylint: disable=protected-access\n else:\n return val\n\n\n@tf_export(\"distribute.experimental.coordinator.PerWorkerValues\", v1=[])\nclass PerWorkerValues(object):\n \"\"\"A container that holds a list of values, one value per worker.\n\n `tf.distribute.experimental.coordinator.PerWorkerValues` contains a collection\n of values, where each of the values is located on its corresponding worker,\n and upon being used as one of the `args` or `kwargs` of\n `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule()`, the\n value specific to a worker will be passed into the function being executed at\n that corresponding worker.\n\n Currently, the only supported path to create an object of\n `tf.distribute.experimental.coordinator.PerWorkerValues` is through calling\n `iter` on a `ClusterCoordinator.create_per_worker_dataset`-returned\n distributed dataset instance. 
The mechanism to create a custom\n `tf.distribute.experimental.coordinator.PerWorkerValues` is not yet supported.\n \"\"\"\n\n def __init__(self, values):\n self._values = tuple(values)\n\n\ndef _select_worker_slice(worker_id, structured):\n \"\"\"Selects the worker slice of each of the items in `structured`.\"\"\"\n\n def _get(x):\n return x._values[worker_id] if isinstance(x, PerWorkerValues) else x # pylint: disable=protected-access\n\n return nest.map_structure(_get, structured)\n\n\ndef _disallow_remote_value_as_input(structured):\n \"\"\"Raises if any element of `structured` is a RemoteValue.\"\"\"\n\n def _raise_if_remote_value(x):\n if isinstance(x, RemoteValue):\n raise ValueError(\n \"`tf.distribute.experimental.coordinator.RemoteValue` used \"\n \"as an input to scheduled function is not yet \"\n \"supported.\")\n\n nest.map_structure(_raise_if_remote_value, structured)\n\n\nclass Closure(object):\n \"\"\"Hold a function to be scheduled and its arguments.\"\"\"\n\n def __init__(self, function, cancellation_mgr, args=None, kwargs=None):\n if not callable(function):\n raise ValueError(\"Function passed to `ClusterCoordinator.schedule` must \"\n \"be a callable object.\")\n self._args = args or ()\n self._kwargs = kwargs or {}\n\n _disallow_remote_value_as_input(self._args)\n _disallow_remote_value_as_input(self._kwargs)\n\n if isinstance(function, def_function.Function):\n replica_args = _select_worker_slice(0, self._args)\n replica_kwargs = _select_worker_slice(0, self._kwargs)\n\n # Note: no need to handle function registration failure since this kind of\n # failure will not raise exceptions as designed in the runtime. The\n # coordinator has to rely on subsequent operations that raise to catch\n # function registration failure.\n\n # Record the function tracing overhead. Note that we pass in the tracing\n # count of the def_function.Function as a state tracker, so that metrics\n # will only record the time for actual function tracing (i.e., excluding\n # function cache lookups).\n with metric_utils.monitored_timer(\n \"function_tracing\", state_tracker=function._get_tracing_count): # pylint: disable=protected-access\n self._concrete_function = function.get_concrete_function(\n *nest.map_structure(_maybe_as_type_spec, replica_args),\n **nest.map_structure(_maybe_as_type_spec, replica_kwargs))\n elif isinstance(function, tf_function.ConcreteFunction):\n self._concrete_function = function\n\n if hasattr(self, \"_concrete_function\"):\n # If we have a concrete function, we get to retrieve the output type spec\n # via the structured_output.\n output_type_spec = func_graph.convert_structure_to_signature(\n self._concrete_function.structured_outputs)\n self._function = cancellation_mgr.get_cancelable_function(\n self._concrete_function)\n else:\n # Otherwise (i.e. what is passed in is a regular python function), we have\n # no such information.\n output_type_spec = None\n self._function = function\n\n self.output_remote_value = RemoteValueImpl(self, output_type_spec)\n\n def mark_cancelled(self):\n self.output_remote_value._set_error( # pylint: disable=protected-access\n errors.CancelledError(\n None, None, \"The corresponding function is \"\n \"cancelled. 
Please reschedule the function.\"))\n\n def execute_on(self, worker):\n \"\"\"Executes the closure on the given worker.\n\n Args:\n worker: a `Worker` object.\n \"\"\"\n replica_args = _select_worker_slice(worker.worker_index, self._args)\n replica_kwargs = _select_worker_slice(worker.worker_index, self._kwargs)\n\n e = (\n _maybe_rebuild_remote_values(worker, replica_args) or\n _maybe_rebuild_remote_values(worker, replica_kwargs))\n if e:\n if not isinstance(e, InputError):\n e = InputError(e)\n self.output_remote_value._set_error(e) # pylint: disable=protected-access\n return\n\n with ops.device(worker.device_name):\n with context.executor_scope(worker.executor):\n with metric_utils.monitored_timer(\"closure_execution\"):\n output_value = self._function(\n *nest.map_structure(_maybe_get_remote_value, replica_args),\n **nest.map_structure(_maybe_get_remote_value, replica_kwargs))\n self.output_remote_value._set_value(output_value) # pylint: disable=protected-access\n\n\nclass _CoordinatedClosureQueue(object):\n \"\"\"Manage a queue of closures, inflight count and errors from execution.\n\n This class is thread-safe.\n \"\"\"\n\n def __init__(self):\n # `self._inflight_closure_count` only tracks the number of inflight closures\n # that are \"in generation\". Once an error occurs, error generation is\n # incremented and all subsequent arriving closures (from inflight) are\n # considered \"out of generation\".\n self._inflight_closure_count = 0\n\n self._queue_lock = threading.Lock()\n\n # Condition indicating that all pending closures (either queued or inflight)\n # have been processed, failed, or cancelled.\n self._stop_waiting_condition = threading.Condition(self._queue_lock)\n\n # Condition indicating that an item becomes available in queue (not empty).\n self._closures_queued_condition = threading.Condition(self._queue_lock)\n\n # Condition indicating that a queue slot becomes available (not full).\n # Note that even with \"infinite\" queue size, there is still a \"practical\"\n # size limit for the queue depending on host memory capacity, and thus the\n # queue will eventually become full with a lot of enqueued closures.\n self._queue_free_slot_condition = threading.Condition(self._queue_lock)\n\n # Condition indicating there is no inflight closures.\n self._no_inflight_closure_condition = threading.Condition(self._queue_lock)\n\n # Use to cancel in-flight closures.\n self._cancellation_mgr = cancellation.CancellationManager()\n\n if _CLOSURE_QUEUE_MAX_SIZE <= 0:\n logging.warning(\n \"In a `ClusterCoordinator`, creating an infinite closure queue can \"\n \"consume a significant amount of memory and even lead to OOM.\")\n self._queue = queue.Queue(maxsize=_CLOSURE_QUEUE_MAX_SIZE)\n self._error = None\n\n # The following is a lock to make sure when `wait` is called and before it\n # returns no `put` can be executed during this period. It is because `wait`\n # won't know what to do with newly put closures. 
This lock adds a cutoff\n    # for `wait` so that closures put into the queue during the waiting period\n    # do not become this `wait` call's responsibility.\n    #\n    # We cannot reuse `self._queue_lock` since when `wait` waits for a\n    # condition, `self._queue_lock` will be released.\n    #\n    # We don't use a reader/writer's lock on purpose to reduce the complexity\n    # of the code.\n    self._put_wait_lock = threading.Lock()\n\n  def _cancel_all_closures(self):\n    \"\"\"Clears the queue and marks remaining closures as cancelled.\n\n    This method expects self._queue_lock to be held prior to entry.\n    \"\"\"\n    self._cancellation_mgr.start_cancel()\n    while self._inflight_closure_count > 0:\n      self._no_inflight_closure_condition.wait()\n    while True:\n      try:\n        closure = self._queue.get(block=False)\n        self._queue_free_slot_condition.notify()\n        closure.mark_cancelled()\n      except queue.Empty:\n        break\n    # The cancellation manager cannot be reused once cancelled. After all\n    # closures (queued or inflight) are cleaned up, recreate the cancellation\n    # manager with clean state.\n    # Note on thread-safety: this is triggered when one of these\n    # ClusterCoordinator APIs is called: `schedule`, `wait`, and `done`. At the\n    # same time, no new closures can be constructed (which reads the\n    # _cancellation_mgr to get cancellable functions).\n    self._cancellation_mgr = cancellation.CancellationManager()\n\n  def _raise_if_error(self):\n    \"\"\"Raises the error if one exists.\n\n    If an error exists, this cancels the closures in the queue, raises the\n    error, and clears it.\n\n    This method expects self._queue_lock to be held prior to entry.\n    \"\"\"\n    if self._error:\n      logging.error(\"Start cancelling closures due to error %r: %s\",\n                    self._error, self._error)\n      self._cancel_all_closures()\n      try:\n        raise self._error  # pylint: disable=raising-bad-type\n      finally:\n        self._error = None\n\n  def put(self, closure):\n    \"\"\"Puts a closure into the queue for later execution.\n\n    If `mark_failed` was called before `put`, the error from the first\n    invocation of `mark_failed` will be raised.\n\n    Args:\n      closure: The `Closure` to put into the queue.\n    \"\"\"\n    with self._put_wait_lock, self._queue_lock:\n      self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())\n      self._queue.put(closure, block=False)\n      self._raise_if_error()\n      self._closures_queued_condition.notify()\n\n  def get(self, timeout=None):\n    \"\"\"Returns a closure from the queue to be executed.\"\"\"\n    with self._queue_lock:\n      while self._queue.empty():\n        if not self._closures_queued_condition.wait(timeout=timeout):\n          return None\n      closure = self._queue.get(block=False)\n      self._queue_free_slot_condition.notify()\n      self._inflight_closure_count += 1\n      return closure\n\n  def mark_finished(self):\n    \"\"\"Lets the queue know that a closure has been successfully executed.\"\"\"\n    with self._queue_lock:\n      if self._inflight_closure_count < 1:\n        raise AssertionError(\"There are no inflight closures to mark_finished.\")\n      self._inflight_closure_count -= 1\n      if self._inflight_closure_count == 0:\n        self._no_inflight_closure_condition.notifyAll()\n      if self._queue.empty() and self._inflight_closure_count == 0:\n        self._stop_waiting_condition.notifyAll()\n\n  def put_back(self, closure):\n    \"\"\"Puts the closure back into the queue as it was not properly executed.\"\"\"\n    with self._queue_lock:\n      if self._inflight_closure_count < 1:\n        raise AssertionError(\"There are no inflight closures to put_back.\")\n      if self._error:\n        closure.mark_cancelled()\n      else:\n        
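# The queue may be full at this point, so wait for a free slot instead\n        # of dropping the closure that is being returned to the queue.\n        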
self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())\n        self._queue.put(closure, block=False)\n        self._closures_queued_condition.notify()\n      self._inflight_closure_count -= 1\n      if self._inflight_closure_count == 0:\n        self._no_inflight_closure_condition.notifyAll()\n\n  def wait(self, timeout=None):\n    \"\"\"Waits for all closures to be finished before returning.\n\n    If `mark_failed` was called before or during `wait`, the error from the\n    first invocation of `mark_failed` will be raised.\n\n    Args:\n      timeout: A float specifying a timeout for the wait in seconds.\n\n    Returns:\n      True unless the given timeout expired, in which case it returns False.\n    \"\"\"\n    with self._put_wait_lock, self._queue_lock:\n      while (not self._error and\n             (not self._queue.empty() or self._inflight_closure_count > 0)):\n        if not self._stop_waiting_condition.wait(timeout=timeout):\n          return False\n      self._raise_if_error()\n      return True\n\n  def mark_failed(self, e):\n    \"\"\"Sets the error and unblocks any wait() call.\"\"\"\n    with self._queue_lock:\n      # TODO(yuefengz): maybe record all failures and give users more\n      # information?\n      if self._inflight_closure_count < 1:\n        raise AssertionError(\"There are no inflight closures to mark_failed.\")\n      if self._error is None:\n        self._error = e\n      self._inflight_closure_count -= 1\n      if self._inflight_closure_count == 0:\n        self._no_inflight_closure_condition.notifyAll()\n      self._stop_waiting_condition.notifyAll()\n\n  def done(self):\n    \"\"\"Returns True if the queue is empty and there are no inflight closures.\n\n    If `mark_failed` was called before `done`, the error from the first\n    invocation of `mark_failed` will be raised.\n    \"\"\"\n    with self._queue_lock:\n      self._raise_if_error()\n      return self._queue.empty() and self._inflight_closure_count == 0\n\n\nclass WorkerPreemptionHandler(object):\n  \"\"\"Handles worker preemptions.\"\"\"\n\n  def __init__(self, server_def, cluster):\n    self._server_def = server_def\n    self._cluster = cluster\n    self._cluster_update_lock = threading.Lock()\n    self._cluster_due_for_update = threading.Event()\n    self._worker_up_cond = threading.Condition(self._cluster_update_lock)\n    threading.Thread(target=self._preemption_handler,\n                     name=\"WorkerPreemptionHandler\",\n                     daemon=True).start()\n\n  def _validate_preemption_failure(self, e):\n    \"\"\"Validates that the given exception represents worker preemption.\"\"\"\n    if _is_worker_failure(e):\n      return\n    raise e\n\n  @contextlib.contextmanager\n  def wait_on_failure(self,\n                      on_failure_fn=None,\n                      on_recovery_fn=None,\n                      worker_device_name=\"(unknown)\"):\n    \"\"\"Catches worker preemption errors and waits until failed workers are back.\n\n    Args:\n      on_failure_fn: an optional function to run if preemption happens.\n      on_recovery_fn: an optional function to run when a worker is recovered\n        from preemption.\n      worker_device_name: the device name of the worker instance that is\n        passing through the failure.\n\n    Yields:\n      None.\n    \"\"\"\n    try:\n      yield\n    except errors.OpError as e:\n      # If the error is due to temporary connectivity issues between the worker\n      # and the ps, put the closure back, ignore the error, and do not mark the\n      # worker as failed.\n      if self._cluster._record_and_ignore_transient_ps_failure(e):  # pylint: disable=protected-access\n        if on_failure_fn:\n          on_failure_fn()\n        return\n\n      self._validate_preemption_failure(e)\n      logging.error(\"Worker %s failed with error: %s\", worker_device_name, e)\n      if on_failure_fn:\n        on_failure_fn()\n\n      with self._cluster_update_lock:\n        self._cluster_due_for_update.set()\n        
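# Block until the preemption handler thread has updated the cluster\n        # and notified recovery, waiting at most _WORKER_MAXIMUM_RECOVERY_SEC\n        # seconds.\n        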
self._worker_up_cond.wait(_WORKER_MAXIMUM_RECOVERY_SEC)\n        logging.info(\"Worker %s has been recovered.\", worker_device_name)\n\n      if on_recovery_fn:\n        with self.wait_on_failure(\n            on_recovery_fn=on_recovery_fn,\n            worker_device_name=worker_device_name):\n          on_recovery_fn()\n\n  def _preemption_handler(self):\n    \"\"\"A loop that handles preemption.\n\n    This loop waits for a worker-preemption signal; upon preemption, it waits\n    until all workers are back and updates the cluster with the restarted\n    workers.\n    \"\"\"\n    while True:\n      self._cluster_due_for_update.wait()\n      with self._cluster_update_lock:\n        try:\n          # TODO(haoyuzhang): support partial cluster recovery\n          logging.info(\"Cluster now being recovered.\")\n          context.context().update_server_def(self._server_def)\n\n          # Cluster updated successfully, clear the update signal, and notify\n          # all workers that they are recovered from failure.\n          logging.info(\"Cluster successfully recovered.\")\n          self._worker_up_cond.notify_all()\n          self._cluster_due_for_update.clear()\n        except Exception as e:  # pylint: disable=broad-except\n          self._validate_preemption_failure(e)\n          # NOTE: Since the first RPC (GetStatus) of update_server_def is\n          # currently blocking by default, an error should only happen if:\n          # (1) more workers failed while waiting for the previous workers to\n          # come back;\n          # (2) a worker failed when exchanging subsequent RPCs after the first\n          # RPC returns.\n          # Consider adding backoff retry logic if we see the error logged\n          # too frequently.\n          logging.error(\"Cluster update failed with error: %s. Retrying...\", e)\n\n\nclass Worker(object):\n  \"\"\"A worker in a cluster.\n\n  Attributes:\n    worker_index: The index of the worker in the cluster.\n    device_name: The device string of the worker, e.g. \"/job:worker/task:1\".\n    executor: The worker's executor for remote function execution.\n    failure_handler: The failure handler used to handle worker preemption\n      failures.\n  \"\"\"\n\n  def __init__(self, worker_index, device_name, cluster):\n    self.worker_index = worker_index\n    self.device_name = device_name\n    self.executor = executor.new_executor(enable_async=False)\n    self.failure_handler = cluster.failure_handler\n    self._cluster = cluster\n    self._resource_remote_value_refs = []\n\n    # Worker threads need to start after `Worker`'s initialization.\n    threading.Thread(target=self._process_queue,\n                     name=\"WorkerClosureProcessingLoop-%d\" % self.worker_index,\n                     daemon=True).start()\n\n  def _set_resources_aborted(self):\n    # TODO(yuefengz): maybe we can query whether a tensor is valid or not\n    # instead of marking a tensor aborted?\n    for weakref_resource in self._resource_remote_value_refs:\n      resource = weakref_resource()\n      if resource:\n        resource._set_aborted()  # pylint: disable=protected-access\n\n  def _set_dead(self):\n    raise NotImplementedError(\"_set_dead is not implemented.\")\n\n  def _process_closure(self, closure):\n    \"\"\"Runs a closure with preemption handling.\"\"\"\n    try:\n      with self._cluster.failure_handler.wait_on_failure(\n          on_failure_fn=lambda: self._cluster._closure_queue.put_back(closure),  # pylint: disable=protected-access\n          on_recovery_fn=self._set_resources_aborted,\n          worker_device_name=self.device_name):\n        closure.execute_on(self)\n        # TODO(yuefengz): we don't have to materialize results every step.\n        with metric_utils.monitored_timer(\"remote_value_fetch\"):\n          closure.output_remote_value.fetch()\n        self._cluster._closure_queue.mark_finished()  # pylint: disable=protected-access\n    except Exception as e:  # pylint: disable=broad-except\n      
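# Errors other than preemption (e.g. raised from user-provided\n      # functions) also land here; they are recorded on the RemoteValue and\n      # fail the closure queue.\n      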
# Avoid logging the derived cancellation error.\n      if not isinstance(e, errors.CancelledError):\n        logging.error(\n            \"/job:worker/task:%d encountered the following error when \"\n            \"processing closure: %r:%s\", self.worker_index, e, e)\n      closure.output_remote_value._set_error(e)  # pylint: disable=protected-access\n      self._cluster._closure_queue.mark_failed(e)  # pylint: disable=protected-access\n\n  def _maybe_delay(self):\n    \"\"\"Delays execution if the corresponding env vars are set.\"\"\"\n    # If the following two environment variables are set, scheduling for\n    # workers will start in a staggered manner. Worker i will wait for\n    # `TF_COORDINATOR_SCHEDULE_START_DELAY` * i seconds, not exceeding\n    # `TF_COORDINATOR_SCHEDULE_START_DELAY_MAX`.\n    delay_secs = int(os.environ.get(\"TF_COORDINATOR_SCHEDULE_START_DELAY\", \"0\"))\n    delay_cap = int(\n        os.environ.get(\"TF_COORDINATOR_SCHEDULE_START_DELAY_MAX\", \"0\"))\n    if delay_cap:\n      delay_secs = min(delay_secs * self.worker_index, delay_cap)\n    if delay_secs > 0:\n      logging.info(\"Worker %d sleeping for %d seconds before running function\",\n                   self.worker_index, delay_secs)\n      time.sleep(delay_secs)\n\n  def _process_queue(self):\n    \"\"\"Function running in a thread to process closure queues.\"\"\"\n    self._maybe_delay()\n    while True:\n      closure = self._cluster._closure_queue.get()  # pylint: disable=protected-access\n      self._process_closure(closure)\n\n  def _create_resource(self, function, args=None, kwargs=None):\n    \"\"\"Synchronously creates a per-worker resource represented by a `RemoteValue`.\n\n    Args:\n      function: the resource function to be run remotely. It should be a\n        `tf.function`, a concrete function, or a Python function.\n      args: positional arguments to be passed to the function.\n      kwargs: keyword arguments to be passed to the function.\n\n    Returns:\n      One or several `RemoteValue` objects, depending on the function's return\n      values.\n    \"\"\"\n    # A note about concurrency: currently all the activities related to the\n    # same worker, such as creating resources, setting resources' aborted\n    # status, and executing closures, happen on the same thread. This allows\n    # us to have simpler concurrency logic.\n    closure = Closure(\n        function,\n        self._cluster._closure_queue._cancellation_mgr,  # pylint: disable=protected-access\n        args=args,\n        kwargs=kwargs)\n    resource_remote_value = closure.output_remote_value\n    self._register_resource(resource_remote_value)\n\n    # The following is a short-term solution to lazily create resources in\n    # parallel.\n    # TODO(b/160343165): we should create resources eagerly, i.e. 
schedule the\n    # resource creation function as soon as users call this method.\n    resource_remote_value._set_aborted()  # pylint: disable=protected-access\n    return resource_remote_value\n\n  def _register_resource(self, resource_remote_value):\n    if not isinstance(resource_remote_value, RemoteValue):\n      raise ValueError(\"Resource being registered is not of type \"\n                       \"`tf.distribute.experimental.coordinator.RemoteValue`.\")\n    self._resource_remote_value_refs.append(weakref.ref(resource_remote_value))\n\n\nclass Cluster(object):\n  \"\"\"A cluster with workers.\n\n  We assume all function errors are fatal; based on this assumption, our\n  error reporting logic is:\n  1) Both `schedule` and `join` can raise a non-retryable error, which is the\n  first error seen by the coordinator from any previously scheduled functions.\n  2) When an error is raised, there is no guarantee on how many previously\n  scheduled functions have been executed; functions that have not been executed\n  will be thrown away and marked as cancelled.\n  3) After an error is raised, the internal state of the error will be cleared.\n  I.e. functions can continue to be scheduled, and subsequent calls of\n  `schedule` or `join` will not raise the same error again.\n\n  Attributes:\n    failure_handler: The failure handler used to handle worker preemption\n      failures.\n    workers: a list of `Worker` objects in the cluster.\n  \"\"\"\n\n  def __init__(self, strategy):\n    \"\"\"Initializes the cluster instance.\"\"\"\n\n    self._num_workers = strategy._num_workers\n    self._num_ps = strategy._num_ps\n\n    # Ignore PS failures reported by workers due to transient connection\n    # errors. Transient connectivity issues between workers and PS are relayed\n    # by the workers to the coordinator, leading the coordinator to believe\n    # that there are PS failures. The difference between a transient and a\n    # permanent PS failure is the number of reports from the workers. 
When this env var is set to a\n    # positive integer K, the coordinator ignores up to K reports of a failed\n    # PS task; i.e., only when more than K closure executions fail due to\n    # errors from the same PS instance do we consider the PS instance to have\n    # failed.\n    # TODO(b/164279603): Remove this workaround when the underlying connectivity\n    # issue in gRPC server is resolved.\n    self._transient_ps_failures_threshold = int(\n        os.environ.get(\"TF_COORDINATOR_IGNORE_TRANSIENT_PS_FAILURES\", 3))\n    self._potential_ps_failures_lock = threading.Lock()\n    self._potential_ps_failures_count = [0] * self._num_ps\n\n    self._closure_queue = _CoordinatedClosureQueue()\n    self.failure_handler = WorkerPreemptionHandler(context.get_server_def(),\n                                                   self)\n    worker_device_strings = [\n        \"/job:worker/replica:0/task:%d\" % i for i in range(self._num_workers)\n    ]\n    self.workers = [\n        Worker(i, w, self) for i, w in enumerate(worker_device_strings)\n    ]\n\n  def _record_and_ignore_transient_ps_failure(self, e):\n    \"\"\"Records potential PS failures and returns whether to ignore the failure.\"\"\"\n    if self._transient_ps_failures_threshold <= 0 or not _is_ps_failure(e):\n      return False\n\n    ps_tasks = _extract_failed_ps_instances(str(e))\n    with self._potential_ps_failures_lock:\n      for t in ps_tasks:\n        self._potential_ps_failures_count[t] += 1\n        # The number of UnavailableErrors encountered on this PS task exceeds\n        # the maximum number of ignored errors.\n        if (self._potential_ps_failures_count[t] >=\n            self._transient_ps_failures_threshold):\n          return False\n    return True\n\n  def schedule(self, function, args, kwargs):\n    \"\"\"Schedules `function` to be dispatched to a worker for execution.\n\n    Args:\n      function: The function to be dispatched to a worker for execution\n        asynchronously.\n      args: Positional arguments for `function`.\n      kwargs: Keyword arguments for `function`.\n\n    Returns:\n      A `RemoteValue` object.\n    \"\"\"\n    closure = Closure(\n        function,\n        self._closure_queue._cancellation_mgr,  # pylint: disable=protected-access\n        args=args,\n        kwargs=kwargs)\n    self._closure_queue.put(closure)\n    return closure.output_remote_value\n\n  def join(self):\n    \"\"\"Blocks until all scheduled functions are executed.\"\"\"\n    self._closure_queue.wait()\n\n  def done(self):\n    \"\"\"Returns True if all scheduled functions are executed.\"\"\"\n    return self._closure_queue.done()\n\n\n@tf_export(\"distribute.experimental.coordinator.ClusterCoordinator\", v1=[])\nclass ClusterCoordinator(object):\n  \"\"\"An object to schedule and coordinate remote function execution.\n\n  This class is used to create fault-tolerant resources and dispatch functions\n  to remote TensorFlow servers.\n\n  Currently, this class is not supported for standalone use. It should be used\n  in conjunction with a `tf.distribute` strategy that is designed to work with\n  it. The `ClusterCoordinator` class currently only works with\n  `tf.distribute.experimental.ParameterServerStrategy`.\n\n  __The `schedule`/`join` APIs__\n\n  The most important APIs provided by this class are the `schedule`/`join`\n  pair. The `schedule` API is non-blocking in that it queues a `tf.function`\n  and returns a `RemoteValue` immediately. The queued functions will be\n  dispatched to remote workers in background threads and their `RemoteValue`s\n  will be filled asynchronously. Since `schedule` doesn’t require worker\n  assignment, the `tf.function` passed in can be executed on any available\n  worker. 
If the worker\n  it is executed on becomes unavailable before its completion, it will be\n  migrated to another worker. Because of this, and because function execution\n  is not atomic, a function may be executed more than once.\n\n  __Handling Task Failure__\n\n  This class, when used with\n  `tf.distribute.experimental.ParameterServerStrategy`, comes with built-in\n  fault tolerance for worker failures. That is, when some workers become\n  unreachable from the coordinator for any reason, training progress continues\n  to be made with the remaining workers. Upon recovery of a failed worker, it\n  will be added back for function execution after the datasets created by\n  `create_per_worker_dataset` are re-built on it.\n\n  When a parameter server fails, a `tf.errors.UnavailableError` is raised by\n  `schedule`, `join` or `done`. In this case, in addition to bringing back the\n  failed parameter server, users should restart the coordinator so that it\n  reconnects to workers and parameter servers, re-creates the variables, and\n  loads checkpoints. If the coordinator fails, after the user brings it back,\n  the program will automatically connect to workers and parameter servers, and\n  continue the progress from a checkpoint.\n\n  It is thus essential that in the user's program, a checkpoint file is\n  periodically saved, and restored at the start of the program. If a\n  `tf.keras.optimizers.Optimizer` is checkpointed, then after restoring from a\n  checkpoint, its `iterations` property roughly indicates the number of steps\n  that have been made. This can be used to decide how many epochs and steps\n  are still needed before training completes.\n\n  See the `tf.distribute.experimental.ParameterServerStrategy` docstring for\n  an example usage of this API.\n\n  This is currently under development, and the API as well as the\n  implementation are subject to change.\n  \"\"\"\n\n  def __init__(self, strategy):\n    \"\"\"Initialization of a `ClusterCoordinator` instance.\n\n    Args:\n      strategy: a supported `tf.distribute.Strategy` object. Currently, only\n        `tf.distribute.experimental.ParameterServerStrategy` is supported.\n\n    Raises:\n      ValueError: if the strategy being used is not supported.\n    \"\"\"\n    if not isinstance(strategy,\n                      parameter_server_strategy_v2.ParameterServerStrategyV2):\n      raise ValueError(\n          \"Only `tf.distribute.experimental.ParameterServerStrategy` \"\n          \"is supported to work with \"\n          \"`tf.distribute.experimental.coordinator.ClusterCoordinator` \"\n          \"currently.\")\n    self._strategy = strategy\n    self.cluster = Cluster(strategy)\n\n  @property\n  def strategy(self):\n    \"\"\"Returns the `Strategy` associated with the `ClusterCoordinator`.\"\"\"\n    return self._strategy\n\n  def schedule(self, fn, args=None, kwargs=None):\n    \"\"\"Schedules `fn` to be dispatched to a worker for asynchronous execution.\n\n    This method is non-blocking in that it queues the `fn` which will be\n    executed later and returns a\n    `tf.distribute.experimental.coordinator.RemoteValue` object immediately.\n    `fetch` can be called on it to wait for the function execution to finish\n    and retrieve its output from a remote worker. Alternatively, call\n    `tf.distribute.experimental.coordinator.ClusterCoordinator.join` to wait\n    for all scheduled functions to finish.\n\n    `schedule` guarantees that `fn` will be executed on a worker at least once;\n    it could be more than once if its corresponding worker fails in the middle\n    of its execution. 
Note that since a worker can fail at any point while\n    executing the function, the function may be partially executed, but\n    `tf.distribute.experimental.coordinator.ClusterCoordinator` guarantees\n    that in those events, the function will eventually be executed on any\n    worker that is available.\n\n    If any previously scheduled function raises an error, `schedule` will\n    raise any one of those errors, and clear the errors collected so far. When\n    this happens, some of the previously scheduled functions may not have been\n    executed. Users can call `fetch` on the returned\n    `tf.distribute.experimental.coordinator.RemoteValue` to inspect whether\n    they have executed, failed, or been cancelled, and reschedule the\n    corresponding function if needed.\n\n    When `schedule` raises, it guarantees that there is no function that is\n    still being executed.\n\n    At this time, there is no support for worker assignment for function\n    execution, or for worker priority.\n\n    `args` and `kwargs` are the arguments passed into `fn` when `fn` is\n    executed on a worker. They can be\n    `tf.distribute.experimental.coordinator.PerWorkerValues`, in which case\n    the argument will be substituted with the corresponding component on the\n    target worker. Arguments that are not\n    `tf.distribute.experimental.coordinator.PerWorkerValues` will be passed\n    into `fn` as-is. Currently,\n    `tf.distribute.experimental.coordinator.RemoteValue` is not supported as\n    input `args` or `kwargs`.\n\n    Args:\n      fn: A `tf.function`; the function to be dispatched to a worker for\n        execution asynchronously.\n      args: Positional arguments for `fn`.\n      kwargs: Keyword arguments for `fn`.\n\n    Returns:\n      A `tf.distribute.experimental.coordinator.RemoteValue` object that\n      represents the output of the function scheduled.\n\n    Raises:\n      Exception: one of the exceptions caught by the coordinator from any\n        previously scheduled function, since the last time an error was thrown\n        or since the beginning of the program.\n    \"\"\"\n    # Slot variables are usually created during function tracing time; thus\n    # `schedule` needs to be called within the `strategy.scope()`.\n    with self.strategy.scope():\n      return self.cluster.schedule(fn, args=args, kwargs=kwargs)\n\n  def join(self):\n    \"\"\"Blocks until all the scheduled functions have finished execution.\n\n    If any previously scheduled function raises an error, `join` will fail by\n    raising any one of those errors, and clear the errors collected so far. If\n    this happens, some of the previously scheduled functions may not have been\n    executed. Users can call `fetch` on the returned\n    `tf.distribute.experimental.coordinator.RemoteValue` to inspect whether\n    they have executed, failed, or been cancelled. 
If some that have been cancelled need to be\n rescheduled, users should call `schedule` with the function again.\n\n When `join` returns or raises, it guarantees that there is no function that\n is still being executed.\n\n Raises:\n Exception: one of the exceptions caught by the coordinator by any\n previously scheduled function since the last time an error was thrown or\n since the beginning of the program.\n \"\"\"\n self.cluster.join()\n\n def done(self):\n \"\"\"Returns whether all the scheduled functions have finished execution.\n\n If any previously scheduled function raises an error, `done` will fail by\n raising any one of those errors.\n\n When `done` returns True or raises, it guarantees that there is no function\n that is still being executed.\n\n Returns:\n Whether all the scheduled functions have finished execution.\n Raises:\n Exception: one of the exceptions caught by the coordinator by any\n previously scheduled function since the last time an error was thrown or\n since the beginning of the program.\n \"\"\"\n return self.cluster.done()\n\n def create_per_worker_dataset(self, dataset_fn):\n \"\"\"Create dataset on workers by calling `dataset_fn` on worker devices.\n\n This creates the given dataset generated by dataset_fn on workers\n and returns an object that represents the collection of those individual\n datasets. Calling `iter` on such collection of datasets returns a\n `tf.distribute.experimental.coordinator.PerWorkerValues`, which is a\n collection of iterators, where the iterators have been placed on respective\n workers.\n\n Calling `next` on a `PerWorkerValues` of iterator is unsupported. The\n iterator is meant to be passed as an argument into\n `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`. When\n the scheduled function is about to be executed by a worker, the\n function will receive the individual iterator that corresponds to the\n worker. The `next` method can be called on an iterator inside a\n scheduled function when the iterator is an input of the function.\n\n Currently the `schedule` method assumes workers are all the same and thus\n assumes the datasets on different workers are the same, except they may be\n shuffled differently if they contain a `dataset.shuffle` operation and a\n random seed is not set. Because of this, we also recommend the datasets to\n be repeated indefinitely and schedule a finite number of steps instead of\n relying on the `OutOfRangeError` from a dataset.\n\n\n Example:\n\n ```python\n strategy = tf.distribute.experimental.ParameterServerStrategy(\n cluster_resolver=...)\n coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(\n strategy=strategy)\n\n @tf.function\n def worker_fn(iterator):\n return next(iterator)\n\n def per_worker_dataset_fn():\n return strategy.distribute_datasets_from_function(\n lambda x: tf.data.Dataset.from_tensor_slices([3] * 3))\n\n per_worker_dataset = coordinator.create_per_worker_dataset(\n per_worker_dataset_fn)\n per_worker_iter = iter(per_worker_dataset)\n remote_value = coordinator.schedule(worker_fn, args=(per_worker_iter,))\n assert remote_value.fetch() == 3\n ```\n\n Args:\n dataset_fn: The dataset function that returns a dataset. This is to be\n executed on the workers.\n\n Returns:\n An object that represents the collection of those individual\n datasets. 
`iter` is expected to be called on this object that returns\n a `tf.distribute.experimental.coordinator.PerWorkerValues` of the\n iterators (that are on the workers).\n \"\"\"\n input_workers = input_lib.InputWorkers([\n (w.device_name, [w.device_name]) for w in self.cluster.workers\n ])\n\n return _PerWorkerDistributedDataset(dataset_fn, input_workers, self)\n\n def _create_per_worker_resources(self, fn, args=None, kwargs=None):\n \"\"\"Synchronously create resources on the workers.\n\n The resources are represented by\n `tf.distribute.experimental.coordinator.RemoteValue`s.\n\n Args:\n fn: The function to be dispatched to all workers for execution\n asynchronously.\n args: Positional arguments for `fn`.\n kwargs: Keyword arguments for `fn`.\n\n Returns:\n A `tf.distribute.experimental.coordinator.PerWorkerValues` object, which\n wraps a tuple of `tf.distribute.experimental.coordinator.RemoteValue`\n objects.\n \"\"\"\n results = []\n for w in self.cluster.workers:\n results.append(w._create_resource(fn, args=args, kwargs=kwargs)) # pylint: disable=protected-access\n return PerWorkerValues(tuple(results))\n\n def fetch(self, val):\n \"\"\"Blocking call to fetch results from the remote values.\n\n This is a wrapper around\n `tf.distribute.experimental.coordinator.RemoteValue.fetch` for a\n `RemoteValue` structure; it returns the execution results of\n `RemoteValue`s. If not ready, wait for them while blocking the caller.\n\n Example:\n ```python\n strategy = ...\n coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(\n strategy)\n\n def dataset_fn():\n return tf.data.Dataset.from_tensor_slices([1, 1, 1])\n\n with strategy.scope():\n v = tf.Variable(initial_value=0)\n\n @tf.function\n def worker_fn(iterator):\n def replica_fn(x):\n v.assign_add(x)\n return v.read_value()\n return strategy.run(replica_fn, args=(next(iterator),))\n\n distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn)\n distributed_iterator = iter(distributed_dataset)\n result = coordinator.schedule(worker_fn, args=(distributed_iterator,))\n assert coordinator.fetch(result) == 1\n ```\n\n Args:\n val: The value to fetch the results from. If this is structure of\n `tf.distribute.experimental.coordinator.RemoteValue`, `fetch()` will be\n called on the individual\n `tf.distribute.experimental.coordinator.RemoteValue` to get the result.\n\n Returns:\n If `val` is a `tf.distribute.experimental.coordinator.RemoteValue` or a\n structure of `tf.distribute.experimental.coordinator.RemoteValue`s,\n return the fetched `tf.distribute.experimental.coordinator.RemoteValue`\n values immediately if they are available, or block the call until they are\n available, and return the fetched\n `tf.distribute.experimental.coordinator.RemoteValue` values with the same\n structure. 
If `val` is other types, return it as-is.\n \"\"\"\n\n def _maybe_fetch(val):\n if isinstance(val, RemoteValue):\n return val.fetch()\n else:\n return val\n\n # TODO(yuefengz): we should fetch values in a batch.\n return nest.map_structure(_maybe_fetch, val)\n\n\n# pylint: disable=missing-function-docstring\[email protected]\ndef handle_parameter_server_failure():\n try:\n yield\n except errors.UnavailableError as e: # pylint: disable=broad-except\n restart_exit_code = os.environ.get(\"TF_CLIENT_NON_FATAL_RESTART_EXIT_CODE\",\n None)\n if restart_exit_code is not None:\n sys.exit(int(restart_exit_code))\n else:\n raise\n\n\nclass _PerWorkerDistributedDataset(object):\n \"\"\"Represents worker-distributed datasets created from dataset function.\"\"\"\n\n def __init__(self, dataset_fn, input_workers, coordinator):\n \"\"\"Makes an iterable from datasets created by the given function.\n\n Args:\n dataset_fn: A function that returns a `Dataset`.\n input_workers: an `InputWorkers` object.\n coordinator: a `ClusterCoordinator` object, used to create dataset\n resources.\n \"\"\"\n def disallow_variable_creation(next_creator, **kwargs):\n raise ValueError(\"Creating variables in `dataset_fn` is not allowed.\")\n\n if isinstance(dataset_fn, def_function.Function):\n with variable_scope.variable_creator_scope(disallow_variable_creation):\n dataset_fn = dataset_fn.get_concrete_function()\n elif not isinstance(dataset_fn, tf_function.ConcreteFunction):\n with variable_scope.variable_creator_scope(disallow_variable_creation):\n dataset_fn = def_function.function(dataset_fn).get_concrete_function()\n self._dataset_fn = dataset_fn\n self._input_workers = input_workers\n self._coordinator = coordinator\n self._element_spec = None\n\n def __iter__(self):\n # We would like users to create iterators outside `tf.function`s so that we\n # can track them.\n if (not context.executing_eagerly() or\n ops.get_default_graph().building_function):\n raise RuntimeError(\n \"__iter__() is not supported inside of tf.function or in graph mode.\")\n\n def _create_per_worker_iterator():\n dataset = self._dataset_fn()\n return iter(dataset)\n\n # If _PerWorkerDistributedDataset.__iter__ is called multiple\n # times, for the same object it should only create and register resource\n # once. 
Using object id to distinguish different iterator resources.\n per_worker_iterator = self._coordinator._create_per_worker_resources(\n _create_per_worker_iterator)\n\n # Setting type_spec of each RemoteValue so that functions taking these\n # RemoteValues as inputs can be traced.\n for iterator_remote_value in per_worker_iterator._values:\n iterator_remote_value._type_spec = ( # pylint: disable=protected-access\n iterator_ops.IteratorSpec(\n self._dataset_fn.structured_outputs.element_spec))\n return _PerWorkerDistributedIterator(per_worker_iterator._values)\n\n @property\n def element_spec(self):\n \"\"\"The type specification of an element of this dataset.\"\"\"\n raise NotImplementedError(\"Passing `AsyncDistributedDataset` to a \"\n \"tf.function is not supported.\")\n\n\nclass _PerWorkerDistributedIterator(PerWorkerValues):\n \"\"\"Distributed iterator for `ClusterCoordinator`.\"\"\"\n\n def __next__(self):\n return self.get_next()\n\n def get_next(self, name=None):\n \"\"\"Returns the next input from the iterator for all replicas.\"\"\"\n raise NotImplementedError(\"Iterating over an `AsyncDistributedIterator` \"\n \"is not supported right now.\")\n\n\ndef _extract_failed_ps_instances(err_msg):\n \"\"\"Return a set of potentially failing ps instances from error message.\"\"\"\n tasks = re.findall(\"/job:ps/replica:0/task:[0-9]+\", err_msg)\n return set(int(t.split(\":\")[-1]) for t in tasks)\n\n\ndef _is_ps_failure(error):\n \"\"\"Whether the error is considered a parameter server failure.\"\"\"\n return (isinstance(error, errors.UnavailableError) and\n _RPC_ERROR_FROM_PS in str(error))\n\n\ndef _is_worker_failure(error):\n \"\"\"Whether the error is considered a worker failure.\"\"\"\n if _JOB_WORKER_STRING_IDENTIFIER not in str(error):\n return False\n if _RPC_ERROR_FROM_PS in str(error):\n return False\n\n # TODO(haoyuzhang): Consider using special status code if error from a\n # remote is derived from RPC errors originated from other hosts.\n if isinstance(error, (errors.UnavailableError, errors.AbortedError)):\n return True\n\n # The following error could happen when the remote task fails and restarts\n # in a very short interval during which no RPCs were exchanged to detect the\n # failure. In that case, gRPC allows channel (which is different from a\n # connection) to be reused for a replaced server listening to same address.\n if isinstance(error, errors.InvalidArgumentError):\n if (\"unknown device\" in str(error) or\n \"Unable to find the relevant tensor remote_handle\" in str(error)):\n # TODO(b/159961667): Fix \"Unable to find the relevant tensor\n # remote_handle\" part.\n return True\n\n # TODO(b/162541228): The following 2 types of errors are very rare and only\n # observed in large-scale testing. The types of errors should be reduced.\n # This could happen when the function registration fails. In the observed\n # cases this only happens to the dataset related functions.\n if isinstance(error, errors.NotFoundError):\n if (\"is neither a type of a primitive operation nor a name of a function \"\n \"registered\" in str(error)):\n return True\n\n # This could happen when the iterator is no longer valid on the remote worker\n # \"Resource input tensor contains an invalid device\"\n if isinstance(error, errors.CancelledError):\n return True\n\n return False\n",
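"# A minimal usage sketch of the coordinator API above: a hypothetical\n# driver, mirroring the docstring examples, that assumes a parameter-server\n# cluster has already been configured via a cluster resolver.\nimport tensorflow as tf\n\nstrategy = tf.distribute.experimental.ParameterServerStrategy(\n    cluster_resolver=...)  # assumption: supplied by the deployment\ncoordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(\n    strategy)\n\nwith strategy.scope():\n  v = tf.Variable(initial_value=0)\n\n\[email protected]\ndef worker_fn(iterator):\n\n  def replica_fn(x):\n    v.assign_add(x)\n    return v.read_value()\n\n  return strategy.run(replica_fn, args=(next(iterator),))\n\n\ndef per_worker_dataset_fn():\n  # Repeat indefinitely and schedule a finite number of steps, as the\n  # `create_per_worker_dataset` docstring recommends.\n  return strategy.distribute_datasets_from_function(\n      lambda _: tf.data.Dataset.from_tensor_slices([1, 1, 1]).repeat())\n\n\nper_worker_dataset = coordinator.create_per_worker_dataset(\n    per_worker_dataset_fn)\nper_worker_iter = iter(per_worker_dataset)\nresults = [\n    coordinator.schedule(worker_fn, args=(per_worker_iter,))\n    for _ in range(10)\n]\ncoordinator.join()  # blocks until all scheduled functions have finished\n",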
"# Lint as: python2, python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lite.py functionality related to TensorFlow 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow as tf\n\nfrom tensorflow.lite.python import lite\nfrom tensorflow.lite.python import lite_v2_test_util\nfrom tensorflow.lite.python.convert import mlir_quantize\nfrom tensorflow.lite.python.interpreter import Interpreter\nfrom tensorflow.lite.toco import types_pb2 as _types_pb2\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import save_options\nfrom tensorflow.python.saved_model import saved_model\nfrom tensorflow.python.saved_model.loader_impl import parse_saved_model\nfrom tensorflow.python.saved_model.save import save\nfrom tensorflow.python.training.tracking import tracking\n\n\nclass FromConcreteFunctionTest(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testTypeInvalid(self):\n root = self._getSimpleVariableModel()\n with self.assertRaises(ValueError) as error:\n _ = lite.TFLiteConverterV2.from_concrete_functions([root.f])\n self.assertIn('call get_concrete_function', str(error.exception))\n\n @parameterized.named_parameters(\n ('EnableMlirConverter', True), # enable mlir\n ('DisableMlirConverter', False)) # disable mlir\n @test_util.run_v2_only\n def testFloat(self, enable_mlir_converter):\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n converter.experimental_new_converter = enable_mlir_converter\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @parameterized.named_parameters(\n ('_INT8InputOutput', dtypes.int8),\n ('_UINT8InputOutput', dtypes.uint8),\n ('_INT16InputOutput', dtypes.int16))\n @test_util.run_v2_only\n def testInvalidFloat(self, inference_input_output_type):\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n with self.assertRaises(ValueError) as error:\n converter.inference_input_type = 
inference_input_output_type\n converter.inference_output_type = inference_input_output_type\n converter.convert()\n self.assertEqual(\n 'The inference_input_type and inference_output_type '\n 'must be tf.float32.', str(error.exception))\n\n @test_util.run_v2_only\n def testScalarInput(self):\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[])\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testMultiFunctionModel(self):\n \"\"\"Convert a single model in a multi-functional model.\"\"\"\n root = self._getMultiFunctionModel()\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.add.get_concrete_function(input_data)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.add(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testConvertMultipleFunctions(self):\n \"\"\"Convert multiple functions in a multi-functional model.\"\"\"\n root = self._getMultiFunctionModel()\n input_data = tf.constant(1., shape=[1])\n add_func = root.add.get_concrete_function(input_data)\n sub_func = root.sub.get_concrete_function(input_data)\n\n # Try converting multiple functions.\n converter = lite.TFLiteConverterV2.from_concrete_functions(\n [add_func, sub_func])\n with self.assertRaises(ValueError) as error:\n _ = converter.convert()\n self.assertIn('can only convert a single ConcreteFunction',\n str(error.exception))\n\n def _getIntegerQuantizeModel(self):\n np.random.seed(0)\n\n root = tracking.AutoTrackable()\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[1, 5, 5, 3], dtype=tf.float32)])\n def func(inp):\n conv = tf.nn.conv2d(\n inp, tf.ones([3, 3, 3, 16]), strides=[1, 1, 1, 1], padding='SAME')\n output = tf.nn.relu(conv, name='output')\n return output\n\n def calibration_gen():\n for _ in range(5):\n yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]\n\n root.f = func\n to_save = root.f.get_concrete_function()\n return (to_save, calibration_gen)\n\n @parameterized.named_parameters(\n ('EnableMlirQuantizer', True), # enable mlir quantizer\n ('DisableMlirQuantizer', False)) # disable mlir quantizer\n def testPostTrainingCalibrateAndQuantize(self, mlir_quantizer):\n func, calibration_gen = self._getIntegerQuantizeModel()\n\n # Convert float model.\n float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])\n float_tflite_model = float_converter.convert()\n self.assertIsNotNone(float_tflite_model)\n\n # Convert quantized model.\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n quantized_converter._experimental_new_quantizer = mlir_quantizer\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n\n # The default input and output types should be float.\n 
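# (The TFLite interpreter reports tensor dtypes as numpy types, hence\n    # the np.float32 comparisons below.)\n    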
interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(np.float32, input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(np.float32, output_details[0]['dtype'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(float_tflite_model))\n\n @parameterized.named_parameters(\n ('_INT8InputOutput', dtypes.int8),\n ('_UINT8InputOutput', dtypes.uint8),\n ('_INT16InputOutput', dtypes.int16))\n @test_util.run_v2_only\n def testInvalidPostTrainingDynamicRangeQuantization(\n self, inference_input_output_type):\n func, _ = self._getIntegerQuantizeModel()\n\n # Convert float model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([func])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Convert quantized model.\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n with self.assertRaises(ValueError) as error:\n quantized_converter.inference_input_type = inference_input_output_type\n quantized_converter.inference_output_type = inference_input_output_type\n quantized_converter.convert()\n self.assertEqual(\n 'The inference_input_type and inference_output_type '\n 'must be tf.float32.', str(error.exception))\n\n @parameterized.named_parameters(\n ('_Default', False, False, dtypes.float32),\n ('_INT8InputOutput', False, False, dtypes.int8),\n ('_UINT8InputOutput', False, False, dtypes.uint8),\n ('_INT16Quantize', False, True, dtypes.float32),\n ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),\n ('_IntOnly', True, False, dtypes.float32),\n ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),\n ('_IntOnly_UINT8InputOutput', True, False,\n dtypes.uint8),\n ('_IntOnly_INT16Quantize', True, True, dtypes.float32),\n ('_IntOnly_INT16Quantize_INT16InputOutput', True, True,\n dtypes.int16))\n def testIntegerQuantization(self, is_int_only, is_int16_quantize,\n inference_input_output_type):\n func, calibration_gen = self._getIntegerQuantizeModel()\n\n # Convert float model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([func])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Convert quantized model.\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n if is_int_only:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\\\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8\n ]\n else:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\\\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.TFLITE_BUILTINS\n ]\n quantized_converter.inference_input_type = inference_input_output_type\n quantized_converter.inference_output_type = inference_input_output_type\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = 
interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n output_details[0]['dtype'])\n\n # Ensure that the quantized tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(tflite_model))\n\n @parameterized.named_parameters(\n ('_INT16Quantize_INT8InputOutput', True, dtypes.int8))\n def testInvalidIntegerQuantization(self, is_int16_quantize,\n inference_input_output_type):\n func, calibration_gen = self._getIntegerQuantizeModel()\n\n # Convert quantized model.\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\\\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.TFLITE_BUILTINS\n ]\n with self.assertRaises(ValueError) as error:\n quantized_converter.inference_input_type = dtypes.int8\n quantized_converter.inference_output_type = dtypes.int8\n quantized_converter.convert()\n self.assertEqual(\n \"The inference_input_type and inference_output_type \"\n \"must be in ['tf.float32', 'tf.int16'].\", str(error.exception))\n\n def testCalibrateAndQuantizeBuiltinInt16(self):\n func, calibration_gen = self._getIntegerQuantizeModel()\n\n # Convert float model.\n float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])\n float_tflite_model = float_converter.convert()\n self.assertIsNotNone(float_tflite_model)\n\n converter = lite.TFLiteConverterV2.from_concrete_functions([func])\n # TODO(b/156309549): We should add INT16 to the builtin types.\n converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8\n ]\n converter.representative_dataset = calibration_gen\n converter._experimental_calibrate_only = True\n calibrated_tflite = converter.convert()\n quantized_tflite_model = mlir_quantize(\n calibrated_tflite, inference_type=_types_pb2.QUANTIZED_INT16)\n\n self.assertIsNotNone(quantized_tflite_model)\n\n # The default input and output types should be float.\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(np.float32, input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(np.float32, output_details[0]['dtype'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(float_tflite_model))\n\n def _getTrainingTimeQuantizedModel(self):\n\n class QLinear(tf.keras.layers.Layer):\n\n def __init__(self, units=3, **kwargs):\n super(QLinear, self).__init__(**kwargs)\n self.units = units\n\n def build(self, input_shape):\n self.w = self.add_weight(\n 'weight',\n shape=(input_shape[-1], self.units),\n initializer='random_normal',\n trainable=True)\n self.min_var = self.add_weight(\n 'min',\n initializer=tf.keras.initializers.Constant(-6.0),\n trainable=False)\n self.max_var = self.add_weight(\n 'max',\n initializer=tf.keras.initializers.Constant(6.0),\n trainable=False)\n\n def call(self, inputs):\n x = tf.quantization.fake_quant_with_min_max_vars(\n inputs, 
self.min_var, self.max_var)\n\n w_fq = tf.quantization.fake_quant_with_min_max_vars(\n self.w, self.min_var, self.max_var)\n x = tf.matmul(x, w_fq)\n\n x = tf.quantization.fake_quant_with_min_max_vars(\n x, self.min_var, self.max_var)\n\n return x\n\n return tf.keras.Sequential(QLinear(3, input_shape=(2,)))\n\n @parameterized.named_parameters(\n ('_DefaultFLOAT32InputOutput', dtypes.float32),\n ('_INT8InputOutput', dtypes.int8),\n ('_UINT8InputOutput', dtypes.uint8))\n @test_util.run_v2_only\n def testTrainingTimeQuantization(self, inference_input_output_type):\n model = self._getTrainingTimeQuantizedModel()\n\n float_converter = lite.TFLiteConverterV2.from_keras_model(model)\n float_tflite_model = float_converter.convert()\n self.assertIsNotNone(float_tflite_model)\n\n quantized_converter = lite.TFLiteConverterV2.from_keras_model(model)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.inference_input_type = inference_input_output_type\n quantized_converter.inference_output_type = inference_input_output_type\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n output_details[0]['dtype'])\n\n # Ensure that the quantized tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(float_tflite_model))\n\n @test_util.run_v2_only\n def testNewQuantizer(self):\n \"\"\"Test the model quantized by the new converter.\"\"\"\n func, calibration_gen = self._getIntegerQuantizeModel()\n\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8\n ]\n quantized_converter.representative_dataset = calibration_gen\n\n # default quantizer\n quantized_converter._experimental_new_quantizer = False\n old_tflite = quantized_converter.convert()\n\n # new quantizer\n quantized_converter._experimental_new_quantizer = True\n new_tflite = quantized_converter.convert()\n\n for _ in range(5):\n input_data = tf.constant(\n np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32))\n old_value = self._evaluateTFLiteModel(old_tflite, [input_data])\n new_value = self._evaluateTFLiteModel(new_tflite, [input_data])\n self.assertAllClose(old_value, new_value, atol=1e-01)\n\n @parameterized.named_parameters(\n ('EnableMlirConverter', True), # enable mlir\n ('DisableMlirConverter', False)) # disable mlir\n @test_util.run_v2_only\n def testEmbeddings(self, enable_mlir_converter):\n \"\"\"Test model with embeddings.\"\"\"\n input_data = tf.constant(\n np.array(np.random.random_sample((20)), dtype=np.int32))\n\n class EmbeddingModel(tf.keras.Model):\n\n def __init__(self):\n super(EmbeddingModel, self).__init__()\n self.shared_weights = self.add_weight(\n 'weights',\n shape=(2000, 300),\n dtype=tf.float32,\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=300**(-0.5)))\n\n @tf.function(input_signature=[tf.TensorSpec(shape=(20), dtype=tf.int32)])\n def func(self, x):\n return tf.gather(self.shared_weights, x)\n\n # Building the model.\n root = EmbeddingModel()\n concrete_func = 
root.func.get_concrete_function()\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n converter.experimental_new_converter = enable_mlir_converter\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.func(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertAllClose(expected_value.numpy(), actual_value[0], atol=1e-05)\n\n @test_util.run_v2_only\n def testGraphDebugInfo(self):\n \"\"\"Test a concrete function has debug info captured.\"\"\"\n root = tracking.AutoTrackable()\n root.v1 = tf.Variable(3.)\n root.f = tf.function(lambda x: root.v1 * x)\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n converter.convert()\n self._assertValidDebugInfo(converter._debug_info)\n\n def _getIntegerQuantizationModelWithFlexOp(self):\n np.random.seed(0)\n\n root = tracking.AutoTrackable()\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[3, 3, 3, 3, 3], dtype=tf.float32)\n ])\n def func(inp):\n tanh = tf.math.tanh(inp)\n # Flex delegate will merge the consecutive conv3d and erf ops into one\n # Delegate node.\n conv3d = tf.nn.conv3d(\n tanh,\n tf.ones([3, 3, 3, 3, 3]),\n strides=[1, 1, 1, 1, 1],\n padding='SAME')\n erf = tf.math.erf(conv3d)\n output = tf.math.tanh(erf)\n return output\n\n def calibration_gen():\n for _ in range(5):\n yield [\n np.random.uniform(-1, 1, size=(3, 3, 3, 3, 3)).astype(np.float32)\n ]\n\n root.f = func\n return (root.f.get_concrete_function(), calibration_gen)\n\n @parameterized.named_parameters(\n ('_Default', False, False, dtypes.float32),\n ('_INT8InputOutput', False, False, dtypes.int8),\n ('_UINT8InputOutput', False, False, dtypes.uint8),\n ('_INT16Quantize', False, True, dtypes.float32),\n ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),\n ('_IntOnly', True, False, dtypes.float32),\n ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),\n ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),\n ('_IntOnly_INT16Quantize', True, True, dtypes.float32),\n ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))\n @test_util.run_v2_only\n def testIntegerQuantizationWithFlexOp(self, is_int_only, is_int16_quantize,\n inference_input_output_type):\n func, calibration_gen = self._getIntegerQuantizationModelWithFlexOp()\n\n quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(\n [func])\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n if is_int_only:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\\\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.SELECT_TF_OPS\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.SELECT_TF_OPS\n ]\n else:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\\\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.TFLITE_BUILTINS,\n lite.OpsSet.SELECT_TF_OPS\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS\n ]\n\n quantized_converter.inference_input_type = inference_input_output_type\n quantized_converter.inference_output_type = inference_input_output_type\n 
quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n output_details[0]['dtype'])\n\n def _getIntegerQuantizationModelWithUnsupportedOps(self):\n np.random.seed(0)\n\n root = tracking.AutoTrackable()\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[3], dtype=tf.float32),\n tf.TensorSpec(shape=[3], dtype=tf.float32)\n ])\n def func(a, b):\n # The ceil kernel supports neither int8 nor int16 types.\n left = tf.math.ceil(a)\n right = tf.nn.tanh(b)\n add = tf.math.add(left, right)\n # The ceil kernel supports neither int8 nor int16 types.\n output = tf.math.ceil(add)\n return (output, right)\n\n def calibration_gen():\n for _ in range(5):\n yield [\n np.random.uniform(-1, 1, size=(3)).astype(np.float32),\n np.random.uniform(-1, 1, size=(3)).astype(np.float32)\n ]\n\n root.f = func\n return (root.f.get_concrete_function(), calibration_gen)\n\n @parameterized.named_parameters(\n ('_INT8InputOutput', False, False, dtypes.int8),\n ('_UINT8InputOutput', False, False, dtypes.uint8),\n ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),\n ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),\n ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),\n ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))\n @test_util.run_v2_only\n def testIntegerQuantizationWithUnsupportedOps(self, is_int_only,\n is_int16_quantize,\n inference_input_output_type):\n func, calib_gen = self._getIntegerQuantizationModelWithUnsupportedOps()\n\n quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(\n [func])\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calib_gen\n if is_int_only:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\\\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.TFLITE_BUILTINS\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.TFLITE_BUILTINS\n ]\n else:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\\\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.TFLITE_BUILTINS\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS\n ]\n\n quantized_converter.inference_input_type = inference_input_output_type\n quantized_converter.inference_output_type = inference_input_output_type\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 2)\n # Allow float32 for fallback.\n self.assertEqual(input_details[0]['dtype'], dtypes.float32)\n self.assertEqual(input_details[1]['dtype'],\n inference_input_output_type.as_numpy_dtype)\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 2)\n # Allow float32 for fallback.\n 
self.assertEqual(output_details[0]['dtype'], dtypes.float32)\n self.assertEqual(output_details[1]['dtype'],\n inference_input_output_type.as_numpy_dtype)\n\n\nclass FromSavedModelTest(lite_v2_test_util.ModelTest):\n\n def _createV1SavedModel(self, shape):\n \"\"\"Create a simple SavedModel.\"\"\"\n saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')\n with tf.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n in_tensor_1 = tf.compat.v1.placeholder(\n shape=shape, dtype=tf.float32, name='inputB')\n in_tensor_2 = tf.compat.v1.placeholder(\n shape=shape, dtype=tf.float32, name='inputA')\n variable_node = tf.Variable(1.0, name='variable_node')\n out_tensor = in_tensor_1 + in_tensor_2 * variable_node\n inputs = {'x': in_tensor_1, 'y': in_tensor_2}\n outputs = {'z': out_tensor}\n sess.run(tf.compat.v1.variables_initializer([variable_node]))\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n @test_util.run_v2_only\n def testV1SimpleModel(self):\n \"\"\"Test a SavedModel.\"\"\"\n with tf.Graph().as_default():\n saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 2)\n self.assertStartsWith(input_details[0]['name'], 'inputA')\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n self.assertStartsWith(input_details[1]['name'], 'inputB')\n self.assertEqual(np.float32, input_details[1]['dtype'])\n self.assertAllEqual([1, 16, 16, 3], input_details[1]['shape'])\n self.assertEqual((0., 0.), input_details[1]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertStartsWith(output_details[0]['name'], 'add')\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n @test_util.run_v2_only\n def testTF1HubFormattedModel(self):\n \"\"\"Test a TF1 Hub formatted model.\"\"\"\n saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])\n\n # TF1 Hub models are based on the V1 SavedModel format and omit the\n # saved model schema version setting.\n saved_model_proto = parse_saved_model(saved_model_dir)\n saved_model_proto.saved_model_schema_version = 0\n\n saved_model_pb_file_path = os.path.join(saved_model_dir, 'saved_model.pb')\n with file_io.FileIO(saved_model_pb_file_path, 'wb') as writer:\n writer.write(saved_model_proto.SerializeToString())\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n @test_util.run_v2_only\n def testConstModel(self):\n \"\"\"Test a basic model with functions to make sure functions are inlined.\"\"\"\n input_data = tf.constant(1., shape=[1])\n root = tracking.AutoTrackable()\n root.f = tf.function(lambda x: 2. 
* x)\n to_save = root.f.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, to_save)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testVariableModel(self):\n \"\"\"Test a basic model with Variables with saving/loading the SavedModel.\"\"\"\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n to_save = root.f.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, to_save)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testSignatures(self):\n \"\"\"Test values for `signature_keys` argument.\"\"\"\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n to_save = root.f.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, to_save)\n\n # Convert model with invalid `signature_keys`.\n with self.assertRaises(ValueError) as error:\n _ = lite.TFLiteConverterV2.from_saved_model(\n save_dir, signature_keys=['INVALID'])\n self.assertIn(\"Invalid signature key 'INVALID'\", str(error.exception))\n\n # Convert model with empty `signature_keys`.\n converter = lite.TFLiteConverterV2.from_saved_model(\n save_dir, signature_keys=[])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testMultipleFunctionModel(self):\n \"\"\"Convert multiple functions in a multi-functional model.\"\"\"\n root = self._getMultiFunctionModel()\n input_data = tf.constant(1., shape=[1])\n add_func = root.add.get_concrete_function(input_data)\n sub_func = root.sub.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, {'add': add_func, 'sub': sub_func})\n\n # Try converting multiple functions.\n with self.assertRaises(ValueError) as error:\n _ = lite.TFLiteConverterV2.from_saved_model(save_dir)\n self.assertIn('Only support a single signature key.', str(error.exception))\n\n @test_util.run_v2_only\n def testNoConcreteFunctionModel(self):\n root = self._getMultiFunctionModel()\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir)\n\n with self.assertRaises(ValueError) as error:\n _ = lite.TFLiteConverterV2.from_saved_model(save_dir)\n self.assertIn('Only support a single signature key.', str(error.exception))\n\n @test_util.run_v2_only\n def testKerasSequentialModel(self):\n \"\"\"Test a simple sequential tf.Keras model.\"\"\"\n input_data = tf.constant(1., shape=[1, 1])\n\n x = np.array([[1.], [2.]])\n y = np.array([[2.], [4.]])\n\n model = tf.keras.models.Sequential([\n 
tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(1),\n ])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(x, y, epochs=1)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(model, save_dir)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value, actual_value)\n\n @test_util.run_v2_only\n def testGraphDebugInfo(self):\n \"\"\"Test a SavedModel has debug info captured.\"\"\"\n input_data = tf.constant(1., shape=[1])\n root = tracking.AutoTrackable()\n root.f = tf.function(lambda x: 2. * x)\n to_save = root.f.get_concrete_function(input_data)\n options = save_options.SaveOptions(save_debug_info=True)\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, to_save, options)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n converter.convert()\n self._assertValidDebugInfo(converter._debug_info)\n\n @test_util.run_v2_only\n def testFallbackPath(self):\n \"\"\"Test a SavedModel fallback path using old converter.\"\"\"\n saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n converter.experimental_new_converter = False\n tflite_model = converter.convert()\n\n self.assertTrue(tflite_model)\n\n @test_util.run_v2_only\n def testNonStatefulConvLSTM2D(self):\n \"\"\"Test saved model with non stateful ConvLSTM2D keras layer.\"\"\"\n # Create keras model\n model = tf.keras.Sequential([\n tf.keras.layers.ConvLSTM2D(\n 32, (3, 3),\n padding='same',\n return_sequences=True,\n stateful=False,\n batch_input_shape=(1, 1, 10, 10, 1))\n ])\n model.compile()\n\n # Export the keras model to saved model.\n saved_model_dir = os.path.join(self.get_temp_dir(), 'conv_lstm_2d')\n model.save(saved_model_dir, save_format='tf', include_optimizer=False)\n\n converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n\nclass FromKerasModelTest(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testSequentialModel(self):\n \"\"\"Test a simple sequential tf.Keras model.\"\"\"\n input_data = tf.constant(1., shape=[1, 1])\n\n # Create a simple Keras model.\n x = np.array([[1.], [2.]])\n y = np.array([[2.], [4.]])\n\n model = tf.keras.models.Sequential([\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(units=1, input_shape=[1])\n ])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(x, y, epochs=1)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value, actual_value)\n\n @test_util.run_v2_only\n def testSequentialMultiInputOutputModel(self):\n \"\"\"Test a tf.Keras model with multiple inputs and outputs.\"\"\"\n left_input_data = tf.constant(1., shape=[1, 3])\n 
right_input_data = tf.constant(1., shape=[1, 3])\n\n # Create a simple Keras model.\n input_a_np = np.random.random((10, 3))\n input_b_np = np.random.random((10, 3))\n output_c_np = np.random.random((10, 3))\n output_d_np = np.random.random((10, 2))\n\n input_a = tf.keras.layers.Input(shape=(3,), name='input_a')\n input_b = tf.keras.layers.Input(shape=(3,), name='input_b')\n\n dense = tf.keras.layers.Dense(8, name='dense_1')\n interm_a = dense(input_a)\n interm_b = dense(input_b)\n merged = tf.keras.layers.concatenate([interm_a, interm_b], name='merge')\n\n output_c = tf.keras.layers.Dense(\n 3, activation='softmax', name='dense_2')(\n merged)\n output_d = tf.keras.layers.Dense(\n 2, activation='softmax', name='dense_3')(\n merged)\n\n model = tf.keras.models.Model(\n inputs=[input_a, input_b], outputs=[output_c, output_d])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit([input_a_np, input_b_np], [output_c_np, output_d_np], epochs=1)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n input_data = [left_input_data, right_input_data]\n expected_value = model.predict(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, input_data)\n for tf_result, tflite_result in zip(expected_value, actual_value):\n self.assertAllClose(tf_result, tflite_result, atol=1e-05)\n\n @test_util.run_v2_only\n def testGraphDebugInfo(self):\n \"\"\"Test a tf.Keras model has debug info captured.\"\"\"\n # Create a simple Keras model.\n x = [-1, 0, 1, 2, 3, 4]\n y = [-3, -1, 1, 3, 5, 7]\n model = tf.keras.models.Sequential(\n [tf.keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(x, y, epochs=1)\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n converter.convert()\n self._assertValidDebugInfo(converter._debug_info)\n\n @test_util.run_v2_only\n def testKerasFallbackPath(self):\n \"\"\"Test a Keras model that fails when exported to a SavedModel.\"\"\"\n input_data = tf.constant(\n np.array(np.random.random_sample((20)), dtype=np.float32))\n\n class Model(tf.keras.Model):\n\n def __init__(self):\n super(Model, self).__init__()\n # A None name will cause a failure in exporting to a saved model.\n self.shared_weights = self.add_weight(\n name=None,\n shape=(20, 1),\n dtype=tf.float32,\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=300**(-0.5)))\n\n def call(self, x):\n return tf.add(self.shared_weights, x)\n\n # Building the model.\n model = Model()\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(input_data, input_data, epochs=1)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n\nclass ControlFlowTest(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testCond(self):\n input_data = {\n 'x': tf.constant([1., 2.], shape=[1, 2]),\n 'b': tf.constant(True)\n }\n\n weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)\n\n def true_fn(x):\n return tf.matmul(x, weights)\n\n def false_fn(x):\n return tf.add(x, weights)\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[1, 2], dtype=tf.float32),\n tf.TensorSpec(shape=(), dtype=tf.bool)\n ])\n def model(x, b):\n return tf.cond(\n b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))\n\n concrete_func = model.get_concrete_function()\n\n # Convert 
model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = concrete_func(**input_data)\n actual_value = self._evaluateTFLiteModel(\n tflite_model, [input_data['x'], input_data['b']])[0]\n self.assertAllClose(expected_value, actual_value)\n\n @test_util.run_v2_only\n def testStaticRnn(self):\n input_data = tf.constant(\n np.array(np.random.random_sample((3, 10)), dtype=np.float32))\n\n cell = tf.compat.v1.nn.rnn_cell.LSTMCell(10)\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[3, 10], dtype=tf.float32)])\n def model(x):\n seq = tf.split(x, 3, 0)\n return tf.compat.v1.nn.static_rnn(\n cell, seq, dtype=tf.float32, sequence_length=[1])\n\n concrete_func = model.get_concrete_function()\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = concrete_func(input_data)[0]\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n for expected, actual in zip(expected_value, actual_value):\n self.assertAllClose(expected, actual)\n\n @test_util.run_v2_only\n def testWhileLoop(self):\n input_data = tf.constant([1., 2., 3., 4.], shape=[2, 2])\n\n weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)\n\n def condition(x):\n return tf.reduce_sum(x) < 100\n\n def body(x):\n return tf.add(x, weights)\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[2, 2], dtype=tf.float32)])\n def model(x):\n return tf.while_loop(condition, body, [x])\n\n concrete_func = model.get_concrete_function()\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = concrete_func(input_data)[0]\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n self.assertAllClose(expected_value, actual_value)\n\n @test_util.run_v2_only\n def testDynamicRnn(self):\n input_data = tf.constant(\n np.array(np.random.random_sample((3, 10, 10)), dtype=np.float32))\n\n cell = tf.compat.v1.nn.rnn_cell.LSTMCell(10)\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[3, 10, 10], dtype=tf.float32)])\n def model(x):\n return tf.compat.v1.nn.dynamic_rnn(cell, x, dtype=tf.float32)\n\n concrete_func = model.get_concrete_function()\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = concrete_func(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n for expected, actual in zip(expected_value, actual_value):\n if not isinstance(expected, ops.EagerTensor):\n expected = expected.c\n self.assertAllClose(expected, actual)\n\n @parameterized.named_parameters(('LSTM', tf.keras.layers.LSTM),\n ('SimpleRNN', tf.keras.layers.SimpleRNN),\n ('GRU', tf.keras.layers.GRU))\n @test_util.run_v2_only\n def testKerasRNN(self, rnn_layer):\n # This relies on TFLiteConverter to rewrite unknown batch size to 1. 
The\n # model will fail if the input is resized to a non-1 batch size.\n input_data = tf.constant(\n np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))\n rnn_obj = rnn_layer(units=10, input_shape=(10, 10))\n model = tf.keras.models.Sequential([\n tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input'),\n rnn_obj,\n ])\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n @parameterized.named_parameters(('LSTM', tf.keras.layers.LSTM),\n ('SimpleRNN', tf.keras.layers.SimpleRNN),\n ('GRU', tf.keras.layers.GRU))\n @test_util.run_v2_only\n def testKerasRNNMultiBatches(self, rnn_layer):\n input_data = tf.constant(\n np.array(np.random.random_sample((4, 10, 10)), dtype=np.float32))\n # Specify a fixed batch size (4) for the test model.\n x = tf.keras.layers.Input(batch_shape=(4, 10, 10))\n y = rnn_layer(units=10, input_shape=(10, 10))(x)\n model = tf.keras.Model(inputs=[x], outputs=[y])\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n @test_util.run_v2_only\n def testKerasBidirectionalRNNReturnSequence(self):\n input_data = tf.constant(\n np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input'))\n model.add(\n tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(units=10, return_sequences=True),\n input_shape=(10, 10)))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(5))\n model.add(tf.keras.layers.Activation('softmax'))\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n @test_util.run_v2_only\n def testKerasBidirectionalRNN(self):\n input_data = tf.constant(\n np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input'))\n model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=10)))\n model.add(tf.keras.layers.Dense(5))\n model.add(tf.keras.layers.Activation('softmax'))\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n\nclass GrapplerTest(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testConstantFolding(self):\n # Constant folding handles the tf.broadcast_to operation which was not\n # supported by TFLite at the time this test was added.\n input_data = tf.constant([1., 2., 3., 4., 5., 6., 7., 8., 9.], shape=[3, 
3])\n\n @tf.function\n def func(x):\n y_const = tf.constant([1., 2., 3.])\n y_broadcast = tf.broadcast_to(y_const, [3, 3])\n return tf.matmul(x, y_broadcast)\n\n root = tracking.AutoTrackable()\n root.f = func\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n self.assertAllClose(expected_value, actual_value)\n\n # Enable hybrid quantization, same result\n converter.optimizations = [lite.Optimize.DEFAULT]\n tflite_model = converter.convert()\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n self.assertAllClose(expected_value, actual_value)\n\n\nclass UnknownShapes(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testMatMul(self):\n input_data = tf.constant(\n np.array(np.random.random_sample((10, 4)), dtype=np.float32))\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[None, 4], dtype=tf.float32)])\n def model(in_tensor):\n shape = tf.shape(in_tensor)\n fill = tf.transpose(tf.fill(shape, 1.))\n return tf.matmul(fill, in_tensor)\n\n concrete_func = model.get_concrete_function()\n\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = concrete_func(input_data)\n actual_value = self._evaluateTFLiteModel(\n tflite_model, [input_data], input_shapes=[([-1, 4], [10, 4])])[0]\n self.assertAllClose(expected_value, actual_value, atol=1e-06)\n\n def _getIntegerQuantizeModelWithUnknownShapes(self):\n np.random.seed(0)\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[None, 33], dtype=tf.float32)])\n def model(input_tensor):\n \"\"\"Define a model with tf.MatMul and unknown shapes.\"\"\"\n # We need the tensor to have more than 1024 elements for quantize_weights\n # to kick in. 
Thus, the [33, 33] shape.\n const_tensor = tf.constant(\n np.random.uniform(low=-10., high=10., size=[33, 33]),\n shape=[33, 33],\n dtype=tf.float32,\n name='inputB')\n\n shape = tf.shape(input_tensor)\n fill = tf.transpose(tf.fill(shape, 1.))\n mult = tf.matmul(fill, input_tensor)\n return tf.matmul(mult, const_tensor)\n\n root = tracking.AutoTrackable()\n root.f = model\n concrete_func = root.f.get_concrete_function()\n\n def calibration_gen():\n for batch in range(5, 20, 5):\n for _ in range(5):\n yield [np.random.uniform(-1, 1, size=(batch, 33)).astype(np.float32)]\n\n return concrete_func, calibration_gen\n\n @test_util.run_v2_only\n def testMatMulQuantize(self):\n concrete_func, _ = self._getIntegerQuantizeModelWithUnknownShapes()\n float_converter = lite.TFLiteConverterV2.from_concrete_functions(\n [concrete_func])\n float_tflite_model = float_converter.convert()\n\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(\n [concrete_func])\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_tflite_model = quantized_converter.convert()\n\n # The default input and output types should be float.\n quantized_interpreter = Interpreter(model_content=quantized_tflite_model)\n quantized_interpreter.allocate_tensors()\n input_details = quantized_interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(float_tflite_model))\n\n @test_util.run_v2_only\n def testMatMulCalibrateAndQuantize(self):\n concrete_func, calibration_gen = \\\n self._getIntegerQuantizeModelWithUnknownShapes()\n float_converter = lite.TFLiteConverterV2.from_concrete_functions(\n [concrete_func])\n float_tflite_model = float_converter.convert()\n\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(\n [concrete_func])\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n quantized_tflite_model = quantized_converter.convert()\n\n # The default input and output types should be float.\n quantized_interpreter = Interpreter(model_content=quantized_tflite_model)\n quantized_interpreter.allocate_tensors()\n input_details = quantized_interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(float_tflite_model))\n\n def testBatchMatMul(self):\n input_data_1 = tf.constant(\n np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))\n input_data_2 = tf.constant(\n np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32),\n tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32)\n ])\n def model(in_tensor_1, in_tensor_2):\n return tf.matmul(in_tensor_1, in_tensor_2)\n\n concrete_func = model.get_concrete_function()\n\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = concrete_func(input_data_1, input_data_2)\n actual_value = self._evaluateTFLiteModel(\n tflite_model, [input_data_1, 
input_data_2],\n input_shapes=[([-1, 256, 256], [1, 256, 256])])[0]\n self.assertAllClose(expected_value, actual_value, atol=4)\n\n def testSizeInvalid(self):\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[1, None, 16, 3], dtype=tf.float32)\n ])\n def model(in_tensor):\n return in_tensor + in_tensor\n\n concrete_func = model.get_concrete_function()\n\n # Test invalid shape. None after 1st dimension. Run with TOCO in order to\n # invoke shape checking code.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])\n converter.experimental_new_converter = False\n with self.assertRaises(ValueError) as error:\n converter.convert()\n self.assertEqual(\n 'None is only supported in the 1st dimension. Tensor '\n '\\'in_tensor\\' has invalid shape \\'[1, None, 16, 3]\\'.',\n str(error.exception))\n\n\nif __name__ == '__main__':\n test.main()\n"
] |
[
[
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.eager.context.executor_scope",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.eager.cancellation.CancellationManager",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.data.ops.iterator_ops.IteratorSpec",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.framework.func_graph.convert_structure_to_signature",
"tensorflow.python.eager.context.get_server_def",
"tensorflow.python.distribute.coordinator.metric_utils.monitored_timer",
"tensorflow.python.eager.executor.new_executor",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.errors.CancelledError",
"tensorflow.python.distribute.input_lib.InputWorkers"
],
[
"tensorflow.math.add",
"tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model",
"tensorflow.reduce_sum",
"tensorflow.lite.python.interpreter.Interpreter",
"numpy.random.random_sample",
"tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model",
"tensorflow.compat.v1.nn.rnn_cell.LSTMCell",
"tensorflow.compat.v1.nn.static_rnn",
"tensorflow.keras.layers.ConvLSTM2D",
"tensorflow.python.saved_model.saved_model.simple_save",
"tensorflow.math.tanh",
"tensorflow.python.saved_model.save_options.SaveOptions",
"tensorflow.Graph",
"tensorflow.while_loop",
"tensorflow.Variable",
"tensorflow.lite.TFLiteConverter.from_concrete_functions",
"tensorflow.python.saved_model.loader_impl.parse_saved_model",
"tensorflow.gather",
"tensorflow.python.platform.test.main",
"tensorflow.math.ceil",
"tensorflow.add",
"tensorflow.quantization.fake_quant_with_min_max_vars",
"tensorflow.lite.TFLiteConverter.from_saved_model",
"tensorflow.random_normal_initializer",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten",
"tensorflow.compat.v1.nn.dynamic_rnn",
"tensorflow.math.erf",
"tensorflow.matmul",
"tensorflow.fill",
"tensorflow.python.saved_model.save.save",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.python.training.tracking.tracking.AutoTrackable",
"tensorflow.shape",
"tensorflow.keras.Model",
"tensorflow.nn.tanh",
"tensorflow.function",
"tensorflow.split",
"numpy.array",
"tensorflow.nn.relu",
"tensorflow.keras.initializers.Constant",
"tensorflow.constant",
"numpy.random.random",
"numpy.random.seed",
"tensorflow.keras.layers.Activation",
"tensorflow.python.lib.io.file_io.FileIO",
"tensorflow.broadcast_to",
"tensorflow.lite.python.convert.mlir_quantize",
"tensorflow.compat.v1.variables_initializer",
"tensorflow.keras.layers.concatenate",
"tensorflow.ones",
"tensorflow.compat.v1.Session",
"numpy.random.uniform",
"tensorflow.compat.v1.placeholder",
"tensorflow.keras.layers.LSTM",
"tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions",
"tensorflow.keras.layers.Dropout",
"tensorflow.TensorSpec",
"tensorflow.keras.layers.Input"
]
] |
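The test file in the record above exercises one converter pattern over and over: post-training full-integer quantization driven by a representative dataset. As a companion to the API list, here is a minimal sketch of that flow. The `saved_model_dir` path and the calibration input shape are hypothetical placeholders; the converter and interpreter calls mirror the ones used in the tests.

```python
# Minimal post-training full-integer quantization sketch, distilled from the
# test patterns above. Path and shapes are hypothetical.
import numpy as np
import tensorflow as tf

saved_model_dir = '/tmp/my_saved_model'  # hypothetical path

def representative_dataset():
  # Yield a handful of realistic inputs so the converter can calibrate
  # activation ranges (same role as calibration_gen in the tests).
  for _ in range(5):
    yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
# Restrict conversion to the int8 builtin kernels and make I/O int8 too.
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model = converter.convert()

# Sanity-check the result the same way the tests do.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
print(interpreter.get_input_details()[0]['dtype'])  # numpy.int8
```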
Ulysses0817/Autoencoder
|
[
"b7a088d884cbbeca44669027130a0c458ee7be75"
] |
[
"dnn_app_utils_v2.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport math\nimport os\nfrom forward import *\n#os.makedirs(\"F:\\\\ProgramData\\\\NVIDIA Corporation\\\\CUDA Samples\\\\v9.0\")\n# def sigmoid(Z):\n\t# \"\"\"\n\t# Implements the sigmoid activation in numpy\n\t\n\t# Arguments:\n\t# Z -- numpy array of any shape\n\t\n\t# Returns:\n\t# A -- output of sigmoid(z), same shape as Z\n\t# cache -- returns Z as well, useful during backpropagation\n\t# \"\"\"\n\t\n\t# A = 1/(1+np.exp(-Z))\n\t# cache = Z\n\t\n\t# return A, cache\n\n# def relu(Z):\n\t# \"\"\"\n\t# Implement the RELU function.\n\n\t# Arguments:\n\t# Z -- Output of the linear layer, of any shape\n\n\t# Returns:\n\t# A -- Post-activation parameter, of the same shape as Z\n\t# cache -- a python dictionary containing \"A\" ; stored for computing the backward pass efficiently\n\t# \"\"\"\n\t\n\t# A = np.maximum(0,Z)\n\t\n\t# assert(A.shape == Z.shape)\n\t\n\t# cache = Z \n\t# return A, cache\n\n\n# def relu_backward(dA, cache):\n\t# \"\"\"\n\t# Implement the backward propagation for a single RELU unit.\n\n\t# Arguments:\n\t# dA -- post-activation gradient, of any shape\n\t# cache -- 'Z' where we store for computing backward propagation efficiently\n\n\t# Returns:\n\t# dZ -- Gradient of the cost with respect to Z\n\t# \"\"\"\n\t\n\t# Z = cache\n\t# dZ = np.array(dA, copy=True) # just converting dz to a correct object.\n\t\n\t# # When z <= 0, you should set dz to 0 as well. \n\t# dZ[Z <= 0] = 0\n\t\n\t# assert (dZ.shape == Z.shape)\n\t\n\t# return dZ\n\n# def sigmoid_backward(dA, cache):\n\t# \"\"\"\n\t# Implement the backward propagation for a single SIGMOID unit.\n\n\t# Arguments:\n\t# dA -- post-activation gradient, of any shape\n\t# cache -- 'Z' where we store for computing backward propagation efficiently\n\n\t# Returns:\n\t# dZ -- Gradient of the cost with respect to Z\n\t# \"\"\"\n\t\n\t# Z = cache\n\t\n\t# s = 1/(1+np.exp(-Z))\n\t# dZ = dA * s * (1-s)\n\t\n\t# assert (dZ.shape == Z.shape)\n\t\n\t# return dZ\n\n\ndef load_data():\n\ttrain_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n\ttrain_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n\ttrain_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n\ttest_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n\ttest_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n\ttest_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n\tclasses = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n\t\n\ttrain_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n\ttest_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n\t\n\treturn train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes\n\n\n# def initialize_parameters(n_x, n_h, n_y):\n\t# \"\"\"\n\t# Argument:\n\t# n_x -- size of the input layer\n\t# n_h -- size of the hidden layer\n\t# n_y -- size of the output layer\n\t\n\t# Returns:\n\t# parameters -- python dictionary containing your parameters:\n\t\t\t\t\t# W1 -- weight matrix of shape (n_h, n_x)\n\t\t\t\t\t# b1 -- bias vector of shape (n_h, 1)\n\t\t\t\t\t# W2 -- weight matrix of shape (n_y, n_h)\n\t\t\t\t\t# b2 -- bias vector of shape (n_y, 1)\n\t# \"\"\"\n\t\n\t# np.random.seed(1)\n\t\n\t# W1 = np.random.randn(n_h, n_x)*0.01\n\t# b1 = np.zeros((n_h, 1))\n\t# W2 = np.random.randn(n_y, n_h)*0.01\n\t# b2 = np.zeros((n_y, 1))\n\t\n\t# assert(W1.shape == (n_h, 
n_x))\n\t# assert(b1.shape == (n_h, 1))\n\t# assert(W2.shape == (n_y, n_h))\n\t# assert(b2.shape == (n_y, 1))\n\t\n\t# parameters = {\"W1\": W1,\n\t\t\t\t # \"b1\": b1,\n\t\t\t\t # \"W2\": W2,\n\t\t\t\t # \"b2\": b2}\n\t\n\t# return parameters \n\n# # Xavier/He initialization\n# def initialize_parameters_deep(layer_dims, intype=\"Xavier\"):\n\t# \"\"\"\n\t# Arguments:\n\t# layer_dims -- python array (list) containing the dimensions of each layer in our network\n\t\n\t# Returns:\n\t# parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n\t\t\t\t\t# Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n\t\t\t\t\t# bl -- bias vector of shape (layer_dims[l], 1)\n\t# \"\"\"\n\t\n\t# np.random.seed(1)\n\t# parameters = {}\n\t# L = len(layer_dims) # number of layers in the network\n\t\n\t# ininum = 2 if intype == \"He\" else 1\n\t# for l in range(1, L):\n\t\t# parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * np.sqrt(ininum/layer_dims[l-1]) #*0.01\n\t\t# parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n\t\t\n\t\t# assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))\n\t\t# assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\n\n\t\t\n\t# return parameters\n\n# # GRADED FUNCTION: random_mini_batches\n# def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n\t# \"\"\"\n\t# Creates a list of random minibatches from (X, Y)\n\t\n\t# Arguments:\n\t# X -- input data, of shape (input size, number of examples)\n\t# Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)\n\t# mini_batch_size -- size of the mini-batches, integer\n\t\n\t# Returns:\n\t# mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)\n\t# \"\"\"\n\t\n\t# np.random.seed(seed) # To make your \"random\" minibatches the same as ours\n\t# m = X.shape[1] # number of training examples\n\t# mini_batches = []\n\t\t\n\t# # Step 1: Shuffle (X, Y)\n\t# permutation = list(np.random.permutation(m))\n\t# shuffled_X = X[:, permutation]\n\t# shuffled_Y = Y[:, permutation].reshape((1,m))\n\n\t# # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n\t# num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n\t# for k in range(0, num_complete_minibatches):\n\t\t# ### START CODE HERE ### (approx. 2 lines)\n\t\t# mini_batch_X = shuffled_X[:, k*mini_batch_size:(k+1)*mini_batch_size]\n\t\t# mini_batch_Y = shuffled_Y[:, k*mini_batch_size:(k+1)*mini_batch_size]\n\t\t# ### END CODE HERE ###\n\t\t# mini_batch = (mini_batch_X, mini_batch_Y)\n\t\t# mini_batches.append(mini_batch)\n\t\n\t# # Handling the end case (last mini-batch < mini_batch_size)\n\t# if m % mini_batch_size != 0:\n\t\t# ### START CODE HERE ### (approx. 
2 lines)\n\t\t# mini_batch_X = shuffled_X[:, (k+1)*mini_batch_size:]\n\t\t# mini_batch_Y = shuffled_Y[:, (k+1)*mini_batch_size:]\n\t\t# ### END CODE HERE ###\n\t\t# mini_batch = (mini_batch_X, mini_batch_Y)\n\t\t# mini_batches.append(mini_batch)\n\t\n\t# return mini_batches\n\t\n# # GRADED FUNCTION: initialize_velocity\n# def initialize_velocity(parameters):\n\t# \"\"\"\n\t# Initializes the velocity as a python dictionary with:\n\t\t\t\t# - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n\t\t\t\t# - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n\t# Arguments:\n\t# parameters -- python dictionary containing your parameters.\n\t\t\t\t\t# parameters['W' + str(l)] = Wl\n\t\t\t\t\t# parameters['b' + str(l)] = bl\n\t\n\t# Returns:\n\t# v -- python dictionary containing the current velocity.\n\t\t\t\t\t# v['dW' + str(l)] = velocity of dWl\n\t\t\t\t\t# v['db' + str(l)] = velocity of dbl\n\t# \"\"\"\n\t\n\t# L = len(parameters) // 2 # number of layers in the neural networks\n\t# v = {}\n\t\n\t# # Initialize velocity\n\t# for l in range(L):\n\t\t# v[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n\t\t# v[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n\t\t\n\t# return v\n\t\n# # GRADED FUNCTION: initialize_adam\n# def initialize_adam(parameters) :\n\t# \"\"\"\n\t# Initializes v and s as two python dictionaries with:\n\t\t\t\t# - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n\t\t\t\t# - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n\t\n\t# Arguments:\n\t# parameters -- python dictionary containing your parameters.\n\t\t\t\t\t# parameters[\"W\" + str(l)] = Wl\n\t\t\t\t\t# parameters[\"b\" + str(l)] = bl\n\t\n\t# Returns: \n\t# v -- python dictionary that will contain the exponentially weighted average of the gradient.\n\t\t\t\t\t# v[\"dW\" + str(l)] = ...\n\t\t\t\t\t# v[\"db\" + str(l)] = ...\n\t# s -- python dictionary that will contain the exponentially weighted average of the squared gradient.\n\t\t\t\t\t# s[\"dW\" + str(l)] = ...\n\t\t\t\t\t# s[\"db\" + str(l)] = ...\n\n\t# \"\"\"\n\t\n\t# L = len(parameters) // 2 # number of layers in the neural networks\n\t# v = {}\n\t# s = {}\n\t\n\t# # Initialize v, s. Input: \"parameters\". 
Outputs: \"v, s\".\n\t# for l in range(L):\n\t\t# v[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\"+str(l+1)].shape)\n\t\t# v[\"db\" + str(l+1)] = np.zeros(parameters[\"b\"+str(l+1)].shape)\n\t\t# s[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\"+str(l+1)].shape)\n\t\t# s[\"db\" + str(l+1)] = np.zeros(parameters[\"b\"+str(l+1)].shape)\n\t\n\t# return v, s\n\t\n# def linear_forward(A, W, b):\n\t# \"\"\"\n\t# Implement the linear part of a layer's forward propagation.\n\n\t# Arguments:\n\t# A -- activations from previous layer (or input data): (size of previous layer, number of examples)\n\t# W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n\t# b -- bias vector, numpy array of shape (size of the current layer, 1)\n\n\t# Returns:\n\t# Z -- the input of the activation function, also called pre-activation parameter \n\t# cache -- a python dictionary containing \"A\", \"W\" and \"b\" ; stored for computing the backward pass efficiently\n\t# \"\"\"\n\t\n\t# Z = W.dot(A) + b\n\t\n\t# assert(Z.shape == (W.shape[0], A.shape[1]))\n\t# cache = (A, W, b)\n\t\n\t# return Z, cache\n\n# def linear_activation_forward(A_prev, W, b, activation):\n\t# \"\"\"\n\t# Implement the forward propagation for the LINEAR->ACTIVATION layer\n\n\t# Arguments:\n\t# A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)\n\t# W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n\t# b -- bias vector, numpy array of shape (size of the current layer, 1)\n\t# activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n\n\t# Returns:\n\t# A -- the output of the activation function, also called the post-activation value \n\t# cache -- a python dictionary containing \"linear_cache\" and \"activation_cache\";\n\t\t\t # stored for computing the backward pass efficiently\n\t# \"\"\"\n\t\n\t# if activation == \"sigmoid\":\n\t\t# # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n\t\t# Z, linear_cache = linear_forward(A_prev, W, b)\n\t\t# A, activation_cache = sigmoid(Z)\n\t\n\t# elif activation == \"relu\":\n\t\t# # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n\t\t# Z, linear_cache = linear_forward(A_prev, W, b)\n\t\t# A, activation_cache = relu(Z)\n\t\n\t# assert (A.shape == (W.shape[0], A_prev.shape[1]))\n\t# cache = (linear_cache, activation_cache)\n\n\t# return A, cache\n\n# def L_model_forward(X, parameters):\n\t# \"\"\"\n\t# Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation\n\t\n\t# Arguments:\n\t# X -- data, numpy array of shape (input size, number of examples)\n\t# parameters -- output of initialize_parameters_deep()\n\t\n\t# Returns:\n\t# AL -- last post-activation value\n\t# caches -- list of caches containing:\n\t\t\t\t# every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)\n\t\t\t\t# the cache of linear_sigmoid_forward() (there is one, indexed L-1)\n\t# \"\"\"\n\n\t# caches = []\n\t# A = X\n\t# L = len(parameters) // 2 # number of layers in the neural network\n\t\n\t# # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\n\t# for l in range(1, L):\n\t\t# A_prev = A \n\t\t# A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation = \"relu\")\n\t\t# caches.append(cache)\n\t\n\t# # Implement LINEAR -> SIGMOID. 
Add \"cache\" to the \"caches\" list.\n\t# AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation = \"sigmoid\")\n\t# caches.append(cache)\n\t\n\t# assert(AL.shape == (1,X.shape[1]))\n\t\n\t# return AL, caches\n\n# def compute_cost(AL, Y):\n\t# \"\"\"\n\t# Implement the cost function defined by equation (7).\n\n\t# Arguments:\n\t# AL -- probability vector corresponding to your label predictions, shape (1, number of examples)\n\t# Y -- true \"label\" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)\n\n\t# Returns:\n\t# cost -- cross-entropy cost\n\t# \"\"\"\n\t\n\t# m = Y.shape[1]\n\t\n\t# # Compute loss from aL and y.\n\t# cost = (1./m) * (-np.dot(Y,np.log(AL).T) - np.dot(1-Y, np.log(1-AL).T))\n\t\n\t# cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).\n\t# assert(cost.shape == ())\n\t\n\t# return cost\n\n# # GRADED FUNCTION: compute_cost_with_regularization\n# def compute_cost(AL, Y, parameters=0, lambd=0):\n\t# \"\"\"\n\t# Implement the cost function with L2 regularization. See formula (2) above.\n\t\n\t# Arguments:\n\t# A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)\n\t# Y -- \"true\" labels vector, of shape (output size, number of examples)\n\t# parameters -- python dictionary containing parameters of the model\n\t\n\t# Returns:\n\t# cost - value of the regularized loss function (formula (2))\n\t# \"\"\"\n\t# m = Y.shape[1]\n\t\n\t# # Compute loss from aL and y.\n\t# cross_entropy_cost = (1./m) * (-np.dot(Y,np.log(AL).T) - np.dot(1-Y, np.log(1-AL).T))\n\t\n\t# cross_entropy_cost = np.squeeze(cross_entropy_cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).\n\t# assert(cross_entropy_cost.shape == ())\n\t\n\t# L2_regularization_sum = 0\n\t\n\t# if lambd != 0:\n\t\t# L = len(parameters)//2\n\t\t# for l in range(L):\n\t\t\t# L2_regularization_sum = L2_regularization_cost + np.sum(np.square(parameters[\"W\"+str(l+1)])) # L2_regularization_sum equals to W^1 + ... 
+W^L\n\t# L2_regularization_cost = L2_regularization_sum * lambd/(2*m)\n\t\n\t# cost = cross_entropy_cost + L2_regularization_cost\n\t\n\t# return cost\n\n# def linear_backward(dZ, cache):\n\t# \"\"\"\n\t# Implement the linear portion of backward propagation for a single layer (layer l)\n\n\t# Arguments:\n\t# dZ -- Gradient of the cost with respect to the linear output (of current layer l)\n\t# cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer\n\n\t# Returns:\n\t# dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n\t# dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n\t# db -- Gradient of the cost with respect to b (current layer l), same shape as b\n\t# \"\"\"\n\t# A_prev, W, b = cache\n\t# m = A_prev.shape[1]\n\n\t# dW = 1./m * np.dot(dZ,A_prev.T)\n\t# db = 1./m * np.sum(dZ, axis = 1, keepdims = True)\n\t# dA_prev = np.dot(W.T,dZ)\n\t\n\t# assert (dA_prev.shape == A_prev.shape)\n\t# assert (dW.shape == W.shape)\n\t# assert (db.shape == b.shape)\n\t\n\t# return dA_prev, dW, db\n\n# def linear_activation_backward(dA, cache, activation):\n\t# \"\"\"\n\t# Implement the backward propagation for the LINEAR->ACTIVATION layer.\n\t\n\t# Arguments:\n\t# dA -- post-activation gradient for current layer l \n\t# cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently\n\t# activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n\t\n\t# Returns:\n\t# dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n\t# dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n\t# db -- Gradient of the cost with respect to b (current layer l), same shape as b\n\t# \"\"\"\n\t# linear_cache, activation_cache = cache\n\t\n\t# if activation == \"relu\":\n\t\t# dZ = relu_backward(dA, activation_cache)\n\t\t# dA_prev, dW, db = linear_backward(dZ, linear_cache)\n\t\t\n\t# elif activation == \"sigmoid\":\n\t\t# dZ = sigmoid_backward(dA, activation_cache)\n\t\t# dA_prev, dW, db = linear_backward(dZ, linear_cache)\n\t\n\t# return dA_prev, dW, db\n\n# def L_model_backward(AL, Y, caches):\n\t# \"\"\"\n\t# Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group\n\t\n\t# Arguments:\n\t# AL -- probability vector, output of the forward propagation (L_model_forward())\n\t# Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat)\n\t# caches -- list of caches containing:\n\t\t\t\t# every cache of linear_activation_forward() with \"relu\" (there are (L-1) or them, indexes from 0 to L-2)\n\t\t\t\t# the cache of linear_activation_forward() with \"sigmoid\" (there is one, index L-1)\n\t\n\t# Returns:\n\t# grads -- A dictionary with the gradients\n\t\t\t # grads[\"dA\" + str(l)] = ... \n\t\t\t # grads[\"dW\" + str(l)] = ...\n\t\t\t # grads[\"db\" + str(l)] = ... \n\t# \"\"\"\n\t# grads = {}\n\t# L = len(caches) # the number of layers\n\t# m = AL.shape[1]\n\t# Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n\t\n\t# # Initializing the backpropagation\n\t# dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\n\t\n\t# # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: \"AL, Y, caches\". 
Outputs: \"grads[\"dAL\"], grads[\"dWL\"], grads[\"dbL\"]\n\t# current_cache = caches[L-1]\n\t# grads[\"dA\" + str(L-1)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dAL, current_cache, activation = \"sigmoid\")\n\t\n\t# for l in reversed(range(L-1)):\n\t\t# # lth layer: (RELU -> LINEAR) gradients.\n\t\t# current_cache = caches[l]\n\t\t# dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 1)], current_cache, activation = \"relu\")\n\t\t# grads[\"dA\" + str(l)] = dA_prev_temp\n\t\t# grads[\"dW\" + str(l + 1)] = dW_temp\n\t\t# grads[\"db\" + str(l + 1)] = db_temp\n\n\t# return grads\n\n# def update_parameters(parameters, grads, learning_rate):\n\t# \"\"\"\n\t# Update parameters using gradient descent\n\t\n\t# Arguments:\n\t# parameters -- python dictionary containing your parameters \n\t# grads -- python dictionary containing your gradients, output of L_model_backward\n\t\n\t# Returns:\n\t# parameters -- python dictionary containing your updated parameters \n\t\t\t\t # parameters[\"W\" + str(l)] = ... \n\t\t\t\t # parameters[\"b\" + str(l)] = ...\n\t# \"\"\"\n\t\n\t# L = len(parameters) // 2 # number of layers in the neural network\n\n\t# # Update rule for each parameter. Use a for loop.\n\t# for l in range(L):\n\t\t# parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * grads[\"dW\" + str(l+1)]\n\t\t# parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * grads[\"db\" + str(l+1)]\n\t\t\n\t# return parameters\n\n# # GRADED FUNCTION: update_parameters_with_momentum\n# def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):\n\t# \"\"\"\n\t# Update parameters using Momentum\n\t\n\t# Arguments:\n\t# parameters -- python dictionary containing your parameters:\n\t\t\t\t\t# parameters['W' + str(l)] = Wl\n\t\t\t\t\t# parameters['b' + str(l)] = bl\n\t# grads -- python dictionary containing your gradients for each parameters:\n\t\t\t\t\t# grads['dW' + str(l)] = dWl\n\t\t\t\t\t# grads['db' + str(l)] = dbl\n\t# v -- python dictionary containing the current velocity:\n\t\t\t\t\t# v['dW' + str(l)] = ...\n\t\t\t\t\t# v['db' + str(l)] = ...\n\t# beta -- the momentum hyperparameter, scalar\n\t# learning_rate -- the learning rate, scalar\n\t\n\t# Returns:\n\t# parameters -- python dictionary containing your updated parameters \n\t# v -- python dictionary containing your updated velocities\n\t# \"\"\"\n\n\t# L = len(parameters) // 2 # number of layers in the neural networks\n\t\n\t# # Momentum update for each parameter\n\t# for l in range(L):\n\t\t\n\t\t# #(approx. 
4 lines)\n\t\t# # compute velocities\n\t\t# v[\"dW\" + str(l+1)] = beta*v[\"dW\" + str(l+1)] + (1 - beta)*grads[\"dW\" + str(l+1)]\n\t\t# v[\"db\" + str(l+1)] = beta*v[\"db\" + str(l+1)] + (1 - beta)*grads[\"db\" + str(l+1)]\n\t\t# # update parameters\n\t\t# parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate*v[\"dW\" + str(l+1)]\n\t\t# parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate*v[\"db\" + str(l+1)]\n\t\t\n\t\t\n\t# return parameters, v\n\n# # GRADED FUNCTION: update_parameters_with_adam\n\n# def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,\n\t\t\t\t\t\t\t\t# beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):\n\t# \"\"\"\n\t# Update parameters using Adam\n\t\n\t# Arguments:\n\t# parameters -- python dictionary containing your parameters:\n\t\t\t\t\t# parameters['W' + str(l)] = Wl\n\t\t\t\t\t# parameters['b' + str(l)] = bl\n\t# grads -- python dictionary containing your gradients for each parameters:\n\t\t\t\t\t# grads['dW' + str(l)] = dWl\n\t\t\t\t\t# grads['db' + str(l)] = dbl\n\t# v -- Adam variable, moving average of the first gradient, python dictionary\n\t# s -- Adam variable, moving average of the squared gradient, python dictionary\n\t# learning_rate -- the learning rate, scalar.\n\t# beta1 -- Exponential decay hyperparameter for the first moment estimates \n\t# beta2 -- Exponential decay hyperparameter for the second moment estimates \n\t# epsilon -- hyperparameter preventing division by zero in Adam updates\n\n\t# Returns:\n\t# parameters -- python dictionary containing your updated parameters \n\t# v -- Adam variable, moving average of the first gradient, python dictionary\n\t# s -- Adam variable, moving average of the squared gradient, python dictionary\n\t# \"\"\"\n\t\n\t# L = len(parameters) // 2 # number of layers in the neural networks\n\t# v_corrected = {} # Initializing first moment estimate, python dictionary\n\t# s_corrected = {} # Initializing second moment estimate, python dictionary\n\t\n\t# # Perform Adam update on all parameters\n\t# for l in range(L):\n\t\t# # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n\t\t# v[\"dW\" + str(l+1)] = beta1*v[\"dW\" + str(l+1)] + (1-beta1)*grads[\"dW\" + str(l+1)]\n\t\t# v[\"db\" + str(l+1)] = beta1*v[\"db\" + str(l+1)] + (1-beta1)*grads[\"db\" + str(l+1)]\n\n\t\t# # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n\t\t# v_corrected[\"dW\" + str(l+1)] = v[\"dW\" + str(l+1)]/(1-beta1**t)\n\t\t# v_corrected[\"db\" + str(l+1)] = v[\"db\" + str(l+1)]/(1-beta1**t)\n\n\t\t# # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". Output: \"s\".\n\t\t# s[\"dW\" + str(l+1)] = beta2*s[\"dW\" + str(l+1)] + (1-beta2)*grads[\"dW\" + str(l+1)]**2\n\t\t# s[\"db\" + str(l+1)] = beta2*s[\"db\" + str(l+1)] + (1-beta2)*grads[\"db\" + str(l+1)]**2\n\n\t\t# # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". Output: \"s_corrected\".\n\t\t# s_corrected[\"dW\" + str(l+1)] = s[\"dW\" + str(l+1)]/(1-beta2**t)\n\t\t# s_corrected[\"db\" + str(l+1)] = s[\"db\" + str(l+1)]/(1-beta2**t) \n\n\t\t# # Update parameters. Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". 
Output: \"parameters\".\n\t\t# parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate*v_corrected[\"dW\" + str(l+1)]/(np.sqrt(s_corrected[\"dW\" + str(l+1)])+epsilon)\n\t\t# parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate*v_corrected[\"db\" + str(l+1)]/(np.sqrt(s_corrected[\"db\" + str(l+1)])+epsilon)\n\t\t\n\t# return parameters, v, s\n\t\ndef predict(X, y, parameters, hidden_activation=None, output_activation=\"sigmoid\"):\n\t\"\"\"\n\tThis function is used to predict the results of a L-layer neural network.\n\t\n\tArguments:\n\tX -- data set of examples you would like to label\n\tparameters -- parameters of the trained model\n\t\n\tReturns:\n\tp -- predictions for the given dataset X\n\t\"\"\"\n\t\n\tm = X.shape[1]\n\tn = len(parameters) // 2 # number of layers in the neural network\n\tp = np.zeros((1,m))\n\t\n\t# Forward propagation\n\tprobas, caches = L_model_forward(X, parameters, 1, hidden_activation, output_activation)\n\t\n\t# If the activation of the output layer is \"sigmoid\", compute the accuracy like this.\n\tif output_activation == \"sigmoid\":\n\t\t# convert probas to 0/1 predictions\n\t\tfor i in range(0, probas.shape[1]):\n\t\t\tif probas[0,i] > 0.5:\n\t\t\t\tp[0,i] = 1\n\t\t\telse:\n\t\t\t\tp[0,i] = 0\n\t\t#print results\n\t\t#print (\"predictions: \" + str(p))\n\t\t#print (\"true labels: \" + str(y))\n\t\tprint(\"Accuracy: \" + str(np.sum((p == y)/m)))\n\t\treturn (probas>0.5)\n\telif output_activation == \"softmax\":\n\t\t# convert probas to 0/\n\t\tprob_max_index = np.argmax(probas, axis=0)\n\t\tpred_true = np.argmax(y, axis=0)\n\t\taccuracy = np.sum((prob_max_index==pred_true))/m\n\t\tprint(\"Accuracy: \", accuracy)\n\t\treturn prob_max_index\n\t\t\ndef print_mislabeled_images(classes, X, y, p):\n\t\"\"\"\n\tPlots images where predictions and truth were different.\n\tX -- dataset\n\ty -- true labels\n\tp -- predictions\n\t\"\"\"\n\ta = p + y\n\tmislabeled_indices = np.asarray(np.where(a == 1))\n\tplt.rcParams['figure.figsize'] = (40.0, 40.0) # set default size of plots\n\tnum_images = len(mislabeled_indices[0])\n\tfor i in range(num_images):\n\t\tindex = mislabeled_indices[1][i]\n\t\t\n\t\tplt.subplot(2, num_images, i + 1)\n\t\tplt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')\n\t\tplt.axis('off')\n\t\tplt.title(\"Prediction: \" + classes[int(p[0,index])].decode(\"utf-8\") + \" \\n Class: \" + classes[y[0,index]].decode(\"utf-8\"))\n"
] |
[
[
"numpy.sum",
"matplotlib.pyplot.subplot",
"numpy.argmax",
"matplotlib.pyplot.axis",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
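The commented-out optimizer code in the record above walks the standard momentum and Adam update rules. As a minimal, self-contained NumPy sketch of the same Adam step for a single parameter array (the function and variable names below are illustrative, not taken from the record):

import numpy as np

def adam_step(w, dw, v, s, t, lr=0.01, beta1=0.9, beta2=0.999, eps=1e-8):
    # first- and second-moment moving averages of the gradient
    v = beta1 * v + (1 - beta1) * dw
    s = beta2 * s + (1 - beta2) * dw ** 2
    # bias correction compensates for the zero initialization of v and s
    v_hat = v / (1 - beta1 ** t)
    s_hat = s / (1 - beta2 ** t)
    # scaled update; eps guards against division by zero
    return w - lr * v_hat / (np.sqrt(s_hat) + eps), v, s

w, v, s = np.zeros(3), np.zeros(3), np.zeros(3)
w, v, s = adam_step(w, np.array([0.1, -0.2, 0.3]), v, s, t=1)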
FelixZhang7/miemiedetection
|
[
"ca44f33255e0bb9d6150044983a344fb9a288c08"
] |
[
"mmdet/models/custom_layers.py"
] |
[
"#! /usr/bin/env python\n# coding=utf-8\n# ================================================================\n#\n# Author : miemie2013\n# Created date:\n# Description :\n#\n# ================================================================\nimport torch\nimport torch as T\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport numpy as np\n# import paddle.fluid as fluid\n# from paddle import ParamAttr\n# from paddle.regularizer import L2Decay\n# from paddle.nn.initializer import Uniform\n# from paddle.nn.initializer import Constant\n# from paddle.vision.ops import DeformConv2D\n\n\ndef paddle_yolo_box(conv_output, anchors, stride, num_classes, scale_x_y, im_size, clip_bbox, conf_thresh):\n conv_output = conv_output.permute(0, 2, 3, 1)\n conv_shape = conv_output.shape\n batch_size = conv_shape[0]\n output_size = conv_shape[1]\n anchor_per_scale = len(anchors)\n conv_output = conv_output.reshape((batch_size, output_size, output_size, anchor_per_scale, 5 + num_classes))\n conv_raw_dxdy = conv_output[:, :, :, :, 0:2]\n conv_raw_dwdh = conv_output[:, :, :, :, 2:4]\n conv_raw_conf = conv_output[:, :, :, :, 4:5]\n conv_raw_prob = conv_output[:, :, :, :, 5: ]\n\n rows = T.arange(0, output_size, dtype=T.float32, device=conv_raw_dxdy.device)\n cols = T.arange(0, output_size, dtype=T.float32, device=conv_raw_dxdy.device)\n rows = rows[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis].repeat((1, output_size, 1, 1, 1))\n cols = cols[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis].repeat((1, 1, output_size, 1, 1))\n offset = T.cat([rows, cols], dim=-1)\n offset = offset.repeat((batch_size, 1, 1, anchor_per_scale, 1))\n # Grid Sensitive\n pred_xy = (scale_x_y * T.sigmoid(conv_raw_dxdy) + offset - (scale_x_y - 1.0) * 0.5 ) * stride\n\n device_name = conv_raw_dwdh.device.type\n device_index = conv_raw_dwdh.device.index\n # _anchors = T.Tensor(anchors, device=exp_wh.device) # RuntimeError: legacy constructor for device type: cpu was passed device type: cuda, but device type must be: cpu\n _anchors = torch.from_numpy(anchors)\n if device_name == 'cuda':\n _anchors = torch.from_numpy(anchors).cuda(device_index)\n pred_wh = (T.exp(conv_raw_dwdh) * _anchors)\n\n pred_xyxy = T.cat([pred_xy - pred_wh / 2, pred_xy + pred_wh / 2], dim=-1) # 左上角xy + 右下角xy\n pred_conf = T.sigmoid(conv_raw_conf)\n # mask = (pred_conf > conf_thresh).float()\n pred_prob = T.sigmoid(conv_raw_prob)\n pred_scores = pred_conf * pred_prob\n # pred_scores = pred_scores * mask\n # pred_xyxy = pred_xyxy * mask\n\n # paddle中实际的顺序\n pred_xyxy = pred_xyxy.permute(0, 3, 1, 2, 4)\n pred_scores = pred_scores.permute(0, 3, 1, 2, 4)\n\n pred_xyxy = pred_xyxy.reshape((batch_size, output_size*output_size*anchor_per_scale, 4))\n pred_scores = pred_scores.reshape((batch_size, pred_xyxy.shape[1], num_classes))\n\n _im_size_h = im_size[:, 0:1]\n _im_size_w = im_size[:, 1:2]\n _im_size = T.cat([_im_size_w, _im_size_h], 1)\n _im_size = _im_size.unsqueeze(1)\n _im_size = _im_size.repeat((1, pred_xyxy.shape[1], 1))\n pred_x0y0 = pred_xyxy[:, :, 0:2] / output_size / stride * _im_size\n pred_x1y1 = pred_xyxy[:, :, 2:4] / output_size / stride * _im_size\n if clip_bbox:\n x0 = pred_x0y0[:, :, 0:1]\n y0 = pred_x0y0[:, :, 1:2]\n x1 = pred_x1y1[:, :, 0:1]\n y1 = pred_x1y1[:, :, 1:2]\n x0 = torch.where(x0 < 0, x0 * 0, x0)\n y0 = torch.where(y0 < 0, y0 * 0, y0)\n x1 = torch.where(x1 > _im_size[:, :, 0:1], _im_size[:, :, 0:1], x1)\n y1 = torch.where(y1 > _im_size[:, :, 1:2], _im_size[:, :, 1:2], y1)\n pred_xyxy = T.cat([x0, y0, x1, y1], -1)\n else:\n pred_xyxy 
= T.cat([pred_x0y0, pred_x1y1], -1)\n    return pred_xyxy, pred_scores\n\nclass MyDCNv2(nn.Module):\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size,\n                 stride,\n                 padding,\n                 dilation=1,\n                 groups=1,\n                 bias=False):\n        super(MyDCNv2, self).__init__()\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.stride = stride\n        self.padding = padding\n        self.dilation = dilation\n        if in_channels % groups != 0:\n            raise ValueError(\"in_channels must be divisible by groups.\")\n        self.groups = groups\n\n        filter_shape = [out_channels, in_channels // groups, kernel_size, kernel_size]\n\n        self.weight = torch.nn.Parameter(torch.randn(filter_shape))\n        self.bias = None\n        if bias:\n            self.bias = torch.nn.Parameter(torch.randn(out_channels, ))\n\n    def forward(self, x, offset, mask):\n        in_C = self.in_channels\n        out_C = self.out_channels\n        stride = self.stride\n        padding = self.padding\n        # dilation = self.dilation\n        groups = self.groups\n        N, _, H, W = x.shape\n        _, w_in, kH, kW = self.weight.shape\n        out_W = (W + 2 * padding - (kW - 1)) // stride\n        out_H = (H + 2 * padding - (kH - 1)) // stride\n\n        # ================== 1. Pad the image x to get the padded image pad_x ==================\n        pad_x_H = H + padding * 2 + 1\n        pad_x_W = W + padding * 2 + 1\n        pad_x = torch.zeros((N, in_C, pad_x_H, pad_x_W), dtype=torch.float32, device=x.device)\n        pad_x[:, :, padding:padding + H, padding:padding + W] = x\n\n        # ================== 2. Compute the coordinates of all sampling points ==================\n        # position of the kernel center inside pad_x\n        y_outer, x_outer = torch.meshgrid([torch.arange(out_H, device=x.device), torch.arange(out_W, device=x.device)])\n        y_outer = y_outer * stride + padding\n        x_outer = x_outer * stride + padding\n        start_pos_yx = torch.stack((y_outer, x_outer), 2).float()       # [out_H, out_W, 2]         just the kernel-center positions inside pad_x\n        start_pos_yx = start_pos_yx.unsqueeze(0).unsqueeze(3)           # [1, out_H, out_W, 1, 2]   just the kernel-center positions inside pad_x\n        start_pos_yx = torch.tile(start_pos_yx, [N, 1, 1, kH * kW, 1])  # [N, out_H, out_W, kH*kW, 2]   just the kernel-center positions inside pad_x\n        start_pos_y = start_pos_yx[:, :, :, :, :1]                      # [N, out_H, out_W, kH*kW, 1]   just the kernel-center positions inside pad_x\n        start_pos_x = start_pos_yx[:, :, :, :, 1:]                      # [N, out_H, out_W, kH*kW, 1]   just the kernel-center positions inside pad_x\n        start_pos_y.requires_grad = False\n        start_pos_x.requires_grad = False\n\n        # offsets inside the kernel\n        half_W = (kW - 1) // 2\n        half_H = (kH - 1) // 2\n        y_inner2, x_inner2 = torch.meshgrid([torch.arange(kH, device=x.device), torch.arange(kW, device=x.device)])\n        y_inner = y_inner2 - half_H\n        x_inner = x_inner2 - half_W\n        filter_inner_offset_yx = torch.stack((y_inner, x_inner), 2).float()                    # [kH, kW, 2]   offsets inside the kernel\n        filter_inner_offset_yx = torch.reshape(filter_inner_offset_yx, (1, 1, 1, kH * kW, 2))  # [1, 1, 1, kH*kW, 2]   offsets inside the kernel\n        filter_inner_offset_yx = torch.tile(filter_inner_offset_yx, [N, out_H, out_W, 1, 1])   # [N, out_H, out_W, kH*kW, 2]   offsets inside the kernel\n        filter_inner_offset_y = filter_inner_offset_yx[:, :, :, :, :1]                         # [N, out_H, out_W, kH*kW, 1]   offsets inside the kernel\n        filter_inner_offset_x = filter_inner_offset_yx[:, :, :, :, 1:]                         # [N, out_H, out_W, kH*kW, 1]   offsets inside the kernel\n        filter_inner_offset_y.requires_grad = False\n        filter_inner_offset_x.requires_grad = False\n\n        # predicted offsets\n        offset = offset.permute(0, 2, 3, 1)                            # [N, out_H, out_W, kH*kW*2]\n        offset_yx = torch.reshape(offset, (N, out_H, out_W, kH * kW, 2))  # [N, out_H, out_W, kH*kW, 2]\n        offset_y = offset_yx[:, :, :, :, :1]                           # [N, out_H, out_W, kH*kW, 1]\n        offset_x = offset_yx[:, :, :, :, 1:]                           # [N, out_H, out_W, kH*kW, 1]\n\n        # final sampling positions.\n        pos_y = start_pos_y + filter_inner_offset_y + offset_y   # [N, out_H, out_W, kH*kW, 1]\n        pos_x = start_pos_x + 
filter_inner_offset_x + offset_x   # [N, out_H, out_W, kH*kW, 1]\n        pos_y = torch.clamp(pos_y, 0.0, H + padding * 2 - 1.0)   # clamp the final sampling positions inside pad_x\n        pos_x = torch.clamp(pos_x, 0.0, W + padding * 2 - 1.0)   # clamp the final sampling positions inside pad_x\n\n        # ================== 3. Sampling: bilinear interpolation with F.grid_sample(). ==================\n        pos_x = pos_x / (pad_x_W - 1) * 2.0 - 1.0\n        pos_y = pos_y / (pad_x_H - 1) * 2.0 - 1.0\n        xtyt = torch.cat([pos_x, pos_y], -1)                     # [N, out_H, out_W, kH*kW, 2]\n        xtyt = torch.reshape(xtyt, (N, out_H, out_W * kH * kW, 2))   # [N, out_H, out_W*kH*kW, 2]\n        value = F.grid_sample(pad_x, xtyt, mode='bilinear', padding_mode='zeros', align_corners=True)   # [N, in_C, out_H, out_W*kH*kW]\n        value = torch.reshape(value, (N, in_C, out_H, out_W, kH * kW))   # [N, in_C, out_H, out_W, kH * kW]\n        value = value.permute(0, 1, 4, 2, 3)                     # [N, in_C, kH * kW, out_H, out_W]\n\n        # ================== 4. Multiply by the modulation (importance) mask ==================\n        # multiply by the modulation mask\n        mask = mask.unsqueeze(1)   # [N, 1, kH * kW, out_H, out_W]\n        value = value * mask       # [N, in_C, kH * kW, out_H, out_W]\n        new_x = torch.reshape(value, (N, in_C * kH * kW, out_H, out_W))   # [N, in_C * kH * kW, out_H, out_W]\n\n        # ================== 5. Multiply by this layer's weights and add the bias ==================\n        # 1x1 convolution\n        rw = torch.reshape(self.weight, (out_C, w_in * kH * kW, 1, 1))   # [out_C, w_in, kH, kW] -> [out_C, w_in*kH*kW, 1, 1] turned into 1x1 kernels\n        out = F.conv2d(new_x, rw, bias=self.bias, stride=1, groups=groups)   # [N, out_C, out_H, out_W]\n        return out\n\n\ndef get_norm(norm_type):\n    bn = 0\n    sync_bn = 0\n    gn = 0\n    af = 0\n    if norm_type == 'bn':\n        bn = 1\n    elif norm_type == 'sync_bn':\n        sync_bn = 1\n    elif norm_type == 'gn':\n        gn = 1\n    elif norm_type == 'in':\n        gn = 1\n    elif norm_type == 'ln':\n        gn = 1\n    elif norm_type == 'affine_channel':\n        af = 1\n    return bn, sync_bn, gn, af\n\n\n\n\nclass Mish(torch.nn.Module):\n    def __init__(self):\n        super(Mish, self).__init__()\n\n    def forward(self, x):\n        return x * torch.tanh(F.softplus(x))\n\n\nclass AffineChannel(torch.nn.Module):\n    def __init__(self, num_features):\n        super(AffineChannel, self).__init__()\n        self.weight = torch.nn.Parameter(torch.randn(num_features, ))\n        self.bias = torch.nn.Parameter(torch.randn(num_features, ))\n\n    def forward(self, x):\n        w = torch.reshape(self.weight, (1, -1, 1, 1))\n        b = torch.reshape(self.bias, (1, -1, 1, 1))\n        x = x * w + b\n        return x\n\n\nclass Conv2dUnit(torch.nn.Module):\n    def __init__(self,\n                 input_dim,\n                 filters,\n                 filter_size,\n                 stride=1,\n                 bias_attr=False,\n                 norm_type=None,\n                 groups=1,\n                 padding=None,\n                 norm_groups=32,\n                 act=None,\n                 freeze_norm=False,\n                 norm_decay=0.,\n                 lr=1.,\n                 bias_lr=None,\n                 weight_init=None,\n                 bias_init=None,\n                 use_dcn=False,\n                 name='',\n                 data_format='NCHW'):\n        super(Conv2dUnit, self).__init__()\n        self.filters = filters\n        self.filter_size = filter_size\n        self.stride = stride\n        self.padding = (filter_size - 1) // 2\n        if padding is not None:\n            self.padding = padding\n        self.act = act\n        self.freeze_norm = freeze_norm\n        self.norm_decay = norm_decay\n        self.use_dcn = use_dcn\n        self.name = name\n        self.lr = lr\n\n        # conv\n        conv_name = name\n        self.conv_offset = None\n        if use_dcn:\n            self.offset_channel = 2 * filter_size**2\n            self.mask_channel = filter_size**2\n            self.conv_offset = nn.Conv2d(\n                in_channels=input_dim,\n                out_channels=3 * filter_size**2,\n                kernel_size=filter_size,\n                stride=stride,\n                padding=self.padding,\n                bias=True)\n            torch.nn.init.constant_(self.conv_offset.weight, 0.0)\n            torch.nn.init.constant_(self.conv_offset.bias, 0.0)\n\n            # self-implemented DCNv2\n            self.conv = MyDCNv2(\n                in_channels=input_dim,\n                out_channels=filters,\n                
kernel_size=filter_size,\n                stride=stride,\n                padding=self.padding,\n                dilation=1,\n                groups=groups,\n                bias=bias_attr)\n            # initialize weights\n            torch.nn.init.xavier_normal_(self.conv.weight, gain=1.)\n            if bias_attr:\n                torch.nn.init.constant_(self.conv.bias, 0.0)\n        else:\n            self.conv = nn.Conv2d(\n                in_channels=input_dim,\n                out_channels=filters,\n                kernel_size=filter_size,\n                stride=stride,\n                padding=self.padding,\n                groups=groups,\n                bias=bias_attr)\n            # initialize weights\n            torch.nn.init.xavier_normal_(self.conv.weight, gain=1.)\n            if bias_attr:\n                torch.nn.init.constant_(self.conv.bias, 0.0)\n        blr = lr\n        if bias_lr:\n            blr = bias_lr\n        self.blr = blr\n\n\n        # norm\n        assert norm_type in [None, 'bn', 'sync_bn', 'gn', 'affine_channel', 'in', 'ln']\n        bn, sync_bn, gn, af = get_norm(norm_type)\n        if norm_type == 'in':\n            norm_groups = filters\n        if norm_type == 'ln':\n            norm_groups = 1\n        if conv_name == \"conv1\":\n            norm_name = \"bn_\" + conv_name\n            if gn:\n                norm_name = \"gn_\" + conv_name\n            if af:\n                norm_name = \"af_\" + conv_name\n        else:\n            norm_name = \"bn\" + conv_name[3:]\n            if gn:\n                norm_name = \"gn\" + conv_name[3:]\n            if af:\n                norm_name = \"af\" + conv_name[3:]\n        self.bn = None\n        self.gn = None\n        self.af = None\n        if bn:\n            self.bn = torch.nn.BatchNorm2d(filters)\n            torch.nn.init.constant_(self.bn.weight, 1.0)\n            torch.nn.init.constant_(self.bn.bias, 0.0)\n        if sync_bn:\n            self.bn = torch.nn.BatchNorm2d(filters)\n            torch.nn.init.constant_(self.bn.weight, 1.0)\n            torch.nn.init.constant_(self.bn.bias, 0.0)\n            # self.bn = torch.nn.SyncBatchNorm(filters, weight_attr=pattr, bias_attr=battr)\n        if gn:\n            self.gn = torch.nn.GroupNorm(num_groups=norm_groups, num_channels=filters)\n            torch.nn.init.constant_(self.gn.weight, 1.0)\n            torch.nn.init.constant_(self.gn.bias, 0.0)\n        if af:\n            self.af = AffineChannel(filters)\n            torch.nn.init.constant_(self.af.weight, 1.0)\n            torch.nn.init.constant_(self.af.bias, 0.0)\n\n        # act\n        self.act = None\n        if act == 'relu':\n            self.act = torch.nn.ReLU()\n        elif act == 'leaky':\n            self.act = torch.nn.LeakyReLU(0.1)\n        elif act == 'mish':\n            self.act = Mish()\n        elif act is None:\n            pass\n        else:\n            raise NotImplementedError(\"Activation \\'{}\\' is not implemented.\".format(act))\n\n\n    def freeze(self):\n        if self.conv is not None:\n            if self.conv.weight is not None:\n                self.conv.weight.requires_grad = False\n            if self.conv.bias is not None:\n                self.conv.bias.requires_grad = False\n        if self.conv_offset is not None:\n            if self.conv_offset.weight is not None:\n                self.conv_offset.weight.requires_grad = False\n            if self.conv_offset.bias is not None:\n                self.conv_offset.bias.requires_grad = False\n        if self.bn is not None:\n            self.bn.weight.requires_grad = False\n            self.bn.bias.requires_grad = False\n        if self.gn is not None:\n            self.gn.weight.requires_grad = False\n            self.gn.bias.requires_grad = False\n        if self.af is not None:\n            self.af.weight.requires_grad = False\n            self.af.bias.requires_grad = False\n\n    def fix_bn(self):\n        if self.bn is not None:\n            self.bn.eval()\n\n    def add_param_group(self, param_groups, base_lr, base_wd):\n        if isinstance(self.conv, torch.nn.Conv2d):\n            if self.conv.weight.requires_grad:\n                param_group_conv = {'params': [self.conv.weight]}\n                param_group_conv['lr'] = base_lr * self.lr\n                param_group_conv['base_lr'] = base_lr * self.lr\n                param_group_conv['weight_decay'] = base_wd\n                param_groups.append(param_group_conv)\n            if self.conv.bias is not None:\n                if self.conv.bias.requires_grad:\n                    param_group_conv_bias = {'params': [self.conv.bias]}\n                    param_group_conv_bias['lr'] = base_lr * self.blr\n                    param_group_conv_bias['base_lr'] = base_lr * 
self.blr\n                    param_group_conv_bias['weight_decay'] = 0.0\n                    param_groups.append(param_group_conv_bias)\n        elif isinstance(self.conv, MyDCNv2):   # self-implemented DCNv2\n            if self.conv_offset.weight.requires_grad:\n                param_group_conv_offset_w = {'params': [self.conv_offset.weight]}\n                param_group_conv_offset_w['lr'] = base_lr * self.lr\n                param_group_conv_offset_w['base_lr'] = base_lr * self.lr\n                param_group_conv_offset_w['weight_decay'] = base_wd\n                param_groups.append(param_group_conv_offset_w)\n            if self.conv_offset.bias.requires_grad:\n                param_group_conv_offset_b = {'params': [self.conv_offset.bias]}\n                param_group_conv_offset_b['lr'] = base_lr * self.lr\n                param_group_conv_offset_b['base_lr'] = base_lr * self.lr\n                param_group_conv_offset_b['weight_decay'] = base_wd\n                param_groups.append(param_group_conv_offset_b)\n            if self.conv.weight.requires_grad:\n                param_group_dcn_weight = {'params': [self.conv.weight]}\n                param_group_dcn_weight['lr'] = base_lr * self.lr\n                param_group_dcn_weight['base_lr'] = base_lr * self.lr\n                param_group_dcn_weight['weight_decay'] = base_wd\n                param_groups.append(param_group_dcn_weight)\n        else:   # official DCNv2\n            pass\n        if self.bn is not None:\n            if self.bn.weight.requires_grad:\n                param_group_norm_weight = {'params': [self.bn.weight]}\n                param_group_norm_weight['lr'] = base_lr * self.lr\n                param_group_norm_weight['base_lr'] = base_lr * self.lr\n                param_group_norm_weight['weight_decay'] = 0.0\n                param_groups.append(param_group_norm_weight)\n            if self.bn.bias.requires_grad:\n                param_group_norm_bias = {'params': [self.bn.bias]}\n                param_group_norm_bias['lr'] = base_lr * self.lr\n                param_group_norm_bias['base_lr'] = base_lr * self.lr\n                param_group_norm_bias['weight_decay'] = 0.0\n                param_groups.append(param_group_norm_bias)\n        if self.gn is not None:\n            if self.gn.weight.requires_grad:\n                param_group_norm_weight = {'params': [self.gn.weight]}\n                param_group_norm_weight['lr'] = base_lr * self.lr\n                param_group_norm_weight['base_lr'] = base_lr * self.lr\n                param_group_norm_weight['weight_decay'] = 0.0\n                param_groups.append(param_group_norm_weight)\n            if self.gn.bias.requires_grad:\n                param_group_norm_bias = {'params': [self.gn.bias]}\n                param_group_norm_bias['lr'] = base_lr * self.lr\n                param_group_norm_bias['base_lr'] = base_lr * self.lr\n                param_group_norm_bias['weight_decay'] = 0.0\n                param_groups.append(param_group_norm_bias)\n        if self.af is not None:\n            if self.af.weight.requires_grad:\n                param_group_norm_weight = {'params': [self.af.weight]}\n                param_group_norm_weight['lr'] = base_lr * self.lr\n                param_group_norm_weight['base_lr'] = base_lr * self.lr\n                param_group_norm_weight['weight_decay'] = 0.0\n                param_groups.append(param_group_norm_weight)\n            if self.af.bias.requires_grad:\n                param_group_norm_bias = {'params': [self.af.bias]}\n                param_group_norm_bias['lr'] = base_lr * self.lr\n                param_group_norm_bias['base_lr'] = base_lr * self.lr\n                param_group_norm_bias['weight_decay'] = 0.0\n                param_groups.append(param_group_norm_bias)\n\n    def forward(self, x):\n        if self.use_dcn:\n            offset_mask = self.conv_offset(x)\n            offset = offset_mask[:, :self.offset_channel, :, :]\n            mask = offset_mask[:, self.offset_channel:, :, :]\n            mask = T.sigmoid(mask)\n            x = self.conv(x, offset, mask=mask)\n        else:\n            x = self.conv(x)\n        if self.bn:\n            x = self.bn(x)\n        if self.gn:\n            x = self.gn(x)\n        if self.af:\n            x = self.af(x)\n        if self.act:\n            x = self.act(x)\n        return x\n\n\nclass CoordConv2(torch.nn.Module):\n    def __init__(self, coord_conv=True):\n        super(CoordConv2, self).__init__()\n        self.coord_conv = coord_conv\n\n    def forward(self, input):\n        if not 
self.coord_conv:\n            return input\n        b = input.shape[0]\n        h = input.shape[2]\n        w = input.shape[3]\n        x_range = T.arange(0, w, dtype=T.float32, device=input.device) / (w - 1) * 2.0 - 1\n        y_range = T.arange(0, h, dtype=T.float32, device=input.device) / (h - 1) * 2.0 - 1\n        x_range = x_range[np.newaxis, np.newaxis, np.newaxis, :].repeat((b, 1, h, 1))\n        y_range = y_range[np.newaxis, np.newaxis, :, np.newaxis].repeat((b, 1, 1, w))\n        offset = T.cat([input, x_range, y_range], dim=1)\n        return offset\n\n\ndef add_coord(x, data_format):\n    b = x.shape[0]\n    if data_format == 'NCHW':\n        h, w = x.shape[2], x.shape[3]\n    else:\n        h, w = x.shape[1], x.shape[2]\n\n    gx = T.arange(0, w, dtype=x.dtype, device=x.device) / (w - 1.) * 2.0 - 1.\n    gy = T.arange(0, h, dtype=x.dtype, device=x.device) / (h - 1.) * 2.0 - 1.\n\n    if data_format == 'NCHW':\n        gx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w])\n        gy = gy.reshape([1, 1, h, 1]).expand([b, 1, h, w])\n    else:\n        gx = gx.reshape([1, 1, w, 1]).expand([b, h, w, 1])\n        gy = gy.reshape([1, h, 1, 1]).expand([b, h, w, 1])\n\n    gx.requires_grad = False\n    gy.requires_grad = False\n    return gx, gy\n\n\nclass CoordConv(torch.nn.Module):\n    def __init__(self,\n                 ch_in,\n                 ch_out,\n                 filter_size,\n                 padding,\n                 norm_type,\n                 freeze_norm=False,\n                 name='',\n                 act='leaky',\n                 data_format='NCHW'):\n        \"\"\"\n        CoordConv dedicated to PPYOLO; it hard-wires a Conv + BN + LeakyReLU.\n        CoordConv layer, see https://arxiv.org/abs/1807.03247\n\n        Args:\n            ch_in (int): input channel\n            ch_out (int): output channel\n            filter_size (int): filter size, default 3\n            padding (int): padding size, default 0\n            norm_type (str): batch norm type, default bn\n            name (str): layer name\n            data_format (str): data format, NCHW or NHWC\n\n        \"\"\"\n        super(CoordConv, self).__init__()\n        self.conv = Conv2dUnit(\n            ch_in + 2,\n            ch_out,\n            filter_size=filter_size,\n            norm_type=norm_type,\n            freeze_norm=freeze_norm,\n            act=act,\n            name=name)\n        self.data_format = data_format\n\n    def add_param_group(self, param_groups, base_lr, base_wd):\n        self.conv.add_param_group(param_groups, base_lr, base_wd)\n\n    def forward(self, x):\n        gx, gy = add_coord(x, self.data_format)\n        if self.data_format == 'NCHW':\n            y = torch.cat([x, gx, gy], 1)\n        else:\n            y = torch.cat([x, gx, gy], -1)\n        y = self.conv(y)\n        return y\n\n\nclass SPP2(torch.nn.Module):\n    def __init__(self, seq='asc'):\n        super(SPP2, self).__init__()\n        assert seq in ['desc', 'asc']\n        self.seq = seq\n\n    def forward(self, x):\n        x_1 = x\n        x_2 = F.max_pool2d(x, 5, 1, 2)\n        x_3 = F.max_pool2d(x, 9, 1, 4)\n        x_4 = F.max_pool2d(x, 13, 1, 6)\n        if self.seq == 'desc':\n            out = torch.cat([x_4, x_3, x_2, x_1], dim=1)\n        else:\n            out = torch.cat([x_1, x_2, x_3, x_4], dim=1)\n        return out\n\n\nclass SPP(torch.nn.Module):\n    def __init__(self,\n                 ch_in,\n                 ch_out,\n                 k,\n                 pool_size,\n                 norm_type,\n                 freeze_norm=False,\n                 name='',\n                 act='leaky',\n                 data_format='NCHW'):\n        \"\"\"\n        SPP dedicated to PPYOLO; it hard-wires a Conv + BN + LeakyReLU.\n        SPP layer, which consists of four pooling layers followed by a conv layer\n\n        Args:\n            ch_in (int): input channel of conv layer\n            ch_out (int): output channel of conv layer\n            k (int): kernel size of conv layer\n            norm_type (str): batch norm type\n            freeze_norm (bool): whether to freeze norm, default False\n            name (str): layer name\n            act (str): activation function\n            data_format (str): data format, NCHW or NHWC\n        \"\"\"\n        super(SPP, self).__init__()\n        self.pool = torch.nn.ModuleList()\n        self.data_format = data_format\n        for size in pool_size:\n            pool = nn.MaxPool2d(kernel_size=size, stride=1, padding=size // 2, ceil_mode=False)\n            
self.pool.append(pool)\n        self.conv = Conv2dUnit(\n            ch_in,\n            ch_out,\n            k,\n            padding=k // 2,\n            norm_type=norm_type,\n            freeze_norm=freeze_norm,\n            name=name,\n            act=act,\n            data_format=data_format)\n\n    def add_param_group(self, param_groups, base_lr, base_wd):\n        self.conv.add_param_group(param_groups, base_lr, base_wd)\n\n    def forward(self, x):\n        outs = [x]\n        for pool in self.pool:\n            outs.append(pool(x))\n        if self.data_format == \"NCHW\":\n            y = torch.cat(outs, 1)\n        else:\n            y = torch.cat(outs, -1)\n\n        y = self.conv(y)\n        return y\n\n\nclass DropBlock2(torch.nn.Module):\n    def __init__(self,\n                 block_size=3,\n                 keep_prob=0.9):\n        super(DropBlock2, self).__init__()\n        self.block_size = block_size\n        self.keep_prob = keep_prob\n\n    def forward(self, input):\n        if not self.training:\n            return input\n\n        def CalculateGamma(input, block_size, keep_prob):\n            h = input.shape[2]   # int\n            h = np.array([h])\n            h = torch.tensor(h, dtype=torch.float32, device=input.device)\n            feat_shape_t = h.reshape((1, 1, 1, 1))   # shape: [1, 1, 1, 1]\n            feat_area = torch.pow(feat_shape_t, 2)   # shape: [1, 1, 1, 1]\n\n            block_shape_t = torch.zeros((1, 1, 1, 1), dtype=torch.float32, device=input.device) + block_size\n            block_area = torch.pow(block_shape_t, 2)\n\n            useful_shape_t = feat_shape_t - block_shape_t + 1\n            useful_area = torch.pow(useful_shape_t, 2)\n\n            upper_t = feat_area * (1 - keep_prob)\n            bottom_t = block_area * useful_area\n            output = upper_t / bottom_t\n            return output\n\n        gamma = CalculateGamma(input, block_size=self.block_size, keep_prob=self.keep_prob)\n        input_shape = input.shape\n        p = gamma.repeat(input_shape)\n\n        input_shape_tmp = input.shape\n        random_matrix = torch.rand(input_shape_tmp, device=input.device)\n        one_zero_m = (random_matrix < p).float()\n\n        mask_flag = torch.nn.functional.max_pool2d(one_zero_m, (self.block_size, self.block_size), stride=1, padding=1)\n        mask = 1.0 - mask_flag\n\n        elem_numel = input_shape[0] * input_shape[1] * input_shape[2] * input_shape[3]\n        elem_numel_m = float(elem_numel)\n\n        elem_sum = mask.sum()\n\n        output = input * mask * elem_numel_m / elem_sum\n        return output\n\n\nclass DropBlock(torch.nn.Module):\n    def __init__(self, block_size, keep_prob, name, data_format='NCHW'):\n        \"\"\"\n        DropBlock layer, see https://arxiv.org/abs/1810.12890\n\n        Args:\n            block_size (int): block size\n            keep_prob (int): keep probability\n            name (str): layer name\n            data_format (str): data format, NCHW or NHWC\n        \"\"\"\n        super(DropBlock, self).__init__()\n        self.block_size = block_size\n        self.keep_prob = keep_prob\n        self.name = name\n        self.data_format = data_format\n\n    def forward(self, x):\n        if not self.training or self.keep_prob == 1:\n            return x\n        else:\n            gamma = (1. - self.keep_prob) / (self.block_size**2)\n            if self.data_format == 'NCHW':\n                shape = x.shape[2:]\n            else:\n                shape = x.shape[1:3]\n            for s in shape:\n                gamma *= s / (s - self.block_size + 1)\n\n            matrix = torch.rand(x.shape, device=x.device)\n            matrix = (matrix < gamma).float()\n            mask_inv = F.max_pool2d(\n                matrix,\n                self.block_size,\n                stride=1,\n                padding=self.block_size // 2)\n            mask = 1. 
- mask_inv\n            y = x * mask * (mask.numel() / mask.sum())\n            return y\n\n\nclass PointGenerator(object):\n\n    def _meshgrid(self, x, y, w, h, row_major=True):\n        xx = torch.tile(torch.reshape(x, (1, -1)), [h, 1])\n        yy = torch.tile(torch.reshape(y, (-1, 1)), [1, w])\n\n        xx = torch.reshape(xx, (-1, ))\n        yy = torch.reshape(yy, (-1, ))\n        if row_major:\n            return xx, yy\n        else:\n            return yy, xx\n\n    def grid_points(self, featmap_size, stride=16):\n        feat_h, feat_w = featmap_size\n        eps = 1e-3\n        shift_x = torch.arange(0., feat_w - eps, 1., dtype=torch.float32) * stride\n        shift_y = torch.arange(0., feat_h - eps, 1., dtype=torch.float32) * stride\n\n        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y, feat_w, feat_h)\n        stride = torch.zeros(shift_xx.shape, dtype=torch.float32) + stride\n        all_points = torch.stack([shift_xx, shift_yy, stride], dim=-1)\n        return all_points\n\n    def valid_flags(self, featmap_size, valid_size, device='cuda'):\n        # feat_h, feat_w = featmap_size\n        # valid_h, valid_w = valid_size\n        # assert valid_h <= feat_h and valid_w <= feat_w\n        # valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)\n        # valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)\n        # valid_x[:valid_w] = 1\n        # valid_y[:valid_h] = 1\n        # valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)\n        # valid = valid_xx & valid_yy\n        # return valid\n        pass\n\n\n\n\n"
] |
[
[
"torch.cat",
"torch.zeros",
"torch.where",
"torch.pow",
"torch.tile",
"torch.reshape",
"torch.randn",
"torch.from_numpy",
"torch.tensor",
"torch.rand",
"torch.arange",
"torch.nn.GroupNorm",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.softplus",
"torch.sigmoid",
"torch.nn.init.constant_",
"torch.nn.functional.conv2d",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.init.xavier_normal_",
"torch.nn.ReLU",
"torch.exp",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.stack",
"numpy.array",
"torch.nn.MaxPool2d",
"torch.nn.functional.grid_sample",
"torch.clamp"
]
] |
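MyDCNv2 in the record above gathers its deformable samples with F.grid_sample, which expects sampling coordinates normalized to [-1, 1]; with align_corners=True, -1 and +1 map to the centers of the first and last pixels, which is why the code divides by (pad_x_W - 1). A small standalone check of that convention (the tensors below are made up for illustration):

import torch
import torch.nn.functional as F

x = torch.arange(16.).reshape(1, 1, 4, 4)  # a 1x1x4x4 "image", values 0..15
# Sample pixel (row=2, col=3): index / (size - 1) * 2 - 1, as in MyDCNv2.
gx, gy = 3.0 / 3 * 2 - 1, 2.0 / 3 * 2 - 1
grid = torch.tensor([[[[gx, gy]]]])        # [N, H_out, W_out, 2], (x, y) order
out = F.grid_sample(x, grid, mode='bilinear', align_corners=True)
assert torch.allclose(out, x[0, 0, 2, 3])  # bilinear sample lands on the pixel center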
aminekechaou/detectron2
|
[
"3772b9316f8a2e6bf55cf5868dd64214d7f7c49a"
] |
[
"detectron2/data/datasets/cityscapes.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport functools\nimport json\nimport logging\nimport multiprocessing as mp\nimport numpy as np\nimport os\nfrom itertools import chain\nimport pycocotools.mask as mask_util\nfrom PIL import Image\n\nfrom detectron2.structures import BoxMode\nfrom detectron2.utils.comm import get_world_size\nfrom detectron2.utils.file_io import PathManager\nfrom detectron2.utils.logger import setup_logger\n\ntry:\n import cv2 # noqa\nexcept ImportError:\n # OpenCV is an optional dependency at the moment\n pass\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_cityscapes_files(image_dir, gt_dir):\n files = []\n # scan through the directory\n cities = PathManager.ls(image_dir)\n logger.info(f\"{len(cities)} cities found in '{image_dir}'.\")\n for city in cities:\n city_img_dir = os.path.join(image_dir, city)\n city_gt_dir = os.path.join(gt_dir, city)\n for basename in PathManager.ls(city_img_dir):\n image_file = os.path.join(city_img_dir, basename)\n\n suffix = \"leftImg8bit.png\"\n assert basename.endswith(suffix), basename\n basename = basename[: -len(suffix)]\n\n instance_file = os.path.join(city_gt_dir, basename + \"gtFine_instanceIds.png\")\n label_file = os.path.join(city_gt_dir, basename + \"gtFine_labelIds.png\")\n json_file = os.path.join(city_gt_dir, basename + \"gtFine_polygons.json\")\n\n files.append((image_file, instance_file, label_file, json_file))\n assert len(files), \"No images found in {}\".format(image_dir)\n for f in files[0]:\n assert PathManager.isfile(f), f\n return files\n\n\ndef load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):\n \"\"\"\n Args:\n image_dir (str): path to the raw dataset. e.g., \"~/cityscapes/leftImg8bit/train\".\n gt_dir (str): path to the raw annotations. e.g., \"~/cityscapes/gtFine/train\".\n from_json (bool): whether to read annotations from the raw json file or the png files.\n to_polygons (bool): whether to represent the segmentation as polygons\n (COCO's format) instead of masks (cityscapes's format).\n\n Returns:\n list[dict]: a list of dicts in Detectron2 standard format. (See\n `Using Custom Datasets </tutorials/datasets.html>`_ )\n \"\"\"\n if from_json:\n assert to_polygons, (\n \"Cityscapes's json annotations are in polygon format. \"\n \"Converting to mask format is not supported now.\"\n )\n files = _get_cityscapes_files(image_dir, gt_dir)\n\n logger.info(\"Preprocessing cityscapes annotations ...\")\n # This is still not fast: all workers will execute duplicate works and will\n # take up to 10m on a 8GPU server.\n pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))\n\n ret = pool.map(\n functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),\n files,\n )\n logger.info(\"Loaded {} images from {}\".format(len(ret), image_dir))\n\n # Map cityscape ids to contiguous ids\n from cityscapesscripts.helpers.labels import labels\n\n labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]\n dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}\n for dict_per_image in ret:\n for anno in dict_per_image[\"annotations\"]:\n anno[\"category_id\"] = dataset_id_to_contiguous_id[anno[\"category_id\"]]\n return ret\n\n\ndef load_cityscapes_semantic(image_dir, gt_dir):\n \"\"\"\n Args:\n image_dir (str): path to the raw dataset. e.g., \"~/cityscapes/leftImg8bit/train\".\n gt_dir (str): path to the raw annotations. 
e.g., \"~/cityscapes/gtFine/train\".\n\n Returns:\n list[dict]: a list of dict, each has \"file_name\" and\n \"sem_seg_file_name\".\n \"\"\"\n ret = []\n # gt_dir is small and contain many small files. make sense to fetch to local first\n gt_dir = PathManager.get_local_path(gt_dir)\n for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):\n label_file = label_file.replace(\"labelIds\", \"labelTrainIds\")\n\n with PathManager.open(json_file, \"r\") as f:\n jsonobj = json.load(f)\n ret.append(\n {\n \"file_name\": image_file,\n \"sem_seg_file_name\": label_file,\n \"height\": jsonobj[\"imgHeight\"],\n \"width\": jsonobj[\"imgWidth\"],\n }\n )\n assert len(ret), f\"No images found in {image_dir}!\"\n assert PathManager.isfile(\n ret[0][\"sem_seg_file_name\"]\n ), \"Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py\" # noqa\n return ret\n\n\ndef _cityscapes_files_to_dict(files, from_json, to_polygons):\n \"\"\"\n Parse cityscapes annotation files to a instance segmentation dataset dict.\n\n Args:\n files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)\n from_json (bool): whether to read annotations from the raw json file or the png files.\n to_polygons (bool): whether to represent the segmentation as polygons\n (COCO's format) instead of masks (cityscapes's format).\n\n Returns:\n A dict in Detectron2 Dataset format.\n \"\"\"\n from cityscapesscripts.helpers.labels import id2label, name2label\n\n image_file, instance_id_file, _, json_file = files\n\n annos = []\n\n if from_json:\n from shapely.geometry import MultiPolygon, Polygon\n\n with PathManager.open(json_file, \"r\") as f:\n jsonobj = json.load(f)\n ret = {\n \"file_name\": image_file,\n \"image_id\": os.path.basename(image_file),\n \"height\": jsonobj[\"imgHeight\"],\n \"width\": jsonobj[\"imgWidth\"],\n }\n\n # `polygons_union` contains the union of all valid polygons.\n polygons_union = Polygon()\n\n # CityscapesScripts draw the polygons in sequential order\n # and each polygon *overwrites* existing ones. See\n # (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa\n # We use reverse order, and each polygon *avoids* early ones.\n # This will resolve the ploygon overlaps in the same way as CityscapesScripts.\n for obj in jsonobj[\"objects\"][::-1]:\n if \"deleted\" in obj: # cityscapes data format specific\n continue\n label_name = obj[\"label\"]\n\n try:\n label = name2label[label_name]\n except KeyError:\n if label_name.endswith(\"group\"): # crowd area\n label = name2label[label_name[: -len(\"group\")]]\n else:\n raise\n if label.id < 0: # cityscapes data format\n continue\n\n # Cityscapes's raw annotations uses integer coordinates\n # Therefore +0.5 here\n poly_coord = np.asarray(obj[\"polygon\"], dtype=\"f4\") + 0.5\n # CityscapesScript uses PIL.ImageDraw.polygon to rasterize\n # polygons for evaluation. 
This function operates in integer space\n # and draws each pixel whose center falls into the polygon.\n # Therefore it draws a polygon which is 0.5 \"fatter\" in expectation.\n # We therefore dilate the input polygon by 0.5 as our input.\n poly = Polygon(poly_coord).buffer(0.5, resolution=4)\n\n if not label.hasInstances or label.ignoreInEval:\n # even if we won't store the polygon it still contributes to overlaps resolution\n polygons_union = polygons_union.union(poly)\n continue\n\n # Take non-overlapping part of the polygon\n poly_wo_overlaps = poly.difference(polygons_union)\n if poly_wo_overlaps.is_empty:\n continue\n polygons_union = polygons_union.union(poly)\n\n anno = {}\n anno[\"iscrowd\"] = label_name.endswith(\"group\")\n anno[\"category_id\"] = label.id\n\n if isinstance(poly_wo_overlaps, Polygon):\n poly_list = [poly_wo_overlaps]\n elif isinstance(poly_wo_overlaps, MultiPolygon):\n poly_list = poly_wo_overlaps.geoms\n else:\n raise NotImplementedError(\"Unknown geometric structure {}\".format(poly_wo_overlaps))\n\n poly_coord = []\n for poly_el in poly_list:\n # COCO API can work only with exterior boundaries now, hence we store only them.\n # TODO: store both exterior and interior boundaries once other parts of the\n # codebase support holes in polygons.\n poly_coord.append(list(chain(*poly_el.exterior.coords)))\n anno[\"segmentation\"] = poly_coord\n (xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds\n\n anno[\"bbox\"] = (xmin, ymin, xmax, ymax)\n anno[\"bbox_mode\"] = BoxMode.XYXY_ABS\n\n annos.append(anno)\n else:\n # See also the official annotation parsing scripts at\n # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa\n with PathManager.open(instance_id_file, \"rb\") as f:\n inst_image = np.asarray(Image.open(f), order=\"F\")\n # ids < 24 are stuff labels (filtering them first is about 5% faster)\n flattened_ids = np.unique(inst_image[inst_image >= 24])\n\n ret = {\n \"file_name\": image_file,\n \"image_id\": os.path.basename(image_file),\n \"height\": inst_image.shape[0],\n \"width\": inst_image.shape[1],\n }\n\n for instance_id in flattened_ids:\n # For non-crowd annotations, instance_id // 1000 is the label_id\n # Crowd annotations have <1000 instance ids\n label_id = instance_id // 1000 if instance_id >= 1000 else instance_id\n label = id2label[label_id]\n if not label.hasInstances or label.ignoreInEval:\n continue\n\n anno = {}\n anno[\"iscrowd\"] = instance_id < 1000\n anno[\"category_id\"] = label.id\n\n mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order=\"F\")\n\n inds = np.nonzero(mask)\n ymin, ymax = inds[0].min(), inds[0].max()\n xmin, xmax = inds[1].min(), inds[1].max()\n anno[\"bbox\"] = (xmin, ymin, xmax, ymax)\n if xmax <= xmin or ymax <= ymin:\n continue\n anno[\"bbox_mode\"] = BoxMode.XYXY_ABS\n if to_polygons:\n # This conversion comes from D4809743 and D5171122,\n # when Mask-RCNN was first developed.\n contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[\n -2\n ]\n polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]\n # opencv's can produce invalid polygons\n if len(polygons) == 0:\n continue\n anno[\"segmentation\"] = polygons\n else:\n anno[\"segmentation\"] = mask_util.encode(mask[:, :, None])[0]\n annos.append(anno)\n ret[\"annotations\"] = annos\n return ret\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Test the cityscapes dataset loader.\n\n Usage:\n python -m detectron2.data.datasets.cityscapes \\\n 
cityscapes/leftImg8bit/train cityscapes/gtFine/train\n \"\"\"\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"image_dir\")\n parser.add_argument(\"gt_dir\")\n parser.add_argument(\"--type\", choices=[\"instance\", \"semantic\"], default=\"instance\")\n args = parser.parse_args()\n from detectron2.data.catalog import Metadata\n from detectron2.utils.visualizer import Visualizer\n from cityscapesscripts.helpers.labels import labels\n\n logger = setup_logger(name=__name__)\n\n dirname = \"cityscapes-data-vis\"\n os.makedirs(dirname, exist_ok=True)\n\n if args.type == \"instance\":\n dicts = load_cityscapes_instances(\n args.image_dir, args.gt_dir, from_json=True, to_polygons=True\n )\n logger.info(\"Done loading {} samples.\".format(len(dicts)))\n\n thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval]\n meta = Metadata().set(thing_classes=thing_classes)\n\n else:\n dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir)\n logger.info(\"Done loading {} samples.\".format(len(dicts)))\n\n stuff_names = [k.name for k in labels if k.trainId != 255]\n stuff_colors = [k.color for k in labels if k.trainId != 255]\n meta = Metadata().set(stuff_names=stuff_names, stuff_colors=stuff_colors)\n\n for d in dicts:\n img = np.array(Image.open(PathManager.open(d[\"file_name\"], \"rb\")))\n visualizer = Visualizer(img, metadata=meta)\n vis = visualizer.draw_dataset_dict(d)\n # cv2.imshow(\"a\", vis.get_image()[:, :, ::-1])\n # cv2.waitKey()\n fpath = os.path.join(dirname, os.path.basename(d[\"file_name\"]))\n vis.save(fpath)\n"
] |
[
[
"numpy.asarray",
"numpy.nonzero",
"numpy.unique"
]
] |
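The png branch of _cityscapes_files_to_dict in the record above relies on the Cityscapes instance-id encoding: values >= 1000 are label_id * 1000 plus a per-label instance index, while crowd regions carry the bare label id. A tiny sketch of that decoding rule (the example values are invented):

def decode_cityscapes_instance_id(instance_id):
    # ids >= 1000 encode a real instance; bare label ids mark crowd regions
    if instance_id >= 1000:
        return instance_id // 1000, False   # (label_id, is_crowd)
    return instance_id, True

assert decode_cityscapes_instance_id(26001) == (26, False)  # e.g. label 26, instance 1
assert decode_cityscapes_instance_id(26) == (26, True)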
baymlab/wastewater_analysis
|
[
"b009452669fa32c9a1a204c461725dd0a3a996c9"
] |
[
"analysis/plot_sequence_diversity.py"
] |
[
"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport argparse\nimport matplotlib.pyplot as plt\n\ncolors = {\n 'B.1.1.7': (0.4980392156862745, 0.788235294117647, 0.4980392156862745, 1.0),\n 'B.1.351': (0.9921568627450981, 0.7529411764705882, 0.5254901960784314, 1.0),\n 'B.1.427': (0.2196078431372549, 0.4235294117647059, 0.6901960784313725, 1.0),\n 'B.1.429': (0.7490196078431373, 0.3568627450980392, 0.09019607843137253, 1.0),\n 'P.1': (0.4, 0.4, 0.4, 1.0),\n 'B.1.427/B.1.429': (0.2196078431372549, 0.4235294117647059, 0.6901960784313725, 1.0),\n 'B.1.526': 'gold'}\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Plot nucleotide diversity per VOC.\")\n parser.add_argument('--site_pi_files', type=str, nargs='+', help=\"nucleotide diversity files (site-pi)\")\n parser.add_argument('--allele_freq_files', type=str, nargs='+')\n parser.add_argument('--voc_names', type=str, nargs='+')\n parser.add_argument('--ref_size', required=True, type=int)\n parser.add_argument('--min_af', default=0, type=float)\n parser.add_argument('--outdir', default='sequence_diversity')\n parser.add_argument('--verbose', action='store_true')\n args = parser.parse_args()\n\n # create output directory\n try:\n os.mkdir(args.outdir)\n except FileExistsError:\n pass\n\n diversity_dict = {}\n for i, file in enumerate(args.site_pi_files):\n voc = args.voc_names[i]\n pos_list = []\n pi_list = []\n with open(file, 'r') as f:\n for line in f:\n line = line.split('\\t')\n if line[0] == \"CHROM\":\n continue\n pos_list.append(int(line[1]))\n pi_list.append(float(line[2]))\n diversity_dict[voc] = [pos_list, pi_list]\n\n plt.rcParams.update({'font.size': 14,\n 'legend.fontsize': 12,\n 'legend.title_fontsize': 12,\n 'figure.titlesize': 16})\n\n plot_nuc_diversity(diversity_dict, args.ref_size, args.outdir)\n plot_nuc_diversity_subplots(diversity_dict, args.ref_size, args.outdir)\n\n allele_freq_dict = {}\n for i, file in enumerate(args.allele_freq_files):\n voc = args.voc_names[i]\n filtered = 0\n pos_list = []\n alt_allele_freq_list = []\n with open(file, 'r') as f:\n for line in f:\n line = line.split('\\t')\n if line[0] == \"CHROM\":\n continue\n ref_info = line[4]\n allele, freq = ref_info.split(':')\n ref_allele_freq = float(freq)\n alt_allele_freq = 1 - ref_allele_freq\n if alt_allele_freq > args.min_af:\n if args.verbose:\n print(line)\n filtered += 1\n pos_list.append(int(line[1]))\n alt_allele_freq_list.append(alt_allele_freq)\n print(\"{} total # sites with alt allele frequency > {} = {}\".format(\n voc, args.min_af, filtered))\n allele_freq_dict[voc] = [pos_list, alt_allele_freq_list]\n\n plot_allele_freq_subplots(allele_freq_dict, args.ref_size, args.outdir)\n\n return\n\n\ndef plot_nuc_diversity(diversity_dict, ref_size, outdir):\n \"\"\"Plot nucleotide diversity per VOC\"\"\"\n outfile = outdir + \"/nucleotide_diversity.png\"\n plt.figure()\n for voc, diversity in diversity_dict.items():\n pos_list, pi_list = diversity\n pi_list_all = []\n pos0 = 0\n for i, pos1 in enumerate(pos_list):\n if pos1 == pos0:\n pi_list_all[-1] += pi_list[i]\n else:\n pi_list_all.extend([0]*(pos1-pos0-1))\n pi_list_all.append(pi_list[i])\n pos0 = pos1\n pi_list_all.extend([0]*(ref_size-pos1))\n # print(pi_list_all)\n assert len(pi_list_all) == ref_size\n plt.plot(range(1, ref_size+1), pi_list_all, label=voc,\n color=colors[voc], alpha=0.7)\n print(\"{} done\".format(voc))\n\n plt.xlabel(\"Reference position\")\n plt.ylabel(\"Nucleotide diversity\")\n plt.gcf().set_size_inches(15, 3)\n plt.legend(bbox_to_anchor=(1.1, 1))\n 
plt.tight_layout()\n    plt.savefig(outfile)\n    return\n\ndef plot_nuc_diversity_subplots(diversity_dict, ref_size, outdir):\n    \"\"\"Plot nucleotide diversity per VOC using subplots\"\"\"\n    outfile = outdir + \"/nucleotide_diversity_subplots.png\"\n    n_plots = len(diversity_dict.keys())\n    fig, axs = plt.subplots(n_plots, 1, sharex=True, sharey=False)\n    ax_idx = 0\n    for voc, diversity in diversity_dict.items():\n        pos_list, pi_list = diversity\n        pi_list_all = []\n        pos0 = 0\n        for i, pos1 in enumerate(pos_list):\n            if pos1 == pos0:\n                pi_list_all[-1] += pi_list[i]\n            else:\n                pi_list_all.extend([0]*(pos1-pos0-1))\n                pi_list_all.append(pi_list[i])\n                pos0 = pos1\n        pi_list_all.extend([0]*(ref_size-pos1))\n        # print(pi_list_all)\n        assert len(pi_list_all) == ref_size\n        ax = axs[ax_idx]\n        ax.plot(range(1, ref_size+1), pi_list_all, label=voc,\n                color=colors[voc], alpha=0.7)\n        ax.set_ylim(0, 0.5)\n        ax.set_ylabel(\"diversity\")\n        ax.set_title(voc)\n        ax_idx += 1\n\n    ax.set_xlabel(\"Reference position\")\n    plt.gcf().set_size_inches(15, 8)\n    # plt.legend(bbox_to_anchor=(1.1, 1))\n    plt.tight_layout()\n    plt.savefig(outfile)\n    return\n\ndef plot_allele_freq_subplots(allele_freq_dict, ref_size, outdir):\n    \"\"\"Plot alternative allele frequencies per VOC using subplots\"\"\"\n    outfile = outdir + \"/allele_freq_subplots.png\"\n    n_plots = len(allele_freq_dict.keys())\n    fig, axs = plt.subplots(n_plots, 1, sharex=True, sharey=False)\n    ax_idx = 0\n    for voc, freq_info in allele_freq_dict.items():\n        pos_list, freq_list = freq_info\n        freq_list_all = []\n        pos0 = 0\n        for i, pos1 in enumerate(pos_list):\n            if pos1 == pos0:\n                freq_list_all[-1] += freq_list[i]\n            else:\n                freq_list_all.extend([0]*(pos1-pos0-1))\n                freq_list_all.append(freq_list[i])\n                pos0 = pos1\n        freq_list_all.extend([0]*(ref_size-pos1))\n        # print(freq_list_all)\n        assert len(freq_list_all) == ref_size\n        ax = axs[ax_idx]\n        ax.plot(range(1, ref_size+1), freq_list_all, label=voc,\n                color=colors[voc], alpha=0.7)\n        ax.set_ylim(0, 1)\n        ax.set_ylabel(\"AAF\")\n        ax.set_title(voc)\n        ax_idx += 1\n\n    ax.set_xlabel(\"Reference position\")\n    plt.gcf().set_size_inches(15, 8)\n    # plt.legend(bbox_to_anchor=(1.1, 1))\n    plt.tight_layout()\n    plt.savefig(outfile)\n    svg_outfile = outdir + \"/allele_freq_subplots.svg\"\n    plt.savefig(svg_outfile)\n    return\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.ylabel"
]
] |
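All three plotting helpers in the record above expand a sparse (position, value) list into a dense per-reference-position vector, accumulating values that land on the same 1-based position. The same expansion can be written directly with NumPy (a sketch with invented inputs):

import numpy as np

ref_size = 10
pos_list = [2, 5, 5, 9]            # 1-based positions; 5 occurs twice
val_list = [0.1, 0.2, 0.3, 0.4]

dense = np.zeros(ref_size)
# unbuffered accumulate, mirroring the pos1 == pos0 branch in the loops above
np.add.at(dense, np.array(pos_list) - 1, val_list)
assert dense[4] == 0.5 and dense.shape == (ref_size,)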
savelov/artview
|
[
"83c918757d6ae51cecc7589af4ab279e2c750bfe"
] |
[
"artview/components/correlation.py"
] |
[
"\"\"\"\nplot_radar.py\n\nClass instance used to make Display.\n\"\"\"\n# Load the needed packages\nimport numpy as np\nimport scipy\nimport os\nimport pyart\n\nfrom matplotlib.backends.qt_compat import is_pyqt5\nif is_pyqt5():\n from matplotlib.backends.backend_qt5agg import (\n FigureCanvas, NavigationToolbar2QT as NavigationToolbar)\nelse:\n from matplotlib.backends.backend_qt4agg import (\n FigureCanvas, NavigationToolbar2QT as NavigationToolbar)\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.colors import Normalize as mlabNormalize\nfrom matplotlib.colorbar import ColorbarBase as mlabColorbarBase\nfrom matplotlib.pyplot import cm\n\nfrom ..core import (Variable, Component, common, VariableChoose, QtCore,\n QtGui, QtWidgets)\nfrom ..core.points import Points\n\n# Save image file type and DPI (resolution)\nIMAGE_EXT = 'png'\nDPI = 200\n# ========================================================================\n\n\nclass Correlation(Component):\n '''\n Class to create a correlation plot, using a returned Radar structure\n from the PyArt pyart.graph package.\n '''\n\n Vradar = None #: see :ref:`shared_variable`\n VfieldVertical = None #: see :ref:`shared_variable`\n VfieldHorizontal = None #: see :ref:`shared_variable`\n Vtilt = None #: see :ref:`shared_variable`\n Vgatefilter = None #: see :ref:`shared_variable`\n VplotAxes = None #: see :ref:`shared_variable` (no internal use)\n\n @classmethod\n def guiStart(self, parent=None):\n '''Graphical interface for starting this class'''\n kwargs, independent = \\\n common._SimplePluginStart(\"Correlation\").startDisplay()\n kwargs['parent'] = parent\n return self(**kwargs), independent\n\n def __init__(self, Vradar=None, VfieldVertical=None, VfieldHorizontal=None,\n Vgatefilter=None, name=\"Correlation\", parent=None):\n '''\n Initialize the class to create display.\n\n Parameters\n ----------\n [Optional]\n Vradar : :py:class:`~artview.core.core.Variable` instance\n Radar signal variable. If None start new one with None.\n VfieldVertical, \\\n VfieldHorizontal : :py:class:`~artview.core.core.Variable` instance\n Field signal variable. 
If None start new one with empty string.\n Vgatefilter : :py:class:`~artview.core.core.Variable` instance\n Gatefilter signal variable.\n A value of None will instantiate a empty variable.\n name : string\n Display window name.\n parent : PyQt instance\n Parent instance to associate to Display window.\n If None, then Qt owns, otherwise associated with parent PyQt\n instance.\n\n '''\n super(Correlation, self).__init__(name=name, parent=parent)\n self.setFocusPolicy(QtCore.Qt.ClickFocus)\n # Set up signal, so that DISPLAY can react to\n # external (or internal) changes in radar, field,\n # lims and tilt (expected to be Core.Variable instances)\n # The capital V so people remember using \".value\"\n if Vradar is None:\n self.Vradar = Variable(None)\n else:\n self.Vradar = Vradar\n if VfieldVertical is None:\n self.VfieldVertical = Variable('')\n else:\n self.VfieldVertical = VfieldVertical\n if VfieldHorizontal is None:\n self.VfieldHorizontal = Variable('')\n else:\n self.VfieldHorizontal = VfieldHorizontal\n\n if Vgatefilter is None:\n self.Vgatefilter = Variable(None)\n else:\n self.Vgatefilter = Vgatefilter\n\n self.VplotAxes = Variable(None)\n\n self.sharedVariables = {\"Vradar\": self.NewRadar,\n \"VfieldVertical\": self.NewField,\n \"VfieldHorizontal\": self.NewField,\n \"Vgatefilter\": self.NewGatefilter,\n \"VplotAxes\": None,}\n\n # Connect the components\n self.connectAllVariables()\n\n self.parameters = {\n \"marker\": 'o',\n \"facecolors\": \"blue\",\n \"edgecolors\": \"none\",\n \"s\": 20,\n \"color\": \"red\",\n \"xmin\": None,\n \"xmax\": None,\n \"ymin\": None,\n \"ymax\": None,\n }\n\n self.parameters_type = [\n (\"marker\", str, \"marker type\"),\n (\"facecolors\", str, \"marker color\"),\n (\"edgecolors\", str, \"marker edge color\"),\n (\"s\", int, \"marker size\"),\n (\"color\", str, \"line color\"),\n (\"xmin\", common.float_or_none, \"Min X Value\"),\n (\"xmax\", common.float_or_none, \"Max X Value\"),\n (\"ymin\", common.float_or_none, \"Min Y Value\"),\n (\"ymax\", common.float_or_none, \"Max Y Value\"),\n ]\n\n # Set plot title and colorbar units to defaults\n self.title = self._get_default_title()\n self.unitsVertical, self.unitsHorizontal = self._get_default_units()\n\n # Create display image text dictionary\n self.disp_text = {}\n\n # Create a figure for output\n self._set_fig_ax()\n\n # Launch the GUI interface\n self.LaunchGUI()\n\n # Initialize radar variable\n self.NewRadar(None, True)\n\n self.show()\n\n ####################\n # GUI methods #\n ####################\n\n def LaunchGUI(self):\n '''Launches a GUI interface.'''\n # Create layout\n self.layout = QtWidgets.QGridLayout()\n self.layout.setSpacing(8)\n\n # Create the widget\n self.central_widget = QtWidgets.QWidget()\n self.setCentralWidget(self.central_widget)\n self._set_figure_canvas()\n\n self.central_widget.setLayout(self.layout)\n\n # Add buttons along display for user control\n self.addButtons()\n\n # Set the status bar to display messages\n self.statusbar = self.statusBar()\n\n def setParameters(self):\n '''Open set parameters dialog.'''\n parm = common.get_options(self.parameters_type, self.parameters)\n for key in parm.keys():\n self.parameters[key] = parm[key]\n self._update_plot()\n\n ##################################\n # User display interface methods #\n ##################################\n\n def addButtons(self):\n '''Add a series of buttons for user control over display.'''\n # Create the Display controls\n self._add_displayBoxUI()\n # Create the Field controls\n 
self._add_fieldBoxUI()\n # Create the Tools controls\n #self._add_toolsBoxUI()\n # Create the Informational label at top\n #self._add_infolabel()\n\n self.layout.addWidget(self.fieldVerticalBox, 0, 0, 1, 2)\n label = QtWidgets.QLabel(\"VS.\")\n self.layout.addWidget(label, 0, 2, 1, 1)\n self.layout.setAlignment(label, QtCore.Qt.AlignHCenter)\n self.layout.addWidget(self.fieldHorizontalBox, 0, 3, 1, 2)\n self.layout.addWidget(self.dispButton, 0, 6)\n self.layout.setAlignment(self.dispButton, QtCore.Qt.AlignRight)\n #self.layout.addWidget(self.toolsButton, 0, 3)\n #self.layout.addWidget(self.infolabel, 0, 4)\n\n #############################\n # Functionality methods #\n #############################\n\n def _fillFieldBox(self):\n '''Fill in the Field Window Box with current variable names.'''\n for box in (self.fieldVerticalBox, self.fieldHorizontalBox):\n box.clear()\n box.addItem(\"Field Select\")\n # Loop through and create each field button\n for field in self.fieldnames:\n box.addItem(field)\n\n def _fieldVerticalAction(self, text):\n '''Define action for Field Button selection.'''\n if text == \"Field Select\":\n from .field import FieldButtonWindow\n self.fieldbuttonwindow = FieldButtonWindow(\n self.Vradar, self.VfieldVertical,\n name=self.name+\"Vertical Field Selection\", parent=self.parent)\n else:\n self.VfieldVertical.change(str(text))\n\n def _fieldHorizontalAction(self, text):\n '''Define action for Field Button selection.'''\n if text == \"Field Select\":\n from .field import FieldButtonWindow\n self.fieldbuttonwindow = FieldButtonWindow(\n self.Vradar, self.VfieldHorizontal,\n name=self.name+\"Horizontal Field Selection\", parent=self.parent)\n else:\n self.VfieldHorizontal.change(str(text))\n\n def _GateFilterToggleAction(self):\n '''Define action for GateFilterToggle menu selection.'''\n if self.gatefilterToggle.isChecked():\n self.gatefilterToggle.setText(\"GateFilter On\")\n else:\n self.gatefilterToggle.setText(\"GateFilter Off\")\n self._update_plot()\n\n def _title_input(self):\n '''Retrieve new plot title.'''\n val, entry = common.string_dialog_with_reset(\n self.title, \"Plot Title\", \"Title:\", self._get_default_title())\n if entry is True:\n self.title = val\n self._update_plot()\n\n def _units_input(self):\n '''Retrieve new plot units.'''\n val0, entry0 = common.string_dialog_with_reset(\n self.unitsVertical, \"Plot Units\", \"Vertical Units:\",\n self._get_default_units())\n val1, entry1 = common.string_dialog_with_reset(\n self.unitsHorizontal, \"Plot Units\", \"Horizontal Units:\",\n self._get_default_units())\n if entry0 is True:\n self.unitsVertical = val0\n if entry1 is True:\n self.unitsHorizontal = val1\n if entry0 is True or entry1 is True:\n self._update_plot()\n\n def _add_ImageText(self):\n '''Add a text box to display.'''\n from .image_text import ImageTextBox\n itext = ImageTextBox(self, parent=self.parent)\n return itext\n\n def _add_displayBoxUI(self):\n '''Create the Display Options Button menu.'''\n parentdir = os.path.abspath(os.path.join(os.path.dirname(__file__),\n os.pardir))\n config_icon = QtGui.QIcon(os.sep.join(\n [parentdir, 'icons', \"categories-applications-system-icon.png\"]))\n self.dispButton = QtWidgets.QPushButton(config_icon, \"\", self)\n self.dispButton.setToolTip(\"Adjust display properties\")\n self.dispButton.setFocusPolicy(QtCore.Qt.NoFocus)\n dispmenu = QtWidgets.QMenu(self)\n\n self.sweepMenu = dispmenu.addMenu(\"sweep\")\n\n vertical_scale_menu = dispmenu.addMenu(\"vertical scale\")\n self.vertical_scale_menu_group = 
QtWidgets.QActionGroup(self, exclusive=True)\n self.vertical_scale_menu_group.triggered.connect(self._update_plot)\n # linear\n action = self.vertical_scale_menu_group.addAction(\"linear\")\n action.setCheckable(True)\n action.setChecked(True)\n vertical_scale_menu.addAction(action)\n # log\n action = self.vertical_scale_menu_group.addAction(\"log\")\n action.setCheckable(True)\n vertical_scale_menu.addAction(action)\n\n horizontal_scale_menu = dispmenu.addMenu(\"horizontal scale\")\n self.horizontal_scale_menu_group = QtWidgets.QActionGroup(self, exclusive=True)\n self.horizontal_scale_menu_group.triggered.connect(self._update_plot)\n # linear\n action = self.horizontal_scale_menu_group.addAction(\"linear\")\n action.setCheckable(True)\n action.setChecked(True)\n horizontal_scale_menu.addAction(action)\n # log\n action = self.horizontal_scale_menu_group.addAction(\"log\")\n action.setCheckable(True)\n horizontal_scale_menu.addAction(action)\n\n dispTitle = dispmenu.addAction(\"Change Title\")\n dispTitle.setToolTip(\"Change plot title\")\n dispUnit = dispmenu.addAction(\"Change Units\")\n dispUnit.setToolTip(\"Change units string\")\n\n self.gatefilterToggle = QtWidgets.QAction(\n 'GateFilter On', dispmenu, checkable=True,\n triggered=self._GateFilterToggleAction)\n dispmenu.addAction(self.gatefilterToggle)\n self.gatefilterToggle.setChecked(True)\n\n self.regressionLineToggle = QtWidgets.QAction(\n 'Linear Regression', dispmenu, checkable=True,\n triggered=self._update_plot)\n dispmenu.addAction(self.regressionLineToggle)\n\n self.dispImageText = dispmenu.addAction(\"Add Text to Image\")\n self.dispImageText.setToolTip(\"Add Text Box to Image\")\n dispQuickSave = dispmenu.addAction(\"Quick Save Image\")\n dispQuickSave.setShortcut(\"Ctrl+D\")\n dispQuickSave.setToolTip(\n \"Save Image to local directory with default name\")\n dispSaveFile = dispmenu.addAction(\"Save Image\")\n dispSaveFile.setShortcut(\"Ctrl+S\")\n dispSaveFile.setStatusTip(\"Save Image using dialog\")\n\n dispmenu.addAction(QtWidgets.QAction(\"Set Parameters\", self,\n triggered=self.setParameters))\n\n dispTitle.triggered.connect(self._title_input)\n dispUnit.triggered.connect(self._units_input)\n self.dispImageText.triggered.connect(self._add_ImageText)\n dispQuickSave.triggered.connect(self._quick_savefile)\n dispSaveFile.triggered.connect(self._savefile)\n\n self.dispButton.setMenu(dispmenu)\n\n def _add_tiltBoxUI(self):\n '''Create the Tilt Selection ComboBox.'''\n self.tiltBox = QtWidgets.QComboBox()\n self.tiltBox.setFocusPolicy(QtCore.Qt.NoFocus)\n self.tiltBox.setToolTip(\"Select tilt elevation angle to display.\\n\"\n \"'Tilt Window' will launch popup.\\n\"\n \"Up/Down arrow keys Increase/Decrease tilt.\")\n self.tiltBox.activated[str].connect(self._tiltAction)\n\n def _add_fieldBoxUI(self):\n '''Create the Field Selection ComboBox.'''\n self.fieldVerticalBox = QtWidgets.QComboBox()\n self.fieldVerticalBox.setFocusPolicy(QtCore.Qt.NoFocus)\n self.fieldVerticalBox.setToolTip(\"Select variable/field in data file.\\n\"\n \"'Field Window' will launch popup.\\n\")\n self.fieldVerticalBox.activated[str].connect(self._fieldVerticalAction)\n\n self.fieldHorizontalBox = QtWidgets.QComboBox()\n self.fieldHorizontalBox.setFocusPolicy(QtCore.Qt.NoFocus)\n self.fieldHorizontalBox.setToolTip(\"Select variable/field in data file.\\n\"\n \"'Field Window' will launch popup.\\n\")\n self.fieldHorizontalBox.activated[str].connect(self._fieldHorizontalAction)\n\n def _add_toolsBoxUI(self):\n '''Create the Tools Button menu.'''\n 
self.toolsButton = QtWidgets.QPushButton(\"Toolbox\")\n self.toolsButton.setFocusPolicy(QtCore.Qt.NoFocus)\n self.toolsButton.setToolTip(\"Choose a tool to apply\")\n toolmenu = QtWidgets.QMenu(self)\n toolZoomPan = toolmenu.addAction(\"Zoom/Pan\")\n toolValueClick = toolmenu.addAction(\"Click for Value\")\n toolSelectRegion = toolmenu.addAction(\"Select a Region of Interest\")\n toolReset = toolmenu.addAction(\"Reset Tools\")\n toolDefault = toolmenu.addAction(\"Reset File Defaults\")\n toolZoomPan.triggered.connect(self.toolZoomPanCmd)\n toolValueClick.triggered.connect(self.toolValueClickCmd)\n toolSelectRegion.triggered.connect(self.toolSelectRegionCmd)\n toolReset.triggered.connect(self.toolResetCmd)\n toolDefault.triggered.connect(self.toolDefaultCmd)\n self.toolsButton.setMenu(toolmenu)\n\n def _select_all_sweeps(self):\n ''' Check all sweeps if action 'all sweeps' is checked.'''\n check = self.sweep_actions[0].isChecked()\n for action in self.sweep_actions[1:]:\n action.setChecked(check)\n self._update_plot()\n\n def _sweep_checked(self, checked):\n if checked is False:\n self.sweep_actions[0].setChecked(False)\n self._update_plot()\n\n ########################\n # Selectionion methods #\n ########################\n\n def NewRadar(self, variable, strong):\n '''\n Slot for 'ValueChanged' signal of\n :py:class:`Vradar <artview.core.core.Variable>`.\n\n This will:\n\n * Update fields and tilts lists and MenuBoxes\n * Check radar scan type and reset limits if needed\n * Reset units and title\n * If strong update: update plot\n '''\n if self.Vradar.value is None:\n self.fieldVerticalBox.clear()\n self.fieldHorizontalBox.clear()\n self.sweepMenu.clear()\n return\n\n # Get the tilt angles\n self.rTilts = self.Vradar.value.sweep_number['data'][:]\n # Get field names\n self.fieldnames = self.Vradar.value.fields.keys()\n\n # Update field and tilt MenuBox\n self._fillFieldBox()\n\n self.sweepMenu.clear()\n self.sweep_actions = []\n\n action = self.sweepMenu.addAction(\"all sweeps\")\n self.sweep_actions.append(action)\n action.triggered.connect(self._select_all_sweeps)\n action.setCheckable(True)\n action.setChecked(True)\n self.sweepMenu.addAction(action)\n\n for sweep in range(len(self.rTilts)):\n action = self.sweepMenu.addAction(\"sweep \" + str(sweep))\n self.sweep_actions.append(action)\n action.triggered.connect(self._sweep_checked)\n action.setCheckable(True)\n action.setChecked(True)\n self.sweepMenu.addAction(action)\n\n self.unitsVertical, self.unitsHorizontal = self._get_default_units()\n self.title = self._get_default_title()\n if strong:\n self._update_plot()\n\n def NewField(self, variable, strong):\n '''\n Slot for 'ValueChanged' signal of\n :py:class:`Vfield <artview.core.core.Variable>`.\n\n This will:\n\n * Reset colormap\n * Reset units\n * Update fields MenuBox\n * If strong update: update plot\n '''\n # XXX diferenciate between vertical and horizontal\n self.unitsVertical, self.unitsHorizontal = self._get_default_units()\n self.title = self._get_default_title()\n #idx = self.fieldBox.findText(variable.value)\n #self.fieldBox.setCurrentIndex(idx)\n if strong:\n self._update_plot()\n\n def NewGatefilter(self, variable, strong):\n '''\n Slot for 'ValueChanged' signal of\n :py:class:`Vgatefilter <artview.core.core.Variable>`.\n\n This will:\n\n * If strong update: update plot\n '''\n if strong:\n self._update_plot()\n\n ####################\n # Plotting methods #\n ####################\n\n def _set_fig_ax(self):\n '''Set the figure and axis to plot.'''\n self.XSIZE = 8\n 
self.YSIZE = 8\n self.fig = Figure(figsize=(self.XSIZE, self.YSIZE))\n self.ax = self.fig.add_axes([0.1, 0.1, 0.8, 0.8])\n self.VplotAxes.change(self.ax)\n\n def _set_figure_canvas(self):\n '''Set the figure canvas to draw in window area.'''\n self.canvas = FigureCanvasQTAgg(self.fig)\n # Add the widget to the canvas\n self.layout.addWidget(self.canvas, 1, 0, 8, 7)\n\n @staticmethod\n def _get_xy_values(radar, field_horizontal, field_vertical,\n sweeps, gatefilter):\n xvalues = radar.fields[field_horizontal]['data']\n yvalues = radar.fields[field_vertical]['data']\n\n if gatefilter is None:\n gates = np.ma.getmaskarray(xvalues) | np.ma.getmaskarray(yvalues)\n else:\n gates = gatefilter.gate_excluded\n\n if sweeps is not None:\n sweep_filter = gates | True\n for sweep, (start, end) in enumerate(radar.iter_start_end()):\n if sweep in sweeps:\n sweep_filter[start:end+1,:] = False\n gates = gates | sweep_filter\n\n xvalues = np.ma.MaskedArray(xvalues, mask=gates)\n yvalues = np.ma.MaskedArray(yvalues, mask=gates)\n\n return xvalues, yvalues\n\n @staticmethod\n def plot_correlation(radar, field_horizontal, field_vertical,\n sweeps, gatefilter, ax, title, **kwargs):\n\n xvalues, yvalues = Correlation._get_xy_values(\n radar, field_horizontal, field_vertical, sweeps, gatefilter)\n\n ax.scatter(xvalues,yvalues,**kwargs)\n\n ax.set_title(title)\n\n @staticmethod\n def plot_regression(radar, field_horizontal, field_vertical,\n sweeps, gatefilter, ax, vmin, vmax, xscale=\"linear\",\n yscale=\"linear\", **kwargs):\n\n xvalues, yvalues = Correlation._get_xy_values(\n radar, field_horizontal, field_vertical, sweeps, gatefilter)\n\n if xscale==\"log\":\n xvalues = np.ma.masked_where( xvalues <= 0, xvalues)\n xvalues = np.ma.log10(xvalues)\n if yscale==\"log\":\n yvalues = np.ma.masked_where( yvalues <= 0, yvalues)\n yvalues = np.ma.log10(yvalues)\n\n m, b, r, _, _ = scipy.stats.linregress(xvalues[~xvalues.mask],\n yvalues[~xvalues.mask])\n\n if xscale==\"log\":\n x = np.linspace(max(vmin,0.0001),vmax,50)\n y = m * np.log10(x) + b\n else:\n x = np.linspace(vmin,vmax,50)\n y = m * x + b\n\n if yscale==\"log\":\n y=10**y\n\n line = ax.plot(x,y, linestyle=\"--\",\n label='y = %f x + %f\\n'%(m,b) +\n 'r value = %f'%(r), **kwargs)\n ax.legend()\n return (m,b)\n\n def _update_plot(self):\n '''Draw/Redraw the plot.'''\n\n if self.Vradar.value is None:\n return\n\n # Create the plot with PyArt RadarDisplay\n self.ax.cla() # Clear the plot axes\n\n self.VplotAxes.update()\n\n if ((self.VfieldVertical.value not in self.fieldnames) or\n (self.VfieldHorizontal.value not in self.fieldnames)):\n self.canvas.draw()\n self.statusbar.setStyleSheet(\"QStatusBar{padding-left:8px;\" +\n \"background:rgba(255,0,0,255);\" +\n \"color:black;font-weight:bold;}\")\n self.statusbar.showMessage(\"Field not Found in Radar\", msecs=5000)\n return\n else:\n self.statusbar.setStyleSheet(\"QStatusBar{padding-left:8px;\" +\n \"background:rgba(0,0,0,0);\" +\n \"color:black;font-weight:bold;}\")\n self.statusbar.clearMessage()\n\n if self.gatefilterToggle.isChecked():\n gatefilter = self.Vgatefilter.value\n else:\n gatefilter = None\n\n if self.sweep_actions[0].isChecked():\n sweeps = None\n else:\n sweeps = []\n for sweep, action in enumerate(self.sweep_actions[1:]):\n if action.isChecked():\n sweeps.append(sweep)\n\n self.plot_correlation(\n self.Vradar.value, self.VfieldHorizontal.value,\n self.VfieldVertical.value, sweeps, gatefilter, self.ax, self.title,\n **{k: self.parameters[k] for k in\n ('s','facecolors', 'edgecolors', 
'marker')}\n )\n\n\n self.ax.set_xscale(\n str(self.horizontal_scale_menu_group.checkedAction().text()))\n self.ax.set_yscale(\n str(self.vertical_scale_menu_group.checkedAction().text()))\n\n self.ax.set_xlabel(self.unitsHorizontal)\n self.ax.set_ylabel(self.unitsVertical)\n\n self.ax.set_xlim(self.parameters[\"xmin\"], self.parameters[\"xmax\"])\n self.ax.set_ylim(self.parameters[\"ymin\"], self.parameters[\"ymax\"])\n\n\n if self.regressionLineToggle.isChecked():\n vmin, vmax = self.ax.get_xlim()\n self.plot_regression(\n self.Vradar.value, self.VfieldHorizontal.value,\n self.VfieldVertical.value, sweeps, gatefilter, self.ax,\n vmin + 0.05 * (vmax-vmin), vmax - 0.05 * (vmax-vmin),\n str(self.horizontal_scale_menu_group.checkedAction().text()),\n str(self.vertical_scale_menu_group.checkedAction().text()),\n color=self.parameters[\"color\"])\n\n self.canvas.draw()\n\n #########################\n # Check methods #\n #########################\n\n def _get_default_title(self):\n '''Get default title from pyart.'''\n return 'Correlation'\n\n def _get_default_units(self):\n '''Get default units for current radar and field.'''\n vertical = ' '\n horizontal = ' '\n if self.Vradar.value is not None:\n try:\n vertical += self.Vradar.value.fields[\n self.VfieldVertical.value]['units']\n except:\n pass\n try:\n horizontal += self.Vradar.value.fields[\n self.VfieldHorizontal.value]['units']\n except:\n pass\n\n return (self.VfieldVertical.value + vertical,\n self.VfieldHorizontal.value + horizontal)\n\n ########################\n # Image save methods #\n ########################\n def _quick_savefile(self, PTYPE=IMAGE_EXT):\n '''Save the current display via PyArt interface.'''\n imagename = (str(self.VfieldVertical.value) + \"VS.\" +\n str(self.VfieldHorizontal.value) + \".png\")\n self.canvas.print_figure(os.path.join(os.getcwd(), imagename), dpi=DPI)\n self.statusbar.showMessage(\n 'Saved to %s' % os.path.join(os.getcwd(), imagename))\n\n def _savefile(self, PTYPE=IMAGE_EXT):\n '''Save the current display using PyQt dialog interface.'''\n imagename = (str(self.VfieldVertical.value) + \"VS.\" +\n str(self.VfieldHorizontal.value) + \".png\")\n file_choices = \"PNG (*.png)|*.png\"\n path = unicode(QtWidgets.QFileDialog.getSaveFileName(\n self, 'Save file', imagename, file_choices))\n if path:\n self.canvas.print_figure(path, dpi=DPI)\n self.statusbar.showMessage('Saved to %s' % path)\n\n\n\n"
] |
[
[
"numpy.linspace",
"matplotlib.figure.Figure",
"numpy.ma.log10",
"numpy.ma.getmaskarray",
"matplotlib.backends.qt_compat.is_pyqt5",
"scipy.stats.linregress",
"numpy.log10",
"numpy.ma.masked_where",
"numpy.ma.MaskedArray"
]
] |
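A note on the `Correlation` component above: `_get_xy_values` masks the two radar fields jointly, and `plot_regression` fits only the surviving pairs. A minimal standalone sketch of that idiom, using hypothetical arrays in place of ARTview's `Vradar` objects; `scipy.stats.linregress` needs plain (unmasked) arrays, which is why masked entries are dropped before fitting.

import numpy as np
import scipy.stats

# Hypothetical field values; NaNs stand in for gates masked in the radar file.
x = np.ma.masked_invalid(np.array([1.0, 2.0, np.nan, 4.0, 5.0]))
y = np.ma.masked_invalid(np.array([2.1, 3.9, 6.0, np.nan, 10.2]))

# As in _get_xy_values: a gate is excluded if it is masked in either field.
gates = np.ma.getmaskarray(x) | np.ma.getmaskarray(y)
x = np.ma.MaskedArray(x, mask=gates)
y = np.ma.MaskedArray(y, mask=gates)

# As in plot_regression: fit the line on the unmasked pairs only.
m, b, r, _, _ = scipy.stats.linregress(x.compressed(), y.compressed())
print('y = %f x + %f, r value = %f' % (m, b, r))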
WeichenXu123/spark-sklearn
|
[
"cbde36f6311b73d967e2ec8a97040dfd71eca579"
] |
[
"python/spark_sklearn/test_utils.py"
] |
[
"\"\"\"\nSome test utilities to create the spark context.\n\"\"\"\n\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nfrom scipy.sparse import csr_matrix\nimport time\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml.linalg import Vectors\nfrom spark_sklearn.util import createLocalSparkSession\nimport sys\nif sys.version_info[:2] <= (2, 6):\n try:\n import unittest2 as unittest\n except ImportError:\n sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')\n sys.exit(1)\nelse:\n import unittest\n\n\n# Used as decorator to wrap around a class deriving from unittest.TestCase. Wraps current\n# unittest methods setUpClass() and tearDownClass(), invoked by the nosetest command before\n# and after unit tests are run. This enables us to create one PySpark SparkSession per\n# test fixture. The session can be referred to with self.spark or ClassName.spark.\n#\n# The SparkSession is set up before invoking the class' own set up and torn down after the\n# class' tear down, so you may safely refer to it in those methods.\ndef fixtureReuseSparkSession(cls):\n setup = getattr(cls, 'setUpClass', None)\n teardown = getattr(cls, 'tearDownClass', None)\n\n def setUpClass(cls):\n cls.spark = createLocalSparkSession(\"Unit Tests\")\n if setup:\n setup()\n\n def tearDownClass(cls):\n if teardown:\n teardown()\n if cls.spark:\n cls.spark.stop()\n # Next session will attempt to reuse the previous stopped\n # SparkContext if it's not cleared.\n SparkSession._instantiatedContext = None\n cls.spark = None\n\n cls.setUpClass = classmethod(setUpClass)\n cls.tearDownClass = classmethod(tearDownClass)\n return cls\n\n\nclass MLlibTestCase(unittest.TestCase):\n def setUp(self):\n super(MLlibTestCase, self).setUp()\n self.sc = self.spark.sparkContext\n self.sql = self.spark\n self.X = np.array([[1, 2, 3],\n [-1, 2, 3], [1, -2, 3], [1, 2, -3],\n [-1, -2, 3], [1, -2, -3], [-1, 2, -3],\n [-1, -2, -3]])\n self.y = np.array([1, 0, 1, 1, 0, 1, 0, 0])\n data = [(float(self.y[i]), Vectors.dense(self.X[i])) for i in range(len(self.y))]\n self.df = self.sql.createDataFrame(data, [\"label\", \"features\"])\n\n @staticmethod\n def list2csr(x):\n \"\"\"\n Convert list to a scipy.sparse.csr_matrix\n :param x: list\n :return: csr_matrix with 1 row\n \"\"\"\n return csr_matrix((np.array(x), np.array(range(0, len(x))), np.array([0, len(x)])))\n\n\n# Asserts that two Pandas dataframes are equal, with only 5 digits of precision for\n# floats.\n#\n# If convert is not None, then applies convert to each item in both dataframes first.\n#\n# Sorts rows in dataframes by sortby. If sortby is None then all columns are used.\ndef assertPandasAlmostEqual(actual, expected, convert=None, sortby=None):\n def normalize(pdDF):\n converted = pdDF.apply(lambda col: col.apply(convert if convert else lambda x: x))\n ordered = converted.sort_values(sortby if sortby else pdDF.columns.tolist())\n # We need to drop the index after sorting because pandas remembers the pre-sort\n # permutation in the old index. 
This would trigger a failure if we were to compare\n # differently-ordered dataframes, even if they had the same sorted content.\n unindexed = ordered.reset_index(drop=True)\n return unindexed\n actual = normalize(actual)\n expected = normalize(expected)\n pd.util.testing.assert_almost_equal(actual, expected)\n\n\n# This unittest.TestCase subclass sets the random seed to be based on the time\n# that the test is run.\n#\n# If there is a SEED variable in the environnment, then this is used as the seed.\n# Sets both random and numpy.random.\n#\n# Prints the seed to stdout before running each test case.\nclass RandomTest(unittest.TestCase):\n def setUp(self):\n seed = os.getenv(\"SEED\")\n seed = np.uint32(seed if seed else time.time())\n\n print('Random test using SEED={}'.format(seed))\n\n random.seed(seed)\n np.random.seed(seed)\n"
] |
[
[
"numpy.array",
"pandas.util.testing.assert_almost_equal",
"numpy.random.seed"
]
] |
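`assertPandasAlmostEqual` above sorts both frames and drops the pre-sort index before comparing. A minimal sketch of that normalise-then-compare idiom; note that `pd.util.testing.assert_almost_equal` is deprecated in recent pandas, so the sketch uses `pandas.testing.assert_frame_equal`, which performs the same tolerance-based check.

import pandas as pd

a = pd.DataFrame({'k': [2, 1], 'v': [0.200001, 0.1]})
b = pd.DataFrame({'k': [1, 2], 'v': [0.1, 0.2]})

def normalize(df):
    # Sorting remembers the pre-sort permutation in the index, so drop it;
    # otherwise equal content in a different row order would still fail.
    return df.sort_values(df.columns.tolist()).reset_index(drop=True)

pd.testing.assert_frame_equal(normalize(a), normalize(b), atol=1e-4)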
VisualJoyce/caffe2
|
[
"28523ff1ff33f18eaf8b04cc4e0f308826e1861a"
] |
[
"caffe2/distributed/store_ops_test_util.py"
] |
[
"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n## @package store_ops_test_util\n# Module caffe2.distributed.store_ops_test_util\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom multiprocessing import Process, Queue\n\nimport numpy as np\n\nfrom caffe2.python import core, workspace\n\n\nclass StoreOpsTests(object):\n @classmethod\n def _test_set_get(cls, queue, create_store_handler_fn, index, num_procs):\n store_handler = create_store_handler_fn()\n blob = \"blob\"\n value = np.full(1, 1, np.float32)\n\n # Use last process to set blob to make sure other processes\n # are waiting for the blob before it is set.\n if index == (num_procs - 1):\n workspace.FeedBlob(blob, value)\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"StoreSet\",\n [store_handler, blob],\n [],\n blob_name=blob))\n\n output_blob = \"output_blob\"\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"StoreGet\",\n [store_handler],\n [output_blob],\n blob_name=blob))\n\n try:\n np.testing.assert_array_equal(workspace.FetchBlob(output_blob), 1)\n except AssertionError as err:\n queue.put(err)\n\n workspace.ResetWorkspace()\n\n @classmethod\n def test_set_get(cls, create_store_handler_fn):\n # Queue for assertion errors on subprocesses\n queue = Queue()\n\n # Start N processes in the background\n num_procs = 4\n procs = []\n for index in range(num_procs):\n proc = Process(\n target=cls._test_set_get,\n args=(queue, create_store_handler_fn, index, num_procs, ))\n proc.start()\n procs.append(proc)\n\n # Test complete, join background processes\n for proc in procs:\n proc.join()\n\n # Raise first error we find, if any\n if not queue.empty():\n raise queue.get()\n\n @classmethod\n def test_get_timeout(cls, create_store_handler_fn):\n store_handler = create_store_handler_fn()\n net = core.Net('get_missing_blob')\n net.StoreGet([store_handler], 1, blob_name='blob')\n workspace.RunNetOnce(net)\n"
] |
[
[
"numpy.full"
]
] |
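The `test_set_get` pattern above is worth noting on its own: an assertion raised inside a `multiprocessing.Process` cannot fail the parent's test directly, so each child pushes its `AssertionError` onto a `Queue` and the parent re-raises the first one after joining. A standalone sketch of the same pattern without the caffe2 workspace:

from multiprocessing import Process, Queue

import numpy as np

def worker(queue, index):
    try:
        # Stand-in for the StoreGet check in _test_set_get.
        np.testing.assert_array_equal(np.full(1, 1, np.float32), 1)
    except AssertionError as err:
        queue.put(err)

if __name__ == '__main__':
    queue = Queue()
    procs = [Process(target=worker, args=(queue, i)) for i in range(4)]
    for proc in procs:
        proc.start()
    for proc in procs:
        proc.join()
    if not queue.empty():
        raise queue.get()  # surface the first child failure, if any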
jasonke1225/AI-introduction
|
[
"0748198fc926b1e2f79d2c679ef964ffcfa4ca45"
] |
[
"JasonKe-v1/preprocess.py"
] |
[
"#%%\n### make sure the file in ./primevalData folder should be format {'date, open, high, low, close, volume'}\n### come from https://coinmarketcap.com/zh-tw/historical/\n### the outpur csv file will be saved in ./usefulData folder\n\nimport pandas as pd\nfrom stockstats import StockDataFrame as Sdf\nimport os\n\nMonth2num = {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05', 'Jun':'06',\\\n 'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10', 'Nov':'11', 'Dec':'12'}\n\n### need sourse data folder\ndef arrangeData(foldername):\n df_return = None\n for filename in os.listdir(foldername+\"/\"):\n print(filename)\n date_, open_, high_, low_, close_, volume_ = list(), list(), list(), list(), list(), list()\n \n with open(foldername+'/'+filename, 'r') as f:\n s = f.readline()\n index = s.replace('\\n','').split('\\t')[:6]\n index.insert(1,'tic')\n\n s = f.readline()\n while(s):\n data = s.replace('\\n','').split('\\t')\n\n dat = data[0].replace(',','').split(' ')\n date_.append(dat[2]+Month2num[dat[0]]+dat[1])\n\n tmp = [d.replace('NT$','').replace(',','') for d in data[1:-1]]\n open_.append(float(tmp[0]))\n high_.append(float(tmp[1]))\n low_.append(float(tmp[2]))\n close_.append(float(tmp[3]))\n volume_.append(int(tmp[4]))\n s = f.readline()\n\n tic_list = [filename.replace('.txt','')] * len(volume_)\n \n dfmap = {index[0]:date_, index[1]:tic_list, index[2]:open_, index[3]:high_,\\\n index[4]:low_, index[5]:close_, index[6]:volume_}\n\n df = pd.DataFrame(data=dfmap)\n df = df.sort_values([index[0]],ignore_index=True)\n stock = Sdf.retype(df.copy())\n\n df['macd'] = stock['macd'].values\n df['rsi'] = stock['rsi_30'].values\n df['cci'] = stock['cci_30'].values\n df['adx'] = stock['dx_30'].values\n \n df_return = pd.concat([df_return, df]) if df_return is not None else df\n\n df_return = df_return.sort_values(['Date', 'tic'])\n df_return.to_csv('usefulData/result.csv',index=False)\n\n return df_return\n\ndef getArrangedData():\n df = pd.read_csv('usefulData/result.csv')\n date = df.Date.unique()\n print(date[date>=20150808])\n data = df.loc[df.Date==20150808]\n return df\ngetArrangedData()"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
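The date handling in `arrangeData` above is its least obvious step: a coinmarketcap-style date such as 'Aug 08, 2015' is rewritten as the sortable string '20150808' via the `Month2num` table. A minimal sketch of just that conversion:

Month2num = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06',
             'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}

def to_yyyymmdd(raw):
    # 'Aug 08, 2015' -> ['Aug', '08', '2015'] -> '20150808'
    month, day, year = raw.replace(',', '').split(' ')
    return year + Month2num[month] + day

assert to_yyyymmdd('Aug 08, 2015') == '20150808'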
besticka/acados
|
[
"32767a19aed01a15b5e7b83ebc6ddbd669a47954"
] |
[
"examples/acados_python/test/generate_c_code.py"
] |
[
"#\n# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,\n# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,\n# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,\n# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl\n#\n# This file is part of acados.\n#\n# The 2-Clause BSD License\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.;\n#\n\nfrom acados_template import *\nimport acados_template as at\nfrom export_ode_model import *\nimport numpy as np\nimport scipy.linalg\nfrom ctypes import *\nimport json\nimport argparse\n\n# set to 'True' to generate test data\nGENERATE_DATA = False\n\nLOCAL_TEST = False\nTEST_TOL = 1e-8\n\nif LOCAL_TEST is True:\n FORMULATION = 'LS'\n SOLVER_TYPE = 'SQP_RTI'\n QP_SOLVER = 'FULL_CONDENSING_QPOASES'\n INTEGRATOR_TYPE = 'IRK'\nelse:\n parser = argparse.ArgumentParser(description='test Python interface on pendulum example.')\n parser.add_argument('--FORMULATION', dest='FORMULATION',\n default='LS',\n help='FORMULATION: linear least-squares (LS) or nonlinear \\\n least-squares (NLS) (default: LS)')\n\n parser.add_argument('--QP_SOLVER', dest='QP_SOLVER',\n default='PARTIAL_CONDENSING_HPIPM',\n help='QP_SOLVER: PARTIAL_CONDENSING_HPIPM, FULL_CONDENSING_HPIPM, ' \\\n 'FULL_CONDENSING_HPIPM (default: PARTIAL_CONDENSING_HPIPM)')\n\n parser.add_argument('--INTEGRATOR_TYPE', dest='INTEGRATOR_TYPE',\n default='ERK',\n help='INTEGRATOR_TYPE: explicit (ERK) or implicit (IRK) ' \\\n ' Runge-Kutta (default: ERK)')\n\n parser.add_argument('--SOLVER_TYPE', dest='SOLVER_TYPE',\n default='SQP_RTI',\n help='SOLVER_TYPE: (full step) sequential quadratic programming (SQP) or ' \\\n ' real-time iteration (SQP-RTI) (default: SQP-RTI)')\n\n\n args = parser.parse_args()\n\n FORMULATION = args.FORMULATION\n FORMULATION_values = ['LS', 'NLS']\n if FORMULATION not in FORMULATION_values:\n raise Exception('Invalid unit test value {} for parameter FORMULATION. Possible values are' \\\n ' {}. Exiting.'.format(FORMULATION, FORMULATION_values))\n\n QP_SOLVER = args.QP_SOLVER\n QP_SOLVER_values = ['PARTIAL_CONDENSING_HPIPM', 'FULL_CONDENSING_HPIPM', 'FULL_CONDENSING_QPOASES']\n if QP_SOLVER not in QP_SOLVER_values:\n raise Exception('Invalid unit test value {} for parameter QP_SOLVER. 
Possible values are' \\\n ' {}. Exiting.'.format(QP_SOLVER, QP_SOLVER_values))\n\n INTEGRATOR_TYPE = args.INTEGRATOR_TYPE\n INTEGRATOR_TYPE_values = ['ERK', 'IRK']\n if INTEGRATOR_TYPE not in INTEGRATOR_TYPE:\n raise Exception('Invalid unit test value {} for parameter INTEGRATOR_TYPE. Possible values are' \\\n ' {}. Exiting.'.format(INTEGRATOR_TYPE, INTEGRATOR_TYPE_values))\n\n SOLVER_TYPE = args.SOLVER_TYPE\n SOLVER_TYPE_values = ['SQP', 'SQP-RTI']\n if SOLVER_TYPE not in SOLVER_TYPE:\n raise Exception('Invalid unit test value {} for parameter SOLVER_TYPE. Possible values are' \\\n ' {}. Exiting.'.format(SOLVER_TYPE, SOLVER_TYPE_values))\n\n\n# print test setting\nprint(\"Running test with:\\n\\tformulation:\", FORMULATION, \"\\n\\tqp solver: \", QP_SOLVER,\\\n \"\\n\\tintergrator: \", INTEGRATOR_TYPE, \"\\n\\tsolver: \", SOLVER_TYPE)\n\n# create render arguments\nocp = acados_ocp_nlp()\n\n# export model\nmodel = export_ode_model()\n\n# set model_name\nocp.model = model\n\nTf = 2.0\nnx = model.x.size()[0]\nnu = model.u.size()[0]\nny = nx + nu\nny_e = nx\nN = 50\n\n# set ocp_nlp_dimensions\nnlp_dims = ocp.dims\nnlp_dims.nx = nx\nnlp_dims.ny = ny\nnlp_dims.ny_e = ny_e\nnlp_dims.nbx = 0\nnlp_dims.nbu = nu\nnlp_dims.nu = model.u.size()[0]\nnlp_dims.N = N\n\n# set weighting matrices\nnlp_cost = ocp.cost\n\nif FORMULATION == 'LS':\n nlp_cost.cost_type = 'LINEAR_LS'\n nlp_cost.cost_type_e = 'LINEAR_LS'\nelif FORMULATION == 'NLS':\n nlp_cost.cost_type = 'NONLINEAR_LS'\n nlp_cost.cost_type_e = 'NONLINEAR_LS'\nelse:\n raise Exception('Unknown FORMULATION. Possible values are \\'LS\\' and \\'NLS\\'.')\n\nQ = np.eye(4)\nQ[0,0] = 1e0\nQ[1,1] = 1e2\nQ[2,2] = 1e-3\nQ[3,3] = 1e-2\n\nR = np.eye(1)\nR[0,0] = 1e0\n\nunscale = N/Tf\nQ = Q * unscale\nR = R * unscale\n\nif FORMULATION == 'NLS':\n nlp_cost.W = scipy.linalg.block_diag(R, Q)\nelse:\n nlp_cost.W = scipy.linalg.block_diag(Q, R)\n\nnlp_cost.W_e = Q/unscale\n\nVx = np.zeros((ny, nx))\nVx[0,0] = 1.0\nVx[1,1] = 1.0\nVx[2,2] = 1.0\nVx[3,3] = 1.0\n\nnlp_cost.Vx = Vx\n\nVu = np.zeros((ny, nu))\nVu[4,0] = 1.0\nnlp_cost.Vu = Vu\n\n\nVx_e = np.zeros((ny_e, nx))\nVx_e[0,0] = 1.0\nVx_e[1,1] = 1.0\nVx_e[2,2] = 1.0\nVx_e[3,3] = 1.0\n\nnlp_cost.Vx_e = Vx_e\nif FORMULATION == 'NLS':\n x = SX.sym('x', 4, 1)\n u = SX.sym('u', 1, 1)\n ocp.cost_r.expr = vertcat(u, x)\n ocp.cost_r.x = x\n ocp.cost_r.u = u\n ocp.cost_r.name = 'lin_res'\n ocp.cost_r.ny = nx + nu\n\n ocp.cost_r_e.expr = x\n ocp.cost_r_e.x = x\n ocp.cost_r_e.name = 'lin_res'\n ocp.cost_r_e.ny = nx\n\n\nnlp_cost.yref = np.zeros((ny, ))\nnlp_cost.yref_e = np.zeros((ny_e, ))\n\n# setting bounds\nFmax = 2.0\nnlp_con = ocp.constraints\nnlp_con.lbu = np.array([-Fmax])\nnlp_con.ubu = np.array([+Fmax])\nnlp_con.x0 = np.array([0.0, 3.14, 0.0, 0.0])\nnlp_con.idxbu = np.array([0])\n\n# set QP solver\nocp.solver_options.qp_solver = QP_SOLVER\nocp.solver_options.hessian_approx = 'GAUSS_NEWTON'\nocp.solver_options.integrator_type = INTEGRATOR_TYPE\nocp.solver_options.sim_method_num_stages = 2\nocp.solver_options.sim_method_num_steps = 5\n\n# set prediction horizon\nocp.solver_options.tf = Tf\nocp.solver_options.nlp_solver_type = SOLVER_TYPE\n\n# set header path\nocp.acados_include_path = '../../../../include'\nocp.acados_lib_path = '../../../../lib'\n\nacados_solver = generate_solver(ocp, json_file = 'acados_ocp.json')\n\nNsim = 100\n\nsimX = np.ndarray((Nsim, nx))\nsimU = np.ndarray((Nsim, nu))\n\nfor i in range(Nsim):\n status = acados_solver.solve()\n\n if status !=0:\n print(\"acados failure! Exiting. 
\\n\")\n sys.exit(status)\n\n # get solution\n x0 = acados_solver.get(0, \"x\")\n u0 = acados_solver.get(0, \"u\")\n\n for j in range(nx):\n simX[i,j] = x0[j]\n\n for j in range(nu):\n simU[i,j] = u0[j]\n\n # update initial condition\n x0 = acados_solver.get(1, \"x\")\n\n acados_solver.set(0, \"lbx\", x0)\n acados_solver.set(0, \"ubx\", x0)\n\n # update reference\n for j in range(N):\n acados_solver.set(j, \"yref\", np.array([0, 0, 0, 0, 0]))\n acados_solver.set(N, \"yref\", np.array([0, 0, 0, 0]))\n\n# dump result to JSON file for unit testing\ntest_file_name = 'test_data/generate_c_code_out_' + FORMULATION + '_' + QP_SOLVER + '_' + \\\n INTEGRATOR_TYPE + '_' + SOLVER_TYPE + '.json'\n\nif GENERATE_DATA:\n with open(test_file_name, 'w') as f:\n json.dump({\"simX\": simX.tolist(), \"simU\": simU.tolist()}, f, indent=4, sort_keys=True)\nelse:\n with open(test_file_name, 'r') as f:\n test_data = json.load(f)\n simX_error = np.linalg.norm(test_data['simX'] - simX)\n simU_error = np.linalg.norm(test_data['simU'] - simU)\n if simX_error > TEST_TOL or simU_error > TEST_TOL:\n raise Exception(\"Python acados test failure with accuracies {:.2E} and {:.2E} ({:.2E} required) on pendulum example! Exiting.\\n\".format(simX_error, simU_error, TEST_TOL))\n else:\n print('Python test passed with accuracy {:.2E}'.format(max(simU_error, simX_error)))\n"
] |
[
[
"numpy.eye",
"numpy.ndarray",
"numpy.linalg.norm",
"numpy.array",
"numpy.zeros"
]
] |
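Two of the argument guards in the script above are no-ops: `if INTEGRATOR_TYPE not in INTEGRATOR_TYPE` and `if SOLVER_TYPE not in SOLVER_TYPE` test each string for membership in itself, which is always true, so invalid values never raise; `SOLVER_TYPE_values` also lists 'SQP-RTI' while the default argument is 'SQP_RTI', and the failure branch calls `sys.exit` without importing `sys`. A sketch of the checks as presumably intended, against the `*_values` lists:

def validate(name, value, allowed):
    if value not in allowed:
        raise Exception('Invalid unit test value {} for parameter {}. Possible values are'
                        ' {}. Exiting.'.format(value, name, allowed))

validate('INTEGRATOR_TYPE', 'ERK', ['ERK', 'IRK'])
validate('SOLVER_TYPE', 'SQP_RTI', ['SQP', 'SQP_RTI'])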
xipingyan/opencv
|
[
"6a2077cbd8a8a0d8cbd3e0e8c3ca239f17e6c067"
] |
[
"samples/dnn/person_reid.py"
] |
[
"#!/usr/bin/env python\n'''\nYou can download a baseline ReID model and sample input from:\nhttps://github.com/ReID-Team/ReID_extra_testdata\n\nAuthors of samples and Youtu ReID baseline:\n Xing Sun <[email protected]>\n Feng Zheng <[email protected]>\n Xinyang Jiang <[email protected]>\n Fufu Yu <[email protected]>\n Enwei Zhang <[email protected]>\n\nCopyright (C) 2020-2021, Tencent.\nCopyright (C) 2020-2021, SUSTech.\n'''\nimport argparse\nimport os.path\nimport numpy as np\nimport cv2 as cv\n\nbackends = (cv.dnn.DNN_BACKEND_DEFAULT,\n cv.dnn.DNN_BACKEND_INFERENCE_ENGINE,\n cv.dnn.DNN_BACKEND_OPENCV,\n cv.dnn.DNN_BACKEND_VKCOM,\n cv.dnn.DNN_BACKEND_CUDA)\n\ntargets = (cv.dnn.DNN_TARGET_CPU,\n cv.dnn.DNN_TARGET_OPENCL,\n cv.dnn.DNN_TARGET_OPENCL_FP16,\n cv.dnn.DNN_TARGET_MYRIAD,\n cv.dnn.DNN_TARGET_HDDL,\n cv.dnn.DNN_TARGET_VULKAN,\n cv.dnn.DNN_TARGET_CUDA,\n cv.dnn.DNN_TARGET_CUDA_FP16)\n\nMEAN = (0.485, 0.456, 0.406)\nSTD = (0.229, 0.224, 0.225)\n\ndef preprocess(images, height, width):\n \"\"\"\n Create 4-dimensional blob from image\n :param image: input image\n :param height: the height of the resized input image\n :param width: the width of the resized input image\n \"\"\"\n img_list = []\n for image in images:\n image = cv.resize(image, (width, height))\n img_list.append(image[:, :, ::-1])\n\n images = np.array(img_list)\n images = (images / 255.0 - MEAN) / STD\n\n input = cv.dnn.blobFromImages(images.astype(np.float32), ddepth = cv.CV_32F)\n return input\n\ndef extract_feature(img_dir, model_path, batch_size = 32, resize_h = 384, resize_w = 128, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU):\n \"\"\"\n Extract features from images in a target directory\n :param img_dir: the input image directory\n :param model_path: path to ReID model\n :param batch_size: the batch size for each network inference iteration\n :param resize_h: the height of the input image\n :param resize_w: the width of the input image\n :param backend: name of computation backend\n :param target: name of computation target\n \"\"\"\n feat_list = []\n path_list = os.listdir(img_dir)\n path_list = [os.path.join(img_dir, img_name) for img_name in path_list]\n count = 0\n\n for i in range(0, len(path_list), batch_size):\n print('Feature Extraction for images in', img_dir, 'Batch:', count, '/', len(path_list))\n batch = path_list[i : min(i + batch_size, len(path_list))]\n imgs = read_data(batch)\n inputs = preprocess(imgs, resize_h, resize_w)\n\n feat = run_net(inputs, model_path, backend, target)\n\n feat_list.append(feat)\n count += batch_size\n\n feats = np.concatenate(feat_list, axis = 0)\n return feats, path_list\n\ndef run_net(inputs, model_path, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU):\n \"\"\"\n Forword propagation for a batch of images.\n :param inputs: input batch of images\n :param model_path: path to ReID model\n :param backend: name of computation backend\n :param target: name of computation target\n \"\"\"\n net = cv.dnn.readNet(model_path)\n net.setPreferableBackend(backend)\n net.setPreferableTarget(target)\n net.setInput(inputs)\n out = net.forward()\n out = np.reshape(out, (out.shape[0], out.shape[1]))\n return out\n\ndef read_data(path_list):\n \"\"\"\n Read all images from a directory into a list\n :param path_list: the list of image path\n \"\"\"\n img_list = []\n for img_path in path_list:\n img = cv.imread(img_path)\n if img is None:\n continue\n img_list.append(img)\n return img_list\n\ndef normalize(nparray, order=2, axis=0):\n \"\"\"\n Normalize a 
N-D numpy array along the specified axis.\n :param nparry: the array of vectors to be normalized\n :param order: order of the norm\n :param axis: the axis of x along which to compute the vector norms\n \"\"\"\n norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)\n return nparray / (norm + np.finfo(np.float32).eps)\n\ndef similarity(array1, array2):\n \"\"\"\n Compute the euclidean or cosine distance of all pairs.\n :param array1: numpy array with shape [m1, n]\n :param array2: numpy array with shape [m2, n]\n Returns:\n numpy array with shape [m1, m2]\n \"\"\"\n array1 = normalize(array1, axis=1)\n array2 = normalize(array2, axis=1)\n dist = np.matmul(array1, array2.T)\n return dist\n\ndef topk(query_feat, gallery_feat, topk = 5):\n \"\"\"\n Return the index of top K gallery images most similar to the query images\n :param query_feat: array of feature vectors of query images\n :param gallery_feat: array of feature vectors of gallery images\n :param topk: number of gallery images to return\n \"\"\"\n sim = similarity(query_feat, gallery_feat)\n index = np.argsort(-sim, axis = 1)\n return [i[0:int(topk)] for i in index]\n\ndef drawRankList(query_name, gallery_list, output_size = (128, 384)):\n \"\"\"\n Draw the rank list\n :param query_name: path of the query image\n :param gallery_name: path of the gallery image\n \"param output_size: the output size of each image in the rank list\n \"\"\"\n def addBorder(im, color):\n bordersize = 5\n border = cv.copyMakeBorder(\n im,\n top = bordersize,\n bottom = bordersize,\n left = bordersize,\n right = bordersize,\n borderType = cv.BORDER_CONSTANT,\n value = color\n )\n return border\n query_img = cv.imread(query_name)\n query_img = cv.resize(query_img, output_size)\n query_img = addBorder(query_img, [0, 0, 0])\n cv.putText(query_img, 'Query', (10, 30), cv.FONT_HERSHEY_COMPLEX, 1., (0,255,0), 2)\n\n gallery_img_list = []\n for i, gallery_name in enumerate(gallery_list):\n gallery_img = cv.imread(gallery_name)\n gallery_img = cv.resize(gallery_img, output_size)\n gallery_img = addBorder(gallery_img, [255, 255, 255])\n cv.putText(gallery_img, 'G%02d'%i, (10, 30), cv.FONT_HERSHEY_COMPLEX, 1., (0,255,0), 2)\n gallery_img_list.append(gallery_img)\n ret = np.concatenate([query_img] + gallery_img_list, axis = 1)\n return ret\n\n\ndef visualization(topk_idx, query_names, gallery_names, output_dir = 'vis'):\n \"\"\"\n Visualize the retrieval results with the person ReID model\n :param topk_idx: the index of ranked gallery images for each query image\n :param query_names: the list of paths of query images\n :param gallery_names: the list of paths of gallery images\n :param output_dir: the path to save the visualize results\n \"\"\"\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n for i, idx in enumerate(topk_idx):\n query_name = query_names[i]\n topk_names = [gallery_names[j] for j in idx]\n vis_img = drawRankList(query_name, topk_names)\n output_path = os.path.join(output_dir, '%03d_%s'%(i, os.path.basename(query_name)))\n cv.imwrite(output_path, vis_img)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Use this script to run human parsing using JPPNet',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--query_dir', '-q', required=True, help='Path to query image.')\n parser.add_argument('--gallery_dir', '-g', required=True, help='Path to gallery directory.')\n parser.add_argument('--resize_h', default = 256, help='The height of the input for model inference.')\n 
parser.add_argument('--resize_w', default = 128, help='The width of the input for model inference')\n parser.add_argument('--model', '-m', default='reid.onnx', help='Path to pb model.')\n parser.add_argument('--visualization_dir', default='vis', help='Path for the visualization results')\n parser.add_argument('--topk', default=10, help='Number of images visualized in the rank list')\n parser.add_argument('--batchsize', default=32, help='The batch size of each inference')\n parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,\n help=\"Choose one of computation backends: \"\n \"%d: automatically (by default), \"\n \"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), \"\n \"%d: OpenCV implementation, \"\n \"%d: VKCOM, \"\n \"%d: CUDA backend\"% backends)\n parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,\n help='Choose one of target computation devices: '\n '%d: CPU target (by default), '\n '%d: OpenCL, '\n '%d: OpenCL fp16 (half-float precision), '\n '%d: NCS2 VPU, '\n '%d: HDDL VPU, '\n '%d: Vulkan, '\n '%d: CUDA, '\n '%d: CUDA FP16'\n % targets)\n args, _ = parser.parse_known_args()\n\n if not os.path.isfile(args.model):\n raise OSError(\"Model not exist\")\n\n query_feat, query_names = extract_feature(args.query_dir, args.model, args.batchsize, args.resize_h, args.resize_w, args.backend, args.target)\n gallery_feat, gallery_names = extract_feature(args.gallery_dir, args.model, args.batchsize, args.resize_h, args.resize_w, args.backend, args.target)\n\n topk_idx = topk(query_feat, gallery_feat, args.topk)\n visualization(topk_idx, query_names, gallery_names, output_dir = args.visualization_dir)\n"
] |
[
[
"numpy.reshape",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.finfo",
"numpy.concatenate",
"numpy.argsort",
"numpy.array"
]
] |
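A minimal sketch of the `preprocess` normalisation above on a dummy image, assuming the ImageNet statistics it hard-codes: BGR-to-RGB flip, scale to [0, 1], per-channel standardisation, then packing into an NCHW blob with `cv.dnn.blobFromImages` (a list of images is passed here to keep the binding unambiguous):

import cv2 as cv
import numpy as np

MEAN = (0.485, 0.456, 0.406)
STD = (0.229, 0.224, 0.225)

img = np.random.randint(0, 256, (384, 128, 3), dtype=np.uint8)  # dummy BGR image
rgb = img[:, :, ::-1]                    # OpenCV loads BGR; the model expects RGB
norm = (rgb / 255.0 - MEAN) / STD        # per-channel standardisation
blob = cv.dnn.blobFromImages([norm.astype(np.float32)], ddepth=cv.CV_32F)
print(blob.shape)                        # (1, 3, 384, 128): channels-first batch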
rafaelpossas/reinforcement-learning
|
[
"6e1e8057939cbcb489c714868ee650a3481b8c7c"
] |
[
"TD/q_learning.py"
] |
[
"import gym\nimport itertools\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nimport sys\n\nif \"../\" not in sys.path:\n sys.path.append(\"../\")\n\nfrom collections import defaultdict\nfrom lib.envs.cliff_walking import CliffWalkingEnv\nfrom lib import plotting\n\nmatplotlib.style.use('ggplot')\n\nenv = CliffWalkingEnv()\n\n\ndef make_epsilon_greedy_policy(Q, epsilon, nA):\n \"\"\"\n Creates an epsilon-greedy policy based on a given Q-function and epsilon.\n\n Args:\n Q: A dictionary that maps from state -> action-values.\n Each value is a numpy array of length nA (see below)\n epsilon: The probability to select a random action . float between 0 and 1.\n nA: Number of actions in the environment.\n\n Returns:\n A function that takes the observation as an argument and returns\n the probabilities for each action in the form of a numpy array of length nA.\n\n \"\"\"\n\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n best_action = np.argmax(Q[observation])\n A[best_action] += (1.0 - epsilon)\n return A\n\n return policy_fn\n\n\ndef q_learning(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1):\n \"\"\"\n SARSA algorithm: On-policy TD control. Finds the optimal epsilon-greedy policy.\n\n Args:\n env: OpenAI environment.\n num_episodes: Number of episodes to run for.\n discount_factor: Lambda time discount factor.\n alpha: TD learning rate.\n epsilon: Chance the sample a random action. Float betwen 0 and 1.\n\n Returns:\n A tuple (Q, stats).\n Q is the optimal action-value function, a dictionary mapping state -> action values.\n stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.\n \"\"\"\n\n # The final action-value function.\n # A nested dictionary that maps state -> (action -> action-value).\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n\n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n # The policy we're following\n policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)\n all_actions = range(env.action_space.n)\n\n for i_episode in range(num_episodes):\n # Print out which episode we're on, useful for debugging.\n if (i_episode + 1) % 10 == 0:\n print(\"\\rEpisode {}/{}.\\n\".format(i_episode + 1, num_episodes), end=\"\")\n sys.stdout.flush()\n\n observation = env.reset()\n action = np.random.choice(all_actions, p=policy(observation))\n\n for t in itertools.count():\n\n next_obs, reward, done, _ = env.step(action)\n next_action = np.random.choice(all_actions, p=policy(next_obs))\n\n td_target = reward + discount_factor * np.max(Q[next_obs])\n Q[observation][action] += alpha * (td_target - Q[observation][action])\n\n if done:\n break\n\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t\n\n observation = next_obs\n action = next_action\n\n return Q, stats\n\nQ, stats = q_learning(env, 500)\n\nplotting.plot_episode_stats(stats)"
] |
[
[
"matplotlib.style.use",
"numpy.ones",
"numpy.max",
"numpy.argmax",
"numpy.zeros"
]
] |
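One caution on the file above: its docstring describes SARSA (on-policy TD control), but the target `reward + discount_factor * np.max(Q[next_obs])` is the off-policy Q-learning backup; note also that the `stats` bookkeeping sits after the `break`, so the final transition of each episode is never recorded. The update itself, as a standalone sketch:

from collections import defaultdict

import numpy as np

alpha, gamma = 0.5, 1.0
Q = defaultdict(lambda: np.zeros(4))  # state -> action values

def q_update(obs, action, reward, next_obs):
    # Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
    td_target = reward + gamma * np.max(Q[next_obs])
    Q[obs][action] += alpha * (td_target - Q[obs][action])

q_update(obs=0, action=1, reward=-1.0, next_obs=2)
print(Q[0])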
ajz34/ML_Course_Code_Report
|
[
"59f02e4e6e2d85343a61162147ed6e775dc1a596"
] |
[
"Gilmer_Code/chem_tensorflow.py"
] |
[
"#!/usr/bin/env/python\n\nfrom typing import Tuple, List, Any, Sequence\n\nimport tensorflow as tf\nimport time\nimport os\nimport json\nimport numpy as np\nimport pickle\nimport random\n\nfrom utils import MLP, ThreadedIterator, SMALL_NUMBER\n\n\nclass ChemModel(object):\n @classmethod\n def default_params(cls):\n return {\n 'num_epochs': 3000,\n 'patience': 25,\n 'learning_rate': 0.0001,\n 'clamp_gradient_norm': 1.0,\n 'out_layer_dropout_keep_prob': 1.0,\n\n 'hidden_size': 100,\n 'num_timesteps': 4,\n 'use_graph': True,\n\n 'tie_fwd_bkwd': True,\n 'task_ids': [0],\n\n 'random_seed': 0,\n }\n\n def __init__(self, args):\n self.args = args\n\n # Collect argument things:\n data_dir = ''\n if '--data_dir' in args and args['--data_dir'] is not None:\n data_dir = args['--data_dir']\n self.data_dir = data_dir\n\n self.run_id = \"_\".join([time.strftime(\"%Y-%m-%d-%H-%M-%S\"), str(os.getpid())])\n log_dir = args.get('--log_dir') or '.'\n self.log_file = os.path.join(log_dir, \"%s_log.json\" % self.run_id)\n self.best_model_file = os.path.join(log_dir, \"%s_model_best.pickle\" % self.run_id)\n\n # Collect parameters:\n params = self.default_params()\n config_file = args.get('--config-file')\n if config_file is not None:\n with open(config_file, 'r') as f:\n params.update(json.load(f))\n config = args.get('--config')\n if config is not None:\n params.update(json.loads(config))\n self.params = params\n with open(os.path.join(log_dir, \"%s_params.json\" % self.run_id), \"w\") as f:\n json.dump(params, f)\n print(\"Run %s starting with following parameters:\\n%s\" % (self.run_id, json.dumps(self.params)))\n\n # Load data:\n self.max_num_vertices = 0\n self.num_edge_types = 0\n self.annotation_size = 0\n self.train_data = self.load_data(\"molecules_train.json\", is_training_data=True)\n self.valid_data = self.load_data(\"molecules_valid.json\", is_training_data=False)\n\n # Build the actual model\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.graph = tf.Graph()\n self.sess = tf.Session(graph=self.graph, config=config)\n with self.graph.as_default():\n random.seed(params['random_seed'])\n np.random.seed(params['random_seed'])\n tf.set_random_seed(params['random_seed'])\n self.placeholders = {}\n self.weights = {}\n self.ops = {}\n self.make_model()\n self.make_train_step()\n\n # Restore/initialize variables:\n restore_file = args.get('--restore')\n if restore_file is not None:\n self.restore_model(restore_file)\n else:\n self.initialize_model()\n\n def load_data(self, file_name, is_training_data: bool):\n full_path = os.path.join(self.data_dir, file_name)\n\n print(\"Loading data from %s\" % full_path)\n with open(full_path, 'r') as f:\n data = json.load(f)\n\n restrict = self.args.get(\"--restrict_data\")\n if restrict is not None and restrict > 0:\n data = data[:restrict]\n\n # Get some common data out:\n num_fwd_edge_types = 0\n for g in data:\n self.max_num_vertices = max(self.max_num_vertices, max([v for e in g['graph'] for v in [e[0], e[2]]]))\n num_fwd_edge_types = max(num_fwd_edge_types, max([e[1] for e in g['graph']]))\n self.num_edge_types = max(self.num_edge_types, num_fwd_edge_types * (1 if self.params['tie_fwd_bkwd'] else 2))\n self.annotation_size = max(self.annotation_size, len(data[0][\"node_features\"][0]))\n\n return self.process_raw_graphs(data, is_training_data)\n\n @staticmethod\n def graph_string_to_array(graph_string: str) -> List[List[int]]:\n return [[int(v) for v in s.split(' ')]\n for s in graph_string.split('\\n')]\n\n def 
process_raw_graphs(self, raw_data: Sequence[Any], is_training_data: bool) -> Any:\n raise Exception(\"Models have to implement process_raw_graphs!\")\n\n def make_model(self):\n self.placeholders['target_values'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],\n name='target_values')\n self.placeholders['target_mask'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],\n name='target_mask')\n self.placeholders['num_graphs'] = tf.placeholder(tf.int64, [], name='num_graphs')\n self.placeholders['out_layer_dropout_keep_prob'] = tf.placeholder(tf.float32, [], name='out_layer_dropout_keep_prob')\n\n with tf.variable_scope(\"graph_model\"):\n self.prepare_specific_graph_model()\n # This does the actual graph work:\n if self.params['use_graph']:\n self.ops['final_node_representations'] = self.compute_final_node_representations()\n else:\n self.ops['final_node_representations'] = tf.zeros_like(self.placeholders['initial_node_representation'])\n\n self.ops['losses'] = []\n for (internal_id, task_id) in enumerate(self.params['task_ids']):\n with tf.variable_scope(\"out_layer_task%i\" % task_id):\n with tf.variable_scope(\"regression_gate\"):\n self.weights['regression_gate_task%i' % task_id] = MLP(2 * self.params['hidden_size'], 1, [],\n self.placeholders['out_layer_dropout_keep_prob'])\n with tf.variable_scope(\"regression\"):\n self.weights['regression_transform_task%i' % task_id] = MLP(self.params['hidden_size'], 1, [],\n self.placeholders['out_layer_dropout_keep_prob'])\n computed_values = self.gated_regression(self.ops['final_node_representations'],\n self.weights['regression_gate_task%i' % task_id],\n self.weights['regression_transform_task%i' % task_id])\n diff = computed_values - self.placeholders['target_values'][internal_id,:]\n task_target_mask = self.placeholders['target_mask'][internal_id,:]\n task_target_num = tf.reduce_sum(task_target_mask) + SMALL_NUMBER\n diff = diff * task_target_mask # Mask out unused values\n self.ops['accuracy_task%i' % task_id] = tf.reduce_sum(tf.abs(diff)) / task_target_num\n task_loss = tf.reduce_sum(0.5 * tf.square(diff)) / task_target_num\n # Normalise loss to account for fewer task-specific examples in batch:\n task_loss = task_loss * (1.0 / (self.params['task_sample_ratios'].get(task_id) or 1.0))\n self.ops['losses'].append(task_loss)\n self.ops['loss'] = tf.reduce_sum(self.ops['losses'])\n\n def make_train_step(self):\n trainable_vars = self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n if self.args.get('--freeze-graph-model'):\n graph_vars = set(self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\"graph_model\"))\n filtered_vars = []\n for var in trainable_vars:\n if var not in graph_vars:\n filtered_vars.append(var)\n else:\n print(\"Freezing weights of variable %s.\" % var.name)\n trainable_vars = filtered_vars\n optimizer = tf.train.AdamOptimizer()\n grads_and_vars = optimizer.compute_gradients(self.ops['loss'], var_list=trainable_vars)\n clipped_grads = []\n for grad, var in grads_and_vars:\n if grad is not None:\n clipped_grads.append((tf.clip_by_norm(grad, self.params['clamp_gradient_norm']), var))\n else:\n clipped_grads.append((grad, var))\n self.ops['train_step'] = optimizer.apply_gradients(clipped_grads)\n # Initialize newly-introduced variables:\n self.sess.run(tf.local_variables_initializer())\n\n def gated_regression(self, last_h, regression_gate, regression_transform):\n raise Exception(\"Models have to implement gated_regression!\")\n\n def 
prepare_specific_graph_model(self) -> None:\n raise Exception(\"Models have to implement prepare_specific_graph_model!\")\n\n def compute_final_node_representations(self) -> tf.Tensor:\n raise Exception(\"Models have to implement compute_final_node_representations!\")\n\n def make_minibatch_iterator(self, data: Any, is_training: bool):\n raise Exception(\"Models have to implement make_minibatch_iterator!\")\n\n def run_epoch(self, epoch_name: str, data, is_training: bool):\n chemical_accuracies = np.array([0.066513725, 0.012235489, 0.071939046, 0.033730778, 0.033486113, 0.004278493,\n 0.001330901, 0.004165489, 0.004128926, 0.00409976, 0.004527465, 0.012292586,\n 0.037467458])\n\n loss = 0\n accuracies = []\n accuracy_ops = [self.ops['accuracy_task%i' % task_id] for task_id in self.params['task_ids']]\n start_time = time.time()\n processed_graphs = 0\n batch_iterator = ThreadedIterator(self.make_minibatch_iterator(data, is_training), max_queue_size=5)\n for step, batch_data in enumerate(batch_iterator):\n num_graphs = batch_data[self.placeholders['num_graphs']]\n processed_graphs += num_graphs\n if is_training:\n batch_data[self.placeholders['out_layer_dropout_keep_prob']] = self.params['out_layer_dropout_keep_prob']\n fetch_list = [self.ops['loss'], accuracy_ops, self.ops['train_step']]\n else:\n batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0\n fetch_list = [self.ops['loss'], accuracy_ops]\n result = self.sess.run(fetch_list, feed_dict=batch_data)\n (batch_loss, batch_accuracies) = (result[0], result[1])\n loss += batch_loss * num_graphs\n accuracies.append(np.array(batch_accuracies) * num_graphs)\n\n print(\"Running %s, batch %i (has %i graphs). Loss so far: %.4f\" % (epoch_name,\n step,\n num_graphs,\n loss / processed_graphs),\n end='\\r')\n\n accuracies = np.sum(accuracies, axis=0) / processed_graphs\n loss = loss / processed_graphs\n error_ratios = accuracies / chemical_accuracies[self.params[\"task_ids\"]]\n instance_per_sec = processed_graphs / (time.time() - start_time)\n return loss, accuracies, error_ratios, instance_per_sec\n\n def train(self):\n log_to_save = []\n total_time_start = time.time()\n with self.graph.as_default():\n if self.args.get('--restore') is not None:\n _, valid_accs, _, _ = self.run_epoch(\"Resumed (validation)\", self.valid_data, False)\n best_val_acc = np.sum(valid_accs)\n best_val_acc_epoch = 0\n print(\"\\r\\x1b[KResumed operation, initial cum. val. 
acc: %.5f\" % best_val_acc)\n else:\n (best_val_acc, best_val_acc_epoch) = (float(\"+inf\"), 0)\n for epoch in range(1, self.params['num_epochs'] + 1):\n print(\"== Epoch %i\" % epoch)\n train_loss, train_accs, train_errs, train_speed = self.run_epoch(\"epoch %i (training)\" % epoch,\n self.train_data, True)\n accs_str = \" \".join([\"%i:%.5f\" % (id, acc) for (id, acc) in zip(self.params['task_ids'], train_accs)])\n errs_str = \" \".join([\"%i:%.5f\" % (id, err) for (id, err) in zip(self.params['task_ids'], train_errs)])\n print(\"\\r\\x1b[K Train: loss: %.5f | acc: %s | error_ratio: %s | instances/sec: %.2f\" % (train_loss,\n accs_str,\n errs_str,\n train_speed))\n valid_loss, valid_accs, valid_errs, valid_speed = self.run_epoch(\"epoch %i (validation)\" % epoch,\n self.valid_data, False)\n accs_str = \" \".join([\"%i:%.5f\" % (id, acc) for (id, acc) in zip(self.params['task_ids'], valid_accs)])\n errs_str = \" \".join([\"%i:%.5f\" % (id, err) for (id, err) in zip(self.params['task_ids'], valid_errs)])\n print(\"\\r\\x1b[K Valid: loss: %.5f | acc: %s | error_ratio: %s | instances/sec: %.2f\" % (valid_loss,\n accs_str,\n errs_str,\n valid_speed))\n\n epoch_time = time.time() - total_time_start\n log_entry = {\n 'epoch': epoch,\n 'time': epoch_time,\n 'train_results': (train_loss, train_accs.tolist(), train_errs.tolist(), train_speed),\n 'valid_results': (valid_loss, valid_accs.tolist(), valid_errs.tolist(), valid_speed),\n }\n log_to_save.append(log_entry)\n with open(self.log_file, 'w') as f:\n json.dump(log_to_save, f, indent=4)\n\n val_acc = np.sum(valid_accs) # type: float\n if val_acc < best_val_acc:\n self.save_model(self.best_model_file)\n print(\" (Best epoch so far, cum. val. acc decreased to %.5f from %.5f. Saving to '%s')\" % (val_acc, best_val_acc, self.best_model_file))\n best_val_acc = val_acc\n best_val_acc_epoch = epoch\n elif epoch - best_val_acc_epoch >= self.params['patience']:\n print(\"Stopping training after %i epochs without improvement on validation accuracy.\" % self.params['patience'])\n break\n\n def save_model(self, path: str) -> None:\n weights_to_save = {}\n for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):\n assert variable.name not in weights_to_save\n weights_to_save[variable.name] = self.sess.run(variable)\n\n data_to_save = {\n \"params\": self.params,\n \"weights\": weights_to_save\n }\n\n with open(path, 'wb') as out_file:\n pickle.dump(data_to_save, out_file, pickle.HIGHEST_PROTOCOL)\n\n def initialize_model(self) -> None:\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n self.sess.run(init_op)\n\n def restore_model(self, path: str) -> None:\n print(\"Restoring weights from file %s.\" % path)\n with open(path, 'rb') as in_file:\n data_to_load = pickle.load(in_file)\n\n # Assert that we got the same model configuration\n assert len(self.params) == len(data_to_load['params'])\n for (par, par_value) in self.params.items():\n # Fine to have different task_ids:\n if par not in ['task_ids']:\n assert par_value == data_to_load['params'][par]\n\n variables_to_initialize = []\n with tf.name_scope(\"restore\"):\n restore_ops = []\n used_vars = set()\n for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):\n used_vars.add(variable.name)\n if variable.name in data_to_load['weights']:\n restore_ops.append(variable.assign(data_to_load['weights'][variable.name]))\n else:\n print('Freshly initializing %s since no saved value was found.' 
% variable.name)\n variables_to_initialize.append(variable)\n for var_name in data_to_load['weights']:\n if var_name not in used_vars:\n print('Saved weights for %s not used by model.' % var_name)\n restore_ops.append(tf.variables_initializer(variables_to_initialize))\n self.sess.run(restore_ops)\n"
] |
[
[
"tensorflow.reduce_sum",
"tensorflow.variables_initializer",
"tensorflow.train.AdamOptimizer",
"tensorflow.Graph",
"tensorflow.ConfigProto",
"tensorflow.clip_by_norm",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.zeros_like",
"tensorflow.set_random_seed",
"numpy.array",
"numpy.sum",
"tensorflow.local_variables_initializer",
"numpy.random.seed",
"tensorflow.variable_scope",
"tensorflow.abs"
]
] |
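The gradient handling in `make_train_step` above follows the standard TF1 three-step pattern: `compute_gradients`, clip each gradient by norm (skipping the `None` gradients of unused variables), then `apply_gradients`. A minimal sketch, assuming TensorFlow 1.x semantics (reachable via `tf.compat.v1` on a TF2 install):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.Variable(3.0)
loss = tf.square(x)

optimizer = tf.train.AdamOptimizer()
grads_and_vars = optimizer.compute_gradients(loss)
clipped = [(tf.clip_by_norm(g, 1.0), v) if g is not None else (g, v)
           for g, v in grads_and_vars]
train_step = optimizer.apply_gradients(clipped)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_step)
    print(sess.run(x))  # x has moved toward the minimum of x**2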
reedessick/sddr
|
[
"f1225ad798a06bf351cf20b93c36864e5604f35a"
] |
[
"sddr/plot.py"
] |
[
"__doc__ = \"a method to support some basic and not-so-basic plotting functionality\"\n__author__ = \"[email protected]\"\n\n#-------------------------------------------------\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\n\n### non-standard libraries\nfrom sddr import utils\n\n#-------------------------------------------------\n\ndef kde2fig(points, logkde, priors, fields, sanitycheck_tuple=None, levels=[], log=True):\n \"\"\"\n generate something like a corner plot for logkde\n assumes\n points.shape = Nfields, Npoints\n logkde.shape = (Npoints,)*Nfields\n priors.shape = Nfields, 2\n len(fields) = Nfields\n\n sanitycheck_tuple is either None or (samples, weights, wpoints, wsamples, wpriors, b)\n\n returns the associated figure object\n \"\"\"\n ### sanity-check input\n Nfields, Npoints = points.shape\n nfields, nbounds = priors.shape\n assert nfields==Nfields\n assert nbounds==2\n assert len(fields)==Nfields\n shape = logkde.shape\n assert len(shape)==Nfields\n assert np.all(n==Npoints for n in shape)\n\n\n sanitycheck = sanitycheck_tuple is not None\n if sanitycheck:\n samples, weights, wpoints, wsamples, wpriors, b = sanitycheck_tuple\n Nsamp = samples.shape[1]\n \n ### set up helper arrays\n tmp = np.empty(Npoints, dtype='float')\n tmp_field = None\n\n # make a copy so I can mess around without screwing up original array\n dummy_logkde = np.empty_like(logkde, dtype='float')\n dummy_points = np.empty((Nfields, Npoints), dtype='float')\n\n ### plot this stuff!\n fig = plt.figure()\n\n for row in xrange(Nfields):\n for col in xrange(row+1):\n ax = plt.subplot(Nfields, Nfields, row*Nfields+col+1)\n\n trans = range(Nfields)\n dummy_logkde[...] = logkde[...]\n dummy_points[...] = points[...]\n\n if row==col: ### marginalize away everything except for this row, plot result\n\n # put col as the first index\n trans[0] = col\n trans[col] = 0\n dummy_logkde[...] 
= dummy_logkde.transpose(*trans)[...]\n\n tmp[:] = dummy_points[col,:]\n dummy_points[col,:] = dummy_points[0,:]\n dummy_points[0,:] = tmp[:]\n\n # actually plot\n tmp[:] = utils.nd_marg_leave1(dummy_points, dummy_logkde)[1]\n tmp -= np.max(tmp)\n tmp -= np.log(np.sum(np.exp(tmp)))\n\n if log:\n ax.plot(dummy_points[0], tmp, color='b')\n\n else:\n ax.plot(dummy_points[0], np.exp(tmp), color='b')\n\n # decorate\n ax.set_xlim(priors[row])\n\n if sanitycheck:\n ### add direct 1D marginalization\n y = np.array([utils._compute_logkde(wp, wsamples[col], weights, b=b, prior_min=wpriors[col][0], prior_max=wpriors[col][1]) for wp in wpoints[col]])\n y -= np.max(y)\n y -= np.log(np.sum(np.exp(y)))\n ax.plot(points[col], np.exp(y), color='k', linestyle='dashed')\n\n ### add a histogram\n ax.hist(samples[col], bins=points[col], histtype='step', color='g', weights=np.ones(Nsamp, dtype=float)/Nsamp)\n\n else: ### marginalize everything besides these rows and columns\n\n if sanitycheck:\n ### add data points\n counts, xedges, yedges, _ = ax.hist2d(samples[col], samples[row], bins=max(10, int(0.1*len(samples[col]))**0.5), cmap='Greens') #, norm=matplotlib.colors.LogNorm())\n\n ax.scatter(samples[col], samples[row], marker='.', s=1, color='k', alpha=0.1)\n\n # add marginalized histograms to diagonal axes\n margx = 1.*np.sum(counts, axis=1)/Nsamp * (len(xedges)-1)/(len(points[row])-1)\n plt.figure(fig.number)\n plt.subplot(Nfields, Nfields, Nfields*col+col+1).plot((xedges[1:]+xedges[:-1])/2., margx, color='m')\n\n margy = 1.*np.sum(counts, axis=0)/Nsamp * (len(yedges)-1)/(len(points[col])-1)\n plt.figure(fig.number)\n plt.subplot(Nfields, Nfields, Nfields*row+row+1).plot((yedges[1:]+yedges[:-1])/2., margy, color='m')\n\n # put the column in the first index\n trans[0] = col\n trans[col] = 0\n\n x = trans[1]\n trans[1] = row\n trans[row] = x\n\n dummy_logkde[...] = dummy_logkde.transpose(*trans)[...]\n\n tmp[:] = dummy_points[col,:]\n dummy_points[col,:] = dummy_points[0,:]\n dummy_points[0,:] = tmp[:]\n\n tmp[:] = dummy_points[row,:]\n dummy_points[row,:] = dummy_points[1,:]\n dummy_points[1,:] = tmp[:]\n\n # actually plot\n ans = utils.nd_marg_leave2(dummy_points, dummy_logkde)[1].transpose() ### FIXME: fill in a place-holder\n if log:\n if levels:\n ax.contour(dummy_points[0], dummy_points[1], ans, levels=np.log(np.kde2levels(np.exp(ans), levels)), colors='b')\n else:\n ax.contour(dummy_points[0], dummy_points[1], ans, colors='b')\n else:\n if levels:\n ax.contour(dummy_points[0], dummy_points[1], np.exp(ans), levels=kde2levels(np.exp(ans), levels), colors='b')\n else:\n ax.contour(dummy_points[0], dummy_points[1], np.exp(ans), colors='b')\n\n # decorate\n ax.set_xlim(priors[col])\n ax.set_ylim(priors[row])\n\n if col > 0:\n if col==row:\n ax.yaxis.tick_right()\n else:\n plt.setp(ax.get_yticklabels(), visible=False)\n else:\n ax.set_ylabel(fields[row])\n\n if row < Nfields-1:\n plt.setp(ax.get_xticklabels(), visible=False)\n else:\n ax.set_xlabel(fields[col])\n \n plt.subplots_adjust(\n hspace=0.05,\n wspace=0.05,\n )\n return fig\n\ndef kde2levels(kde, levels):\n kde = kde.flatten()\n\n order = kde.argsort()[::-1] ### largest to smallest\n ckde = np.cumsum(kde[order]) ### cumulative distribution\n ckde /= np.sum(kde)\n\n ans = []\n for level in levels: ### iterate through levels, returning the kde value associated with that confidence\n ### assume kde spacing is close enough that interpolation isn't worth while...\n ans.append(kde[order[ckde<=level][-1]])\n\n return ans\n"
] |
[
[
"matplotlib.use",
"numpy.empty_like",
"numpy.cumsum",
"numpy.ones",
"numpy.all",
"numpy.max",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots_adjust",
"numpy.exp",
"numpy.sum",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] |
young-geng/design-baselines-icml
|
[
"ce4183babac304ed5f83151a602d75de6c4740c4"
] |
[
"design_baselines/coms_original/__init__.py"
] |
[
"from design_baselines.data import StaticGraphTask\nfrom design_baselines.logger import Logger\nfrom design_baselines.utils import spearman\nfrom design_baselines.coms_original.trainers import ConservativeMaximumLikelihood\nfrom design_baselines.coms_original.trainers import TransformedMaximumLikelihood\nfrom design_baselines.coms_original.nets import ForwardModel\nfrom collections import defaultdict\nimport tensorflow as tf\nimport numpy as np\nimport os\n\n\ndef coms_original(config):\n \"\"\"Train a forward model and perform offline model-based\n optimization using a conservative objective model\n\n Args:\n\n config: dict\n a dictionary of hyper parameters such as the learning rate\n \"\"\"\n\n # create the training task and logger\n logger = Logger(config['logging_dir'])\n task = StaticGraphTask(config['task'], **config['task_kwargs'])\n\n # save the initial dataset statistics for safe keeping\n x = task.x\n y = task.y\n\n # if the task is discrete then use a continuous relaxation\n if config['is_discrete']:\n p = np.full_like(x, 1 / float(x.shape[-1]))\n x = config.get('discrete_clip', 5.0) * x + (\n 1.0 - config.get('discrete_clip', 5.0)) * p\n x = np.log(x)\n x = x[:, :, 1:] - x[:, :, :1]\n\n if config['normalize_ys']:\n # compute normalization statistics for the score\n mu_y = np.mean(y, axis=0, keepdims=True).astype(np.float32)\n y = y - mu_y\n st_y = np.std(y, axis=0, keepdims=True).astype(np.float32).clip(1e-6, 1e9)\n y = y / st_y\n else:\n # compute normalization statistics for the score\n mu_y = np.zeros_like(y[:1])\n st_y = np.ones_like(y[:1])\n\n if config['normalize_xs']:\n # compute normalization statistics for the data vectors\n mu_x = np.mean(x, axis=0, keepdims=True).astype(np.float32)\n x = x - mu_x\n st_x = np.std(x, axis=0, keepdims=True).astype(np.float32).clip(1e-6, 1e9)\n x = x / st_x\n else:\n # compute normalization statistics for the score\n mu_x = np.zeros_like(x[:1])\n st_x = np.ones_like(x[:1])\n\n input_shape = list(task.input_shape)\n if config['is_discrete']:\n input_shape[-1] = input_shape[-1] - 1\n\n solver_lr = config['solver_lr'] * np.sqrt(np.prod(input_shape))\n solver_interval = int(config['solver_interval'] * (\n x.shape[0] - config['val_size']) / config['batch_size'])\n solver_warmup = int(config['solver_warmup'] * (\n x.shape[0] - config['val_size']) / config['batch_size'])\n\n # make a neural network to predict scores\n forward_model = ForwardModel(\n input_shape,\n activations=config['activations'],\n hidden=config['hidden_size'],\n final_tanh=config['final_tanh'])\n\n # create a trainer for a forward model with a conservative objective\n trainer = ConservativeMaximumLikelihood(\n forward_model, forward_model_opt=tf.keras.optimizers.Adam,\n forward_model_lr=config['forward_model_lr'],\n initial_alpha=config['initial_alpha'],\n alpha_opt=tf.keras.optimizers.Adam,\n alpha_lr=config['alpha_lr'],\n target_conservatism=config['target_conservatism'],\n negatives_fraction=config['negatives_fraction'],\n lookahead_steps=config['lookahead_steps'],\n lookahead_backprop=config['lookahead_backprop'],\n solver_beta=config['solver_beta'],\n solver_lr=solver_lr,\n solver_interval=solver_interval,\n solver_warmup=solver_warmup,\n solver_steps=config['solver_steps'],\n constraint_type=config['constraint_type'],\n entropy_coefficient=config['entropy_coefficient'],\n continuous_noise_std=config.get('continuous_noise_std', 0.0))\n\n # make a neural network to predict scores\n validation_models = [ForwardModel(\n input_shape,\n activations=config['activations'],\n 
hidden=config['hidden_size'],\n final_tanh=config['final_tanh'])]\n\n # create a trainer for a forward model with a conservative objective\n validation_trainers = [TransformedMaximumLikelihood(\n model,\n forward_model_optim=tf.keras.optimizers.Adam,\n forward_model_lr=config['forward_model_lr'],\n continuous_noise_std=config.get('continuous_noise_std', 0.0),\n logger_prefix=f\"validation_model_{i}\")\n for i, model in enumerate(validation_models)]\n\n # create a data set\n train_data, validate_data = task.build(\n x=x, y=y,\n batch_size=config['batch_size'],\n val_size=config['val_size'])\n\n # train the validation models\n for t in validation_trainers:\n t.launch(train_data, validate_data, logger, 100)\n\n # select the top k initial designs from the dataset\n indices = tf.math.top_k(y[:, 0], k=config['batch_size'])[1]\n initial_x = tf.gather(x, indices, axis=0)\n\n # create the starting point for the optimizer\n evaluations = 0\n score = None\n trainer.solution = tf.Variable(initial_x)\n trainer.done = tf.Variable(tf.fill(\n [config['batch_size']] + [1 for _ in x.shape[1:]], False))\n\n def evaluate_solution(xt):\n nonlocal evaluations, score\n\n # evaluate the design using the oracle and the forward model\n with tf.GradientTape() as tape:\n tape.watch(xt)\n model = forward_model(xt)\n\n # evaluate the predictions and gradient norm\n evaluations += 1\n grads = tape.gradient(model, xt)\n model = model * st_y + mu_y\n\n for i, val in enumerate(validation_models):\n prediction = val(xt)\n logger.record(f\"validation_model_{i}/prediction\",\n prediction * st_y + mu_y, evaluations)\n\n # record the prediction and score to the logger\n logger.record(\"distance/travelled\",\n tf.linalg.norm(xt - initial_x), evaluations)\n logger.record(f\"train/prediction\", model, evaluations)\n logger.record(f\"train/grad_norm\", tf.linalg.norm(\n tf.reshape(grads, [grads.shape[0], -1]), axis=-1), evaluations)\n\n if evaluations in config['evaluate_steps'] \\\n or len(config['evaluate_steps']) == 0 or score is None:\n solution = xt * st_x + mu_x\n if config['is_discrete']:\n solution = tf.math.softmax(\n tf.pad(solution, [[0, 0], [0, 0], [1, 0]]) / 0.001)\n score = task.score(solution)\n logger.record(\"score\", score, evaluations, percentile=True)\n logger.record(f\"rank_corr/model_to_real\",\n spearman(model[:, 0], score[:, 0]), evaluations)\n\n return score, model\n\n # keep track of when to record performance\n interval = trainer.solver_interval\n warmup = trainer.solver_warmup\n\n scores = []\n predictions = []\n\n # train model for many epochs with conservatism\n for e in range(config['epochs']):\n\n statistics = defaultdict(list)\n for x, y in train_data:\n for name, tensor in trainer.train_step(x, y).items():\n statistics[name].append(tensor)\n\n # evaluate the current solution\n if tf.logical_and(\n tf.equal(tf.math.mod(trainer.step, interval), 0),\n tf.math.greater_equal(trainer.step, warmup)):\n score, model = evaluate_solution(trainer.solution)\n scores.append(score)\n predictions.append(model.numpy())\n\n for name in statistics.keys():\n logger.record(\n name, tf.concat(statistics[name], axis=0), e)\n\n statistics = defaultdict(list)\n for x, y in validate_data:\n for name, tensor in trainer.validate_step(x, y).items():\n statistics[name].append(tensor)\n\n for name in statistics.keys():\n logger.record(\n name, tf.concat(statistics[name], axis=0), e)\n\n if tf.reduce_all(trainer.done):\n break\n\n # save the model predictions and scores to be aggregated later\n 
np.save(os.path.join(config['logging_dir'], \"scores.npy\"),\n np.concatenate(scores, axis=1))\n np.save(os.path.join(config['logging_dir'], \"predictions.npy\"),\n np.stack(predictions, axis=1))\n"
] |
[
[
"tensorflow.concat",
"numpy.concatenate",
"tensorflow.math.greater_equal",
"numpy.zeros_like",
"numpy.mean",
"tensorflow.pad",
"tensorflow.linalg.norm",
"numpy.ones_like",
"tensorflow.Variable",
"numpy.stack",
"tensorflow.gather",
"numpy.std",
"numpy.log",
"tensorflow.fill",
"tensorflow.math.mod",
"tensorflow.GradientTape",
"tensorflow.reshape",
"numpy.prod",
"tensorflow.math.top_k",
"tensorflow.reduce_all"
]
] |
angshine/pytorch3d
|
[
"5d9b236f41ea2175d8747f0c433b8a1c82393bed"
] |
[
"pytorch3d/implicitron/tools/model_io.py"
] |
[
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport glob\nimport logging\nimport os\nimport shutil\nimport tempfile\n\nimport torch\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_stats(flstats):\n from pytorch3d.implicitron.tools.stats import Stats\n\n try:\n stats = Stats.load(flstats)\n except:\n logger.info(\"Cant load stats! %s\" % flstats)\n stats = None\n return stats\n\n\ndef get_model_path(fl) -> str:\n fl = os.path.splitext(fl)[0]\n flmodel = \"%s.pth\" % fl\n return flmodel\n\n\ndef get_optimizer_path(fl) -> str:\n fl = os.path.splitext(fl)[0]\n flopt = \"%s_opt.pth\" % fl\n return flopt\n\n\ndef get_stats_path(fl, eval_results: bool = False):\n fl = os.path.splitext(fl)[0]\n if eval_results:\n for postfix in (\"_2\", \"\"):\n flstats = os.path.join(os.path.dirname(fl), f\"stats_test{postfix}.jgz\")\n if os.path.isfile(flstats):\n break\n else:\n flstats = \"%s_stats.jgz\" % fl\n # pyre-fixme[61]: `flstats` is undefined, or not always defined.\n return flstats\n\n\ndef safe_save_model(model, stats, fl, optimizer=None, cfg=None) -> None:\n \"\"\"\n This functions stores model files safely so that no model files exist on the\n file system in case the saving procedure gets interrupted.\n\n This is done first by saving the model files to a temporary directory followed\n by (atomic) moves to the target location. Note, that this can still result\n in a corrupt set of model files in case interruption happens while performing\n the moves. It is however quite improbable that a crash would occur right at\n this time.\n \"\"\"\n logger.info(f\"saving model files safely to {fl}\")\n # first store everything to a tmpdir\n with tempfile.TemporaryDirectory() as tmpdir:\n tmpfl = os.path.join(tmpdir, os.path.split(fl)[-1])\n stored_tmp_fls = save_model(model, stats, tmpfl, optimizer=optimizer, cfg=cfg)\n tgt_fls = [\n (\n os.path.join(os.path.split(fl)[0], os.path.split(tmpfl)[-1])\n if (tmpfl is not None)\n else None\n )\n for tmpfl in stored_tmp_fls\n ]\n # then move from the tmpdir to the right location\n for tmpfl, tgt_fl in zip(stored_tmp_fls, tgt_fls):\n if tgt_fl is None:\n continue\n shutil.move(tmpfl, tgt_fl)\n\n\ndef save_model(model, stats, fl, optimizer=None, cfg=None):\n flstats = get_stats_path(fl)\n flmodel = get_model_path(fl)\n logger.info(\"saving model to %s\" % flmodel)\n torch.save(model.state_dict(), flmodel)\n flopt = None\n if optimizer is not None:\n flopt = get_optimizer_path(fl)\n logger.info(\"saving optimizer to %s\" % flopt)\n # __import__('ipdb').set_trace() # optimizer.state_dict()\n torch.save(optimizer.state_dict(), flopt)\n logger.info(\"saving model stats to %s\" % flstats)\n stats.save(flstats)\n\n return flstats, flmodel, flopt\n\n\ndef load_model(fl):\n flstats = get_stats_path(fl)\n flmodel = get_model_path(fl)\n flopt = get_optimizer_path(fl)\n model_state_dict = torch.load(flmodel)\n stats = load_stats(flstats)\n if os.path.isfile(flopt):\n optimizer = torch.load(flopt)\n else:\n optimizer = None\n\n return model_state_dict, stats, optimizer\n\n\ndef parse_epoch_from_model_path(model_path) -> int:\n return int(\n os.path.split(model_path)[-1].replace(\".pth\", \"\").replace(\"model_epoch_\", \"\")\n )\n\n\ndef get_checkpoint(exp_dir, epoch):\n fl = os.path.join(exp_dir, \"model_epoch_%08d.pth\" % epoch)\n return fl\n\n\ndef find_last_checkpoint(\n exp_dir, any_path: bool = 
False, all_checkpoints: bool = False\n):\n if any_path:\n exts = [\".pth\", \"_stats.jgz\", \"_opt.pth\"]\n else:\n exts = [\".pth\"]\n\n for ext in exts:\n fls = sorted(\n glob.glob(\n os.path.join(glob.escape(exp_dir), \"model_epoch_\" + \"[0-9]\" * 8 + ext)\n )\n )\n if len(fls) > 0:\n break\n # pyre-fixme[61]: `fls` is undefined, or not always defined.\n if len(fls) == 0:\n fl = None\n else:\n if all_checkpoints:\n # pyre-fixme[61]: `fls` is undefined, or not always defined.\n fl = [f[0 : -len(ext)] + \".pth\" for f in fls]\n else:\n fl = fls[-1][0 : -len(ext)] + \".pth\"\n\n return fl\n\n\ndef purge_epoch(exp_dir, epoch) -> None:\n model_path = get_checkpoint(exp_dir, epoch)\n\n for file_path in [\n model_path,\n get_optimizer_path(model_path),\n get_stats_path(model_path),\n ]:\n if os.path.isfile(file_path):\n logger.info(\"deleting %s\" % file_path)\n os.remove(file_path)\n"
] |
[
[
"torch.load"
]
] |
JavisPeng/CenterNet-pytorch-detection-simple-tutorial
|
[
"1dd69dd26be3627079aeeb458dde35a1f3f1c5df"
] |
[
"main.py"
] |
[
"import torch, tqdm\nfrom models import cnet\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom dataset import CTDataset\nfrom losses import FocalLoss, RegL1Loss\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ntorch.manual_seed(41)\n\n\ndef train_epoch(epoch, model, dl, optimizer, cerition_hm, cerition_wh, cerition_reg):\n model.train()\n loss_meter, it = 0, 0\n bar = tqdm.tqdm(dl)\n bar.set_description_str(\"%02d\" % epoch)\n for item in bar:\n item = [x.to(device) for x in item]\n img, hm, wh, reg, ind, reg_mask = item\n optimizer.zero_grad()\n out_hm, out_wh, out_reg = model(img)\n hm_loss = cerition_hm(out_hm, hm)\n wh_loss = cerition_wh(out_wh, wh, reg_mask, ind)\n reg_loss = cerition_reg(out_reg, reg, reg_mask, ind)\n loss = hm_loss + 0.1 * wh_loss + reg_loss\n loss.backward()\n optimizer.step()\n loss_meter += loss.item()\n bar.set_postfix(hm_loss=hm_loss.item(), wh_loss=wh_loss.item(), reg_loss=reg_loss.item(), loss=loss.item())\n it += 1\n return loss_meter / it\n\n\[email protected]_grad()\ndef val_epoch(model, dl, cerition_hm, cerition_wh, cerition_reg):\n model.eval()\n loss_meter, it = 0, 0\n for item in dl:\n item = [x.to(device) for x in item]\n img, hm, wh, reg, ind, reg_mask = item\n out_hm, out_wh, out_reg = model(img)\n hm_loss = cerition_hm(out_hm, hm)\n wh_loss = cerition_wh(out_wh, wh, reg_mask, ind)\n reg_loss = cerition_reg(out_reg, reg, reg_mask, ind)\n loss = hm_loss + 0.1 * wh_loss + reg_loss\n loss_meter += loss.item()\n it += 1\n return loss_meter / it\n\n\ndef train(opt):\n # model\n model = cnet(nb_res=opt.resnet_num, num_classes=opt.num_classes)\n model = model.to(device)\n\n transform_train = transforms.Compose([\n transforms.Resize((opt.input_size, opt.input_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n # 数据文件通过voc.py生成\n dic_data = torch.load('data.pth')\n\n train_dataset = CTDataset(opt=opt, data=dic_data['train'], transform=transform_train)\n val_dataset = CTDataset(opt=opt, data=dic_data['val'], transform=transform_train)\n train_dl = DataLoader(dataset=train_dataset, batch_size=opt.batch_size, num_workers=opt.num_workers, shuffle=True)\n val_dl = DataLoader(dataset=val_dataset, batch_size=opt.batch_size, num_workers=opt.num_workers)\n\n cerition_hm = FocalLoss()\n cerition_wh = RegL1Loss()\n cerition_reg = RegL1Loss()\n\n optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)\n\n min_loss, best_epoch = 1e7, 1\n for epoch in range(1, opt.max_epoch + 1):\n train_loss = train_epoch(epoch, model, train_dl, optimizer, cerition_hm, cerition_wh, cerition_reg)\n val_loss = val_epoch(model, val_dl, cerition_hm, cerition_wh, cerition_reg)\n print(\"Epoch%02d train_loss:%0.3e val_loss:%0.3e min_loss:%0.3e(%02d)\" % (\n epoch, train_loss, val_loss, min_loss, best_epoch))\n if min_loss > val_loss:\n min_loss, best_epoch = val_loss, epoch\n torch.save(model.state_dict(), opt.ckpt)\n\n\[email protected]_grad()\ndef test(opt):\n model = cnet(nb_res=opt.resnet_num, num_classes=opt.num_classes)\n model.load_state_dict(torch.load(opt.ckpt, map_location='cpu'))\n model = model.to(device)\n\n transform_x = transforms.Compose([\n transforms.Resize((opt.input_size, opt.input_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n import os\n from PIL import Image, ImageDraw\n if not os.path.exists(opt.output_dir): os.mkdir(opt.output_dir)\n img_name = os.path.basename(opt.test_img_path)\n img0 = 
Image.open(opt.test_img_path)\n real_w, real_h = img0.size\n img = transform_x(img0)\n img = torch.unsqueeze(img, 0).to(device)\n out_hm, out_wh, out_reg = model(img)\n import utils\n bbox, cls, scores = utils.heatmap_bbox(out_hm, out_wh, out_reg, opt.topk)\n w_ratio = real_w * opt.down_ratio / opt.input_size\n h_ratio = real_h * opt.down_ratio / opt.input_size\n # 同一维度和类型,便于cat\n cls = cls.unsqueeze(-1).float()\n scores = scores.unsqueeze(-1)\n #只测试一张图片batch=1,去掉该维度\n bbox_cls_score = torch.cat([bbox, cls, scores], dim=-1).squeeze()\n #使用soft_nms过滤掉不同类别在同一个关键点位置的情况\n #bbox_cls_score = utils.soft_nms(bbox_cls_score, score_threshold=opt.threshold, top_k=opt.topk)\n bbox_cls_score = bbox_cls_score.cpu().numpy()\n for bcs in bbox_cls_score:\n box, cls, score = bcs[:4], int(bcs[4]), bcs[-1]\n print(box, cls, score)\n box = box[0] * w_ratio, box[1] * h_ratio, box[2] * w_ratio, box[3] * h_ratio\n draw = ImageDraw.Draw(img0)\n draw.rectangle(box, outline='blue')\n draw.text((box[0], box[1] - 10), \"(%d,%0.3f)\" % (cls, score), fill='blue')\n img0.save(os.path.join(opt.output_dir, img_name))\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"method\", choices=['train', 'test'], help=\"train | test\")\n parser.add_argument('--batch_size', type=int, default=16, help='batch size')\n parser.add_argument('--num_workers', type=int, default=4, help='number workers in dataloader')\n parser.add_argument('--max_epoch', type=int, default=256)\n parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')\n parser.add_argument('--resnet_num', type=int, default=18, choices=[18, 34, 50, 101, 152],\n help='resnet numner in [18,34,50,101,152]')\n parser.add_argument('--num_classes', type=int, default=6)\n parser.add_argument('--input_size', type=int, default=512, help='image input size')\n parser.add_argument('--max_objs', type=int, default=16, help='max object number in a picture')\n parser.add_argument('--topk', type=int, default=4, help='topk in target')\n parser.add_argument('--threshold', type=float, default=0.5, help='threshold for nms,default is 0.5')\n parser.add_argument('--down_ratio', type=int, default=4, help='downsample ratio')\n parser.add_argument('--ckpt', type=str, default='w.pth', help='the path of model weight')\n parser.add_argument('--test_img_path', type=str, default='VOC2007/JPEGImages/000019.jpg',\n help='test image path')\n parser.add_argument('--output_dir', type=str, default='output', help='output directory')\n opt = parser.parse_args()\n\n if opt.method == \"train\":\n train(opt)\n elif opt.method == \"test\":\n test(opt)\n"
] |
[
[
"torch.load",
"torch.cat",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.unsqueeze",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
tronsgaard/tinygp
|
[
"11e9c869fd2a62d7c0292db3b6fa9eab46e296e5"
] |
[
"tests/test_george_compat.py"
] |
[
"# -*- coding: utf-8 -*-\n# mypy: ignore-errors\n\nimport numpy as np\nimport pytest\n\nfrom tinygp import GaussianProcess, kernels\n\ngeorge = pytest.importorskip(\"george\")\n\nfrom jax.config import config\n\nconfig.update(\"jax_enable_x64\", True)\n\n\[email protected]\ndef random():\n return np.random.default_rng(1058390)\n\n\[email protected](\n scope=\"module\",\n params=[\"Constant\", \"DotProduct\", \"Polynomial\"],\n)\ndef kernel(request):\n return {\n \"Constant\": (\n kernels.Constant(value=1.5),\n george.kernels.ConstantKernel(\n log_constant=np.log(1.5 / 5), ndim=5\n ),\n ),\n \"DotProduct\": (\n kernels.DotProduct(),\n george.kernels.DotProductKernel(ndim=3),\n ),\n \"Polynomial\": (\n kernels.Polynomial(order=2.5, sigma=1.3),\n george.kernels.PolynomialKernel(\n order=2.5, log_sigma2=2 * np.log(1.3), ndim=1\n ),\n ),\n }[request.param]\n\n\[email protected](scope=\"module\", params=[\"Cosine\", \"ExpSineSquared\"])\ndef periodic_kernel(request):\n return {\n \"Cosine\": (\n kernels.Cosine(period=2.3),\n george.kernels.CosineKernel(log_period=np.log(2.3)),\n ),\n \"ExpSineSquared\": (\n kernels.ExpSineSquared(period=2.3, gamma=1.3),\n george.kernels.ExpSine2Kernel(gamma=1.3, log_period=np.log(2.3)),\n ),\n }[request.param]\n\n\[email protected](\n scope=\"module\",\n params=[\"Exp\", \"ExpSquared\", \"Matern32\", \"Matern52\", \"RationalQuadratic\"],\n)\ndef stationary_kernel(request):\n scale = 1.5\n return {\n \"Exp\": (\n kernels.Exp(scale),\n george.kernels.ExpKernel(scale**2),\n ),\n \"ExpSquared\": (\n kernels.ExpSquared(scale),\n george.kernels.ExpSquaredKernel(scale**2),\n ),\n \"Matern32\": (\n kernels.Matern32(scale),\n george.kernels.Matern32Kernel(scale**2),\n ),\n \"Matern52\": (\n kernels.Matern52(scale),\n george.kernels.Matern52Kernel(scale**2),\n ),\n \"RationalQuadratic\": (\n kernels.RationalQuadratic(alpha=1.5),\n george.kernels.RationalQuadraticKernel(\n metric=1.0, log_alpha=np.log(1.5)\n ),\n ),\n }[request.param]\n\n\ndef compare_kernel_value(random, tiny_kernel, george_kernel):\n x1 = np.sort(random.uniform(0, 10, (50, george_kernel.ndim)))\n x2 = np.sort(random.uniform(0, 10, (45, george_kernel.ndim)))\n np.testing.assert_allclose(\n tiny_kernel(x1, x2),\n george_kernel.get_value(x1, x2),\n )\n np.testing.assert_allclose(\n tiny_kernel(x1, x1),\n george_kernel.get_value(x1),\n )\n np.testing.assert_allclose(\n tiny_kernel(x1),\n george_kernel.get_value(x1, diag=True),\n )\n\n\ndef compare_gps(random, tiny_kernel, george_kernel):\n x = np.sort(random.uniform(0, 10, (50, george_kernel.ndim)))\n t = np.sort(random.uniform(0, 10, (12, george_kernel.ndim)))\n y = np.sin(x[:, 0])\n diag = random.uniform(0.01, 0.1, 50)\n\n # Set up the GPs\n george_gp = george.GP(george_kernel)\n george_gp.compute(x, np.sqrt(diag))\n tiny_gp = GaussianProcess(tiny_kernel, x, diag=diag)\n\n # Likelihood\n np.testing.assert_allclose(\n tiny_gp.condition(y), george_gp.log_likelihood(y)\n )\n\n # Filtering\n np.testing.assert_allclose(\n tiny_gp.predict(y),\n george_gp.predict(y, x, return_var=False, return_cov=False),\n )\n\n # Filtering with explicit value\n np.testing.assert_allclose(\n tiny_gp.predict(y, x),\n george_gp.predict(y, x, return_var=False, return_cov=False),\n )\n np.testing.assert_allclose(\n tiny_gp.predict(y, t),\n george_gp.predict(y, t, return_var=False, return_cov=False),\n )\n\n # Variance\n np.testing.assert_allclose(\n tiny_gp.predict(y, return_var=True)[1],\n george_gp.predict(y, x, return_var=True, return_cov=False)[1],\n rtol=1e-5,\n )\n 
np.testing.assert_allclose(\n tiny_gp.predict(y, t, return_var=True)[1],\n george_gp.predict(y, t, return_var=True, return_cov=False)[1],\n rtol=1e-5,\n )\n\n # Covariance\n np.testing.assert_allclose(\n tiny_gp.predict(y, return_cov=True)[1],\n george_gp.predict(y, x, return_var=False, return_cov=True)[1],\n atol=1e-5,\n )\n np.testing.assert_allclose(\n tiny_gp.predict(y, t, return_cov=True)[1],\n george_gp.predict(y, t, return_var=False, return_cov=True)[1],\n atol=1e-5,\n )\n\n\ndef test_kernel_value(random, kernel):\n tiny_kernel, george_kernel = kernel\n compare_kernel_value(random, tiny_kernel, george_kernel)\n tiny_kernel *= 0.3\n george_kernel *= 0.3\n compare_kernel_value(random, tiny_kernel, george_kernel)\n\n\ndef test_periodic_kernel_value(random, periodic_kernel):\n tiny_kernel, george_kernel = periodic_kernel\n compare_kernel_value(random, tiny_kernel, george_kernel)\n tiny_kernel *= 0.3\n george_kernel *= 0.3\n compare_kernel_value(random, tiny_kernel, george_kernel)\n\n\ndef test_metric_kernel_value(random, stationary_kernel):\n tiny_kernel, george_kernel = stationary_kernel\n compare_kernel_value(random, tiny_kernel, george_kernel)\n tiny_kernel *= 0.3\n george_kernel *= 0.3\n compare_kernel_value(random, tiny_kernel, george_kernel)\n\n\ndef test_gp(random, kernel):\n tiny_kernel, george_kernel = kernel\n compare_gps(random, tiny_kernel, george_kernel)\n\n\ndef test_periodic_gp(random, periodic_kernel):\n tiny_kernel, george_kernel = periodic_kernel\n compare_gps(random, tiny_kernel, george_kernel)\n\n\ndef test_metric_gp(random, stationary_kernel):\n tiny_kernel, george_kernel = stationary_kernel\n compare_gps(random, tiny_kernel, george_kernel)\n"
] |
[
[
"numpy.log",
"numpy.sqrt",
"numpy.random.default_rng",
"numpy.sin"
]
] |
gitter-badger/aroma-1
|
[
"e8e4169fa158572278f786769951831ef45a4a2f"
] |
[
"aroma/features.py"
] |
[
"\"\"\"Functions to calculate ICA-AROMA features for component classification.\"\"\"\nimport logging\nimport os\n\nimport nibabel as nib\nimport numpy as np\nfrom nilearn import image, masking\n\nfrom . import utils\n\nLGR = logging.getLogger(__name__)\n\n\ndef feature_time_series(mel_mix, mc):\n \"\"\"Extract maximum motion parameter correlation scores from components.\n\n This function determines the maximum robust correlation of each component\n time series with a model of 72 realignment parameters.\n\n Parameters\n ----------\n mel_mix : str\n Full path of the melodic_mix text file.\n Stored array is (time x component).\n mc : str or array_like\n Full path of the text file containing the realignment parameters.\n Motion parameters are (time x 6), with the first three columns being\n rotation parameters (in radians) and the final three being translation\n parameters (in mm).\n\n Returns\n -------\n max_RP_corr : array_like\n Array of the maximum RP correlation feature scores for the components\n of the melodic_mix file.\n \"\"\"\n if isinstance(mc, str):\n rp6 = utils.load_motpars(mc, source=\"auto\")\n else:\n rp6 = mc\n assert (rp6.ndim == 2) and (rp6.shape[1] == 6), \"Wrong shape\"\n\n # Read melodic mix file (IC time-series), subsequently define a set of\n # squared time-series\n mix = np.loadtxt(mel_mix)\n\n # Determine the derivatives of the RPs (add zeros at time-point zero)\n _, nparams = rp6.shape\n rp6_der = np.vstack((\n np.zeros(nparams),\n np.diff(rp6, axis=0)\n ))\n\n # Create an RP-model including the RPs and its derivatives\n rp12 = np.hstack((rp6, rp6_der))\n\n # add the fw and bw shifted versions\n rp12_1fw = np.vstack((\n np.zeros(2 * nparams),\n rp12[:-1]\n ))\n rp12_1bw = np.vstack((\n rp12[1:],\n np.zeros(2 * nparams)\n ))\n rp_model = np.hstack((rp12, rp12_1fw, rp12_1bw))\n\n # Determine the maximum correlation between RPs and IC time-series\n nsplits = 1000\n nmixrows, nmixcols = mix.shape\n nrows_to_choose = int(round(0.9 * nmixrows))\n\n # Max correlations for multiple splits of the dataset (for a robust\n # estimate)\n max_correls = np.empty((nsplits, nmixcols))\n for i in range(nsplits):\n # Select a random subset of 90% of the dataset rows\n # (*without* replacement)\n chosen_rows = np.random.choice(a=range(nmixrows),\n size=nrows_to_choose,\n replace=False)\n\n # Combined correlations between RP and IC time-series, squared and\n # non squared\n correl_nonsquared = utils.cross_correlation(mix[chosen_rows],\n rp_model[chosen_rows])\n correl_squared = utils.cross_correlation(mix[chosen_rows]**2,\n rp_model[chosen_rows]**2)\n correl_both = np.hstack((correl_squared, correl_nonsquared))\n\n # Maximum absolute temporal correlation for every IC\n max_correls[i] = np.abs(correl_both).max(axis=1)\n\n # Feature score is the mean of the maximum correlation over all the random\n # splits\n # Avoid propagating occasional nans that arise in artificial test cases\n max_RP_corr = np.nanmean(max_correls, axis=0)\n return max_RP_corr\n\n\ndef feature_frequency(mel_FT_mix, TR):\n \"\"\"Extract the high-frequency content feature scores.\n\n This function determines the frequency, as fraction of the Nyquist\n frequency, at which the higher and lower frequencies explain half\n of the total power between 0.01Hz and Nyquist.\n\n Parameters\n ----------\n mel_FT_mix : str\n Full path of the melodic_FTmix text file.\n Stored array is (frequency x component), with frequencies\n ranging from 0 Hz to Nyquist frequency.\n TR : float\n TR (in seconds) of the fMRI data\n\n Returns\n 
-------\n HFC : array_like\n Array of the HFC ('High-frequency content') feature scores\n for the components of the melodic_FTmix file\n \"\"\"\n # Determine sample frequency\n Fs = 1 / TR\n\n # Determine Nyquist-frequency\n Ny = Fs / 2\n\n # Load melodic_FTmix file\n FT = np.loadtxt(mel_FT_mix)\n n_frequencies = FT.shape[0]\n\n # Determine which frequencies are associated with every row in the\n # melodic_FTmix file (assuming the rows range from 0Hz to Nyquist)\n f = Ny * np.arange(1, n_frequencies + 1) / n_frequencies\n\n # Only include frequencies higher than 0.01Hz\n fincl = np.squeeze(np.array(np.where(f > 0.01)))\n FT = FT[fincl, :]\n f = f[fincl]\n\n # Set frequency range to [0-1]\n f_norm = (f - 0.01) / (Ny - 0.01)\n\n # For every IC; get the cumulative sum as a fraction of the total sum\n fcumsum_fract = np.cumsum(FT, axis=0) / np.sum(FT, axis=0)\n\n # Determine the index of the frequency with the fractional cumulative sum\n # closest to 0.5\n idx_cutoff = np.argmin(np.abs(fcumsum_fract - 0.5), axis=0)\n\n # Now get the fractions associated with those indices index, these are the\n # final feature scores\n HFC = f_norm[idx_cutoff]\n\n # Return feature score\n return HFC\n\n\ndef feature_spatial(mel_IC):\n \"\"\"Extract the spatial feature scores.\n\n For each IC it determines the fraction of the mixture modeled thresholded\n Z-maps respectively located within the CSF or at the brain edges,\n using predefined standardized masks.\n\n Parameters\n ----------\n mel_IC : str\n Full path of the nii.gz file containing mixture-modeled thresholded\n (p<0.5) Z-maps, registered to the MNI152 2mm template\n\n Returns\n -------\n edge_fract : array_like\n Array of the edge fraction feature scores for the components of the\n mel_IC file\n csf_fract : array_like\n Array of the CSF fraction feature scores for the components of the\n mel_IC file\n \"\"\"\n # Get the number of ICs\n mel_IC_img = nib.load(mel_IC)\n num_ICs = mel_IC_img.shape[3]\n\n masks_dir = utils.get_resource_path()\n csf_mask = os.path.join(masks_dir, \"mask_csf.nii.gz\")\n edge_mask = os.path.join(masks_dir, \"mask_edge.nii.gz\")\n out_mask = os.path.join(masks_dir, \"mask_out.nii.gz\")\n\n # Loop over ICs\n edge_fract = np.zeros(num_ICs)\n csf_fract = np.zeros(num_ICs)\n for i in range(num_ICs):\n # Extract IC from the merged melodic_IC_thr2MNI2mm file\n temp_IC = image.index_img(mel_IC, i)\n\n # Change to absolute Z-values\n temp_IC = image.math_img(\"np.abs(img)\", img=temp_IC)\n\n # Get sum of Z-values within the total Z-map (calculate via the mean\n # and number of non-zero voxels)\n temp_IC_data = temp_IC.get_fdata()\n tot_sum = np.sum(temp_IC_data)\n\n if tot_sum == 0:\n LGR.info(\"\\t- The spatial map of component {} is empty. 
\"\n \"Please check!\".format(i + 1))\n\n # Get sum of Z-values of the voxels located within the CSF\n # (calculate via the mean and number of non-zero voxels)\n csf_data = masking.apply_mask(temp_IC, csf_mask)\n csf_sum = np.sum(csf_data)\n\n # Get sum of Z-values of the voxels located within the Edge\n # (calculate via the mean and number of non-zero voxels)\n edge_data = masking.apply_mask(temp_IC, edge_mask)\n edge_sum = np.sum(edge_data)\n\n # Get sum of Z-values of the voxels located outside the brain\n # (calculate via the mean and number of non-zero voxels)\n out_data = masking.apply_mask(temp_IC, out_mask)\n out_sum = np.sum(out_data)\n\n # Determine edge and CSF fraction\n if tot_sum != 0:\n edge_fract[i] = (out_sum + edge_sum) / (tot_sum - csf_sum)\n csf_fract[i] = csf_sum / tot_sum\n else:\n edge_fract[i] = 0\n csf_fract[i] = 0\n\n # Return feature scores\n return edge_fract, csf_fract\n"
] |
[
[
"numpy.hstack",
"numpy.abs",
"numpy.arange",
"numpy.cumsum",
"numpy.loadtxt",
"numpy.diff",
"numpy.nanmean",
"numpy.where",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
]
] |