repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
YigeWang-WHU/BlastLoadsRegression | [
"884ba58a31ba854eaf86b846e551a97d84b11924"
] | [
"dataset/blast_wall.py"
] | [
"from torch.utils.data import Dataset\nimport os\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nimport torch\n\n\n\ndef make_dataset(data_root, txt, input_size, output_size, portion_train, portion_val):\n ######################################################################\n # split the data into train, val and test and do normalization on it #\n ######################################################################\n\n # make those variables visible globally\n global train_split\n global test_split\n global val_split\n global num_input\n global num_output\n\n # the dim of input\n num_input = input_size\n # the dim of output\n num_output = output_size\n # specify the file of train , val, test set.\n train_split = 'data_splits/' + txt.split('.')[0] + '_train.txt'\n test_split = 'data_splits/' + txt.split('.')[0] + '_test.txt'\n val_split = 'data_splits/' + txt.split('.')[0] + '_val.txt'\n\n # if the data is already splittd, use it! You dont need to split it once more\n if os.path.isfile(train_split) and os.path.isfile(test_split) \\\n and os.path.isfile(val_split):\n\n print(\"Use Splitted Data\")\n # remember to set the argument \"new_split\" to False๏ผ since we dont need to split it anymore\n # we also dont need to pass inputs and targets to the function. Set them to None\n return split_data(portion_train, portion_val, None, None, new_split=False)\n\n else:\n # otherwise, split the data\n # open the raw data file and read each line\n with open(os.path.join(data_root, txt), 'r') as d:\n lines = d.readlines()\n\n target = [] # an empty list for storing the targets\n input = [] # an empty list for storing the inputs\n for line in lines:\n line = eval(line.strip())\n line = list(map(float, line))\n # split the line accoring the dim of inputs and outputs and append them to the corresponding lists.\n input.append(line[:num_input]) \n target.append(line[num_input:(num_input + num_output)])\n # convert the data type to float\n target = np.array(target).astype(np.float32)\n input = np.array(input).astype(np.float32)\n # split the data now\n return split_data(portion_train, portion_val, input, target, new_split=True)\n\n\ndef split_data(portion_train, portion_val, input, target, new_split):\n # portion_train: the rate of the training set\n # portion_val: the rate of the validation set\n # input: input data\n # target: label data\n # new_split: whether splitting the data \n\n if new_split:\n # number of totoal data\n num_sequences = input.shape[0]\n # the breakpoint/index of training set\n break_train = int(num_sequences * portion_train)\n # the breakpoint/index of validation set\n break_val = int(num_sequences * (portion_train + portion_val))\n # randomly permute the dataset before splitting\n splits = np.random.permutation(np.arange(num_sequences))\n # split the data; only two breakpoints are need to generate 3 datasets, i.e. 
train, val, test\n splits = np.split(splits, [break_train, break_val])\n # splits[0], splits[1], splits[2] now contain the indices of train, val, test set\n # map those indices to actual data by input[splits[0]], input[splits[1]], input[splits[2]]; similarly for the targets\n input_train, input_val, input_test = input[splits[0]], input[splits[1]], input[splits[2]]\n target_train, target_val, target_test = target[splits[0]], target[splits[1]], target[splits[2]]\n \n # open the train data file and write each data element\n with open(train_split, 'w') as tr:\n for i, (inp, tar) in enumerate(zip(input_train, target_train)):\n point = np.append(inp, tar)\n tr.write(str(point.tolist()) + '\\n')\n # open the validation data file and write each data element\n with open(val_split, 'w') as val:\n for i, (inp, tar) in enumerate(zip(input_val, target_val)):\n point = np.append(inp, tar)\n val.write(str(point.tolist()) + '\\n')\n # open the test data file and write each data element\n with open(test_split, 'w') as te:\n for i, (inp, tar) in enumerate(zip(input_test, target_test)):\n point = np.append(inp, tar)\n te.write(str(point.tolist()) + '\\n')\n else:\n\n # if not splitting data / data is already splitted. Read them directly!\n with open(train_split, 'r') as tr, open(test_split, 'r') as te, open(val_split, 'r') as val:\n strlines = tr.readlines()\n stelines = te.readlines()\n vallines = val.readlines()\n # empty lists for storing inputs and targets for each type of dataset\n target_train = []\n target_test = []\n target_val = []\n input_train = []\n input_test = []\n input_val = []\n\n for line in strlines:\n # convert line from string to python list\n line = eval(line.strip())\n # map element in the list to float type\n line = list(map(float, line))\n # append data to lists according to the dim of inputs and outputs\n input_train.append(line[:num_input])\n target_train.append(line[num_input:(num_input + num_output)])\n # similarly for the test set and validation set\n for line in stelines:\n line = eval(line.strip())\n line = list(map(float, line))\n input_test.append(line[:num_input])\n target_test.append(line[num_input:(num_input + num_output)])\n for line in vallines:\n line = eval(line.strip())\n line = list(map(float, line))\n input_val.append(line[:num_input])\n target_val.append(line[num_input:(num_input + num_output)])\n\n # convert all data to the type of np.float32, becaue we need to normalize data with packages accepting np.float32 data\n\n target_train = np.array(target_train).astype(np.float32)\n input_train = np.array(input_train).astype(np.float32)\n\n target_test = np.array(target_test).astype(np.float32)\n input_test = np.array(input_test).astype(np.float32)\n\n target_val = np.array(target_val).astype(np.float32)\n input_val = np.array(input_val).astype(np.float32)\n\n # data normalization\n\n target_scaler = StandardScaler()\n # fit the mean/std from the training set. 
you can only touch trainning set\n target_train = torch.from_numpy(target_scaler.fit_transform(target_train).astype(np.float32))\n # transform the test and val set\n target_test = torch.from_numpy(target_scaler.transform(target_test).astype(np.float32))\n target_val = torch.from_numpy(target_scaler.transform(target_val).astype(np.float32))\n\n # same for the targets\n input_scaler = StandardScaler()\n input_train = torch.from_numpy(input_scaler.fit_transform(input_train).astype(np.float32))\n input_test = torch.from_numpy(input_scaler.transform(input_test).astype(np.float32))\n input_val = torch.from_numpy(input_scaler.transform(input_val).astype(np.float32))\n \n # wrap the data with pytorch dataset\n return BlastWall(input_train, target_train), BlastWall(input_val, target_val), BlastWall(input_test, target_test)\n\n\n# the pytorch dataset\nclass BlastWall(Dataset):\n\n def __init__(self, input, target):\n self.input = input\n self.target = target\n\n\n def __len__(self):\n return self.input.size()[0]\n\n def __getitem__(self, idx):\n return self.input[idx], self.target[idx]\n"
] | [
[
"numpy.array",
"sklearn.preprocessing.StandardScaler",
"numpy.split",
"numpy.arange",
"numpy.append"
]
] |
alex-kj-chin/ConstellationNet | [
"28bb44beb735654381a10c8c25d2cfdedbdb43bc"
] | [
"models/models.py"
] | [
"import torch\n\n\nmodels = {}\ndef register(name):\n def decorator(cls):\n models[name] = cls\n return cls\n return decorator\n\n\ndef make(name, **kwargs):\n if name is None:\n return None\n \n model = models[name](**kwargs)\n if torch.cuda.is_available() :\n model.cuda()\n return model\n\n\ndef load(model_sv, name=None):\n if name is None:\n name = 'model'\n model = make(model_sv[name], **model_sv[name + '_args'])\n model.load_state_dict(model_sv[name + '_sd'])\n return model\n\n"
] | [
[
"torch.cuda.is_available"
]
] |
mcglynnk/AdhereID_app | [
"7e15f695f8f17afe588abb5f9dd3cc1fa9e3ede5"
] | [
"app.py"
] | [
"# Setup\nfrom flask import Flask, render_template, request\n\nimport pandas as pd\nimport sklearn\nimport numpy as np\nimport pickle\nfrom _collections import OrderedDict\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\nfrom required_files import res_url, conditions_list_file, drugs_list_file\n\n# Initialize application\napp = Flask(__name__)\n\n\n# Home page\[email protected]('/', methods=['POST', 'GET'])\ndef home1():\n # List of available medical conditions for the bottom drop-down menu\n with open(conditions_list_file, 'rb') as f:\n cond_list = pickle.load(f)\n cond_list = sorted(cond_list)\n\n with open(drugs_list_file, 'rb') as f2:\n drug_list = pickle.load(f2)\n\n return render_template(\"index.html\", cond_list=cond_list, drug_list=drug_list, res_url=res_url)\n\n\[email protected]('/slides', methods=['POST', 'GET'])\ndef pres():\n return render_template(\"pres.html\")\n\n\n# Load ML Model\nfilename = r'files/lr_model.sav'\nwith open(filename, 'rb') as file:\n lr_model = pickle.load(file)\n\n# Import functions for cleaning input data\nfrom functions import make_input_df, process_data\nfrom required_files import X\n\n# Import functions for getting the medical condition cost, burden, side effects (for pie charts)\nfrom functions import get_cost, get_burden, get_sideeffects\nfrom functions import color_cost_chart, color_burden_chart, color_side_effects_chart\nfrom functions import return_max_value\nfrom required_files import plm\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result1():\n if request.method == 'POST':\n # Get results after hitting submit\n result_list = request.form.to_dict()\n result_list = list(result_list.values())\n print('result list: ', result_list)\n result_list_without_condition = result_list[0:-2]\n\n # Goes to a blank html page containing \"Please fill out all the fields!\" if the user doesn't fill out the\n # whole form\n try:\n inputs_df = make_input_df(result_list_without_condition)\n except ValueError as e:\n return \"Please fill out all the fields!\"\n\n # Process input data in order to feed it into the logistic regression model\n inputs_df_with_X = process_data(X, inputs_df)\n\n inputs_df_with_X_array = np.array(inputs_df_with_X.iloc[0,]).reshape(1, 22)\n\n predictions = lr_model.predict(inputs_df_with_X_array)\n\n # Prints out a message with the results\n if predictions == 1.0:\n predictions = \"High risk of non-adherence!\"\n elif predictions == 0:\n predictions = \"Low risk of non-adherence!\"\n\n # List of available medical conditions and drugs for the bottom drop-down menu, also used to write the result\n # message 'Patients with {} report...'.format(result_list[-1])\n condfile = conditions_list_file\n with open(condfile, 'rb') as f:\n cond_list = pickle.load(f)\n cond_list = sorted(cond_list)\n\n with open(drugs_list_file, 'rb') as f2:\n drug_list = pickle.load(f2)\n\n # Select a medical condition is optional. If selected, prints pie charts on results page. This code is a switch.\n print(result_list)\n\n if 'Select a condition' in result_list and 'Select a drug' in result_list:\n show_charts = False\n else:\n show_charts = True\n\n # Generates pie charts from PatientsLikeMe data\n if show_charts == True:\n skip_charts = False\n # Return pie charts and text based on medical condition selected\n if 'Select a drug' and not 'Select a condition' in result_list: # If 'Select a drug' is in the list, this\n drug_selected = False # is the default value. 
User must have\n max_results = [] # selected a medical condition instead.\n drug_url, nav_d, nav_s = None, None, None\n\n costlabels, costvalues = get_cost(plm, result_list[-2], 'condition')\n cost_vals_labels = list(zip(costvalues, costlabels))\n costcolors = color_cost_chart(cost_vals_labels)\n max_results.append(return_max_value('cost', cost_vals_labels))\n\n burdenlabels, burdenvalues = get_burden(plm, result_list[-2], 'condition')\n burden_vals_labels = list(zip(burdenvalues, burdenlabels))\n burdencolors = color_burden_chart(burden_vals_labels)\n max_results.append(return_max_value('burden', burden_vals_labels))\n\n sideeffectslabels, sideeffectsvalues = get_sideeffects(plm, result_list[-2], 'condition')\n sideeffects_vals_labels = list(zip(sideeffectsvalues, sideeffectslabels))\n sideeffectscolors = color_side_effects_chart(sideeffects_vals_labels)\n max_results.append(return_max_value('side_effects', sideeffects_vals_labels))\n\n max_res_df = pd.DataFrame(max_results)\n print(pd.DataFrame(max_results))\n\n if result_list[-2] == 'acquired immune deficiency syndrome (AIDS)':\n result_list[-2] = 'HIV infection'\n url = 'https://www.drugs.com/search.php?searchterm={}'.format(result_list[-2])\n with requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}) as page:\n soup = BeautifulSoup(page.content, features='lxml')\n condition_url = soup.find('div', {\"class\": \"snippet search-result\"}).a.get('href')\n print(condition_url)\n\n # Return pie charts and text based on medication selected\n elif 'Select a condition' and not 'Select a drug' in result_list:\n drug_selected = True\n max_results = []\n condition_url = None\n\n costlabels, costvalues = get_cost(plm, result_list[-1], 'drug')\n cost_vals_labels = list(zip(costvalues, costlabels))\n costcolors = color_cost_chart(cost_vals_labels)\n max_results.append(return_max_value('cost', cost_vals_labels))\n\n burdenlabels, burdenvalues = get_burden(plm, result_list[-1], 'drug')\n burden_vals_labels = list(zip(burdenvalues, burdenlabels))\n burdencolors = color_burden_chart(burden_vals_labels)\n max_results.append(return_max_value('burden', burden_vals_labels))\n\n sideeffectslabels, sideeffectsvalues = get_sideeffects(plm, result_list[-1], 'drug')\n sideeffects_vals_labels = list(zip(sideeffectsvalues, sideeffectslabels))\n sideeffectscolors = color_side_effects_chart(sideeffects_vals_labels)\n max_results.append(return_max_value('side_effects', sideeffects_vals_labels))\n\n max_res_df = pd.DataFrame(max_results)\n print(pd.DataFrame(max_results))\n\n # drug_id = drugbank_df[drugbank_df['name']==result_list[-1]]['drugbank_id'].to_list()\n # print(drug_id)\n\n url = 'https://www.drugs.com/search.php?searchterm={}'.format(result_list[-1])\n with requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}) as page:\n soup = BeautifulSoup(page.content, features='lxml')\n try:\n drug_url = soup.find('div',\n {\"class\": \"snippet search-result search-result-with-secondary\"}).a.get(\n 'href')\n except AttributeError as a:\n drug_url = soup.find('div', {\"class\": \"snippet search-result\"}).a.get('href')\n except:\n drug_url = None\n with requests.get(drug_url) as link:\n soup = BeautifulSoup(link.content)\n try:\n d = soup.find('h2', text=re.compile(\"How should I [a-zA-Z]+.\")).get('id')\n s = soup.find('h2', text=re.compile(\".+[a-zA-Z] side effects\")).get('id')\n except AttributeError as a:\n d = soup.find('h2', text=re.compile(\"How should I [a-zA-Z]+.\")).parent.get('id')\n s = soup.find('h2', text=re.compile(\".+[a-zA-Z] side 
effects\")).parent.get('id')\n\n nav_d = [\"HowTake\" if d != \"directions\" else \"directions\"][0]\n nav_s = [\"SideEffects\" if s != \"sideEffects\" else \"sideEffects\"][0]\n print(nav_d, nav_s)\n\n print(drug_url)\n else:\n drug_selected, drug_url, condition_url, max_res_df, skip_charts = None, None, None, None, True\n nav_d, nav_s = None, None\n costlabels, costvalues, costcolors = [0], [0], [0]\n burdenlabels, burdenvalues, burdencolors = [0], [0], [0]\n sideeffectslabels, sideeffectsvalues, sideeffectscolors = [0], [0], [0]\n\n # Return the results page!\n return render_template(\"result.html\", max=17000,\n predictions=predictions, # ML prediction result\n # For pie charts:\n show_charts=show_charts,\n costset=costcolors,\n burdenset=burdencolors,\n sideeffectsset=sideeffectscolors,\n # For descriptive text below pie charts\n max_res_df_=max_res_df,\n # For inserting python variables into the html files:\n cond_list=cond_list,\n drug_list=drug_list,\n drug_url_=drug_url, condition_url_=condition_url, nav_d_=nav_d, nav_s_=nav_s,\n drug_selected_=drug_selected, skip_charts=skip_charts,\n result_list=result_list\n )\n\n\nif __name__ == '__main__':\n # app.run(debug=True)\n app.run(debug=False, host='0.0.0.0', port=80)\n"
] | [
[
"pandas.DataFrame",
"numpy.array"
]
] |
bsmarine/BleedDetection | [
"2203e48cb8ed10adeda9fcef129560b6a907ef00"
] | [
"experiments/bleed_exp/multi_preprocessing.py"
] | [
"#!/usr/bin/env python\n# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n'''\nThis preprocessing script loads nrrd files obtained by the data conversion tool: https://github.com/MIC-DKFZ/LIDC-IDRI-processing/tree/v1.0.1\nAfter applying preprocessing, images are saved as numpy arrays and the meta information for the corresponding patient is stored\nas a line in the dataframe saved as info_df.pickle.\n'''\n\nimport os\nimport SimpleITK as sitk\nimport numpy as np\nimport random\nfrom multiprocessing import Pool\nimport pandas as pd\nimport numpy.testing as npt\nfrom skimage.transform import resize\nimport subprocess\nimport pickle\nimport json\nimport configs\ncf = configs.configs()\n\ndef pp_patient(inputs):\n\n #read image\n ix, path = inputs\n pid = path.split('/')[-1]\n \n if cf.multiphase:\n with open(os.path.abspath(cf.pp_mp_cf), 'r') as f:\n pp_mp_cf = json.load(f)\n phases = pp_mp_cf[cf.mp_setting]\n \n concat_images = list()\n \n for ii in phases:\n \n img = sitk.ReadImage(os.path.join(path,'{}.nii.gz'.format(ii)))\n\n img_arr = sitk.GetArrayFromImage(img)\n print('processing {} {}'.format(pid,ii), img.GetSpacing(), img_arr.shape,np.mean(img_arr),np.std(img_arr))\n #img_arr = resample_array(img_arr, img.GetSpacing(), cf.target_spacing) #already re-sampled in prior pre-processing\n img_arr = np.clip(img_arr, -1200, 600)\n #img_arr = (1200 + img_arr) / (600 + 1200) * 255 # a+x / (b-a) * (c-d) (c, d = new)\n concat_images.append(img_arr)\n\n mask = sitk.ReadImage(os.path.join(cf.raw_data_dir, pid, '01_mask.nii.gz'))\n mask_arr = sitk.GetArrayFromImage(mask).astype(np.uint8)\n mask_arr[mask_arr>10] = 0\n\n concat_images.append(mask_arr)\n\n # Find random patch of patch size with random offset, apply to images\n\n if cf.pp_patches is not None:\n \n concat_images = generate_positive_patches(mask_arr,concat_images,cf.pp_patches,40)\n\n # Remove mask_arr from concat\n\n mask_arr = concat_images.pop()\n \n # Concatenate images into singe img array\n\n concat = np.stack(concat_images,axis=3)\n\n # Z normalization of concatenated images as one multi-dimensional array\n\n concat = concat.astype(np.float32)\n concat = (concat - np.mean(concat)) / np.std(concat).astype(np.float16)\n\n print (\"After concatenation \",np.mean(concat),np.std(concat),concat.dtype)\n \n print (\"Concatenated Img Shape \"+str(concat.shape))\n\n #Open Characteristics File\n df = pd.read_csv(os.path.join(cf.root_dir, 'raw_characteristics_gi.csv'), sep=',',converters={'PatientID': lambda x: str(x)})\n df = df[df.PatientID == pid]\n\n #Make Masks Array, Grab Mask ID Per Patient\n #final_rois = np.zeros_like(img_arr, dtype=np.uint8)\n mal_labels = []\n roi_ids = set([ii.split('.')[0].split('_')[0] for ii in os.listdir(path) if '01_mask.nii.gz' in ii])\n print (roi_ids)\n rix = 1\n for rid in roi_ids:\n\n #Grab Mask Paths and Nodule IDs\n roi_id_paths 
= [ii for ii in os.listdir(path) if '01_mask.nii' in ii]\n print (\"ROI ID Paths:\"+str(roi_id_paths))\n nodule_ids = [ii.split('.')[0].split('_')[0].lstrip(\"0\") for ii in roi_id_paths]\n print (\"Nodule ID:\"+str(nodule_ids))\n\n #Grab Severity Value From Characteristics file\n rater_labels = [1] #[df[df.ROI_ID == int(ii)].Severity.values[0] for ii in nodule_ids]\n print (\"Rater Labels:\"+str(rater_labels))\n\n ##Take Mean Severity Value\n #rater_labels.extend([0] * (4-len(rater_labels)))\n #mal_label = np.mean([ii for ii in rater_labels if ii > -1])\n mal_label = rater_labels\n mal_list = mal_label\n print (\"#############Mal Label: \"+str(mal_list))\n \n ##Read Mask Paths\n #roi_rater_list = []\n # for rp in roi_id_paths:\n rp = roi_id_paths[0]\n\n # roi = sitk.ReadImage(os.path.join(cf.raw_data_dir, pid, rp))\n # roi_arr = sitk.GetArrayFromImage(roi).astype(np.uint8)\n # roi_arr[roi_arr>10] = 0\n roi_arr = mask_arr\n\n if cf.multiphase:\n\n # Will need to change manually if two-phase ie img_arr = concat[:,:,0]\n\n img_arr = concat[:,:,:,0]\n else:\n img_arr= concat\n\n #roi_arr = resample_array(roi_arr, roi.GetSpacing(), cf.target_spacing)\n assert roi_arr.shape == img_arr.shape, [roi_arr.shape, img_arr.shape, pid, mask.GetSpacing()]\n \n for ix in range(len(img_arr.shape)):\n npt.assert_almost_equal(mask.GetSpacing()[ix], img.GetSpacing()[ix])\n #roi_rater_list.append(roi_arr)\n\n final_rois = roi_arr\n\n # roi_rater_list.extend([np.zeros_like(roi_rater_list[-1])]*(4-len(roi_id_paths)))\n # roi_raters = np.array(roi_rater_list)\n # roi_raters = np.mean(roi_raters, axis=0)\n # roi_raters[roi_raters < 0.5] = 0\n\n # if np.sum(roi_raters) > 0:\n # mal_labels.append(mal_label)\n # final_rois[roi_raters >= 0.5] = rix\n # rix += 1\n # else:\n # # indicate rois suppressed by majority voting of raters\n # print('suppressed roi!', roi_id_paths)\n # with open(os.path.join(cf.pp_dir, 'suppressed_rois.txt'), 'a') as handle:\n # handle.write(\" \".join(roi_id_paths))\n\n #Generate Foreground Slice Indices\n final_rois = np.around(final_rois)\n fg_slices = [ii for ii in np.unique(np.argwhere(final_rois != 0)[:, 0])]\n \n #Make Array From Severity \n #mal_labels = np.array(mal_label)\n\n if mal_list[0] == [0]:\n mal_labels_assert_test = []\n else:\n mal_labels_assert_test = mal_list\n\n print (\"Print Malignancy Labels:\"+str(mal_list))\n print (\"Print Unique Values in ROI Array:\"+str(len(np.unique(final_rois))))\n\n\n assert len(mal_labels_assert_test) + 1 == len(np.unique(final_rois)), [len(mal_labels), np.unique(final_rois), pid]\n\n np.save(os.path.join(cf.pp_dir, '{}_rois.npy'.format(pid)), final_rois)\n np.save(os.path.join(cf.pp_dir, '{}_img.npy'.format(pid)), concat)\n\n with open(os.path.join(cf.pp_dir, 'meta_info_{}.pickle'.format(pid)), 'wb') as handle:\n meta_info_dict = {'pid': pid, 'class_target': mal_list, 'spacing': img.GetSpacing(), 'fg_slices': fg_slices}\n print (meta_info_dict)\n pickle.dump(meta_info_dict, handle)\n\ndef aggregate_meta_info(exp_dir):\n\n files = [os.path.join(exp_dir, f) for f in os.listdir(exp_dir) if 'meta_info' in f]\n df = pd.DataFrame(columns=['pid', 'class_target', 'spacing', 'fg_slices'])\n for f in files:\n with open(f, 'rb') as handle:\n df.loc[len(df)] = pickle.load(handle)\n\n df.to_pickle(os.path.join(exp_dir, 'info_df.pickle'))\n print (\"aggregated meta info to df with length\", len(df))\n\ndef resample_array(src_imgs, src_spacing, target_spacing):\n\n src_spacing = np.round(src_spacing, 3)\n target_shape = [int(src_imgs.shape[ix] * 
src_spacing[::-1][ix] / target_spacing[::-1][ix]) for ix in range(len(src_imgs.shape))]\n for i in range(len(target_shape)):\n try:\n assert target_shape[i] > 0\n except:\n raise AssertionError(\"AssertionError:\", src_imgs.shape, src_spacing, target_spacing)\n\n img = src_imgs.astype(float)\n resampled_img = resize(img, target_shape, order=1, clip=True, mode='edge').astype('float16')\n\n return resampled_img\n\ndef generate_positive_patches(mask_arr,studies,patch_size,random_center_displacement):\n\n q = random_center_displacement\n \n pos_patches = list()\n\n where = np.where(mask_arr==1)\n z_mid = int(where[0].mean())\n y_mid = int(where[1].mean())\n x_mid = int(where[2].mean())\n\n print (x_mid,y_mid,z_mid)\n\n repeat = True\n \n while repeat == True:\n\n z_start = random.randint(-(q),q)+z_mid-(patch_size[0]/2)\n if z_start < 0:\n z_start = 0\n y_start = random.randint(-(q),q)+y_mid-(patch_size[1]/2)\n if y_start < 0:\n y_start = 0\n x_start = random.randint(-(q),q)+x_mid-(patch_size[2]/2)\n if x_start < 0:\n x_start = 0\n\n z_end = z_start+patch_size[0]\n y_end = y_start+patch_size[1]\n x_end = x_start+patch_size[2]\n\n numbers = [ int(x) for x in [x_start,x_end,y_start,y_end,z_start,z_end] ]\n\n pos_patches = [numbers]\n\n print (\"Positive Patches: \",pos_patches)\n\n Z = pos_patches[0]\n patches = list()\n for arr in studies:\n print (arr.shape,Z)\n data = arr[Z[4]:Z[5],Z[2]:Z[3],Z[0]:Z[1]]\n\n ## Pad if doesn't fit correct full patch size\n if np.any([data.shape[dim] < ps for dim, ps in enumerate(patch_size)]):\n new_shape = [np.max([data.shape[dim], ps]) for dim, ps in enumerate(patch_size)]\n data = pad_nd_image(data, new_shape, mode='constant')\n \n print (\"Patch Shape \",data.shape)\n\n mean = np.mean(data)\n\n if len(np.unique(data))==2:\n if mean == 0.0:\n print (\"Mask missing \"+str(mean))\n repeat = True\n break\n \n if mean > -650:\n print (\"Appropriate Mean of Patch \"+str(mean))\n repeat = False\n else:\n print (\"Inappropriate Mean of Patch \"+str(mean))\n repeat = True\n break\n\n patches.append(data)\n \n return patches\n\ndef pad_nd_image(image, new_shape=None, mode=\"edge\", kwargs=None, return_slicer=False, shape_must_be_divisible_by=None):\n \"\"\"\n one padder to pad them all. Documentation? Well okay. A little bit. by Fabian Isensee\n\n :param image: nd image. can be anything\n :param new_shape: what shape do you want? new_shape does not have to have the same dimensionality as image. If\n len(new_shape) < len(image.shape) then the last axes of image will be padded. If new_shape < image.shape in any of\n the axes then we will not pad that axis, but also not crop! (interpret new_shape as new_min_shape)\n Example:\n image.shape = (10, 1, 512, 512); new_shape = (768, 768) -> result: (10, 1, 768, 768). Cool, huh?\n image.shape = (10, 1, 512, 512); new_shape = (364, 768) -> result: (10, 1, 512, 768).\n\n :param mode: see np.pad for documentation\n :param return_slicer: if True then this function will also return what coords you will need to use when cropping back\n to original shape\n :param shape_must_be_divisible_by: for network prediction. After applying new_shape, make sure the new shape is\n divisibly by that number (can also be a list with an entry for each axis). 
Whatever is missing to match that will\n be padded (so the result may be larger than new_shape if shape_must_be_divisible_by is not None)\n :param kwargs: see np.pad for documentation\n \"\"\"\n if kwargs is None:\n kwargs = {}\n\n if new_shape is not None:\n old_shape = np.array(image.shape[-len(new_shape):])\n else:\n assert shape_must_be_divisible_by is not None\n assert isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray))\n new_shape = image.shape[-len(shape_must_be_divisible_by):]\n old_shape = new_shape\n\n num_axes_nopad = len(image.shape) - len(new_shape)\n\n new_shape = [max(new_shape[i], old_shape[i]) for i in range(len(new_shape))]\n\n if not isinstance(new_shape, np.ndarray):\n new_shape = np.array(new_shape)\n\n if shape_must_be_divisible_by is not None:\n if not isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray)):\n shape_must_be_divisible_by = [shape_must_be_divisible_by] * len(new_shape)\n else:\n assert len(shape_must_be_divisible_by) == len(new_shape)\n\n for i in range(len(new_shape)):\n if new_shape[i] % shape_must_be_divisible_by[i] == 0:\n new_shape[i] -= shape_must_be_divisible_by[i]\n\n new_shape = np.array([new_shape[i] + shape_must_be_divisible_by[i] - new_shape[i] % shape_must_be_divisible_by[i] for i in range(len(new_shape))])\n\n difference = new_shape - old_shape\n pad_below = difference // 2\n pad_above = difference // 2 + difference % 2\n pad_list = [[0, 0]]*num_axes_nopad + list([list(i) for i in zip(pad_below, pad_above)])\n res = np.pad(image, pad_list, mode, **kwargs)\n if not return_slicer:\n return res\n else:\n pad_list = np.array(pad_list)\n pad_list[:, 1] = np.array(res.shape) - pad_list[:, 1]\n slicer = list(slice(*i) for i in pad_list)\n return res, slicer\n\n\n#Convert .nii.gz to .nrrd\n\nif __name__ == \"__main__\":\n\n paths = [os.path.join(cf.raw_data_dir, ii) for ii in os.listdir(cf.raw_data_dir) if not ii.startswith('.')]\n\n if not os.path.exists(cf.pp_dir):\n os.makedirs(cf.pp_dir)\n\n # pool = Pool(processes=1)\n # p1 = pool.map(pp_patient, enumerate(paths), chunksize=1)\n # pool.close()\n # pool.join()\n for i in enumerate(paths):\n pp_patient(i)\n\n aggregate_meta_info(cf.pp_dir)\n subprocess.call('cp {} {}'.format(os.path.join(cf.pp_dir, 'info_df.pickle'), os.path.join(cf.pp_dir, 'info_df_bk.pickle')), shell=True)\n"
] | [
[
"numpy.max",
"numpy.pad",
"numpy.array",
"numpy.round",
"pandas.DataFrame",
"numpy.mean",
"numpy.where",
"numpy.std",
"numpy.stack",
"numpy.clip",
"numpy.around",
"numpy.argwhere",
"numpy.unique"
]
] |
sanrou/fidelityWeighting | [
"3767d80ad31559264d8ff3e42407eeed863f0ebc"
] | [
"sourceFidelityAnalyses.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 5 14:34:19 2021\nLoad source fidelity arrays. Do analyses.\n@author: rouhinen\n\"\"\"\n\nimport numpy as np\nimport os\nimport glob\n\n## Resolutions and path patterns\nresolutions = ['100', '200', '400', '597', '775', '942']\n\nsubjectsFolder = 'C:\\\\temp\\\\fWeighting\\\\fwSubjects_p\\\\'\nsourceFidPattern = '\\\\sourceFidelities_MEEG_parc2018yeo7_XYZ.npy'\n\n\"\"\" Search folders in main folder. \"\"\"\nsubjects = next(os.walk(subjectsFolder))[1]\nif any('_Population' in s for s in subjects):\n subjects.remove('_Population')\n\n\"\"\" Get source fidelities. \"\"\"\nfidelities = [None]*len(subjects)\n## Loop over folders containing subjects.\nfor i, subject in enumerate(subjects):\n subjectFolder = os.path.join(subjectsFolder, subject)\n fidelities[i] = []\n ## Loop over resolutions\n for ii, resolution in enumerate(resolutions):\n # Load source fidelities from file\n fileSourceFidelities = glob.glob(subjectFolder + sourceFidPattern.replace('XYZ', resolution))[0]\n sourceFidelities = np.abs(np.load(fileSourceFidelities))\n fidelities[i].append(sourceFidelities[sourceFidelities!=0]) # Source length vector - zeros. Sources not belonging to a parcel expected to have fidelity value 0.\n\n\nfor ii, resolution in enumerate(resolutions):\n print(f'Minimum {resolution}: {np.min(np.concatenate(fidelities[ii]))}, max: {np.max(np.concatenate(fidelities[ii]))}')\n \n\n### Range of median fidelities for subjects\nmedians = np.zeros((len(fidelities), len(resolutions)))\nfor i, subFids in enumerate(fidelities):\n for ii, singleResFids in enumerate(subFids):\n medians[i,ii] = np.median(singleResFids)\n \nprint(f'Min median for resolutions {resolutions}: {np.min(medians, axis=0)}. Max medians: {np.max(medians, axis=0)}')\n\n\n### Range of mean source fidelities for individuals\nmeans = np.zeros((len(fidelities), len(resolutions)))\nfor i, subFids in enumerate(fidelities):\n for ii, singleResFids in enumerate(subFids):\n means[i,ii] = np.mean(singleResFids)\n \nprint(f'Min mean for resolutions {resolutions}: {np.min(means, axis=0)}. Max means: {np.max(means, axis=0)}')\n"
] | [
[
"numpy.max",
"numpy.concatenate",
"numpy.median",
"numpy.load",
"numpy.min",
"numpy.mean"
]
] |
nathancy/stackoverflow | [
"e9e2e2b8fba61e41526638a13ac7ada6de2d7560"
] | [
"58133383-blur-section-using-mask/create_mask.py"
] | [
"import cv2\nimport numpy as np\n\n# Create a mask\nimage = cv2.imread('1.png')\nmask = np.zeros(image.shape, dtype=np.uint8)\ncnt = np.array([[200, 100], [350, 100], [350, 250], [200, 250]])\ncv2.fillPoly(mask, [cnt], [255,255,255])\ncv2.imwrite('newmask.png', mask)\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
satyam0051/preipl | [
"30b3c9a7f7de5a2ee6750fe3c002297c558d421d"
] | [
"ipl_team_win_predict.py"
] | [
"import pandas as pd\nimport numpy as np\nimport pickle as pkl\nfrom matplotlib import pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\n\nimport seaborn as sb\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\ndeliveries=pd.read_csv('/home/stellamarsh/ipl_project/ipl/deliveries.csv',engine='python')\ndata=pd.read_csv(\"/home/stellamarsh/ipl_project/ipl/matches.csv\",engine='python')\n\n\n#print(data.head())\n#print(data.shape)\n\n#print(data.tail())\n#print(data.describe())\n\n#sb.countplot(data['winner'])\n\n#plt.xticks(rotation=90)\n#batting_first=data[data['win_by_runs']!=0]\n#plt.figure(figsize=(7,7))\n#plt.pie(list(batting_first['winner'].value_counts()),labels=list(batting_first['winner'].value_counts().keys()),autopct='%0.1f%%')\n\n#plt.show()\n\n\n#batting_second=data[data['win_by_wickets']!=0]\n\n#plt.figure(figsize=(7,7))\n\n#plt.pie(list(batting_second['winner'].value_counts()),labels=list(batting_second['winner'].value_counts().keys()),autopct='%0.1f%%')\n\n#plt.show()\n\n\n#most_runs=deliveries.groupby(['batsman','batting_team'])['batsman_runs'].sum().sort_values(ascending=False).reset_index().head(10)\n\n\n#runs=sb.barplot(x=\"batsman\",y=\"batsman_runs\",data=most_runs,edgecolor=(0,0,0))\n#runs.set_ylabel('Total Runs')\n#runs.set_xlabel('Batsman')\n\n#plt.xticks(rotation=90)\n#plt.title(\"Total Runs per Batsman\")\n#plt.show()\n\nnew_data=data[['team1','team2','toss_decision','toss_winner','winner']]\n\nnew_data.dropna(inplace=True)\n\nall_teams={}\n\nct=0\n\n\nfor i in range(len(data)):\n if data.loc[i]['team1'] not in all_teams:\n all_teams[data.loc[i]['team1']]=ct\n ct=ct+1\n\n if data.loc[i]['team2'] not in all_teams:\n all_teams[data.loc[i]['team2']]=ct\n ct=ct+1\n\n\nx=new_data[['team1','team2','toss_decision','toss_winner']]\ny=new_data[['winner']]\n\nencoded_teams={w:k for k,w in all_teams.items()}\n\n\nx=np.array(x)\ny=np.array(y)\n\nfor i in range(len(x)):\n x[i][0]=all_teams[x[i][0]]\n x[i][1]=all_teams[x[i][1]]\n x[i][3]=all_teams[x[i][3]]\n\n y[i][0]=all_teams[y[i][0]]\n\n\nfb={'field':0,'bat':1}\n\nfor i in range(len(x)):\n x[i][2]=fb[x[i][2]]\n\n\nfor i in range(len(x)):\n if x[i][3]==x[i][0]:\n x[i][3]=0\n else:\n x[i][3]=1\n\n\n\nones=0\nfor i in range(len(y)):\n if y[i][0]==x[i][1]:\n if ones<370:\n ones+=1\n y[i][0]=1\n else:\n t=x[i][1]\n x[i][0]=x[i][1]\n x[i][1]=t\n y[i][0]=0\n\n else:\n y[i][0]=0\n\nx=np.array(x,dtype='int32')\ny=np.array(y,dtype='int32')\ny=y.ravel()\n\nprint(np.unique(y,return_counts=True))\n\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2)\n\n\nmodel1=SVC().fit(x_train,y_train)\nmodel1.score(x_test,y_test)\n\nmodel2=DecisionTreeClassifier().fit(x_train,y_train)\nmodel2.score(x_test,y_test)\n\nmodel3=RandomForestClassifier(n_estimators=200).fit(x_train,y_train)\n\nmodel3.score(x_test,y_test)\ntest=np.array([1,2,0,0]).reshape(1,-1)\nprint(model1.predict(test))\nprint(model2.predict(test))\nprint(model3.predict(test))\n\nwith open('/home/stellamarsh/ipl_project/ipl/model1.pkl','wb') as f:\n pkl.dump(model3,f)\n\nwith open('/home/stellamarsh/ipl_project/ipl/vocab.pkl','wb') as f:\n pkl.dump(encoded_teams,f)\nwith open('/home/stellamarsh/ipl_project/ipl/inv_vocab.pkl','wb') as f:\n pkl.dump(all_teams,f)\n\n"
] | [
[
"numpy.array",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.svm.SVC",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"numpy.unique"
]
] |
Anshu2711/zmc | [
"13e646cd7c69f4ce53951921e3ea18c7299c16e7"
] | [
"zmc/Zmc.py"
] | [
"import os \nimport subprocess \nimport numpy as np \nfrom random import seed \nfrom random import choice\nfrom random import random\nfrom timeit import default_timer as timer\n\n########### Defining a Zmc class that will initialize by reading all the input files #############################\nclass interval:\n def __init__(self):\n self.label = 'Event' \n self.interval = 10\n \nclass Copper:\n def __init__(self,lab,state,pos):\n self.label = lab\n self.state = state \n self.position = pos\n\ndef error_write(nline):\n f = open('error_output.txt','w')\n f.write(nline)\n f.close()\n\ndef detect_num(line,char):\n try:\n return float(line.split()[-1].replace('\\n',''))\n except:\n error_write('The {0} isnt a numeric value. Please re-enter'.format(char))\n return False \n \ndef frac_distance(f1, f2,xdir,ydir,zdir):\n #print('#->',f1,'<-#')\n #print('#->',f2,'<-#')\n [ax,ay,az] = [13.6750000000000007 , 0.0000000000000000 , 0.0000000000000000]\n [bx,by,bz] = [-6.8374999999999977 , 11.8428973967521998 , 0.0000000000000000]\n [cx,cy,cz] = [0.0000000000000000 , 0.0000000000000000 , 14.7669999999999995]\n xdist = abs(f1[0] - f2[0]) - int(2*abs(f1[0] - f2[0])) \n ydist = abs(f1[1] - f2[1]) - int(2*abs(f1[1] - f2[1]))\n zdist = abs(f1[2] - f2[2]) - int(2*abs(f1[2] - f2[2]))\n #print(xdist,ydist,zdist)\n delx = xdir*xdist*ax + ydir*ydist*bx + zdir*zdist*cx \n dely = xdir*xdist*ay + ydir*ydist*by + zdir*zdist*cy \n delz = xdir*xdist*az + ydir*ydist*bz + zdir*zdist*cz \n return np.sqrt((delx)**2 + (dely)**2 + (delz)**2)\n \nclass Zmc:\n '''\n Contains attributes corresponding to the various input parameters\n Reaction conditions: r_seed, T, P_O2, P_NH3, P_NO \n Box description: lx, ly, lz, N, x_Cu1 \n Kinetic Parameters: A_pair, E_pair, A_red, E_red, f(d) \n Simulation parameters: N_max, t_final, t_wall \n Recording parameter: event_intval, time_intval \n '''\n \n def __init__(self):\n '''\n Initialize by reading the input files\n '''\n self.rseed = None # 10000\n self.T = None #473\n self.P_O2 = None\n self.P_NH3 = None \n self.P_NO = None \n #self.P_tot = None #1 \n #self.x_O2 = None #0.1\n #self.x_NH3 = None #0.4\n #self.x_NO = None #0.3 \n self.max_events = None #100\n self.max_time = None #360 \n self.wtime = None #50\n self.xl = None #10.0\n self.yl = None #10.0\n self.zl = None #10.0\n self.al = None \n self.bl = None\n self.cl = None\n self.NCu = None #20 \n self.xCuI = None #0.5\n self.Ar = None #5.0E+08 \n self.Ear = None #0.35 \n self.Ap = None #4.0E+09\n self.Eap = None #0.97\n self.seed_mode = None #'auto'\n self.autostat = 0 \n self.manstat = 0\n self.chastat = 0\n self.kin_mode = None\n self.red_mode = None #'single'\n self.dec_mode = None #'exponential'\n self.dec_counters = [0,0,0] #Lists to validate the expo/sigmo/step dependence. 
\n self.expo_count = 0 \n self.sig_count = 0 \n self.step_count = 0 \n self.steady_count = 0 \n self.tred_count = 0\n self.tox_count = 0\n self.simutime = 0\n self.simevents = 0\n self.oxievents = 0\n self.redevents = 0\n self.Cut_sigmo = None \n self.Cut_step = None\n self.n_O2 = 0 \n self.n_NO = 0\n self.n_NH3 = 0 \n self.statistics = None\n self.Cu_list = []\n self.indices = []\n self.snap_int = interval()\n self.proc_int = interval()\n self.spec_int = interval()\n self.p2ind = {} \n self.min_distance = {}\n self.frequency = {}\n if 'error_output.txt' in os.listdir():\n os.system('rm error_output.txt')\n if 'raw_outputs.txt' in os.listdir():\n os.system('rm raw_outputs.txt')\n if 'event_outputs.txt' in os.listdir():\n os.system('rm event_outputs.txt')\n stat1 = self.read_simu_input()\n stat2 = self.read_box_input()\n stat3 = self.read_kin_input()\n if stat1 == False or stat2 == False or stat3 == False:\n print('kMC calculation exitting') \n exit()\n self.read_outputs()\n \n \n def read_simu_input(self):\n if 'simulation_input.dat' not in os.listdir():\n error_write('simulation_input.dat not detected. Please create the required file in the directory')\n return False\n lines = open('simulation_input.dat','r').readlines()\n for line in lines:\n if line.startswith('#'):\n continue #<-----Comment \n elif line == '\\n':\n continue #<-----Empty line \n elif '#' in line:\n line = line.split('#')[0] #<------ Hash tag in the line which is ignored while parsing. \n if len(line) == 0 or line == '':\n continue\n if 'random_seed' in line:\n self.rseed = detect_num(line,'rseed')\n if self.rseed == False: return False\n elif 'temperature' in line:\n self.T = detect_num(line,'temperature')\n if self.T == False: return False\n elif 'O2_pressure' in line:\n self.P_O2 = detect_num(line,'O2_pressure')\n if self.P_O2 == False: return False \n elif 'NO_pressure' in line:\n self.P_NO = detect_num(line,'NO_pressure')\n if self.P_NO == False: return False\n elif 'NH3_pressure' in line:\n self.P_NH3 = detect_num(line,'NH3_pressure')\n if self.P_NH3 == False: return False\n elif 'snapshots' in line: \n if 'event' in line:\n self.snap_int.label = 'event' \n self.snap_int.interval = detect_num(line,'snapshot')\n if self.snap_int.interval == False: return False\n self.snap_int.interval = int(self.snap_int.interval)\n elif 'time' in line:\n self.snap_int.label = 'time'\n self.snap_int.interval = detect_num(line,'snapshot')\n if self.snap_int.interval == False: return False\n else:\n error_write('Snapshot interval specified which is neither event or time. Please rectify in simulation_input.dat')\n elif 'process_statistics' in line: \n if 'event' in line:\n self.proc_int.label = 'event' \n self.proc_int.interval = detect_num(line,'process_statistics')\n if self.proc_int.interval == False: return False\n self.proc_int.interval = int(self.proc_int.interval)\n elif 'time' in line:\n self.proc_int.label = 'time'\n self.proc_int.interval = detect_num(line,'process_statistics')\n if self.proc_int.interval == False: return False\n else:\n error_write('Process statistics interval specified which is neither event or time. 
Please rectify in simulation_input.dat')\n elif 'species_numbers' in line: \n if 'event' in line:\n self.spec_int.label = 'event' \n self.spec_int.interval = detect_num(line,'species_numbers')\n if self.spec_int.interval == False: return False\n self.spec_int.interval = int(self.spec_int.interval)\n elif 'time' in line:\n self.spec_int.label = 'time'\n self.spec_int.interval = detect_num(line,'species_numbers')\n if self.spec_int.interval == False: return False\n else:\n error_write('Specie number interval specified which is neither event or time. Please rectify in simulation_input.dat')\n elif 'max_events' in line: \n self.max_events = detect_num(line,'max_events')\n if self.max_events == False: return False\n elif 'max_time' in line: \n self.max_time = detect_num(line,'max_time')\n if self.max_time == False: return False \n elif 'wall_time' in line: \n self.wtime = detect_num(line,'wall_time')\n if self.wtime == False: return False\n elif len(line.strip())==0:\n continue\n else:\n error_write('Unknown line {0} detected. Please re-enter in simulation_input.dat'.format(line))\n return False \n msg = ['Random seed','Temperature','O2_pressure','NO_pressure','NH3_pressure','Snapshot interval','Process statistics interval','Specie number interval','Max events','Max time','Wall time']\n var =[self.rseed,self.T,self.P_O2,self.P_NO,self.P_NH3,self.snap_int.interval,self.proc_int.interval,self.spec_int.interval,self.max_events, self.max_time,self.wtime]\n ind = 0\n for v in var:\n if v == None:\n error_write('{0} has not been specified in simulation_input.dat. Please recheck and re-enter'.format(msg[ind]))\n return False \n ind += 1 \n \n def read_box_input(self):\n if 'box_input.dat' not in os.listdir():\n error_write('box_input.dat not detected. Please create the required file in the directory')\n return False\n #lines = open('box_input.dat','r').readlines()\n with open('box_input.dat','r') as fi:\n lines = fi.readlines()\n for line in lines:\n if line.startswith('#'):\n continue #<-----Comment \n elif line == '\\n':\n continue #<-----Empty line \n elif '#' in line:\n line = line.split('#')[0] #<------ Hash tag in the line which is ignored while parsing. \n if len(line) == 0 or line == '':\n continue\n if 'x_length' in line:\n self.xl = detect_num(line,'x_length')\n if self.xl == False: return False\n elif 'y_length' in line:\n self.yl = detect_num(line,'y_length')\n if self.yl == False: return False\n elif 'z_length' in line:\n self.zl = detect_num(line,'z_length')\n if self.zl == False: return False\n elif 'a_reps' in line:\n self.al = detect_num(line,'a_reps')\n self.chastat += 1\n if self.al == False: return False\n elif 'b_reps' in line:\n self.bl = detect_num(line,'b_reps')\n self.chastat += 1\n if self.bl == False: return False\n elif 'c_reps' in line:\n self.cl = detect_num(line,'c_reps')\n self.chastat += 1\n if self.cl == False: return False\n elif 'N_Copper' in line:\n self.NCu = detect_num(line,'N_Copper')\n self.chastat += 1\n self.autostat += 1\n if self.NCu == False: return False\n elif 'Cu1_frac' in line:\n self.xCuI = detect_num(line,'Cu1_frac')\n self.chastat += 1\n self.autostat += 1\n if self.xCuI is False: return False\n elif 'Seed_mode' in line: \n self.seed_mode = line.split()[-1].replace('\\n','')\n if self.seed_mode != 'auto' and self.seed_mode != 'manual' and self.seed_mode != 'CHA':\n error_write('Unknown seed_mode {0} detected. 
Please re-edit the box_input.dat file'.format(self.seed_mode))\n return False\n elif len(line.split())==4: #<-- Checks if 4 numbers are entered as a first pass. Advanced checking in seed_box_manual\n for li in line.split(): \n try:\n temp = float(li)\n except:\n error_write('Unknown line {0} detected. Please re-edit the box_input.dat file'.format(line))\n return False\n self.manstat += 1\n elif len(line.strip())==0:\n continue\n else:\n error_write('Unknown line {0} detected. Please re-edit the box_input.dat file'.format(line))\n return False\n ############# Now beginning the seeding of the 3D box ########################################################\n if self.autostat == 2 and self.manstat !=0 and self.chastat != 2: #<--Checks if entries for BOTH auto and manual have been entered. \n if self.seed_mode == 'auto':\n error_write('Manual/CHA seeding entries detected. Only auto seeding entries N_Copper and Cu1_frac needed!')\n return False\n elif self.seed_mode == 'manual':\n error_write('Auto/CHA seeding entries detected. Only manual seeding entries of Cu oxidation state and positions needed')\n return False\n elif self.seed_mode == 'CHA':\n error_write('Auto/Manual seeding entries detected. Only manual seeding entries of Cu oxidation state and positions needed')\n return False\n if self.seed_mode == 'auto':\n stat = self.seed_box_auto() \n if stat == False: return False\n elif self.seed_mode == 'manual':\n stat = self.seed_box_manual()\n if stat == False: return False\n elif self.seed_mode == 'CHA':\n stat = self.seed_cha_auto()\n if stat == False: return False\n else:\n error_write('Seed mode not specified. Please re-enter in box_input.dat')\n return False\n \n def read_kin_input(self):\n if 'kinetic_input.dat' not in os.listdir():\n error_write('kinetic_input.dat not detected. Please create the required file in the directory')\n return False\n lines = open('kinetic_input.dat','r').readlines()\n for line in lines:\n if line.startswith('#'):\n continue #<-----Comment \n elif line == '\\n':\n continue #<-----Empty line \n elif '#' in line:\n line = line.split('#')[0] #<------ Hash tag in the line which is ignored while parsing. \n if len(line) == 0 or line == '':\n continue\n if '_expo' in line:\n self.dec_counters[0] += 1\n continue\n elif 'Sigmoid' in line:\n self.dec_counters[1] += 1 \n continue\n elif 'Step' in line:\n self.dec_counters[2] += 1\n continue\n elif 'Kinetic_mode' in line:\n self.kin_mode = line.split()[-1].replace('\\n','')\n if self.kin_mode != 'steady_state' and self.kin_mode != 'transient_reduction' and self.kin_mode != 'transient_oxidation':\n error_write('Unknown kinetic_mode {0} detected. Please re-edit the kinetic_input.dat file'.format(self.kin_mode))\n return False\n elif 'A_reduction' in line:\n self.Ar = detect_num(line,'A_reduction')\n if self.Ar == False: return False \n self.steady_count += 1\n self.tred_count += 1 \n elif 'Ea_reduction' in line:\n self.Ear = detect_num(line,'A_reduction')\n if self.Ear is False: return False \n self.steady_count += 1\n self.tred_count += 1\n elif 'Reduce_mode' in line:\n self.red_mode = line.split()[-1].replace('\\n','')\n if self.red_mode != 'single' and self.red_mode != 'dual':\n error_write('Unknown reduce_mode {0} detected. 
Please re-edit the kinetic_input.dat file'.format(self.red_mode))\n return False\n self.steady_count += 1\n self.tred_count += 1\n elif 'A_pairing' in line:\n self.Ap = detect_num(line,'A_pairing')\n if self.Ap == False: return False\n self.steady_count += 1\n self.tox_count += 1\n elif 'Ea_pairing' in line:\n self.Eap = detect_num(line,'A_pairing')\n if self.Eap is False: return False \n self.steady_count += 1\n self.tox_count += 1\n elif 'Statistics' in line:\n strtf = line.split()[-1]\n if strtf == 'True':\n self.statistics = True\n elif strtf == 'False':\n self.statistics = False\n else:\n return False\n elif 'Decay' in line:\n self.dec_mode = line.split()[-1].replace('\\n','')\n if self.dec_mode != 'exponential' and self.dec_mode != 'sigmoidal' and self.dec_mode != 'step':\n error_write('Unknown reduce_mode {0} detected. Please re-edit the kinetic_input.dat file'.format(self.dec_mode))\n return False\n self.steady_count += 1\n self.tox_count += 1\n elif len(line.strip())==0:\n continue\n else:\n error_write('Unknown line {0} detected. Please re-edit the kinetic_input.dat file'.format(line))\n return False \n if self.kin_mode == 'steady_state':\n stat = self.set_steady_state()\n if stat == False: return False\n elif self.kin_mode == 'transient_oxidation':\n stat = self.set_trans_oxidation()\n if stat == False: return False\n elif self.kin_mode == 'transient_reduction':\n stat = self.set_trans_reduction()\n if stat == False: return False\n if self.dec_counters!=[2,0,0] and self.dec_counters!=[0,2,0] and self.dec_counters!=[0,0,1]: \n if self.dec_mode == 'exponential':\n error_write('Entries relevant to sigmoidal/step decay detected. Remove all those entries from kinetic_input.dat.')\n return False\n elif self.dec_mode == 'sigmoidal':\n error_write('Entries relevant to exponential/step decay detected. Remove all those entries from kinetic_input.dat.')\n return False\n elif self.dec_mode == 'step':\n error_write('Entries relevant to exponential/sigmoidal decay detected. Remove all those entries from kinetic_input.dat.')\n return False\n \n def seed_box_auto(self):\n if self.autostat != 2:\n error_write('Auto-seeding components N_Copper and Cu1_frac not entered. Please check and re-enter in box_input.dat')\n return False\n if self.xCuI == None or self.NCu == None or self.rseed == None:\n error_write('CuI fraction/Total Copper/Random seed not entered. Please check and re-enter in box_input.dat')\n return False\n if self.xCuI > 1.0 or self.xCuI < 0:\n error_write('The CuI fraction has to be between 0 and 1!. Please check and re-enter in box_input.dat')\n return False\n if self.xCuI == 0:\n print('Shoot!')\n self.NCuI = int(self.xCuI*self.NCu) \n self.NCuII = int(self.NCu - self.NCuI)\n self.NCu = self.NCuI + self.NCuII\n if self.xl == None or self.yl == None or self.zl == None:\n error_write('x_length/y_length/z_length not entered. Please check and re-enter in box_input.dat')\n return False\n maxi_posi = np.array([self.xl,self.yl,self.zl])\n seed(int(self.rseed))\n for i in range(1,self.NCuI+1):\n rand_arr = np.array([random(), random(), random()])\n posi = np.multiply(rand_arr,maxi_posi)\n self.Cu_list.append(Copper(lab=i,state=1,pos=posi))\n for i in range(self.NCuI+1,int(self.NCu)+1):\n rand_arr = np.array([random(), random(), random()])\n posi = np.multiply(rand_arr,maxi_posi)\n self.Cu_list.append(Copper(lab=i,state=2,pos=posi))\n \n def seed_cha_auto(self):\n if self.chastat != 5:\n error_write('CHA seeding components a_reps,b_reps,c_reps,NCu, xCuI have not been entered. 
Please check and re-enter in box_input.dat')\n return False\n if self.xCuI == None or self.NCu == None or self.rseed == None:\n error_write('CuI fraction/Total Copper/Random seed not entered. Please check and re-enter in box_input.dat')\n return False\n if self.al == None or self.bl == None or self.cl == None:\n error_write('Repetitions in (a/b/c) directions have not been entered. Please check and re-enter in box_input.dat')\n return False\n if self.xCuI > 1.0 or self.xCuI < 0:\n error_write('The CuI fraction has to be between 0 and 1!. Please check and re-enter in box_input.dat')\n return False\n self.NCuI = int(self.xCuI*self.NCu) \n self.NCuII = int(self.NCu - self.NCuI)\n self.NCu = self.NCuI + self.NCuII\n self.al = int(self.al) ; self.bl = int(self.bl) ; self.cl = int(self.cl) \n from ase.io import read \n atoms = read('/afs/crc.nd.edu/user/a/agoswami/zmc/zmc/POSCAR_CHA')\n del atoms[[atom.index for atom in atoms if atom.symbol=='O']] # Delete all O atoms \n atomsrep = atoms.repeat([self.al,self.bl,self.cl])\n nums = [a for a in range(0,len(atomsrep))]\n seed(self.rseed)\n # While seeding, have to ensure that Cu locations are consistent with Lowenstein's rule. The following routine ensures this \n for i in range(1,self.NCuI+1):\n print('Seed_on_site: ', i)\n if i == 0:\n ind = choice(nums)\n #label_pos.update({i+1:atomsrep[ind].position})\n self.Cu_list.append(Copper(lab=i,state=1,pos=atomsrep[ind].position))\n self.indices.append(ind)\n self.p2ind.update({tuple(atomsrep[ind].position):ind})\n else:\n while True:\n ind = choice(nums)\n flag = 0\n for at in self.indices:\n d = atomsrep.get_distance(at,ind,mic = True)\n #print(d)\n if round(d,2) < 4:\n flag = 1 \n break\n if flag == 0:\n break\n #label_pos.update({i+1:atomsrep[ind].position})\n self.Cu_list.append(Copper(lab=i,state=1,pos=atomsrep[ind].position))\n self.indices.append(ind)\n self.p2ind.update({tuple(atomsrep[ind].position):ind})\n for i in range(self.NCuI+1,int(self.NCu)+1):\n print('Seed_on_site: ', i)\n if len(self.Cu_list) == 0:\n ind = choice(nums)\n #label_pos.update({i+1:atomsrep[ind].position})\n self.Cu_list.append(Copper(lab=i,state=2,pos=atomsrep[ind].position))\n self.indices.append(ind)\n self.p2ind.update({tuple(atomsrep[ind].position):ind})\n else:\n while True:\n ind = choice(nums)\n flag = 0\n for at in self.indices:\n d = atomsrep.get_distance(at,ind,mic = True)\n #print(d)\n if round(d,2) < 4:\n flag = 1 \n break\n if flag == 0:\n break\n #label_pos.update({i+1:atomsrep[ind].position})\n self.Cu_list.append(Copper(lab=i,state=2,pos=atomsrep[ind].position))\n self.indices.append(ind)\n self.p2ind.update({tuple(atomsrep[ind].position):ind})\n f = open('indices.txt','w')\n for a in self.indices:\n f.write('{0} '.format(a))\n f.close()\n #print('The Cu indices are', self.indices)\n #print('The diictionary is', self.p2ind)\n def seed_box_manual(self):\n if self.manstat==0:\n error_write('Please recheck the manual entries in box_input.dat. If not entered, please do so.')\n return False\n if self.xl == None or self.yl == None or self.zl == None:\n error_write('x_length/y_length/z_length not entered. 
Please check and re-enter in box_input.dat')\n return False\n lines = open('box_input.dat','r').readlines()\n self.NCuI = 0\n self.NCuII = 0\n Cu_list1 = []\n Cu_list2 = []\n flag1 = 0\n num1s = 0 \n for line in lines:\n if len(line.split())==4:\n ox_state = (float((line.split()[0])))\n if ox_state == 1:\n num1s += 1 \n flag2 = num1s \n for line in lines:\n if len(line.split())==4:\n ox_state = (float((line.split()[0])))\n if ox_state - int(ox_state) < 1E-15:\n ox_state = int(ox_state)\n else:\n error_write('The oxidation state of Copper cannnot be a decimal here! Please recheck in box_input.dat')\n return False \n X_pos = float(line.split()[1])\n Y_pos = float(line.split()[2])\n Z_pos = float(line.split()[3])\n posi = np.array([X_pos, Y_pos, Z_pos])\n if X_pos > self.xl or Y_pos > self.yl or Z_pos > self.zl:\n error_write('The position specified are outside the box. Please check the entries again in box_input.dat')\n return False \n if ox_state != 1 and ox_state !=2:\n error_write('Invalid oxidation state {0} specified in box_input.dat. Please recheck'.format(ox_state))\n return False \n elif ox_state == 1:\n flag1 += 1 \n Cu_list1.append(Copper(lab=flag1,state=ox_state,pos=posi))\n self.NCuI += 1 \n elif ox_state == 2:\n flag2 += 1 \n self.NCuII += 1\n Cu_list2.append(Copper(lab=flag2,state=ox_state,pos=posi))\n self.Cu_list = Cu_list1 + Cu_list2\n self.NCu = self.NCuI + self.NCuII #This automatically fills the Cu list first with O.S. 1 and then O.S. 2. \n \n def set_steady_state(self):\n if self.steady_count!=6:\n error_write('All the inputs required for steady state calculations not entered. Please recheck and re-enter')\n return False\n if self.dec_mode == 'exponential':\n stat = self.set_expo_dependence()\n if stat == False: return False\n elif self.dec_mode == 'sigmoidal':\n stat = self.set_sig_dependence()\n if stat == False: return False\n elif self.dec_mode == 'step':\n stat = self.set_step_dependence()\n if stat == False: return False\n \n def set_trans_reduction(self):\n if self.Ap != None or self.Eap != None or self.dec_mode != None:\n error_write('For transient reduction, oxidation parameters A_pairing, Ea_pairing, decay mode dont need to be specified. Please recheck and re-enter in kinetic_input.dat')\n return False\n if self.NCuI != 0:\n error_write('For transient reduction, all Coppers have to be in +2 oxidation state initially. Please recheck and re-enter in box_input.dat')\n return False\n \n def set_trans_oxidation(self):\n if self.Ar != None or self.Ear != None or self.red_mode != None:\n error_write('For transient oxidation, reduction parameters A_reduction, Ea_reduction, reduce_mode dont need to be specified. Please recheck and re-enter in kinetic_input.dat')\n return False\n if self.NCuII != 0:\n error_write('For transient oxidation, all Coppers have to be in +1 oxidation state initially. Please recheck and re-enter in box_input.dat')\n return False\n if self.dec_mode == 'exponential':\n stat = self.set_expo_dependence()\n if stat == False: return False\n elif self.dec_mode == 'sigmoidal':\n print('Yes!')\n stat = self.set_sig_dependence()\n if stat == False: return False\n elif self.dec_mode == 'step':\n stat = self.set_step_dependence()\n if stat == False: return False\n \n def set_expo_dependence(self):\n if self.dec_counters[0]!=2:\n error_write('Either A_expo or B_expo have not been specified. 
Please check and re-enter in kinetic_input.dat')\n return False\n lines = open('kinetic_input.dat','r').readlines()\n for line in lines:\n if 'A_expo' in line:\n self.A_expo = detect_num(line,'A_expo')\n if self.A_expo is False: return False \n self.expo_count +=1\n elif 'B_expo' in line:\n self.B_expo = detect_num(line,'A_reduction')\n if self.B_expo is False: return False\n self.expo_count += 1 \n if self.expo_count != 2:\n error_write('Exponential dependence components A_expo, B_expo not recognized separately. Please check and re-enter in kinetic_input.dat')\n return False\n \n def set_sig_dependence(self):\n if self.dec_counters[1]!=2:\n error_write('Sigmoidal cutoff/slope has not been specified. Please check and re-enter in kinetic_input.dat')\n return False\n lines = open('kinetic_input.dat','r').readlines()\n for line in lines:\n if 'Sigmoid_cutoff' in line:\n self.Cut_sigmo = detect_num(line,'Sigmoid_cutoff')\n if self.Cut_sigmo == False: return False \n if 'Sigmoid_slope' in line:\n self.Slo_sigmo = detect_num(line,'Sigmoid_slope')\n if self.Slo_sigmo == False: return False \n if self.seed_mode != 'CHA':\n if self.Cut_sigmo > self.xl or self.Cut_sigmo > self.yl or self.Cut_sigmo > self.zl:\n error_write('Sigmoid cutoff specified is greater than box dimensions. Please recheck the input at kinetic_input.dat.')\n return False\n\n def set_step_dependence(self):\n if self.dec_counters[2]!=1:\n error_write('Step cutoff has not been specified. Please check and re-enter in kinetic_input.dat')\n return False\n lines = open('kinetic_input.dat','r').readlines()\n for line in lines:\n if 'Step_cutoff' in line:\n self.Cut_step = detect_num(line,'Step_cutoff')\n if self.Cut_step == False: return False \n if self.seed_mode != 'CHA':\n if self.Cut_step > self.xl or self.Cut_step > self.yl or self.Cut_step > self.zl:\n error_write('Sigmoid cutoff specified is greater than box dimensions. Please recheck the input at kinetic_input.dat.')\n return False\n \n def init_snap(self):\n f = open('Box_state.txt','w')\n f.write('##### 3D Coordinates of the seeded coppers written below#######\\n')\n f.write('S.No. X Y Z\\n')\n i = 0\n for cop in self.Cu_list:\n i += 1\n posit = cop.position\n f.write('{0} {1} {2} {3}\\n'.format(i,posit[0],posit[1],posit[2]))\n f.close()\n \n def init_spec(self):\n f = open('Specie_numbers.txt','w')\n f.write('####### Specie numbers of all the participating species########\\n')\n f.write('S.No Time Events CuI CuII O2 NO NH3\\n')\n f.close()\n \n def init_proc(self):\n f = open('Process_statistics.txt','w')\n f.write('###### Process counters of all the occuring processes###########\\n')\n f.write('S.No Time Events Pairing Reduction\\n')\n f.close()\n \n \n \n"
] | [
[
"numpy.multiply",
"numpy.array",
"numpy.sqrt"
]
] |
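The `seed_box` routine above is, at its core, rejection sampling under a minimum-distance constraint: candidate lattice sites are redrawn until the new Cu sits at least 4 Å from every Cu already placed (a proxy for Löwenstein's rule). Below is a minimal NumPy sketch of that loop; the random box positions, the `rng` seed, the site count, and the use of plain Euclidean distance in place of ASE's periodic `get_distance(..., mic=True)` are all illustrative assumptions, not values from the original script.

```python
import numpy as np

rng = np.random.default_rng(42)                     # plays the role of seed(self.rseed)
positions = rng.uniform(0.0, 20.0, size=(500, 3))   # stand-in for the repeated-lattice sites
cutoff = 4.0                                        # same threshold as round(d, 2) < 4
n_seed = 10

chosen = []
while len(chosen) < n_seed:
    ind = int(rng.integers(len(positions)))
    # Reject the candidate if it sits within `cutoff` of any already-seeded site
    # (a repeated index gives distance 0 and is rejected automatically).
    if all(np.linalg.norm(positions[ind] - positions[j]) >= cutoff for j in chosen):
        chosen.append(ind)
        print('Seed_on_site:', len(chosen))

print('seeded site indices:', chosen)
```

Note the loop can stall if the cutoff is unsatisfiable for the requested number of seeds, which is one reason the original prints progress for every seeded site.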
ocmadin/RJMC_2CLJQ | [
"8034cb676de429146caa1e5f080999f07f802543"
] | [
"rjmc_testcase_nested_squares.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 20 18:42:58 2018\n\n@author: owenmadin\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 20 11:33:46 2018\n\n@author: owenmadin\n\"\"\"\n\n\"\"\"\nThis code performs an RJMC model selection problem over three square regions of uniform probability, defined as squares of side length 1,2 and 5\nwith one corner at (0,0) and another at (s,s). The uniform probability is defined as positive inside each square, and zero outside each square.\n\n\"\"\"\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport yaml\nfrom LennardJones_correlations import LennardJones\nfrom LennardJones_2Center_correlations import LennardJones_2C\nfrom scipy.stats import distributions\nfrom scipy.stats import linregress\nfrom scipy.optimize import minimize\nimport random as rm\n\n# Define probabilities for both regions\n\ndef test_pdf_1(model,x,y):\n if model == 0:\n if 0 <= x <= 1 and 0 <= y <= 1:\n f=5\n else:\n f=0\n if model == 1:\n if 0 <= x <= 2 and 0 <= y <= 2:\n f=5\n else:\n f=0 \n if model == 2:\n if 0 <= x <= 5 and 0 <= y <= 5:\n f=5\n else:\n f=0 \n return f\n\n\ndef test_pdf_2(model,x,y):\n if model == 0:\n if 0 <= x <= 1 and 0 <= y <= 1:\n f=5\n else:\n f=0\n if model == 1:\n if 1 <= x <= 3 and 1 <= y <= 3:\n f=5\n else:\n f=0 \n \n if model == 2:\n if 3 <= x <= 6 and 3 <= y <= 6:\n f=5\n else:\n f=0 \n \n return f\n \ndnorm = distributions.norm.logpdf\ndgamma = distributions.gamma.logpdf\nduni = distributions.uniform.logpdf\n\nrnorm = np.random.normal\nrunif = np.random.rand\n\n#Define log priors for the distributions. For now, we will use a uniform prior on (10,10), but we may want to change the prior for different models in the future\n\ndef calc_posterior(model,x,y):\n \n logp = 0\n logp += duni(x, 0, 10)\n logp += duni(y, 0, 10)\n \n #prop_density=test_pdf_1(model,x,y)\n prop_density=test_pdf_2(model,x,y)\n logp += np.log(prop_density)\n \n return logp\n\n\ndef T_matrix_scale_one():\n T_matrix_x_scale=np.ones((3,3))\n T_matrix_y_scale=np.ones((3,3))\n T_matrix_x_scale[0,1]=3\n T_matrix_y_scale[0,1]=3\n T_matrix_x_scale[1,0]=1./3\n T_matrix_y_scale[1,0]=1./3\n T_matrix_x_scale[0,2]=6\n T_matrix_y_scale[0,2]=6\n T_matrix_x_scale[2,0]=1./6\n T_matrix_y_scale[2,0]=1./6\n T_matrix_x_scale[1,2]=6./3\n T_matrix_y_scale[1,2]=6./3\n T_matrix_x_scale[2,1]=3./6\n T_matrix_y_scale[2,1]=3./6\n return T_matrix_x_scale, T_matrix_y_scale\n\ndef T_matrix_scale_two():\n T_matrix_x_scale=np.ones((3,3))\n T_matrix_y_scale=np.ones((3,3))\n T_matrix_x_scale[0,1]=4\n T_matrix_x_scale[1,0]=1./4\n T_matrix_x_scale[0,2]=9\n T_matrix_x_scale[2,0]=1./9\n T_matrix_x_scale[1,2]=9/4\n T_matrix_x_scale[2,1]=4./9\n T_matrix_y_scale[0,1]=4\n T_matrix_y_scale[1,0]=1./4\n T_matrix_y_scale[0,2]=9\n T_matrix_y_scale[2,0]=1./9\n T_matrix_y_scale[1,2]=9./4\n T_matrix_y_scale[2,1]=4./9\n return T_matrix_x_scale, T_matrix_y_scale\n\n\n\ndef T_matrix_translation():\n T_matrix_x=np.zeros((2,2))\n T_matrix_y=np.zeros((2,2))\n T_matrix_x[0,1]=2\n T_matrix_y[0,1]=2\n T_matrix_x[1,0]=-2\n T_matrix_y[1,0]=-2\n return T_matrix_x, T_matrix_y\n \nT_matrix_x_scale_1, T_matrix_y_scale_1 = T_matrix_scale_one()\nT_matrix_x_scale_2, T_matrix_y_scale_2 = T_matrix_scale_two()\n\ndef RJMC_tuned(calc_posterior,n_iterations, initial_values, prop_var, \n tune_for=None, tune_interval=1, map_scale='False'):\n \n n_params = len(initial_values) #One column is the model number\n \n # Initial proposal standard 
deviations\n prop_sd = prop_var\n \n # Initialize trace for parameters\n trace = np.zeros((n_iterations+1, n_params)) #n_iterations + 1 to account for guess\n logp_trace = np.zeros(n_iterations+1)\n # Set initial values\n trace[0] = initial_values\n\n # Initialize acceptance counts\n accepted = [0]*n_params\n rejected = [0]*n_params\n \n model_swaps = 0\n model_swap_attempts = 0\n swap_freq = 1\n swap_flag='False'\n # OCM: Currently attempting a model swap every single move, although this can be easily changed. This is something that is not of critical importance now but will be important in the future.\n \n # Calculate joint posterior for initial values\n current_log_prob = calc_posterior(*trace[0])\n \n logp_trace[0] = current_log_prob\n #OCM: This is just the priors at this point.\n \n if tune_for is None:\n tune_for = n_iterations/2\n \n for i in range(n_iterations):\n swap_flag='False'\n if not i%1000: print('Iteration '+str(i))\n \n # Grab current parameter values\n current_params = trace[i].copy()\n trace[i+1] = current_params.copy() #Initialize the next step with the current step. Then update if MCMC move is accepted\n current_model = int(current_params[0])\n logp_trace[i+1] = current_log_prob.copy()\n \n # Loop through model parameters\n \n for j in range(n_params):\n \n # Get current value for parameter j\n params = current_params.copy() # This approach updates previous param values\n \n # Propose new values\n if j == 0: #If proposing a new model\n if not i%swap_freq:\n mod_ran = np.random.random()\n if mod_ran < 1./3: #Use new models with equal probability\n proposed_model = 0\n elif mod_ran >= 2./3:\n proposed_model = 1\n else: \n proposed_model = 2\n if proposed_model != current_model:\n model_swap_attempts += 1\n params[0] = proposed_model\n if map_scale=='True':\n params[1] *= T_matrix_x_scale_1[current_model,proposed_model]\n params[2] *= T_matrix_y_scale_1[current_model,proposed_model]\n \n #params[1] *= T_matrix_x_scale_2[current_model,proposed_model]\n #params[2] *= T_matrix_y_scale_2[current_model,proposed_model]\n '''\n else: \n params[1] += T_matrix_x[current_model,proposed_model]\n params[2] += T_matrix_y[current_model,proposed_model]\n # Calculate log posterior with proposed value\n '''\n proposed_log_prob = calc_posterior(*params)\n \n # Log-acceptance rate\n alpha = (proposed_log_prob - current_log_prob) + np.log(T_matrix_x_scale_1[current_model,proposed_model]) + np.log(T_matrix_y_scale_1[current_model,proposed_model])\n #alpha = (proposed_log_prob - current_log_prob) + np.log(T_matrix_x_scale_2[current_model,proposed_model]) + np.log(T_matrix_y_scale_2[current_model,proposed_model])\n urv = runif()\n \n # Test proposed value\n if np.log(urv) < alpha:\n \n # Accept\n trace[i+1] = params\n logp_trace[i+1] = proposed_log_prob.copy()\n current_log_prob = proposed_log_prob.copy()\n current_params = params\n accepted[j] += 1\n if j == 0:\n if proposed_model != current_model:\n model_swaps += 1\n swap_flag = 'True'\n else:\n if swap_flag=='False':\n params[j] = rnorm(current_params[j], prop_sd[j])\n \n # Calculate log posterior with proposed value\n proposed_log_prob = calc_posterior(*params)\n \n # Log-acceptance rate\n alpha = (proposed_log_prob - current_log_prob)\n \n \n #OCM: The two components of the acceptance ratio here are the log of the ratio of the probabilities, and the log of the jacobian determinant between the model spaces\n \n\n \n \n \n # Sample a uniform random variate (urv)\n urv = runif()\n \n # Test proposed value\n if np.log(urv) < alpha:\n \n # 
Accept\n trace[i+1] = params\n logp_trace[i+1] = proposed_log_prob.copy()\n current_log_prob = proposed_log_prob.copy()\n current_params = params\n accepted[j] += 1\n #if j == 0:\n # if proposed_model != current_model:\n # model_swaps += 1\n \n else:\n # Reject\n rejected[j] += 1\n \n '''\n # Tune every 100 iterations\n if (not (i+1) % tune_interval) and (i < tune_for) and j != 0:\n\n acceptance_rate = (1.*accepted[j])/tune_interval \n if acceptance_rate<0.2:\n prop_sd[j] *= 0.9\n elif acceptance_rate>0.5:\n prop_sd[j] *= 1.1 \n\n #print(prop_sd[j])\n accepted[j] = 0 \n'''\n accept_prod = np.array(accepted)/(np.array(accepted)+np.array(rejected)) \n\n print('Proposed standard deviations are: '+str(prop_sd))\n \n return trace, trace[tune_for:], logp_trace, logp_trace[tune_for:],accept_prod, model_swaps, model_swap_attempts\n\n# Set the number of iterations to run RJMC and how long to tune for\nn_iter = 50000 # 20000 appears to be sufficient\ntune_for = 10000 #10000 appears to be sufficient\nguess_0=[0,0.5,0.5]\nguess_var=[1,0.1,0.1]\ntrace_all,trace_tuned,logp_all,logp_tuned, acc_tuned, model_swaps, model_swap_attempts = RJMC_tuned(calc_posterior, n_iter, guess_0, prop_var=guess_var, tune_for=tune_for,map_scale='True')\n\n\nprint('Acceptance Rate during production for eps, sig: '+str(acc_tuned[1:]))\n\nprint('Acceptance model swap during production: '+str(model_swaps/model_swap_attempts))\n\n#OCM: Something is wrong with this as it is greater than one, which shouldn't be possible. Probably just a calculation error that doesn't affect RJMC\n\nmodel_params = trace_all\n\n# Converts the array with number of model parameters into an array with the number of times there was 1 parameter or 2 parameters\nmodel_count = np.array([len(model_params[model_params==0]),len(model_params[model_params==1]),len(model_params[model_params==2])])\n\n\nprob_0 = 1.*model_count[0]/(n_iter)\nprint('Percent that model 0 is sampled: '+str(prob_0 * 100.)) #The percent that use 1 parameter model\n\nprob_1 = 1.*model_count[1]/(n_iter)\nprint('Percent that model 1 is sampled: '+str(prob_1 * 100.)) #The percent that use two center UA LJ\n\nprob_2 = 1.*model_count[2]/(n_iter)\nprint('Percent that model 2 is sampled: '+str(prob_2 * 100.)) #The percent that use two center AUA LJ\n \n\nf = plt.figure()\nplt.scatter(trace_all[:,2],trace_all[:,1],label='Trajectory')\nplt.legend()\nplt.show()\nplt.plot(trace_all[:,0],label='Model Choice')\nplt.legend()\nplt.show()"
] | [
[
"numpy.array",
"numpy.log",
"numpy.zeros",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"numpy.random.random"
]
] |
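The key line in `RJMC_tuned` above is the acceptance ratio for a model swap: because the cross-model proposal is a deterministic rescaling x' = s_x·x, y' = s_y·y, the log of the Jacobian determinant, log s_x + log s_y, is added to the log posterior ratio. A hedged sketch of just that test follows; the log-probabilities are toy numbers, and the scales correspond to `T_matrix_scale_one`'s model 1 → model 2 entry (6/3 = 2).

```python
import numpy as np

def log_accept_ratio(logp_proposed, logp_current, s_x, s_y):
    # Log posterior ratio plus the log Jacobian of the deterministic map
    # (x, y) -> (s_x * x, s_y * y) between model spaces.
    return (logp_proposed - logp_current) + np.log(s_x) + np.log(s_y)

rng = np.random.default_rng(0)
alpha = log_accept_ratio(logp_proposed=-1.2, logp_current=-1.5, s_x=2.0, s_y=2.0)
accept = np.log(rng.random()) < alpha   # same test as np.log(urv) < alpha in the script
print(alpha, accept)
```

Without the Jacobian term the sampler would over-visit the smaller squares; correcting for the volume change is exactly what the scale matrices are for.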
heiya7/tfnn | [
"19d604e11e5d76461570d7cc5dd105e1d9ed1964"
] | [
"tfnn/evaluating/evaluator.py"
] | [
"import tfnn\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tfnn.evaluating.scalar_monitor import ScaleMonitor\nfrom tfnn.evaluating.layer_monitor import LayerMonitor\nfrom tfnn.evaluating.data_fitting_monitor import DataFittingMonitor\nfrom tfnn.evaluating.line_fitting_monitor import LineFittingMonitor\nplt.style.use('ggplot')\n\n\nclass Evaluator(object):\n def __init__(self, network, ):\n self.network = network\n if isinstance(self.network, tfnn.RegNetwork):\n self._set_r2()\n if isinstance(self.network, tfnn.ClfNetwork):\n self._set_confusion_metrics()\n self._set_accuracy()\n\n def compute_scores(self, scores, xs, ys):\n if isinstance(scores, str):\n scores = [scores]\n if not isinstance(scores, (list, tuple)):\n raise TypeError('Scores must be a string or a tuple or a list of strings')\n scores_ops = []\n for score in scores:\n score = score.lower()\n if score == 'r2':\n scores_ops.append(self.r2)\n elif score == 'cost':\n scores_ops.append(self.network.loss)\n elif score == 'f1':\n scores_ops.append(self.f1)\n elif score == 'recall':\n scores_ops.append(self.recall)\n elif score == 'precision':\n scores_ops.append(self.precision)\n elif score == 'accuracy':\n scores_ops.append(self.accuracy)\n else:\n raise ValueError('Do not have %s score' % score)\n feed_dict = self.get_feed_dict(xs, ys)\n return self.network.sess.run(scores_ops, feed_dict=feed_dict)\n\n def compute_r2(self, xs, ys):\n feed_dict = self.get_feed_dict(xs, ys)\n return self.r2.eval(feed_dict, self.network.sess)\n\n def compute_accuracy(self, xs, ys):\n # ignore dropout and regularization\n if not isinstance(self.network, tfnn.ClfNetwork):\n raise NotImplementedError('Can only compute accuracy for Classification neural network.')\n feed_dict = self.get_feed_dict(xs, ys)\n return self.accuracy.eval(feed_dict, self.network.sess)\n\n def compute_cost(self, xs, ys):\n feed_dict = self.get_feed_dict(xs, ys)\n return self.network.loss.eval(feed_dict, self.network.sess)\n\n def compute_f1(self, xs, ys):\n feed_dict = self.get_feed_dict(xs, ys)\n return self.f1.eval(feed_dict, self.network.sess)\n\n def set_scale_monitor(self, objects, figsize=(10, 10), sleep=0.001):\n \"\"\"\n :param objects: a list. A list like ['cost', 'r2'];\n :param grid_space: a tuple or list of (max_rows, max_cols);\n :return: Monitor\n \"\"\"\n if isinstance(objects, (tuple, list)):\n grid_space = (len(objects), 1)\n else:\n raise ValueError(\"\"\"objects should be a a list or dictionary. A list like ['cost', 'r2'].\n Not a %s\"\"\" % type(objects))\n if isinstance(self.network, tfnn.ClfNetwork):\n if 'r2' in objects:\n raise ValueError('r2 score is not used for classification networks')\n if isinstance(self.network, tfnn.RegNetwork):\n if ('accuracy' in objects) or ('f1' in objects):\n raise ValueError('accuracy or f1 score are not used for regression networks')\n self.scale_monitor = ScaleMonitor(grid_space, objects, self, figsize, sleep)\n return self.scale_monitor\n\n def set_layer_monitor(self, objects, figsize=(13, 10), cbar_range=(-1, 1), cmap='rainbow',\n sleep=0.001):\n if isinstance(objects, (tuple, list)):\n grid_space = (2, len(objects)+1)\n else:\n raise ValueError(\"\"\"objects should be a a list or dictionary. 
A list of layer index like\n [0, 1, 3].\n Not a %s\"\"\" % type(objects))\n self.layer_monitor = LayerMonitor(grid_space, objects, self, figsize, cbar_range, cmap, sleep)\n return self.layer_monitor\n\n def set_data_fitting_monitor(self, figsize=(8, 7), sleep=0.001):\n \"\"\"\n Suitable for analysing the preprocessing with only one output unit.\n :param v_xs: validated xs\n :param v_ys: validated ys (single attribute)\n :param continue_plot: True or False\n :return: Plotting\n \"\"\"\n if not isinstance(self.network, tfnn.RegNetwork):\n raise NotImplementedError('Can only plot for Regression neural network.')\n self.data_fitting_monitor = DataFittingMonitor(self, figsize, sleep)\n return self.data_fitting_monitor\n\n def set_line_fitting_monitor(self, figsize=(8, 7), sleep=0.001):\n if not isinstance(self.network, tfnn.RegNetwork):\n raise NotImplementedError('Can only plot this result for Regression neural network.')\n self.line_fitting_monitor = LineFittingMonitor(self, figsize, sleep)\n return self.line_fitting_monitor\n\n def monitoring(self, t_xs, t_ys, **kwargs):\n if hasattr(self, 'scale_monitor'):\n v_xs, v_ys = kwargs['v_xs'], kwargs['v_ys']\n self.scale_monitor.monitoring(t_xs, t_ys, v_xs, v_ys)\n if hasattr(self, 'layer_monitor'):\n self.layer_monitor.monitoring(t_xs, t_ys)\n if hasattr(self, 'data_fitting_monitor'):\n v_xs, v_ys = kwargs['v_xs'], kwargs['v_ys']\n self.data_fitting_monitor.monitoring(v_xs, v_ys)\n if hasattr(self, 'line_fitting_monitor'):\n self.line_fitting_monitor.monitoring(t_xs, t_ys)\n\n def get_feed_dict(self, xs, ys):\n if self.network.reg == 'dropout':\n feed_dict = {self.network.data_placeholder: xs,\n self.network.target_placeholder: ys,\n self.network.keep_prob_placeholder: 1.}\n elif self.network.reg == 'l2':\n feed_dict = {self.network.data_placeholder: xs,\n self.network.target_placeholder: ys,\n self.network.l2_placeholder: 0.}\n else:\n feed_dict = {self.network.data_placeholder: xs,\n self.network.target_placeholder: ys}\n return feed_dict\n\n @staticmethod\n def hold_plot():\n print('Press any key to exit...')\n plt.ioff()\n plt.waitforbuttonpress()\n plt.close()\n\n def _set_accuracy(self):\n if isinstance(self.network, tfnn.ClfNetwork):\n with tfnn.name_scope('accuracy'):\n correct_prediction = tfnn.equal(\n tfnn.argmax(self.network.predictions, 1),\n tfnn.argmax(self.network.target_placeholder, 1),\n name='correct_prediction')\n self.accuracy = tfnn.reduce_mean(\n tfnn.cast(correct_prediction, tfnn.float32), name='accuracy')\n tfnn.scalar_summary('accuracy', self.accuracy)\n\n def _set_r2(self):\n if isinstance(self.network, tfnn.RegNetwork):\n with tfnn.name_scope('r2_score'):\n self.ys_mean = ys_mean = tfnn.reduce_mean(self.network.target_placeholder,\n reduction_indices=[0],\n name='ys_mean')\n self.ss_tot = ss_tot = tfnn.reduce_sum(\n tfnn.square(self.network.target_placeholder - ys_mean),\n reduction_indices=[0], name='total_sum_squares')\n # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)\n self.ss_res = ss_res = tfnn.reduce_sum(\n tfnn.square(self.network.target_placeholder - self.network.predictions),\n reduction_indices=[0], name='residual_sum_squares')\n self.aaa = ss_res / ss_tot\n self.r2 = tfnn.reduce_mean(\n tfnn.sub(tfnn.ones_like(ss_res, dtype=tfnn.float32), (ss_res / ss_tot)),\n name='coefficient_of_determination')\n tfnn.scalar_summary('r2_score', self.r2)\n\n def _set_confusion_metrics(self):\n # from https://cloud.google.com/solutions/machine-learning-with-financial-time-series-data\n # for onehot data\n with 
tfnn.name_scope('f1_score'):\n predictions = tfnn.argmax(self.network.predictions, 1)\n actuals = tfnn.argmax(self.network.target_placeholder, 1)\n\n ones_like_actuals = tfnn.ones_like(actuals)\n zeros_like_actuals = tfnn.zeros_like(actuals)\n ones_like_predictions = tfnn.ones_like(predictions)\n zeros_like_predictions = tfnn.zeros_like(predictions)\n\n tp = tfnn.reduce_sum(\n tfnn.cast(\n tfnn.logical_and(\n tfnn.equal(actuals, ones_like_actuals),\n tfnn.equal(predictions, ones_like_predictions)\n ), \"float\"))\n\n tn = tfnn.reduce_sum(\n tfnn.cast(\n tfnn.logical_and(\n tfnn.equal(actuals, zeros_like_actuals),\n tfnn.equal(predictions, zeros_like_predictions)\n ), \"float\"))\n\n fp = tfnn.reduce_sum(\n tfnn.cast(\n tfnn.logical_and(\n tfnn.equal(actuals, zeros_like_actuals),\n tfnn.equal(predictions, ones_like_predictions)\n ), \"float\"))\n\n fn = tfnn.reduce_sum(\n tfnn.cast(\n tfnn.logical_and(\n tfnn.equal(actuals, ones_like_actuals),\n tfnn.equal(predictions, zeros_like_predictions)\n ), \"float\"))\n\n self.recall = tp / (tp + fn)\n self.precision = tp / (tp + fp)\n\n self.f1 = tfnn.div(2 * (self.precision * self.recall),\n (self.precision + self.recall), name='f1_score')\n tfnn.scalar_summary('f1_score', self.f1)\n tfnn.scalar_summary('precision', self.precision)\n tfnn.scalar_summary('recall', self.recall)"
] | [
[
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.waitforbuttonpress",
"matplotlib.pyplot.close",
"matplotlib.pyplot.style.use"
]
] |
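`_set_r2` above builds the coefficient of determination as a TensorFlow graph; the arithmetic is easier to check in plain NumPy. A small mirror with toy single-output data (the values are illustrative only), reducing along the batch axis just as `reduction_indices=[0]` does in the tfnn code:

```python
import numpy as np

targets = np.array([[3.0], [-0.5], [2.0], [7.0]])
predictions = np.array([[2.5], [0.0], [2.0], [8.0]])

# r2 = mean over outputs of 1 - SS_res / SS_tot, sums taken over the batch axis.
ss_tot = np.sum((targets - targets.mean(axis=0)) ** 2, axis=0)
ss_res = np.sum((targets - predictions) ** 2, axis=0)
r2 = np.mean(1.0 - ss_res / ss_tot)
print(r2)  # ~0.9486 for this toy data
```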
rakib045/TCarto | [
"3c3d92d09ab940e4ba89d1fc67bc30930d377a25"
] | [
"PrescribedAreaDrawing.py"
] | [
"#Best you use Anaconda. If you do not have quadprog, you have to\n#install it.. in anaconda you can use this '-c omnia quadprog '\nimport numpy as np\nimport random\n#import quadprog\nfrom sympy.geometry import *\nfrom PIL import Image, ImageDraw\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom time import time\nimport math as m\nfrom energyMinimization import *\n\n######################################\n# Node class; each node has a name and location\n######################################\n#build the node class\nclass node:\n movable = True\n def __init__(self, name, loc):\n self.name = name\n self.loc = loc\n\n\n######################################\n# INPUT: A table with randomly filled positive numbers\n# The sum of the numbers is (totalnode-1)*(totalnode-1)\n# There is also a list of nodes that must not move\n######################################\n\n# the variable totalnode represents the size of the table\n# right now it is a (totalnode-1)*(totalnode-1) table\n# the following loop creates a bunch of nodes, each at point i,j\n# the name of a node at point i,j is ij\n\n\n#python PrescribedAreaDrawing.py 64 5 \"input/PBLH_10_new_grid64_64.txt\" \"SingleThread_PBLH_10_new_grid64_64\"\n#python PrescribedAreaDrawing.py 64 5 \"input/TCarto_checker_data_8_8.txt\" \"SingleThread_TCarto_checker_data_8_8\"\n# First param = python file name\n# Second Param = square grid\n# Third Param = Count of Iteration\n# Forth Param = Input Data File\n# Fifth Param = Output File Name\n'''\nsquare_grid = int(sys.argv[1])\niteration = int(sys.argv[2])\ninput_data_file = sys.argv[3]\noutput_img_filename = sys.argv[4]\n'''\n'''\nsquare_grid = 16\n#iteration = int(m.log(square_grid, 2))\niteration = 5\ninput_data_file = \"input/data_cat_16_16.txt\"\noutput_img_filename = \"SingleThread_Gaussian_16_16\"\n'''\n\nsquare_grid = 2\niteration = 40\ninput_data_file = \"input/TCarto_checker_data_8_8.txt\"\noutput_img_filename = \"DivideAndConq_TCarto_checker_data_8_8\"\n\ngrid_count_horizontal = square_grid\ngrid_count_vertical = square_grid\n\n\n\noutput_image_size = [1024, 1024]\nis_max_heiristic = True\nboundary_node_movement = True\ninput_img_file = \"input/cats.jpg\"\n\n\ntotal_algo_processing_time = []\n\n\n######## Node Generation ########\nnodes = grid_node_generation(node, grid_count_horizontal, grid_count_vertical)\n######## Node Generation ########\n\n\n#the following creates a matrix that contains the cell values\n#the cell values are currently randomly assigned\n\n\n\n\n##### Reading from Input File ###########\nvalues = read_text_file(input_data_file, grid_count_horizontal, grid_count_vertical)\n\n\n#nodes that should not move\nnodes[0][0].movable = False\nnodes[0][grid_count_vertical].movable = False\nnodes[grid_count_horizontal][0].movable = False\nnodes[grid_count_horizontal][grid_count_vertical].movable = False\n#print(nodes)\n\n\n#####################################################################################\n\n\n\n#################################\n# Algorithm: Prescribed_Area_Drawing\n#################################\n# This is just like Tutte's algorithm\n#for each node update its location\n\n\n\nprint(\"Algorithm Started for \" + str(grid_count_horizontal) + \"_by_\" + str(grid_count_vertical))\n\n#poly_draw(output_img_filename,0, output_image_size, nodes, grid_count_horizontal, grid_count_vertical)\nmin_boundary_p_dist = 0.1\n\n\nout_file_name = \"output/out_log_\" + output_img_filename + \".txt\"\noutput_txt_file = open(out_file_name, 
\"w\")\noutput_txt_file.write(\"Iteration, |UV-EV|/EV, UV/EV - 1, RMSE, MQE = (((|UV-EV|/EV) ** 2) ** 0.5)/N, Updated MQE = (((|UV-EV|/(UV+EV)) ** 2) ** 0.5)/N, Iteration Time (sec)\\n\")\noutput_txt_file.close()\n\n\nfor x in range(iteration):\n print(\"------------------------------------------\")\n print('iteration: ' + str(x+1) + '(out of ' + str(iteration) + '): ')\n\n updated = 1\n iteration_start_time = time()\n\n for i in range(grid_count_horizontal + 1):\n for j in range(grid_count_vertical + 1):\n #if the node is in the skip nodelist then do not move those nodes\n\n if(nodes[i][j].movable == False):\n continue\n elif(i == 0):\n\n p_top = nodes[i][j + 1]\n p_middle = nodes[i][j]\n p_bottom = nodes[i][j - 1]\n\n p_top_right = nodes[i + 1][j + 1]\n p_right = nodes[i + 1][j]\n p_bottom_right = nodes[i + 1][j - 1]\n\n poly1 = Polygon(p_middle.loc, p_bottom.loc, p_bottom_right.loc, p_right.loc)\n A1 = poly1.area\n\n poly2 = Polygon(p_middle.loc, p_right.loc, p_top_right.loc, p_top.loc)\n A2 = poly2.area\n\n V1 = values[i][j-1]\n V2 = values[i][j]\n\n # p_right is the corresponding inside node and line is x=0\n changed_y = updateBoundaryNode(p_right.loc, A1, A2, V1, V2, 1, 0, 0)\n updated_y = nodes[i][j].loc[1] + changed_y\n\n if updated_y < p_bottom.loc[1] + min_boundary_p_dist:\n updated_y = p_bottom.loc[1] + min_boundary_p_dist\n\n if updated_y > p_top.loc[1] - min_boundary_p_dist:\n updated_y = p_top.loc[1] - min_boundary_p_dist\n\n BR_V_Line = Line(p_bottom_right.loc, p_right.loc)\n TR_V_Line = Line(p_right.loc, p_top_right.loc)\n\n val = Point2D(nodes[i][j].loc[0], updated_y)\n\n checkSignVal_1 = isSatisfyInEquility(val, BR_V_Line)\n checkSignVal_2 = isSatisfyInEquility(val, TR_V_Line)\n\n if checkSignVal_1 >= 0 and checkSignVal_2 >= 0 and boundary_node_movement:\n poly_1 = Polygon(val, p_bottom.loc, p_bottom_right.loc, p_right.loc)\n poly_2 = Polygon(val, p_right.loc, p_top_right.loc, p_top.loc)\n\n if poly_1.is_convex() and poly_2.is_convex():\n nodes[i][j].loc = val\n\n continue\n elif(i == grid_count_horizontal):\n #TODO: Replace 'continue' with your own code\n\n p_top = nodes[i][j + 1]\n p_middle = nodes[i][j]\n p_bottom = nodes[i][j - 1]\n\n p_top_left = nodes[i - 1][j + 1]\n p_left = nodes[i - 1][j]\n p_bottom_left = nodes[i - 1][j - 1]\n\n poly1 = Polygon(p_middle.loc, p_left.loc, p_bottom_left.loc, p_bottom.loc)\n A1 = poly1.area\n\n poly2 = Polygon(p_middle.loc, p_top.loc, p_top_left.loc, p_left.loc)\n A2 = poly2.area\n\n V1 = values[i-1][j-1]\n V2 = values[i-1][j]\n\n # p_right is the corresponding inside node and line is x=0\n changed_y = updateBoundaryNode(p_left.loc, A1, A2, V1, V2, 1, 0, -grid_count_horizontal)\n updated_y = nodes[i][j].loc[1] + changed_y\n\n if updated_y < p_bottom.loc[1] + min_boundary_p_dist:\n updated_y = p_bottom.loc[1] + min_boundary_p_dist\n\n if updated_y > p_top.loc[1] - min_boundary_p_dist:\n updated_y = p_top.loc[1] - min_boundary_p_dist\n\n BL_V_Line = Line(p_left.loc, p_bottom_left.loc)\n TL_V_Line = Line(p_top_left.loc, p_left.loc)\n\n val = Point2D(nodes[i][j].loc[0], updated_y)\n\n checkSignVal_1 = isSatisfyInEquility(val, BL_V_Line)\n checkSignVal_2 = isSatisfyInEquility(val, TL_V_Line)\n\n\n if checkSignVal_1 <= 0 and checkSignVal_2 <= 0 and boundary_node_movement:\n poly_1 = Polygon(val, p_left.loc, p_bottom_left.loc, p_bottom.loc)\n poly_2 = Polygon(val, p_top.loc, p_top_left.loc, p_left.loc)\n\n if poly_1.is_convex() and poly_2.is_convex():\n nodes[i][j].loc = val\n continue\n elif(j == 0):\n #TODO: Replace 'continue' with your 
own code\n\n p_top_left = nodes[i - 1][j + 1]\n p_left = nodes[i - 1][j]\n\n p_top = nodes[i][j + 1]\n p_middle = nodes[i][j]\n\n p_top_right = nodes[i + 1][j + 1]\n p_right = nodes[i + 1][j]\n\n poly1 = Polygon(p_middle.loc, p_top.loc, p_top_left.loc, p_left.loc)\n A1 = poly1.area\n\n poly2 = Polygon(p_middle.loc, p_right.loc, p_top_right.loc, p_top.loc)\n A2 = poly2.area\n\n V1 = values[i-1][j]\n V2 = values[i][j]\n\n changed_x = updateBoundaryNode(p_top.loc, A1, A2, V1, V2, 0, 1, 0)\n updated_x = nodes[i][j].loc[0] + changed_x\n\n if updated_x < p_left.loc[0] + min_boundary_p_dist:\n updated_x = p_left.loc[0] + min_boundary_p_dist\n\n if updated_x > p_right.loc[0] - min_boundary_p_dist:\n updated_x = p_right.loc[0] - min_boundary_p_dist\n\n TL_H_Line = Line(p_top_left.loc, p_top.loc)\n TR_H_Line = Line(p_top.loc, p_top_right.loc)\n\n val = Point2D(updated_x, nodes[i][j].loc[1])\n\n checkSignVal_1 = isSatisfyInEquility(val, TL_H_Line)\n checkSignVal_2 = isSatisfyInEquility(val, TR_H_Line)\n\n if checkSignVal_1 >= 0 and checkSignVal_2 >= 0 and boundary_node_movement:\n poly_1 = Polygon(val, p_top.loc, p_top_left.loc, p_left.loc)\n poly_2 = Polygon(val, p_right.loc, p_top_right.loc, p_top.loc)\n\n if poly_1.is_convex() and poly_2.is_convex():\n nodes[i][j].loc = val\n continue\n elif(j == grid_count_vertical):\n #TODO: Replace 'continue' with your own code\n\n p_left = nodes[i - 1][j]\n p_bottom_left = nodes[i - 1][j - 1]\n\n p_middle = nodes[i][j]\n p_bottom = nodes[i][j - 1]\n\n p_right = nodes[i + 1][j]\n p_bottom_right = nodes[i + 1][j - 1]\n\n poly1 = Polygon(p_middle.loc, p_left.loc, p_bottom_left.loc, p_bottom.loc)\n A1 = poly1.area\n\n poly2 = Polygon(p_middle.loc, p_bottom.loc, p_bottom_right.loc, p_right.loc)\n A2 = poly2.area\n\n V1 = values[i-1][j-1]\n V2 = values[i][j-1]\n\n changed_x = updateBoundaryNode(p_bottom.loc, A1, A2, V1, V2, 0, 1, -grid_count_vertical)\n updated_x = nodes[i][j].loc[0] + changed_x\n\n if updated_x < p_left.loc[0] + min_boundary_p_dist:\n updated_x = p_left.loc[0] + min_boundary_p_dist\n\n if updated_x > p_right.loc[0] - min_boundary_p_dist:\n updated_x = p_right.loc[0] - min_boundary_p_dist\n\n BL_H_Line = Line(p_bottom_left.loc, p_bottom.loc)\n BR_H_Line = Line(p_bottom.loc, p_bottom_right.loc)\n\n val = Point2D(updated_x, nodes[i][j].loc[1])\n\n checkSignVal_1 = isSatisfyInEquility(val, BL_H_Line)\n checkSignVal_2 = isSatisfyInEquility(val, BR_H_Line)\n\n\n if checkSignVal_1 <= 0 and checkSignVal_2 <= 0 and boundary_node_movement:\n poly_1 = Polygon(val, p_left.loc, p_bottom_left.loc, p_bottom.loc)\n poly_2 = Polygon(val, p_bottom.loc, p_bottom_right.loc, p_right.loc)\n\n if poly_1.is_convex() and poly_2.is_convex():\n nodes[i][j].loc = val\n\n continue\n else:\n p_top_left = nodes[i - 1][j + 1]\n p_left = nodes[i - 1][j]\n p_bottom_left = nodes[i - 1][j - 1]\n\n p_top = nodes[i][j + 1]\n p_middle = nodes[i][j]\n p_bottom = nodes[i][j - 1]\n\n p_top_right = nodes[i + 1][j + 1]\n p_right = nodes[i + 1][j]\n p_bottom_right = nodes[i + 1][j - 1]\n\n val_TL = values[i - 1][j]\n val_BL = values[i - 1][j - 1]\n val_TR = values[i][j]\n val_BR = values[i][j - 1]\n\n if x == 34 or x == 33:\n temp = 10\n\n val = updateNode([p_top_left, p_left, p_bottom_left,\n p_top, p_middle, p_bottom,\n p_top_right, p_right, p_bottom_right],\n [val_TL, val_BL, val_TR, val_BR])\n\n if (val[0] == -1 and val[1] == -1):\n continue\n\n poly_1 = Polygon(val, p_top.loc, p_top_left.loc, p_left.loc)\n poly_2 = Polygon(val, p_left.loc, p_bottom_left.loc, p_bottom.loc)\n poly_3 = 
Polygon(val, p_bottom.loc, p_bottom_right.loc, p_right.loc)\n poly_4 = Polygon(val, p_right.loc, p_top_right.loc, p_top.loc)\n\n if poly_1.is_convex() and poly_2.is_convex() and poly_3.is_convex() and poly_4.is_convex():\n nodes[i][j].loc = val\n\n #poly_draw(output_img_filename, x+1, output_image_size, nodes, grid_count_horizontal, grid_count_vertical)\n #print(\"Updated val : \" + str(val.x) + \", \" + str(val.y))\n\n iteration_end_time = time()\n estimation_time = iteration_end_time - iteration_start_time\n\n total_algo_processing_time.append(estimation_time)\n poly_draw(output_img_filename, x+1, output_image_size, nodes, grid_count_horizontal, grid_count_vertical)\n\n all_error_calc(values, nodes, grid_count_horizontal, grid_count_vertical, estimation_time, output_img_filename,\n x+1, -1, -1)\n\n\n\nprint(\"------------------------------------------\")\nprint(\"Total Algorithm Processing Time (sec): \" + str(round(np.sum(total_algo_processing_time), 4)))\n\noutput_txt_file = open(out_file_name, \"a\")\noutput_txt_file.write(\"\\n\\nTotal Pre Processing Time(sec): 0\\n\")\noutput_txt_file.write(\"Total Processing Time(sec): \" + str(round(np.sum(total_algo_processing_time), 4)))\noutput_txt_file.close()\n\nprint(\"------------------------------------------\")\nprint(\"Algorithm Finished !! \")\n\nprint(\"Drawing Image ... \")\n\npoly_draw(output_img_filename, iteration, output_image_size, nodes, grid_count_horizontal, grid_count_vertical)\n#imageDraw(input_image.size, splitted_image, nodes, \"output\", grid_count_horizontal, grid_count_vertical)\nprint(\"Finished\")\n\n\n"
] | [
[
"numpy.sum"
]
] |
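Every boundary branch in the iteration loop above repeats the same guard: the proposed coordinate is clamped so it stays at least `min_boundary_p_dist` away from its two neighbours before the convexity checks run. A tiny stand-alone version of that clamp (the 0.1 default mirrors `min_boundary_p_dist` in the script):

```python
def clamp_between_neighbours(proposed, lower_neighbour, upper_neighbour,
                             min_dist=0.1):
    # Keep the proposed coordinate at least min_dist inside both neighbours,
    # like the paired "if updated_y < ... / if updated_y > ..." checks above.
    lo = lower_neighbour + min_dist
    hi = upper_neighbour - min_dist
    return max(lo, min(proposed, hi))

print(clamp_between_neighbours(0.05, 0.0, 1.0))  # -> 0.1, pushed off the lower neighbour
print(clamp_between_neighbours(0.50, 0.0, 1.0))  # -> 0.5, unchanged
```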
souljaboy764/intprim | [
"ecf905ce69dc14215230be3b3819d2236223e9ba"
] | [
"intprim/filter/spatiotemporal/ekf.py"
] | [
"##\r\n# This module defines a spatiotemporal filter based off of the extended Kalman filter.\r\n#\r\n# @author Joseph Campbell <[email protected]>, Interactive Robotics Lab, Arizona State University\r\nimport numpy as np\r\nimport scipy.linalg\r\n\r\nimport intprim.constants\r\nfrom intprim.filter.spatiotemporal import nonlinear_system\r\n\r\n##\r\n# The ExtendedKalmanFilter class localizes an interaction in time and space via the extended Kalman filter.\r\n# This class is a recursive filter, meaning it maintains state information between successive calls to localize().\r\n# As with the other spatiotemporal filters, the ExtendedKalmanFilter's internal state consists of (N+1) dimensions modeling\r\n# the N-th order phase system plus B dimensions modeling the latent space of the interaction for a total size of N+1+B.\r\n# This class corresponds to a Bayesian Interaction Primitive.\r\n#\r\n# References:\\n\r\n# Campbell, J., Stepputtis, S., & Ben Amor, H. (2019). Probabilistic Multimodal Modeling for Human-Robot Interaction Tasks.\\n\r\n# Campbell, J., & Amor, H. B. (2017). Bayesian interaction primitives: A slam approach to human-robot interaction. In Conference on Robot Learning (pp. 379-387).\\n\r\n#\r\nclass ExtendedKalmanFilter(nonlinear_system.NonLinearSystem):\r\n ##\r\n # The initialization method for the ExtendedKalmanFilter. Responsible for initializing the state.\r\n #\r\n # @param basis_model The basis model corresponding to this state space.\r\n # @param initial_phase_mean Vector of dimension N corresponding to the initial mean of the constant velocity phase system. If this is a 0th order system, should contain only [phase_mean]. If this is 1st order, should contain [phase_mean, phase_velocity]. If this is 2nd order, should contain [phase_mean, phase_velocity, phase_acceleration]. Anything above 2nd order is not supported.\r\n # @param initial_phase_var Vector of dimension N corresponding to the diagonal of the initial covariance of the constant velocity phase system. If this is a 0th order system, should contain only [phase_var]. If this is 1st order, should contain [phase_var, phase_velocity_var]. If this is 2nd order, should contain [phase_var, phase_velocity_var, phase_acceleration_var]. Anything above 2nd order is not supported.\r\n # @param proc_var The process noise of the constant velocity phase system. This is a scalar value corresponding to the variance of a piecewise white noise model.\r\n # @param mean_basis_weights Vector of dimension D containing the initial state for the basis weights.\r\n # @param cov_basis_weights Matrix of dimension D x D containing the initial covariance matrix for the basis weights.\r\n # @param time_delta The amount of time that elapses between time steps. This serves as a scaling factor to the constant velocity phase system. In most cases, this should be set to 1.0.\r\n # @param cyclical Indicates whether this is a cyclical primitive. 
If True, the internal phase state will cycle back to 0 once it exceeds 1, allowing for continuous inference of periodic interactions.\r\n #\r\n def __init__(self,\r\n basis_model,\r\n initial_phase_mean,\r\n initial_phase_var,\r\n proc_var,\r\n mean_basis_weights,\r\n cov_basis_weights,\r\n time_delta = 1.0,\r\n cyclical = False):\r\n super(ExtendedKalmanFilter, self).__init__(basis_model, proc_var, time_delta, len(initial_phase_mean) - 1)\r\n\r\n self.cyclical = cyclical\r\n\r\n initial_phase_mean = np.array(initial_phase_mean, dtype = intprim.constants.DTYPE)\r\n initial_phase_var = np.diag(initial_phase_var).astype(intprim.constants.DTYPE)\r\n\r\n # Initial phase is 0 while the landmarks are the mean basis weights of the demonstrations.\r\n self.state_mean = np.zeros(self.state_dimension, dtype = intprim.constants.DTYPE)\r\n self.state_mean[:self.system_size] = initial_phase_mean[:self.system_size]\r\n self.state_mean[self.system_size:] = mean_basis_weights.astype(intprim.constants.DTYPE)\r\n\r\n # Covariance starts at 0 for phase since we assume trajectories start at the initial point\r\n # The covariance for the basis weights is the same as computed from demonstrations\r\n self.state_cov = np.zeros((self.state_dimension, self.state_dimension), dtype = intprim.constants.DTYPE)\r\n\r\n # Assume discrete white noise model for the phase/phase velocity.\r\n self.state_cov[:self.system_size, :self.system_size] = initial_phase_var[:self.system_size, :self.system_size]\r\n self.state_cov[self.system_size:, self.system_size:] = cov_basis_weights\r\n\r\n self.identity_cov = np.eye(self.state_cov.shape[0], dtype = intprim.constants.DTYPE)\r\n\r\n ##\r\n # Gets the mean of the internal state.\r\n #\r\n # @returns Vector of dimension N+1+B containing the state mean.\r\n #\r\n def get_mean(self):\r\n return self.state_mean\r\n\r\n ##\r\n # Gets the covariance of the internal state.\r\n #\r\n # @returns Matrix of dimension N+1+B x N+1+B.\r\n def get_covariance(self):\r\n return self.state_cov\r\n\r\n ##\r\n # Calculates the projection of the internal mean and covariance to measurement space.\r\n #\r\n # @param phase The phase value \\f$ \\phi \\f$ to use for the projection.\r\n # @param mean The mean from which to calculate the projected mean. If none, uses the internal state mean.\r\n # @param cov The covariance from which to calculate the projected covariance. 
If none, uses the interal state covariance.\r\n #\r\n # @returns Vector of dimension D containing the sample mean, matrix of dimension D x D containing the sample covariance.\r\n #\r\n def get_projected_mean_covariance(self, phase, mean = None, cov = None):\r\n if(mean is None):\r\n mean = self.get_mean()\r\n if(cov is None):\r\n cov = self.get_covariance()\r\n\r\n if(phase is None):\r\n phase = mean[0]\r\n\r\n temp_mean = np.array(mean, copy = True)\r\n temp_mean[0] = phase\r\n measurement_model = self.get_measurement_model(temp_mean)\r\n projected_mean = np.dot(measurement_model[:,self.system_size:], temp_mean[self.system_size:])\r\n projected_cov = np.dot(measurement_model, cov).dot(measurement_model.T)\r\n\r\n return projected_mean, projected_cov\r\n\r\n ##\r\n # This method performs simultaneous localization of both time (phase) and space (basis weights).\r\n # This is a recursive call, which means it updates the internal state estimate based on the given observations.\r\n # For each observation given, two steps are performed recursively:\r\n # First, the current state is propagated forward in time in what is known as the prediction step.\r\n # $$ \\\\begin{align}\r\n # \\\\boldsymbol{\\\\mu}_{t|t-1} &=\r\n # \\\\boldsymbol{G}$\r\n # \\\\boldsymbol{\\\\mu}_{t-1|t-1},\\\\\r\n # %\r\n # \\\\boldsymbol{\\\\Sigma}_{t|t-1} &= \\\\boldsymbol{G} \\\\boldsymbol{\\\\Sigma}_{t-1|t-1} \\\\boldsymbol{G}^{T} +\r\n # \\\\boldsymbol{Q}_t,\r\n # \\\\end{align} $$\r\n # Note that in this case, we only apply the dot product to the first N+1 dimensions of the state. This is for computational efficiency as only the constant velocity phase system has a non-zero transition.\r\n #\r\n # Next, we integrate the observations into the current state in the update step.\r\n # $$ \\\\begin{align}\r\n # \\\\boldsymbol{K}_t &= \\\\boldsymbol{\\\\Sigma}_{t|t-1} \\\\boldsymbol{H}_t^{T} (\\\\boldsymbol{H}_t \\\\boldsymbol{\\\\Sigma}_{t|t-1} \\\\boldsymbol{H}_t^{T} + \\\\boldsymbol{R}_t)^{-1},\\\\\r\n # %\r\n # \\\\boldsymbol{\\\\mu}_{t|t} &= \\\\boldsymbol{\\\\mu}_{t|t-1} + \\\\boldsymbol{K}_t(\\\\boldsymbol{y}_t - h(\\\\boldsymbol{\\\\mu}_{t|t-1})),\\\\\r\n # %\r\n # \\\\boldsymbol{\\\\Sigma}_{t|t} &= (I - \\\\boldsymbol{K}_t \\\\boldsymbol{H}_t)\\\\boldsymbol{\\\\Sigma}_{t|t-1},\r\n # \\\\end{align} $$\r\n #\r\n # Lastly, the mean and covariance of the state are returned.\r\n # At the end of both the prediction and update steps the internal phase value is clipped such that it falls within the range [0, 1].\r\n #\r\n # @param measurement Matrix of dimension T x D containing observations, where T is the number of timesteps that have been observed since the last call to localize() and D is the dimension of the measurement space.\r\n # @param measurement_noise Matrix of dimension D x D containing the measurement noise for the given set of measurements.\r\n # @param active_dofs Vector of dimension \\f$ D_o \\f$ containing measurement space indices of the observed degrees of freedom. 
Note that the measurements will also contain unobserved degrees of freedom, but their values should not be used for inference.\r\n # @param return_phase_variance True if the mean/variance for the phase system should be returned in addition to the basis weights.\r\n #\r\n # @returns Scalar value containing the inferred phase, Vector of dimension D (or N+1+D if return_phase_variance is True) containing inferred mean, Matrix of dimension D x D (or N+1+D x N+1+D if return_phase_variance is True).\r\n def localize(self, measurement, measurement_noise, active_dofs, return_phase_variance = False):\r\n transition_model = self.get_transition_model()\r\n\r\n nonactive_dofs = np.setdiff1d(range(self.measurement_dimension), active_dofs)\r\n\r\n for measurement_idx in range(measurement.shape[0]):\r\n # Make forward prediction\r\n self.state_mean = np.dot(transition_model, self.state_mean)\r\n self.state_cov = np.dot(transition_model, self.state_cov).dot(transition_model.T) + self.get_process_noise()\r\n\r\n # Restrict final output to [0.0, 1.0] so it's a valid phase.\r\n if(self.state_mean[0] > 1.0):\r\n self.state_mean[0] = 1.0\r\n elif(self.state_mean[0] < 0.0):\r\n self.state_mean[0] = 0.0\r\n\r\n measurement_model = self.get_measurement_model(self.state_mean)\r\n predicted_measurement = np.dot(measurement_model[:,self.system_size:], self.state_mean[self.system_size:])\r\n\r\n kalman_gain = np.dot(self.state_cov, measurement_model.T)\r\n kalman_gain = kalman_gain.dot(scipy.linalg.inv(np.dot(measurement_model, self.state_cov).dot(measurement_model.T) + measurement_noise))\r\n\r\n # Zero out the Kalman gain entries for the non-active DoFs. Since we aren't considering them we don't want them to affect the update process.\r\n kalman_gain[:, nonactive_dofs] = 0.0;\r\n\r\n self.state_mean += np.dot(kalman_gain, measurement[measurement_idx] - predicted_measurement)\r\n\r\n self.state_cov = (self.identity_cov - np.dot(kalman_gain, measurement_model)).dot(self.state_cov)\r\n\r\n # If the interaction is cyclical and the mean is greater than 1.0, then set the mean phase to 0. Leave covariance and higher order moments alone.\r\n if(self.cyclical and self.state_mean[0] >= 1.0):\r\n self.state_mean[0] -= 1.0\r\n\r\n # Restrict final output to [0.0, 1.0] so it's a valid phase.\r\n if(self.state_mean[0] > 1.0):\r\n self.state_mean[0] = 1.0\r\n elif(self.state_mean[0] < 0.0):\r\n self.state_mean[0] = 0.0\r\n\r\n # Return phase and updated weights and covariance\r\n if(return_phase_variance is False):\r\n return self.state_mean[0], self.state_mean[self.system_size:], self.state_cov[self.system_size:, self.system_size:]\r\n else:\r\n return self.state_mean[0], self.state_cov[0, 0], self.state_mean[self.system_size:], self.state_cov[self.system_size:, self.system_size:]\r\n"
] | [
[
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.eye",
"numpy.diag"
]
] |
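`localize` above is a textbook EKF recursion once the basis-function bookkeeping is stripped away. The sketch below runs one predict/update cycle in NumPy with the same equations; the toy `G`, `H`, `Q`, `R` and dimensions are placeholders for what the class derives from the phase system and basis model, and the zeroing of Kalman-gain columns for inactive DoFs is omitted.

```python
import numpy as np

n, d = 3, 2                        # state dim (N+1+B) and measurement dim, toy values
G = np.eye(n); G[0, 1] = 1.0       # toy transition model
H = np.array([[1.0, 0.0, 0.0],
              [0.0, 0.0, 1.0]])    # toy measurement Jacobian
Q = 1e-4 * np.eye(n)               # process noise
R = 1e-2 * np.eye(d)               # measurement noise

mu = np.zeros(n)
P = np.eye(n)
y = np.array([0.2, -0.1])          # one observation

# Prediction step: mu = G mu, P = G P G^T + Q
mu = G @ mu
P = G @ P @ G.T + Q

# Update step: K = P H^T (H P H^T + R)^-1, then mean/covariance correction
K = P @ H.T @ np.linalg.inv(H @ P @ H.T + R)
mu = mu + K @ (y - H @ mu)
P = (np.eye(n) - K @ H) @ P
print(mu)
```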
stumpdk/ripnfind | [
"0642232e190b6c3029da6536ea4823a17633569c"
] | [
"get_names.py"
] | [
"import pandas as pd \nimport json\n\nfirstnames = []\nlastnames = []\ndata = pd.read_csv(\"hack4dk_police_person.csv\", low_memory=False) \nfor l in data['firstnames']:\n if isinstance(l, str): \n for fname in l.split():\n if fname.find('.') == -1 and fname.find(',') == -1 and fname.find(' ') == -1 and fname.find('-') == -1:\n if fname not in firstnames:\n firstnames.append(fname)\n\nfile = open('firstnames.json', 'w')\nfile.write(json.dumps(firstnames))\nfile.close()\ni = 0\nfor l in data['lastname']:\n i = i+1\n if(i%1000 == 0):\n print(i)\n if isinstance(l, str):\n for lname in l.split():\n if lname.find('.') == -1 and lname.find(',') == -1 and lname.find(' ') == -1 and lname.find('-') == -1:\n if lname not in lastnames:\n lastnames.append(lname)\n\nfile = open('lastnames.json', 'w')\nfile.write(json.dumps(lastnames))\nfile.close()\n\nprint(len(firstnames))\nprint(len(lastnames))\n\nprint(firstnames[0:10])\nprint(lastnames[0:10])\n "
] | [
[
"pandas.read_csv"
]
] |
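One caveat for `get_names.py` above: `json.dumps` escapes non-ASCII by default, so Danish names such as "Søren" land in the output files as `\uXXXX` escapes. If readable UTF-8 output were wanted, a variant like the one below would do it; `ensure_ascii=False` is standard-library `json` behaviour, but the readable-output requirement itself is an assumption, not something the script asks for.

```python
import json

names = ['Søren', 'Åse', 'Jørgen']
with open('firstnames_utf8.json', 'w', encoding='utf-8') as f:
    json.dump(names, f, ensure_ascii=False)   # readable UTF-8 instead of \uXXXX escapes
```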
TaikiMiyagawa/MSPRT-TANDEM | [
"dc337c5ffc3ae0e2feaca1da2b1760737a930c98"
] | [
"utils/misc.py"
] | [
"# MIT License\n\n# Copyright (c) 2021 Taiki Miyagawa and Akinori F. Ebihara\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# ==============================================================================\n\nimport os, yaml\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport tensorflow as tf \n\ndef load_yaml(yaml_path):\n assert os.path.exists(yaml_path), \"Yaml path does not exist: \" + yaml_path\n with open(yaml_path, \"r\") as f:\n config = yaml.load(f, Loader=yaml.SafeLoader)\n return config\n\n\ndef set_gpu_devices(gpu):\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n assert len(physical_devices) > 0, \"Not enough GPU hardware devices available\"\n tf.config.experimental.set_visible_devices(physical_devices[gpu], 'GPU')\n tf.config.experimental.set_memory_growth(physical_devices[gpu], True)\n\n\ndef make_directories(path):\n if not os.path.exists(path):\n print(\"Path '{}' does not exist.\".format(path))\n print(\"Make directory: \" + path)\n os.makedirs(path)\n \n \ndef fix_random_seed(flag_seed, seed=None):\n if flag_seed:\n np.random.seed(seed)\n tf.random.set_seed(seed)\n print(\"Numpy and TensorFlow's random seeds fixed: seed=\" + str(seed))\n \n else:\n print(\"Random seed not fixed.\")\n\n\ndef show_layers(model):\n \"\"\"Shows layers in model.\n Args:\n model: A tf.keras.Model object.\n \"\"\"\n print(\"================= Model contains the followding layers ================\")\n for iter_layer in model.layers:\n print(\"Layer: \", iter_layer.name)\n print(\"=======================================================================\")\n\n\ndef restrict_classes(llrs, labels, list_classes):\n \"\"\" \n Args:\n llrs: A Tensor with shape (batch, ...). \n E.g., (batch, duration, num classes, num classes).\n labels: A Tensor with shape (batch, ...). \n E.g., (batch, ).\n list_classes: A list of integers specifying the classes\n to be extracted. E.g. list_classes = [0,2,9] for NMNIST.\n Returns:\n llrs_rest: A Tensor with shape (<= batch, llrs.shape[:1]). 
\n If no class data found in llrs_rest, llrs_rest = None.\n lbls_rest: A Tensor with shape (<= batch, labels.shape[:1]).\n If no class data found in llrs_rest, lbls_rest = None.\n \"\"\"\n if list_classes == []:\n return llrs, labels\n\n #assert tf.reduce_min(labels).numpy() <= np.min(list_classes)\n #assert np.max(list_classes) <= tf.reduce_max(labels).numpy() \n \n ls_idx = []\n for itr_cls in list_classes:\n ls_idx.append(tf.reshape(tf.where(labels == itr_cls), [-1]))\n idx = tf.concat(ls_idx, axis=0)\n idx = tf.sort(idx)\n \n llrs_rest = tf.gather(llrs, idx, axis=0)\n lbls_rest = tf.gather(labels, idx, axis=0)\n \n llrs_rest = None if llrs_rest.shape[0] == 0 else llrs_rest\n lbls_rest = None if lbls_rest.shape[0] == 0 else lbls_rest\n\n return llrs_rest, lbls_rest\n\n \ndef extract_positive_row(llrs, labels):\n \"\"\" Extract y_i-th rows of LLR matrices.\n Args:\n llrs: (batch, duraiton, num classes, num classes)\n labels: (batch,)\n Returns:\n llrs_posrow: (batch, duration, num classes)\n \"\"\"\n llrs_shape = llrs.shape\n duration = llrs_shape[1]\n num_classes = llrs_shape[2]\n \n labels_oh = tf.one_hot(labels, depth=num_classes, axis=1)\n # (batch, num cls)\n labels_oh = tf.reshape(labels_oh,[-1, 1, num_classes, 1])\n labels_oh = tf.tile(labels_oh, [1, duration, 1, 1])\n # (batch, duration, num cls, 1)\n\n llrs_pos = llrs * labels_oh\n # (batch, duration, num cls, num cls)\n llrs_posrow = tf.reduce_sum(llrs_pos, axis=2)\n # (batch, duration, num cls): = LLR_{:, :, y_i, :}\n \n return llrs_posrow\n\n\ndef add_max_to_diag(llrs):\n \"\"\"\n Args:\n llrs: (batch, duration, num classes, num classes)\n Returns:\n llrs_maxdiag: (batch, duration, num classes, num classes),\n max(|llrs|) is added to diag of llrs.\n \"\"\"\n num_classes = llrs.shape[2]\n \n llrs_abs = tf.abs(llrs)\n llrs_max = tf.reduce_max(llrs_abs)\n # max |LLRs|\n tmp = tf.linalg.tensor_diag([1.] * num_classes) * llrs_max\n tmp = tf.reshape(tmp, [1, 1, num_classes, num_classes])\n llrs_maxdiag = llrs + tmp\n\n return llrs_maxdiag\n\n\ndef plot_heatmatrix(mx, figsize=(10,7), annot=True):\n \"\"\"\n Args:\n mx: A square matrix.\n figsize: A tuple of two positive integers.\n annot: A bool. Plot a number at the center of a cell or not.\n \"\"\"\n plt.figure(figsize=figsize)\n sns.heatmap(mx, annot=annot)\n plt.show()\n"
] | [
[
"tensorflow.config.experimental.set_visible_devices",
"tensorflow.abs",
"tensorflow.concat",
"tensorflow.where",
"tensorflow.tile",
"numpy.random.seed",
"tensorflow.random.set_seed",
"tensorflow.one_hot",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.linalg.tensor_diag",
"tensorflow.reshape",
"matplotlib.pyplot.figure",
"tensorflow.reduce_max",
"tensorflow.sort",
"tensorflow.reduce_sum",
"tensorflow.gather",
"matplotlib.pyplot.show",
"tensorflow.config.experimental.list_physical_devices"
]
] |
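`extract_positive_row` in `misc.py` above selects the y_i-th row of every (num classes × num classes) LLR matrix by building a one-hot mask, tiling it over time, and reducing. In NumPy the same selection is a single advanced-indexing expression, which makes the intended shapes easy to verify; the toy shapes and random LLRs below are illustrative only.

```python
import numpy as np

batch, duration, num_classes = 4, 5, 3
rng = np.random.default_rng(0)
llrs = rng.normal(size=(batch, duration, num_classes, num_classes))
labels = np.array([0, 2, 1, 2])

# Advanced indexing at axes 0 and 2 with a slice in between: NumPy places the
# broadcast index dimension first, giving (batch, duration, num_classes),
# i.e. LLR_{i, :, y_i, :} exactly as the one_hot + tile + reduce_sum graph does.
llrs_posrow = llrs[np.arange(batch), :, labels, :]
print(llrs_posrow.shape)   # (4, 5, 3)
```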
zilongzhong/LM-LSTM-CRF | [
"7d71abf80df1599e57b9ab77a2f2152f1341ca83"
] | [
"train_wc.py"
] | [
"from __future__ import print_function\r\nimport datetime\r\nimport time\r\nimport torch\r\nimport torch.autograd as autograd\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport codecs\r\nfrom model.crf import *\r\nfrom model.lm_lstm_crf import *\r\nimport model.utils as utils\r\nfrom model.evaluator import eval_wc\r\n\r\nimport argparse\r\nimport json\r\nimport os\r\nimport sys\r\nfrom tqdm import tqdm\r\nimport itertools\r\nimport functools\r\n\r\ndef eprint(*args, **kwargs):\r\n print(*args, file=sys.stderr, **kwargs)\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(description='Learning with LM-LSTM-CRF together with Language Model')\r\n parser.add_argument('--rand_embedding', action='store_true', help='random initialize word embedding')\r\n parser.add_argument('--emb_file', default='./embedding/glove.6B.100d.txt', help='path to pre-trained embedding')\r\n parser.add_argument('--train_file', default='./data/ner/eng.train.iobes', help='path to training file')\r\n parser.add_argument('--dev_file', default='./data/ner/eng.testa.iobes', help='path to development file')\r\n parser.add_argument('--test_file', default='./data/ner/eng.testb.iobes', help='path to test file')\r\n parser.add_argument('--gpu', type=int, default=0, help='gpu id')\r\n parser.add_argument('--batch_size', type=int, default=10, help='batch_size')\r\n parser.add_argument('--unk', default='unk', help='unknow-token in pre-trained embedding')\r\n parser.add_argument('--char_hidden', type=int, default=300, help='dimension of char-level layers')\r\n parser.add_argument('--word_hidden', type=int, default=300, help='dimension of word-level layers')\r\n parser.add_argument('--drop_out', type=float, default=0.55, help='dropout ratio')\r\n parser.add_argument('--epoch', type=int, default=200, help='maximum epoch number')\r\n parser.add_argument('--start_epoch', type=int, default=0, help='start point of epoch')\r\n parser.add_argument('--checkpoint', default='./checkpoint/', help='checkpoint path')\r\n parser.add_argument('--caseless', action='store_true', help='caseless or not')\r\n parser.add_argument('--char_dim', type=int, default=30, help='dimension of char embedding')\r\n parser.add_argument('--word_dim', type=int, default=100, help='dimension of word embedding')\r\n parser.add_argument('--char_layers', type=int, default=1, help='number of char level layers')\r\n parser.add_argument('--word_layers', type=int, default=1, help='number of word level layers')\r\n parser.add_argument('--lr', type=float, default=0.015, help='initial learning rate')\r\n parser.add_argument('--lr_decay', type=float, default=0.05, help='decay ratio of learning rate')\r\n parser.add_argument('--fine_tune', action='store_false', help='fine tune the diction of word embedding or not')\r\n parser.add_argument('--load_check_point', default='', help='path previous checkpoint that want to be loaded')\r\n parser.add_argument('--load_opt', action='store_true', help='also load optimizer from the checkpoint')\r\n parser.add_argument('--update', choices=['sgd', 'adam'], default='sgd', help='optimizer choice')\r\n parser.add_argument('--momentum', type=float, default=0.9, help='momentum for sgd')\r\n parser.add_argument('--clip_grad', type=float, default=5.0, help='clip grad at')\r\n parser.add_argument('--small_crf', action='store_false', help='use small crf instead of large crf, refer model.crf module for more details')\r\n parser.add_argument('--mini_count', type=float, default=5, help='thresholds to replace rare words with 
<unk>')\r\n parser.add_argument('--lambda0', type=float, default=1, help='lambda0')\r\n parser.add_argument('--co_train', action='store_true', help='cotrain language model')\r\n parser.add_argument('--patience', type=int, default=15, help='patience for early stop')\r\n parser.add_argument('--high_way', action='store_true', help='use highway layers')\r\n parser.add_argument('--highway_layers', type=int, default=1, help='number of highway layers')\r\n parser.add_argument('--eva_matrix', choices=['a', 'fa'], default='fa', help='use f1 and accuracy or accuracy alone')\r\n parser.add_argument('--least_iters', type=int, default=50, help='at least train how many epochs before stop')\r\n parser.add_argument('--shrink_embedding', action='store_true', help='shrink the embedding dictionary to corpus (open this if pre-trained embedding dictionary is too large, but disable this may yield better results on external corpus)')\r\n args = parser.parse_args()\r\n\r\n if args.gpu >= 0:\r\n torch.cuda.set_device(args.gpu)\r\n\r\n print('setting:')\r\n print(args)\r\n\r\n # load corpus\r\n print('loading corpus')\r\n with codecs.open(args.train_file, 'r', 'utf-8') as f:\r\n lines = f.readlines()\r\n with codecs.open(args.dev_file, 'r', 'utf-8') as f:\r\n dev_lines = f.readlines()\r\n with codecs.open(args.test_file, 'r', 'utf-8') as f:\r\n test_lines = f.readlines()\r\n\r\n dev_features, dev_labels = utils.read_corpus(dev_lines)\r\n test_features, test_labels = utils.read_corpus(test_lines)\r\n\r\n if args.load_check_point:\r\n if os.path.isfile(args.load_check_point):\r\n print(\"loading checkpoint: '{}'\".format(args.load_check_point))\r\n checkpoint_file = torch.load(args.load_check_point)\r\n args.start_epoch = checkpoint_file['epoch']\r\n f_map = checkpoint_file['f_map']\r\n l_map = checkpoint_file['l_map']\r\n c_map = checkpoint_file['c_map']\r\n in_doc_words = checkpoint_file['in_doc_words']\r\n train_features, train_labels = utils.read_corpus(lines)\r\n else:\r\n print(\"no checkpoint found at: '{}'\".format(args.load_check_point))\r\n else:\r\n print('constructing coding table')\r\n\r\n # converting format\r\n train_features, train_labels, f_map, l_map, c_map = utils.generate_corpus_char(lines, if_shrink_c_feature=True, c_thresholds=args.mini_count, if_shrink_w_feature=False)\r\n \r\n f_set = {v for v in f_map}\r\n f_map = utils.shrink_features(f_map, train_features, args.mini_count)\r\n\r\n if args.rand_embedding:\r\n print(\"embedding size: '{}'\".format(len(f_map)))\r\n in_doc_words = len(f_map)\r\n else:\r\n dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_features), f_set)\r\n dt_f_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_features), dt_f_set)\r\n print(\"feature size: '{}'\".format(len(f_map)))\r\n print('loading embedding')\r\n if args.fine_tune: # which means does not do fine-tune\r\n f_map = {'<eof>': 0}\r\n f_map, embedding_tensor, in_doc_words = utils.load_embedding_wlm(args.emb_file, ' ', f_map, dt_f_set, args.caseless, args.unk, args.word_dim, shrink_to_corpus=args.shrink_embedding)\r\n print(\"embedding size: '{}'\".format(len(f_map)))\r\n\r\n l_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), dev_labels))\r\n l_set = functools.reduce(lambda x, y: x | y, map(lambda t: set(t), test_labels), l_set)\r\n for label in l_set:\r\n if label not in l_map:\r\n l_map[label] = len(l_map)\r\n \r\n print('constructing dataset')\r\n # construct dataset\r\n dataset, forw_corp, back_corp = 
utils.construct_bucket_mean_vb_wc(train_features, train_labels, l_map, c_map, f_map, args.caseless)\r\n dev_dataset, forw_dev, back_dev = utils.construct_bucket_mean_vb_wc(dev_features, dev_labels, l_map, c_map, f_map, args.caseless)\r\n test_dataset, forw_test, back_test = utils.construct_bucket_mean_vb_wc(test_features, test_labels, l_map, c_map, f_map, args.caseless)\r\n \r\n dataset_loader = [torch.utils.data.DataLoader(tup, args.batch_size, shuffle=True, drop_last=False) for tup in dataset]\r\n dev_dataset_loader = [torch.utils.data.DataLoader(tup, 50, shuffle=False, drop_last=False) for tup in dev_dataset]\r\n test_dataset_loader = [torch.utils.data.DataLoader(tup, 50, shuffle=False, drop_last=False) for tup in test_dataset]\r\n\r\n # build model\r\n print('building model')\r\n ner_model = LM_LSTM_CRF(len(l_map), len(c_map), args.char_dim, args.char_hidden, args.char_layers, args.word_dim, args.word_hidden, args.word_layers, len(f_map), args.drop_out, large_CRF=args.small_crf, if_highway=args.high_way, in_doc_words=in_doc_words, highway_layers = args.highway_layers)\r\n\r\n if args.load_check_point:\r\n ner_model.load_state_dict(checkpoint_file['state_dict'])\r\n else:\r\n if not args.rand_embedding:\r\n ner_model.load_pretrained_word_embedding(embedding_tensor)\r\n ner_model.rand_init(init_word_embedding=args.rand_embedding)\r\n\r\n if args.update == 'sgd':\r\n optimizer = optim.SGD(ner_model.parameters(), lr=args.lr, momentum=args.momentum)\r\n elif args.update == 'adam':\r\n optimizer = optim.Adam(ner_model.parameters(), lr=args.lr)\r\n\r\n if args.load_check_point and args.load_opt:\r\n optimizer.load_state_dict(checkpoint_file['optimizer'])\r\n\r\n crit_lm = nn.CrossEntropyLoss()\r\n crit_ner = CRFLoss_vb(len(l_map), l_map['<start>'], l_map['<pad>'])\r\n\r\n if args.gpu >= 0:\r\n if_cuda = True\r\n print('device: ' + str(args.gpu))\r\n torch.cuda.set_device(args.gpu)\r\n crit_ner.cuda()\r\n crit_lm.cuda()\r\n ner_model.cuda()\r\n packer = CRFRepack_WC(len(l_map), True)\r\n else:\r\n if_cuda = False\r\n packer = CRFRepack_WC(len(l_map), False)\r\n\r\n tot_length = sum(map(lambda t: len(t), dataset_loader))\r\n\r\n best_f1 = float('-inf')\r\n best_acc = float('-inf')\r\n track_list = list()\r\n start_time = time.time()\r\n epoch_list = range(args.start_epoch, args.start_epoch + args.epoch)\r\n patience_count = 0\r\n\r\n evaluator = eval_wc(packer, l_map, args.eva_matrix)\r\n\r\n for epoch_idx, args.start_epoch in enumerate(epoch_list):\r\n\r\n epoch_loss = 0\r\n ner_model.train()\r\n for f_f, f_p, b_f, b_p, w_f, tg_v, mask_v, len_v in tqdm(\r\n itertools.chain.from_iterable(dataset_loader), mininterval=2,\r\n desc=' - Tot it %d (epoch %d)' % (tot_length, args.start_epoch), leave=False, file=sys.stdout):\r\n f_f, f_p, b_f, b_p, w_f, tg_v, mask_v = packer.repack_vb(f_f, f_p, b_f, b_p, w_f, tg_v, mask_v, len_v)\r\n ner_model.zero_grad()\r\n scores = ner_model(f_f, f_p, b_f, b_p, w_f)\r\n loss = crit_ner(scores, tg_v, mask_v)\r\n epoch_loss += utils.to_scalar(loss)\r\n if args.co_train:\r\n cf_p = f_p[0:-1, :].contiguous()\r\n cb_p = b_p[1:, :].contiguous()\r\n cf_y = w_f[1:, :].contiguous()\r\n cb_y = w_f[0:-1, :].contiguous()\r\n cfs, _ = ner_model.word_pre_train_forward(f_f, cf_p)\r\n loss = loss + args.lambda0 * crit_lm(cfs, cf_y.view(-1))\r\n cbs, _ = ner_model.word_pre_train_backward(b_f, cb_p)\r\n loss = loss + args.lambda0 * crit_lm(cbs, cb_y.view(-1))\r\n loss.backward()\r\n nn.utils.clip_grad_norm(ner_model.parameters(), args.clip_grad)\r\n optimizer.step()\r\n epoch_loss /= 
tot_length\r\n\r\n # update lr\r\n if args.update == 'sgd':\r\n utils.adjust_learning_rate(optimizer, args.lr / (1 + (args.start_epoch + 1) * args.lr_decay))\r\n\r\n # eval & save check_point\r\n\r\n if 'f' in args.eva_matrix:\r\n dev_f1, dev_pre, dev_rec, dev_acc = evaluator.calc_score(ner_model, dev_dataset_loader)\r\n\r\n if dev_f1 > best_f1:\r\n patience_count = 0\r\n best_f1 = dev_f1\r\n\r\n test_f1, test_pre, test_rec, test_acc = evaluator.calc_score(ner_model, test_dataset_loader)\r\n\r\n track_list.append(\r\n {'loss': epoch_loss, 'dev_f1': dev_f1, 'dev_acc': dev_acc, 'test_f1': test_f1,\r\n 'test_acc': test_acc})\r\n\r\n print(\r\n '(loss: %.4f, epoch: %d, dev F1 = %.4f, dev acc = %.4f, F1 on test = %.4f, acc on test= %.4f), saving...' %\r\n (epoch_loss,\r\n args.start_epoch,\r\n dev_f1,\r\n dev_acc,\r\n test_f1,\r\n test_acc))\r\n\r\n try:\r\n utils.save_checkpoint({\r\n 'epoch': args.start_epoch,\r\n 'state_dict': ner_model.state_dict(),\r\n 'optimizer': optimizer.state_dict(),\r\n 'f_map': f_map,\r\n 'l_map': l_map,\r\n 'c_map': c_map,\r\n 'in_doc_words': in_doc_words\r\n }, {'track_list': track_list,\r\n 'args': vars(args)\r\n }, args.checkpoint + 'cwlm_lstm_crf')\r\n except Exception as inst:\r\n print(inst)\r\n\r\n else:\r\n patience_count += 1\r\n print('(loss: %.4f, epoch: %d, dev F1 = %.4f, dev acc = %.4f)' %\r\n (epoch_loss,\r\n args.start_epoch,\r\n dev_f1,\r\n dev_acc))\r\n track_list.append({'loss': epoch_loss, 'dev_f1': dev_f1, 'dev_acc': dev_acc})\r\n\r\n else:\r\n\r\n dev_acc = evaluator.calc_score(ner_model, dev_dataset_loader)\r\n\r\n if dev_acc > best_acc:\r\n patience_count = 0\r\n best_acc = dev_acc\r\n \r\n test_acc = evaluator.calc_score(ner_model, test_dataset_loader)\r\n\r\n track_list.append(\r\n {'loss': epoch_loss, 'dev_acc': dev_acc, 'test_acc': test_acc})\r\n\r\n print(\r\n '(loss: %.4f, epoch: %d, dev acc = %.4f, acc on test= %.4f), saving...' %\r\n (epoch_loss,\r\n args.start_epoch,\r\n dev_acc,\r\n test_acc))\r\n\r\n try:\r\n utils.save_checkpoint({\r\n 'epoch': args.start_epoch,\r\n 'state_dict': ner_model.state_dict(),\r\n 'optimizer': optimizer.state_dict(),\r\n 'f_map': f_map,\r\n 'l_map': l_map,\r\n 'c_map': c_map,\r\n 'in_doc_words': in_doc_words\r\n }, {'track_list': track_list,\r\n 'args': vars(args)\r\n }, args.checkpoint + 'cwlm_lstm_crf')\r\n except Exception as inst:\r\n print(inst)\r\n\r\n else:\r\n patience_count += 1\r\n print('(loss: %.4f, epoch: %d, dev acc = %.4f)' %\r\n (epoch_loss,\r\n args.start_epoch,\r\n dev_acc))\r\n track_list.append({'loss': epoch_loss, 'dev_acc': dev_acc})\r\n\r\n print('epoch: ' + str(args.start_epoch) + '\\t in ' + str(args.epoch) + ' take: ' + str(\r\n time.time() - start_time) + ' s')\r\n\r\n if patience_count >= args.patience and args.start_epoch >= args.least_iters:\r\n break\r\n\r\n #print best\r\n if 'f' in args.eva_matrix:\r\n eprint(args.checkpoint + ' dev_f1: %.4f dev_rec: %.4f dev_pre: %.4f dev_acc: %.4f test_f1: %.4f test_rec: %.4f test_pre: %.4f test_acc: %.4f\\n' % (dev_f1, dev_rec, dev_pre, dev_acc, test_f1, test_rec, test_pre, test_acc))\r\n else:\r\n eprint(args.checkpoint + ' dev_acc: %.4f test_acc: %.4f\\n' % (dev_acc, test_acc))\r\n\r\n # printing summary\r\n print('setting:')\r\n print(args)"
] | [
[
"torch.cuda.set_device",
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.utils.data.DataLoader"
]
] |
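The training loop above applies the classic clip-then-step pattern with a hand-rolled learning-rate decay. A minimal, self-contained sketch of that pattern, with a stand-in nn.Linear instead of LM_LSTM_CRF (note that clip_grad_norm is deprecated in modern PyTorch; clip_grad_norm_ is the in-place successor):

    import torch
    import torch.nn as nn
    import torch.optim as optim

    model = nn.Linear(8, 2)  # stand-in for LM_LSTM_CRF
    optimizer = optim.SGD(model.parameters(), lr=0.015, momentum=0.9)

    loss = model(torch.randn(4, 8)).sum()
    loss.backward()
    nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)  # clip before stepping
    optimizer.step()

    # the script's decaying schedule: lr_t = lr0 / (1 + (epoch + 1) * decay)
    for g in optimizer.param_groups:
        g['lr'] = 0.015 / (1 + (0 + 1) * 0.05)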
Mavrikant/ros_swarm_control | [
"eae9dd6a8ea064ee46ff0c08549dd4a974fc04e4"
] | [
"swarm_control/scripts/virt_formation.py"
] | [
"#!/usr/bin/env python\n# coding=utf8\n\nimport numpy as np\nfrom swarm_msgs.msg import FormationParam\n\ndef create_virtual_structure(formation, angle=0.0, length=0., width=0.):\n \"\"\"\n ะคัะฝะบัะธั ะฒะธัััะฐะปัะฝะพะณะพ ะฟะพัััะพะตะฝะธั\n Virtual_formation - ะผะฐััะธะฒ ะบะพะพัะดะธะฝะฐั ัะฐัะฟะพะปะพะถะตะฝะธั ะ ะขะ ะฒ ัััะพั\n x0,y0 - ะบะพะพัะดะธะฝะฐัั ัะตะฝััะฐ ะฒะธัััะฐะปัะฝะพะณะพ ะฟะพัััะพะตะฝะธั\n sizeBuild - ัะฐะทะผะตั ะฟะพัััะพะฝะธั (ัะธัะธะฝะฐ ะฅ ะณะปัะฑะธะฝะฐ)\n :param formation.count: ะบะพะปะธัะตััะฒะพ ะ ะขะ\n :param length: ะดะปะธะฝะฐ ะ ะขะ\n :param width: ัะธัะธะฝะฐ ะ ะขะ\n :param distance: ัะฐัััะพัะฝะธะต ะผะตะถะดั ะ ะขะ\n :param typeFormation: ัะธะฟ ะฟะพัััะพะตะฝะธั, ะทะฐะดะฐะตััั ะพั 1 ะดะพ 7:\n 0-ะคะฐะปะฐะฝะณะฐ RANK=0\n 1-ะะฐัะต (ะบะฒะฐะดัะฐั) SQUARE=1\n 2-ะะปะธะฝ KLIN=2\n 3-ะะฑัะฐัะฝัะน ะบะปะธะฝ REVERSE_KLIN=3\n 4-ะะพะปะพะฝะฝะฐ COLUMM=4\n 5-ะญัะตะปะพะฝ ECHELON=5\n 6-ะะพะปััะพ CIRCLE=6\n :param angle: ัะณะพะป ะฟะพะฒะพัะพัะฐ ะฒะธัััะฐะปัะฝะพะณะพ ะฟะพัััะพะตะฝะธั\n :return:\n \"\"\"\n\n safety_radius_rtp = np.sqrt((length / 2) ** 2 + (width / 2) ** 2)\n\n sizeBuild = np.zeros((1, 2))\n Virtual_formation = np.zeros((formation.count, 2))\n\n if formation.type == FormationParam.RANK and formation.type == FormationParam.NO_DATA: # ะคะฐะปะฐะฝะณะฐ\n r = formation.distance / 2 + safety_radius_rtp\n if formation.count == 2:\n a = 2 * r * formation.count\n else:\n a = (2 * r) * np.ceil(formation.count / 2)\n\n if formation.count > 4:\n b = 4 * r\n else:\n b = (2 * r) * np.ceil(formation.count / 2)\n # sizeBuild = [a b];\n kx = np.fix((a / (2 * r))) # ะบะพะปะธัะตััะฒะพ ะ ะขะ ะฟะพะผะตัะฐััะธั
ัั ะฟะพ ะณะพัะธะทะพะฝัะฐะปะธ\n ky = np.fix((b / (2 * r))) # ะบะพะปะธัะตััะฒะพ ะ ะขะ ะฟะพะผะตัะฐััะธั
ัั ะฟะพ ะฒะตััะธะบะฐะปะธ\n N = kx * ky # ะผะฐะบัะธะผะฐะปัะฝะพะต ะบะพะปะธัะตััะฒะพ ัะพะฑะพัะพะฒ\n # n = 5; #n = 12; # ะบะพะปะธัะตััะฒะพ ัะพะฑะพัะพะฒ ะฒะฒะพะดะธะผัะต ะฟะพะปัะทะพะฒะฐัะตะปะตะผ\n D = N - formation.count # ัะฐะทะฝะธัะฐ ะผะตะถะดั ะผะฐะบัะธะผะฐะปัะฝัะผ ะบะพะปะธัะตััะฒะพะผ\n # ัะพะฑะพัะพะฒ ะธ ะฝะตะพะฑั
ะพะดะธะผัะผ, ะทะฐะดะฐะฝะฝัะผ ะฟะพะปัะทะพะฒะฐัะตะปะตะผ\n if np.fix(D / kx) > 0:\n ky = ky - np.fix(D / kx)\n N = N - np.fix(D / kx) * kx\n dx = (a - kx * (2 * r)) / (kx + 1) # ัะฐัััะพัะฝะธะต ะผะตะถะดั ะทะพะฝะฐะผะธ ะฑะตะทะพะฟะฐัะฝะพััะธ ะ ะขะ\n dy = (b - ky * (2 * r)) / (ky + 1) # ัะฐัััะพัะฝะธะต ะผะตะถะดั ะทะพะฝะฐะผะธ ะฑะตะทะพะฟะฐัะฝะพััะธ ะ ะขะ\n y = (- b) / 2 - r # ะฝะฐัะฐะปะพ ะพัััะตัะฐ ะฟะพ ั\n k = 0\n for j in np.arange(1, ky + 1):\n y = y + dy + np.dot(2, r)\n x = - a / 2 - r # ะฝะฐัะฐะปะพ ะพัััะตัะฐ ะฟะพ ั
\n if j == ky and formation.count < N:\n kx = kx - (N - formation.count)\n dx = (a - kx * (2 * r)) / (kx + 1) # ัะฐัััะพัะฝะธะต ะผะตะถะดั ะทะพะฝะฐะผะธ ะฑะตะทะพะฟะฐัะฝะพััะธ ะ ะขะ\n for i in np.arange(1, kx+1):\n # x0 = -(a - k*(2*r))/(k+1)/2 - r\n x = x + dx + np.dot(2, r)\n Virtual_formation[k, :] = np.array([x, y])\n k = k + 1 # ะฐะฑััะธัะฐ ะ ะขะ\n elif formation.type == FormationParam.SQUARE: # ะะฐัะต - ะบะฒะฐะดัะฐั\n P = formation.count * (formation.distance + 2 * safety_radius_rtp)\n b = P / 4\n if formation.count == 1:\n a = 0\n elif formation.count == 2:\n a = b\n else:\n a = b / 2\n # sizeBuild = [2*a 2*a];\n # -->\n visionAngle = np.deg2rad(360)\n Virtual_formation = np.zeros((formation.count, 2))\n alpha_0 = np.deg2rad(0)\n d_alpha = visionAngle / formation.count\n for i in np.arange(0, formation.count):\n if np.deg2rad(0) <= alpha_0 < np.deg2rad(45):\n Virtual_formation[i, :] = np.array([a, a * np.tan(alpha_0)])\n elif np.deg2rad(45) <= alpha_0 < np.deg2rad(135):\n Virtual_formation[i, :] = np.array([a / np.tan(alpha_0), a])\n elif np.deg2rad(135) <= alpha_0 < np.deg2rad(225):\n Virtual_formation[i, :] = np.array([- a, - a * np.tan(alpha_0)])\n elif np.deg2rad(225) <= alpha_0 < np.deg2rad(315):\n Virtual_formation[i, :] = np.array([- a / np.tan(alpha_0), - a])\n else:\n Virtual_formation[i, :] = np.array([a, a * np.tan(alpha_0)])\n alpha_0 = np.double(alpha_0 + d_alpha)\n if alpha_0 > 2 * np.pi:\n alpha_0 = alpha_0 - 2 * np.pi\n elif formation.type == FormationParam.KLIN:\n d = formation.distance + 2 * safety_radius_rtp\n a = d\n b = d * np.sqrt(3) / 2\n sizeBuild = np.array([np.fix(formation.count / 2) * a, np.fix(formation.count / 2) * b])\n k = 0\n if np.mod(formation.count, 2) == 1:\n Virtual_formation[0, :] = np.array([0, 0])\n k = k + 1\n for i in np.arange(1, np.fix(formation.count / 2) + 1):\n Virtual_formation[k, :] = np.array([(- i) * a / 2, - i * b])\n k = k + 1\n Virtual_formation[k, :] = np.array([i * a / 2, (- i) * b])\n k = k + 1\n # ัะผะตัะตะฝะธะต ะบะปะธะฝะฐ ะฒะฒะตัั
\n # -->\n Virtual_formation[:, 1] = Virtual_formation[:, 1] + sizeBuild[1] / 2\n # ัะตะฝัั ะบะปะธะฝะฐ (ะฟัะธ ัะตัะฝะพะผ ะบะพะปะธัะตััะฒะต ะ ะขะ ัะตะฝัั ะฑัะดะตั ัะผะตัะตะฝ ะตัั ะฝะฐ 1/4)\n if np.mod(formation.count, 2) == 0:\n # -->\n Virtual_formation[:, 1] = Virtual_formation[:, 1] + b / 2\n\n elif formation.type == FormationParam.REVERSE_KLIN:\n # -->\n d = formation.distance + 2 * safety_radius_rtp\n a = d\n b = d * np.sqrt(3) / 2\n sizeBuild = np.array([np.fix(formation.count / 2) * a, np.fix(formation.count / 2) * b])\n k = 0\n if np.mod(formation.count, 2) == 1:\n Virtual_formation[0, :] = np.array([0, 0])\n k = k + 1\n for i in np.arange(1, np.fix(formation.count / 2) + 1):\n Virtual_formation[k, :] = np.array([- i * a / 2, i * b])\n k = k + 1\n Virtual_formation[k, :] = np.array([i * a / 2, i * b])\n k = k + 1\n # ัะผะตัะตะฝะธะต ะบะปะธะฝะฐ ะฒะฝะธะท\n # -->\n Virtual_formation[:, 1] = Virtual_formation[:, 1] - sizeBuild[1] / 2\n # ัะตะฝัั ะพะฑัะฐัะฝะพะณะพ ะบะปะธะฝะฐ(ะฟัะธ ัะตัะฝะพะผ ะบะพะปะธัะตััะฒะต ะ ะขะ ัะตะฝัั ะฑัะดะตั ัะผะตัะตะฝ\n # ะตัั ะฝะฐ 1/4\n if np.mod(formation.count, 2) == 0:\n # -->\n Virtual_formation[:, 1] = Virtual_formation[:, 1] - b / 2\n\n elif formation.type == FormationParam.COLUMN:\n # -->\n d = formation.distance + 2 * safety_radius_rtp\n # b = double((n-1)*d + 2*safety_radius_rtp);\n # sizeBuild = [a b];\n # -->\n Virtual_formation[0, :] = np.array([0, 0])\n for i in np.arange(1, formation.count + 1):\n Virtual_formation[i - 1, :] = np.array([0, - (i - 1) * d])\n y0 = - (formation.count - 1) * d / 2\n Virtual_formation[:, 1] = Virtual_formation[:, 1] - y0\n elif formation.type == FormationParam.ECHELON:\n d = formation.distance + 2 * safety_radius_rtp\n a = d\n # sizeBuild = [(n-1)*a+2*widthRTP b];\n k = 0\n if np.mod(formation.count, 2) == 1:\n Virtual_formation[0, :] = np.array([0, 0])\n k = k + 1\n for i in np.arange(1, np.fix(formation.count / 2) + 1):\n Virtual_formation[k, :] = np.array([np.dot(- i, a), 0])\n k = k + 1\n Virtual_formation[k, :] = np.array([np.dot(i, a), 0])\n k = k + 1\n else:\n for i in np.arange(0, np.fix(formation.count / 2)):\n Virtual_formation[k, :] = np.array([- a / 2 - i * a, 0])\n k = k + 1\n Virtual_formation[k, :] = np.array([a / 2 + i * a, 0])\n k = k + 1\n elif formation.type == FormationParam.CIRCLE:\n d = formation.distance + 2 * safety_radius_rtp\n # ะพะฟัะตะดะตะปัะตััั ะฟะพ ะดัะณะต ะพะบััะถะฝะพััะธ\n if formation.count == 1:\n a = 0\n elif formation.count == 2:\n a = d / 2\n else:\n a = d * formation.count / (2 * np.pi)\n # sizeBuild = [2*a 2*a]\n alpha_0 = np.deg2rad(0)\n scanAngleResolution = np.deg2rad(np.double(360) / formation.count)\n visionAngle = np.deg2rad(360)\n Virtual_formation = np.zeros((formation.count, 2))\n k = 0\n for i in np.arange(alpha_0, visionAngle + alpha_0, scanAngleResolution):\n Virtual_formation[k, :] = np.array([a * np.cos(i), a * np.sin(i)])\n k = k + 1\n\n # ะะพััะตะบัะธั ัะฐะทะผะตัะพะฒ ะฒะธัััะฐะปัะฝะพะน ััััะบัััั\n sX = np.max(Virtual_formation[:, 0]) - np.min(Virtual_formation[:, 0])\n sY = np.max(Virtual_formation[:, 1]) - np.min(Virtual_formation[:, 1])\n if sX < 0.001:\n sX = 2 * safety_radius_rtp\n if sY < 0.001:\n sY = 2 * safety_radius_rtp\n\n sizeBuild = np.array([sX, sY])\n\n # ----------------------------------------------------\n # ะฟะพะฒะพัะพั ะฒะธัััะฐะปัะฝะพะน ััััะบัััั ะฝะฐ ัะณะพะป alpha\n # -----------------------------------------------------\n f0 = np.zeros((1, formation.count))\n r = np.zeros((1, formation.count))\n for i in np.arange(0, formation.count):\n r[:, 
i] = np.sqrt(Virtual_formation[i, 0] ** 2 + Virtual_formation[i, 1] ** 2)\n f0[:, i] = myAngle_new(Virtual_formation[i, 0], Virtual_formation[i, 1])\n\n Virtual_formation[:, 0] = np.multiply(r.flatten(), np.cos(f0.flatten() + angle-np.deg2rad(90)))\n Virtual_formation[:, 1] = np.multiply(r.flatten(), np.sin(f0.flatten() + angle - np.deg2rad(90)))\n\n return Virtual_formation, sizeBuild\n\n\ndef myAngle_new(dx, dy):\n \"\"\"\n myANGLE ะฒััะธัะปะตะฝะธะต ัะณะปะฐ\n ะคัะฝะบัะธั ะฒััะธัะปะตะฝะธั ัะณะปะฐ ะฒ ัะฐะดะธะฐะฝะฐั
\n ะัั
ะพะดะฝัะผะธ ะดะฐะฝะฝัะผะธ ัะฒะปััััั ัะฐัััะพัะฝะธั\n ะผะตะถะดั ัะพัะบะฐะผะธ ะฟะพ ะพัะธ ะะฅ ะธ ะะฃ ัะพะพัะฒะตัััะฒะตะฝะฝะพ\n :param dx:\n :param dy:\n :return:\n \"\"\"\n\n Angle = 0\n if dx > 0 and dy >= 0:\n Angle = np.arctan((dy / dx))\n elif dx > 0 > dy:\n Angle = np.arctan(dy / dx) + 2 * np.pi\n elif dx < 0:\n Angle = np.arctan(dy / dx) + np.pi\n elif dx == 0 and dy > 0:\n Angle = np.pi / 2\n elif dx == 0 and dy < 0:\n Angle = 3 * np.pi / 2\n elif dx == 0 and dy == 0:\n Angle = 0\n\n return Angle\n"
] | [
[
"numpy.fix",
"numpy.max",
"numpy.array",
"numpy.ceil",
"numpy.dot",
"numpy.sin",
"numpy.zeros",
"numpy.double",
"numpy.tan",
"numpy.min",
"numpy.arctan",
"numpy.arange",
"numpy.sqrt",
"numpy.cos",
"numpy.deg2rad",
"numpy.mod"
]
] |
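The CIRCLE branch above sets the radius from the requested arc-length spacing. A self-contained sketch of just that placement rule (plain numpy, without the ROS FormationParam message):

    import numpy as np

    def ring_positions(count, distance, safety_radius):
        # same arc-length spacing rule as the CIRCLE branch of create_virtual_structure
        d = distance + 2 * safety_radius
        radius = 0 if count == 1 else (d / 2 if count == 2 else d * count / (2 * np.pi))
        angles = np.arange(count) * 2 * np.pi / count
        return np.stack([radius * np.cos(angles), radius * np.sin(angles)], axis=1)

    print(ring_positions(6, 1.0, 0.25))  # six robots evenly spaced on a circle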
LynnHo/VAE-Tensorflow | [
"aa908554925540f8c79c5228cde7fb306fe8868a"
] | [
"tflib/vision/dataset/mnist.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport multiprocessing\nimport os\nimport struct\nimport subprocess\n\nimport numpy as np\nimport tensorflow as tf\nfrom tflib.data.memory_data import MemoryData\n\n\n_N_CPU = multiprocessing.cpu_count()\n\n\ndef unzip_gz(file_name):\n unzip_name = file_name.replace('.gz', '')\n gz_file = gzip.GzipFile(file_name)\n open(unzip_name, 'wb+').write(gz_file.read())\n gz_file.close()\n\n\ndef mnist_download(download_dir):\n url_base = 'http://yann.lecun.com/exdb/mnist/'\n file_names = ['train-images-idx3-ubyte.gz',\n 'train-labels-idx1-ubyte.gz',\n 't10k-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz']\n for file_name in file_names:\n url = url_base + file_name\n save_path = os.path.join(download_dir, file_name)\n cmd = ['curl', url, '-o', save_path]\n print('Downloading ', file_name)\n if not os.path.exists(save_path):\n subprocess.call(cmd)\n else:\n print('%s exists, skip!' % file_name)\n\n\ndef mnist_load(data_dir, split='train'):\n \"\"\"Load MNIST dataset, modified from https://gist.github.com/akesling/5358964.\n\n Returns:\n `imgs`, `lbls`, `num`.\n\n `imgs` : [-1.0, 1.0] float64 images of shape (N * H * W).\n `lbls` : Int labels of shape (N,).\n `num` : # of datas.\n \"\"\"\n mnist_download(data_dir)\n\n if split == 'train':\n fname_img = os.path.join(data_dir, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(data_dir, 'train-labels-idx1-ubyte')\n elif split == 'test':\n fname_img = os.path.join(data_dir, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(data_dir, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"split must be 'test' or 'train'\")\n\n if not os.path.exists(fname_img):\n unzip_gz(fname_img + '.gz')\n if not os.path.exists(fname_lbl):\n unzip_gz(fname_lbl + '.gz')\n\n with open(fname_lbl, 'rb') as flbl:\n struct.unpack('>II', flbl.read(8))\n lbls = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n _, _, rows, cols = struct.unpack('>IIII', fimg.read(16))\n imgs = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbls), rows, cols)\n imgs = imgs / 127.5 - 1\n\n return imgs, lbls, len(lbls)\n\n\nclass Mnist(MemoryData):\n\n def __init__(self, data_dir, batch_size, split='train', prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,\n map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1, sess=None):\n imgs, lbls, _ = mnist_load(data_dir, split)\n imgs.shape = imgs.shape + (1,)\n\n imgs_pl = tf.placeholder(tf.float32, imgs.shape)\n lbls_pl = tf.placeholder(tf.int64, lbls.shape)\n\n memory_data_dict = {'img': imgs_pl, 'lbl': lbls_pl}\n\n self.feed_dict = {imgs_pl: imgs, lbls_pl: lbls}\n super(Mnist, self).__init__(memory_data_dict, batch_size, prefetch_batch, drop_remainder, filter,\n map_func, num_threads, shuffle, buffer_size, repeat, sess)\n\n def reset(self):\n super(Mnist, self).reset(self.feed_dict)\n\nif __name__ == '__main__':\n import imlib as im\n from tflib import session\n sess = session()\n mnist = Mnist('/tmp', 5000, repeat=1, sess=sess)\n print(len(mnist))\n for batch in mnist:\n print(batch['lbl'][-1])\n im.imshow(batch['img'][-1].squeeze())\n im.show()\n sess.close()\n"
] | [
[
"tensorflow.placeholder",
"numpy.fromfile"
]
] |
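mnist_load above relies on the big-endian IDX headers: two uint32s (magic, count) for labels, four (magic, count, rows, cols) for images. A runnable sketch of that parsing on a synthetic in-memory header:

    import struct, io

    # synthetic 16-byte IDX image header (magic=2051, 2 images of 28x28)
    buf = io.BytesIO(struct.pack('>IIII', 2051, 2, 28, 28))
    magic, num, rows, cols = struct.unpack('>IIII', buf.read(16))
    print(num, rows, cols)  # 2 28 28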
chriscannon9001/tablarray | [
"f07530f84a8c86abe996cdb999233ed9bb8edf7e"
] | [
"tablarray/np2ta/cbroadcast.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 28 12:09:36 2020\n\n@author: chris\n\"\"\"\n# std lib\nimport attr\nimport numpy as np\n\n# broadcast loop controls\nDIMEQ = 0 # EQ dimensions\nDIMLP1 = 1 # loop over side 1\nDIMLP2 = 2 # loop over side 2\nDIMPAD1 = -1 # pad on side 1\nDIMPAD2 = -2 # pad on side 2\nDIMERR = -3 # error, incompatible dimension\n\n_DTYPE_PRIORITY = {'bool': 0,\n 'bool8': 1,\n 'int8': 2,\n 'int16': 3,\n 'int32': 4,\n 'int64': 5,\n 'float16': 6,\n 'float32': 7,\n 'float64': 8,\n 'float128': 9,\n 'complex64': 10,\n 'complex128': 11,\n 'complex256': 12}\n\n\ndef _prioritize_dtype(override, atype, btype):\n \"\"\"when arrays a and b are broadcast,\n determine dtype of the return array\"\"\"\n if override is not None:\n return override\n a_priority = _DTYPE_PRIORITY[atype.__str__()]\n b_priority = _DTYPE_PRIORITY[btype.__str__()]\n dtype = atype if (a_priority >= b_priority) else btype\n return dtype\n\n\ndef broadcast_shape(shape1, shape2):\n \"\"\"given 2 shapes, return the broadcast result shape and controls\n\n >>> broadcast_shape((1, 1, 3), (2, 1))\n (array([1, 2, 3]), array([-2, 1, 2]), True)\n >>> broadcast_shape((1, 4, 3), (2, 1))\n (array([1, 4, 3]), array([-2, -3, 2]), False)\n \"\"\"\n if shape1 == shape2:\n # shorten the process if the answer is trivial\n control = np.zeros(len(shape1), dtype=int)\n return shape1, control, True\n # get padding ready for any difference in length\n delta_len = len(shape1) - len(shape2)\n pad = [0] * abs(delta_len)\n if delta_len >= 0:\n shape1 = np.array(shape1)\n shape2 = np.array(pad + list(shape2))\n else:\n shape2 = np.array(shape2)\n shape1 = np.array(pad + list(shape1))\n new_shape = np.zeros(len(shape1), dtype=int)\n mask1 = shape1 >= shape2\n new_shape[mask1] = shape1[mask1]\n mask2 = shape2 > shape1\n new_shape[mask2] = shape2[mask2]\n controls = DIMERR * np.ones(len(shape1), dtype=int)\n # control indicates broadcasting method per dim\n controls[shape1 == shape2] = DIMEQ\n controls[np.logical_and(shape1 > shape2, shape2 <= 1)] = DIMLP2\n controls[np.logical_and(shape2 > shape1, shape1 <= 1)] = DIMLP1\n controls[shape1 == 0] = DIMPAD1\n controls[shape2 == 0] = DIMPAD2\n valid = not np.any(controls == DIMERR)\n return new_shape, controls, valid\n\n\[email protected]\nclass CellBroadcast(object):\n \"\"\"Given a and b arrays, being segmented between tabular and cellular\n shapes, provide an iterator that yields 3 slices to be used for\n broadcasting.\n\n Example::\n\n cb = CellBroadcast((2,), (1,), (1,), (2, 2))\n cb.demo()\n \"\"\"\n _tshape_a = attr.ib(type=tuple)\n _tshape_b = attr.ib(type=tuple)\n _cshape_a = attr.ib(type=tuple)\n _cshape_b = attr.ib(type=tuple)\n\n def __attrs_post_init__(self):\n # tabularshape controls\n tshape_ctrl = broadcast_shape(\n self._tshape_a, self._tshape_b)\n # cellularshape controls\n cshape_ctrl = broadcast_shape(\n self._cshape_a, self._cshape_b)\n # new_cdim = len(new_cshape)\n self.new_cdim = len(cshape_ctrl[0])\n # master shape controls\n shape_ctrl = self._broadcast_ctrl_combiner(\n tshape_ctrl, cshape_ctrl)\n self.new_shape, self._controls, self.valid = shape_ctrl\n self._ndim = len(self.new_shape)\n self.rslice = [slice(None)] * self._ndim\n is_in_a = np.logical_not(self._controls == DIMPAD1)\n ndim_a = np.sum(is_in_a)\n self.aslice = [slice(None)] * ndim_a\n self._a_ndim_map = np.zeros(self._ndim, dtype=int)\n self._a_ndim_map[is_in_a] = np.arange(ndim_a)\n is_in_b = np.logical_not(self._controls == DIMPAD2)\n ndim_b = np.sum(is_in_b)\n 
self.bslice = [slice(None)] * ndim_b\n self._b_ndim_map = np.zeros(self._ndim, dtype=int)\n self._b_ndim_map[is_in_b] = np.arange(ndim_b)\n\n @classmethod\n def from_tshapes(cls, a_ts, b_ts):\n return cls(a_ts.tshape, b_ts.tshape, a_ts.cshape, b_ts.cshape)\n\n @staticmethod\n def _broadcast_ctrl_combiner(a_ctrl, b_ctrl):\n \"\"\"given a_ctr,b_ctrl: (new_shape, controls, valid), combine into 1\"\"\"\n a_shape, a_controls, a_valid = a_ctrl\n b_shape, b_controls, b_valid = b_ctrl\n new_shape = (np.concatenate((a_shape, b_shape))).astype(int)\n controls = (np.concatenate((a_controls, b_controls))).astype(int)\n valid = a_valid and b_valid\n return new_shape, controls, valid\n\n def _set_slice(self, dim, this_ctrl, this_slice):\n self.rslice[dim] = (this_slice)\n adim = self._a_ndim_map[dim]\n bdim = self._b_ndim_map[dim]\n # place this slice into aslice and/or bslice\n if this_ctrl == DIMLP2 or this_ctrl == DIMPAD2:\n self.aslice[adim] = (this_slice)\n if this_ctrl == DIMLP1 or this_ctrl == DIMPAD1:\n self.bslice[bdim] = (this_slice)\n # DIMLP requires slice(0) to dereference\n if this_ctrl == DIMLP1:\n self.aslice[adim] = (0)\n elif this_ctrl == DIMLP2:\n self.bslice[bdim] = (0)\n\n def __iter__(self, dim=0):\n if dim == self._ndim:\n # end recursion using yield as an iterator\n yield tuple(self.rslice), tuple(self.aslice), tuple(self.bslice)\n return\n this_ctrl = self._controls[dim]\n if this_ctrl == DIMEQ:\n # recursion\n yield from self.__iter__(dim + 1)\n else:\n this_n = self.new_shape[dim]\n for i in range(this_n):\n self._set_slice(dim, this_ctrl, i)\n # recursion\n yield from self.__iter__(dim + 1)\n\n def demo(self):\n print('new_shape: %s' % self.new_shape)\n for rslice, aslice, bslice in self:\n print('\\nr ', rslice)\n print('a ', aslice)\n print('b ', bslice)\n\n def calc_function(self, func, a, b, dtype=None):\n \"\"\"calculate rval=func(a, b) using my iterator\"\"\"\n assert self.valid, (\n \"couldn't broadcast compound shapes %s and %s\" %\n (a.ts, b.ts))\n dtype = _prioritize_dtype(dtype, a.dtype, b.dtype)\n rval = np.zeros(self.new_shape, dtype=dtype)\n for rslice, aslice, bslice in self:\n rval[rslice] = func(a[aslice], b[bslice])\n return rval\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n"
] | [
[
"numpy.logical_not",
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.logical_and",
"numpy.any",
"numpy.arange"
]
] |
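broadcast_shape encodes, per dimension, the same compatibility rule that NumPy's own broadcasting applies (dimensions match, or one of them is 1 and is looped/padded). A quick cross-check against NumPy, assuming NumPy >= 1.20 for broadcast_shapes:

    import numpy as np

    print(np.broadcast_shapes((1, 1, 3), (2, 1)))                       # (1, 2, 3), like the first doctest
    print(np.broadcast(np.zeros((1, 4, 3)), np.zeros((4, 1))).shape)    # (1, 4, 3)
    # incompatible shapes such as (1, 4, 3) vs (2, 1) raise ValueError in NumPy,
    # where broadcast_shape instead returns valid=False via the DIMERR control code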
rcchun/Mask_RCNN | [
"199a12e16fde4bdd2553f33a75eba530cbca586d"
] | [
"mrcnn/visualize_bak.py"
] | [
"\"\"\"\nMask R-CNN\nDisplay and Visualization Functions.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport sys\nimport random\nimport itertools\nimport colorsys\n\nimport numpy as np\nfrom skimage.measure import find_contours\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches, lines\nfrom matplotlib.patches import Polygon\nimport IPython.display\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\n\n\n############################################################\n# Visualization\n############################################################\n\ndef display_images(images, titles=None, cols=4, cmap=None, norm=None,\n interpolation=None):\n \"\"\"Display the given set of images, optionally with titles.\n images: list or array of image tensors in HWC format.\n titles: optional. A list of titles to display with each image.\n cols: number of images per row\n cmap: Optional. Color map to use. For example, \"Blues\".\n norm: Optional. A Normalize instance to map values to colors.\n interpolation: Optional. Image interpolation to use for display.\n \"\"\"\n titles = titles if titles is not None else [\"\"] * len(images)\n rows = len(images) // cols + 1\n plt.figure(figsize=(14, 14 * rows // cols))\n i = 1\n for image, title in zip(images, titles):\n plt.subplot(rows, cols, i)\n plt.title(title, fontsize=9)\n plt.axis('off')\n plt.imshow(image.astype(np.uint8), cmap=cmap,\n norm=norm, interpolation=interpolation)\n i += 1\n plt.show()\n\n\ndef random_colors(N, bright=True):\n \"\"\"\n Generate random colors.\n To get visually distinct colors, generate them in HSV space then\n convert to RGB.\n \"\"\"\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors\n\n\ndef apply_mask(image, mask, color, alpha=0.5):\n \"\"\"Apply the given mask to the image.\n \"\"\"\n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n image[:, :, c] *\n (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n return image\n\n\ndef display_instances(image, boxes, masks, class_ids, class_names,\n scores=None, title=\"\",\n figsize=(16, 16), ax=None,\n show_mask=True, show_bbox=True,\n colors=None, captions=None):\n \"\"\"\n boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n masks: [height, width, num_instances]\n class_ids: [num_instances]\n class_names: list of class names of the dataset\n scores: (optional) confidence scores for each box\n title: (optional) Figure title\n show_mask, show_bbox: To show masks and bounding boxes or not\n figsize: (optional) the size of the image\n colors: (optional) An array or colors to use with each object\n captions: (optional) A list of strings to use as captions for each object\n \"\"\"\n # Number of instances\n N = boxes.shape[0]\n if not N:\n print(\"\\n*** No instances to display *** \\n\")\n else:\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n\n # If no axis is passed, create one and automatically call show()\n auto_show = False\n if not ax:\n _, ax = plt.subplots(1, figsize=figsize)\n auto_show = True\n\n # Generate random colors\n colors = colors or random_colors(N)\n\n # Show area outside image boundaries.\n height, width = image.shape[:2]\n 
ax.set_ylim(height + 10, -10)\n ax.set_xlim(-10, width + 10)\n ax.axis('off')\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n color = colors[i]\n\n # Bounding box\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in image cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n if show_bbox:\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=0.7, linestyle=\"dashed\",\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Label\n if not captions:\n class_id = class_ids[i]\n score = scores[i] if scores is not None else None\n label = class_names[class_id]\n caption = \"{} {:.3f}\".format(label, score) if score else label\n else:\n caption = captions[i]\n ax.text(x1, y1 + 8, caption,\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n mask = masks[:, :, i]\n if show_mask:\n masked_image = apply_mask(masked_image, mask, color)\n\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n if auto_show:\n plt.show()\n\n\ndef display_differences(image,\n gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n class_names, title=\"\", ax=None,\n show_mask=True, show_box=False,\n iou_threshold=0.5, score_threshold=0.5):\n \"\"\"Display ground truth and prediction instances on the same image.\"\"\"\n # Match predictions to ground truth\n gt_match, pred_match, overlaps = utils.compute_matches(\n gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_threshold=iou_threshold, score_threshold=score_threshold)\n # Ground truth = green. 
Predictions = red\n colors = [(0, 1, 0, .8)] * len(gt_match)\\\n + [(1, 0, 0, 1)] * len(pred_match)\n # Concatenate GT and predictions\n class_ids = np.concatenate([gt_class_id, pred_class_id])\n scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])\n boxes = np.concatenate([gt_box, pred_box])\n masks = np.concatenate([gt_mask, pred_mask], axis=-1)\n # Captions per instance show score/IoU\n captions = [\"\" for m in gt_match] + [\"{:.2f} / {:.2f}\".format(\n pred_score[i],\n (overlaps[i, int(pred_match[i])]\n if pred_match[i] > -1 else overlaps[i].max()))\n for i in range(len(pred_match))]\n # Set title if not provided\n title = title or \"Ground Truth and Detections\\n GT=green, pred=red, captions: score/IoU\"\n # Display\n display_instances(\n image,\n boxes, masks, class_ids,\n class_names, scores, ax=ax,\n show_bbox=show_box, show_mask=show_mask,\n colors=colors, captions=captions,\n title=title)\n\n\ndef draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):\n \"\"\"\n anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.\n proposals: [n, 4] the same anchors but refined to fit objects better.\n \"\"\"\n masked_image = image.copy()\n\n # Pick random anchors in case there are too many.\n ids = np.arange(rois.shape[0], dtype=np.int32)\n ids = np.random.choice(\n ids, limit, replace=False) if ids.shape[0] > limit else ids\n\n fig, ax = plt.subplots(1, figsize=(12, 12))\n if rois.shape[0] > limit:\n plt.title(\"Showing {} random ROIs out of {}\".format(\n len(ids), rois.shape[0]))\n else:\n plt.title(\"{} ROIs\".format(len(ids)))\n\n # Show area outside image boundaries.\n ax.set_ylim(image.shape[0] + 20, -20)\n ax.set_xlim(-50, image.shape[1] + 20)\n ax.axis('off')\n\n for i, id in enumerate(ids):\n color = np.random.rand(3)\n class_id = class_ids[id]\n # ROI\n y1, x1, y2, x2 = rois[id]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n edgecolor=color if class_id else \"gray\",\n facecolor='none', linestyle=\"dashed\")\n ax.add_patch(p)\n # Refined ROI\n if class_id:\n ry1, rx1, ry2, rx2 = refined_rois[id]\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal for easy visualization\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Label\n label = class_names[class_id]\n ax.text(rx1, ry1 + 8, \"{}\".format(label),\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n m = utils.unmold_mask(mask[id], rois[id]\n [:4].astype(np.int32), image.shape)\n masked_image = apply_mask(masked_image, m, color)\n\n ax.imshow(masked_image)\n\n # Print stats\n print(\"Positive ROIs: \", class_ids[class_ids > 0].shape[0])\n print(\"Negative ROIs: \", class_ids[class_ids == 0].shape[0])\n print(\"Positive Ratio: {:.2f}\".format(\n class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))\n\n\n# TODO: Replace with matplotlib equivalent?\ndef draw_box(image, box, color):\n \"\"\"Draw 3-pixel width bounding boxes on the given image array.\n color: list of 3 int values for RGB.\n \"\"\"\n y1, x1, y2, x2 = box\n image[y1:y1 + 2, x1:x2] = color\n image[y2:y2 + 2, x1:x2] = color\n image[y1:y2, x1:x1 + 2] = color\n image[y1:y2, x2:x2 + 2] = color\n return image\n\n\ndef display_top_masks(image, mask, class_ids, class_names, limit=4):\n \"\"\"Display the given image and the top few class masks.\"\"\"\n to_display = []\n titles = []\n to_display.append(image)\n titles.append(\"H x 
W={}x{}\".format(image.shape[0], image.shape[1]))\n # Pick top prominent classes in this image\n unique_class_ids = np.unique(class_ids)\n mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])\n for i in unique_class_ids]\n top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),\n key=lambda r: r[1], reverse=True) if v[1] > 0]\n # Generate images and titles\n for i in range(limit):\n class_id = top_ids[i] if i < len(top_ids) else -1\n # Pull masks of instances belonging to the same class.\n m = mask[:, :, np.where(class_ids == class_id)[0]]\n m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)\n to_display.append(m)\n titles.append(class_names[class_id] if class_id != -1 else \"-\")\n display_images(to_display, titles=titles, cols=limit + 1, cmap=\"Blues_r\")\n\n\ndef plot_precision_recall(AP, precisions, recalls):\n \"\"\"Draw the precision-recall curve.\n\n AP: Average precision at IoU >= 0.5\n precisions: list of precision values\n recalls: list of recall values\n \"\"\"\n # Plot the Precision-Recall curve\n _, ax = plt.subplots(1)\n ax.set_title(\"Precision-Recall Curve. AP@50 = {:.3f}\".format(AP))\n ax.set_ylim(0, 1.1)\n ax.set_xlim(0, 1.1)\n _ = ax.plot(recalls, precisions)\n\n\ndef plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,\n overlaps, class_names, threshold=0.5):\n \"\"\"Draw a grid showing how ground truth objects are classified.\n gt_class_ids: [N] int. Ground truth class IDs\n pred_class_id: [N] int. Predicted class IDs\n pred_scores: [N] float. The probability scores of predicted classes\n overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.\n class_names: list of all class names in the dataset\n threshold: Float. The prediction probability required to predict a class\n \"\"\"\n gt_class_ids = gt_class_ids[gt_class_ids != 0]\n pred_class_ids = pred_class_ids[pred_class_ids != 0]\n\n plt.figure(figsize=(12, 10))\n plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)\n plt.yticks(np.arange(len(pred_class_ids)),\n [\"{} ({:.2f})\".format(class_names[int(id)], pred_scores[i])\n for i, id in enumerate(pred_class_ids)])\n plt.xticks(np.arange(len(gt_class_ids)),\n [class_names[int(id)] for id in gt_class_ids], rotation=90)\n\n thresh = overlaps.max() / 2.\n for i, j in itertools.product(range(overlaps.shape[0]),\n range(overlaps.shape[1])):\n text = \"\"\n if overlaps[i, j] > threshold:\n text = \"match\" if gt_class_ids[j] == pred_class_ids[i] else \"wrong\"\n color = (\"white\" if overlaps[i, j] > thresh\n else \"black\" if overlaps[i, j] > 0\n else \"grey\")\n plt.text(j, i, \"{:.3f}\\n{}\".format(overlaps[i, j], text),\n horizontalalignment=\"center\", verticalalignment=\"center\",\n fontsize=9, color=color)\n\n plt.tight_layout()\n plt.xlabel(\"Ground Truth\")\n plt.ylabel(\"Predictions\")\n\n\ndef draw_boxes(image, boxes=None, refined_boxes=None,\n masks=None, captions=None, visibilities=None,\n title=\"\", ax=None):\n \"\"\"Draw bounding boxes and segmentation masks with different\n customizations.\n\n boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.\n refined_boxes: Like boxes, but draw with solid lines to show\n that they're the result of refining 'boxes'.\n masks: [N, height, width]\n captions: List of N titles to display on each box\n visibilities: (optional) List of values of 0, 1, or 2. 
Determine how\n prominent each bounding box should be.\n title: An optional title to show over the image\n ax: (optional) Matplotlib axis to draw on.\n \"\"\"\n # Number of boxes\n assert boxes is not None or refined_boxes is not None\n N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]\n\n # Matplotlib Axis\n if not ax:\n _, ax = plt.subplots(1, figsize=(12, 12))\n\n # Generate random colors\n colors = random_colors(N)\n\n # Show area outside image boundaries.\n margin = image.shape[0] // 10\n ax.set_ylim(image.shape[0] + margin, -margin)\n ax.set_xlim(-margin, image.shape[1] + margin)\n ax.axis('off')\n\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n # Box visibility\n visibility = visibilities[i] if visibilities is not None else 1\n if visibility == 0:\n color = \"gray\"\n style = \"dotted\"\n alpha = 0.5\n elif visibility == 1:\n color = colors[i]\n style = \"dotted\"\n alpha = 1\n elif visibility == 2:\n color = colors[i]\n style = \"solid\"\n alpha = 1\n\n # Boxes\n if boxes is not None:\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=alpha, linestyle=style,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Refined boxes\n if refined_boxes is not None and visibility > 0:\n ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal\n if boxes is not None:\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Captions\n if captions is not None:\n caption = captions[i]\n # If there are refined boxes, display captions on them\n if refined_boxes is not None:\n y1, x1, y2, x2 = ry1, rx1, ry2, rx2\n ax.text(x1, y1, caption, size=11, verticalalignment='top',\n color='w', backgroundcolor=\"none\",\n bbox={'facecolor': color, 'alpha': 0.5,\n 'pad': 2, 'edgecolor': 'none'})\n\n # Masks\n if masks is not None:\n mask = masks[:, :, i]\n masked_image = apply_mask(masked_image, mask, color)\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n\n\ndef display_table(table):\n \"\"\"Display values in a table format.\n table: an iterable of rows, and each row is an iterable of values.\n \"\"\"\n html = \"\"\n for row in table:\n row_html = \"\"\n for col in row:\n row_html += \"<td>{:40}</td>\".format(str(col))\n html += \"<tr>\" + row_html + \"</tr>\"\n html = \"<table>\" + html + \"</table>\"\n IPython.display.display(IPython.display.HTML(html))\n\n\ndef display_weight_stats(model):\n \"\"\"Scans all the weights in the model and returns a list of tuples\n that contain stats about each weight.\n \"\"\"\n layers = model.get_trainable_layers()\n table = [[\"WEIGHT NAME\", \"SHAPE\", \"MIN\", \"MAX\", \"STD\"]]\n for l in layers:\n weight_values = l.get_weights() # list of Numpy arrays\n weight_tensors = l.weights # list of TF tensors\n for i, w in 
enumerate(weight_values):\n weight_name = weight_tensors[i].name\n # Detect problematic layers. Exclude biases of conv layers.\n alert = \"\"\n if w.min() == w.max() and not (l.__class__.__name__ == \"Conv2D\" and i == 1):\n alert += \"<span style='color:red'>*** dead?</span>\"\n if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:\n alert += \"<span style='color:red'>*** Overflow?</span>\"\n # Add row\n table.append([\n weight_name + alert,\n str(w.shape),\n \"{:+9.4f}\".format(w.min()),\n \"{:+10.4f}\".format(w.max()),\n \"{:+9.4f}\".format(w.std()),\n ])\n display_table(table)\n"
] | [
[
"numpy.random.choice",
"numpy.random.rand",
"numpy.where",
"matplotlib.patches.Rectangle",
"numpy.concatenate",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotlib.lines.Line2D",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.fliplr",
"matplotlib.pyplot.xlabel",
"numpy.any",
"matplotlib.pyplot.ylabel",
"numpy.unique",
"matplotlib.pyplot.imshow"
]
] |
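apply_mask above blends a color into masked pixels channel by channel: out = img * (1 - alpha) + alpha * color * 255. A tiny self-contained check of that formula:

    import numpy as np

    image = np.full((2, 2, 3), 100.0)
    mask = np.array([[1, 0], [0, 1]])
    color, alpha = (1.0, 0.0, 0.0), 0.5
    for c in range(3):
        image[:, :, c] = np.where(mask == 1,
                                  image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
                                  image[:, :, c])
    print(image[0, 0])  # [177.5  50.   50. ] -- masked pixel tinted red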
Levintsky/ARKitScenes | [
"58bf410f65bc2ae2e35e3c3d2a7c45d8b7863fca"
] | [
"depth_upsampling/sampler.py"
] | [
"import numpy as np\nimport torch.utils.data\n\n\nclass MultiEpochSampler(torch.utils.data.Sampler):\n r\"\"\"Samples elements randomly over multiple epochs\n\n Arguments:\n data_source (Dataset): dataset to sample from\n num_iter (int) : Number of times to loop over the dataset\n start_itr (int) : which iteration to begin from\n \"\"\"\n\n def __init__(self, data_source, num_iter, start_itr=0, batch_size=128):\n super().__init__(data_source)\n self.data_source = data_source\n self.dataset_size = len(self.data_source)\n self.num_iter = num_iter\n self.start_itr = start_itr\n self.batch_size = batch_size\n self.num_epochs = int(np.ceil((self.num_iter * self.batch_size) / float(self.dataset_size)))\n\n if not isinstance(self.dataset_size, int) or self.dataset_size <= 0:\n raise ValueError(\"dataset size should be a positive integeral \"\n \"value, but got dataset_size={}\".format(self.dataset_size))\n\n def __iter__(self):\n n = self.dataset_size\n # Determine number of epochs\n num_epochs = int(np.ceil(((self.num_iter - self.start_itr) * self.batch_size) / float(n)))\n out = np.concatenate([np.random.permutation(n) for epoch in range(self.num_epochs)])[-num_epochs * n: self.num_iter * self.batch_size]\n out = out[(self.start_itr * self.batch_size % n):]\n return iter(out)\n\n def __len__(self):\n return (self.num_iter - self.start_itr) * self.batch_size\n"
] | [
[
"numpy.random.permutation"
]
] |
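The sampler concatenates several shuffled permutations of the dataset so that num_iter batches can be drawn without the loader resetting between epochs. A sketch of that index stream with toy numbers:

    import numpy as np

    n, batch_size, num_iter = 10, 4, 7
    num_epochs = int(np.ceil(num_iter * batch_size / float(n)))   # 3 shuffled epochs
    stream = np.concatenate([np.random.permutation(n) for _ in range(num_epochs)])
    batches = stream[:num_iter * batch_size].reshape(num_iter, batch_size)
    print(batches.shape)  # (7, 4) -- seven batches spanning three epochs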
fortiema/keras-lm | [
"0e844ef7000e1459e49d34a959ab3fe5279edced"
] | [
"lm/text.py"
] | [
"from collections import Counter\nfrom concurrent.futures import ProcessPoolExecutor\nimport logging\nfrom pathlib import Path\nimport pickle\nimport re\n\nimport numpy as np\nimport spacy\nfrom spacy.attrs import ORTH\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TextDataSource:\n \"\"\"Generic source of text data (iterator)\n\n Raises:\n IOError: [description]\n IOError: [description]\n\n Returns:\n [type]: [description]\n \"\"\"\n\n def __init__(self, source, name_pattern=r\".*\\.txt\"):\n self.source = Path(source)\n self.files = None\n\n if not self.source.exists():\n raise IOError(f\"Ressource {str(self.source)} does not exist!\")\n\n if self.source.is_dir():\n self.files = [f for f in self.source.iterdir() if re.match(name_pattern, str(f))]\n elif self.source.is_file() and re.match(name_pattern, str(self.source.resolve())):\n self.files = [self.source.resolve()]\n else:\n raise IOError(\n f\"Ressource {str(self.source)} must contain at least one file matching pattern {name_pattern}.\"\n )\n\n logger.info(f\"Source {str(self.source)} initialized with {len(self.files)} files.\")\n\n def __iter__(self):\n for f in self.files:\n with open(f, \"r\") as fin:\n for line in fin:\n if line:\n yield line.split() + [\"<eos>\"]\n\n def __len__(self):\n return len(list(self.__iter__()))\n\n\nclass Dictionary:\n def __init__(self, source=None):\n self.freq = Counter()\n self.pad, self.unk = \"<pad>\", \"<unk>\"\n self.itos = []\n self.stoi = {}\n\n if source is not None:\n self.fit(source)\n\n def fit(self, source):\n logger.info(f\"Fitting vocabulary...\")\n self.freq = Counter()\n\n # Using this syntax to avoid consuming whole source in memory!\n for doc in source:\n for tok in doc:\n self.freq[tok.lower()] += 1\n\n self.itos = [o for o, c in self.freq.most_common()]\n self.itos.insert(0, self.pad)\n self.itos.insert(1, self.unk)\n self.stoi = {v: i for i, v in enumerate(self.itos)}\n\n def prune(self, max_vocab, min_freq=0):\n logger.info(f\"Pruning vocabulary to keep at most {max_vocab} tokens...\")\n self.itos = [o for o, c in self.freq.most_common(max_vocab) if c > min_freq]\n self.itos.insert(0, self.pad)\n self.itos.insert(1, self.unk)\n self.stoi = {v: i for i, v in enumerate(self.itos)}\n logger.info(\"Pruning completed!\")\n\n def numericalize(self, documents, np=False):\n for doc in documents:\n yield [self.stoi.get(tok.lower(), 1) for tok in doc]\n\n def __len__(self):\n return len(self.itos)\n\n def save(self, fname):\n _path = Path(fname)\n if _path.parent.exists():\n with open(_path, \"wb\") as fout:\n pickle.dump(self, fout, -1)\n else:\n raise IOError(f\"Can't save - Directory {str(_path.parent)} does not exist!\")\n\n @staticmethod\n def load(fname):\n _path = Path(fname)\n if _path.is_file():\n with open(_path, \"rb\") as fin:\n return pickle.load(fin)\n\n\nclass LanguageModelLoader:\n \"\"\" Language model data loader that iterates through batches that are of length N(bptt,5)\n\n Notes:\n\n The iterator will loop indefinitely over the data, which is a requirement of Keras API. 
Keep this in mind when\n consuming it elsewhere.\n\n The first batch returned is always the max possible length.\n \"\"\"\n\n def __init__(self, nums, bs, bptt, backwards=False):\n self.bs, self.bptt, self.backwards = bs, bptt, backwards\n self.steps, self.data = self.batchify(nums)\n self.i, self.iter = 0, 0\n self.n = len(self.data)\n\n def __iter__(self):\n self.i, self.iter = 0, 0\n while self.i < self.n - 1 and self.iter < len(self):\n if self.i == 0:\n seq_len = self.bptt + 5 * 5\n else:\n bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.\n seq_len = max(5, int(np.random.normal(bptt, 5)))\n res = self.get_batch(self.i, seq_len)\n self.i += seq_len\n self.i %= self.n\n self.iter += 1\n self.iter %= len(self)\n yield res\n\n def __len__(self):\n return self.n // self.bptt - 1\n\n def batchify(self, data):\n nb = data.shape[0] // self.bs\n data = np.array(data[: nb * self.bs])\n data = data.reshape(self.bs, -1).T\n if self.backwards:\n data = data[::-1]\n return nb, data\n\n def get_batch(self, i, seq_len):\n \"\"\"[summary]\n\n Args:\n i ([type]): [description]\n seq_len ([type]): [description]\n\n Returns:\n [type]: [description]\n\n Notes:\n\n `np.expand_dims(y, -1)` must be used on the target to accomodate Keras `sparse_categorical_crossentropy`\n objective, according to the documentation.\n\n See also: https://github.com/tensorflow/tensorflow/issues/17150\n\n \"\"\"\n source = self.data\n seq_len = min(seq_len, len(source) - 1 - i)\n return source[i : i + seq_len], np.expand_dims(source[i + 1 : i + 1 + seq_len], -1)\n\n\nclass Tokenizer:\n def __init__(self, lang=\"en\"):\n self.re_br = re.compile(r\"<\\s*br\\s*/?>\", re.IGNORECASE)\n self.tok = spacy.load(lang)\n for w in (\"<eos>\", \"<bos>\", \"<unk>\"):\n self.tok.tokenizer.add_special_case(w, [{ORTH: w}])\n\n def sub_br(self, x):\n return self.re_br.sub(\"\\n\", x)\n\n def spacy_tok(self, x):\n return [t.text for t in self.tok.tokenizer(self.sub_br(x))]\n\n re_rep = re.compile(r\"(\\S)(\\1{3,})\")\n re_word_rep = re.compile(r\"(\\b\\w+\\W+)(\\1{3,})\")\n\n @staticmethod\n def replace_rep(m):\n TK_REP = \"tk_rep\"\n c, cc = m.groups()\n return f\" {TK_REP} {len(cc)+1} {c} \"\n\n @staticmethod\n def replace_wrep(m):\n TK_WREP = \"tk_wrep\"\n c, cc = m.groups()\n return f\" {TK_WREP} {len(cc.split())+1} {c} \"\n\n @staticmethod\n def do_caps(ss):\n TOK_UP, TOK_SENT, TOK_MIX = \" t_up \", \" t_st \", \" t_mx \"\n res = []\n prev = \".\"\n re_word = re.compile(\"\\w\")\n re_nonsp = re.compile(\"\\S\")\n for s in re.findall(r\"\\w+|\\W+\", ss):\n res += [TOK_UP, s.lower()] if (s.isupper() and (len(s) > 2)) else [s.lower()]\n return \"\".join(res)\n\n def proc_text(self, s):\n s = self.re_rep.sub(Tokenizer.replace_rep, s)\n s = self.re_word_rep.sub(Tokenizer.replace_wrep, s)\n s = Tokenizer.do_caps(s)\n s = re.sub(r\"([/#])\", r\" \\1 \", s)\n s = re.sub(\" {2,}\", \" \", s)\n return self.spacy_tok(s)\n\n @staticmethod\n def proc_all(ss, lang):\n tok = Tokenizer(lang)\n return [tok.proc_text(s) for s in ss]\n\n @staticmethod\n def proc_all_mp(ss, lang=\"en\", ncpus=1):\n with ProcessPoolExecutor(ncpus) as e:\n return sum(e.map(Tokenizer.proc_all, ss, [lang] * len(ss)), [])\n"
] | [
[
"numpy.random.random",
"numpy.array",
"numpy.random.normal",
"numpy.expand_dims"
]
] |
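batchify reshapes a flat token stream into bs columns, and get_batch then takes seq_len rows as inputs and the same rows shifted by one as next-token targets. A numpy-only sketch of that layout:

    import numpy as np

    nums, bs, bptt = np.arange(20), 4, 3
    nb = nums.shape[0] // bs
    data = nums[:nb * bs].reshape(bs, -1).T   # shape (5, 4): one column per stream
    x, y = data[0:bptt], data[1:bptt + 1]     # targets are inputs shifted by one step
    print(x.shape, y.shape)                   # (3, 4) (3, 4)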
realtwister/LearnedEvolution | [
"2ec49b50a49acae9693cfb05ac114dfbcc4aa337"
] | [
"tests/config/evaluation.py"
] | [
"import numpy as np\n\nfrom collections import namedtuple\n\nPopulation = namedtuple(\"Population\", ['mean_fitness','mean', 'covariance'])\n\nconfig = dict(\n dimension = 2,\n population_size = 100,\n algorithm = dict(\n mean_function =dict(\n type = \"RLMean\"\n ),\n covariance_function = dict(\n type = \"AMaLGaMCovariance\"\n ),\n convergence_criterion = dict(\n type = \"CovarianceConvergence\",\n threshold = 1e-20\n )\n ),\n problem_suite = dict(\n clss=[\n [\"RotateProblem\", \"TranslateProblem\", \"Rosenbrock\"]\n ]\n ),\n evaluator = dict(\n algorithm = dict(\n mean_function =dict(\n type = \"RLMean\"\n ),\n covariance_function = dict(\n type = \"AMaLGaMCovariance\"\n ),\n convergence_criterion = dict(\n type = \"TimeConvergence\",\n max_iter = 200\n )\n ),\n restoredir = \"/tmp/thesis/single_benchmarks/differentialReward_TimeConv/10000\",\n logdir = \"/tmp/thesis/single_benchmarks/differentialReward_TimeConv/evaluations/10000\",\n seed = 1001,\n N_episodes = 100,\n summarizer = lambda pop: Population(np.mean(pop.fitness), pop.mean, pop.covariance),\n )\n)\n\n\nfrom learnedevolution import Benchmark, Evaluator\n#benchmark = Benchmark.from_config(config, 'benchmark')\n#benchmark.run()\n\nevaluator = Evaluator.from_config(config, 'evaluator')\nhistories = evaluator.run()\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\n# 1 history fitness plot\nplt.figure();\ndata = dict(\n fitness =[],\n)\nfor i in range(len(histories)):\n history = histories[i]\n mean_fitness = -np.array([population.mean_fitness for population in history])\n data['fitness'] += [mean_fitness];\n plt.semilogy(mean_fitness, alpha = 0.1, color = 'k')\n\ndef plot_time_mean(fitness):\n max_T = np.max([len(f) for f in fitness]);\n transpose_fitness = [];\n for t in range(max_T):\n transpose_fitness.append([])\n for f in fitness:\n if t <len(f):\n transpose_fitness[t].append(f[t]);\n\n mean_fitness = [np.mean(f) for f in transpose_fitness];\n plt.semilogy(mean_fitness)\n\ndef precision_hits(fitness, precisions, ts = None):\n if ts is None:\n ts = list(np.arange(len(fitness)).astype(float))\n ps = sorted(precisions)[::-1]\n hits = []\n i = 0\n for t, f in zip(ts, fitness):\n while True:\n if i>=len(ps):\n break\n if f < ps[i]:\n hits.append(t)\n i += 1\n else:\n break\n if i>=len(ps):\n break\n return hits, ps[:len(hits)]\n\n\ndef plot_precision_mean(fitness, num_bins=100):\n ts = [i for f in fitness for i in range(len(f)) ]\n fs = [f for ff in fitness for f in ff]\n fs,ts = zip(*sorted(zip(fs,ts), key=lambda pair: -pair[0]))\n N = len(fs)\n bin_size = np.ceil(N/num_bins).astype(int)\n xs = [];\n ys = [];\n for i in range(num_bins):\n xs.append(np.mean(ts[i*bin_size: (i+1)*bin_size]))\n ys.append(np.mean(fs[i*bin_size: (i+1)*bin_size]))\n\n plt.semilogy(xs,ys)\n\ndef plot_precision_hits (fitness, num_bins = 100 ):\n max_precision = 0\n min_precision = float('inf')\n for f in fitness:\n max_precision = max(max_precision, np.min(f))\n min_precision = min(min_precision, np.max(f))\n\n precisions = np.logspace(np.log10(min_precision), np.log10(max_precision), num_bins)\n data = pd.DataFrame(columns=['time','precision'])\n for f in fitness:\n hits,ps = precision_hits(f, precisions)\n plt.semilogy(hits,ps)\n data = data.append([dict(time=t, precision=p) for t,p in zip(hits,ps)])\n plt.figure()\n ax = sns.scatterplot(x= 'precision', y='time', data=data, alpha= 0.1)\n ax.set( xscale=\"log\")\n ax = sns.lineplot(x= 'precision', y='time', data=data, ax=ax, ci='sd')\n ax.set( 
xscale=\"log\")\n\n\n\n\n\n\n\n\n\nplt.figure();\nplt.yscale('log')\nplot_time_mean(data['fitness'])\nplot_precision_hits(data['fitness'], num_bins=10)\n\nplt.show();\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.ceil",
"pandas.DataFrame",
"matplotlib.pyplot.semilogy",
"numpy.min",
"numpy.mean",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.log10",
"matplotlib.pyplot.yscale"
]
] |
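precision_hits walks a fitness trace and records the first time each precision threshold is crossed, consuming thresholds from loosest to tightest. A compact, self-contained variant of the same logic:

    def first_hits(fitness, thresholds):
        # same semantics as precision_hits above, with integer time steps
        ts, ps = [], sorted(thresholds, reverse=True)
        i = 0
        for t, f in enumerate(fitness):
            while i < len(ps) and f < ps[i]:
                ts.append(t)
                i += 1
        return ts, ps[:len(ts)]

    print(first_hits([1.0, 0.3, 0.01, 1e-4], [0.5, 0.05, 1e-3]))
    # ([1, 2, 3], [0.5, 0.05, 0.001])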
melissa-sa/melissa | [
"d07c43a396267d26f52b7b152a385f8a600ac2e8"
] | [
"examples/heat_example/src/heat.py"
] | [
"#!@PYTHON_EXECUTABLE@\n\n###################################################################\n# Melissa #\n#-----------------------------------------------------------------#\n# COPYRIGHT (C) 2017 by INRIA and EDF. ALL RIGHTS RESERVED. #\n# #\n# This source is covered by the BSD 3-Clause License. #\n# Refer to the LICENCE file for further information. #\n# #\n#-----------------------------------------------------------------#\n# Original Contributors: #\n# Theophile Terraz, #\n# Bruno Raffin, #\n# Alejandro Ribes, #\n# Bertrand Iooss, #\n###################################################################\n\n\n\"\"\"\n heat example in Python\n\"\"\"\n\nimport sys\nimport time\nimport numpy as np\nfrom ctypes import c_double, c_int, c_char, byref\nfrom mpi4py import MPI\nimport imp\n\nimp.load_source(\"melissa_api\", '@CMAKE_INSTALL_PREFIX@/lib/melissa_api.py')\nimport melissa_api\n\nheat_utils = np.ctypeslib.load_library('libheat_utils','@CMAKE_INSTALL_PREFIX@/share/melissa/examples/heat_example_base/lib/libheat_utils.so')\nA_array = c_double * 3\nA = A_array()\n\n# The program takes at least one parameter: the initial temperature\nnarg = len(sys.argv)\nif (narg < 3):\n print(\"Missing parameter\")\n exit()\n# The initial temperature is stored in param[0]\n# The four next optional parameters are the boundary temperatures\nparam_array = c_double * 5\npyparam = []\nfor i in range(5):\n if (narg > i+1):\n pyparam.append(float(sys.argv[i+1]))\n else:\n pyparam.append(0)\nparam = param_array(pyparam[0],pyparam[1],pyparam[2],pyparam[3],pyparam[4])\ntemp = c_double(pyparam[0])\n\n# The MPI communicator, process rank and communicator size\nappnum = MPI.COMM_WORLD.Get_attr(MPI.APPNUM)\ncomm = MPI.COMM_WORLD.Split(appnum, MPI.COMM_WORLD.Get_rank())\nme = c_int(comm.Get_rank())\nNP = c_int(comm.Get_size())\ni1 = c_int(0)\niN = c_int(0)\n\n# Init timer\nt1 = time.time()\n\n# Neighbour ranks\nnext = c_int(me.value+1)\nprevious = c_int(me.value-1)\n\nif (next.value == NP.value):\n next = c_int(MPI.PROC_NULL)\nif (previous.value == -1):\n previous = c_int(MPI.PROC_NULL)\n\nnx = c_int(100) # x axis grid subdivisions\nny = c_int(100) # y axis grid subdivisions\nlx = c_double(10.0) # x length\nly = c_double(10.0) # y length\nd = c_double(1.0) # diffusion coefficient\nt = c_double(0.0)\ndt = c_double(0.01) # timestep value\nnmax = c_int(100) # number of timesteps\ndx = c_double(lx.value/(nx.value+1)) # x axis step\ndy = c_double(ly.value/(ny.value+1)) # y axis step\nepsilon = c_double(0.0001) # conjugated gradient precision\nn = c_int(nx.value*ny.value) # number of cells in the drid\n\n# work repartition over the MPI processes\n# i1 and in: first and last global cell indices atributed to this process\nheat_utils.load(byref(me), byref(n), byref(NP), byref(i1), byref(iN))\n\n# local number of cells\nvect_size = iN.value-i1.value+1\n\n# initialization\nfield_name = \"heat1\"\nheat_array = c_double * vect_size\nU = heat_array()\nF = heat_array()\n# we will solve Au=F\nheat_utils.init(U,\n byref(i1),\n byref(iN),\n byref(dx),\n byref(dy),\n byref(nx),\n byref(lx),\n byref(ly),\n byref(temp))\n# init A (tridiagonal matrix):\nheat_utils.filling_A(byref(d),\n byref(dx),\n byref(dy),\n byref(dt),\n byref(nx),\n byref(ny),\n A) # fill A\n\nmelissa_api.melissa_init (field_name, vect_size, comm);\n\n# main loop:\nfor i in range(nmax.value):\n t = c_double(t.value + dt.value)\n # filling F (RHS) before each iteration:\n heat_utils.filling_F(byref(nx),\n byref(ny),\n U,\n byref(d),\n byref(dx),\n byref(dy),\n byref(dt),\n 
byref(t),\n F,\n byref(i1),\n byref(iN),\n byref(lx),\n byref(ly),\n param)\n # conjugated gradient to solve Au = F.\n heat_utils.conjgrad(A,\n F,\n U,\n byref(nx),\n byref(ny),\n byref(epsilon),\n byref(i1),\n byref(iN),\n byref(NP),\n byref(me),\n byref(next),\n byref(previous),\n byref(c_int(comm.py2f())))\n # The result is U\n melissa_api.melissa_send (field_name, U);\n\nmelissa_api.melissa_finalize ();\n\n# end timer\nt2 = time.time()\nprint(\"Calcul time: \"+str(t2-t1)+\" sec\\n\");\n"
] | [
[
"numpy.ctypeslib.load_library"
]
] |
HarshCasper/jina | [
"97f9e97a4a678a28bdeacbc7346eaf7bbd2aeb89"
] | [
"tests/unit/drivers/test_helper.py"
] | [
"import random\n\nimport numpy as np\nimport pytest\n\nfrom jina import Document, DocumentSet\nfrom jina.proto import jina_pb2\nfrom jina.types.message import Message\nfrom jina.types.ndarray.generic import NdArray\n\n\[email protected](scope='function')\ndef document():\n with Document() as doc:\n doc.text = 'this is text'\n doc.tags['id'] = 'id in tags'\n doc.tags['inner_dict'] = {'id': 'id in inner_dict'}\n with Document() as chunk:\n chunk.text = 'text in chunk'\n chunk.tags['id'] = 'id in chunk tags'\n doc.chunks.add(chunk)\n return doc\n\n\[email protected](\n 'proto_type', ['float32', 'float64', 'uint8']\n)\[email protected](10)\ndef test_array_protobuf_conversions(proto_type):\n random_array = np.random.rand(random.randrange(0, 50), random.randrange(0, 20)).astype(proto_type)\n d = NdArray()\n d.value = random_array\n np.testing.assert_almost_equal(d.value, random_array)\n\n\[email protected](\n 'quantize, proto_type', [('fp16', 'float32'), ('fp16', 'float64'), ('uint8', 'uint8')],\n)\[email protected](10)\ndef test_array_protobuf_conversions_with_quantize(quantize, proto_type):\n random_array = np.random.rand(random.randrange(0, 50), random.randrange(0, 20)).astype(proto_type)\n d = NdArray(quantize=quantize)\n d.value = random_array\n np.testing.assert_almost_equal(d.value, random_array, decimal=2)\n\n\ndef test_pb_obj2dict(document):\n res = document.get_attrs('text', 'tags', 'chunks')\n assert res['text'] == 'this is text'\n assert res['tags']['id'] == 'id in tags'\n assert res['tags']['inner_dict']['id'] == 'id in inner_dict'\n rcs = list(res['chunks'])\n assert len(rcs) == 1\n assert isinstance(rcs[0], Document)\n assert rcs[0].text == 'text in chunk'\n assert rcs[0].tags['id'] == 'id in chunk tags'\n\n\ndef test_add_route():\n r = jina_pb2.RequestProto()\n r.control.command = jina_pb2.RequestProto.ControlRequestProto.IDLE\n msg = Message(None, r, pod_name='test1', identity='sda')\n msg.add_route('name', 'identity')\n assert len(msg.envelope.routes) == 2\n assert msg.envelope.routes[1].pod == 'name'\n assert msg.envelope.routes[1].pod_id == 'identity'\n\n\ndef test_extract_docs():\n d = Document()\n\n contents, docs_pts = DocumentSet([d]).all_embeddings\n assert contents is None\n\n vec = np.random.random([2, 2])\n d.embedding = vec\n contents, docs_pts = DocumentSet([d]).all_embeddings\n np.testing.assert_equal(contents[0], vec)\n"
] | [
[
"numpy.testing.assert_almost_equal",
"numpy.random.random",
"numpy.testing.assert_equal"
]
] |
aymgal/lenstronomy | [
"870ba77851e7b83646fdf7c50d770e45c00500ee"
] | [
"test/test_LensModel/test_Profiles/test_convergence.py"
] | [
"__author__ = 'sibirrer'\n\n\nfrom lenstronomy.LensModel.Profiles.convergence import Convergence\n\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\n\n\nclass TestConvergence(object):\n \"\"\"\n tests the Gaussian methods\n \"\"\"\n def setup(self):\n self.profile = Convergence()\n self.kwargs_lens = {'kappa_ext': 0.1}\n\n def test_function(self):\n x = np.array([1])\n y = np.array([0])\n values = self.profile.function(x, y, **self.kwargs_lens)\n npt.assert_almost_equal(values[0], self.kwargs_lens['kappa_ext']/2, decimal=5)\n x = np.array([0])\n y = np.array([0])\n values = self.profile.function(x, y, **self.kwargs_lens)\n npt.assert_almost_equal(values[0], 0, decimal=5)\n\n x = np.array([2, 3, 4])\n y = np.array([1, 1, 1])\n values = self.profile.function(x, y, **self.kwargs_lens)\n npt.assert_almost_equal(values[0], 0.25, decimal=5)\n npt.assert_almost_equal(values[1], 0.5, decimal=5)\n\n def test_derivatives(self):\n x = np.array([1])\n y = np.array([2])\n f_x, f_y = self.profile.derivatives(x, y, **self.kwargs_lens)\n npt.assert_almost_equal(f_x[0], 0.1, decimal=5)\n npt.assert_almost_equal(f_y[0], 0.2, decimal=5)\n\n x = np.array([1, 3, 4])\n y = np.array([2, 1, 1])\n values = self.profile.derivatives(x, y, **self.kwargs_lens)\n npt.assert_almost_equal(values[0][0], 0.1, decimal=5)\n npt.assert_almost_equal(values[1][0], 0.2, decimal=5)\n\n def test_hessian(self):\n x = np.array([1])\n y = np.array([2])\n f_xx, f_xy, f_yx, f_yy = self.profile.hessian(x, y, **self.kwargs_lens)\n npt.assert_almost_equal(f_xx, 0.1, decimal=5)\n npt.assert_almost_equal(f_yy, 0.1, decimal=5)\n npt.assert_almost_equal(f_xy, 0, decimal=5)\n npt.assert_almost_equal(f_yx, 0, decimal=5)\n\n x = np.array([1,3,4])\n y = np.array([2,1,1])\n values = self.profile.hessian(x, y, **self.kwargs_lens)\n npt.assert_almost_equal(values[0], 0.1, decimal=5)\n npt.assert_almost_equal(values[3], 0.1, decimal=5)\n npt.assert_almost_equal(values[1], 0, decimal=5)\n\n\nif __name__ == '__main__':\n pytest.main()\n"
] | [
[
"numpy.testing.assert_almost_equal",
"numpy.array"
]
] |
quyuanhang/match_in_chat | [
"a546c2430c88511ace4dc2e8b0d20f3b7ae3449c"
] | [
"src/utils/ShuffleData.py"
] | [
"import sys\nimport numpy as np\n\ndef load_data(fp, apd=None):\n with open(fp) as f:\n data = f.read().strip().split('\\n')\n data = [x.strip().split('\\001') for x in data]\n if apd:\n for d in data:\n d.append(apd)\n return data\n\ndef shuffle(data):\n n = len(data)\n permutation = np.random.permutation(n)\n data = [data[i] for i in permutation]\n return data\n\ndef save_data(fp, data):\n with open(fp, 'w') as f:\n for d in data:\n f.write('\\001'.join(d))\n f.write('\\n')\n return\n\nif __name__ == '__main__':\n train_neg = load_data('../Data/interview_split/train/interview_split.negative', '0')\n train_posi = load_data('../Data/interview_split/train/interview_split.positive', '1')\n train = shuffle(train_neg + train_posi)\n print('num of train data', len(train))\n\n # test_neg = load_data('../Data/interview_split/test/interview_split.negative', '0')\n # test_posi = load_data('../Data/interview_split/test/interview_split.positive', '1')\n test_neg = load_data('../Data/add/add.negative', '0')\n test_posi = load_data('../Data/add/add.positive', '1')\n test = shuffle(test_neg + test_posi)\n print('num of test data', len(test))\n\n train_pair = set([(d[0], d[3]) for d in train])\n test = [d for d in test if (d[0], d[3]) not in train_pair]\n print('num of distinct train', len(train_pair))\n print('num of clean test', len(test))\n\n save_data('data/interview.train', train)\n save_data('data/interview.test', test)\n\n\n\n"
] | [
[
"numpy.random.permutation"
]
] |
lchenat/TSA | [
"661266ba16e06f63962b306a7c30d25f37920c2d"
] | [
"deep_rl/utils/torch_utils.py"
] | [
"#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nfrom .config import *\nimport torch\nimport os\nimport random\nfrom torch import nn\nimport torch.nn.functional as F\n\ndef select_device(gpu_id):\n # if torch.cuda.is_available() and gpu_id >= 0:\n if gpu_id >= 0:\n Config.DEVICE = torch.device('cuda:%d' % (gpu_id))\n else:\n Config.DEVICE = torch.device('cpu')\n\ndef tensor(x, dtype=torch.float32):\n if torch.is_tensor(x):\n return x.type(dtype)\n x = torch.tensor(x, device=Config.DEVICE, dtype=dtype)\n return x\n\ndef is_cuda(x):\n if isinstance(x, nn.Module):\n return all([p.is_cuda for p in x.parameters()])\n return x.is_cuda\n\ndef tensor_dict(d, dtype=torch.float32):\n return {k: tensor(v, dtype=dtype) for k, v in d.items()}\n\ndef range_tensor(end):\n return torch.arange(end).long().to(Config.DEVICE)\n\ndef to_np(t):\n if torch.is_tensor(t):\n return t.cpu().detach().numpy()\n return t\n\ndef random_seed(seed=None):\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(np.random.randint(int(1e6)))\n\ndef set_one_thread():\n os.environ['OMP_NUM_THREADS'] = '1'\n os.environ['MKL_NUM_THREADS'] = '1'\n torch.set_num_threads(1)\n\ndef huber(x, k=1.0):\n return torch.where(x.abs() < k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))\n\ndef epsilon_greedy(epsilon, x):\n if len(x.shape) == 1:\n return np.random.randint(len(x)) if np.random.rand() < epsilon else np.argmax(x)\n elif len(x.shape) == 2:\n random_actions = np.random.randint(x.shape[1], size=x.shape[0])\n greedy_actions = np.argmax(x, axis=-1)\n dice = np.random.rand(x.shape[0])\n return np.where(dice < epsilon, random_actions, greedy_actions)\n\ndef sync_grad(target_network, src_network):\n for param, src_param in zip(target_network.parameters(), src_network.parameters()):\n param._grad = src_param.grad.clone()\n\ndef diag_gt(score_matrix):\n assert score_matrix.dim() == 2, 'score matrix needs dim = 2.'\n return torch.LongTensor(range(score_matrix.size(0))).to(score_matrix.device)\n\ndef batch_linear(input, weight, bias=None):\n \"\"\" input: (N, D), weight: (N, D, H), bias: (N, H) \"\"\"\n if bias is not None:\n return torch.bmm(input.unsqueeze(1), weight).squeeze(1) + bias\n else:\n return torch.bmm(input.unsqueeze(1), weight).squeeze(1)\n\nclass one_hot:\n # input: LongTensor of any shape\n # output one dim more, with one-hot on new dim\n @staticmethod\n def encode(indices, dim):\n encodings = torch.zeros(*indices.shape, dim).to(indices.device)\n encodings.scatter_(-1, indices.view(*indices.shape, 1), 1)\n return encodings\n\n # input: one_hot of any shape, last dim is one hot\n # output: indices of that shape\n @staticmethod\n def decode(encodings):\n _, indices = encodings.max(dim=-1)\n return indices\n\n### optimizer ###\nclass VanillaOptimizer:\n def __init__(self, params, opt, grad_clip=None):\n self.params = params\n self.opt = opt # params already passed in\n self.grad_clip = grad_clip\n\n def step(self, loss, retain_graph=False):\n self.opt.zero_grad()\n loss.backward(retain_graph=retain_graph)\n if self.grad_clip:\n nn.utils.clip_grad_norm_(self.params, self.grad_clip)\n self.opt.step()\n\n# update the first / second params using the first / second opt with freq_list[0/1] times before switching\nclass AlternateOptimizer:\n def __init__(self, params_list, opt_list, 
freq_list, grad_clip):\n self.params_list = params_list\n self.opt_list = opt_list\n self.freq_list = freq_list\n self.grad_clip = grad_clip\n self.cur = 0 # current parameter to update\n self.t = 0 # count how many times the current parameter has been update\n \n def step(self, loss, retain_graph=False):\n opt = self.opt_list[self.cur]\n opt.zero_grad()\n loss.backward(retain_graph=retain_graph)\n nn.utils.clip_grad_norm_(self.params_list[self.cur], self.grad_clip)\n opt.step()\n self.t += 1\n if self.t >= self.freq_list[self.cur]:\n self.t = 0\n self.cur = 1 - self.cur\n\n# https://gist.github.com/yzh119/fd2146d2aeb329d067568a493b20172f\nclass gumbel_softmax:\n @staticmethod\n def sample_gumbel(shape, eps=1e-20):\n U = torch.rand(shape)\n return -torch.log(-torch.log(U + eps) + eps)\n\n @staticmethod\n def soft_sample(logits, temperature):\n y = logits + gumbel_softmax.sample_gumbel(logits.size()).to(logits.device)\n return F.softmax(y / temperature, dim=-1)\n\n @staticmethod\n def hard_sample(logits, temperature):\n y = gumbel_softmax.soft_sample(logits, temperature)\n ind = y.argmax(dim=-1)\n y_hard = one_hot.encode(ind, logits.size(-1))\n return (y_hard - y).detach() + y\n\nclass relaxed_Bernolli:\n @staticmethod\n def sample_logit(shape, eps=1e-20):\n U = torch.rand(shape)\n return torch.log(U + eps) - torch.log(1 - U + eps)\n\n @staticmethod\n def sample(logits):\n return logits + relaxed_Bernolli.sample_logit(logits.size()).to(logits.device)\n \n @staticmethod\n def soft_sample(logits, temperature):\n return F.sigmoid(relaxed_Bernolli.sample(logits) / temperature)\n\n @staticmethod\n def hard_sample(logits, temperature):\n y = relaxed_Bernolli.soft_sample(logits, temperature)\n y_hard = (y > 0.5).type(y.dtype)\n return (y_hard - y).detach() + y\n\n\nclass ListModule(nn.Module):\n def __init__(self, *args):\n super(ListModule, self).__init__()\n idx = 0\n for module in args:\n self.add_module(str(idx), module)\n idx += 1\n\n def __getitem__(self, idx):\n if idx < 0 or idx >= len(self._modules):\n raise IndexError('index {} is out of range'.format(idx))\n it = iter(self._modules.values())\n for i in range(idx):\n next(it)\n return next(it)\n\n def __iter__(self):\n return iter(self._modules.values())\n\n def __len__(self):\n return len(self._modules)\n"
] | [
[
"torch.zeros",
"torch.device",
"torch.rand",
"torch.arange",
"torch.nn.utils.clip_grad_norm_",
"torch.is_tensor",
"torch.tensor",
"torch.nn.functional.softmax",
"torch.log",
"torch.set_num_threads"
]
] |
Tiexin-RS/segment-with-nn | [
"f008f436e2fb3dc7a32d58dcf8bd45b5c5d8aed9"
] | [
"train/manual_infer/infer_deeplab.py"
] | [
"import logging\nimport tensorflow as tf\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nfrom segelectri.loss_metrics.loss import FocalLoss, LovaszLoss, DiceLoss, BoundaryLoss\nfrom segelectri.loss_metrics.metrics import MeanIou\n\n\ndef read_image(file_name, resize=True):\n img = tf.io.read_file(filename=file_name)\n img = tf.io.decode_image(img)\n return img\n\n\ndef save_image(save_data, save_path):\n plt.imshow(save_data)\n plt.colorbar()\n plt.savefig(save_path, dpi=300)\n\n\ndef get_label(label_path, resize=True):\n label_data = read_image(label_path, resize)\n label_data = tf.image.resize(label_data, [224, 224])\n label_data = label_data[:, :, 1]\n return label_data\n\n\ndef get_pred(seg_model, data_path, resize=True):\n ori_data = read_image(data_path, resize)\n ori_data = tf.cast(tf.reshape(ori_data,\n (-1, 1024, 1024, 3)), tf.float32)\n pred_data = seg_model.predict(ori_data)\n pred_data = tf.argmax(pred_data, axis=-1)\n if resize:\n pred_data = tf.reshape(pred_data, (224, 224))\n else:\n pred_data = tf.reshape(pred_data, (1024, 1024))\n return pred_data\n\n\ndef show_label(label_path, label_save_path, resize=True):\n label_data = get_label(label_path, resize)\n save_image(label_data, label_save_path)\n\n\ndef show_pred(seg_model, data_path, data_save_path, resize=True):\n pred_data = get_pred(seg_model, data_path, resize)\n save_image(pred_data, data_save_path)\n\n\ndef show_meaniou(seg_model, resize=True):\n m = MeanIou(num_classes=4)\n label_p = Path('/opt/dataset/tr3_cropped/label/')\n original_p = Path('/opt/dataset/tr3_cropped/data/')\n label_list = list(sorted(label_p.glob(\"*.png\")))\n original_list = list(sorted(original_p.glob(\"*.png\")))\n for label_path, original_path in zip(label_list, original_list):\n label_data = get_label(str(label_path), resize)\n pred_data = get_pred(seg_model, str(original_path), resize)\n label_data = tf.cast(label_data, tf.int64)\n pred_data = tf.cast(pred_data, tf.int64)\n label_data = tf.one_hot(label_data, 4)\n pred_data = tf.one_hot(pred_data, 4)\n m.update_state(pred_data, label_data)\n logging.info('iou is %s', m.result().numpy())\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n label_path = '/opt/dataset/tr3_cropped/label/1.png'\n label_save_path = 'img/label/img_1_resize.png'\n data_path = '/opt/dataset/tr3_cropped/data/1.png'\n data_save_path = 'img/deeplab/tr3/cross/img1_ep20.png'\n model_path = '../train_deeplab/exp/43_unfreeze/saved_model'\n seg_model = tf.keras.models.load_model(filepath=model_path,\n custom_objects={\n 'MeanIou': MeanIou,\n 'FocalLoss': FocalLoss,\n 'LovaszLoss': LovaszLoss,\n 'DiceLoss': DiceLoss,\n 'BoundaryLoss': BoundaryLoss\n })\n # show_label(label_path, label_save_path)\n show_pred(seg_model, data_path, data_save_path)\n # show_meaniou(seg_model=seg_model)\n"
] | [
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.savefig",
"tensorflow.io.read_file",
"tensorflow.argmax",
"tensorflow.io.decode_image",
"tensorflow.one_hot",
"tensorflow.reshape",
"tensorflow.keras.models.load_model",
"tensorflow.image.resize",
"tensorflow.cast",
"matplotlib.pyplot.imshow"
]
] |
mmaguero/lang_detection | [
"edba3d8aab9c16568c338057fbb5372daf279bcc"
] | [
"src/utils/utility.py"
] | [
"#utils.py\n\nimport pandas as pd\nimport numpy as np\nimport nltk\nfrom nltk.tokenize import RegexpTokenizer\n\n\ndef intersecting_str(x,y,method='contains'):\n '''\n Set (per Guarani) with the top M most frequent words or dictionary. Then intersect|contains your text with each set. The set with the highest number of intersections|coincides will be your detected language: Guarani.\n x = text convert to set ... of words to infer language (i.e, tweets)\n y = file convert to set ... of words of the language to infer (i.e., Guarani dictionary)\n method = intersection|contains\n return = tuple() of number of x in y words, and number of x words, also the match words are given\n finally = with the tuple... percent: divide the number of words by the total number of words in the text, or\n count: take those with more than X words in the dictionary\n '''\n # convert x (text) to set \n tokenizer = RegexpTokenizer(r\"(\\w+\\'\\w?)|(\\w+)\") # alphabetic characters only + '...\n words = tokenizer.tokenize(str(x)) # return tuples because \"|\" #word_tokenize(str(x)) #set(str(x).lower().split()) \n x = set([t[0] for t in words]+[t[1] for t in words])\n # convert y (file) to set\n #y = set(readFile(y))\n #\n if method=='contains':\n # prepare set\n try:\n xnp = np.array(x)\n xpd = pd.DataFrame(x)\n xpd.columns=[\"col\"]\n gn=xpd[xpd.col.str.contains('|'.join(y))].reset_index() # partial match: e.g., \"omba'apo\" contains \"apo\" = TRUE\n gn=gn.col.tolist()\n except:\n gn=list()\n elif method=='intersection':\n gn = x.intersection(y) # strict match: e.g., \"omba'apo\" intersect \"apo\" = FALSE\n\n match = set(list(gn))\n return len(gn), len(x), match\n\n\ndef readFile(fileName):\n fileObj = open(fileName, \"r\") #opens the file in read mode\n words = [line.split(\",\") for line in fileObj.read().splitlines()] # puts the file into an array\n merged_list = []\n for l in words: # an array from the list of arrays: syn/n-grams comma separated support\n merged_list += l\n fileObj.close()\n return [word for word in merged_list if len(word)>2]\n\n\n"
] | [
[
"pandas.DataFrame",
"numpy.array"
]
] |
TheDebbio/srt-single-dish-tools | [
"a01059f888a55d4b3c9d0768d61c58ae005961ca"
] | [
"srttools/tests/test_read_config.py"
] | [
"# -*- coding: utf-8 -*-\n\n\n\nimport numpy as np\n\nfrom srttools.read_config import read_config\nimport os\nimport astropy.units as u\n\n\nclass TestConfig(object):\n @classmethod\n def setup_class(cls):\n cls.curdir = os.path.abspath(os.path.dirname(__file__))\n cls.datadir = os.path.join(cls.curdir, 'data')\n\n def test_read_config(self):\n \"\"\"Test that config file are read.\"\"\"\n\n fname = os.path.join(self.datadir, 'test_config.ini')\n\n config = read_config(fname)\n\n np.testing.assert_almost_equal(config['pixel_size'].to(u.rad).value,\n np.radians(0.5 / 60))\n assert config['interpolation'] == 'spline'\n\n def test_read_incomplete_config(self):\n \"\"\"Test that config file are read.\"\"\"\n fname = os.path.join(self.datadir, 'test_config_incomplete.ini')\n\n config = read_config(fname)\n\n np.testing.assert_almost_equal(config['pixel_size'].to(u.rad).value,\n np.radians(1 / 60))\n assert config['interpolation'] == 'linear'\n"
] | [
[
"numpy.radians"
]
] |
wamiq-reyaz/facade-segmentation | [
"49112ef302108efd9813e4978d17935ee472c120"
] | [
"pyfacades/util/util.py"
] | [
"import os\n\nimport numpy as np\n\n\ndef softmax(a, axis=0):\n a = np.exp(a - a.max(axis=axis))\n a /= a.max(axis=axis)\n return a\n\n\ndef channels_first(image):\n return image.transpose(2, 0, 1)\n\n\ndef channels_last(image):\n return image.transpose(1, 2, 0)\n\n\ndef colorize(labels, colors):\n result = np.zeros(labels.shape + (3,), dtype=np.uint8)\n if not isinstance(colors, dict):\n colors = {i: colors[i] for i in range(len(colors))}\n rgb = colors.values()\n indices = colors.keys()\n for i in range(len(indices)):\n mask = labels == indices[i]\n color = rgb[i]\n result[mask, 0] = color[0]\n result[mask, 1] = color[1]\n result[mask, 2] = color[2]\n return result\n\n\ndef replace_ext(f, e):\n return os.path.splitext(f)[0] + e\n\n\ndef find_files(dir, pattern):\n import fnmatch\n import os\n\n matches = []\n for root, dirnames, filenames in os.walk(dir):\n for filename in fnmatch.filter(filenames, pattern):\n matches.append(os.path.join(root, filename))\n\n return matches"
] | [
[
"numpy.zeros"
]
] |
lcispro/FlexTensor | [
"329256e44669f36f62c72ac37d8849a841e2c007"
] | [
"flextensor/test/test_tvm_expr/grad/te-mse_loss-case1.py"
] | [
"import tvm\nimport numpy as np \nimport torch\n\nbatch_size = 3\nnum_classes = 5\nshape_size = [batch_size, num_classes]\ndtype = \"float32\"\nltype = \"int64\"\n\nA = tvm.te.placeholder(shape_size, dtype=dtype, name=\"A\", requires_grad=True)\ntargets = tvm.te.placeholder(shape_size, dtype=dtype, name=\"targets\", requires_grad=False)\nn = tvm.te.reduce_axis([0, A.shape[0]], name=\"n\")\nk = tvm.te.reduce_axis([0, A.shape[1]], name=\"k\")\nloss = tvm.te.compute([1], lambda i: tvm.te.sum(\n (A[i + n, k]-targets[n, k])*(A[i + n, k]-targets[n, k]), axis=[n, k]), name=\"mse\", requires_grad=True)\n\ndloss = tvm.te.placeholder([1], dtype=dtype, name=\"dloss\")\ndA, = tvm.te.mygradient(loss, [A], dloss)\n\ns = tvm.te.create_schedule([loss.op, dA.op])\n\nprint(tvm.lower(s, [A, targets, loss, dloss, dA], simple_mode=True))\n\nfunc = tvm.build(s, [A, targets, loss, dloss, dA], target=\"llvm\")\n\nA_np = np.random.uniform(-10, 10, shape_size).astype(dtype)\ndA_np = np.zeros([batch_size, num_classes]).astype(dtype) * 0 + 1\nlabels_np = np.random.randint(0, num_classes, [batch_size]).astype(ltype)\ntargets_np = np.zeros([batch_size, num_classes]).astype(dtype)\nfor i in range(batch_size):\n targets_np[i][labels_np[i]] = 1.0\nloss_np = np.zeros([1]).astype(dtype)\ndloss_np = np.random.uniform(-1, 1, [1]).astype(dtype) * 0 + 1\n\nctx = tvm.context(\"llvm\", 0)\nA_tvm = tvm.nd.array(A_np, ctx)\ndA_tvm = tvm.nd.array(dA_np, ctx)\ntargets_tvm = tvm.nd.array(targets_np, ctx)\nloss_tvm = tvm.nd.array(loss_np, ctx)\ndloss_tvm = tvm.nd.array(dloss_np, ctx)\n\nfunc(A_tvm, targets_tvm, loss_tvm, dloss_tvm, dA_tvm)\n\nprint(\"loss_tvm\", loss_tvm)\nprint(\"dA_tvm\", dA_tvm)\n\n# =======>\n# compare the results with pytorch\nA_torch = torch.tensor(A_np, requires_grad=True)\ntargets_torch = torch.tensor(targets_np)\nloss_torch = torch.nn.functional.mse_loss(A_torch, targets_torch, reduction=\"sum\")\nprint(\"loss_pytorch\", loss_torch.detach().numpy())\nloss_torch.backward()\nprint(\"dA_pytorch\", A_torch.grad.numpy())\ntvm.testing.assert_allclose(dA_tvm.asnumpy(), A_torch.grad.numpy(), rtol=1e-30, atol=1e-30)\nprint(\"Compare to PyTorch success!\")\n"
] | [
[
"numpy.zeros",
"torch.nn.functional.mse_loss",
"numpy.random.uniform",
"numpy.random.randint",
"torch.tensor"
]
] |
nemcekj/eeict2022 | [
"155caace09351b5bad824d85653debe12c32cd75"
] | [
"trainUnet.py"
] | [
"from torch.utils import data\nimport torch\nimport torchvision\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport pickle\nimport json\nimport matplotlib.pyplot as plt\nfrom skimage.transform import resize\nimport pandas as pd\n\n\n\nfrom config import Config\nfrom myDataset_Unet import MyDataset\nfrom utils.log import Log\nfrom utils.losses import wce\nfrom utils.log import get_lr\nfrom read_filenames_and_labels import read_filenames_and_labels\n\n\n\nif __name__ == '__main__':\n\n device = torch.device(\"cuda:0\")\n \n \n # if not os.path.exists(Config.tmp_save_dir):\n # os.mkdir(Config.tmp_save_dir)\n\n df = pd.read_csv(r'D:\\Konferencie\\EEICT2022\\Data\\Unet\\train.csv',delimiter=',')\n \n file_names_train = df['slicePath'].tolist()\n masks_train = df['maskPath'].to_list()\n Pat_ind_train = df['PatientNumber'].to_numpy()\n \n \n df = pd.read_csv(r'D:\\Konferencie\\EEICT2022\\Data\\Unet\\val.csv',delimiter=',')\n \n \n file_names_test = df['slicePath'].tolist()\n masks_test = df['maskPath'].to_list()\n \n \n \n loader = MyDataset(split='train',file_names=file_names_train,mask_names=masks_train)\n trainloader= data.DataLoader(loader, batch_size=Config.train_batch_size, num_workers=Config.train_num_workers, shuffle=True,drop_last=True)\n \n loader = MyDataset(split='valid',file_names=file_names_test,mask_names=masks_test)\n validLoader= data.DataLoader(loader, batch_size=Config.test_batch_size, num_workers=Config.test_num_workers, shuffle=True,drop_last=True)\n \n \n model = Config.net(input_size=3, output_size=len(w_positive)).to(device)\n \n \n optimizer = optim.Adam(model.parameters(),lr=Config.init_lr ,betas= (0.9, 0.999),eps=1e-8,weight_decay=1e-8)\n scheduler=optim.lr_scheduler.MultiStepLR(optimizer, milestones=Config.lr_steps, gamma=Config.gamma, last_epoch=-1)\n \n log = Log(names=['loss','acc'])\n \n for epoch_num in range(Config.max_epochs):\n \n \n N = len(trainloader)\n for it, (batch,lbls) in enumerate(trainloader):\n \n model.train()\n batch=batch.to(device) \n lbls=lbls.to(device)\n \n res,res2,heatmap,heatmap2 = model(batch)\n \n res = torch.sigmoid(res)\n loss = wce(res,lbls,w_positive_tensor,w_negative_tensor)\n \n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n loss=loss.detach().cpu().numpy()\n res=res.detach().cpu().numpy()\n lbls=lbls.detach().cpu().numpy()\n\n acc = np.mean(((res>0.5)==(lbls>0.5)).astype(np.float32))\n \n log.append_train([loss,acc])\n \n \n if (it % int(N/Config.plots_in_epoch) == 0) and (it != 0):\n model.eval() \n with torch.no_grad():\n for itt, (batch,lbls) in enumerate(validLoader): \n\n batch=batch.to(device)\n lbls=lbls.to(device)\n \n res,res2,heatmap,heatmap2 = model(batch)\n \n res = torch.sigmoid(res)\n loss = wce(res,lbls,w_positive_tensor,w_negative_tensor)\n \n \n loss=loss.detach().cpu().numpy()\n res=res.detach().cpu().numpy()\n lbls=lbls.detach().cpu().numpy()\n \n acc = np.mean(((res>0.5)==(lbls>0.5)).astype(np.float32))\n \n \n log.append_valid([loss,acc])\n \n\n log.save_and_reset()\n \n info= str(epoch_num) + '_' + str(it) + '_' + str(get_lr(optimizer)) + '_train_' + str(log.train_logs['acc'][-1]) + '_valid_' + str(log.valid_logs['acc'][-1]) \n \n print(info)\n \n tmp_file_name= Config.tmp_save_dir + os.sep +Config.model_name + info\n log.plot(save_name = tmp_file_name + '_plot.png')\n \n \n batch = batch.detach().cpu().numpy()\n heatmap = heatmap.detach().cpu().numpy()\n heatmap2 = 
heatmap2.detach().cpu().numpy()\n \n for k in range(batch.shape[0]):\n res_tmp = res[k,0]\n lbl_tmp = lbls[k,0]\n img_tmp = batch[k,1,:,:]\n heatmap_tmp = heatmap[k,0,:,:]\n heatmap_tmp = resize(heatmap_tmp,img_tmp.shape)\n \n heatmap2_tmp = heatmap2[k,0,:,:]\n heatmap2_tmp = resize(heatmap2_tmp,img_tmp.shape)\n \n \n plt.figure(figsize=[6.4*3, 4.8*3])\n plt.subplot(131)\n plt.imshow(img_tmp)\n plt.title(str(k) + ' gt=' + str(lbl_tmp) + ' res=' + str(res_tmp))\n \n plt.subplot(132)\n plt.imshow(heatmap_tmp)\n \n plt.subplot(133)\n plt.imshow(heatmap2_tmp)\n \n plt.savefig(Config.tmp_save_dir + os.sep +Config.model_name + info + '_example_image' + str(k) + '.png')\n plt.show()\n plt.close()\n \n \n \n \n \n \n \n \n\n \n \n \n info= str(epoch_num) + '_' + str(get_lr(optimizer)) + '_train_' + str(log.train_logs['acc'][-1]) + '_valid_' + str(log.valid_logs['acc'][-1]) \n \n print(info)\n \n scheduler.step()\n\n\n\n tmp_file_name= Config.tmp_save_dir + os.sep +Config.model_name + info\n torch.save(model.state_dict(),tmp_file_name + '_model.pt')\n \n\n with open(tmp_file_name + '_log.pkl', 'wb') as f:\n pickle.dump(log, f)\n \n with open(tmp_file_name + '_config.pkl', 'wb') as f:\n pickle.dump(Config(), f)\n \n with open(tmp_file_name + 'filenames_and_lbls.json', 'w') as f:\n filenames_and_lbls = {'file_names_train':file_names_train,'labels_train':np.stack(labels_train,axis=0).tolist(),\n 'file_names_valid':file_names_valid,'labels_valid':np.stack(labels_valid,axis=0).tolist()}\n json.dump(filenames_and_lbls, f, indent=2) \n \n \n \n \n"
] | [
[
"torch.device",
"torch.sigmoid",
"torch.no_grad",
"matplotlib.pyplot.close",
"torch.optim.lr_scheduler.MultiStepLR",
"matplotlib.pyplot.figure",
"numpy.stack",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplot"
]
] |
wesleyktatum/chemical_properties_predictor | [
"0f9b6b3bbb634ef8cd7a66293dd5300da30829ac"
] | [
"chemicalize_scraper.py"
] | [
"import os\nimport sys\nfrom glob import glob\nimport pandas as pd\nimport zipfile\n\nfrom rdkit import Chem\nfrom rdkit.Chem import PandasTools\n\nclass ChemicalizeExtractor():\n \"\"\"\n \n \"\"\"\n def __init__(self, database_path):\n super().__init__()\n \n self.dbase_path = database_path\n self.database = None\n self.struct_handler = MolStructureHandler()\n\n self.xl_files = ['basic_properties', 'geometry', 'lipophilicity', 'names_and_identifiers',\n 'structural_properties'] #pKa, logD\n self.xl_arrays = ['pKa', 'logD']\n \n self.structure_file = 'structure.sdf'\n \n return\n\n \n def unzip_downloads(self, zipped_directory = None, extraction_path = None):\n \n #assumes files are in downloads\n if zipped_directory != None:\n zipped_path = zipped_directory\n else:\n zipped_path = 'C:\\\\Users\\\\tatum\\\\Downloads\\\\calculation-result*.zip'\n \n #default save path is Desktop\n if extraction_path != None:\n extr_path = extraction_path\n else:\n extr_path = 'C:\\\\Users\\\\tatum\\\\Desktop\\\\unzipped_dirs\\\\'\n \n #get list of zipped files and save the uncompressed versions\n zipped_dirs = glob(zipped_path)\n for zpd_dir in zipped_dirs:\n with zipfile.ZipFile(zpd_dir, 'r') as f:\n f.extractall(extr_path+zpd_dir[-19:-4])\n \n #get list of unzipped dirs containing chemicalize files\n self.dirs = glob(extr_path+'*')\n \n return\n \n \n def extract_xlsx(self):\n new_additions = []\n \n for unzp in self.dirs:\n molecule = [[],[],[]]\n mol_dict = {}\n for fl in self.xl_files:\n# print(fl)\n try:\n addition_df = pd.read_csv(unzp + '\\\\' + fl + '.csv')\n\n prop_names = addition_df['Property'].tolist()\n values = addition_df['Value'].tolist()\n units = addition_df['Unit'].tolist()\n\n molecule[0].extend(prop_names)\n molecule[1].extend(values)\n molecule[2].extend(units)\n \n except:\n print(unzp + '\\\\' + fl + '.csv does not exist')\n \n for i, prop in enumerate(molecule[0]):\n mol_dict[prop] = molecule[1][i]\n mol_dict[prop+' Unit'] = molecule[2][i]\n# print(mol_dict, '\\n')\n \n new_additions.append(mol_dict)\n \n# print(new_additions)\n new_mol_df = pd.DataFrame()\n for mol in new_additions:\n# print(mol)\n mol_df = pd.DataFrame.from_dict(mol, orient = 'index')\n mol_df = mol_df.transpose()\n# print(mol_df)\n new_mol_df = new_mol_df.append(mol_df, ignore_index = True)\n \n return new_mol_df\n \n \n def extract_xl_arrays(self):\n ph_dependent_df = pd.DataFrame()\n\n for unzp in self.dirs:\n \n mol_df = pd.Series()\n for fl in self.xl_arrays:\n try:\n if fl == 'pKa':\n raw_pka = pd.read_csv(unzp + '\\\\' + fl + '.csv')\n x, y = raw_pka.shape\n y = y-1 #w/o pH column, y now equals the # of microspecies\n pH_vals = raw_pka['pH'].tolist()\n raw_pka = raw_pka.drop('pH', axis = 1)\n\n #want to save all microspecies at each pH as a list, creating a list of lists\n pka_lists = []\n for row in raw_pka.iterrows():\n row_list = []\n\n for el in row[1]:\n row_list.append(el)\n\n pka_lists.append(row_list)\n\n mol_df['pH_vals'] = pH_vals\n mol_df['number_of_microspecies'] = y\n mol_df['pH_dependent_microspecies'] = pka_lists\n\n if fl == 'logD':\n raw_logD = pd.read_csv(unzp + '\\\\' + fl + '.csv')\n\n #want to save all logDs at each pH as a list\n logD_list = raw_logD['logD'].tolist()\n\n mol_df['logD'] = logD_list\n except:\n print(unzp + '\\\\' + fl + '.csv does not exist')\n \n ph_dependent_df = ph_dependent_df.append(mol_df, ignore_index = True)\n \n return ph_dependent_df\n \n \n \n \n def load_db(self):\n database = pd.read_excel(self.dbase_path)\n self.database = database\n return\n \n \n def 
save_to_db(self, new_mol_df):\n if self.database is not None:\n pass\n else:\n self.load_db\n \n #check for duplicates and save others\n duplicates = []\n \n #make sure database has InChI column\n if 'InChI' in self.database.columns:\n for inchi in new_mol_df['InChI']:\n if inchi in self.database['InChI'].values:\n print('Duplicate Compound:\\t', inchi)\n duplicates.append(inchi)\n else:\n pass\n \n else:\n pass\n \n new_mols = new_mol_df[~new_mol_df['InChI'].isin(duplicates)]\n self.database = self.database.append(new_mols, ignore_index = True)\n \n #for some reason, appending new mols can add columns to front of df. This removes them\n self.database = self.database.loc[:, ~self.database.columns.str.contains('Unnamed')]\n \n self.database.to_excel(self.dbase_path)\n \n return\n \n \n def extract_functional_groups(self, new_xl_df):\n smiles = new_xl_df['SMILES'].tolist()\n \n substructure_df = pd.DataFrame()\n \n for sm in smiles:\n mol = self.struct_handler.mol_from_smiles(sm)\n mol_df = self.struct_handler.extract_functional_groups(mol)\n substructure_df = substructure_df.append(mol_df, ignore_index = True)\n \n return substructure_df\n \n \n def extract(self, return_df = False):\n self.load_db()\n \n #These lines can be used for re-extracting from uncompressed files, otherwise leave them commented out\n# extr_path = 'C:\\\\Users\\\\tatum\\\\Desktop\\\\unzipped_dirs\\\\'\n# self.dirs = glob(extr_path+'*')\n \n #extract from the various types of files\n new_xl_df = self.extract_xlsx()\n new_ph_dep_df = self.extract_xl_arrays()\n new_funct_gr_df = self.extract_functional_groups(new_xl_df)\n \n new_mol_df = new_xl_df.join(new_ph_dep_df)\n new_mol_df = new_mol_df.join(new_funct_gr_df)\n \n self.save_to_db(new_mol_df)\n \n if return_df:\n return new_mol_df\n else:\n return\n \n \nclass MolStructureHandler():\n \"\"\"\n Class to load, plot, and analyze chemical structures. SMILES or InChI can be used, though SMILES IS \n PREFERRED as it more cleanly preserves molecular structure. 
RDKit is used to interpret the structure\n and for sub-structure matching.\n \n Sub-structure matching is currently only supported with SMILES strings\n \"\"\"\n \n def __init__(self, style = 'SMILES'):\n super().__init__()\n \n self.style = style\n \n #substructure patterns:\n self.functional_groups_smiles = {\n \"1,1,1-trifluoroethane\": \"CC(F)(F)F\",\n \"1,1'-biphenyl\": \"C1(C2=CC=CC=C2)=CC=CC=C1\",\n \"1H-indene\": \"C1(CC=C2)=C2C=CC=C1\",\n \"1H-pyrrole\": \"[NH]1CCCC1\",\n \"2-butyne\": \"CC#CC\",\n \"2-ethyl-1-butanol\": \"CCC(CC)CO\",\n \"2-methylpenta-2,3-diene\": \"CC=C=C(C)C\",\n \"(E)-1,2-dimethyldiazene\": \"C/N=N/C\",\n \"N,N-dimethylacetamide\": \"CC(N(C)C)=O\",\n \"N-methylpropan-2-imine\": \"C/C(C)=N/C\",\n \"(Z)-N,N,N'-trimethylacetimidamide\": \"C/C(N(C)C)=N/C\",\n \"acetic anydride\": \"CC(=O)OC(=O)C\",\n \"acyl bromide\": \"C(=O)Br\",\n \"acyl chloride\": \"C(=O)Cl\",\n \"acyl fluoride\": \"C(=O)F\",\n \"acyl iodide\": \"C(=O)I\",\n \"aldehyde\": \"CC=O\",\n \"amide\": \"C(=O)N\",\n \"amino\": \"*N\",\n \"azide\": \"C([N-][N+]#N)\",\n \"bicyclohexyl\": \"C1CCCCC1C1CCCCC1\",\n \"bromine\": \"Br\",\n \"but-1-ene\": \"CCC=C\",\n \"but-1-yne\": \"CCC#C\",\n \"carbon dioxide\": \"O=C=O\",\n \"carboxylic acid\": \"C(=O)O\",\n \"chlorine\": \"Cl\",\n \"chloromethyl methyl ether\": \"COCCl\",\n \"deuteroethane\": \"[2H][CH2]C\",\n \"dimethyl ether\": \"COC\",\n \"diethyl ether\": \"CCOCC\",\n \"diisopropyl ether\": \"CC(C)OC(C)C\",\n \"diazomethane\": \"C=[N+]=[N-]\",\n \"diammonium thiosulfate\": \"[NH4+].[NH4+].[O-]S(=O)(=O)[S-]\",\n \"enamine\": \"N\",\n \"ethane\": \"CC\",\n \"ethanethiol\": \"CCS\",\n \"ethanol\": \"CCO\",\n \"ethene\": \"C=C\",\n \"ether\": \"COC\",\n \"ester\": \"C(=O)OC\",\n \"fluorine\": \"F\",\n \"formaldehyde\": \"C=O\",\n \"hydrogen cyanide\": \"C#N\",\n \"hydroxide\": \"[OH-]\",\n \"hydroxyl amine\": \"NO\",\n \"ketone\": \"CC(=O)C\",\n \"methane\": \"C\",\n \"methanethiol\": \"CS\",\n \"methyl acetate\": \"CC(OC)=O\",\n \"methyl pyrrole\": \"CN1CCCC1\",\n \"methyl tert-butyl ether\": \"CC(C)(C)OC\",\n \"nitro\": \"[N+](=O)[O-]\",\n \"nitromethane\": \"C[N+]([O-])=O\",\n \"pentalene\": \"C12=CC=CC1=CC=C2\",\n \"perhydroisoquinoline\": \"N1CC2CCCC2CC1\",\n \"phenol\": \"OC1CCCCC1\",\n \"phenyl\": \"C=1(C=CC=CC1)\",\n \"primary alcohol\": \"O\",\n \"primary amine\": \"N\",\n \"propan-2-one\": \"CC(C)=O\",\n \"propanol\": \"CCC=O\",\n \"prop-1-ene\": \"CC=C\",\n \"prop-1-yne\": \"CC#C\",\n \"pyridine-n-oxide\": \"O=[N+]1CCCCC1\",\n \"secondary amine\": \"NC\",\n \"spiro[5.5]undecane\": \"C12(CCCCC1)CCCCC2\",\n \"sulfoxide\": \"S(=O)(=O)\",\n \"tetramethylammonium\": \"C[N+](C)(C)C\",\n \"thiol\": \"S\",\n \"thiosulfate\": \"OS(=O)(=S)O\",\n \"trimethylamine\": \"CN(C)C\",\n \"triphenylene\": \"C1(C=CC=C2)=C2C(C=CC=C3)=C3C4=C1C=CC=C4\",\n }\n\n self.ring_systems_smiles = {\n \"anthracene\": \"C12=CC=CC=C1C=C3C(C=CC=C3)=C2\",\n 'benzene': 'C1=CC=CC=C1',\n \"benzene thiol\": \"C1=CC=C(C=C1)S\",\n \"cyclobutadiene\": \"C1=CC=C1\",\n \"cyclobutane\": \"C1CCC1\",\n \"cycloheptane\": \"C1CCCCCC1\",\n \"cyclohexane\": \"C1CCCCC1\",\n \"cyclohexa-1,3-diene\": \"C1=CCCC=C1\",\n \"cyclohexa-1,4-diene\": \"C1=CCC=CC1\",\n \"cyclohexene\": \"C=1CCCCC=1\",\n \"cyclopentane\": \"C1CCCC1\",\n \"cyclopenta-1,3-diene\": \"C1=CCC=C1\",\n \"cyclopropane\": \"C1CC1\",\n \"cyclopropene\": \"C1=CC1\",\n 'furan': 'C1OC=CC=1',\n 'indane': 'C1=CC=CC(CCC2)=C12',\n 'indole': 'C12=C(C=CN2)C=CC=C1',\n \"naphthalene\": \"C12=CC=CC=C1C=CC=C2\",\n 'pyridine': 'C1=CC=NC=C1',\n 
'pyrrole': 'N1C=CC=C1',\n 'thiophene': 'S1C=CC=C1',\n\n }\n \n return\n \n \n def mol_from_smiles(self, smiles):\n# print(smiles)\n m = Chem.MolFromSmiles(smiles, sanitize = False)\n m.UpdatePropertyCache()\n Chem.SetHybridization(m)\n return m\n \n \n def numbered_strcture_from_smiles(self, smiles):\n mol = self.mol_from_smiles(smiles)\n atoms = mol.GetNumAtoms()\n for idx in range( atoms ):\n mol.GetAtomWithIdx( idx ).SetProp( 'molAtomMapNumber', str( mol.GetAtomWithIdx( idx ).GetIdx()))\n \n return mol\n \n \n def draw_aligned_structure_from_smiles(self, mol_smiles_string, template_smiles_string = None, ax=None):\n if not ax:\n f, ax = plt.subplots()\n \n if not template_smiles_string:\n template_smiles_string = mol_smiles_string\n\n #generate image of molecule \n m = Chem.MolFromSmiles(mol_smiles_string, sanitize=False)\n m.UpdatePropertyCache()\n Chem.SetHybridization(m)\n\n #generate sub-structure that is used for alignment\n t = Chem.MolFromSmiles(template_smiles_string, sanitize=False)\n t.UpdatePropertyCache()\n Chem.SetHybridization(t)\n Chem.AllChem.Compute2DCoords(t)\n\n #re-draw molecule using substructure alignment\n Chem.AllChem.GenerateDepictionMatching2DStructure(m, t)\n img = Chem.Draw.MolToImage(m)\n\n return ax.imshow(img, interpolation='bessel')\n \n \n def draw_aligned_structure_from_inchi(self, mol_inchi_string, template_inchi_string = None, ax=None):\n if not ax:\n f, ax = plt.subplots()\n \n if not template_inchi_string:\n template_inchi_string = mol_inchi_string\n\n #generate image of molecule \n m = Chem.inchi.MolFromInchi(mol_inchi_string, sanitize=False)\n m.UpdatePropertyCache()\n Chem.SetHybridization(m)\n\n #generate sub-structure that is used for alignment\n t = Chem.inchi.MolFromInchi(template_inchi_string, sanitize=False)\n t.UpdatePropertyCache()\n Chem.SetHybridization(t)\n Chem.AllChem.Compute2DCoords(t)\n\n #re-draw molecule using substructure alignment\n Chem.AllChem.GenerateDepictionMatching2DStructure(m, t)\n img = Chem.Draw.MolToImage(m)\n\n return ax.imshow(img, interpolation='bessel')\n \n \n def get_ring_systems(self, mol, includeSpiro=False):\n \"\"\"\n identifies all rings in molecule, but does not produce functional group flags. 
For\n that functionality, use self.get_ring_groups\n \"\"\"\n ri = mol.GetRingInfo()\n systems = []\n for ring in ri.AtomRings():\n ringAts = set(ring)\n nSystems = []\n for system in systems:\n nInCommon = len(ringAts.intersection(system))\n if nInCommon and (includeSpiro or nInCommon>1):\n ringAts = ringAts.union(system)\n else:\n nSystems.append(system)\n nSystems.append(ringAts)\n systems = nSystems\n return systems\n \n \n def count_ring_systems(self, mol):\n systems = self.get_ring_systems(mol)\n return len(systems)\n \n \n def get_functional_groups(self, mol, return_matches = False):\n funct_grp_matches = {}\n \n for group, pattern in self.functional_groups_smiles.items():\n funct_gr = Chem.MolFromSmiles(pattern)\n matches = mol.GetSubstructMatches(funct_gr)\n \n if return_matches:\n funct_grp_matches[group] = matches\n else:\n funct_grp_matches[group] = len(matches)\n \n return funct_grp_matches\n \n \n def get_ring_groups(self, mol, return_matches = False):\n ring_matches = {}\n \n for group, pattern in self.ring_systems_smiles.items():\n funct_gr = Chem.MolFromSmiles(pattern)\n matches = mol.GetSubstructMatches(funct_gr)\n \n if return_matches:\n ring_matches[group] = matches\n else:\n ring_matches[group] = len(matches)\n \n return ring_matches\n \n \n def get_stereochemistry(self, mol, return_matches = False):\n stereo_matches = {}\n \n matches = Chem.FindMolChiralCenters(mol, force = True, useLegacyImplementation = False)\n \n if return_matches:\n return matches\n else:\n return len(matches)\n \n \n def extract_functional_groups(self, mol, return_matches = False):\n \n #extract all functional groups and return results as dicts\n funct_grs = self.get_functional_groups(mol, return_matches = return_matches)\n rings = self.get_ring_groups(mol, return_matches = return_matches)\n stereos = self.get_stereochemistry(mol, return_matches = return_matches)\n \n #convert dicts to a single dataframe\n all_grs = {}\n for k, v in funct_grs.items():\n all_grs[k] = v\n for k, v in rings.items():\n all_grs[k] = v\n all_grs['stereo_centers'] = stereos\n \n substruct_df = pd.DataFrame.from_dict(all_grs, orient = 'index')\n substruct_df = substruct_df.transpose()\n \n return substruct_df"
] | [
[
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"pandas.read_excel",
"pandas.Series",
"pandas.read_csv"
]
] |
kyotoor/kuzushijiness | [
"eb2ed7c2fcd1cc55940059fe5b8fc509910bd2a1"
] | [
"preprocess_kuzushiji.py"
] | [
"import numpy as np\nimport glob\nimport cv2\nimport os\n\n# resize function\n# resize pic by adding margin\ndef resize(img):\n width = int(img.shape[1])\n height = int(img.shape[0])\n \n if width == height:\n return_img = img\n elif width > height:\n return_img = np.zeros((width, width))\n return_img[:,:] = 0\n margin = int((width - height) / 2)\n extra = (width - height) % 2\n return_img[margin+extra:width-margin, 0:width] = img[:,:]\n else:\n return_img = np.zeros((height, height))\n return_img[:,:] = 0\n margin = int((height - width) / 2)\n extra = (height - width) % 2\n return_img[0:height, margin+extra:height-margin] = img[:,:]\n \n return_img = cv2.resize(return_img, (64, 64))\n \n # return img_data\n return return_img\n \ndocument_path = './original_data/all/*'\nkmnist_aug_path = './processed_data/kuzushiji/'\ndocuments = glob.glob(document_path)\n\ncount = 0\n\nfor i, document in enumerate(documents):\n char_classes = glob.glob(document + '/characters/*')\n print(document)\n \n for j, char_class in enumerate(char_classes):\n samples = glob.glob(char_class + '/*')\n \n for k, sample in enumerate(samples):\n img = cv2.imread(sample, 0)\n _, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n img = cv2.bitwise_not(img)\n img = resize(img)\n\n u_index = 100\n \n for l in range(0, len(sample), 1):\n if sample[l] == 'U':\n u_index = l\n if l > u_index and sample[l] == '/':\n end_index = l\n break\n\n os.makedirs(kmnist_aug_path + sample[u_index:end_index], exist_ok = True)\n cv2.imwrite(kmnist_aug_path + sample[u_index:end_index] + '/' + str(count) + '.png', img)\n count = count + 1\n"
] | [
[
"numpy.zeros"
]
] |
CodingCat/mxnet | [
"658bad6521c0c32738908abd57b99cc265605b4d"
] | [
"tests/python/unittest/test_random.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport math\nimport itertools\nimport mxnet as mx\nfrom mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf\nimport numpy as np\nimport random as rnd\nfrom common import setup_module, with_seed, random_seed\nimport scipy.stats as ss\n\ndef same(a, b):\n return np.sum(a != b) == 0\n\ndef check_with_device(device, dtype):\n # The thresholds chosen for the tests are too loose. We will rely on the other tests to test the samples from the\n # generators.\n tol = 0.1\n symbols = [\n {\n 'name': 'normal',\n 'symbol': mx.sym.random.normal,\n 'ndop': mx.nd.random.normal,\n 'params': { 'loc': 10.0, 'scale': 0.5 },\n 'inputs': [ ('loc',[ [ 0.0, 2.5 ], [ -9.75, -7.0 ] ]) , ('scale',[ [ 1.0, 3.7 ], [ 4.2, 1.5 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64) - params['loc']), tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)\n ]\n },\n {\n 'name': 'uniform',\n 'symbol': mx.sym.random.uniform,\n 'ndop': mx.nd.random.uniform,\n 'params': { 'low': -1.5, 'high': 3.0 },\n 'inputs': [ ('low', [ [ 0.0, 2.5 ], [ -9.75, -1.0 ] ]) , ('high', [ [ 1.0, 3.7 ], [ 4.2, 10.5 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - (params['low'] + params['high']) / 2.0, tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(1.0 / 12.0) * (params['high'] - params['low']), tol)\n ]\n },\n {\n 'name': 'gamma',\n 'symbol': mx.sym.random.gamma,\n 'ndop': mx.nd.random.gamma,\n 'params': { 'alpha': 9.0, 'beta': 0.5 },\n 'inputs': [ ('alpha', [ [ 0.0, 2.5 ], [ 9.75, 11.0 ] ]) , ('beta', [ [ 1.0, 0.7 ], [ 0.5, 0.3 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['alpha'] * params['beta'], tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['alpha'] * params['beta'] ** 2), tol)\n ]\n },\n {\n 'name': 'exponential',\n 'symbol': mx.sym.random.exponential,\n 'ndop': mx.nd.random.exponential,\n 'params': { 'scale': 1.0/4.0 },\n 'inputs': [ ('scale', [ [ 1.0/1.0, 1.0/8.5 ], [ 1.0/2.7 , 1.0/0.5 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['scale'], tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - params['scale'], tol)\n ]\n },\n {\n 'name': 'poisson',\n 'symbol': mx.sym.random.poisson,\n 'ndop': mx.nd.random.poisson,\n 'params': { 'lam': 4.0 },\n 'inputs': [ ('lam', [ [ 25.0, 8.5 ], [ 2.7 , 0.5 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['lam'], tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['lam']), tol)\n ]\n },\n {\n 'name': 'neg-binomial',\n 'symbol': mx.sym.random.negative_binomial,\n 'ndop': 
mx.nd.random.negative_binomial,\n 'params': { 'k': 3, 'p': 0.4 },\n 'inputs': [ ('k', [ [ 3, 4 ], [ 5 , 6 ] ]) , ('p', [ [ 0.4 , 0.77 ], [ 0.5, 0.84 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['k'] * (1.0 - params['p']) / params['p'], tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['k'] * (1.0 - params['p']))/params['p'], tol)\n ]\n },\n {\n 'name': 'gen-neg-binomial',\n 'symbol': mx.sym.random.generalized_negative_binomial,\n 'ndop': mx.nd.random.generalized_negative_binomial,\n 'params': { 'mu': 2.0, 'alpha': 0.3 },\n 'inputs': [ ('mu', [ [ 2.0, 2.5 ], [ 1.3, 1.9 ] ]) , ('alpha', [ [ 1.0, 0.1 ], [ 0.2, 0.5 ] ]) ],\n 'checks': [\n ('mean', lambda x, params: np.mean(x.astype(np.float64)) - params['mu'], tol),\n ('std', lambda x, params: np.std(x.astype(np.float64)) - np.sqrt(params['mu'] + params['alpha'] * params['mu'] ** 2 ), tol)\n ]\n }\n\n ]\n\n # Create enough samples such that we get a meaningful distribution.\n shape = (500, 500)\n for symbdic in symbols:\n name = symbdic['name']\n ndop = symbdic['ndop']\n\n # check directly\n params = symbdic['params'].copy()\n params.update(shape=shape, dtype=dtype, ctx=device)\n mx.random.seed(128)\n ret1 = ndop(**params).asnumpy()\n mx.random.seed(128)\n ret2 = ndop(**params).asnumpy()\n assert same(ret1, ret2), \\\n \"ndarray test: `%s` should give the same result with the same seed\" % name\n\n for check_name, check_func, tol in symbdic['checks']:\n assert np.abs(check_func(ret1, params)) < tol, \"ndarray test: %s check for `%s` did not pass\" % (check_name, name)\n\n # check multi-distribution sampling\n params = {'shape': shape, 'dtype': dtype, 'ctx': device}\n params.update({k : mx.nd.array(v, ctx=device, dtype=dtype) for k, v in symbdic['inputs']})\n mx.random.seed(128)\n ret1 = ndop(**params).asnumpy()\n mx.random.seed(128)\n ret2 = ndop(**params).asnumpy()\n assert same(ret1, ret2), \\\n \"ndarray test: `%s` should give the same result with the same seed\" % name\n for i in range(2):\n for j in range(2):\n stats = {k : v[i][j] for k, v in symbdic['inputs']}\n for check_name, check_func, tol in symbdic['checks']:\n err = np.abs(check_func(ret2[i,j], stats))\n assert err < tol, \"%f vs %f: symbolic test: %s check for `%s` did not pass\" % (err, tol, check_name, name)\n\n # check symbolic\n symbol = symbdic['symbol']\n X = mx.sym.Variable(\"X\")\n params = symbdic['params'].copy()\n params.update(shape=shape, dtype=dtype)\n Y = symbol(**params) + X\n x = mx.nd.zeros(shape, dtype=dtype, ctx=device)\n xgrad = mx.nd.zeros(shape, dtype=dtype, ctx=device)\n yexec = Y.bind(device, {'X' : x}, {'X': xgrad})\n mx.random.seed(128)\n yexec.forward(is_train=True)\n yexec.backward(yexec.outputs[0])\n un1 = (yexec.outputs[0] - x).copyto(device)\n assert same(xgrad.asnumpy(), un1.asnumpy())\n mx.random.seed(128)\n yexec.forward()\n un2 = (yexec.outputs[0] - x).copyto(device)\n assert same(un1.asnumpy(), un2.asnumpy()), \\\n \"symbolic test: `%s` should give the same result with the same seed\" % name\n\n ret1 = un1.asnumpy()\n for check_name, check_func, tol in symbdic['checks']:\n assert np.abs(check_func(ret1, params)) < tol, \"symbolic test: %s check for `%s` did not pass\" % (check_name, name)\n\n # check multi-distribution sampling\n symbol = symbdic['symbol']\n params = { 'shape' : shape, 'dtype' : dtype }\n single_param = len(symbdic['inputs']) == 1;\n v1 = mx.sym.Variable('v1')\n v2 = mx.sym.Variable('v2')\n Y = symbol(v1,**params) if single_param else 
symbol(v1,v2,**params)\n bindings = { 'v1' : mx.nd.array(symbdic['inputs'][0][1]) }\n if not single_param :\n bindings.update({ 'v2' : mx.nd.array(symbdic['inputs'][1][1]) })\n yexec = Y.bind(ctx=device, args=bindings)\n yexec.forward()\n un1 = yexec.outputs[0].copyto(device).asnumpy()\n params = {}\n for i, r in enumerate(symbdic['inputs'][0][1]):\n for j, p1 in enumerate(r):\n params.update({ symbdic['inputs'][0][0] : p1 })\n if not single_param:\n params.update({ symbdic['inputs'][1][0] : symbdic['inputs'][1][1][i][j] })\n samples = un1[i,j]\n for check_name, check_func, tol in symbdic['checks']:\n assert np.abs(check_func(samples, params)) < tol, \"symbolic test: %s check for `%s` did not pass\" % (check_name, name)\n\n@with_seed()\ndef test_random():\n check_with_device(mx.context.current_context(), 'float16')\n check_with_device(mx.context.current_context(), 'float32')\n check_with_device(mx.context.current_context(), 'float64')\n\n\n# Set seed variously based on `start_seed` and `num_init_seeds`, then set seed finally to `final_seed`\ndef set_seed_variously(init_seed, num_init_seeds, final_seed):\n end_seed = init_seed + num_init_seeds\n for seed in range(init_seed, end_seed):\n mx.random.seed(seed)\n mx.random.seed(final_seed)\n return end_seed\n\n# Tests that seed setting of std (non-parallel) rng is synchronous w.r.t. rng use before and after.\n@with_seed()\ndef test_random_seed_setting():\n ctx = mx.context.current_context()\n seed_to_test = 1234\n num_temp_seeds = 25\n probs = [0.125, 0.25, 0.25, 0.0625, 0.125, 0.1875]\n num_samples = 100000\n for dtype in ['float16', 'float32', 'float64']:\n seed = set_seed_variously(1, num_temp_seeds, seed_to_test)\n samples1 = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx, dtype=dtype),\n shape=num_samples)\n seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)\n samples2 = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx, dtype=dtype),\n shape=num_samples)\n samples1np = samples1.asnumpy()\n set_seed_variously(seed, num_temp_seeds, seed_to_test+1)\n samples2np = samples2.asnumpy()\n assert same(samples1np, samples2np), \\\n \"seed-setting test: `multinomial` should give the same result with the same seed\"\n\n\n# Tests that seed setting of parallel rng is synchronous w.r.t. 
rng use before and after.\n@with_seed()\ndef test_parallel_random_seed_setting():\n ctx = mx.context.current_context()\n seed_to_test = 1234\n for dtype in ['float16', 'float32', 'float64']:\n # Avoid excessive test cpu runtimes\n num_temp_seeds = 25 if ctx.device_type == 'gpu' else 1\n # To flush out a possible race condition, run multiple times\n for _ in range(20):\n # Create enough samples such that we get a meaningful distribution.\n shape = (200, 200)\n params = { 'low': -1.5, 'high': 3.0 }\n params.update(shape=shape, dtype=dtype, ctx=ctx)\n\n # check directly\n seed = set_seed_variously(1, num_temp_seeds, seed_to_test)\n ret1 = mx.nd.random.uniform(**params)\n seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)\n ret2 = mx.nd.random.uniform(**params)\n seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)\n assert same(ret1.asnumpy(), ret2.asnumpy()), \\\n \"ndarray seed-setting test: `uniform` should give the same result with the same seed\"\n\n # check symbolic\n X = mx.sym.Variable(\"X\")\n Y = mx.sym.random.uniform(**params) + X\n x = mx.nd.zeros(shape, dtype=dtype, ctx=ctx)\n xgrad = mx.nd.zeros(shape, dtype=dtype, ctx=ctx)\n yexec = Y.bind(ctx, {'X' : x}, {'X': xgrad})\n seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)\n yexec.forward(is_train=True)\n yexec.backward(yexec.outputs[0])\n un1 = (yexec.outputs[0] - x).copyto(ctx)\n seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)\n yexec.forward()\n set_seed_variously(seed, num_temp_seeds, seed_to_test)\n un2 = (yexec.outputs[0] - x).copyto(ctx)\n assert same(un1.asnumpy(), un2.asnumpy()), \\\n \"symbolic seed-setting test: `uniform` should give the same result with the same seed\"\n\n\n@with_seed()\ndef test_sample_multinomial():\n for x in [mx.nd.array([[0,1,2,3,4],[4,3,2,1,0]])/10.0, mx.nd.array([0,1,2,3,4])/10.0]:\n dx = mx.nd.ones_like(x)\n mx.contrib.autograd.mark_variables([x], [dx])\n # Adding rtol and increasing samples needed to pass with seed 2951820647\n samples = 5000\n with mx.autograd.record():\n y, prob = mx.nd.random.multinomial(x, shape=samples, get_prob=True)\n r = prob * 5\n r.backward()\n\n y = y.asnumpy()\n x = x.asnumpy()\n dx = dx.asnumpy()\n if len(x.shape) is 1:\n x = x.reshape((1, x.shape[0]))\n dx = dx.reshape(1, dx.shape[0])\n y = y.reshape((1, y.shape[0]))\n prob = prob.reshape((1, prob.shape[0]))\n for i in range(x.shape[0]):\n freq = np.bincount(y[i,:], minlength=5)/np.float32(samples)*x[i,:].sum()\n mx.test_utils.assert_almost_equal(freq, x[i], rtol=0.20)\n rprob = x[i][y[i]]/x[i].sum()\n mx.test_utils.assert_almost_equal(np.log(rprob), prob.asnumpy()[i], atol=1e-5)\n\n real_dx = np.zeros((5,))\n for j in range(samples):\n real_dx[y[i][j]] += 5.0 / rprob[j]\n mx.test_utils.assert_almost_equal(real_dx, dx[i, :], rtol=1e-4, atol=1e-5)\n\n# Test the generators with the chi-square testing\n@with_seed()\ndef test_normal_generator():\n ctx = mx.context.current_context()\n samples = 1000000\n # Default success rate is 0.25, so 2 successes of 8 trials will pass.\n trials = 8\n num_buckets = 5\n for dtype in ['float16', 'float32', 'float64']:\n for mu, sigma in [(0.0, 1.0), (1.0, 5.0)]:\n print(\"ctx=%s, dtype=%s, Mu=%g, Sigma=%g:\" % (ctx, dtype, mu, sigma))\n buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.norm.ppf(x, mu, sigma), num_buckets)\n # Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly\n buckets = np.array(buckets, dtype=dtype).tolist()\n probs = [(ss.norm.cdf(buckets[i][1], mu, sigma) -\n 
ss.norm.cdf(buckets[i][0], mu, sigma)) for i in range(num_buckets)]\n generator_mx = lambda x: mx.nd.random.normal(mu, sigma, shape=x, ctx=ctx, dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs,\n nsamples=samples, nrepeat=trials)\n generator_mx_same_seed =\\\n lambda x: np.concatenate(\n [mx.nd.random.normal(mu, sigma, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs,\n nsamples=samples, nrepeat=trials)\n\n@with_seed()\ndef test_uniform_generator():\n ctx = mx.context.current_context()\n for dtype in ['float16', 'float32', 'float64']:\n for low, high in [(-1.0, 1.0), (1.0, 3.0)]:\n print(\"ctx=%s, dtype=%s, Low=%g, High=%g:\" % (ctx, dtype, low, high))\n scale = high - low\n buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), 5)\n # Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly\n buckets = np.array(buckets, dtype=dtype).tolist()\n probs = [(buckets[i][1] - buckets[i][0])/scale for i in range(5)]\n generator_mx = lambda x: mx.nd.random.uniform(low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.uniform(low, high, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n\n@with_seed()\ndef test_gamma_generator():\n ctx = mx.context.current_context()\n for dtype in ['float16', 'float32', 'float64']:\n for kappa, theta in [(0.5, 1.0), (1.0, 5.0)]:\n print(\"ctx=%s, dtype=%s, Shape=%g, Scale=%g:\" % (ctx, dtype, kappa, theta))\n buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.gamma.ppf(x, a=kappa, loc=0, scale=theta), 5)\n generator_mx = lambda x: mx.nd.random.gamma(kappa, theta, shape=x, ctx=ctx, dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.gamma(kappa, theta, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n\n@with_seed()\ndef test_exponential_generator():\n ctx = mx.context.current_context()\n for dtype in ['float16', 'float32', 'float64']:\n for scale in [0.1, 1.0]:\n print(\"ctx=%s, dtype=%s, Scale=%g:\" % (ctx, dtype, scale))\n buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.expon.ppf(x, loc=0, scale=scale), 5)\n generator_mx = lambda x: mx.nd.random.exponential(scale, shape=x, ctx=ctx, dtype=dtype).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.exponential(scale, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n\n@with_seed()\ndef test_poisson_generator():\n ctx = mx.context.current_context()\n for dtype in ['float16', 'float32', 'float64']:\n for lam in [1, 10]:\n print(\"ctx=%s, dtype=%s, Lambda=%d:\" % (ctx, dtype, lam))\n buckets = [(-1.0, lam - 0.5), (lam - 0.5, 2 * lam + 0.5), (2 * lam + 0.5, np.inf)]\n probs = [ss.poisson.cdf(bucket[1], lam) - ss.poisson.cdf(bucket[0], lam) for bucket in buckets]\n generator_mx = lambda x: mx.nd.random.poisson(lam, shape=x, 
ctx=ctx, dtype=dtype).asnumpy()\n            verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n            generator_mx_same_seed = \\\n                lambda x: np.concatenate(\n                    [mx.nd.random.poisson(lam, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n                     for _ in range(10)])\n            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n\n@with_seed()\ndef test_negative_binomial_generator():\n    ctx = mx.context.current_context()\n    for dtype in ['float16', 'float32', 'float64']:\n        success_num = 2\n        success_prob = 0.2\n        print(\"ctx=%s, dtype=%s, Success Num=%d, Success Prob=%g\" % (ctx, dtype, success_num, success_prob))\n        buckets = [(-1.0, 2.5), (2.5, 5.5), (5.5, 8.5), (8.5, np.inf)]\n        probs = [ss.nbinom.cdf(bucket[1], success_num, success_prob) -\n                 ss.nbinom.cdf(bucket[0], success_num, success_prob) for bucket in buckets]\n        generator_mx = lambda x: mx.nd.random.negative_binomial(success_num, success_prob,\n                                                                shape=x, ctx=ctx, dtype=dtype).asnumpy()\n        verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n        generator_mx_same_seed = \\\n            lambda x: np.concatenate(\n                [mx.nd.random.negative_binomial(success_num, success_prob, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n                 for _ in range(10)])\n        verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n        # Also test the Gamma-Poisson Mixture\n        print('Gamma-Poisson Mixture Test:')\n        alpha = 1.0 / success_num\n        mu = (1.0 - success_prob) / success_prob / alpha\n        generator_mx = lambda x: mx.nd.random.generalized_negative_binomial(mu, alpha,\n                                                                            shape=x, ctx=ctx, dtype=dtype).asnumpy()\n        verify_generator(generator=generator_mx, buckets=buckets, probs=probs)\n        generator_mx_same_seed = \\\n            lambda x: np.concatenate(\n                [mx.nd.random.generalized_negative_binomial(mu, alpha, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()\n                 for _ in range(10)])\n        verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)\n\n@with_seed()\ndef test_multinomial_generator():\n    # This test fails with dtype float16 if the probabilities themselves cannot be\n    # well-represented in float16. When the float16 random picks are assigned to buckets,\n    # only certain bucket-probabilities are possible. Here we map the desired probabilities\n    # (e.g.
0.10009766) that are achievable.\n def quantize_probs(probs, dtype):\n if dtype == 'float16':\n # float16 has a 10-bit fraction plus an implicit leading 1, so all probabilities\n # of the form N/2^11 (where N is an integer) are representable.\n num_quanta = 2048.0\n quantized_probs = np.rint(np.array(probs) * num_quanta) / num_quanta\n # Ensure probabilities add to 1\n quantized_probs[0] += 1.0 - quantized_probs.sum()\n else:\n # no need to quantize probs with this data precision\n quantized_probs = np.array(probs)\n return quantized_probs\n\n ctx = mx.context.current_context()\n probs = [0.1, 0.2, 0.3, 0.05, 0.15, 0.2]\n samples = 1000000\n trials = 5\n buckets = list(range(6))\n for dtype in ['float16', 'float32', 'float64']:\n print(\"ctx=%s, dtype=%s\" %(ctx, dtype))\n quantized_probs = quantize_probs(probs, dtype)\n generator_mx = lambda x: mx.nd.random.multinomial(data=mx.nd.array(quantized_probs, ctx=ctx, dtype=dtype),\n shape=x).asnumpy()\n verify_generator(generator=generator_mx, buckets=buckets, probs=quantized_probs,\n nsamples=samples, nrepeat=trials)\n generator_mx_same_seed = \\\n lambda x: np.concatenate(\n [mx.nd.random.multinomial(data=mx.nd.array(quantized_probs, ctx=ctx, dtype=dtype),\n shape=x // 10).asnumpy()\n for _ in range(10)])\n verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=quantized_probs,\n nsamples=samples, nrepeat=trials)\n\n\n@with_seed()\ndef test_with_random_seed():\n ctx = mx.context.current_context()\n size = 100\n shape = (size,)\n\n def check_same(x, y, name):\n assert same(x, y), \\\n \"%s rng should give the same result with the same seed\" % name\n\n def check_diff(x, y, name):\n assert not same(x, y), \\\n \"%s rng should give different results with different seeds\" % name\n\n # generate python, numpy and mxnet datasets with the given seed\n def gen_data(seed=None):\n with random_seed(seed):\n python_data = [rnd.random() for _ in range(size)]\n np_data = np.random.rand(size)\n mx_data = mx.nd.random_uniform(shape=shape, ctx=ctx).asnumpy()\n return (seed, python_data, np_data, mx_data)\n\n # check data, expecting them to be the same or different based on the seeds\n def check_data(a, b):\n seed_a = a[0]\n seed_b = b[0]\n if seed_a == seed_b and seed_a is not None:\n check_same(a[1], b[1], 'python')\n check_same(a[2], b[2], 'numpy')\n check_same(a[3], b[3], 'mxnet')\n else:\n check_diff(a[1], b[1], 'python')\n check_diff(a[2], b[2], 'numpy')\n check_diff(a[3], b[3], 'mxnet')\n\n # 5 tests that include a duplicated seed 1 and randomizing seed None\n seeds = [1, 2, 1, None, None]\n data = [gen_data(seed) for seed in seeds]\n\n # Add more complicated test case scenarios\n with random_seed(1):\n seeds.append(None)\n data.append(gen_data(None))\n with random_seed(2):\n seeds.append(None)\n data.append(gen_data(None))\n with random_seed():\n seeds.append(1)\n data.append(gen_data(1))\n with random_seed():\n seeds.append(2)\n data.append(gen_data(2))\n with random_seed(1):\n seeds.append(2)\n data.append(gen_data(2))\n\n num_seeds = len(seeds)\n for i in range(0, num_seeds-1):\n for j in range(i+1, num_seeds):\n check_data(data[i],data[j])\n\n@with_seed()\ndef test_zipfian_generator():\n # dummy true classes\n num_true = 5\n num_sampled = 1000\n range_max = 20\n\n def compute_expected_prob():\n # P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)\n classes = mx.nd.arange(0, range_max)\n expected_counts = ((classes + 2).log() - (classes + 1).log()) / np.log(range_max + 1)\n return expected_counts\n\n exp_cnt = 
compute_expected_prob() * num_sampled\n\n # test ndarray\n true_classes = mx.nd.random.uniform(0, range_max, shape=(num_true,)).astype('int32')\n sampled_classes, exp_cnt_true, exp_cnt_sampled = mx.nd.contrib.rand_zipfian(true_classes, num_sampled, range_max)\n mx.test_utils.assert_almost_equal(exp_cnt_sampled.asnumpy(), exp_cnt[sampled_classes].asnumpy(), rtol=1e-1, atol=1e-2)\n mx.test_utils.assert_almost_equal(exp_cnt_true.asnumpy(), exp_cnt[true_classes].asnumpy(), rtol=1e-1, atol=1e-2)\n\n # test symbol\n true_classes_var = mx.sym.var('true_classes')\n outputs = mx.sym.contrib.rand_zipfian(true_classes_var, num_sampled, range_max)\n outputs = mx.sym.Group(outputs)\n executor = outputs.bind(mx.context.current_context(), {'true_classes' : true_classes})\n executor.forward()\n sampled_classes, exp_cnt_true, exp_cnt_sampled = executor.outputs\n mx.test_utils.assert_almost_equal(exp_cnt_sampled.asnumpy(), exp_cnt[sampled_classes].asnumpy(), rtol=1e-1, atol=1e-2)\n mx.test_utils.assert_almost_equal(exp_cnt_true.asnumpy(), exp_cnt[true_classes].asnumpy(), rtol=1e-1, atol=1e-2)\n\n# Issue #10277 (https://github.com/apache/incubator-mxnet/issues/10277) discusses this test.\n@with_seed()\ndef test_shuffle():\n def check_first_axis_shuffle(arr):\n stride = int(arr.size / arr.shape[0])\n column0 = arr.reshape((arr.size,))[::stride].sort()\n seq = mx.nd.arange(0, arr.size - stride + 1, stride, ctx=arr.context)\n assert (column0 == seq).prod() == 1\n for i in range(arr.shape[0]):\n subarr = arr[i].reshape((arr[i].size,))\n start = subarr[0].asscalar()\n seq = mx.nd.arange(start, start + stride, ctx=arr.context)\n assert (subarr == seq).prod() == 1\n\n # This tests that the shuffling is along the first axis with `repeat1` number of shufflings\n # and the outcomes are uniformly distributed with `repeat2` number of shufflings.\n # Note that the enough number of samples (`repeat2`) to verify the uniformity of the distribution\n # of the outcomes grows factorially with the length of the first axis of the array `data`.\n # So we have to settle down with small arrays in practice.\n # `data` must be a consecutive sequence of integers starting from 0 if it is flattened.\n def testSmall(data, repeat1, repeat2):\n # Check that the shuffling is along the first axis.\n # The order of the elements in each subarray must not change.\n # This takes long time so `repeat1` need to be small.\n for i in range(repeat1):\n ret = mx.nd.random.shuffle(data)\n check_first_axis_shuffle(ret)\n # Count the number of each different outcome.\n # The sequence composed of the first elements of the subarrays is enough to discriminate\n # the outcomes as long as the order of the elements in each subarray does not change.\n count = {}\n stride = int(data.size / data.shape[0])\n for i in range(repeat2):\n ret = mx.nd.random.shuffle(data)\n h = str(ret.reshape((ret.size,))[::stride])\n c = count.get(h, 0)\n count[h] = c + 1\n # Check the total number of possible outcomes.\n # If `repeat2` is not large enough, this could fail with high probability.\n assert len(count) == math.factorial(data.shape[0])\n # The outcomes must be uniformly distributed.\n # If `repeat2` is not large enough, this could fail with high probability.\n for p in itertools.permutations(range(0, data.size - stride + 1, stride)):\n err = abs(1. * count[str(mx.nd.array(p))] / repeat2 - 1. 
/ math.factorial(data.shape[0]))\n assert err < 0.01, \"The absolute error {} is larger than the tolerance.\".format(err)\n # Check symbol interface\n a = mx.sym.Variable('a')\n b = mx.sym.random.shuffle(a)\n c = mx.sym.random.shuffle(data=b, name='c')\n d = mx.sym.sort(c, axis=0)\n assert (d.eval(a=data, ctx=mx.current_context())[0] == data).prod() == 1\n\n # This test is weaker than `testSmall` and to test larger arrays.\n # `repeat` should be much smaller than the factorial of `len(x.shape[0])`.\n # `data` must be a consecutive sequence of integers starting from 0 if it is flattened.\n def testLarge(data, repeat):\n # Check that the shuffling is along the first axis\n # and count the number of different outcomes.\n stride = int(data.size / data.shape[0])\n count = {}\n for i in range(repeat):\n ret = mx.nd.random.shuffle(data)\n check_first_axis_shuffle(ret)\n h = str(ret.reshape((ret.size,))[::stride])\n c = count.get(h, 0)\n count[h] = c + 1\n # The probability of duplicated outcomes is very low for large arrays.\n assert len(count) == repeat\n\n # Test small arrays with different shapes\n testSmall(mx.nd.arange(0, 3), 100, 40000)\n testSmall(mx.nd.arange(0, 9).reshape((3, 3)), 100, 40000)\n testSmall(mx.nd.arange(0, 18).reshape((3, 2, 3)), 100, 40000)\n # Test larger arrays\n testLarge(mx.nd.arange(0, 100000).reshape((10, 10000)), 10)\n testLarge(mx.nd.arange(0, 100000).reshape((10000, 10)), 10)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n"
] | [
[
"numpy.bincount",
"numpy.array",
"scipy.stats.norm.ppf",
"numpy.random.rand",
"scipy.stats.nbinom.cdf",
"numpy.log",
"numpy.zeros",
"scipy.stats.uniform.ppf",
"numpy.sum",
"numpy.float32",
"scipy.stats.expon.ppf",
"numpy.sqrt",
"scipy.stats.gamma.ppf",
"scipy.stats.norm.cdf",
"scipy.stats.poisson.cdf"
]
] |
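The generator tests in the row above all follow the same recipe: draw many samples, bucket them, and compare the empirical bucket frequencies against expected probabilities (the file's verify_generator helper performs a chi-square check). Below is a minimal self-contained sketch of that idea using only numpy and scipy; the function name, bucket count, and sample size are illustrative and not part of the file above:

    import numpy as np
    import scipy.stats as ss

    def chi_square_check(samples, buckets, probs):
        # Count samples per (low, high] bucket and chi-square the observed
        # counts against the expected counts implied by `probs`.
        expected = np.array(probs) * len(samples)
        observed = np.array([np.sum((samples > lo) & (samples <= hi))
                             for lo, hi in buckets])
        return ss.chisquare(f_obs=observed, f_exp=expected).pvalue

    num_buckets, n = 5, 100000
    # Equal-probability buckets for the standard normal, built from the PPF.
    edges = [ss.norm.ppf(q) for q in np.linspace(0.0, 1.0, num_buckets + 1)]
    buckets = list(zip(edges[:-1], edges[1:]))
    probs = [1.0 / num_buckets] * num_buckets
    print(chi_square_check(np.random.normal(size=n), buckets, probs))

A correctly seeded generator should yield p-values that are only rarely extreme, which is why the tests above repeat the check over several trials.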
yzy1015/Tensorflow_Template | [
"35732cf13c66465b7924339a170005094f70ea61"
] | [
"data_generator.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom PIL import Image\n\n\nclass ImgDataGen(tf.keras.utils.Sequence):\n\n def __init__(self, img_path, img_label, batch_size, preprocess_input=None,\n transform=None, shuffle=True, data_type=np.float32):\n\n self.img_path = img_path\n self.img_label = img_label\n self.batch_size = batch_size\n self.transform = transform\n self.shuffle = shuffle\n self.index_list = np.arange(len(self.img_path))\n self.data_type = data_type\n self.preprocess_input = preprocess_input\n\n def __getitem__(self, index):\n idx = self.index_list[index * self.batch_size:(index + 1) * self.batch_size]\n selected_path = self.img_path[idx]\n y = self.img_label[idx]\n img_placeholder = []\n for i_path in selected_path:\n img = np.array(Image.open(i_path))\n if self.transform is not None:\n img = self.transform(image=img)['image']\n\n img_placeholder.append(img[None, :, :, :])\n\n x = np.concatenate(img_placeholder).astype(self.data_type)\n if self.preprocess_input is not None:\n x = self.preprocess_input(x)\n\n return x, y\n\n def __len__(self):\n return len(self.img_path) // self.batch_size\n\n def on_epoch_end(self):\n if self.shuffle:\n np.random.shuffle(self.index_list)"
] | [
[
"numpy.concatenate",
"numpy.random.shuffle"
]
] |
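A hypothetical usage sketch for the ImgDataGen sequence above; the file paths, labels, and model are placeholders. Since __getitem__ indexes img_path and img_label with a numpy index array, both should be passed as numpy arrays rather than plain lists:

    import numpy as np
    import tensorflow as tf

    paths = np.array(["imgs/img_0.png", "imgs/img_1.png"])   # hypothetical files
    labels = np.array([0.0, 1.0], dtype=np.float32)

    gen = ImgDataGen(paths, labels, batch_size=2,
                     preprocess_input=lambda x: x / 255.0,   # simple rescaling
                     shuffle=True)

    model = tf.keras.Sequential([tf.keras.layers.Flatten(),
                                 tf.keras.layers.Dense(1, activation="sigmoid")])
    model.compile(optimizer="adam", loss="binary_crossentropy")
    model.fit(gen, epochs=1)   # Keras calls on_epoch_end() to reshuffle

Note that __len__ floor-divides by batch_size, so trailing samples that do not fill a complete batch are silently dropped each epoch.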
allenai/HyBayes | [
"9ac1b923953f471f104a4312499d007a676edc92"
] | [
"HyBayes/experiment.py"
] | [
"import copy\nimport configparser\nimport logging\nimport matplotlib.pyplot as plt\nimport pickle\nimport pymc3 as pm\n\nfrom .models.beta_bern_model import add_beta_bern_model\nfrom .models.beta_binomial_model import add_beta_binomial_model\nfrom .models.count_model import add_count_model\nfrom .models.metric_model import add_exp_uniform_normal_t_model\nfrom .models.ordinal_model import add_ordinal_model\n\nfrom .Bayes_factor_analysis import bayes_factor_analysis\nfrom .visualization import difference_plots\nfrom scipy import stats\nfrom .utils import *\n\nlogger = logging.getLogger('root')\n\n\ndef get_rope(config, parameter):\n \"\"\"\n Read ROPE (corresponding to the parameter) information from config\n :param config:\n :param parameter:\n :return:\n \"\"\"\n return config.getfloat(f\"{parameter}_ROPE_begin\"), config.getfloat(f\"{parameter}_ROPE_end\")\n\n\nclass HierarchicalModel:\n \"\"\"\n Keeps the configuration of different models.\n \"\"\"\n\n def __init__(self, y) -> None:\n \"\"\"\n :param y: the list of observations\n \"\"\"\n super().__init__()\n self.n_groups = len(y) # the number of experiments (often set to 2)\n self.stats_y = [stats.describe(yi) for yi in y] # statistics describing the results\n self.y = y # n_groups list of numpy arrays or it s None\n self.pymc_model = None # the internal model to communicate with PyMC3\n self.add_observations_function = None\n self.mu_parameter = None\n self.sigma_parameter = None\n self.skewness = None\n self.trace = None\n\n def __str__(self) -> str:\n return f\"{self.n_groups}_{super().__str__()}\"\n\n def get_GraphViz_object(self, file_prefix: str, save_dot: bool = True, save_png: bool = True,\n extension: str = \"png\"):\n \"\"\"\n Returns the GraphViz object corresponding to the underlying hierarchical model.\n :param file_prefix: a string with desired prefix to add to saved files. It can include a folder name too.\n :param save_dot: a boolean indicating if text file need to be stored too\n :param save_png: a boolean indicating if an pictorial file need to be stored too\n :param extension: a string indicating the extension of pictorial file, e.g., \"png\"\n \"\"\"\n graph = pm.model_to_graphviz(self.pymc_model)\n graph.format = extension\n if save_dot:\n txtFileName = f\"{file_prefix}_hierarchical_graph.txt\"\n graph.save(txtFileName)\n logger.info(f\"Graph's source saved to {txtFileName}\")\n if save_png:\n pngFileName = f\"{file_prefix}_hierarchical_graph\"\n graph.render(pngFileName, view=False, cleanup=True)\n logger.info(f\"Graph picture saved to {pngFileName}\")\n return graph\n\n\nclass Experiment:\n\n def __init__(self, y: list, config: configparser.ConfigParser) -> None:\n \"\"\"\n :param y: observations a list of numpy arrays. 
len(y) is the number of experiment results to compare or groups\n :param config: configuration of the experiments.\n \"\"\"\n super().__init__()\n self.y = y\n self.run_prior = config[\"Prior\"].getboolean(\"Analyze\")\n self.run_post = config[\"Posterior\"].getboolean(\"Analyze\")\n self.file_prefix = config[\"Files\"].get(\"Output_prefix\")\n self.config_model = config[\"Model\"]\n self.config_prior = config[\"Prior\"]\n self.config_post = config[\"Posterior\"]\n self.config_plots = config[\"Plots\"]\n self.config_Bayes_factor = config[\"Bayes_factor\"]\n self.extension = self.config_plots.get(\"Extension\")\n\n def __str__(self) -> str:\n return super().__str__()\n\n def run_model(self, hierarchical_model, corresponding_config,\n file_prefix=\"experiment\",\n draws=500, chains=None, cores=None, tune=500):\n \"\"\"\n :param hierarchical_model:\n :param corresponding_config: either config_prior or config_post\n Note that the config is still accessible by self.*\n :param file_prefix: a string with desired prefix to add to saved files. It can include a folder name too.\n e.g., \"metric_experiment_results/metric\"\n :param draws: the length of sample in each chain after tuning steps\n (refer to https://docs.pymc.io/api/inference.html for detailed information)\n :param chains: the number of independent chains for sampling\n (refer to https://docs.pymc.io/api/inference.html for detailed information)\n :param cores: the number of cores to use. For now we use 1\n (refer to https://docs.pymc.io/api/inference.html for detailed information)\n :param tune: the number initial samples to discard as tuning steps.\n (refer to https://docs.pymc.io/api/inference.html for detailed information)\n :return:\n \"\"\"\n printLine()\n with hierarchical_model.pymc_model:\n hierarchical_model.trace = pm.sample(model=hierarchical_model.pymc_model,\n draws=draws, chains=chains, cores=cores, tune=tune)\n printLine()\n logger.info(f\"Effective Sample Size (ESS) = {pm.diagnostics.effective_n(hierarchical_model.trace)}\")\n if corresponding_config.getboolean(\"Save_trace\"):\n traceFolderName = f\"{file_prefix}_trace\"\n if os.path.exists(traceFolderName):\n ind = 0\n while os.path.exists(f\"{traceFolderName}_{ind}\"):\n ind += 1\n traceFolderName = f\"{traceFolderName}_{ind}\"\n pm.save_trace(hierarchical_model.trace, directory=traceFolderName)\n with open(os.path.join(traceFolderName, \"pickeled_trace.pkl\"), 'wb') as buff:\n pickle.dump({'model': hierarchical_model.pymc_model, 'trace': hierarchical_model.trace}, buff)\n logger.info(f\"{traceFolderName} is saved!\")\n\n printLine()\n if corresponding_config.getboolean(\"Diagnostic_plots\"):\n pm.traceplot(hierarchical_model.trace)\n diag_file_name = f\"{file_prefix}_diagnostics.{self.extension}\"\n plt.savefig(diag_file_name)\n logger.info(f\"{diag_file_name} is saved!\")\n plt.clf()\n printLine()\n if hierarchical_model.n_groups == 2:\n difference_plots(hierarchical_model=hierarchical_model,\n corresponding_config=corresponding_config,\n file_prefix=file_prefix,\n config_plot=self.config_plots,\n config_model=self.config_model)\n printLine()\n\n def add_model(self, model_object):\n \"\"\"\n Constructing the appropriate model based on the specifications in the config file.\n :param model_object: the default model\n \"\"\"\n error = False\n model_name = self.config_model.get(\"Variable_type\")\n if model_name == \"Binary\":\n if self.config_model.get(\"Prior_model\") == \"Beta\":\n add_beta_bern_model(model_object)\n else:\n logger.error(f'The given prior model 
{self.config_model.get(\"Prior_model\")} is not recognized')\n        elif model_name == \"Metric\":\n            if self.config_model.getboolean(\"UnitInterval\"):\n                raise NotImplementedError(\"work in progress . . . \")\n                # add_inv_logit_normal_model(model_object)\n            else:\n                add_exp_uniform_normal_t_model(model_object)\n        elif model_name == \"Count\":\n            add_count_model(model_object)\n        elif model_name == \"Ordinal\":\n            add_ordinal_model(model_object)\n        elif model_name == \"Binomial\":\n            add_beta_binomial_model(model_object)\n        else:\n            error = True\n        if error:\n            logger.error(\"The model specified in the config file was not found. Exiting the program!\")\n            exit(0)\n\n    def run(self) -> None:\n        \"\"\"\n        This is the main function called from the experiment class.\n        It forms the HierarchicalModel and loads the appropriate model from the models package.\n        :return: None\n        \"\"\"\n        y = self.y\n        prior_model = HierarchicalModel(y=y)\n        logger.info(\"Summary of statistics for the given data\")\n        logger.info(f\"n_groups: {prior_model.n_groups}\")\n        for ind, x in enumerate(prior_model.stats_y):\n            logger.info(f\"Group index = {ind}:\")\n            logger.info(x)\n\n        self.add_model(prior_model)\n        if self.run_prior:\n            prior_model.get_GraphViz_object(\n                self.file_prefix + \"_prior\",\n                self.config_prior.getboolean(\"Save_hierarchical_TXT\"),\n                self.config_prior.getboolean(\"Save_hierarchical_PNG\"),\n                extension=self.extension,\n            )\n\n            logger.info(\"Sampling From Prior ...\")\n\n            self.run_model(\n                prior_model,\n                corresponding_config=self.config_prior,\n                file_prefix=self.file_prefix + \"_prior\",\n                draws=self.config_prior.getint(\"Draws\"),\n                chains=self.config_prior.getint(\"Chains\"),\n                cores=1,\n                tune=self.config_prior.getint(\"Tune\"),\n            )\n\n        if self.run_post:\n            post_model = copy.copy(prior_model)\n            post_model.add_observations_function()\n            post_model.get_GraphViz_object(\n                self.file_prefix + \"_posterior\",\n                self.config_post.getboolean(\"Save_hierarchical_TXT\"),\n                self.config_post.getboolean(\"Save_hierarchical_PNG\"),\n                extension=self.extension,\n            )\n\n            logger.info(\"Sampling From Posterior ...\")\n            self.run_model(\n                post_model,\n                corresponding_config=self.config_post,\n                file_prefix=self.file_prefix + \"_posterior\",\n                draws=self.config_post.getint(\"Draws\"),\n                chains=self.config_post.getint(\"Chains\"),\n                cores=1,\n                tune=self.config_post.getint(\"Tune\"),\n            )\n        if self.config_Bayes_factor.getboolean(\"analyze\"):\n\n            if self.run_prior and self.run_post:\n                rope = get_rope(self.config_Bayes_factor, prior_model.mu_parameter)\n                if None in rope:\n                    rope = get_rope(self.config_model, prior_model.mu_parameter)\n                if None in rope:\n                    # TODO infer the rope from input data if not given in config\n                    rope = (-0.1, 0.1)\n                bayes_factor_data_frame = bayes_factor_analysis(\n                    self.config_Bayes_factor,\n                    prior_model,\n                    post_model,\n                    init_rope=rope)\n                bayes_factor_file_name = self.file_prefix + \"_Bayes_factor.csv\"\n                bayes_factor_data_frame.to_csv(bayes_factor_file_name)\n                logger.info(f\"Bayes Factor DataFrame is saved at {bayes_factor_file_name}\")\n            else:\n                logger.info(\"For running Bayes factor analysis, \"\n                            \"flags for both prior and posterior analysis should be on.\")\n        # if self.postPredict:  # TODO impose data plot\n        #     self.drawPPC(trace, model=postModel)\n\n    def draw_ppc(self, trace, model):\n        \"\"\"\n        Makes Posterior Predictive Checks (PPC). Posterior predictive checks are, in simple words, \"simulating replicated data under the fitted model and then comparing these to the observed data\" (Gelman and Hill, 2007, p. 158).
So, you use posterior predictive to \"look for systematic discrepancies between real and simulated data\" (Gelman et al. 2004, p. 169).\n :param trace:\n :param model:\n :return:\n \"\"\"\n raise NotImplementedError(\"work in progress . . . \")\n ppc = pm.sample_posterior_predictive(trace, samples=500, model=model.pymc_model,\n vars=[model.pymc_model.mu,\n model.pymc_model.nu,\n model.pymc_model.sigma])\n\n _, ax = plt.subplots(figsize=(12, 6))\n ax.hist(self.y[0], bins=19, alpha=0.5, histtype='bar', color=\"red\", rwidth=0.3)\n MLmu = np.mean(ppc[\"mu\"][0])\n MLsd = np.mean(ppc[\"sigma\"][0])\n MLnu = np.mean(ppc[\"nu\"])\n xp = np.linspace(MLmu - 4 * MLsd, MLmu + 4 * MLsd, 100)\n yp = MLsd * stats.t(nu=MLnu).pdf(xp) + MLmu\n ax.scatter(x=xp,\n y=yp)\n ax.scatter(x=self.y[0],\n y=np.zeros(self.y[0].shape), marker='x', color=\"black\")\n ax.set(title='Posterior predictive of the mean',\n xlabel='mean(x)',\n ylabel='Frequency')\n plt.savefig(\"ppc.png\")\n plt.clf()\n\n"
] | [
[
"matplotlib.pyplot.savefig",
"scipy.stats.describe",
"scipy.stats.t",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.clf"
]
] |
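The experiment above estimates a Bayes factor from prior and posterior traces relative to a ROPE (region of practical equivalence) around the mean parameter. One common way such an estimate is formed is as the ratio of posterior to prior odds of the parameter falling inside the ROPE; the sketch below illustrates that generic idea with synthetic draws and is not necessarily the exact computation inside HyBayes's bayes_factor_analysis:

    import numpy as np

    def rope_bayes_factor(prior_samples, posterior_samples, rope=(-0.1, 0.1)):
        lo, hi = rope
        def odds_in_rope(s):
            p = np.mean((s >= lo) & (s <= hi))
            p = np.clip(p, 1e-6, 1 - 1e-6)   # guard against empty bins
            return p / (1 - p)
        # Ratio of posterior to prior odds of lying inside the ROPE.
        return odds_in_rope(posterior_samples) / odds_in_rope(prior_samples)

    prior_mu = np.random.normal(0.0, 10.0, size=20000)    # vague prior draws
    post_mu = np.random.normal(0.02, 0.05, size=20000)    # illustrative posterior
    print(rope_bayes_factor(prior_mu, post_mu))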
michhar/Machine-Learning-Workstreams | [
"dba91e4af39006ae1705f48d8cda8014b560a5b3"
] | [
"src/utils.py"
] | [
"\"\"\"\nMask R-CNN\nCommon utility functions and classes.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport sys\nimport os\nimport math\nimport random\nimport numpy as np\nimport scipy.misc\nimport scipy.ndimage\nimport skimage.color\nimport skimage.io\nimport torch\nimport urllib.request\nimport shutil\nimport warnings\n\n# URL from which to download the latest COCO trained weights\nCOCO_MODEL_URL = \"https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5\"\n\n############################################################\n# Bounding Boxes\n############################################################\n\ndef extract_bboxes(mask):\n \"\"\"Compute bounding boxes from masks.\n mask: [height, width, num_instances]. Mask pixels are either 1 or 0.\n\n Returns: bbox array [num_instances, (y1, x1, y2, x2)].\n \"\"\"\n boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)\n for i in range(mask.shape[-1]):\n m = mask[:, :, i]\n # Bounding box.\n horizontal_indicies = np.where(np.any(m, axis=0))[0]\n vertical_indicies = np.where(np.any(m, axis=1))[0]\n if horizontal_indicies.shape[0]:\n x1, x2 = horizontal_indicies[[0, -1]]\n y1, y2 = vertical_indicies[[0, -1]]\n # x2 and y2 should not be part of the box. Increment by 1.\n x2 += 1\n y2 += 1\n else:\n # No mask for this instance. Might happen due to\n # resizing or cropping. Set bbox to zeros\n x1, x2, y1, y2 = 0, 0, 0, 0\n boxes[i] = np.array([y1, x1, y2, x2])\n return boxes.astype(np.int32)\n\n\ndef compute_iou(box, boxes, box_area, boxes_area):\n \"\"\"Calculates IoU of the given box with the array of the given boxes.\n box: 1D vector [y1, x1, y2, x2]\n boxes: [boxes_count, (y1, x1, y2, x2)]\n box_area: float. the area of 'box'\n boxes_area: array of length boxes_count.\n\n Note: the areas are passed in rather than calculated here for\n efficency. Calculate once in the caller to avoid duplicate work.\n \"\"\"\n # Calculate intersection areas\n y1 = np.maximum(box[0], boxes[:, 0])\n y2 = np.minimum(box[2], boxes[:, 2])\n x1 = np.maximum(box[1], boxes[:, 1])\n x2 = np.minimum(box[3], boxes[:, 3])\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n union = box_area + boxes_area[:] - intersection[:]\n iou = intersection / union\n return iou\n\ndef compute_ap(gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold=0.5):\n \"\"\"Compute Average Precision at a set IoU threshold (default 0.5).\n Returns:\n mAP: Mean Average Precision\n precisions: List of precisions at different class score thresholds.\n recalls: List of recall values at different class score thresholds.\n overlaps: [pred_boxes, gt_boxes] IoU overlaps.\n \"\"\"\n # Get matches and overlaps\n gt_match, pred_match, overlaps = compute_matches(\n gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold)\n\n # Compute precision and recall at each prediction box step\n precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)\n recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)\n\n # Pad with start and end values to simplify the math\n precisions = np.concatenate([[0], precisions, [0]])\n recalls = np.concatenate([[0], recalls, [1]])\n\n # Ensure precision values decrease but don't increase. 
This way, the\n # precision value at each recall threshold is the maximum it can be\n # for all following recall thresholds, as specified by the VOC paper.\n for i in range(len(precisions) - 2, -1, -1):\n precisions[i] = np.maximum(precisions[i], precisions[i + 1])\n\n # Compute mean AP over recall range\n indices = np.where(recalls[:-1] != recalls[1:])[0] + 1\n mAP = np.sum((recalls[indices] - recalls[indices - 1]) *\n precisions[indices])\n\n return mAP, precisions, recalls, overlaps\n\n############################################################\n# Miscellaneous\n############################################################\n\ndef trim_zeros(x):\n \"\"\"It's common to have tensors larger than the available data and\n pad with zeros. This function removes rows that are all zeros.\n x: [rows, columns].\n \"\"\"\n assert len(x.shape) == 2\n return x[~np.all(x == 0, axis=1)]\n\n\ndef compute_matches(gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold=0.5, score_threshold=0.0):\n \"\"\"Finds matches between prediction and ground truth instances.\n Returns:\n gt_match: 1-D array. For each GT box it has the index of the matched\n predicted box.\n pred_match: 1-D array. For each predicted box, it has the index of\n the matched ground truth box.\n overlaps: [pred_boxes, gt_boxes] IoU overlaps.\n \"\"\"\n # Trim zero padding\n # TODO: cleaner to do zero unpadding upstream\n gt_boxes = trim_zeros(gt_boxes)\n gt_masks = gt_masks[..., :gt_boxes.shape[0]]\n pred_boxes = trim_zeros(pred_boxes)\n pred_scores = pred_scores[:pred_boxes.shape[0]]\n # Sort predictions by score from high to low\n indices = np.argsort(pred_scores)[::-1]\n pred_boxes = pred_boxes[indices]\n pred_class_ids = pred_class_ids[indices]\n pred_scores = pred_scores[indices]\n pred_masks = pred_masks[..., indices]\n\n # Compute IoU overlaps [pred_masks, gt_masks]\n overlaps = compute_overlaps_masks(pred_masks, gt_masks)\n\n # Loop through predictions and find matching ground truth boxes\n match_count = 0\n pred_match = -1 * np.ones([pred_boxes.shape[0]])\n gt_match = -1 * np.ones([gt_boxes.shape[0]])\n for i in range(len(pred_boxes)):\n # Find best matching ground truth box\n # 1. Sort matches by score\n sorted_ixs = np.argsort(overlaps[i])[::-1]\n # 2. Remove low scores\n low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]\n if low_score_idx.size > 0:\n sorted_ixs = sorted_ixs[:low_score_idx[0]]\n # 3. 
Find the match\n        for j in sorted_ixs:\n            # If ground truth box is already matched, go to next one\n            if gt_match[j] > 0:\n                continue\n            # If we reach IoU smaller than the threshold, end the loop\n            iou = overlaps[i, j]\n            if iou < iou_threshold:\n                break\n            # Do we have a match?\n            if pred_class_ids[i] == gt_class_ids[j]:\n                match_count += 1\n                gt_match[j] = i\n                pred_match[i] = j\n                break\n\n    return gt_match, pred_match, overlaps\n\n\ndef compute_ap_range(gt_box, gt_class_id, gt_mask,\n                     pred_box, pred_class_id, pred_score, pred_mask,\n                     iou_thresholds=None, verbose=1):\n    \"\"\"Compute AP over a range of IoU thresholds. Default range is 0.5-0.95.\"\"\"\n    # Default is 0.5 to 0.95 with increments of 0.05\n    iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)\n\n    # Compute AP over range of IoU thresholds\n    AP = []\n    for iou_threshold in iou_thresholds:\n        ap, precisions, recalls, overlaps =\\\n            compute_ap(gt_box, gt_class_id, gt_mask,\n                       pred_box, pred_class_id, pred_score, pred_mask,\n                       iou_threshold=iou_threshold)\n        if verbose:\n            print(\"AP @{:.2f}:\\t {:.3f}\".format(iou_threshold, ap))\n        AP.append(ap)\n    AP = np.array(AP).mean()\n    if verbose:\n        print(\"AP @{:.2f}-{:.2f}:\\t {:.3f}\".format(\n            iou_thresholds[0], iou_thresholds[-1], AP))\n    return AP\n\n\ndef compute_recall(pred_boxes, gt_boxes, iou):\n    \"\"\"Compute the recall at the given IoU threshold.
It's an indication\n of how many GT boxes were found by the given prediction boxes.\n pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates\n gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates\n \"\"\"\n # Measure overlaps\n overlaps = compute_overlaps(pred_boxes, gt_boxes)\n iou_max = np.max(overlaps, axis=1)\n iou_argmax = np.argmax(overlaps, axis=1)\n positive_ids = np.where(iou_max >= iou)[0]\n matched_gt_boxes = iou_argmax[positive_ids]\n\n recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]\n return recall, positive_ids\n\n\n# ## Batch Slicing\n# Some custom layers support a batch size of 1 only, and require a lot of work\n# to support batches greater than 1. This function slices an input tensor\n# across the batch dimension and feeds batches of size 1. Effectively,\n# an easy way to support batches > 1 quickly with little code modification.\n# In the long run, it's more efficient to modify the code to support large\n# batches and getting rid of this function. Consider this a temporary solution\ndef batch_slice(inputs, graph_fn, batch_size, names=None):\n \"\"\"Splits inputs into slices and feeds each slice to a copy of the given\n computation graph and then combines the results. It allows you to run a\n graph on a batch of inputs even if the graph is written to support one\n instance only.\n inputs: list of tensors. All must have the same first dimension length\n graph_fn: A function that returns a TF tensor that's part of a graph.\n batch_size: number of slices to divide the data into.\n names: If provided, assigns names to the resulting tensors.\n \"\"\"\n if not isinstance(inputs, list):\n inputs = [inputs]\n\n outputs = []\n for i in range(batch_size):\n inputs_slice = [x[i] for x in inputs]\n output_slice = graph_fn(*inputs_slice)\n if not isinstance(output_slice, (tuple, list)):\n output_slice = [output_slice]\n outputs.append(output_slice)\n # Change outputs from a list of slices where each is\n # a list of outputs to a list of outputs and each has\n # a list of slices\n outputs = list(zip(*outputs))\n\n if names is None:\n names = [None] * len(outputs)\n\n result = [tf.stack(o, axis=0, name=n)\n for o, n in zip(outputs, names)]\n if len(result) == 1:\n result = result[0]\n\n return result\n\n\ndef download_trained_weights(coco_model_path, verbose=1):\n \"\"\"Download COCO trained weights from Releases.\n coco_model_path: local path of COCO trained weights\n \"\"\"\n if verbose > 0:\n print(\"Downloading pretrained model to \" + coco_model_path + \" ...\")\n with urllib.request.urlopen(COCO_MODEL_URL) as resp, open(coco_model_path, 'wb') as out:\n shutil.copyfileobj(resp, out)\n if verbose > 0:\n print(\"... done downloading pretrained model!\")\n\n\ndef norm_boxes(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [N, (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n Returns:\n [N, (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.divide((boxes - shift), scale).astype(np.float32)\n\n\ndef denorm_boxes(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [N, (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n Note: In pixel coordinates (y2, x2) is outside the box. 
But in normalized\n coordinates it's inside the box.\n Returns:\n [N, (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)\n\ndef compute_overlaps(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n\n For better performance, pass the largest set first and the smaller second.\n \"\"\"\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps\n\ndef compute_overlaps_masks(masks1, masks2):\n '''Computes IoU overlaps between two sets of masks.\n masks1, masks2: [Height, Width, instances]\n '''\n # flatten masks\n masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)\n masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)\n area1 = np.sum(masks1, axis=0)\n area2 = np.sum(masks2, axis=0)\n\n # intersections and union\n intersections = np.dot(masks1.T, masks2)\n union = area1[:, None] + area2[None, :] - intersections\n overlaps = intersections / union\n\n return overlaps\n\n\ndef non_max_suppression(boxes, scores, threshold):\n \"\"\"Performs non-maximum supression and returns indicies of kept boxes.\n boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.\n scores: 1-D array of box scores.\n threshold: Float. IoU threshold to use for filtering.\n \"\"\"\n assert boxes.shape[0] > 0\n if boxes.dtype.kind != \"f\":\n boxes = boxes.astype(np.float32)\n\n # Compute box areas\n y1 = boxes[:, 0]\n x1 = boxes[:, 1]\n y2 = boxes[:, 2]\n x2 = boxes[:, 3]\n area = (y2 - y1) * (x2 - x1)\n\n # Get indicies of boxes sorted by scores (highest first)\n ixs = scores.argsort()[::-1]\n\n pick = []\n while len(ixs) > 0:\n # Pick top box and add its index to the list\n i = ixs[0]\n pick.append(i)\n # Compute IoU of the picked box with the rest\n iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n # Identify boxes with IoU over the threshold. 
This\n # returns indicies into ixs[1:], so add 1 to get\n # indicies into ixs.\n remove_ixs = np.where(iou > threshold)[0] + 1\n # Remove indicies of the picked and overlapped boxes.\n ixs = np.delete(ixs, remove_ixs)\n ixs = np.delete(ixs, 0)\n return np.array(pick, dtype=np.int32)\n\n\n\ndef box_refinement(box, gt_box):\n \"\"\"Compute refinement needed to transform box to gt_box.\n box and gt_box are [N, (y1, x1, y2, x2)]\n \"\"\"\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = torch.log(gt_height / height)\n dw = torch.log(gt_width / width)\n\n result = torch.stack([dy, dx, dh, dw], dim=1)\n return result\n\n\n############################################################\n# Dataset\n############################################################\n\nclass Dataset(object):\n \"\"\"The base class for dataset classes.\n To use it, create a new class that adds functions specific to the dataset\n you want to use. For example:\n\n class CatsAndDogsDataset(Dataset):\n def load_cats_and_dogs(self):\n ...\n def load_mask(self, image_id):\n ...\n def image_reference(self, image_id):\n ...\n\n See COCODataset and ShapesDataset as examples.\n \"\"\"\n\n def __init__(self, class_map=None):\n self._image_ids = []\n self.image_info = []\n # Background is always the first class\n self.class_info = [{\"source\": \"\", \"id\": 0, \"name\": \"BG\"}]\n self.source_class_ids = {}\n\n def add_class(self, source, class_id, class_name):\n assert \".\" not in source, \"Source name cannot contain a dot\"\n # Does the class exist already?\n for info in self.class_info:\n if info['source'] == source and info[\"id\"] == class_id:\n # source.class_id combination already available, skip\n return\n # Add the class\n self.class_info.append({\n \"source\": source,\n \"id\": class_id,\n \"name\": class_name,\n })\n\n def add_image(self, source, image_id, path, **kwargs):\n image_info = {\n \"id\": image_id,\n \"source\": source,\n \"path\": path,\n }\n image_info.update(kwargs)\n self.image_info.append(image_info)\n\n def image_reference(self, image_id):\n \"\"\"Return a link to the image in its source Website or details about\n the image that help looking it up or debugging it.\n\n Override for your dataset, but pass to this function\n if you encounter images not in your dataset.\n \"\"\"\n return \"\"\n\n def prepare(self, class_map=None):\n \"\"\"Prepares the Dataset class for use.\n\n TODO: class map is not supported yet. 
When done, it should handle mapping\n classes from different datasets to the same class ID.\n \"\"\"\n def clean_name(name):\n \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n return \",\".join(name.split(\",\")[:1])\n\n # Build (or rebuild) everything else from the info dicts.\n self.num_classes = len(self.class_info)\n self.class_ids = np.arange(self.num_classes)\n self.class_names = [clean_name(c[\"name\"]) for c in self.class_info]\n self.num_images = len(self.image_info)\n self._image_ids = np.arange(self.num_images)\n\n self.class_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.class_info, self.class_ids)}\n\n # Map sources to class_ids they support\n self.sources = list(set([i['source'] for i in self.class_info]))\n self.source_class_ids = {}\n # Loop over datasets\n for source in self.sources:\n self.source_class_ids[source] = []\n # Find classes that belong to this dataset\n for i, info in enumerate(self.class_info):\n # Include BG class in all datasets\n if i == 0 or source == info['source']:\n self.source_class_ids[source].append(i)\n\n def map_source_class_id(self, source_class_id):\n \"\"\"Takes a source class ID and returns the int class ID assigned to it.\n\n For example:\n dataset.map_source_class_id(\"coco.12\") -> 23\n \"\"\"\n return self.class_from_source_map[source_class_id]\n\n def get_source_class_id(self, class_id, source):\n \"\"\"Map an internal class ID to the corresponding class ID in the source dataset.\"\"\"\n info = self.class_info[class_id]\n assert info['source'] == source\n return info['id']\n\n def append_data(self, class_info, image_info):\n self.external_to_class_id = {}\n for i, c in enumerate(self.class_info):\n for ds, id in c[\"map\"]:\n self.external_to_class_id[ds + str(id)] = i\n\n # Map external image IDs to internal ones.\n self.external_to_image_id = {}\n for i, info in enumerate(self.image_info):\n self.external_to_image_id[info[\"ds\"] + str(info[\"id\"])] = i\n\n @property\n def image_ids(self):\n return self._image_ids\n\n def source_image_link(self, image_id):\n \"\"\"Returns the path or URL to the image.\n Override this to return a URL to the image if it's availble online for easy\n debugging.\n \"\"\"\n return self.image_info[image_id][\"path\"]\n\n def load_image(self, image_id):\n \"\"\"Load the specified image and return a [H,W,3] Numpy array.\n \"\"\"\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n return image\n\n def load_mask(self, image_id):\n \"\"\"Load instance masks for the given image.\n\n Different datasets use different ways to store masks. 
Override this\n method to load instance masks and return them in the form of am\n array of binary masks of shape [height, width, instances].\n\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n a binary mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids\n\n\ndef resize_image(image, min_dim=None, max_dim=None, padding=False):\n \"\"\"\n Resizes an image keeping the aspect ratio.\n\n min_dim: if provided, resizes the image such that it's smaller\n dimension == min_dim\n max_dim: if provided, ensures that the image longest side doesn't\n exceed this value.\n padding: If true, pads image with zeros so it's size is max_dim x max_dim\n\n Returns:\n image: the resized image\n window: (y1, x1, y2, x2). If max_dim is provided, padding might\n be inserted in the returned image. If so, this window is the\n coordinates of the image part of the full image (excluding\n the padding). The x2, y2 pixels are not included.\n scale: The scale factor used to resize the image\n padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n # Default window (y1, x1, y2, x2) and default scale == 1.\n h, w = image.shape[:2]\n window = (0, 0, h, w)\n scale = 1\n\n # Scale?\n if min_dim:\n # Scale up but not down\n scale = max(1, min_dim / min(h, w))\n # Does it exceed max dim?\n if max_dim:\n image_max = max(h, w)\n if round(image_max * scale) > max_dim:\n scale = max_dim / image_max\n # Resize image and mask\n if scale != 1:\n image = scipy.misc.imresize(\n image, (round(h * scale), round(w * scale)))\n # Need padding?\n if padding:\n # Get new height and width\n h, w = image.shape[:2]\n top_pad = (max_dim - h) // 2\n bottom_pad = max_dim - h - top_pad\n left_pad = (max_dim - w) // 2\n right_pad = max_dim - w - left_pad\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n return image, window, scale, padding\n\n\ndef resize_mask(mask, scale, padding):\n \"\"\"Resizes a mask using the given scale and padding.\n Typically, you get the scale and padding from resize_image() to\n ensure both, the image and the mask, are resized consistently.\n\n scale: mask scaling factor\n padding: Padding to add to the mask in the form\n [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n h, w = mask.shape[:2]\n mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)\n mask = np.pad(mask, padding, mode='constant', constant_values=0)\n return mask\n\n\ndef minimize_mask(bbox, mask, mini_shape):\n \"\"\"Resize masks to a smaller version to cut memory load.\n Mini-masks can then resized back to image scale using expand_masks()\n\n See inspect_data.ipynb notebook for more details.\n \"\"\"\n mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n m = mask[:, :, i]\n y1, x1, y2, x2 = bbox[i][:4]\n m = m[y1:y2, x1:x2]\n if m.size == 0:\n raise Exception(\"Invalid bounding box with area of zero\")\n m = scipy.misc.imresize(m.astype(float), mini_shape, interp='bilinear')\n mini_mask[:, :, i] = np.where(m >= 128, 1, 0)\n return mini_mask\n\n\ndef expand_mask(bbox, mini_mask, image_shape):\n \"\"\"Resizes mini masks back to image size. 
Reverses the change\n of minimize_mask().\n\n See inspect_data.ipynb notebook for more details.\n \"\"\"\n mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n m = mini_mask[:, :, i]\n y1, x1, y2, x2 = bbox[i][:4]\n h = y2 - y1\n w = x2 - x1\n m = scipy.misc.imresize(m.astype(float), (h, w), interp='bilinear')\n mask[y1:y2, x1:x2, i] = np.where(m >= 128, 1, 0)\n return mask\n\n\n# TODO: Build and use this function to reduce code duplication\ndef mold_mask(mask, config):\n pass\n\n\ndef unmold_mask(mask, bbox, image_shape):\n \"\"\"Converts a mask generated by the neural network into a format similar\n to it's original shape.\n mask: [height, width] of type float. A small, typically 28x28 mask.\n bbox: [y1, x1, y2, x2]. The box to fit the mask in.\n\n Returns a binary mask with the same size as the original image.\n \"\"\"\n threshold = 0.5\n y1, x1, y2, x2 = bbox\n mask = scipy.misc.imresize(\n mask, (y2 - y1, x2 - x1), interp='bilinear').astype(np.float32) / 255.0\n mask = np.where(mask >= threshold, 1, 0).astype(np.uint8)\n\n # Put the mask in the right location.\n full_mask = np.zeros(image_shape[:2], dtype=np.uint8)\n full_mask[y1:y2, x1:x2] = mask\n return full_mask\n\n\n############################################################\n# Anchors\n############################################################\n\ndef generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):\n \"\"\"\n scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]\n ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]\n shape: [height, width] spatial shape of the feature map over which\n to generate anchors.\n feature_stride: Stride of the feature map relative to the image in pixels.\n anchor_stride: Stride of anchors on the feature map. For example, if the\n value is 2 then generate anchors for every other feature map pixel.\n \"\"\"\n # Get all combinations of scales and ratios\n scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))\n scales = scales.flatten()\n ratios = ratios.flatten()\n\n # Enumerate heights and widths from scales and ratios\n heights = scales / np.sqrt(ratios)\n widths = scales * np.sqrt(ratios)\n\n # Enumerate shifts in feature space\n shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride\n shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride\n shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)\n\n # Enumerate combinations of shifts, widths, and heights\n box_widths, box_centers_x = np.meshgrid(widths, shifts_x)\n box_heights, box_centers_y = np.meshgrid(heights, shifts_y)\n\n # Reshape to get a list of (y, x) and a list of (h, w)\n box_centers = np.stack(\n [box_centers_y, box_centers_x], axis=2).reshape([-1, 2])\n box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])\n\n # Convert to corner coordinates (y1, x1, y2, x2)\n boxes = np.concatenate([box_centers - 0.5 * box_sizes,\n box_centers + 0.5 * box_sizes], axis=1)\n return boxes\n\n\ndef generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides,\n anchor_stride):\n \"\"\"Generate anchors at different levels of a feature pyramid. Each scale\n is associated with a level of the pyramid, but each ratio is used in\n all levels of the pyramid.\n\n Returns:\n anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted\n with the same order of the given scales. 
So, anchors of scale[0] come\n    first, then anchors of scale[1], and so on.\n    \"\"\"\n    # Anchors\n    # [anchor_count, (y1, x1, y2, x2)]\n    anchors = []\n    for i in range(len(scales)):\n        anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i],\n                                        feature_strides[i], anchor_stride))\n    return np.concatenate(anchors, axis=0)\n"
] | [
[
"numpy.dot",
"torch.stack",
"numpy.minimum",
"numpy.multiply",
"numpy.where",
"numpy.cumsum",
"numpy.concatenate",
"numpy.max",
"numpy.divide",
"numpy.empty",
"numpy.argmax",
"numpy.arange",
"numpy.sqrt",
"numpy.array",
"numpy.pad",
"numpy.delete",
"numpy.reshape",
"numpy.zeros",
"numpy.stack",
"numpy.argsort",
"torch.log",
"numpy.sum",
"numpy.ones",
"numpy.any",
"numpy.all",
"numpy.meshgrid",
"numpy.maximum"
]
] |
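compute_iou in the row above computes intersection-over-union for one box against an array of boxes, with the areas precomputed by the caller. A standalone numpy sketch of the same arithmetic on two toy (y1, x1, y2, x2) boxes follows; the box values are illustrative:

    import numpy as np

    box = np.array([0, 0, 10, 10])
    boxes = np.array([[0, 0, 10, 10], [5, 5, 15, 15]])
    box_area = (box[2] - box[0]) * (box[3] - box[1])
    boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

    # Intersection rectangle, clipped to zero when the boxes are disjoint.
    y1 = np.maximum(box[0], boxes[:, 0])
    y2 = np.minimum(box[2], boxes[:, 2])
    x1 = np.maximum(box[1], boxes[:, 1])
    x2 = np.minimum(box[3], boxes[:, 3])
    intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
    print(intersection / (box_area + boxes_area - intersection))  # [1.0, ~0.143]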
SidRama/Longitudinal-VAE | [
"3b8a341da14063728dd37a8e76b4372eb5256c97"
] | [
"training.py"
] | [
"from torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import BatchSampler\n\nimport numpy as np\nimport torch\nimport os\n\nfrom elbo_functions import deviance_upper_bound, elbo, KL_closed, minibatch_KLD_upper_bound, minibatch_KLD_upper_bound_iter\nfrom model_test import MSE_test_GPapprox, MSE_test\nfrom utils import SubjectSampler, VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader\nfrom predict_HealthMNIST import recon_complete_gen, gen_rotated_mnist_plot, variational_complete_gen\nfrom validation import validate\n\ndef hensman_training(nnet_model, type_nnet, epochs, dataset, optimiser, type_KL, num_samples, latent_dim, covar_module0,\n covar_module1, likelihoods, m, H, zt_list, P, T, varying_T, Q, weight, id_covariate, loss_function,\n natural_gradient=False, natural_gradient_lr=0.01, subjects_per_batch=20, memory_dbg=False,\n eps=1e-6, results_path=None, validation_dataset=None, generation_dataset=None,\n prediction_dataset=None, gp_model=None, csv_file_test_data=None, csv_file_test_label=None,\n test_mask_file=None, data_source_path=None):\n\n \"\"\"\n Perform training with minibatching and Stochastic Variational Inference [Hensman et. al, 2013]. See L-VAE supplementary\n materials\n\n :param nnet_model: encoder/decoder neural network model \n :param type_nnet: type of encoder/decoder\n :param epochs: numner of epochs\n :param dataset: dataset to use in training\n :param optimiser: optimiser to be used\n :param type_KL: type of KL divergenve computation to use\n :param num_samples: number of samples to use\n :param latent_dim: number of latent dimensions\n :param covar_module0: additive kernel (sum of cross-covariances) without id covariate\n :param covar_module1: additive kernel (sum of cross-covariances) with id covariate\n :param likelihoods: GPyTorch likelihood model\n :param m: variational mean\n :param H: variational variance\n :param zt_list: list of inducing points\n :param P: number of unique instances\n :param T: number of longitudinal samples per individual\n :param Q: number of covariates\n :param weight: value for the weight\n :param id_covariate: covariate number of the id\n :param loss_function: selected loss function\n :param natural_gradient: use of natural gradients\n :param natural_gradient_lr: natural gradients learning rate\n :param subject_per_batch; number of subjects per batch (vectorisation)\n :param memory_dbg: enable debugging\n :param eps: jitter\n :param results_path: path to results\n :param validation_dataset: dataset for vaildation set\n :param generation_dataset: dataset to help with sample image generation\n :param prediction_dataset; dataset with subjects for prediction\n :param gp_mode: GPyTorch gp model\n :param csv_file_test_data: path to test data\n :param csv_file_test_label: path to test label\n :param test_mask_file: path to test mask\n :param data_source_path: path to data source\n\n :return trained models and resulting losses\n\n \"\"\"\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n N = len(dataset)\n assert type_KL == 'GPapprox_closed'\n\n if varying_T:\n n_batches = (P + subjects_per_batch - 1)//subjects_per_batch\n dataloader = HensmanDataLoader(dataset, batch_sampler=VaryingLengthBatchSampler(VaryingLengthSubjectSampler(dataset, id_covariate), subjects_per_batch), num_workers=4) \n else:\n batch_size = subjects_per_batch*T\n n_batches = (P*T + batch_size - 1)//(batch_size)\n dataloader = HensmanDataLoader(dataset, 
batch_sampler=BatchSampler(SubjectSampler(dataset, P, T), batch_size, drop_last=False), num_workers=4)\n\n net_train_loss_arr = np.empty((0, 1))\n recon_loss_arr = np.empty((0, 1))\n nll_loss_arr = np.empty((0, 1))\n kld_loss_arr = np.empty((0, 1))\n penalty_term_arr = np.empty((0, 1))\n best_val_pred_mse = np.Inf\n best_epoch = 0\n for epoch in range(1, epochs + 1):\n recon_loss_sum = 0\n nll_loss_sum = 0\n kld_loss_sum = 0\n net_loss_sum = 0\n iid_kld_sum = 0\n for batch_idx, sample_batched in enumerate(dataloader):\n optimiser.zero_grad()\n nnet_model.train()\n covar_module0.train()\n covar_module1.train()\n indices = sample_batched['idx']\n data = sample_batched['digit'].double().to(device)\n train_x = sample_batched['label'].double().to(device)\n mask = sample_batched['mask'].double().to(device)\n N_batch = data.shape[0]\n\n covariates = torch.cat((train_x[:, :id_covariate], train_x[:, id_covariate+1:]), dim=1)\n\n recon_batch, mu, log_var = nnet_model(data)\n [recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)\n recon_loss = torch.sum(recon_loss)\n nll_loss = torch.sum(nll)\n\n PSD_H = H if natural_gradient else torch.matmul(H, H.transpose(-1, -2))\n\n if varying_T:\n P_in_current_batch = torch.unique(train_x[:, id_covariate]).shape[0]\n kld_loss, grad_m, grad_H = minibatch_KLD_upper_bound_iter(covar_module0, covar_module1, likelihoods, latent_dim, m, PSD_H, train_x, mu, log_var, zt_list, P, P_in_current_batch, N, natural_gradient, id_covariate, eps)\n else:\n P_in_current_batch = N_batch // T\n kld_loss, grad_m, grad_H = minibatch_KLD_upper_bound(covar_module0, covar_module1, likelihoods, latent_dim, m, PSD_H, train_x, mu, log_var, zt_list, P, P_in_current_batch, T, natural_gradient, eps)\n\n recon_loss = recon_loss * P/P_in_current_batch\n nll_loss = nll_loss * P/P_in_current_batch\n\n if loss_function == 'nll':\n net_loss = nll_loss + kld_loss\n elif loss_function == 'mse':\n kld_loss = kld_loss / latent_dim\n net_loss = recon_loss + weight * kld_loss\n\n net_loss.backward()\n optimiser.step()\n\n if natural_gradient:\n LH = torch.cholesky(H)\n iH = torch.cholesky_solve(torch.eye(H.shape[-1], dtype=torch.double).to(device), LH)\n iH_new = iH + natural_gradient_lr*(grad_H + grad_H.transpose(-1,-2))\n LiH_new = torch.cholesky(iH_new)\n H = torch.cholesky_solve(torch.eye(H.shape[-1], dtype=torch.double).to(device), LiH_new).detach()\n m = torch.matmul(H, torch.matmul(iH, m) - natural_gradient_lr*(grad_m - 2*torch.matmul(grad_H, m))).detach()\n\n net_loss_sum += net_loss.item() / n_batches \n recon_loss_sum += recon_loss.item() / n_batches\n nll_loss_sum += nll_loss.item() / n_batches\n kld_loss_sum += kld_loss.item() / n_batches\n\n print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (\n epoch, epochs, net_loss_sum, kld_loss_sum, nll_loss_sum, recon_loss_sum), flush=True)\n penalty_term_arr = np.append(penalty_term_arr, 0.0)\n net_train_loss_arr = np.append(net_train_loss_arr, net_loss_sum)\n recon_loss_arr = np.append(recon_loss_arr, recon_loss_sum)\n nll_loss_arr = np.append(nll_loss_arr, nll_loss_sum)\n kld_loss_arr = np.append(kld_loss_arr, kld_loss_sum)\n\n if (not epoch % 25) and epoch != epochs:\n with torch.no_grad():\n nnet_model.eval()\n covar_module0.eval()\n covar_module1.eval()\n if validation_dataset is not None:\n full_mu = torch.zeros(len(dataset), latent_dim, dtype=torch.double).to(device)\n prediction_x = torch.zeros(len(dataset), Q, dtype=torch.double).to(device)\n for batch_idx, sample_batched in 
enumerate(dataloader):\n label_id = sample_batched['idx']\n prediction_x[label_id] = sample_batched['label'].double().to(device)\n data = sample_batched['digit'].double().to(device)\n covariates = torch.cat((prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate+1:]), dim=1)\n\n mu, log_var = nnet_model.encode(data)\n full_mu[label_id] = mu\n val_pred_mse = validate(nnet_model, type_nnet, validation_dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T, weight, full_mu, prediction_x, id_covariate, loss_function, eps=1e-6)\n if val_pred_mse < best_val_pred_mse:\n best_val_pred_mse = val_pred_mse\n best_epoch = epoch\n\n prediction_dataloader = DataLoader(prediction_dataset, batch_sampler=VaryingLengthBatchSampler(\n VaryingLengthSubjectSampler(prediction_dataset, id_covariate), subjects_per_batch),\n num_workers=4)\n full_mu = torch.zeros(len(prediction_dataset), latent_dim, dtype=torch.double).to(device)\n prediction_x = torch.zeros(len(prediction_dataset), Q, dtype=torch.double).to(device)\n\n with torch.no_grad():\n for batch_idx, sample_batched in enumerate(prediction_dataloader):\n label_id = sample_batched['idx']\n prediction_x[label_id] = sample_batched['label'].double().to(device)\n data = sample_batched['digit'].double().to(device)\n covariates = torch.cat(\n (prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate + 1:]),\n dim=1)\n\n mu, log_var = nnet_model.encode(data)\n full_mu[label_id] = mu\n covar_module0.eval()\n covar_module1.eval()\n if type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':\n MSE_test_GPapprox(csv_file_test_data, csv_file_test_label, test_mask_file,\n data_source_path, type_nnet,\n nnet_model, covar_module0, covar_module1, likelihoods, results_path,\n latent_dim, prediction_x,\n full_mu, zt_list, P, T, id_covariate, varying_T,\n save_file='result_error_best.csv')\n\n print('Saving better model')\n try:\n torch.save(nnet_model.state_dict(), os.path.join(results_path, 'nnet_model_best.pth'))\n torch.save(gp_model.state_dict(), os.path.join(results_path, 'gp_model_best.pth'))\n torch.save(zt_list, os.path.join(results_path, 'zt_list_best.pth'))\n torch.save(m, os.path.join(results_path, 'm_best.pth'))\n torch.save(H, os.path.join(results_path, 'H_best.pth'))\n\n if results_path and generation_dataset:\n prediction_dataloader = DataLoader(prediction_dataset,\n batch_sampler=VaryingLengthBatchSampler(\n VaryingLengthSubjectSampler(prediction_dataset,\n id_covariate),\n subjects_per_batch), num_workers=4)\n full_mu = torch.zeros(len(prediction_dataset), latent_dim, dtype=torch.double).to(\n device)\n prediction_x = torch.zeros(len(prediction_dataset), Q, dtype=torch.double).to(device)\n for batch_idx, sample_batched in enumerate(prediction_dataloader):\n label_id = sample_batched['idx']\n prediction_x[label_id] = sample_batched['label'].double().to(device)\n data = sample_batched['digit'].double().to(device)\n covariates = torch.cat((prediction_x[label_id, :id_covariate],\n prediction_x[label_id, id_covariate + 1:]), dim=1)\n\n mu, log_var = nnet_model.encode(data)\n full_mu[label_id] = mu\n\n recon_complete_gen(generation_dataset, nnet_model, type_nnet,\n results_path, covar_module0,\n covar_module1, likelihoods, latent_dim,\n './data', prediction_x, full_mu, epoch,\n zt_list, P, T, id_covariate, varying_T)\n except e:\n print(e)\n print('Saving intermediate model failed!')\n pass\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n return penalty_term_arr, 
net_train_loss_arr, nll_loss_arr, recon_loss_arr, kld_loss_arr, m, H, best_epoch\n\n\ndef minibatch_training(nnet_model, type_nnet, epochs, dataset, optimiser, type_KL, num_samples, latent_dim, \n covar_module0, covar_module1, likelihoods, zt_list, P, T, Q, weight, id_covariate, \n loss_function, memory_dbg=False, eps=1e-6, results_path=None, validation_dataset=None, \n generation_dataset=None, prediction_dataset=None):\n\n \"\"\"\n Perform training with minibatching (pseudo-minibatching) similar to GPPVAE [Casale et al., 2018]. See L-VAE supplementary\n materials\n\n :param nnet_model: encoder/decoder neural network model \n :param type_nnet: type of encoder/decoder\n :param epochs: number of epochs\n :param dataset: dataset to use in training\n :param optimiser: optimiser to be used\n :param type_KL: type of KL divergence computation to use\n :param num_samples: number of samples to use\n :param latent_dim: number of latent dimensions\n :param covar_module0: additive kernel (sum of cross-covariances) without id covariate\n :param covar_module1: additive kernel (sum of cross-covariances) with id covariate\n :param likelihoods: GPyTorch likelihood model\n :param zt_list: list of inducing points\n :param P: number of unique instances\n :param T: number of longitudinal samples per individual\n :param Q: number of covariates\n :param weight: value for the weight\n :param id_covariate: covariate number of the id\n :param loss_function: selected loss function\n :param memory_dbg: enable debugging\n :param eps: jitter\n :param results_path: path to results\n :param validation_dataset: dataset for validation set\n :param generation_dataset: dataset to help with sample image generation\n :param prediction_dataset: dataset with subjects for prediction\n\n :return trained models and resulting losses\n\n \"\"\"\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n batch_size = T\n assert (type_KL == 'GPapprox_closed' or type_KL == 'GPapprox')\n\n # set up Data Loader for training\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4)\n\n net_train_loss_arr = np.empty((0, 1))\n recon_loss_arr = np.empty((0, 1))\n nll_loss_arr = np.empty((0, 1))\n gp_loss_arr = np.empty((0, 1))\n penalty_term_arr = np.empty((0, 1))\n\n for epoch in range(1, epochs + 1):\n\n optimiser.zero_grad()\n\n full_mu = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)\n full_log_var = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)\n train_x = torch.zeros(len(dataset), Q, dtype=torch.double, requires_grad=False).to(device)\n\n #Step 1: Encode the sample data to obtain \\bar{\\mu} and diag(W)\n with torch.no_grad():\n for batch_idx, sample_batched in enumerate(dataloader):\n indices = sample_batched['idx']\n data = sample_batched['digit'].double().to(device)\n train_x[indices] = sample_batched['label'].double().to(device)\n\n covariates = torch.cat((train_x[indices, :id_covariate], train_x[indices, id_covariate+1:]), dim=1)\n mu, log_var = nnet_model.encode(data)\n\n full_mu[indices] = mu\n full_log_var[indices] = log_var\n\n mu_grads = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)\n log_var_grads = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)\n\n gp_losses = 0\n gp_loss_sum = 0\n param_list = []\n\n #Steps 2 & 3: compute d and E, compute gradients of KLD w.r.t S and theta\n if type_KL == 'GPapprox':\n
for sample in range(0, num_samples):\n Z = nnet_model.sample_latent(full_mu, full_log_var)\n for i in range(0, latent_dim):\n Z_dim = Z[:, i]\n gp_loss = -elbo(covar_module0[i], covar_module1[i], likelihoods[i], train_x, Z_dim,\n zt_list[i].to(device), P, T, eps)\n gp_loss_sum = gp_loss.item() + gp_loss_sum\n gp_losses = gp_losses + gp_loss\n gp_losses = gp_losses / num_samples\n gp_loss_sum /= num_samples\n\n elif type_KL == 'GPapprox_closed':\n for i in range(0, latent_dim):\n mu_sliced = full_mu[:, i]\n log_var_sliced = full_log_var[:, i]\n gp_loss = deviance_upper_bound(covar_module0[i], covar_module1[i],\n likelihoods[i], train_x,\n mu_sliced, log_var_sliced,\n zt_list[i].to(device), P,\n T, eps)\n gp_loss_sum = gp_loss.item() + gp_loss_sum\n gp_losses = gp_losses + gp_loss\n\n \n for i in range(0, latent_dim):\n param_list += list(covar_module0[i].parameters())\n param_list += list(covar_module1[i].parameters())\n# param_list.append(zt_list[i])\n\n if loss_function == 'mse':\n gp_losses = weight*gp_losses/latent_dim\n gp_loss_sum /= latent_dim\n \n mu_grads = torch.autograd.grad(gp_losses, full_mu, retain_graph=True)[0]\n log_var_grads = torch.autograd.grad(gp_losses, full_log_var, retain_graph=True)[0]\n grads = torch.autograd.grad(gp_losses, param_list)\n\n for ind, p in enumerate(param_list):\n p.grad = grads[ind]\n\n recon_loss_sum = 0\n nll_loss_sum = 0\n #Step 4: compute reconstruction losses w.r.t phi and psi, add dKLD/dphi to the gradients\n for batch_idx, sample_batched in enumerate(dataloader):\n data = sample_batched['digit'].double().to(device)\n mask = sample_batched['mask'].double().to(device)\n indices = sample_batched['idx']\n\n label = sample_batched['label'].double().to(device)\n covariates = torch.cat((label[:, :id_covariate], label[:, id_covariate+1:]), dim=1)\n recon_batch, mu, log_var = nnet_model(data)\n \n [recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)\n recon_loss = torch.sum(recon_loss)\n nll = torch.sum(nll)\n\n mu.backward(mu_grads[indices], retain_graph = True)\n log_var.backward(log_var_grads[indices], retain_graph = True)\n\n if loss_function == 'mse': \n recon_loss.backward()\n elif loss_function == 'nll':\n nll.backward()\n \n recon_loss_sum = recon_loss_sum + recon_loss.item()\n nll_loss_sum = nll_loss_sum + nll.item()\n\n #Do logging\n print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL loss: %.3f - Recon Loss: %.3f' % (\n epoch, epochs, recon_loss_sum + weight*gp_loss_sum, gp_loss_sum, nll_loss_sum, recon_loss_sum))\n penalty_term_arr = np.append(penalty_term_arr, 0.0)\n net_train_loss_arr = np.append(net_train_loss_arr, recon_loss_sum + weight*gp_loss_sum)\n nll_loss_arr = np.append(nll_loss_arr, nll_loss_sum)\n recon_loss_arr = np.append(recon_loss_arr, recon_loss_sum)\n gp_loss_arr = np.append(gp_loss_arr, gp_loss_sum)\n\n #Step 5: apply gradients using an Adam optimiser\n optimiser.step()\n\n if (not epoch % 100) and epoch != epochs:\n if validation_dataset is not None:\n validate(nnet_model, type_nnet, validation_dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T, weight, full_mu, train_x, id_covariate, loss_function, eps=1e-6)\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n if results_path and generation_dataset:\n prediction_dataloader = DataLoader(prediction_dataset, batch_size=1000, shuffle=False, num_workers=4)\n full_mu = torch.zeros(len(prediction_dataset), latent_dim, dtype=torch.double).to(device)\n prediction_x = torch.zeros(len(prediction_dataset), 
Q, dtype=torch.double).to(device)\n with torch.no_grad():\n for batch_idx, sample_batched in enumerate(prediction_dataloader):\n # no mini-batching. Instead get a batch of dataset size\n label_id = sample_batched['idx']\n prediction_x[label_id] = sample_batched['label'].double().to(device)\n data = sample_batched['digit'].double().to(device)\n covariates = torch.cat((prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate+1:]), dim=1)\n\n mu, log_var = nnet_model.encode(data)\n\n full_mu[label_id] = mu\n\n recon_complete_gen(generation_dataset, nnet_model, type_nnet,\n results_path, covar_module0,\n covar_module1, likelihoods, latent_dim,\n './data', prediction_x, full_mu, epoch,\n zt_list, P, T, id_covariate)\n\n return penalty_term_arr, net_train_loss_arr, nll_loss_arr, recon_loss_arr, gp_loss_arr\n\ndef standard_training(nnet_model, type_nnet, epochs, dataset, optimiser, type_KL, num_samples, \n latent_dim, covar_modules, likelihoods, zt_list, id_covariate, P, T, Q, weight, constrain_scales, \n loss_function, memory_dbg=False, eps=1e-6, validation_dataset=None, generation_dataset=None, prediction_dataset=None):\n\n\n \"\"\"\n Perform training without minibatching.\n\n :param nnet_model: encoder/decoder neural network model \n :param type_nnet: type of encoder/decoder\n :param epochs: numner of epochs\n :param dataset: dataset to use in training\n :param optimiser: optimiser to be used\n :param type_KL: type of KL divergenve computation to use\n :param num_samples: number of samples to use\n :param latent_dim: number of latent dimensions\n :param covar_modules: additive kernel (sum of cross-covariances)\n :param likelihoods: GPyTorch likelihood model\n :param zt_list: list of inducing points\n :param id_covariate: covariate number of the id\n :param P: number of unique instances\n :param T: number of longitudinal samples per individual\n :param Q: number of covariates\n :param weight: value for the weight\n :param constrain_scales: boolean to constrain scales to 1\n :param loss_function: selected loss function\n :param memory_dbg: enable debugging\n :param eps: jitter\n :param validation_dataset: dataset for vaildation set\n :param generation_dataset: dataset to help with sample image generation\n :param prediction_dataset; dataset with subjects for prediction\n\n :return trained models and resulting losses\n\n \"\"\"\n if type_KL == 'closed':\n covar_module = covar_modules[0]\n elif type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':\n covar_module0 = covar_modules[0]\n covar_module1 = covar_modules[1]\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # set up Data Loader for training\n dataloader = DataLoader(dataset, batch_size=len(dataset), shuffle=False, num_workers=4)\n\n net_train_loss_arr = np.empty((0, 1))\n recon_loss_arr = np.empty((0, 1))\n nll_loss_arr = np.empty((0, 1))\n gp_loss_arr = np.empty((0, 1))\n penalty_term_arr = np.empty((0, 1))\n\n for epoch in range(1, epochs + 1):\n for batch_idx, sample_batched in enumerate(dataloader):\n\n # no mini-batching. 
Instead get a batch of dataset size.\n optimiser.zero_grad() # clear gradients\n label_id = sample_batched['idx']\n label = sample_batched['label']\n data = sample_batched['digit']\n data = data.double().to(device)\n mask = sample_batched['mask']\n mask = mask.to(device)\n\n train_x = label.double().to(device)\n covariates = torch.cat((train_x[:, :id_covariate], train_x[:, id_covariate+1:]), dim=1)\n\n # encode data\n recon_batch, mu, log_var = nnet_model(data)\n\n [recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)\n recon_loss = torch.sum(recon_loss)\n nll_loss = torch.sum(nll)\n\n gp_loss_avg = torch.tensor([0.0]).to(device)\n net_loss = torch.tensor([0.0]).to(device)\n penalty_term = torch.tensor([0.0]).to(device)\n\n for sample_iter in range(0, num_samples):\n\n # Iterate over specified number of samples. Default: num_samples = 1.\n Z = nnet_model.sample_latent(mu, log_var)\n gp_loss = torch.tensor([0.0]).to(device)\n\n for i in range(0, latent_dim):\n Z_dim = Z[:, i].view(-1).type(torch.DoubleTensor).to(device)\n\n if type_KL == 'closed':\n\n # Closed-form KL divergence formula\n kld1 = KL_closed(covar_module[i], train_x, likelihoods[i], data, mu[:, i], log_var[:, i])\n gp_loss = gp_loss + kld1\n elif type_KL == 'conj_gradient':\n\n # GPyTorch default: use modified batch conjugate gradients\n # See: https://arxiv.org/abs/1809.11165\n gp_models[i].set_train_data(train_x.to(device), Z_dim.to(device))\n gp_loss = gp_loss - mlls[i](gp_models[i](train_x.to(device)), Z_dim)\n elif type_KL == 'GPapprox':\n\n # Our proposed efficient approximate GP inference scheme\n # See: http://arxiv.org/abs/2006.09763\n loss = -elbo(covar_module0[i], covar_module1[i], likelihoods[i], train_x, Z_dim,\n zt_list[i].to(device), P, T, eps)\n gp_loss = gp_loss + loss\n\n elif type_KL == 'GPapprox_closed':\n\n # A variant of our proposed efficient approximate GP inference scheme.\n # The key difference with GPapprox is the direct use of the variational mean and variance,\n # instead of a sample from Z. 
We can call this a deviance upper bound.\n # See the L-VAE supplement for more details: http://arxiv.org/abs/2006.09763\n loss = deviance_upper_bound(covar_module0[i], covar_module1[i], likelihoods[i], train_x,\n mu[:, i].view(-1), log_var[:, i].view(-1), zt_list[i].to(device), P,\n T, eps)\n gp_loss = gp_loss + loss\n\n\n if type_KL == 'closed' or type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':\n if loss_function == 'mse':\n gp_loss_avg = gp_loss_avg + (gp_loss / latent_dim)\n elif loss_function == 'nll':\n gp_loss_avg = gp_loss_avg + gp_loss\n elif type_KL == 'conj_gradient':\n if loss_function == 'mse':\n gp_loss = gp_loss * data.shape[0] / latent_dim\n elif loss_function == 'nll':\n gp_loss = gp_loss * data.shape[0]\n gp_loss_avg = gp_loss_avg + gp_loss\n\n if type_KL == 'closed' or type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':\n gp_loss_avg = gp_loss_avg / num_samples\n if loss_function == 'mse':\n net_loss = recon_loss + weight * gp_loss_avg\n elif loss_function == 'nll':\n net_loss = nll_loss + gp_loss_avg\n elif type_KL == 'conj_gradient':\n gp_loss_avg = gp_loss_avg / num_samples\n penalty_term = -0.5 * log_var.sum() / latent_dim\n if loss_function == 'mse':\n net_loss = recon_loss + weight * (gp_loss_avg + penalty_term)\n elif loss_function == 'nll':\n net_loss = nll_loss + gp_loss_avg + penalty_term\n\n net_loss.backward()\n\n if type_KL == 'closed' or type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':\n print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (\n epoch, epochs, net_loss.item(), gp_loss_avg.item(), nll_loss.item(), recon_loss.item()))\n elif type_KL == 'conj_gradient':\n print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - Penalty: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (\n epoch, epochs, net_loss.item(), gp_loss_avg.item(), penalty_term.item(), nll_loss.item(), recon_loss.item()))\n\n penalty_term_arr = np.append(penalty_term_arr, penalty_term.cpu().item())\n net_train_loss_arr = np.append(net_train_loss_arr, net_loss.cpu().item())\n recon_loss_arr = np.append(recon_loss_arr, recon_loss.cpu().item())\n nll_loss_arr = np.append(nll_loss_arr, nll_loss.cpu().item())\n gp_loss_arr = np.append(gp_loss_arr, gp_loss_avg.cpu().item())\n optimiser.step()\n if constrain_scales:\n for i in range(0, latent_dim):\n likelihoods[i].noise = torch.tensor([1], dtype=torch.float).to(device)\n\n if (not epoch % 100) and epoch != epochs:\n if validation_dataset is not None:\n standard_validate(nnet_model, type_nnet, validation_dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T, weight, mu, train_x, id_covariate, loss_function, eps=1e-6)\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n return penalty_term_arr, net_train_loss_arr, nll_loss_arr, recon_loss_arr, gp_loss_arr\n\ndef variational_inference_optimization(nnet_model, type_nnet, epochs, dataset, prediction_dataset, optimiser, \n latent_dim, covar_module0, covar_module1, likelihoods, zt_list, P, T, Q, weight, constrain_scales, \n id_covariate, loss_function, memory_dbg=False, eps=1e-6, results_path=None, save_path=None, gp_model_folder=None,\n generation_dataset=None):\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # set up Data Loader for training\n dataloader = DataLoader(dataset, batch_size=len(dataset), shuffle=False, num_workers=4)\n\n net_train_loss_arr = np.empty((0, 1))\n recon_loss_arr = np.empty((0, 1))\n nll_loss_arr = np.empty((0, 1))\n gp_loss_arr = np.empty((0, 1))\n 
penalty_term_arr = np.empty((0, 1))\n\n for batch_idx, sample_batched in enumerate(dataloader):\n label_id = sample_batched['idx']\n label = sample_batched['label'].double().to(device)\n data = sample_batched['digit'].double().to(device)\n mask = sample_batched['mask'].double().to(device)\n\n covariates = torch.cat((label[:, :id_covariate], label[:, id_covariate+1:]), dim=1)\n\n # encode data\n mu, log_var = nnet_model.encode(data)\n\n mu = torch.nn.Parameter(mu.clone().detach(), requires_grad=True)\n log_var = torch.nn.Parameter(log_var.clone().detach(), requires_grad=True)\n\n try:\n mu = torch.load(os.path.join(gp_model_folder, 'mu.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)\n log_var = torch.load(os.path.join(gp_model_foder, 'log_var.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)\n except:\n pass\n\n optimiser.add_param_group({'params': mu})\n optimiser.add_param_group({'params': log_var})\n\n for epoch in range(1, epochs + 1):\n optimiser.zero_grad()\n Z = nnet_model.sample_latent(mu, log_var)\n recon_batch = nnet_model.decode(Z)\n [recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)\n recon_loss = torch.sum(recon_loss)\n nll_loss = torch.sum(nll)\n\n gp_loss_avg = torch.tensor([0.0]).to(device)\n net_loss = torch.tensor([0.0]).to(device)\n penalty_term = torch.tensor([0.0]).to(device)\n\n for i in range(0, latent_dim):\n loss = deviance_upper_bound(covar_module0[i], covar_module1[i], likelihoods[i], label,\n mu[:, i].view(-1), log_var[:, i].view(-1), zt_list[i].to(device), P,\n T, eps)\n gp_loss_avg = gp_loss_avg + loss / latent_dim\n\n if loss_function == 'mse':\n net_loss = recon_loss + weight * gp_loss_avg\n elif loss_function == 'nll':\n net_loss = nll_loss + gp_loss_avg\n\n net_loss.backward()\n\n print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (\n epoch, epochs, net_loss.item(), gp_loss_avg.item(), nll_loss.item(), recon_loss.item()),\n flush=True)\n\n penalty_term_arr = np.append(penalty_term_arr, penalty_term.cpu().item())\n net_train_loss_arr = np.append(net_train_loss_arr, net_loss.cpu().item())\n recon_loss_arr = np.append(recon_loss_arr, recon_loss.cpu().item())\n nll_loss_arr = np.append(nll_loss_arr, nll_loss.cpu().item())\n gp_loss_arr = np.append(gp_loss_arr, gp_loss_avg.cpu().item())\n optimiser.step()\n\n if not epoch % 100:\n sv_pth = os.path.join(save_path, 'recon_' + str(epoch) + '.pdf')\n gen_rotated_mnist_plot(data[1920:2080].cpu().detach(), recon_batch[1920:2080].cpu().detach(), label[1920:2080].cpu().detach(), seq_length=20, num_sets=8, save_file=sv_pth)\n\n torch.save(nnet_model.state_dict(), os.path.join(save_path, 'final-vae_model.pth'))\n torch.save(mu, os.path.join(save_path, 'mu.pth'))\n torch.save(log_var, os.path.join(save_path, 'log_var.pth'))\n for i in range(0, latent_dim):\n torch.save(covar_module0[i].state_dict(), os.path.join(save_path, 'cov_module0_' + str(i) + '.pth'))\n torch.save(covar_module1[i].state_dict(), os.path.join(save_path, 'cov_module1_' + str(i) + '.pth'))\n\n prediction_dataloader = DataLoader(prediction_dataset, batch_size=len(prediction_dataset), shuffle=False, num_workers=1)\n for batch_idx, sample_batched in enumerate(prediction_dataloader):\n label_pred = sample_batched['label'].double().to(device)\n data_pred = sample_batched['digit'].double().to(device)\n mask_pred = sample_batched['mask'].double().to(device)\n covariates = torch.cat((label_pred[:, :id_covariate], label_pred[:, 
id_covariate+1:]), dim=1)\n # encode data\n mu_pred, log_var_pred = nnet_model.encode(data_pred)\n break\n\n try:\n mu_pred = torch.load(os.path.join(gp_model_folder, 'mu_pred.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)\n log_var_pred = torch.load(os.path.join(gp_model_folder, 'log_var_pred.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)\n except:\n pass\n\n mu_pred = torch.nn.Parameter(mu_pred.clone().detach(), requires_grad=True)\n log_var_pred = torch.nn.Parameter(log_var_pred.clone().detach(), requires_grad=True)\n adam_param_list = []\n adam_param_list.append({'params': mu_pred})\n adam_param_list.append({'params': log_var_pred})\n optimiser_pred = torch.optim.Adam(adam_param_list, lr=1e-3)\n for epoch in range(1, 1001):\n optimiser_pred.zero_grad()\n\n Z = nnet_model.sample_latent(mu_pred, log_var_pred)\n\n recon_batch = nnet_model.decode(Z)\n [recon_loss, nll] = nnet_model.loss_function(recon_batch,\n data_pred,\n mask_pred)\n\n recon_loss = torch.sum(recon_loss)\n nll_loss = torch.sum(nll)\n\n gp_loss_avg = torch.tensor([0.0]).to(device)\n\n prediction_mu = torch.cat((mu_pred, mu), dim=0)\n prediction_log_var = torch.cat((log_var_pred, log_var), dim=0)\n prediction_x = torch.cat((label_pred, label), dim=0)\n\n for i in range(0, latent_dim):\n loss = deviance_upper_bound(covar_module0[i], covar_module1[i], likelihoods[i], prediction_x,\n prediction_mu[:, i].view(-1), prediction_log_var[:, i].view(-1),\n zt_list[i].to(device), P+8, T, eps)\n gp_loss_avg = gp_loss_avg + loss / latent_dim\n\n if loss_function == 'mse':\n net_loss = recon_loss + weight * gp_loss_avg\n elif loss_function == 'nll':\n net_loss = nll_loss + gp_loss_avg\n\n net_loss.backward()\n\n print('Iter %d/1000 - Total Loss: %.3f - GP Loss: %.3f - Recon Loss: %.3f' % (\n epoch, net_loss.item(), gp_loss_avg.item(), recon_loss.item()),\n flush=True)\n\n optimiser_pred.step()\n\n torch.save(mu_pred, os.path.join(save_path, 'mu_pred.pth'))\n torch.save(log_var_pred, os.path.join(save_path, 'log_var_pred.pth'))\n\n l = [i*20 + k for i in range(0,8) for k in range(0,5)]\n prediction_x = torch.cat((label_pred[l],\n label))\n prediction_mu = torch.cat((mu_pred[l],\n mu))\n\n if generation_dataset:\n variational_complete_gen(generation_dataset, nnet_model, type_nnet,\n results_path, covar_module0,\n covar_module1, likelihoods, latent_dim,\n './data', prediction_x, prediction_mu, 'final',\n zt_list, P, T, id_covariate)\n\n exit(0)\n"
] | [
[
"torch.device",
"torch.cat",
"torch.cholesky",
"numpy.empty",
"torch.unique",
"torch.no_grad",
"torch.optim.Adam",
"torch.autograd.grad",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.eye",
"numpy.append",
"torch.matmul",
"torch.sum"
]
] |
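The natural-gradient branch of `hensman_training` above compresses the whole update into a few dense lines, so here is a minimal, self-contained sketch of what it does: factorise the variational covariance H by Cholesky, take a step on the precision (the inverse covariance) using the KLD gradients, then map back to recover the new covariance and mean. The matrix size, the learning rate, and the random `grad_m`/`grad_H` placeholders (standing in for the outputs of `minibatch_KLD_upper_bound`) are illustrative assumptions, and `torch.linalg.cholesky` is used in place of the deprecated `torch.cholesky` that appears in the file.

```python
import torch

torch.manual_seed(0)
M = 5        # number of inducing points (placeholder)
lr = 0.01    # step size, analogous to natural_gradient_lr

# Random SPD covariance H and mean m stand in for the trained variational state.
A = torch.randn(M, M, dtype=torch.double)
H = A @ A.T + M * torch.eye(M, dtype=torch.double)
m = torch.randn(M, 1, dtype=torch.double)

# Placeholder gradients of the KLD bound w.r.t. m and H (assumed inputs,
# scaled small so the updated precision stays positive definite in this toy).
grad_m = torch.randn(M, 1, dtype=torch.double)
grad_H = 0.1 * torch.randn(M, M, dtype=torch.double)

I = torch.eye(M, dtype=torch.double)
LH = torch.linalg.cholesky(H)           # H = LH @ LH.T
iH = torch.cholesky_solve(I, LH)        # precision H^{-1}

# Step on the precision, symmetrising the gradient, then map back.
iH_new = iH + lr * (grad_H + grad_H.transpose(-1, -2))
LiH_new = torch.linalg.cholesky(iH_new)
H_new = torch.cholesky_solve(I, LiH_new)

# Mean update mirrors the expression in hensman_training.
m_new = H_new @ (iH @ m - lr * (grad_m - 2 * grad_H @ m))
print(H_new.shape, m_new.shape)
```

Stepping on the precision rather than on H directly is the point of the construction: the precision and precision-times-mean are the Gaussian's natural parameters, so the gradient step is taken in that parameterisation before converting back to (m, H).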
reubentg/545lab3 | [
"4d907594bbbfb1aba951396ecd568e0023f140f2"
] | [
"src/MPPI.py"
] | [
"#!/usr/bin/env python\n\nimport time\nimport sys\nimport rospy\nimport rosbag\nimport numpy as np\nimport utils as Utils\n\nimport torch\nimport torch.utils.data\nfrom torch.autograd import Variable\n\nfrom nav_msgs.srv import GetMap\nfrom ackermann_msgs.msg import AckermannDriveStamped\nfrom vesc_msgs.msg import VescStateStamped\nfrom nav_msgs.msg import Path\nfrom geometry_msgs.msg import PoseStamped, PoseArray, PoseWithCovarianceStamped, PointStamped\n\n\nclass MPPIController:\n\n def __init__(self, T, K, sigma=0.5, _lambda=0.5):\n self.SPEED_TO_ERPM_OFFSET = float(rospy.get_param(\"/vesc/speed_to_erpm_offset\", 0.0))\n self.SPEED_TO_ERPM_GAIN = float(rospy.get_param(\"/vesc/speed_to_erpm_gain\", 4614.0))\n self.STEERING_TO_SERVO_OFFSET = float(rospy.get_param(\"/vesc/steering_angle_to_servo_offset\", 0.5304))\n self.STEERING_TO_SERVO_GAIN = float(rospy.get_param(\"/vesc/steering_angle_to_servo_gain\", -1.2135))\n self.CAR_LENGTH = 0.33\n\n self.last_pose = None\n # MPPI params\n self.T = T # Length of rollout horizon\n self.K = K # Number of sample rollouts\n self.sigma = sigma\n self._lambda = _lambda\n\n self.goal = None # Lets keep track of the goal pose (world frame) over time\n self.lasttime = None\n\n # PyTorch / GPU data configuration\n # TODO\n # you should pre-allocate GPU memory when you can, and re-use it when\n # possible for arrays storing your controls or calculated MPPI costs, etc\n model_name = rospy.get_param(\"~nn_model\", \"myneuralnetisbestneuralnet.pt\")\n self.model = torch.load(model_name)\n self.model.cuda() # tell torch to run the network on the GPU\n self.dtype = torch.cuda.FloatTensor\n print(\"Loading:\", model_name)\n print(\"Model:\\n\", self.model)\n print(\"Torch Datatype:\", self.dtype)\n\n # control outputs\n self.msgid = 0\n\n # visualization paramters\n self.num_viz_paths = 40\n if self.K < self.num_viz_paths:\n self.num_viz_paths = self.K\n\n # We will publish control messages and a way to visualize a subset of our\n # rollouts, much like the particle filter\n self.ctrl_pub = rospy.Publisher(rospy.get_param(\"~ctrl_topic\", \"/vesc/high_level/ackermann_cmd_mux/input/nav0\"),\n AckermannDriveStamped, queue_size=2)\n self.path_pub = rospy.Publisher(\"/mppi/paths\", Path, queue_size=self.num_viz_paths)\n\n # Use the 'static_map' service (launched by MapServer.launch) to get the map\n map_service_name = rospy.get_param(\"~static_map\", \"static_map\")\n print(\"Getting map from service: \", map_service_name)\n rospy.wait_for_service(map_service_name)\n map_msg = rospy.ServiceProxy(map_service_name, GetMap)().map # The map, will get passed to init of sensor model\n self.map_info = map_msg.info # Save info about map for later use\n print(\"Map Information:\\n\", self.map_info)\n\n # Create numpy array representing map for later use\n self.map_height = map_msg.info.height\n self.map_width = map_msg.info.width\n array_255 = np.array(map_msg.data).reshape((map_msg.info.height, map_msg.info.width))\n self.permissible_region = np.zeros_like(array_255, dtype=bool)\n self.permissible_region[\n array_255 == 0] = 1 # Numpy array of dimension (map_msg.info.height, map_msg.info.width),\n # With values 0: not permissible, 1: permissible\n self.permissible_region = np.negative(self.permissible_region) # 0 is permissible, 1 is not\n\n print(\"Making callbacks\")\n self.goal_sub = rospy.Subscriber(\"/move_base_simple/goal\",\n PoseStamped, self.clicked_goal_cb, queue_size=1)\n self.pose_sub = rospy.Subscriber(\"/pf/ta/viz/inferred_pose\",\n PoseStamped, self.mppi_cb, 
queue_size=1)\n\n # TODO\n # You may want to debug your bounds checking code here, by clicking on a part\n # of the map and convincing yourself that you are correctly mapping the\n # click, and thus the goal pose, to accessible places in the map\n def clicked_goal_cb(self, msg):\n self.goal = np.array([msg.pose.position.x,\n msg.pose.position.y,\n Utils.quaternion_to_angle(msg.pose.orientation)])\n print(\"Current Pose: \", self.last_pose)\n print(\"SETTING Goal: \", self.goal)\n\n def running_cost(self, pose, goal, ctrl, noise):\n # TODO\n # This cost function drives the behavior of the car. You want to specify a\n # cost function that penalizes behavior that is bad with high cost, and\n # encourages good behavior with low cost.\n # We have split up the cost function for you to a) get the car to the goal\n # b) avoid driving into walls and c) the MPPI control penalty to stay\n # smooth\n # You should feel free to explore other terms to get better or unique\n # behavior\n pose_cost = 0.0\n bounds_check = 0.0\n ctrl_cost = 0.0\n\n return pose_cost + ctrl_cost + bounds_check\n\n def mppi(self, init_pose, init_input):\n t0 = time.time()\n # Network input can be:\n # 0 1 2 3 4 5 6 7\n # xdot, ydot, thetadot, sin(theta), cos(theta), vel, delta, dt\n\n # MPPI should\n # generate noise according to sigma\n # combine that noise with your central control sequence\n # Perform rollouts with those controls from your current pose\n # Calculate costs for each of K trajectories\n # Perform the MPPI weighting on your calculatd costs\n # Scale the added noise by the weighting and add to your control sequence\n # Apply the first control values, and shift your control trajectory\n\n # Notes:\n # MPPI can be assisted by carefully choosing lambda, and sigma\n # It is advisable to clamp the control values to be within the feasible range\n # of controls sent to the Vesc\n # Your code should account for theta being between -pi and pi. This is\n # important.\n # The more code that uses pytorch's cuda abilities, the better; every line in\n # python will slow down the control calculations. 
You should be able to keep a\n # reasonable amount of calculations done (T = 40, K = 2000) within the 100ms\n # between inferred-poses from the particle filter.\n\n print(\"MPPI: %4.5f ms\" % ((time.time() - t0) * 1000.0))\n\n return run_ctrl, poses\n\n def mppi_cb(self, msg):\n # print(\"callback\")\n if self.last_pose is None:\n self.last_pose = np.array([msg.pose.position.x,\n msg.pose.position.y,\n Utils.quaternion_to_angle(msg.pose.orientation)])\n # Default: initial goal to be where the car is when MPPI node is\n # initialized\n self.goal = self.last_pose\n self.lasttime = msg.header.stamp.to_sec()\n return\n\n theta = Utils.quaternion_to_angle(msg.pose.orientation)\n curr_pose = np.array([msg.pose.position.x,\n msg.pose.position.y,\n theta])\n\n pose_dot = curr_pose - self.last_pose # get state\n self.last_pose = curr_pose\n\n timenow = msg.header.stamp.to_sec()\n dt = timenow - self.lasttime\n self.lasttime = timenow\n nn_input = np.array([pose_dot[0], pose_dot[1], pose_dot[2],\n np.sin(theta),\n np.cos(theta), 0.0, 0.0, dt])\n\n run_ctrl, poses = mppi(curr_pose, nn_input)\n\n self.send_controls(run_ctrl[0], run_ctrl[1])\n\n self.visualize(poses)\n\n def send_controls(self, speed, steer):\n print(\"Speed:\", speed, \"Steering:\", steer)\n ctrlmsg = AckermannDriveStamped()\n ctrlmsg.header.seq = self.msgid\n ctrlmsg.drive.steering_angle = steer\n ctrlmsg.drive.speed = speed\n self.ctrl_pub.publish(ctrlmsg)\n self.msgid += 1\n\n # Publish some paths to RVIZ to visualize rollouts\n def visualize(self, poses):\n if self.path_pub.get_num_connections() > 0:\n frame_id = 'map'\n for i in range(0, self.num_viz_paths):\n pa = Path()\n pa.header = Utils.make_header(frame_id)\n pa.poses = map(Utils.particle_to_posestamped, poses[i, :, :], [frame_id] * self.T)\n self.path_pub.publish(pa)\n\n\ndef test_MPPI(mp, N, goal=np.array([0., 0., 0.])):\n init_input = np.array([0., 0., 0., 0., 1., 0., 0., 0.])\n pose = np.array([0., 0., 0.])\n mp.goal = goal\n print(\"Start:\", pose)\n mp.ctrl.zero_()\n last_pose = np.array([0., 0., 0.])\n for i in range(0, N):\n # ROLLOUT your MPPI function to go from a known location to a specified\n # goal pose. Convince yourself that it works.\n\n print(\"Now:\", pose)\n print(\"End:\", pose)\n\n\nif __name__ == '__main__':\n T = 30\n K = 1000\n sigma = 1.0 # These values will need to be tuned\n _lambda = 1.0\n\n # run with ROS\n rospy.init_node(\"mppi_control\", anonymous=True) # Initialize the node\n mp = MPPIController(T, K, sigma, _lambda)\n rospy.spin()\n\n # test & DEBUG\n mp = MPPIController(T, K, sigma, _lambda)\n test_MPPI(mp, 10, np.array([0., 0., 0.]))\n"
] | [
[
"numpy.zeros_like",
"numpy.array",
"numpy.sin",
"numpy.negative",
"torch.load",
"numpy.cos"
]
] |
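`MPPI.py` above leaves the body of `mppi()` as a TODO, with the algorithm only described in comments. The sketch below is one way those comments translate into code, kept in plain NumPy so it runs without ROS or a GPU: sample noise around the nominal control sequence, roll out all K samples, cost them, re-weight the noise with the exponential MPPI weighting, fold it back into the sequence, then apply the first control and shift. The point-mass dynamics and the quadratic goal cost are placeholders for the lab's learned neural model and `running_cost`, not the reference solution.

```python
import numpy as np

rng = np.random.default_rng(0)
T, K = 30, 1000            # horizon and rollout count, as in the skeleton
sigma, _lambda = 1.0, 1.0  # noise scale and temperature
dt = 0.1
goal = np.array([5.0, 0.0])
U = np.zeros((T, 2))       # nominal control sequence (accelerations)

def rollout_costs(ctrl):
    """Vectorised point-mass rollouts; returns one cost per sample."""
    state = np.zeros((K, 4))               # x, y, vx, vy
    cost = np.zeros(K)
    for t in range(T):
        state[:, 2:] += ctrl[:, t] * dt    # integrate velocity
        state[:, :2] += state[:, 2:] * dt  # integrate position
        cost += np.sum((state[:, :2] - goal) ** 2, axis=1)
    return cost

for it in range(20):                        # refine the nominal sequence
    noise = rng.normal(0.0, sigma, size=(K, T, 2))
    cost = rollout_costs(U[None] + noise)
    beta = cost.min()                       # subtract min for numerical stability
    w = np.exp(-(cost - beta) / _lambda)
    w /= w.sum()
    U += np.einsum('k,ktc->tc', w, noise)   # MPPI weighted-noise update

u0 = U[0].copy()                            # control to apply now
U = np.roll(U, -1, axis=0); U[-1] = 0.0     # shift the horizon forward
print("first control:", u0)
```

In the real node the rollouts and costs would live on the GPU, the controls would be clamped to the VESC's feasible range, and theta would be wrapped to [-pi, pi], as the skeleton's notes require.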
krupaddesai/air-canvas | [
"1084a9d3b38753ad7c930b56b3e2363b5748c047"
] | [
"CVCanvas.py"
] | [
"import cv2\r\nimport numpy as np\r\nimport os\r\nimport HandTrackingModule as htm\r\n\r\nfolderPath = \"HeaderImages\"\r\nmyList = os.listdir(folderPath)\r\noverlayList = []\r\nfor path in myList:\r\n image = cv2.imread(f'{folderPath}/{path}')\r\n overlayList.append(image)\r\n\r\nheader = overlayList[0]\r\ncolor = (22, 22 ,255)\r\nbrushThickness = 20\r\neraserThickness = 60\r\n\r\nvid = cv2.VideoCapture(0)\r\nvid.set(3, 1280)\r\nvid.set(4, 720)\r\ndetector = htm.handDetector()\r\n\r\nimgCanvas= np.zeros((720, 1280, 3), np.uint8)\r\nwhile True:\r\n # 1 Import Image\r\n success, img = vid.read()\r\n img = cv2.flip(img, 1)\r\n\r\n # 2 Find Hand Landmarks\r\n img = detector.findHands(img)\r\n lmList = detector.findPosition(img, draw=False)\r\n\r\n \r\n #position of index and middle fingers\r\n if len(lmList) != 0:\r\n x1, y1 = lmList[8][1:]\r\n x2, y2 = lmList[12][1:]\r\n\r\n # 3 Check which fingers are up\r\n fingers = detector.fingersUp()\r\n # 4 If selection Mode - Two finger are up\r\n if fingers[1] and fingers[2]:\r\n xp, yp = 0, 0\r\n if y1 < 125:\r\n if 200 < x1 < 375:\r\n header = overlayList[0]\r\n color = (22, 22, 255)\r\n if 385 < x1 < 555:\r\n header = overlayList[1]\r\n color = (77, 145, 255)\r\n if 565 < x1 < 735:\r\n header = overlayList[2]\r\n color = (89, 222, 255)\r\n if 745 < x1 < 915:\r\n header = overlayList[3]\r\n color = (55, 128, 0)\r\n if 925 < x1 < 1095:\r\n header = overlayList[4]\r\n color = (173, 75, 0)\r\n if 1105 < x1 < 1280:\r\n header = overlayList[5]\r\n color = (0, 0, 0)\r\n\r\n cv2.rectangle(img, (x1, y1-25), (x2,y2+25), color, cv2.FILLED)\r\n \r\n #print(\"selection mode\")\r\n # 5 If Drawing Mode - Index finger is up\r\n if fingers[1] and not fingers[2]:\r\n cv2.circle(img, (x1, y1), 10, color, cv2.FILLED)\r\n print(\"drawing mode\")\r\n if xp == 0 and yp == 0:\r\n xp, yp = x1, y1\r\n\r\n if color == (0, 0, 0):\r\n cv2.line(img, (xp, yp), (x1, y1), color, eraserThickness)\r\n cv2.line(imgCanvas, (xp, yp), (x1, y1), color, eraserThickness)\r\n else:\r\n cv2.line(img, (xp, yp), (x1, y1), color, brushThickness)\r\n cv2.line(imgCanvas, (xp, yp), (x1, y1), color, brushThickness)\r\n \r\n xp,yp = x1, y1\r\n\r\n imgGray = cv2.cvtColor(imgCanvas, cv2.COLOR_BGR2GRAY)\r\n _, imgInv = cv2.threshold(imgGray, 50, 255, cv2.THRESH_BINARY_INV) \r\n imgInv = cv2.cvtColor(imgInv, cv2.COLOR_GRAY2BGR)\r\n img = cv2.bitwise_and(img, imgInv)\r\n img = cv2.bitwise_or(img, imgCanvas)\r\n\r\n img[0:125, 0:1280] = header\r\n #img = cv2.addWeighted(img, 1, imgCanvas, 1, 0)\r\n cv2.imshow(\"Image\", img)\r\n cv2.imshow(\"Canvas\", imgCanvas)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n"
] | [
[
"numpy.zeros"
]
] |
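The least obvious part of `CVCanvas.py` is the compositing at the bottom of its loop: the canvas is thresholded into an inverse mask, `bitwise_and` punches the strokes out of the camera frame, and `bitwise_or` paints the coloured strokes back in. Below is a small headless sketch of just that step; the synthetic grey frame and the single red stroke are stand-ins for the webcam capture and the hand-tracked drawing.

```python
import cv2
import numpy as np

frame = np.full((720, 1280, 3), 90, np.uint8)    # stand-in for vid.read()
canvas = np.zeros((720, 1280, 3), np.uint8)
cv2.line(canvas, (100, 400), (900, 400), (22, 22, 255), 20)  # one red stroke

gray = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
_, inv = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY_INV)
inv = cv2.cvtColor(inv, cv2.COLOR_GRAY2BGR)
out = cv2.bitwise_and(frame, inv)   # black out the pixels under the stroke
out = cv2.bitwise_or(out, canvas)   # paint the stroke colour back in

print(out[400, 500], frame[400, 500])  # stroke colour vs. untouched background
```

This is also why the eraser simply draws in (0, 0, 0): black pixels fall below the threshold, so they survive the AND as ordinary background and contribute nothing in the OR.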
bruinxiong/kornia | [
"0915bfec16425fcedf84f193aaa5c0f04fb6a8da"
] | [
"kornia/augmentation/augmentation.py"
] | [
"from typing import Callable, Tuple, Union, List, Optional, Dict, cast\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import pad\n\nfrom kornia.constants import Resample, BorderType, SamplePadding\nfrom kornia.augmentation import AugmentationBase2D\nfrom . import functional as F\nfrom . import random_generator as rg\nfrom .utils import (\n _range_bound,\n _singular_range_check\n)\n\n\nclass AugmentationBase(AugmentationBase2D):\n __doc__ = AugmentationBase2D.__doc__\n\n def __init__(self, return_transform: bool = False, same_on_batch: bool = False, p: float = 0.5,\n keepdim: bool = False) -> None:\n super(AugmentationBase2D, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n raise DeprecationWarning(\n \"`AugmentationBase` is deprecated. Please use `kornia.augmentation.AugmentationBase2D instead.`\")\n\n\nclass RandomHorizontalFlip(AugmentationBase2D):\n\n r\"\"\"Applies a random horizontal flip to a tensor image or a batch of tensor images with a given probability.\n\n Input should be a tensor of shape (C, H, W) or a batch of tensors :math:`(B, C, H, W)`.\n If Input is a tuple it is assumed that the first element contains the aforementioned tensors and the second,\n the corresponding transformation matrix that has been applied to them. In this case the module\n will Horizontally flip the tensors and concatenate the corresponding transformation matrix to the\n previous one. This is especially useful when using this functionality as part of an ``nn.Sequential`` module.\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n return_transform (bool): if ``True`` return the matrix describing the transformation applied to each\n input tensor. If ``False`` and the input is a tuple the applied transformation\n wont be concatenated.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> input = torch.tensor([[[[0., 0., 0.],\n ... [0., 0., 0.],\n ... [0., 1., 1.]]]])\n >>> seq = nn.Sequential(RandomHorizontalFlip(p=1.0, return_transform=True),\n ... 
RandomHorizontalFlip(p=1.0, return_transform=True))\n >>> seq(input)\n (tensor([[[[0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.]]]]), tensor([[[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]]]))\n \"\"\"\n\n def __repr__(self) -> str:\n return self.__class__.__name__ + f\"({super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return dict()\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_hflip_transformation(input)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_hflip(input)\n\n\nclass RandomVerticalFlip(AugmentationBase2D):\n\n r\"\"\"Applies a random vertical flip to a tensor image or a batch of tensor images with a given probability.\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n return_transform (bool): if ``True`` return the matrix describing the transformation applied to each\n input tensor. If ``False`` and the input is a tuple the applied transformation\n wont be concatenated.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> input = torch.tensor([[[[0., 0., 0.],\n ... [0., 0., 0.],\n ... [0., 1., 1.]]]])\n >>> seq = RandomVerticalFlip(p=1.0, return_transform=True)\n >>> seq(input)\n (tensor([[[[0., 1., 1.],\n [0., 0., 0.],\n [0., 0., 0.]]]]), tensor([[[ 1., 0., 0.],\n [ 0., -1., 2.],\n [ 0., 0., 1.]]]))\n\n \"\"\"\n\n def __repr__(self) -> str:\n return self.__class__.__name__ + f\"({super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return dict()\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_vflip_transformation(input)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_vflip(input)\n\n\nclass ColorJitter(AugmentationBase2D):\n\n r\"\"\"Applies a random transformation to the brightness, contrast, saturation and hue of a tensor image.\n\n Args:\n p (float): probability of applying the transformation. Default value is 1.\n brightness (float or tuple): Default value is 0.\n contrast (float or tuple): Default value is 0.\n saturation (float or tuple): Default value is 0.\n hue (float or tuple): Default value is 0.\n return_transform (bool): if ``True`` return the matrix describing the transformation applied to each\n input tensor. If ``False`` and the input is a tuple the applied transformation\n wont be concatenated.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). 
Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> inputs = torch.ones(1, 3, 3, 3)\n >>> aug = ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.)\n >>> aug(inputs)\n tensor([[[[0.9993, 0.9993, 0.9993],\n [0.9993, 0.9993, 0.9993],\n [0.9993, 0.9993, 0.9993]],\n <BLANKLINE>\n [[0.9993, 0.9993, 0.9993],\n [0.9993, 0.9993, 0.9993],\n [0.9993, 0.9993, 0.9993]],\n <BLANKLINE>\n [[0.9993, 0.9993, 0.9993],\n [0.9993, 0.9993, 0.9993],\n [0.9993, 0.9993, 0.9993]]]])\n \"\"\"\n\n def __init__(\n self, brightness: Union[torch.Tensor, float, Tuple[float, float], List[float]] = 0.,\n contrast: Union[torch.Tensor, float, Tuple[float, float], List[float]] = 0.,\n saturation: Union[torch.Tensor, float, Tuple[float, float], List[float]] = 0.,\n hue: Union[torch.Tensor, float, Tuple[float, float], List[float]] = 0.,\n return_transform: bool = False, same_on_batch: bool = False, p: float = 1.,\n keepdim: bool = False\n ) -> None:\n super(ColorJitter, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n self.brightness: torch.Tensor = _range_bound(brightness, 'brightness', center=1., bounds=(0, 2))\n self.contrast: torch.Tensor = _range_bound(contrast, 'contrast', center=1.)\n self.saturation: torch.Tensor = _range_bound(saturation, 'saturation', center=1.)\n self.hue: torch.Tensor = _range_bound(hue, 'hue', bounds=(-0.5, 0.5))\n\n def __repr__(self) -> str:\n repr = f\"brightness={self.brightness}, contrast={self.contrast}, saturation={self.saturation}, hue={self.hue}\"\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return rg.random_color_jitter_generator(\n batch_shape[0], self.brightness, self.contrast, self.saturation, self.hue, self.same_on_batch,\n self.device, self.dtype)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_intensity_transformation(input)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_color_jitter(input, params)\n\n\nclass RandomGrayscale(AugmentationBase2D):\n r\"\"\"Applies random transformation to Grayscale according to a probability p value.\n\n Args:\n p (float): probability of the image to be transformed to grayscale. Default value is 0.1.\n return_transform (bool): if ``True`` return the matrix describing the transformation applied to each\n input tensor. If ``False`` and the input is a tuple the applied transformation\n wont be concatenated.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). 
Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> inputs = torch.randn((1, 3, 3, 3))\n >>> rec_er = RandomGrayscale(p=1.0)\n >>> rec_er(inputs)\n tensor([[[[-1.1344, -0.1330, 0.1517],\n [-0.0791, 0.6711, -0.1413],\n [-0.1717, -0.9023, 0.0819]],\n <BLANKLINE>\n [[-1.1344, -0.1330, 0.1517],\n [-0.0791, 0.6711, -0.1413],\n [-0.1717, -0.9023, 0.0819]],\n <BLANKLINE>\n [[-1.1344, -0.1330, 0.1517],\n [-0.0791, 0.6711, -0.1413],\n [-0.1717, -0.9023, 0.0819]]]])\n \"\"\"\n\n def __init__(self, return_transform: bool = False, same_on_batch: bool = False, p: float = 0.1,\n keepdim: bool = False) -> None:\n super(RandomGrayscale, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n\n def __repr__(self) -> str:\n return self.__class__.__name__ + f\"({super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return dict()\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_intensity_transformation(input)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_grayscale(input)\n\n\nclass RandomErasing(AugmentationBase2D):\n r\"\"\"Erases a random rectangle of a tensor image according to a probability p value.\n\n The operator removes image parts and fills them with zero values at a selected rectangle\n for each of the images in the batch.\n\n The rectangle will have an area equal to the original image area multiplied by a value uniformly\n sampled between the range [scale[0], scale[1]) and an aspect ratio sampled\n between [ratio[0], ratio[1])\n\n Args:\n p (float): probability that the random erasing operation will be performed. Default value is 0.5.\n scale (Tuple[float, float]): range of proportion of erased area against input image.\n ratio (Tuple[float, float]): range of aspect ratio of erased area.\n same_on_batch (bool): apply the same transformation across the batch. Default: False\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). 
Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> inputs = torch.ones(1, 1, 3, 3)\n >>> rec_er = RandomErasing((.4, .8), (.3, 1/.3), p=0.5)\n >>> rec_er(inputs)\n tensor([[[[1., 0., 0.],\n [1., 0., 0.],\n [1., 0., 0.]]]])\n \"\"\"\n\n # Note: Extra params, inplace=False in Torchvision.\n def __init__(\n self, scale: Union[torch.Tensor, Tuple[float, float]] = (0.02, 0.33),\n ratio: Union[torch.Tensor, Tuple[float, float]] = (0.3, 3.3),\n value: float = 0., return_transform: bool = False, same_on_batch: bool = False, p: float = 0.5,\n keepdim: bool = False\n ) -> None:\n super(RandomErasing, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n self.scale = cast(torch.Tensor, scale) if isinstance(scale, torch.Tensor) else torch.tensor(scale)\n self.ratio = cast(torch.Tensor, ratio) if isinstance(ratio, torch.Tensor) else torch.tensor(ratio)\n self.value: float = value\n\n def __repr__(self) -> str:\n repr = f\"scale={self.scale}, ratio={self.ratio}, value={self.value}\"\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return rg.random_rectangles_params_generator(\n batch_shape[0], batch_shape[-2], batch_shape[-1], scale=self.scale, ratio=self.ratio,\n value=self.value, same_on_batch=self.same_on_batch, device=self.device, dtype=self.dtype)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_intensity_transformation(input)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_erase_rectangles(input, params)\n\n\nclass RandomPerspective(AugmentationBase2D):\n r\"\"\"Applies a random perspective transformation to an image tensor with a given probability.\n\n Args:\n p (float): probability of the image being perspectively transformed. Default value is 0.5.\n distortion_scale(float): it controls the degree of distortion and ranges from 0 to 1. Default value is 0.5.\n resample (int, str or kornia.Resample): Default: Resample.BILINEAR.\n return_transform (bool): if ``True`` return the matrix describing the transformation\n applied to each. Default: False.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n align_corners(bool): interpolation flag. Default: False.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> inputs= torch.tensor([[[[1., 0., 0.],\n ... [0., 1., 0.],\n ... 
[0., 0., 1.]]]])\n >>> aug = RandomPerspective(0.5, p=0.5)\n >>> aug(inputs)\n tensor([[[[0.0000, 0.2289, 0.0000],\n [0.0000, 0.4800, 0.0000],\n [0.0000, 0.0000, 0.0000]]]])\n \"\"\"\n\n def __init__(\n self, distortion_scale: Union[torch.Tensor, float] = 0.5,\n interpolation: Optional[Union[str, int, Resample]] = None,\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n return_transform: bool = False, same_on_batch: bool = False,\n align_corners: bool = False, p: float = 0.5, keepdim: bool = False\n ) -> None:\n super(RandomPerspective, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n self.distortion_scale = cast(torch.Tensor, distortion_scale) \\\n if isinstance(distortion_scale, torch.Tensor) else torch.tensor(distortion_scale)\n self.resample: Resample\n if interpolation is not None:\n import warnings\n warnings.warn(\"interpolation is deprecated. Please use resample instead.\", category=DeprecationWarning)\n self.resample = Resample.get(interpolation)\n self.resample = Resample.get(resample)\n self.align_corners = align_corners\n self.flags: Dict[str, torch.Tensor] = dict(\n interpolation=torch.tensor(self.resample.value),\n align_corners=torch.tensor(align_corners)\n )\n\n def __repr__(self) -> str:\n repr = (f\"distortion_scale={self.distortion_scale}, interpolation={self.resample.name}, \"\n f\"align_corners={self.align_corners}\")\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return rg.random_perspective_generator(\n batch_shape[0], batch_shape[-2], batch_shape[-1], self.distortion_scale, self.same_on_batch,\n self.device, self.dtype)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_perspective_transformation(input, params)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_perspective(input, params, self.flags)\n\n\nclass RandomAffine(AugmentationBase2D):\n r\"\"\"Applies a random 2D affine transformation to a tensor image.\n\n The transformation is computed so that the image center is kept invariant.\n\n Args:\n p (float): probability of applying the transformation. Default value is 0.5.\n degrees (float or tuple): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees). Set to 0 to deactivate rotations.\n translate (tuple, optional): tuple of maximum absolute fraction for horizontal\n and vertical translations. For example translate=(a, b), then horizontal shift\n is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is\n randomly sampled in the range -img_height * b < dy < img_height * b. 
Will not translate by default.\n scale (tuple, optional): scaling factor interval.\n If (a, b) represents isotropic scaling, the scale is randomly sampled from the range a <= scale <= b.\n If (a, b, c, d), the scale is randomly sampled from the range a <= scale_x <= b, c <= scale_y <= d.\n Will keep original scale by default.\n shear (sequence or float, optional): Range of degrees to select from.\n If float, a shear parallel to the x axis in the range (-shear, +shear) will be applied.\n If (a, b), a shear parallel to the x axis in the range (a, b) will be applied.\n If (a, b, c, d), then x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3])\n will be applied. Will not apply shear by default.\n resample (int, str or kornia.Resample): resample mode from \"nearest\" (0) or \"bilinear\" (1).\n Default: Resample.BILINEAR.\n padding_mode (int, str or kornia.SamplePadding): padding mode from \"zeros\" (0), \"border\" (1)\n or \"reflection\" (2). Default: SamplePadding.ZEROS.\n return_transform (bool): if ``True`` return the matrix describing the transformation\n applied to each. Default: False.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n align_corners(bool): interpolation flag. Default: False.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged into the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> input = torch.rand(1, 1, 3, 3)\n >>> aug = RandomAffine((-15., 20.), return_transform=True, p=1.)\n >>> aug(input)\n (tensor([[[[0.3961, 0.7310, 0.1574],\n [0.1781, 0.3074, 0.5648],\n [0.4804, 0.8379, 0.4234]]]]), tensor([[[ 0.9923, -0.1241, 0.1319],\n [ 0.1241, 0.9923, -0.1164],\n [ 0.0000, 0.0000, 1.0000]]]))\n \"\"\"\n\n def __init__(\n self, degrees: Union[torch.Tensor, float, Tuple[float, float]],\n translate: Optional[Union[torch.Tensor, Tuple[float, float]]] = None,\n scale: Optional[Union[torch.Tensor, Tuple[float, float], Tuple[float, float, float, float]]] = None,\n shear: Optional[Union[torch.Tensor, float, Tuple[float, float]]] = None,\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n return_transform: bool = False, same_on_batch: bool = False, align_corners: bool = False,\n padding_mode: Union[str, int, SamplePadding] = SamplePadding.ZEROS.name, p: float = 0.5, keepdim: bool = False\n ) -> None:\n super(RandomAffine, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n degrees = cast(torch.Tensor, degrees) if isinstance(degrees, torch.Tensor) else torch.tensor(degrees)\n self.degrees = _range_bound(degrees, 'degrees', 0, (-360, 360))\n self.translate: Optional[torch.Tensor] = None\n if translate is not None:\n self.translate = _range_bound(translate, 'translate', bounds=(0, 1), check='singular')\n self.scale: Optional[torch.Tensor] = None\n if scale is not None:\n scale = scale if isinstance(scale, torch.Tensor) else torch.tensor(scale)\n if len(scale) == 2:\n self.scale = _range_bound(scale, 'scale', bounds=(0, float('inf')), check='singular')\n
elif len(scale) == 4:\n self.scale = torch.cat([\n _range_bound(scale[:2], 'scale_x', bounds=(0, float('inf')), check='singular'),\n _range_bound(scale[2:], 'scale_y', bounds=(0, float('inf')), check='singular')\n ])\n else:\n raise ValueError(f\"'scale' expected to be either 2 or 4 elements. Got {scale}\")\n self.shear: Optional[torch.Tensor] = None\n if shear is not None:\n shear = shear if isinstance(shear, torch.Tensor) else torch.tensor(shear)\n self.shear = torch.stack([\n _range_bound(shear if shear.dim() == 0 else shear[:2], 'shear-x', 0, (-360, 360)),\n torch.tensor([0, 0]) if shear.dim() == 0 or len(shear) == 2 else\n _range_bound(shear[2:], 'shear-y', 0, (-360, 360))\n ])\n self.resample: Resample = Resample.get(resample)\n self.padding_mode: SamplePadding = SamplePadding.get(padding_mode)\n self.align_corners = align_corners\n self.flags: Dict[str, torch.Tensor] = dict(\n resample=torch.tensor(self.resample.value),\n padding_mode=torch.tensor(self.padding_mode.value),\n align_corners=torch.tensor(align_corners)\n )\n\n def __repr__(self) -> str:\n repr = (f\"degrees={self.degrees}, translate={self.translate}, scale={self.scale}, shear={self.shear}, \"\n f\"resample={self.resample.name}\")\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return rg.random_affine_generator(\n batch_shape[0], batch_shape[-2], batch_shape[-1], self.degrees, self.translate, self.scale, self.shear,\n self.same_on_batch, self.device, self.dtype)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_affine_transformation(input, params)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_affine(input, params, self.flags)\n\n\nclass CenterCrop(AugmentationBase2D):\n r\"\"\"Crops a given image tensor at the center.\n\n Args:\n p (float): probability of applying the transformation for the whole batch. Default value is 1.\n size (Tuple[int, int] or int): Desired output size (out_h, out_w) of the crop.\n If integer, out_h = out_w = size.\n If Tuple[int, int], out_h = size[0], out_w = size[1].\n return_transform (bool): if ``True`` return the matrix describing the transformation\n applied to each. Default: False.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False).
Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, out_h, out_w)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> inputs = torch.randn(1, 1, 4, 4)\n >>> inputs\n tensor([[[[-1.1258, -1.1524, -0.2506, -0.4339],\n [ 0.8487, 0.6920, -0.3160, -2.1152],\n [ 0.3223, -1.2633, 0.3500, 0.3081],\n [ 0.1198, 1.2377, 1.1168, -0.2473]]]])\n >>> aug = CenterCrop(2, p=1.)\n >>> aug(inputs)\n tensor([[[[ 0.6920, -0.3160],\n [-1.2633, 0.3500]]]])\n \"\"\"\n\n def __init__(self, size: Union[int, Tuple[int, int]], align_corners: bool = True,\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n return_transform: bool = False, p: float = 1., keepdim: bool = False) -> None:\n # same_on_batch is always True for CenterCrop\n # Since PyTorch does not support ragged tensor. So cropping function happens batch-wisely.\n super(CenterCrop, self).__init__(p=1., return_transform=return_transform, same_on_batch=True, p_batch=p,\n keepdim=keepdim)\n self.size = size\n self.resample = Resample.get(resample)\n self.align_corners = align_corners\n self.flags: Dict[str, torch.Tensor] = dict(\n interpolation=torch.tensor(self.resample.value),\n align_corners=torch.tensor(align_corners)\n )\n\n def __repr__(self) -> str:\n repr = f\"size={self.size}\"\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n if isinstance(self.size, tuple):\n size_param = (self.size[0], self.size[1])\n elif isinstance(self.size, int):\n size_param = (self.size, self.size)\n else:\n raise Exception(f\"Invalid size type. Expected (int, tuple(int, int). \"\n f\"Got: {type(self.size)}.\")\n return rg.center_crop_generator(\n batch_shape[0], batch_shape[-2], batch_shape[-1], size_param, self.device)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_crop_transformation(input, params, self.flags)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_crop(input, params, self.flags)\n\n\nclass RandomRotation(AugmentationBase2D):\n r\"\"\"Applies a random rotation to a tensor image or a batch of tensor images given an amount of degrees.\n\n Args:\n p (float): probability of applying the transformation. Default value is 0.5.\n degrees (sequence or float or tensor): range of degrees to select from. If degrees is a number the\n range of degrees to select from will be (-degrees, +degrees).\n interpolation (int, str or kornia.Resample): Default: Resample.BILINEAR.\n return_transform (bool): if ``True`` return the matrix describing the transformation applied to each\n input tensor. If ``False`` and the input is a tuple the applied transformation\n wont be concatenated.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n align_corners(bool): interpolation flag. Default: False.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). 
Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> input = torch.tensor([[1., 0., 0., 2.],\n ... [0., 0., 0., 0.],\n ... [0., 1., 2., 0.],\n ... [0., 0., 1., 2.]])\n >>> seq = RandomRotation(degrees=45.0, return_transform=True, p=1.)\n >>> seq(input)\n (tensor([[[[0.9824, 0.0088, 0.0000, 1.9649],\n [0.0000, 0.0029, 0.0000, 0.0176],\n [0.0029, 1.0000, 1.9883, 0.0000],\n [0.0000, 0.0088, 1.0117, 1.9649]]]]), tensor([[[ 1.0000, -0.0059, 0.0088],\n [ 0.0059, 1.0000, -0.0088],\n [ 0.0000, 0.0000, 1.0000]]]))\n \"\"\"\n # Note: Extra params, center=None, fill=0 in TorchVision\n\n def __init__(\n self, degrees: Union[torch.Tensor, float, Tuple[float, float], List[float]],\n interpolation: Optional[Union[str, int, Resample]] = None,\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n return_transform: bool = False, same_on_batch: bool = False, align_corners: bool = True, p: float = 0.5,\n keepdim: bool = False\n ) -> None:\n super(RandomRotation, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n degrees = cast(torch.Tensor, degrees) if isinstance(degrees, torch.Tensor) else torch.tensor(degrees)\n self.degrees = _range_bound(degrees, 'degrees', 0, (-360, 360))\n self.resample: Resample\n if interpolation is not None:\n import warnings\n warnings.warn(\"interpolation is deprecated. Please use resample instead.\", category=DeprecationWarning)\n self.resample = Resample.get(interpolation)\n self.resample = Resample.get(resample)\n self.align_corners = align_corners\n self.flags: Dict[str, torch.Tensor] = dict(\n interpolation=torch.tensor(self.resample.value),\n align_corners=torch.tensor(align_corners)\n )\n\n def __repr__(self) -> str:\n repr = f\"degrees={self.degrees}, interpolation={self.resample.name}\"\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return rg.random_rotation_generator(batch_shape[0], self.degrees, self.same_on_batch, self.device, self.dtype)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_rotate_tranformation(input, params)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_rotation(input, params, self.flags)\n\n\nclass RandomCrop(AugmentationBase2D):\n r\"\"\"Crops random patches of a tensor image on a given size.\n\n Args:\n p (float): probability of applying the transformation for the whole batch. Default value is 1.0.\n size (Tuple[int, int]): Desired output size (out_h, out_w) of the crop.\n Must be Tuple[int, int], then out_h = size[0], out_w = size[1].\n padding (int or sequence, optional): Optional padding on each border\n of the image. Default is None, i.e no padding. If a sequence of length\n 4 is provided, it is used to pad left, top, right, bottom borders\n respectively. 
If a sequence of length 2 is provided, it is used to\n pad left/right, top/bottom borders, respectively.\n pad_if_needed (boolean): It will pad the image if smaller than the\n desired size to avoid raising an exception. Since cropping is done\n after padding, the padding seems to be done at a random offset.\n fill: Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.\n resample (int, str or kornia.Resample): Default: Resample.BILINEAR\n return_transform (bool): if ``True`` return the matrix describing the transformation applied to each\n input tensor. If ``False`` and the input is a tuple the applied transformation\n wont be concatenated\n same_on_batch (bool): apply the same transformation across the batch. Default: False\n align_corners(bool): interpolation flag. Default: False.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, out_h, out_w)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> inputs = torch.randn(1, 1, 3, 3)\n >>> aug = RandomCrop((2, 2), p=1.)\n >>> aug(inputs)\n tensor([[[[-0.6562, -1.0009],\n [ 0.2223, -0.5507]]]])\n \"\"\"\n\n def __init__(\n self, size: Tuple[int, int], padding: Optional[Union[int, Tuple[int, int], Tuple[int, int, int, int]]] = None,\n pad_if_needed: Optional[bool] = False, fill: int = 0, padding_mode: str = 'constant',\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n return_transform: bool = False, same_on_batch: bool = False, align_corners: bool = False, p: float = 1.0,\n keepdim: bool = False\n ) -> None:\n # Since PyTorch does not support ragged tensor. 
So cropping function happens batch-wisely.\n super(RandomCrop, self).__init__(\n p=1., return_transform=return_transform, same_on_batch=same_on_batch, p_batch=p, keepdim=keepdim)\n self.size = size\n self.padding = padding\n self.pad_if_needed = pad_if_needed\n self.fill = fill\n self.padding_mode = padding_mode\n self.resample = Resample.get(resample)\n self.align_corners = align_corners\n self.flags: Dict[str, torch.Tensor] = dict(\n interpolation=torch.tensor(self.resample.value),\n align_corners=torch.tensor(align_corners)\n )\n\n def __repr__(self) -> str:\n repr = (f\"crop_size={self.size}, padding={self.padding}, fill={self.fill}, pad_if_needed={self.pad_if_needed}, \"\n f\"padding_mode={self.padding_mode}, resample={self.resample.name}\")\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return rg.random_crop_generator(batch_shape[0], (batch_shape[-2], batch_shape[-1]), self.size,\n same_on_batch=self.same_on_batch, device=self.device, dtype=self.dtype)\n\n def precrop_padding(self, input: torch.Tensor) -> torch.Tensor:\n if self.padding is not None:\n if isinstance(self.padding, int):\n self.padding = cast(int, self.padding)\n padding = [self.padding, self.padding, self.padding, self.padding]\n elif isinstance(self.padding, tuple) and len(self.padding) == 2:\n self.padding = cast(Tuple[int, int], self.padding)\n padding = [self.padding[1], self.padding[1], self.padding[0], self.padding[0]]\n elif isinstance(self.padding, tuple) and len(self.padding) == 4:\n self.padding = cast(Tuple[int, int, int, int], self.padding)\n padding = [self.padding[3], self.padding[2], self.padding[1], self.padding[0]]\n input = pad(input, padding, value=self.fill, mode=self.padding_mode)\n\n if self.pad_if_needed and input.shape[-2] < self.size[0]:\n padding = [0, 0, (self.size[0] - input.shape[-2]), self.size[0] - input.shape[-2]]\n input = pad(input, padding, value=self.fill, mode=self.padding_mode)\n\n if self.pad_if_needed and input.shape[-1] < self.size[1]:\n padding = [self.size[1] - input.shape[-1], self.size[1] - input.shape[-1], 0, 0]\n input = pad(input, padding, value=self.fill, mode=self.padding_mode)\n\n return input\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_crop_transformation(input, params, self.flags)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_crop(input, params, self.flags)\n\n def forward(self, input: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],\n params: Optional[Dict[str, torch.Tensor]] = None, return_transform: Optional[bool] = None\n ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:\n if type(input) == tuple:\n input = (self.precrop_padding(input[0]), input[1])\n else:\n input = cast(torch.Tensor, input)\n input = self.precrop_padding(input)\n return super().forward(input, params, return_transform)\n\n\nclass RandomResizedCrop(AugmentationBase2D):\n r\"\"\"Crops random patches in an image tensor and resizes to a given size.\n\n Args:\n size (Tuple[int, int]): Desired output size (out_h, out_w) of each edge.\n Must be Tuple[int, int], then out_h = size[0], out_w = size[1].\n scale: range of size of the origin size cropped.\n ratio: range of aspect ratio of the origin aspect ratio cropped.\n resample (int, str or kornia.Resample): Default: Resample.BILINEAR.\n return_transform (bool): if ``True`` 
return the matrix describing the transformation applied to each\n input tensor. If ``False`` and the input is a tuple the applied transformation\n wont be concatenated.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n align_corners(bool): interpolation flag. Default: False.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, out_h, out_w)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Example:\n >>> rng = torch.manual_seed(0)\n >>> inputs = torch.tensor([[[0., 1., 2.],\n ... [3., 4., 5.],\n ... [6., 7., 8.]]])\n >>> aug = RandomResizedCrop(size=(3, 3), scale=(3., 3.), ratio=(2., 2.), p=1.)\n >>> aug(inputs)\n tensor([[[[1.2500, 1.7500, 1.5000],\n [4.2500, 4.7500, 3.7500],\n [7.2500, 7.7500, 6.0000]]]])\n \"\"\"\n\n def __init__(\n self, size: Tuple[int, int], scale: Union[torch.Tensor, Tuple[float, float]] = (0.08, 1.0),\n ratio: Union[torch.Tensor, Tuple[float, float]] = (3. / 4., 4. / 3.),\n interpolation: Optional[Union[str, int, Resample]] = None,\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n return_transform: bool = False, same_on_batch: bool = False,\n align_corners: bool = False, p: float = 1., keepdim: bool = False\n ) -> None:\n # Since PyTorch does not support ragged tensor. So cropping function happens all the time.\n super(RandomResizedCrop, self).__init__(\n p=1., return_transform=return_transform, same_on_batch=same_on_batch, p_batch=p, keepdim=keepdim)\n self.size = size\n self.scale = cast(torch.Tensor, scale) if isinstance(scale, torch.Tensor) else torch.tensor(scale)\n self.ratio = cast(torch.Tensor, ratio) if isinstance(ratio, torch.Tensor) else torch.tensor(ratio)\n self.resample: Resample\n if interpolation is not None:\n import warnings\n warnings.warn(\"interpolation is deprecated. 
Please use resample instead.\", category=DeprecationWarning)\n self.resample = Resample.get(interpolation)\n self.resample = Resample.get(resample)\n self.align_corners = align_corners\n self.flags: Dict[str, torch.Tensor] = dict(\n interpolation=torch.tensor(self.resample.value),\n align_corners=torch.tensor(align_corners)\n )\n\n def __repr__(self) -> str:\n repr = f\"size={self.size}, scale={self.scale}, ratio={self.ratio}, interpolation={self.resample.name}\"\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n target_size: torch.Tensor = rg.random_crop_size_generator(\n batch_shape[0], self.size, self.scale, self.ratio, self.same_on_batch, self.device, self.dtype)['size']\n return rg.random_crop_generator(batch_shape[0], (batch_shape[-2], batch_shape[-1]), target_size,\n resize_to=self.size, same_on_batch=self.same_on_batch,\n device=self.device, dtype=self.dtype)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_crop_transformation(input, params, self.flags)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_crop(input, params, self.flags)\n\n\nclass RandomMotionBlur(AugmentationBase2D):\n r\"\"\"Perform motion blur on 2D images (4D tensor).\n\n Args:\n p (float): probability of applying the transformation. Default value is 0.5.\n kernel_size (int or Tuple[int, int]): motion kernel size (odd and positive).\n If int, the kernel will have a fixed size.\n If Tuple[int, int], it will randomly generate the value from the range batch-wisely.\n angle (float or Tuple[float, float]): angle of the motion blur in degrees (anti-clockwise rotation).\n If float, it will generate the value from (-angle, angle).\n direction (float or Tuple[float, float]): forward/backward direction of the motion blur.\n Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),\n while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a\n uniformly (but still angled) motion blur.\n If float, it will generate the value from (-direction, direction).\n If Tuple[int, int], it will randomly generate the value from the range.\n border_type (int, str or kornia.BorderType): the padding mode to be applied before convolving.\n CONSTANT = 0, REFLECT = 1, REPLICATE = 2, CIRCULAR = 3. Default: BorderType.CONSTANT.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). 
Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> input = torch.ones(1, 1, 5, 5)\n >>> motion_blur = RandomMotionBlur(3, 35., 0.5, p=1.)\n >>> motion_blur(input)\n tensor([[[[-0.5761, 1.0000, 1.0000, 1.0000, 1.9094],\n [-0.5761, 1.0000, 1.0000, 1.0000, 1.9094],\n [-0.5761, 1.0000, 1.0000, 1.0000, 1.9094],\n [-0.5761, 1.0000, 1.0000, 1.0000, 1.9094],\n [-0.5761, 1.0000, 1.0000, 1.0000, 1.9094]]]])\n \"\"\"\n\n def __init__(\n self, kernel_size: Union[int, Tuple[int, int]],\n angle: Union[torch.Tensor, float, Tuple[float, float]],\n direction: Union[torch.Tensor, float, Tuple[float, float]],\n border_type: Union[int, str, BorderType] = BorderType.CONSTANT.name,\n return_transform: bool = False, same_on_batch: bool = False, p: float = 0.5, keepdim: bool = False\n ) -> None:\n super(RandomMotionBlur, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n self.kernel_size: Union[int, Tuple[int, int]] = kernel_size\n\n angle = cast(torch.Tensor, angle) if isinstance(angle, torch.Tensor) else torch.tensor(angle)\n self.angle = _range_bound(angle, 'angle', center=0., bounds=(-360, 360))\n\n direction = \\\n cast(torch.Tensor, direction) if isinstance(direction, torch.Tensor) else torch.tensor(direction)\n self.direction = _range_bound(direction, 'direction', center=0., bounds=(-1, 1))\n self.border_type = BorderType.get(border_type)\n self.flags: Dict[str, torch.Tensor] = {\n \"border_type\": torch.tensor(self.border_type.value)\n }\n\n def __repr__(self) -> str:\n repr = f\"kernel_size={self.kernel_size}, angle={self.angle}, direction={self.direction}, \" +\\\n f\"border_type='{self.border_type.name.lower()}'\"\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return rg.random_motion_blur_generator(\n batch_shape[0], self.kernel_size, self.angle, self.direction, self.same_on_batch, self.device, self.dtype)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_intensity_transformation(input)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_motion_blur(input, params, self.flags)\n\n\nclass RandomSolarize(AugmentationBase2D):\n r\"\"\"Solarize given tensor image or a batch of tensor images randomly.\n\n Args:\n p (float): probability of applying the transformation. Default value is 0.5.\n thresholds (float or tuple): Default value is 0.1.\n If float x, threshold will be generated from (0.5 - x, 0.5 + x).\n If tuple (x, y), threshold will be generated from (x, y).\n additions (float or tuple): Default value is 0.1.\n If float x, addition will be generated from (-x, x).\n If tuple (x, y), addition will be generated from (x, y).\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n return_transform (bool): if ``True`` return the matrix describing the transformation applied to each\n input tensor. 
If ``False`` and the input is a tuple the applied transformation won't be concatenated.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged into the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> input = torch.rand(1, 1, 5, 5)\n >>> solarize = RandomSolarize(0.1, 0.1, p=1.)\n >>> solarize(input)\n tensor([[[[0.4132, 0.1412, 0.1790, 0.2226, 0.3980],\n [0.2754, 0.4194, 0.0130, 0.4538, 0.2771],\n [0.4394, 0.4923, 0.1129, 0.2594, 0.3844],\n [0.3909, 0.2118, 0.1094, 0.2516, 0.3728],\n [0.2278, 0.0000, 0.4876, 0.0353, 0.5100]]]])\n \"\"\"\n\n def __init__(\n self, thresholds: Union[torch.Tensor, float, Tuple[float, float], List[float]] = 0.1,\n additions: Union[torch.Tensor, float, Tuple[float, float], List[float]] = 0.1,\n same_on_batch: bool = False, return_transform: bool = False, p: float = 0.5, keepdim: bool = False\n ) -> None:\n super(RandomSolarize, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n\n thresholds = \\\n cast(torch.Tensor, thresholds) if isinstance(thresholds, torch.Tensor) else torch.tensor(thresholds)\n self.thresholds = _range_bound(thresholds, 'thresholds', center=0.5, bounds=(0., 1.))\n\n additions = \\\n cast(torch.Tensor, additions) if isinstance(additions, torch.Tensor) else torch.tensor(additions)\n self.additions = _range_bound(additions, 'additions', bounds=(-0.5, 0.5))\n\n def __repr__(self) -> str:\n repr = f\"thresholds={self.thresholds}, additions={self.additions}\"\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return rg.random_solarize_generator(batch_shape[0], self.thresholds, self.additions, self.same_on_batch,\n self.device, self.dtype)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_intensity_transformation(input)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_solarize(input, params)\n\n\nclass RandomPosterize(AugmentationBase2D):\n r\"\"\"Posterize given tensor image or a batch of tensor images randomly.\n\n Args:\n p (float): probability of applying the transformation. Default value is 0.5.\n bits (int or tuple): Integer ranging from 0 to 8, in which 0 gives a black image and 8 gives the original.\n If int x, bits will be generated from (x, 8).\n If tuple (x, y), bits will be generated from (x, y).\n Default value is 3.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n return_transform (bool): if ``True`` return the matrix describing the transformation applied to each\n input tensor. If ``False`` and the input is a tuple the applied transformation won't be concatenated.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False).
Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged into the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> input = torch.rand(1, 1, 5, 5)\n >>> posterize = RandomPosterize(3, p=1.)\n >>> posterize(input)\n tensor([[[[0.4706, 0.7529, 0.0627, 0.1255, 0.2824],\n [0.6275, 0.4706, 0.8784, 0.4392, 0.6275],\n [0.3451, 0.3765, 0.0000, 0.1569, 0.2824],\n [0.5020, 0.6902, 0.7843, 0.1569, 0.2510],\n [0.6588, 0.9098, 0.3765, 0.8471, 0.4078]]]])\n \"\"\"\n\n def __init__(\n self, bits: Union[int, Tuple[int, int], torch.Tensor] = 3,\n same_on_batch: bool = False, return_transform: bool = False, p: float = 0.5, keepdim: bool = False\n ) -> None:\n super(RandomPosterize, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n bits = cast(torch.Tensor, bits) if isinstance(bits, torch.Tensor) else torch.tensor(bits)\n if len(bits.size()) == 0:\n self.bits = torch.tensor([bits, torch.tensor(8)], dtype=torch.float32)\n elif len(bits.size()) == 1 and bits.size(0) == 2:\n self.bits = torch.tensor([bits[0], bits[1]], dtype=torch.float32)\n else:\n raise ValueError(f\"'bits' shall be either a scalar or a length 2 tensor. Got {bits}.\")\n\n def __repr__(self) -> str:\n repr = f\"bits={self.bits}\"\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return rg.random_posterize_generator(batch_shape[0], self.bits, self.same_on_batch, self.device, self.dtype)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_intensity_transformation(input)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_posterize(input, params)\n\n\nclass RandomSharpness(AugmentationBase2D):\n r\"\"\"Sharpen given tensor image or a batch of tensor images randomly.\n\n Args:\n p (float): probability of applying the transformation. Default value is 0.5.\n sharpness (float or tuple): factor of sharpness strength. Must be above 0. Default value is 0.5.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n return_transform (bool): if ``True`` return the matrix describing the transformation applied to each\n input tensor. If ``False`` and the input is a tuple the applied transformation won't be concatenated.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False).
Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> input = torch.rand(1, 1, 5, 5)\n >>> sharpness = RandomSharpness(1., p=1.)\n >>> sharpness(input)\n tensor([[[[0.4963, 0.7682, 0.0885, 0.1320, 0.3074],\n [0.6341, 0.4810, 0.7367, 0.4177, 0.6323],\n [0.3489, 0.4428, 0.1562, 0.2443, 0.2939],\n [0.5185, 0.6462, 0.7050, 0.2288, 0.2823],\n [0.6816, 0.9152, 0.3971, 0.8742, 0.4194]]]])\n \"\"\"\n\n def __init__(\n self, sharpness: Union[torch.Tensor, float, Tuple[float, float], torch.Tensor] = 0.5,\n same_on_batch: bool = False, return_transform: bool = False, p: float = 0.5, keepdim: bool = False\n ) -> None:\n super(RandomSharpness, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n sharpness = cast(torch.Tensor, sharpness) if isinstance(sharpness, torch.Tensor) else torch.tensor(sharpness)\n if sharpness.dim() == 0:\n self.sharpness = torch.tensor([0, sharpness], dtype=torch.float32)\n elif sharpness.dim() == 1 and sharpness.size(0) == 2:\n self.sharpness = torch.tensor([sharpness[0], sharpness[1]], dtype=torch.float32)\n else:\n raise ValueError(f\"'sharpness' must be a scalar or a length 2 tensor. Got {sharpness}.\")\n\n def __repr__(self) -> str:\n repr = f\"sharpness={self.sharpness}\"\n return self.__class__.__name__ + f\"({repr}, {super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return rg.random_sharpness_generator(batch_shape[0], self.sharpness, self.same_on_batch,\n self.device, self.dtype)\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_intensity_transformation(input)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_sharpness(input, params)\n\n\nclass RandomEqualize(AugmentationBase2D):\n r\"\"\"Equalize given tensor image or a batch of tensor images randomly.\n\n Args:\n p (float): Probability to equalize an image. Default value is 0.5.\n same_on_batch (bool): apply the same transformation across the batch. Default: False.\n return_transform (bool): if ``True`` return the matrix describing the transformation applied to each\n input tensor. If ``False`` and the input is a tuple the applied transformation\n wont be concatenated.\n keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False). 
Default: False.\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> input = torch.rand(1, 1, 5, 5)\n >>> equalize = RandomEqualize(p=1.)\n >>> equalize(input)\n tensor([[[[0.4963, 0.7682, 0.0885, 0.1320, 0.3074],\n [0.6341, 0.4901, 0.8964, 0.4556, 0.6323],\n [0.3489, 0.4017, 0.0223, 0.1689, 0.2939],\n [0.5185, 0.6977, 0.8000, 0.1610, 0.2823],\n [0.6816, 0.9152, 0.3971, 0.8742, 0.4194]]]])\n \"\"\"\n\n def __init__(\n self, same_on_batch: bool = False, return_transform: bool = False, p: float = 0.5, keepdim: bool = False\n ) -> None:\n super(RandomEqualize, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,\n keepdim=keepdim)\n\n def __repr__(self) -> str:\n return self.__class__.__name__ + f\"({super().__repr__()})\"\n\n def generate_parameters(self, batch_shape: torch.Size) -> Dict[str, torch.Tensor]:\n return dict()\n\n def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.compute_intensity_transformation(input)\n\n def apply_transform(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n return F.apply_equalize(input, params)\n"
] | [
[
"torch.nn.functional.pad",
"torch.tensor"
]
] |
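(Usage note for the row above: its code column documents a family of 2D augmentation modules, RandomErasing through RandomEqualize. The following is a minimal sketch, assuming the classes are importable from kornia.augmentation, the module this file appears to be extracted from, and that torch and kornia are installed; the batch shape and parameter values are illustrative only.)

import torch
import torch.nn as nn
import kornia.augmentation as K  # assumption: the classes documented above live here

torch.manual_seed(0)
images = torch.rand(4, 3, 32, 32)  # float batch, normalized into [0, 1] as the Note blocks require

# Each augmentation is an nn.Module, so a pipeline is plain nn.Sequential.
aug = nn.Sequential(
    K.RandomAffine(degrees=15., translate=(0.1, 0.1), p=0.5),
    K.RandomErasing(scale=(0.02, 0.33), ratio=(0.3, 3.3), p=0.5),
    K.RandomSolarize(thresholds=0.1, additions=0.1, p=0.5),
)
out = aug(images)  # shape preserved: (4, 3, 32, 32)

(Constructing any of these modules with return_transform=True instead yields an (image, transform) pair; per the Note blocks above, downstream modules accept the pair and merge their own 3x3 transform into it.)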
ElsevierSoftwareX/SOFTX-D-20-00001 | [
"e578f21673decb9a40424379cf61e42736b07e13"
] | [
"src/microstructpy/meshing/trimesh.py"
] | [
"\"\"\"Triangle/Tetrahedron Meshing\n\nThis module contains the class definition for the TriMesh class.\n\n\"\"\"\n# --------------------------------------------------------------------------- #\n# #\n# Import Modules #\n# #\n# --------------------------------------------------------------------------- #\n\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport meshpy.tet\nimport meshpy.triangle\nimport numpy as np\nfrom matplotlib import collections\nfrom matplotlib import patches\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\nfrom microstructpy import _misc\n\n__all__ = ['TriMesh']\n__author__ = 'Kenneth (Kip) Hart'\n\n\n# --------------------------------------------------------------------------- #\n# #\n# TriMesh Class #\n# #\n# --------------------------------------------------------------------------- #\nclass TriMesh(object):\n \"\"\"Triangle/Tetrahedron mesh.\n\n The TriMesh class contains the points, facets, and elements in a triangle/\n tetrahedron mesh, also called an unstructured grid.\n\n The points attribute is an Nx2 or Nx3 list of points in the mesh.\n The elements attribute contains the Nx3 or Nx4 list of the points at\n the corners of each triangle/tetrahedron. A list of facets can also be\n included, though it is optional and does not need to include every facet\n in the mesh. Attributes can also be assigned to the elements and facets,\n though they are also optional.\n\n Args:\n points (list, numpy.ndarray): List of coordinates in the mesh.\n elements (list, numpy.ndarray): List of indices of the points at\n the corners of each element. The shape should be Nx3 in 2D or\n Nx4 in 3D.\n element_attributes (list, numpy.ndarray): *(optional)* A number\n associated with each element.\n Defaults to None.\n facets (list, numpy.ndarray): *(optional)* A list of facets in the\n mesh. The shape should be Nx2 in 2D or Nx3 in 3D.\n Defaults to None.\n facet_attributes (list, numpy.ndarray): *(optional)* A number\n associated with each facet.\n Defaults to None.\n\n \"\"\"\n # ----------------------------------------------------------------------- #\n # Constructors #\n # ----------------------------------------------------------------------- #\n def __init__(self, points, elements, element_attributes=None, facets=None,\n facet_attributes=None):\n self.points = points\n self.elements = elements\n self.element_attributes = element_attributes\n self.facets = facets\n self.facet_attributes = facet_attributes\n\n @classmethod\n def from_file(cls, filename):\n \"\"\"Read TriMesh from file.\n\n This function reads in a triangular mesh from a file and creates an\n instance from that file. 
Currently the only supported file type\n is the output from :meth:`.write` with the ``format='str'`` option.\n\n Args:\n filename (str): Name of file to read from.\n\n Returns:\n TriMesh: An instance of the class.\n\n \"\"\"\n with open(filename, 'r') as file:\n stage = 0\n pts = []\n elems = []\n elem_atts = []\n facets = []\n facet_atts = []\n\n n_eas = 0\n n_facets = 0\n n_fas = 0\n for line in file.readlines():\n if 'Mesh Points'.lower() in line.lower():\n n_pts = int(line.split(':')[1])\n stage = 'points'\n elif 'Mesh Elements'.lower() in line.lower():\n n_elems = int(line.split(':')[1])\n stage = 'elements'\n elif 'Element Attributes'.lower() in line.lower():\n n_eas = int(line.split(':')[1])\n stage = 'element attributes'\n elif 'Facets'.lower() in line.lower():\n n_facets = int(line.split(':')[1])\n stage = 'facets'\n elif 'Facet Attributes'.lower() in line.lower():\n n_fas = int(line.split(':')[1])\n stage = 'facet attributes'\n else:\n if stage == 'points':\n pts.append([float(x) for x in line.split(',')])\n elif stage == 'elements':\n elems.append([int(kp) for kp in line.split(',')])\n elif stage == 'element attributes':\n elem_atts.append(_misc.from_str(line))\n elif stage == 'facets':\n facets.append([int(kp) for kp in line.split(',')])\n elif stage == 'facet attributes':\n facet_atts.append(_misc.from_str(line))\n else:\n pass\n\n # check the inputs\n assert len(pts) == n_pts\n assert len(elems) == n_elems\n assert len(elem_atts) == n_eas\n assert len(facets) == n_facets\n assert len(facet_atts) == n_fas\n\n return cls(pts, elems, elem_atts, facets, facet_atts)\n\n @classmethod\n def from_polymesh(cls, polymesh, phases=None, min_angle=0,\n max_volume=float('inf'), max_edge_length=float('inf')):\n \"\"\"Create TriMesh from PolyMesh.\n\n This constuctor creates a triangle/tetrahedron mesh from a polygon\n mesh (:class:`.PolyMesh`). Polygons of the same seed number are\n merged and the element attribute is set to the seed number it is\n within. The facets between seeds are saved to the mesh and the index\n of the facet is stored in the facet attributes.\n\n Since the PolyMesh can include phase numbers for each region,\n additional information about the phases can be included as an input.\n The \"phases\" input should be a list of material phase dictionaries,\n formatted according to the :ref:`phase_dict_guide` guide.\n\n The minimum angle, maximum volume, and maximum edge length options\n provide quality controls for the mesh. The phase type option can take\n one of several values, described below.\n\n * **crystalline**: granular, solid\n * **amorphous**: glass, matrix\n * **void**: crack, hole\n\n The **crystalline** option creates a mesh where cells of the same seed\n number are merged, but cells are not merged across seeds. _This is\n the default material type._\n\n The **amorphous** option creates a mesh where cells of the same\n phase number are merged to create an amorphous region in the mesh.\n\n Finally, the **void** option will merge neighboring void cells and\n treat them as holes in the mesh.\n\n Args:\n polymesh (PolyMesh): A polygon/polyhedron mesh.\n phases (list): *(optional)* A list of dictionaries containing\n options for each phase.\n Default is\n ``{'material_type': 'solid', 'max_volume': float('inf')}``.\n min_angle (float): The minimum interior angle of an element.\n max_volume (float): The default maximum cell volume, used if one\n is not set for each phase.\n max_edge_length (float): The maximum edge length of elements\n along grain boundaries. 
Currently only supported in 2D.\n\n \"\"\"\n # condition the phases input\n if phases is None:\n default_dict = {'material_type': 'solid',\n 'max_volume': float('inf')}\n n_phases = int(np.max(polymesh.phase_numbers)) + 1\n phases = [default_dict for _ in range(n_phases)]\n\n # create point and facet lists\n kps = {}\n pts = []\n facets = []\n facet_neighs = []\n facet_nums = []\n for i in range(len(polymesh.facets)):\n facet = polymesh.facets[i]\n neighs = polymesh.facet_neighbors[i]\n if facet_check(neighs, polymesh, phases):\n new_facet = []\n for kp_old in facet:\n if kp_old not in kps:\n kp_new = len(pts)\n pts.append(polymesh.points[kp_old])\n kps[kp_old] = kp_new\n else:\n kp_new = kps[kp_old]\n new_facet.append(kp_new)\n facets.append(new_facet)\n facet_neighs.append(neighs)\n facet_nums.append(i + 1)\n\n # Subdivide facets\n n_dim = len(pts[0])\n if n_dim == 2:\n n_subs = np.ones(len(facets), dtype='int')\n for i, facet in enumerate(facets):\n pt1 = np.array(pts[facet[0]])\n pt2 = np.array(pts[facet[1]])\n rel_pos = pt2 - pt1\n n_float = np.linalg.norm(rel_pos) / max_edge_length\n n_int = max(1, np.ceil(n_float))\n n_subs[i] = n_int\n sub_out = meshpy.triangle.subdivide_facets(n_subs, pts, facets,\n facet_nums)\n pts, facets, facet_nums = sub_out\n\n # create groups/regions\n pts_arr = np.array(polymesh.points)\n regions = []\n holes = []\n\n ungrouped = np.full(len(polymesh.regions), True, dtype='?')\n while np.any(ungrouped):\n cell_ind = np.argmax(ungrouped)\n\n # compute cell center\n facet_list = polymesh.regions[cell_ind]\n cell_kps = set()\n [cell_kps.update(polymesh.facets[n]) for n in facet_list]\n cell_cen = pts_arr[list(cell_kps)].mean(axis=0)\n\n # seed number and phase type\n seed_num = int(polymesh.seed_numbers[cell_ind])\n phase_num = polymesh.phase_numbers[cell_ind]\n phase = phases[phase_num]\n phase_type = phase.get('material_type', 'crystalline')\n phase_vol = phase.get('max_volume', max_volume)\n\n # get all cell numbers in group\n cell_nums = set([cell_ind])\n old_len = len(cell_nums)\n searching_front = True\n while searching_front:\n front = set()\n for n in cell_nums:\n neighs = set()\n for facet_num in polymesh.regions[n]:\n f_neighs = polymesh.facet_neighbors[facet_num]\n neigh_ind = [i for i in f_neighs if i != n][0]\n if neigh_ind < 0:\n continue\n if not facet_check(f_neighs, polymesh, phases):\n neighs.add(neigh_ind)\n assert ungrouped[list(neighs)].all()\n front.update(neighs)\n cell_nums |= front\n new_len = len(cell_nums)\n searching_front = new_len != old_len\n old_len = new_len\n\n ungrouped[list(cell_nums)] = False\n\n # update appropriate list\n if phase_type in _misc.kw_void:\n holes.append(cell_cen)\n else:\n regions.append(cell_cen.tolist() + [seed_num, phase_vol])\n\n # build inputs\n if n_dim == 2:\n info = meshpy.triangle.MeshInfo()\n else:\n info = meshpy.tet.MeshInfo()\n\n info.set_points(pts)\n info.set_facets(facets, facet_nums)\n info.set_holes(holes)\n\n info.regions.resize(len(regions))\n for i, r in enumerate(regions):\n info.regions[i] = tuple(r)\n\n # run MeshPy\n if n_dim == 2:\n tri_mesh = meshpy.triangle.build(info,\n attributes=True,\n volume_constraints=True,\n max_volume=max_volume,\n min_angle=min_angle,\n generate_faces=True)\n else:\n opts = meshpy.tet.Options('pq')\n opts.mindihedral = min_angle\n opts.maxvolume = max_volume\n opts.fixedvolume = 1\n opts.regionattrib = 1\n opts.facesout = 1\n tri_mesh = meshpy.tet.build(info, options=opts)\n\n # return mesh\n tri_pts = np.array(tri_mesh.points)\n tri_elems = 
np.array(tri_mesh.elements)\n tri_e_atts = np.array(tri_mesh.element_attributes, dtype='int')\n\n tri_faces = np.array(tri_mesh.faces)\n tri_f_atts = np.array(tri_mesh.face_markers)\n f_mask = tri_f_atts > 0\n tri_f = tri_faces[f_mask]\n tri_fa = tri_f_atts[f_mask] - 1\n\n return cls(tri_pts, tri_elems, tri_e_atts, tri_f, tri_fa)\n\n # ----------------------------------------------------------------------- #\n # String and Representation Functions #\n # ----------------------------------------------------------------------- #\n def __str__(self):\n nv = len(self.points)\n nd = len(self.points[0])\n pt_fmt = '\\t'\n pt_fmt += ', '.join(['{pt[' + str(i) + ']: e}' for i in range(nd)])\n\n str_str = 'Mesh Points: ' + str(nv) + '\\n'\n str_str += ''.join([pt_fmt.format(pt=p) + '\\n' for p in self.points])\n\n str_str += 'Mesh Elements: ' + str(len(self.elements)) + '\\n'\n str_str += '\\n'.join(['\\t' + str(tuple(e))[1:-1] for e in\n self.elements])\n\n try:\n str_str += '\\nElement Attributes: '\n str_str += str(len(self.element_attributes)) + '\\n'\n str_str += '\\n'.join(['\\t' + str(a) for a in\n self.element_attributes])\n except TypeError:\n pass\n\n try:\n str_str += '\\nFacets: ' + str(len(self.facets)) + '\\n'\n str_str += '\\n'.join(['\\t' + str(tuple(f))[1:-1] for f in\n self.facets])\n except TypeError:\n pass\n\n try:\n str_str += '\\nFacet Attributes: '\n str_str += str(len(self.facet_attributes)) + '\\n'\n str_str += '\\n'.join(['\\t' + str(a) for a in\n self.facet_attributes])\n except TypeError:\n pass\n\n return str_str\n\n def __repr__(self):\n repr_str = 'TriMesh('\n repr_str += ', '.join([repr(v) for v in (self.points, self.elements,\n self.element_attributes, self.facets,\n self.facet_attributes)])\n repr_str += ')'\n return repr_str\n\n # ----------------------------------------------------------------------- #\n # Write Function #\n # ----------------------------------------------------------------------- #\n def write(self, filename, format='txt', seeds=None, polymesh=None):\n \"\"\"Write mesh to file.\n\n This function writes the contents of the mesh to a file.\n The format options are 'abaqus', 'tet/tri', 'txt', and 'vtk'.\n See the :ref:`s_tri_file_io` section of the :ref:`c_file_formats`\n guide for more details on these formats.\n\n Args:\n filename (str): The name of the file to write. In the cases of\n TetGen/Triangle, this is the basename of the files.\n format (str): {'abaqus' | 'tet/tri' | 'txt' | 'vtk'}\n *(optional)* The format of the output file.\n Default is 'txt'.\n seeds (SeedList): *(optional)* List of seeds. If given, VTK files\n will also include the phase number of of each element in the\n mesh. This assumes the ``element_attributes``\n field contains the seed number of each element.\n polymesh (PolyMesh): *(optional)* Polygonal mesh used for\n generating the triangular mesh. 
If given, will add surface\n unions to Abaqus files - for easier specification of\n boundary conditions.\n\n \"\"\" # NOQA: E501\n fmt = format.lower()\n if fmt == 'abaqus':\n # write top matter\n abaqus = '*Heading\\n'\n abaqus += '** Job name: microstructure '\n abaqus += 'Model name: microstructure_model\\n'\n abaqus += '** Generated by: MicroStructPy\\n'\n\n # write parts\n abaqus += '**\\n** PARTS\\n**\\n'\n abaqus += '*Part, name=Part-1\\n'\n\n abaqus += '*Node\\n'\n abaqus += ''.join([str(i + 1) + ''.join([', ' + str(x) for x in\n pt]) + '\\n' for i, pt in\n enumerate(self.points)])\n\n n_dim = len(self.points[0])\n elem_type = {2: 'CPS3', 3: 'C3D4'}[n_dim]\n\n abaqus += '*Element, type=' + elem_type + '\\n'\n abaqus += ''.join([str(i + 1) + ''.join([', ' + str(kp + 1) for kp\n in elem]) + '\\n' for\n i, elem in enumerate(self.elements)])\n\n # Element sets - seed number\n elset_n_per = 16\n elem_atts = np.array(self.element_attributes)\n for att in np.unique(elem_atts):\n elset_name = 'Set-E-Seed-' + str(att)\n elset_str = '*Elset, elset=' + elset_name + '\\n'\n elem_groups = [[]]\n for elem_ind, elem_att in enumerate(elem_atts):\n if ~np.isclose(elem_att, att):\n continue\n if len(elem_groups[-1]) >= elset_n_per:\n elem_groups.append([])\n elem_groups[-1].append(elem_ind + 1)\n for group in elem_groups:\n elset_str += ','.join([str(i) for i in group])\n elset_str += '\\n'\n\n abaqus += elset_str\n\n # Element Sets - phase number\n if seeds is not None:\n phase_nums = np.array([seed.phase for seed in seeds])\n for phase_num in np.unique(phase_nums):\n mask = phase_nums == phase_num\n seed_nums = np.nonzero(mask)[0]\n\n elset_name = 'Set-E-Material-' + str(phase_num)\n elset_str = '*Elset, elset=' + elset_name + '\\n'\n groups = [[]]\n for seed_num in seed_nums:\n if seed_num not in elem_atts:\n continue\n if len(groups[-1]) >= elset_n_per:\n groups.append([])\n seed_elset_name = 'Set-E-Seed-' + str(seed_num)\n groups[-1].append(seed_elset_name)\n for group in groups:\n elset_str += ','.join(group)\n elset_str += '\\n'\n abaqus += elset_str\n\n # Surfaces - Exterior and Interior\n facets = np.array(self.facets)\n facet_atts = np.array(self.facet_attributes)\n\n face_ids = {2: [2, 3, 1], 3: [3, 4, 2, 1]}[n_dim]\n\n for att in np.unique(facet_atts):\n facet_name = 'Surface-' + str(att)\n surf_str = '*Surface, name=' + facet_name + ', type=element\\n'\n\n att_facets = facets[facet_atts == att]\n for facet in att_facets:\n mask = np.isin(self.elements, facet)\n n_match = mask.astype('int').sum(axis=1)\n i_elem = np.argmax(n_match)\n elem_id = i_elem + 1\n\n i_missing = np.argmin(mask[i_elem])\n face_id = face_ids[i_missing]\n\n surf_str += str(elem_id) + ', S' + str(face_id) + '\\n'\n\n abaqus += surf_str\n\n # Surfaces - Exterior\n poly_neighbors = np.array(polymesh.facet_neighbors)\n poly_mask = np.any(poly_neighbors < 0, axis=1)\n neigh_nums = np.min(poly_neighbors, axis=1)\n u_neighs = np.unique(neigh_nums[poly_mask])\n for neigh_num in u_neighs:\n mask = neigh_nums == neigh_num\n facet_name = 'Ext-Surface-' + str(-neigh_num)\n surf_str = '*Surface, name=' + facet_name + ', combine=union\\n'\n for i, flag in enumerate(mask):\n if flag:\n surf_str += 'Surface-' + str(i) + '\\n'\n abaqus += surf_str\n\n # End Part\n abaqus += '*End Part\\n\\n'\n\n # Assembly\n abaqus += '**\\n'\n abaqus += '** ASSEMBLY\\n'\n abaqus += '**\\n'\n\n abaqus += '*Assembly, name=assembly\\n'\n abaqus += '**\\n'\n\n # Instances\n abaqus += '*Instance, name=I-Part-1, part=Part-1\\n'\n abaqus += '*End Instance\\n'\n\n # End Assembly\n abaqus += '**\\n'\n abaqus += '*End Assembly\\n'\n\n with open(filename, 'w') as file:\n file.write(abaqus)\n elif fmt in ('str', 'txt'):\n with open(filename, 'w') as file:\n file.write(str(self) + '\\n')\n\n elif fmt == 'tet/tri':\n # create boundary markers\n bnd_mkrs = np.full(len(self.points), 0, dtype='int')\n\n facet_arr = np.array(self.facets)\n f_bnd_mkrs = np.full(len(self.facets), 0, dtype='int')\n elem_arr = np.array(self.elements)\n for elem in self.elements:\n for i in range(len(elem)):\n e_facet = np.delete(elem, i)\n f_mask = np.full(elem_arr.shape[0], True)\n for kp in e_facet:\n f_mask &= np.any(elem_arr == kp, axis=-1)\n\n if np.sum(f_mask) == 1:\n bnd_mkrs[e_facet] = 1\n\n f_mask = np.full(facet_arr.shape[0], True)\n for kp in e_facet:\n f_mask &= np.any(facet_arr == kp, axis=-1)\n f_bnd_mkrs[f_mask] = 1\n\n # write vertices\n n_pts, n_dim = np.array(self.points).shape\n nodes = ' '.join([str(n) for n in (n_pts, n_dim, 0, 1)]) + '\\n'\n nodes += ''.join([str(i) + ''.join([' ' + str(x) for x in pt]) +\n ' ' + str(bnd_mkrs[i]) + '\\n' for i, pt in\n enumerate(self.points)])\n\n with open(filename + '.node', 'w') as file:\n file.write(nodes)\n\n # write elements\n n_ele, n_kp = np.array(self.elements).shape\n is_att = self.element_attributes is not None\n n_att = int(is_att)\n eles = ' '.join([str(n) for n in (n_ele, n_kp, n_att)]) + '\\n'\n for i, simplex in enumerate(self.elements):\n e_str = ' '.join([str(kp) for kp in simplex])\n if is_att:\n e_str += ' ' + str(self.element_attributes[i])\n e_str += '\\n'\n eles += e_str\n\n with open(filename + '.ele', 'w') as file:\n file.write(eles)\n\n # Write edges/faces\n if self.facets is not None:\n ext = {2: '.edge', 3: '.face'}[n_dim]\n\n n_facet, n_kp = np.array(self.facets).shape\n edge = ' '.join([str(n) for n in (n_facet, n_kp, 1)])\n edge += ''.join([str(i) + ''.join([' ' + str(k) for k in f]) +\n ' ' + str(mkr) + '\\n' for f, mkr in\n zip(self.facets, f_bnd_mkrs)])\n with open(filename + ext, 'w') as file:\n file.write(edge)\n\n elif fmt == 'vtk':\n n_kp = len(self.elements[0])\n mesh_type = {3: 'Triangular', 4: 'Tetrahedral'}[n_kp]\n pt_fmt = '{: f} {: f} {: f}\\n'\n # write heading\n vtk = '# vtk DataFile Version 2.0\\n'\n vtk += '{} mesh\\n'.format(mesh_type)\n vtk += 'ASCII\\n'\n vtk += 'DATASET UNSTRUCTURED_GRID\\n'\n\n # Write points\n vtk += 'POINTS ' + str(len(self.points)) + ' float\\n'\n if len(self.points[0]) == 2:\n vtk += ''.join([pt_fmt.format(x, y, 0) for x, y in\n self.points])\n else:\n vtk += ''.join([pt_fmt.format(x, y, z) for x, y, z in\n self.points])\n\n # write elements\n n_elem = len(self.elements)\n cell_fmt = str(n_kp) + n_kp * ' {}' + '\\n'\n cell_sz = (1 + n_kp) * n_elem\n vtk += '\\nCELLS ' + str(n_elem) + ' ' + str(cell_sz) + '\\n'\n vtk += ''.join([cell_fmt.format(*el) for el in self.elements])\n\n # write cell type\n vtk += '\\nCELL_TYPES ' + str(n_elem) + '\\n'\n cell_type = {3: '5', 4: '10'}[n_kp]\n vtk += ''.join(n_elem * [cell_type + '\\n'])\n\n # write element attributes\n try:\n int(self.element_attributes[0])\n att_type = 'int'\n except TypeError:\n att_type = 'float'\n\n vtk += '\\nCELL_DATA ' + str(n_elem) + '\\n'\n vtk += 'SCALARS element_attributes ' + att_type + ' 1 \\n'\n vtk += 'LOOKUP_TABLE element_attributes\\n'\n vtk += ''.join([str(a) + '\\n' for a in self.element_attributes])\n\n # Write phase numbers\n if seeds is not None:\n vtk += '\\nSCALARS phase_numbers int 1 \\n'\n vtk += 'LOOKUP_TABLE phase_numbers\\n'\n vtk += ''.join([str(seeds[a].phase) + '\\n' for a in\n self.element_attributes])\n\n with open(filename, 'w') as file:\n file.write(vtk)\n\n else:\n e_str = 'Cannot write file type ' + str(format) + ' yet.'\n raise NotImplementedError(e_str)\n\n # ----------------------------------------------------------------------- #\n # Plot Function #\n # ----------------------------------------------------------------------- #\n def plot(self, index_by='element', material=[], loc=0, **kwargs):\n \"\"\"Plot the mesh.\n\n This method plots the mesh using matplotlib.\n In 2D, this creates a :class:`matplotlib.collections.PolyCollection`\n and adds it to the current axes.\n In 3D, it creates a\n :class:`mpl_toolkits.mplot3d.art3d.Poly3DCollection` and\n adds it to the current axes.\n The keyword arguments are passed through to matplotlib.\n\n Args:\n index_by (str): *(optional)* {'element' | 'attribute'}\n Flag for indexing into the other arrays passed into the\n function. For example,\n ``plot(index_by='attribute', color=['blue', 'red'])`` will plot\n the elements with ``element_attribute`` equal to 0 in blue, and\n elements with ``element_attribute`` equal to 1 in red.\n Note that in 3D the facets are plotted instead of the elements,\n so kwarg lists must be based on ``facets`` and\n ``facet_attributes``. Defaults to 'element'.\n material (list): *(optional)* Names of material phases. One entry\n per material phase (the ``index_by`` argument is ignored).\n If this argument is set, a legend is added to the plot with\n one entry per material. Note that the ``element_attributes``\n in 2D or the ``facet_attributes`` in 3D must be the material\n numbers for the legend to be formatted properly.\n loc (int or str): *(optional)* The location of the legend,\n if 'material' is specified. This argument is passed directly\n through to :func:`matplotlib.pyplot.legend`. Defaults to 0,\n which is 'best' in matplotlib.\n **kwargs: Keyword arguments that are passed through to matplotlib.\n\n \"\"\"\n n_dim = len(self.points[0])\n if n_dim == 2:\n ax = plt.gca()\n else:\n ax = plt.gcf().gca(projection=Axes3D.name)\n n_obj = _misc.ax_objects(ax)\n if n_obj > 0:\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n else:\n xlim = [float('inf'), -float('inf')]\n ylim = [float('inf'), -float('inf')]\n if n_dim == 2:\n simps = np.array(self.elements)\n pts = np.array(self.points)\n xy = pts[simps, :]\n\n plt_kwargs = {}\n for key, value in kwargs.items():\n if type(value) in (list, np.array):\n plt_value = []\n for e_num, e_att in enumerate(self.element_attributes):\n if index_by == 'element':\n ind = e_num\n elif index_by == 'attribute':\n ind = int(e_att)\n else:\n e_str = 'Cannot index by {}.'.format(index_by)\n raise ValueError(e_str)\n v = value[ind]\n plt_value.append(v)\n else:\n plt_value = value\n plt_kwargs[key] = plt_value\n\n pc = collections.PolyCollection(xy, **plt_kwargs)\n ax.add_collection(pc)\n ax.autoscale_view()\n else:\n if n_obj > 0:\n zlim = ax.get_zlim()\n else:\n zlim = [float('inf'), -float('inf')]\n\n xy = [np.array([self.points[kp] for kp in f]) for f in self.facets]\n\n plt_kwargs = {}\n for key, value in kwargs.items():\n if type(value) in (list, np.array):\n plt_value = []\n for f_num, f_att in enumerate(self.facet_attributes):\n if index_by == 'element':\n ind = f_num\n elif index_by == 'attribute':\n ind = int(f_att)\n else:\n e_str = 'Cannot index by {}.'.format(index_by)\n raise ValueError(e_str)\n if ind < len(value):\n v = value[ind]\n else:\n v = 'none'\n plt_value.append(v)\n else:\n plt_value = value\n plt_kwargs[key] = plt_value\n pc = Poly3DCollection(xy, **plt_kwargs)\n ax.add_collection(pc)\n\n # Add legend\n if material and index_by == 'attribute':\n p_kwargs = [{'label': m} for m in material]\n for key, value in kwargs.items():\n if type(value) not in (list, np.array):\n for kws in p_kwargs:\n kws[key] = value\n\n for i, m in enumerate(material):\n if type(value) in (list, np.array):\n p_kwargs[i][key] = value[i]\n else:\n p_kwargs[i][key] = value\n\n # Replace plural keywords\n for p_kw in p_kwargs:\n for kw in _misc.mpl_plural_kwargs:\n if kw in p_kw:\n p_kw[kw[:-1]] = p_kw[kw]\n del p_kw[kw]\n handles = [patches.Patch(**p_kw) for p_kw in p_kwargs]\n ax.legend(handles=handles, loc=loc)\n\n # Adjust Axes\n mins = np.array(self.points).min(axis=0)\n maxs = np.array(self.points).max(axis=0)\n xlim = (min(xlim[0], mins[0]), max(xlim[1], maxs[0]))\n ylim = (min(ylim[0], mins[1]), max(ylim[1], maxs[1]))\n if n_dim == 2:\n plt.axis('square')\n plt.xlim(xlim)\n plt.ylim(ylim)\n elif n_dim == 3:\n zlim = (min(zlim[0], mins[2]), max(zlim[1], maxs[2]))\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n ax.set_zlim(zlim)\n _misc.axisEqual3D(ax)\n\n\ndef facet_check(neighs, polymesh, phases):\n if any([n < 0 for n in neighs]):\n add_facet = True\n else:\n seed_nums = [polymesh.seed_numbers[n] for n in neighs]\n phase_nums = [polymesh.phase_numbers[n] for n in neighs]\n m1, m2 = [phases[n].get('material_type', 'solid') for n in\n phase_nums]\n\n same_seed = seed_nums[0] == seed_nums[1]\n same_phase = phase_nums[0] == phase_nums[1]\n\n if (m1 in _misc.kw_solid) and same_seed:\n add_facet = False\n elif (m1 in _misc.kw_amorph) and same_phase:\n add_facet = False\n elif (m1 in _misc.kw_void) and (m2 in _misc.kw_void):\n add_facet = False\n else:\n add_facet = True\n\n return add_facet\n"
] | [
[
"numpy.isclose",
"matplotlib.pyplot.xlim",
"numpy.argmin",
"numpy.min",
"matplotlib.pyplot.gcf",
"numpy.max",
"numpy.full",
"numpy.linalg.norm",
"numpy.nonzero",
"numpy.argmax",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axis",
"numpy.array",
"numpy.delete",
"matplotlib.patches.Patch",
"numpy.ceil",
"numpy.sum",
"matplotlib.pyplot.ylim",
"numpy.any",
"matplotlib.collections.PolyCollection",
"numpy.unique",
"numpy.isin"
]
] |
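The Abaqus surface block above maps each facet to an element face by locating the one element node that is not on the facet. A minimal standalone sketch of that lookup, with made-up element and facet data (numpy only):

import numpy as np

# Hypothetical two-tet mesh; mirrors the np.isin / argmax / argmin lookup above.
elements = np.array([[0, 1, 2, 3],
                     [1, 2, 3, 4]])      # two tetrahedra sharing one face
facet = [1, 2, 3]                        # the shared triangular facet
face_ids = [3, 4, 2, 1]                  # Abaqus C3D4 face numbering (3D case)

mask = np.isin(elements, facet)          # True where an element node is on the facet
n_match = mask.astype('int').sum(axis=1)
i_elem = np.argmax(n_match)              # first element containing the facet
i_missing = np.argmin(mask[i_elem])      # local index of the node off the facet
print(str(i_elem + 1) + ', S' + str(face_ids[i_missing]))  # -> "1, S3"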
Yunshan-Liu/gdplib | [
"f453abcde1906018635c2245414b707b69fa110c"
] | [
"gdplib/logical/spectralog.py"
] | [
"# coding: utf-8\n\n# # [Pyomo.GDP](./index.ipynb) Logical Expression System Demo - IR Spectroscopy Parameter Estimation\n#\n# This is a reproduction of the IR spectroscopy parameter estimation problem found in:\n#\n# > Vecchietti A. & Grossmann I. E.\n# > LOGMIP: A disjunctive 0-1 non-linear optimizer for process system models,\n# > *Comp. & Chem Eng.* 23, p. 555-565, 1999.\n#\n# This code relies on the logic-v1 branch at https://github.com/qtothec/pyomo/tree/logic-v1\n\n# Optimal value: 12.0893\n\nfrom pyomo.environ import *\nfrom pyomo.gdp import *\nfrom pyomo.core.expr.logical_expr import *\nfrom pyomo.core.plugins.transform.logical_to_linear import update_boolean_vars_from_binary\nfrom six import StringIO\nimport pandas as pd\n\n\ndef build_model():\n spectroscopic_data = StringIO(\"\"\"\n 1 2 3 4 5 6 7 8\n 1 0.0003 0.0764 0.0318 0.0007 0.0534 0.0773 0.0536 0.0320\n 2 0.0007 0.0003 0.0004 0.0009 0.0005 0.0009 0.0005 0.0003\n 3 0.0066 0.0789 0.0275 0.0043 0.0704 0.0683 0.0842 0.0309\n 4 0.0044 0.0186 0.0180 0.0179 0.0351 0.0024 0.0108 0.0052\n 5 0.0208 0.0605 0.0601 0.0604 0.0981 0.0025 0.0394 0.0221\n 6 0.0518 0.1656 0.1491 0.1385 0.2389 0.0248 0.1122 0.0633\n 7 0.0036 0.0035 0.0032 0.0051 0.0015 0.0094 0.0015 0.0024\n 8 0.0507 0.0361 0.0433 0.0635 0.0048 0.0891 0.0213 0.0310\n 9 0.0905 0.0600 0.0754 0.1098 0.0038 0.1443 0.0420 0.0574\n 10 0.0016 0.0209 0.0063 0.0010 0.0132 0.0203 0.0139 0.0057\n \"\"\")\n # Note: this could come from an external data file\n spectroscopic_data_table = pd.read_csv(spectroscopic_data, delimiter=r'\\s+')\n flat_spectro_data = spectroscopic_data_table.stack()\n spectro_data_dict = {(k[0], int(k[1])): v for k, v in flat_spectro_data.to_dict().items()} # column labels to integer\n\n c_data = StringIO(\"\"\"\n 1 2 3 4 5 6 7 8\n 1 502 204 353 702 0 1016 104 204\n 2 97 351 351 351 700 0 201 97\n 3 0 22 8 0 14 22 14 8 \n \"\"\")\n c_data_table = pd.read_csv(c_data, delimiter=r'\\s+')\n c_data_dict = {(k[0], int(k[1])): v for k, v in c_data_table.stack().to_dict().items()}\n\n # Covariance matrix\n r_data = StringIO(\"\"\"\n 1 2 3\n 1 1 0 0\n 2 0 1 0\n 3 0 0 1\n \"\"\")\n r_data_table = pd.read_csv(r_data, delimiter=r'\\s+')\n r_data_dict = {(k[0], int(k[1])): v for k, v in r_data_table.stack().to_dict().items()}\n\n m = ConcreteModel(name=\"IR spectroscopy parameter estimation\")\n m.wave_number = RangeSet(10)\n m.spectra_data = RangeSet(8)\n m.compounds = RangeSet(3)\n\n m.A = Param(m.wave_number, m.spectra_data, initialize=spectro_data_dict)\n m.C = Param(m.compounds, m.spectra_data, initialize=c_data_dict)\n m.R = Param(m.compounds, m.compounds, initialize=r_data_dict)\n\n m.val = Var(m.spectra_data)\n m.ent = Var(m.compounds, m.wave_number, bounds=(0, 1))\n m.Y = BooleanVar(m.compounds, m.wave_number)\n m.P = Var(m.compounds, m.wave_number, bounds=(0, 1000))\n\n @m.Disjunction(m.compounds, m.wave_number)\n def d(m, k, i):\n return [\n [m.P[k, i] <= 1000, m.P[k, i] >= 0, m.ent[k, i] == 1],\n [m.P[k, i] == 0, m.ent[k, i] == 0]\n ]\n\n for k, i in m.compounds * m.wave_number:\n m.Y[k, i].set_binary_var(m.d[k, i].disjuncts[0].indicator_var)\n\n @m.Constraint(m.spectra_data)\n def eq1(m, j):\n return m.val[j] == sum(\n sum((m.C[kk, j] / 100 - sum(m.P[kk, i] * m.A[i, j] for i in m.wave_number))\n * m.R[kk, k]\n for kk in m.compounds)\n * (m.C[k, j] / 100 - sum(m.P[k, i] * m.A[i, j] for i in m.wave_number))\n for k in m.compounds\n )\n\n m.profit = Objective(\n expr=sum(m.val[j] for j in m.spectra_data) + 2 * sum(m.ent[k, i] for k in m.compounds for i in 
m.wave_number))\n\n return m\n\n\nif __name__ == \"__main__\":\n m = build_model()\n TransformationFactory('core.logical_to_linear').apply_to(m)\n # res = SolverFactory('gdpopt').solve(m, tee=False, nlp_solver='gams')\n TransformationFactory('gdp.bigm').apply_to(m)\n SolverFactory('gams').solve(m, tee=True, solver='baron')\n update_boolean_vars_from_binary(m)\n m.profit.display()\n m.Y.display()\n m.P.display()\n\n\n"
] | [
[
"pandas.read_csv"
]
] |
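The model above repeatedly converts a whitespace-separated table into a {(row, column): value} dict for Param initialization. A minimal sketch of that stack()-to-dict pattern, with toy numbers in place of the spectroscopic data:

import pandas as pd
from io import StringIO

# Toy table standing in for the real data; same parsing pattern as above.
raw = StringIO("""
 1 2 3
1 10 20 30
2 40 50 60
""")
table = pd.read_csv(raw, delimiter=r'\s+')
flat = table.stack()                     # MultiIndex Series: (row label, column label)
data = {(k[0], int(k[1])): v for k, v in flat.to_dict().items()}
print(data[(2, 3)])                      # -> 60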
tanlinc/opticalFlowGAN | [
"f568e531265029f2f25f223ee92e1f53c0bb52f6"
] | [
"tflib/FlowToolsColor.py"
] | [
"import numpy as np\n\ndef makeColorWheel():\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n size = RY + YG + GC + CB + BM + MR\n colorwheel = np.zeros((3, size))\n\n col = 0\n # RY\n colorwheel[0, col:col+RY] = 255\n colorwheel[1, col:col+RY] = np.floor(255 * np.arange(RY)/RY)\n col += RY\n\n # YG\n colorwheel[0, col:col+YG] = 255 - np.floor(255 * np.arange(YG)/YG)\n colorwheel[1, col:col+YG] = 255\n col += YG\n\n # GC\n colorwheel[1, col:col+GC] = 255\n colorwheel[2, col:col+GC] = np.floor(255 * np.arange(GC)/GC)\n col += GC\n\n # CB\n colorwheel[1, col:col+CB] = 255 - np.floor(255 * np.arange(CB)/CB)\n colorwheel[2, col:col+CB] = 255\n col += CB\n\n # BM\n colorwheel[0, col:col+BM] = np.floor(255 * np.arange(BM)/BM)\n colorwheel[2, col:col+BM] = 255\n col += BM\n\n # MR\n colorwheel[0, col:col+MR] = 255\n colorwheel[2, col:col+MR] = 255 - np.floor(255 * np.arange(MR)/MR)\n\n return colorwheel.astype('uint8');\n\ndef computeNormalizedFlow(u, v, max_flow=-1, min_max_flow = -1):\n \n eps = 1e-15\n UNKNOWN_FLOW_THRES = 1e9\n UNKNOWN_FLOW = 1e10\n\n maxu = -999\n maxv = -999\n minu = 999\n minv = 999\n maxrad = -1\n\n # fix unknown flow\n idxUnknown = np.where(np.logical_or(np.abs(u) > UNKNOWN_FLOW_THRES, np.abs(v) > UNKNOWN_FLOW_THRES))\n u[idxUnknown] = 0\n v[idxUnknown] = 0\n\n #maxu = np.maximum(maxu, np.max(u))\n #minu = np.minimum(minu, np.min(u))\n\n #maxv = np.maximum(maxv, np.max(v))\n #minv = np.minimum(minv, np.min(v))\n \n if max_flow < 0:\n rad = np.sqrt(u**2 + v**2)\n if min_max_flow >=0:\n rad = np.max((np.max(rad), min_max_flow)) # lower bound for max_flow => don't amplifiy noise\n else:\n rad = max_flow #biggest allowed flow = max_flow\n maxrad = np.max(rad)\n\n #print(\"max flow: \", maxrad, \" flow range: u = \", minu, \"..\", maxu, \"v = \", minv, \"..\", maxv)\n\n u = u / (maxrad + eps)\n v = v / (maxrad + eps)\n\n return u, v\n\ndef computeFlowImg(flow, max_flow=-1,min_max_flow=-1):\n\n \n u, v = flow[:,:,0], flow[:,:,1]\n \n \n u, v = computeNormalizedFlow(u, v, max_flow,min_max_flow)\n\n nanIdx = np.logical_or(np.isnan(u), np.isnan(v))\n u[np.where(nanIdx)] = 0\n v[np.where(nanIdx)] = 0\n\n cw = makeColorWheel().T\n\n M, N = u.shape\n img = np.zeros((M, N, 3)).astype('uint8')\n\n mag = np.sqrt(u**2 + v**2)\n \n phi = np.arctan2(-v, -u) / np.pi # [-1, 1]\n phi_idx = (phi + 1.0) / 2.0 * (cw.shape[0] - 1)\n f_phi_idx = np.floor(phi_idx).astype('int')\n\n c_phi_idx = f_phi_idx + 1\n c_phi_idx[c_phi_idx == cw.shape[0]] = 0\n\n floor = phi_idx - f_phi_idx\n\n for i in range(cw.shape[1]):\n tmp = cw[:,i]\n \n # linear blend between colors\n col0 = tmp[f_phi_idx] / 255.0 # from colorwheel take specified values in phi_idx\n col1 = tmp[c_phi_idx] / 255.0\n col = (1.0 - floor)*col0 + floor * col1\n\n # increase saturation for small magnitude\n sat_idx = np.where(mag <= 1)\n non_sat_idx = np.where(mag > 1)\n col[sat_idx] = 1 - mag[sat_idx] * (1 - col[sat_idx])\n\n col[non_sat_idx] = col[non_sat_idx] * 0.75\n\n img[:,:, i] = (np.floor(255.0*col*(1-nanIdx))).astype('uint8')\n return img"
] | [
[
"numpy.max",
"numpy.isnan",
"numpy.zeros",
"numpy.where",
"numpy.arange",
"numpy.arctan2",
"numpy.sqrt",
"numpy.abs",
"numpy.floor"
]
] |
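A possible usage sketch for the module above, assuming it is importable under the repository layout shown; the synthetic flow field is invented for illustration:

import numpy as np
from tflib.FlowToolsColor import computeFlowImg  # import path as laid out in this repo

# Synthetic diverging flow field; values are made up for illustration.
h, w = 64, 64
y, x = np.mgrid[0:h, 0:w].astype(np.float64)
flow = np.stack([(x - w / 2) / (w / 2),      # u component
                 (y - h / 2) / (h / 2)],     # v component
                axis=-1)
img = computeFlowImg(flow)                   # uint8 RGB rendering of the flow
print(img.shape, img.dtype)                  # -> (64, 64, 3) uint8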
chreman/Headstart | [
"5d8b956faac4389c649f3072b5ac55aaa01644c6"
] | [
"server/workers/dataprocessing/src/streamgraph.py"
] | [
"import pandas as pd\nimport logging\nimport sys\nimport re\nimport numpy as np\n\nfrom itertools import chain\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import NMF, LatentDirichletAllocation\n\nformatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\nnp.random.seed(42)\n\n\nclass Streamgraph(object):\n\n def __init__(self, loglevel=\"INFO\"):\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(loglevel)\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(formatter)\n handler.setLevel(loglevel)\n self.logger.addHandler(handler)\n \n def tokenize(self, s):\n #return re.split(\"; | - |, |: \", s)\n return re.split(\"; \", s)\n\n def get_streamgraph_data(self, metadata, query, n=12, method=\"count\"):\n metadata = pd.DataFrame.from_records(metadata)\n df = metadata.copy()\n df.year = pd.to_datetime(df.year).map(lambda x: x.replace(month=1, day=1))\n df = df[df.subject.map(lambda x: x is not None)]\n df.subject = df.subject.map(lambda x: [s.lower() for s in self.tokenize(x)] if isinstance(x, str) else \"\")\n df = df[df.subject.map(lambda x: x != [])]\n df[\"boundary_label\"] = df.year\n df = df.explode('subject')\n df = df[df.subject != \"\"]\n counts = self.get_counts(df)\n boundaries = self.get_boundaries(df)\n daterange = self.get_daterange(boundaries)\n data = pd.merge(counts, boundaries, on='year')\n top_n = self.get_top_n(metadata.copy(), query, n, method)\n data = (data[data.subject.str.contains('|'.join(top_n), case=False)]\n .sort_values(\"year\")\n .reset_index(drop=True))\n sg_data = {}\n sg_data[\"x\"], sg_data[\"subject\"] = self.build_sg_data(daterange, data, top_n)\n return sg_data\n\n @staticmethod\n def get_x_axis(daterange):\n return [str(x.year) for x in daterange]\n\n @staticmethod\n def get_daterange(boundaries):\n daterange = pd.date_range(start=min(boundaries.year),\n end=max(boundaries.year),\n freq='AS')\n if len(daterange) > 0:\n return sorted(daterange)\n else:\n return sorted(pd.unique(boundaries.year))\n\n @staticmethod\n def get_stream_range(df):\n stream_range = {\n \"min\": min(df.year),\n \"max\": max(df.year),\n \"range\": max(df.year) - min(df.year)\n }\n return stream_range\n\n @staticmethod\n def get_counts(df):\n counts = (df.groupby([\"year\", \"subject\"])\n .agg({'subject': 'count', 'id': lambda x: \", \".join(x)}))\n counts.rename({\"subject\": \"counts\"}, axis=1, inplace=True)\n counts.reset_index(inplace=True)\n return counts\n\n @staticmethod\n def get_boundaries(df):\n boundaries = df[[\"boundary_label\", \"year\"]].drop_duplicates()\n return boundaries\n\n def get_top_n(self, df, query, n, method):\n df = df[df.subject.map(lambda x: len(x) > 2)]\n corpus = df.subject.tolist()\n # set stopwords , stop_words='english'\n tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n tokenizer=lambda x: self.tokenize(x),\n lowercase=True,\n stop_words=[query]\n )\n tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\n tokenizer=lambda x: self.tokenize(x),\n lowercase=True,\n stop_words=[query]\n )\n if method == \"count\":\n tf = tf_vectorizer.fit_transform(corpus)\n counts = pd.DataFrame(tf.toarray(),\n columns=tf_vectorizer.get_feature_names())\n candidates = counts.sum().sort_values(ascending=False).index.tolist()\n candidates = [c for c in candidates if len(c) > 2]\n top_n = candidates[:n]\n if method == \"tfidf\":\n tfidf = tfidf_vectorizer.fit_transform(corpus)\n weights = 
pd.DataFrame(tfidf.toarray(),\n columns=tfidf_vectorizer.get_feature_names())\n candidates = weights.sum().sort_values(ascending=False).index.tolist()\n candidates = [c for c in candidates if len(c) > 2]\n top_n = candidates[:n]\n if method == \"nmf\":\n tfidf = tfidf_vectorizer.fit_transform(corpus)\n nmf = NMF(n_components=n,\n alpha=.1, l1_ratio=.5, init='nndsvd',\n random_state=42).fit(tfidf)\n top_n = list(chain.from_iterable(\n [self.get_top_words(t, tfidf_vectorizer.get_feature_names(), 1)\n for t in nmf.components_]))\n if method == \"lda\":\n tf = tf_vectorizer.fit_transform(corpus)\n lda = LatentDirichletAllocation(n_components=n, max_iter=20,\n learning_method='batch',\n learning_offset=50.,\n random_state=42).fit(tf)\n top_n = list(chain.from_iterable(\n [self.get_top_words(t, tf_vectorizer.get_feature_names(), 1)\n for t in lda.components_]))\n return top_n\n\n @staticmethod\n def get_top_words(topic, feature_names, n):\n indices = topic.argsort()[::-1]\n words = [feature_names[i] for i in indices]\n words = [w for w in words if len(w) > 2]\n return words[:n]\n\n def build_sg_data(self, daterange, data, top_n):\n x = pd.DataFrame(daterange, columns=[\"year\"])\n temp = []\n for item in top_n:\n tmp = (pd.merge(data[data.subject.str.contains(item, case=False)], x,\n left_on=\"year\", right_on=\"year\",\n how=\"right\")\n .groupby(\"year\")\n .agg({\"subject\": \"sum\",\n \"counts\": \"sum\",\n \"id\": aggregate_ids,\n \"boundary_label\": \"max\"})\n .fillna({\"counts\": 0, \"subject\": item, \"id\": \"NA\"})\n .sort_values(\"year\"))\n tmp[\"subject\"] = item\n tmp[\"counts\"] = tmp[\"id\"].map(lambda x: len(set(filter(lambda x: x!=\"NA\", x.split(\", \")))))\n y = tmp.counts.astype(int).to_list()\n ids_timestep = tmp.id.map(lambda x: list(set(filter(lambda x: x!=\"NA\", x.split(\", \"))))).tolist()\n temp.append({\"name\": item, \"y\": y,\n \"ids_timestep\": ids_timestep})\n df = pd.DataFrame.from_records(temp)\n df[\"name\"] = df.name.apply(str.capitalize)\n x, df = self.reduce_daterange(daterange, df)\n df = df[df[\"ids_overall\"].map(lambda x: len(x) != 0)]\n return x, df.to_dict(orient=\"records\")\n \n def reduce_daterange(self, daterange, df):\n x = self.get_x_axis(daterange)\n yearly_sums = pd.DataFrame(df.y.to_list()).T.sum(axis=1)\n yearly_sums_cum = yearly_sums.cumsum()\n # 5% which is chosen here is an arbitrary value, could also be higher 10% or lower\n min_value = int(yearly_sums.sum() * 0.05)\n start_index = yearly_sums_cum[yearly_sums_cum > min_value].index[0]\n df.y = df.y.map(lambda x: x[start_index:])\n df.ids_timestep = df.ids_timestep.map(lambda x: x[start_index:])\n x = x[start_index:]\n df[\"ids_overall\"] = df.ids_timestep.map(lambda x: list(chain.from_iterable(x)))\n return x, df\n \n @staticmethod\n def reduce_metadata_set(metadata, sg_data):\n metadata = pd.read_json(metadata)\n df = pd.DataFrame.from_records(sg_data[\"subject\"])\n all_ids = set(chain.from_iterable(df.ids_overall))\n metadata = metadata[metadata.id.map(lambda x: x in all_ids)]\n return metadata.to_json(orient=\"records\")\n\ndef aggregate_ids(series):\n try:\n return \", \".join(pd.unique(series))\n except Exception:\n return \"NA\""
] | [
[
"pandas.to_datetime",
"pandas.DataFrame.from_records",
"pandas.merge",
"numpy.random.seed",
"pandas.DataFrame",
"pandas.read_json",
"sklearn.decomposition.NMF",
"sklearn.decomposition.LatentDirichletAllocation",
"pandas.unique"
]
] |
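get_counts() above relies on pandas explode followed by a grouped aggregation. A minimal sketch with made-up records, using named aggregation as a safe equivalent of the agg dict in the source:

import pandas as pd

# Made-up records standing in for the real metadata.
df = pd.DataFrame({
    'id':      ['a', 'b', 'c'],
    'year':    ['2019', '2019', '2020'],
    'subject': [['ml', 'nlp'], ['ml'], ['nlp']],
})
df = df.explode('subject')               # one row per (document, subject) pair
counts = (df.groupby(['year', 'subject'])
            .agg(counts=('id', 'count'),
                 id=('id', lambda x: ', '.join(x)))
            .reset_index())
print(counts)                            # 2019/ml has counts=2 and id='a, b'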
VisualComputingInstitute/PARIS-sem-seg | [
"f70cccc533628349870f5fb6d5b0f76b1381392f"
] | [
"output_losses.py"
] | [
"import tensorflow as tf\n\nLOSS_CHOICES = [\n 'cross_entropy_loss',\n 'bootstrapped_cross_entropy_loss',\n 'focal_loss'\n]\n\ndef cross_entropy_loss(logits, target, void=-1):\n logits_flat = tf.reshape(logits, [-1, logits.shape[-1]])\n target_flat = tf.reshape(target, [-1])\n mask = tf.not_equal(target_flat, void)\n logits_masked = tf.boolean_mask(logits_flat, mask)\n target_masked = tf.boolean_mask(target_flat, mask)\n return tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target_masked, logits=logits_masked)\n\n\ndef bootstrapped_cross_entropy_loss(logits, target, bootstrap_factor=4,\n void=-1):\n # As described in:\n # Bridging Categorylevel and Instance-level Semantic Image Segmentation\n # Z. Wu, C. Shen, and A. v. d. Hengel.\n # arXiv:1605.06885\n top_count = tf.cast(tf.size(target) / bootstrap_factor, tf.int32)\n losses = cross_entropy_loss(logits, target, void)\n # Sometimes after filtering voids, the top count might be higher than the\n # valid number of pixels. We need to fix that here.\n top_count = tf.minimum(top_count, tf.size(losses))\n\n # It is unclear why this happens, but apparently, sometimes the top_count\n # becomes zero and the gradient computation of top_k fails.\n losses = tf.cond(\n tf.equal(0, top_count),\n lambda: tf.constant([0.0]),\n lambda: tf.nn.top_k(losses, k=top_count, sorted=False)[0])\n\n return losses\n\ndef focal_loss(logits, target, correction_alpha=1, gamma=2, void=-1):\n # As described in:\n # Focal Loss for Dense Object Detection\n # Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, Piotr Dollรกr\n # ICCV'17\n losses = cross_entropy_loss(logits, target, void)\n target_probabilities = tf.exp(-losses)\n weight = correction_alpha * tf.pow(1.0 - target_probabilities, gamma)\n return weight * losses"
] | [
[
"tensorflow.exp",
"tensorflow.size",
"tensorflow.not_equal",
"tensorflow.equal",
"tensorflow.reshape",
"tensorflow.constant",
"tensorflow.nn.top_k",
"tensorflow.pow",
"tensorflow.boolean_mask",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits"
]
] |
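A toy sanity check for the losses above, assuming the file is importable as output_losses and TF2 eager execution; shapes and data are random stand-ins:

import numpy as np
import tensorflow as tf

import output_losses  # the module above; assumes it sits on the Python path

# Random toy batch: 3 classes, void pixels labelled -1 are masked out.
np.random.seed(0)
logits = tf.constant(np.random.randn(1, 4, 4, 3).astype(np.float32))
target = tf.constant(np.random.randint(-1, 3, size=(1, 4, 4)).astype(np.int32))

ce = output_losses.cross_entropy_loss(logits, target, void=-1)
fl = output_losses.focal_loss(logits, target, void=-1)
# The focal weight (1 - p)^gamma is <= 1, so the focal mean cannot exceed the
# plain cross-entropy mean.
print(float(tf.reduce_mean(ce)), float(tf.reduce_mean(fl)))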
chenxu31/DCLGAN | [
"7c190091f6190cfb96579bd2bdf9ee450d0c8151"
] | [
"models/base_model.py"
] | [
"import os\nimport torch\nfrom collections import OrderedDict\nfrom abc import ABC, abstractmethod\nfrom . import networks\n\n\nclass BaseModel(ABC):\n \"\"\"This class is an abstract base class (ABC) for models.\n To create a subclass, you need to implement the following five functions:\n -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).\n -- <set_input>: unpack data from dataset and apply preprocessing.\n -- <forward>: produce intermediate results.\n -- <optimize_parameters>: calculate losses, gradients, and update network weights.\n -- <modify_commandline_options>: (optionally) add model-specific options and set default options.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize the BaseModel class.\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n\n When creating your custom class, you need to implement your own initialization.\n In this fucntion, you should first call <BaseModel.__init__(self, opt)>\n Then, you need to define four lists:\n -- self.loss_names (str list): specify the training losses that you want to plot and save.\n -- self.model_names (str list): specify the images that you want to display and save.\n -- self.visual_names (str list): define networks used in our training.\n -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.\n \"\"\"\n self.opt = opt\n self.gpu_ids = opt.gpu_ids\n self.isTrain = opt.isTrain\n self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU\n self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir\n if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.\n torch.backends.cudnn.benchmark = True\n self.loss_names = []\n self.model_names = []\n self.visual_names = []\n self.optimizers = []\n self.image_paths = []\n self.metric = 0 # used for learning rate policy 'plateau'\n\n @staticmethod\n def dict_grad_hook_factory(add_func=lambda x: x):\n saved_dict = dict()\n\n def hook_gen(name):\n def grad_hook(grad):\n saved_vals = add_func(grad)\n saved_dict[name] = saved_vals\n return grad_hook\n return hook_gen, saved_dict\n\n @staticmethod\n def modify_commandline_options(parser, is_train):\n \"\"\"Add new model-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. 
You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n \"\"\"\n return parser\n\n @abstractmethod\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): includes the data itself and its metadata information.\n \"\"\"\n pass\n\n @abstractmethod\n def forward(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n pass\n\n @abstractmethod\n def optimize_parameters(self):\n \"\"\"Calculate losses, gradients, and update network weights; called in every training iteration\"\"\"\n pass\n\n def setup(self, opt):\n \"\"\"Load and print networks; create schedulers\n\n Parameters:\n opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n if self.isTrain:\n self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]\n if not self.isTrain or opt.continue_train:\n load_suffix = opt.epoch\n self.load_networks(load_suffix)\n\n self.print_networks(opt.verbose)\n\n def parallelize(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n setattr(self, 'net' + name, torch.nn.DataParallel(net, self.opt.gpu_ids))\n\n def data_dependent_initialize(self, data):\n pass\n\n def eval(self):\n \"\"\"Make models eval mode during test time\"\"\"\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()\n\n def test(self):\n \"\"\"Forward function used in test time.\n\n This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop\n It also calls <compute_visuals> to produce additional visualization results\n \"\"\"\n with torch.no_grad():\n self.forward()\n self.compute_visuals()\n\n def compute_visuals(self):\n \"\"\"Calculate additional output images for visdom and HTML visualization\"\"\"\n pass\n\n def get_image_paths(self):\n \"\"\" Return image paths that are used to load current data\"\"\"\n return self.image_paths\n\n def update_learning_rate(self):\n \"\"\"Update learning rates for all the networks; called at the end of every epoch\"\"\"\n for scheduler in self.schedulers:\n if self.opt.lr_policy == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = %.7f' % lr)\n\n def get_current_visuals(self):\n \"\"\"Return visualization images. train.py will display these images with visdom, and save the images to a HTML\"\"\"\n visual_ret = OrderedDict()\n for name in self.visual_names:\n if isinstance(name, str):\n visual_ret[name] = getattr(self, name)\n return visual_ret\n\n def get_current_losses(self):\n \"\"\"Return traning losses / errors. train.py will print out these errors on console, and save them to a file\"\"\"\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) 
works for both scalar tensor and float number\n return errors_ret\n\n def save_networks(self, epoch):\n \"\"\"Save all the networks to the disk.\n\n Parameters:\n epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n \"\"\"\n for name in self.model_names:\n if isinstance(name, str):\n save_filename = '%s_net_%s.pth' % (epoch, name)\n save_path = os.path.join(self.save_dir, save_filename)\n net = getattr(self, 'net' + name)\n\n if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n torch.save(net.module.cpu().state_dict(), save_path)\n net.cuda(self.gpu_ids[0])\n else:\n torch.save(net.module.state_dict(), save_path)\n\n def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):\n \"\"\"Fix InstanceNorm checkpoints incompatibility (prior to 0.4)\"\"\"\n key = keys[i]\n if i + 1 == len(keys): # at the end, pointing to a parameter/buffer\n if module.__class__.__name__.startswith('InstanceNorm') and \\\n (key == 'running_mean' or key == 'running_var'):\n if getattr(module, key) is None:\n state_dict.pop('.'.join(keys))\n if module.__class__.__name__.startswith('InstanceNorm') and \\\n (key == 'num_batches_tracked'):\n state_dict.pop('.'.join(keys))\n else:\n self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)\n\n def load_networks(self, epoch):\n \"\"\"Load all the networks from the disk.\n\n Parameters:\n epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)\n \"\"\"\n for name in self.model_names:\n if isinstance(name, str):\n load_filename = '%s_net_%s.pth' % (epoch, name)\n if self.opt.isTrain and self.opt.pretrained_name is not None:\n load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)\n else:\n load_dir = self.save_dir\n\n load_path = os.path.join(load_dir, load_filename)\n net = getattr(self, 'net' + name)\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n # if you are using PyTorch newer than 0.4 (e.g., built from\n # GitHub source), you can remove str() on self.device\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n\n # patch InstanceNorm checkpoints prior to 0.4\n # for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop\n # self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))\n net.load_state_dict(state_dict)\n\n def print_networks(self, verbose):\n \"\"\"Print the total number of parameters in the network and (if verbose) network architecture\n\n Parameters:\n verbose (bool) -- if verbose: print the network architecture\n \"\"\"\n print('---------- Networks initialized -------------')\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n if verbose:\n print(net)\n print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))\n print('-----------------------------------------------')\n\n def set_requires_grad(self, nets, requires_grad=False):\n \"\"\"Set requies_grad=Fasle for all the networks to avoid unnecessary computations\n Parameters:\n nets (network list) -- a list of networks\n requires_grad (bool) -- whether the networks require gradients or not\n \"\"\"\n if not isinstance(nets, list):\n nets = [nets]\n for net in nets:\n if net is not None:\n for param in net.parameters():\n param.requires_grad 
= requires_grad\n\n def generate_visuals_for_evaluation(self, data, mode):\n return {}\n"
] | [
[
"torch.device",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.DataParallel"
]
] |
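set_requires_grad() above implements the usual GAN freeze/unfreeze pattern. A minimal self-contained sketch with toy Linear stand-ins for the real generator and discriminator:

import torch
import torch.nn as nn

def set_requires_grad(nets, requires_grad=False):   # same logic as the method above
    if not isinstance(nets, list):
        nets = [nets]
    for net in nets:
        if net is not None:
            for param in net.parameters():
                param.requires_grad = requires_grad

netG = nn.Linear(8, 8)                  # toy stand-ins for the real networks
netD = nn.Linear(8, 1)

set_requires_grad(netD, False)          # generator step: freeze D
netD_out = netD(netG(torch.randn(2, 8)))
netD_out.mean().backward()              # gradients reach netG only
set_requires_grad(netD, True)           # discriminator step: unfreeze D
print(all(p.grad is None for p in netD.parameters()))  # -> True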
youngzhou1999/DI-drive | [
"73f0009503f78cfa1347fcc57b80d274a0b3824f"
] | [
"core/envs/carla_env_wrapper.py"
] | [
"import gym\nimport copy\nimport numpy as np\nfrom typing import Any, Dict, Optional\nfrom easydict import EasyDict\nfrom itertools import product\n\nfrom core.data.benchmark import ALL_SUITES\nfrom core.eval.carla_benchmark_evaluator import get_suites_list, read_pose_txt, get_benchmark_dir\nfrom .base_carla_env import BaseCarlaEnv\nfrom core.utils.others.config_helper import deep_merge_dicts\nfrom ding.envs.env.base_env import BaseEnvTimestep, BaseEnvInfo\nfrom ding.envs.common.env_element import EnvElementInfo\nfrom ding.torch_utils.data_helper import to_ndarray\n\n\nclass CarlaEnvWrapper(gym.Wrapper):\n \"\"\"\n Environment wrapper to make ``gym.Env`` align with DI-engine definitions, so as to use utilities in DI-engine.\n It changes ``step``, ``reset`` and ``info`` method of ``gym.Env``, while others are straightly delivered.\n\n :Arguments:\n - env (BaseCarlaEnv): The environment to be wrapped.\n - cfg (Dict): Config dict.\n\n :Interfaces: reset, step, info, render, seed, close\n \"\"\"\n\n config = dict()\n\n def __init__(self, env: BaseCarlaEnv, cfg: Dict = None, **kwargs) -> None:\n if cfg is None:\n self._cfg = self.__class__.default_config()\n elif 'cfg_type' not in cfg:\n self._cfg = self.__class__.default_config()\n self._cfg = deep_merge_dicts(self._cfg, cfg)\n else:\n self._cfg = cfg\n self.env = env\n\n def reset(self, *args, **kwargs) -> Any:\n \"\"\"\n Wrapper of ``reset`` method in env. The observations are converted to ``np.ndarray`` and final reward\n are recorded.\n\n :Returns:\n Any: Observations from environment\n \"\"\"\n obs = self.env.reset(*args, **kwargs)\n obs = to_ndarray(obs)\n self._final_eval_reward = 0.0\n return obs\n\n def step(self, action: Any = None) -> BaseEnvTimestep:\n \"\"\"\n Wrapper of ``step`` method in env. This aims to convert the returns of ``gym.Env`` step method into\n that of ``ding.envs.BaseEnv``, from ``(obs, reward, done, info)`` tuple to a ``BaseEnvTimestep``\n namedtuple defined in DI-engine. It will also convert actions, observations and reward into\n ``np.ndarray``, and check legality if action contains control signal.\n\n :Arguments:\n - action (Any, optional): Actions sent to env. 
Defaults to None.\n\n :Returns:\n BaseEnvTimestep: DI-engine format of env step returns.\n \"\"\"\n action = to_ndarray(action)\n\n obs, rew, done, info = self.env.step(action)\n self._final_eval_reward += rew\n obs = to_ndarray(obs)\n rew = to_ndarray([rew], dtype=np.float32)\n if done:\n info['final_eval_reward'] = self._final_eval_reward\n return BaseEnvTimestep(obs, rew, done, info)\n\n def info(self) -> BaseEnvInfo:\n \"\"\"\n Interface of ``info`` method to suit DI-engine format env.\n It returns a namedtuple ``BaseEnvInfo`` defined in DI-engine\n which contains information about observation, action and reward space.\n\n :Returns:\n BaseEnvInfo: Env information instance defined in DI-engine.\n \"\"\"\n obs_space = self.env.observation_space\n act_space = self.env.action_space\n return BaseEnvInfo(agent_num=1, obs_space=obs_space, act_space=act_space, use_wrappers=None)\n\n def enable_save_replay(self, replay_path: Optional[str] = None) -> None:\n if replay_path is None:\n replay_path = './video'\n self._replay_path = replay_path\n self.env = gym.wrappers.Monitor(self.env, self._replay_path, video_callable=lambda episode_id: True, force=True)\n\n @classmethod\n def default_config(cls: type) -> EasyDict:\n cfg = EasyDict(cls.config)\n cfg.cfg_type = cls.__name__ + 'Config'\n return copy.deepcopy(cfg)\n\n def __repr__(self) -> str:\n return repr(self.env)\n\n def render(self):\n self.env.render()\n\n\nclass BenchmarkEnvWrapper(CarlaEnvWrapper):\n \"\"\"\n Environment Wrapper for Carla Benchmark suite evaluations. It wraps an environment with Benchmark\n suite so that the env will always run with a benchmark suite setting. It has 2 mode to get reset\n params in a suite: 'random' will randomly get reset param, 'order' will get all reset params in\n order.\n\n :Arguments:\n - env (BaseCarlaEnv): The environment to be wrapped.\n - cfg (Dict): Config dict.\n \"\"\"\n\n config = dict(\n suite='FullTown01-v0',\n benchmark_dir=None,\n mode='random',\n )\n\n def __init__(self, env: BaseCarlaEnv, cfg: Dict, **kwargs) -> None:\n super().__init__(env, cfg=cfg, **kwargs)\n suite = self._cfg.suite\n benchmark_dir = self._cfg.benchmark_dir\n self._mode = self._cfg.mode\n if benchmark_dir is None:\n benchmark_dir = get_benchmark_dir()\n assert self._mode in ['random', 'order'], self._mode\n self._param = dict()\n suite_list = get_suites_list(suite)\n\n self._reset_param_list = []\n for suite in suite_list:\n args, kwargs = ALL_SUITES[suite]\n assert len(args) == 0\n reset_params = kwargs.copy()\n poses_txt = reset_params.pop('poses_txt')\n weathers = reset_params.pop('weathers')\n pose_pairs = read_pose_txt(benchmark_dir, poses_txt)\n for (start, end), weather in product(pose_pairs, weathers):\n param = reset_params.copy()\n param['start'] = start\n param['end'] = end\n param['weather'] = weather\n param['col_is_failure'] = True\n self._reset_param_list.append(param)\n self._reset_param_index = 0\n\n def reset(self, *args, **kwargs) -> Any:\n \"\"\"\n Wrapped ``reset`` method for env. 
it will ignore all incoming arguments and choose one\n from suite reset parameters according to config.\n\n :Returns:\n Any: Returns of Env `reset` method.\n \"\"\"\n if self._mode == 'random':\n self._param = np.random.choice(self._reset_param_list)\n elif self._mode == 'order':\n self._param = self._reset_param_list[self._reset_param_index]\n self._reset_param_index + 1\n if self._reset_param_index >= len(self._reset_param_list):\n self._reset_param_index = 0\n return super().reset(**self._param)\n\n def step(self, action: Dict) -> Any:\n \"\"\"\n Wrapped ``step`` method for Env. It will add a print log when the env is done.\n\n :Arguments:\n - action (Any): Actions sent to env.\n\n :Returns:\n Any: Env step result.\n \"\"\"\n timestep = super().step(action)\n done = timestep.done\n info = timestep.info\n if done:\n done_tick = info['tick']\n done_reward = info['final_eval_reward']\n if info['success']:\n done_state = 'Success'\n elif info['collided']:\n done_state = \"Collided\"\n elif info['wrong_direction']:\n done_state = \"Wrong Direction\"\n elif info['off_road']:\n done_state = \"Off road\"\n elif info['stuck']:\n done_state = \"Stuck\"\n elif info['timeout']:\n done_state = \"Timeout\"\n else:\n done_state = 'None'\n print(\n \"[ENV] {} done with tick: {}, state: {}, reward: {}\".format(\n repr(self.env), done_tick, done_state, done_reward\n )\n )\n return timestep\n\n\n# TODO: complete scenario env wrapper\nclass ScenarioEnvWrapper(CarlaEnvWrapper):\n\n config = dict()\n\n def __init__(self, env: BaseCarlaEnv, cfg: Dict, **kwargs) -> None:\n super().__init__(env, cfg=cfg, **kwargs)\n"
] | [
[
"numpy.random.choice"
]
] |
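A toy sketch of the conversion CarlaEnvWrapper.step() performs above; BaseEnvTimestep here is a local namedtuple stand-in for the DI-engine type, and the env outputs are invented:

import numpy as np
from collections import namedtuple

BaseEnvTimestep = namedtuple('BaseEnvTimestep', ['obs', 'reward', 'done', 'info'])

final_eval_reward = 0.0
obs, rew, done, info = np.zeros(3), 1.5, True, {}   # pretend output of env.step()
final_eval_reward += rew
rew = np.array([rew], dtype=np.float32)             # reward becomes a float32 ndarray
if done:
    info['final_eval_reward'] = final_eval_reward   # episode return on the last step
timestep = BaseEnvTimestep(obs, rew, done, info)
print(timestep.reward, timestep.info)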
PaccMann/paccmann_datasets | [
"0cb0cee349ffab8e227f09f7df0a8bca6a71f22e"
] | [
"pytoda/smiles/smiles_language.py"
] | [
"# Copyright 2020 Matteo Manica, Jannis Born, Ali Oskooei, Joris Cadow\n# Most parts of this file are Licenced under the MIT Licence.\n# Specifically the functions from_pretrained and save_pretrained are derivative\n# works with sources under the following licence:\n# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use these functions except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"SMILES language handling.\"\"\"\nimport copy\nimport json\nimport logging\nimport os\nimport warnings\nfrom collections import Counter\n\nimport rdkit # Needs import before torch in some envs\nimport dill\nimport torch\nfrom selfies import decoder as selfies_decoder\nfrom selfies import encoder as selfies_encoder\n\nfrom ..files import read_smi\nfrom ..transforms import Compose\nfrom ..types import (\n Files,\n Indexes,\n Iterable,\n Sequence,\n Tensor,\n Tokenizer,\n Tokens,\n Tuple,\n Union,\n)\nfrom .processing import TOKENIZER_FUNCTIONS, tokenize_smiles\nfrom .transforms import compose_encoding_transforms, compose_smiles_transforms\n\nlogger = logging.getLogger(__name__)\n\n# mimicry of huggingface tokenizers\n# see PreTrainedTokenizer\nVOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}\n# see PreTrainedTokenizerBase\nTOKENIZER_CONFIG_FILE = 'tokenizer_config.json'\n# our\nTOKEN_COUNTS_FILE = 'token_count.json'\n\n\nclass UnknownMaxLengthError(RuntimeError):\n pass\n\n\nclass SMILESLanguage(object):\n \"\"\"\n SMILESLanguage class.\n\n SMILESLanguage handle SMILES data defining the vocabulary and\n utilities to manipulate it, including encoding to token indexes.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n\n def __init__(\n self,\n name: str = 'smiles-language',\n smiles_tokenizer: Tokenizer = tokenize_smiles,\n tokenizer_name: str = None, # Literal only in python 3.8\n vocab_file: str = None,\n max_token_sequence_length: int = 0,\n ) -> None:\n \"\"\"\n Initialize SMILES language.\n\n Args:\n name (str): name of the SMILESLanguage.\n smiles_tokenizer (Tokenizer): optional SMILES tokenization\n function. Defaults to tokenize_smiles, but tokenizer_name takes\n precedence when found in available TOKENIZER_FUNCTIONS.\n tokenizer_name (str): name, mapping to Tokenizer used to save and\n restore object from text files. Defaults to None, i.e.\n using default smiles_tokenizer. Examples for available names\n are 'smiles', 'selfies' or 'spe_smiles'.\n vocab_file (str): optional filepath to vocab json or directory\n containing it.\n max_token_sequence_length (int): initial value for keeping track\n of longest sequence. 
Defaults to 0.\n \"\"\"\n self.name = name\n self.tokenizer_name = tokenizer_name\n if tokenizer_name is not None and tokenizer_name not in TOKENIZER_FUNCTIONS:\n logger.info(\n f'Given tokenizer_name {tokenizer_name} was not found, using'\n 'default tokenizer function.'\n )\n self.smiles_tokenizer = TOKENIZER_FUNCTIONS.get(\n tokenizer_name, smiles_tokenizer\n )\n\n self.padding_token = '<PAD>'\n self.unknown_token = '<UNK>'\n self.start_token = '<START>'\n self.stop_token = '<STOP>'\n self.padding_index = 0\n self.unknown_index = 1\n self.start_index = 2\n self.stop_index = 3\n self.token_count = Counter()\n self.special_indexes = {\n self.padding_index: self.padding_token,\n self.unknown_index: self.unknown_token,\n self.start_index: self.start_token,\n self.stop_index: self.stop_token,\n }\n self.setup_vocab()\n\n if vocab_file:\n self.load_vocabulary(vocab_file)\n\n # updated when adding smiles\n self.max_token_sequence_length = max_token_sequence_length\n # updated by transformations, e.g. padding\n self._get_total_number_of_tokens_fn = len\n\n # inputs and kwargs for saving and re-loading (TOKENIZER_CONFIG_FILE)\n # (see ``from_pretrained`` and ``save_pretrained``)\n self.init_inputs = ()\n self.init_kwargs = {\n 'name': self.name,\n 'tokenizer_name': self.tokenizer_name,\n 'max_token_sequence_length': self.max_token_sequence_length,\n }\n\n self.transform_smiles = Compose([]) # identity\n self.transform_encoding = Compose([])\n\n def setup_vocab(self) -> None:\n \"\"\"\n Sets up the vocab by generating the special tokens.\n \"\"\"\n # NOTE: include augmentation characters, parenthesis and numbers for\n # rings\n additional_indexes_to_token = dict(\n enumerate(\n list('()')\n + list(map(str, range(1, 10)))\n + list('%{}'.format(index) for index in range(10, 30)),\n start=len(self.special_indexes),\n )\n )\n self.index_to_token = {**self.special_indexes, **additional_indexes_to_token}\n self.number_of_tokens = len(self.index_to_token)\n self.token_to_index = {\n token: index for index, token in self.index_to_token.items()\n }\n\n @staticmethod\n def load(filepath: str) -> 'SMILESLanguage':\n \"\"\"\n Static method to load a SMILESLanguage object.\n\n Args:\n filepath (str): path to the file.\n\n Returns:\n SMILESLanguage: the loaded SMILES language object.\n \"\"\"\n warnings.warn(\n \"Loading languages will use a text files in the future\", FutureWarning\n )\n with open(filepath, 'rb') as f:\n smiles_language = dill.load(f)\n return smiles_language\n\n @staticmethod\n def dump(smiles_language: 'SMILESLanguage', filepath: str):\n \"\"\"\n Static method to save a smiles_language object to disk.\n\n Args:\n smiles_language (SMILESLanguage): a SMILESLanguage object.\n filepath (str): path where to dump the SMILESLanguage.\n \"\"\"\n with open(filepath, 'wb') as f:\n dill.dump(smiles_language, f)\n\n def save(self, filepath: str):\n \"\"\"\n Instance method to save/dump smiles language object.\n\n Args:\n filepath (str): path where to save the SMILESLanguage.\n \"\"\"\n warnings.warn(\n \"Saving languages will only store a text files in the future\", FutureWarning\n )\n SMILESLanguage.dump(self, filepath)\n\n def load_vocabulary(self, vocab_file: str):\n \"\"\"Load a vocabulary mapping from token to token indexes.\n\n Args:\n vocab_file (str): a .json with tokens mapping to index. 
Can also\n be path to directory.\n \"\"\"\n if os.path.isdir(vocab_file):\n vocab_file = os.path.join(vocab_file, self.vocab_files_names['vocab_file'])\n\n with open(vocab_file, encoding=\"utf-8\") as fp:\n vocab = json.load(fp)\n # encoder\n self.token_to_index = self._check_specials(vocab)\n # decoder\n self.index_to_token = {v: k for k, v in self.token_to_index.items()}\n self.number_of_tokens = len(self.index_to_token)\n\n def _check_specials(self, vocab):\n \"\"\"Check that defined special tokens match class definitions.\"\"\"\n for index, token in self.special_indexes.items():\n try:\n if vocab[token] != index:\n warnings.warn(\n f'The vocab does not have matching special tokens: '\n f'{token} is {vocab[token]}, but was defined as '\n f'{index}.',\n )\n except KeyError:\n warnings.warn(f'The vocab is missing a special token: {token}.')\n return vocab\n\n @classmethod\n def from_pretrained(cls, pretrained_path, *init_inputs, **kwargs):\n # directory with vocab files\n # not handling ADDED_TOKENS_FILE or SPECIAL_TOKENS_MAP_FILE\n # only handle case of files on disk here\n # but include handling optional counts\n resolved_vocab_files = {}\n\n additional_files_names = {\n 'tokenizer_config_file': TOKENIZER_CONFIG_FILE,\n 'token_count_file': TOKEN_COUNTS_FILE,\n }\n\n # Look for the tokenizer main vocabulary files\n # and the additional tokens files\n if os.path.isdir(pretrained_path):\n for file_id, file_name in {\n **cls.vocab_files_names,\n **additional_files_names,\n }.items():\n full_file_name = os.path.join(pretrained_path, file_name)\n if not os.path.exists(full_file_name):\n logger.info(\n \"Didn't find file {}. We won't load it.\".format(full_file_name)\n )\n full_file_name = None\n\n resolved_vocab_files[file_id] = full_file_name\n\n # Prepare tokenizer initialization kwargs\n tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None)\n if tokenizer_config_file is not None:\n with open(tokenizer_config_file, encoding='utf-8') as config_file:\n init_kwargs = json.load(config_file)\n saved_init_inputs = init_kwargs.pop(\"init_inputs\", ())\n if not init_inputs:\n init_inputs = saved_init_inputs\n else:\n init_kwargs = {}\n\n # Update with newly provided kwargs\n init_kwargs.update(kwargs)\n\n token_count_file = resolved_vocab_files.pop(\"token_count_file\", None)\n\n # adds remaining (i.e. vocab_file) to kwargs\n for args_name, file_path in resolved_vocab_files.items():\n if args_name not in init_kwargs:\n init_kwargs[args_name] = file_path\n\n # Instantiate tokenizer.\n try:\n tokenizer = cls(*init_inputs, **init_kwargs)\n except OSError:\n raise OSError(\n 'Unable to load vocabulary from file. '\n 'Please check that the provided vocabulary is accessible '\n 'and not corrupted.'\n )\n if token_count_file is not None:\n with open(token_count_file, encoding='utf-8') as counts_file:\n tokenizer.token_count = Counter(json.load(counts_file))\n\n # set args and kwargs explicitly here.\n tokenizer.init_inputs = init_inputs\n tokenizer.init_kwargs = init_kwargs\n\n return tokenizer\n\n def save_vocabulary(self, vocab_file: str) -> Tuple[str]:\n \"\"\"Save the vocabulary mapping tokens to indexes to file.\n\n Args:\n vocab_file (str): a .json to save tokens mapping to index. 
Can also\n be path to directory.\n \"\"\"\n if os.path.isdir(vocab_file):\n vocab_file = os.path.join(vocab_file, self.vocab_files_names['vocab_file'])\n\n with open(vocab_file, 'w', encoding=\"utf-8\") as fp:\n json.dump(self.token_to_index, fp, indent=4)\n\n return (vocab_file,)\n\n def save_pretrained(self, save_directory):\n \"\"\"Save the tokenizer vocabulary files together with\n tokenizer instantiation positional and keywords inputs.\n\n This method make sure the full tokenizer can then be re-loaded\n using the `from_pretrained` class method.\n \"\"\"\n if not os.path.isdir(save_directory):\n # TODO raise?\n logger.error(\n 'Saving directory ({}) should be a directory'.format(save_directory)\n )\n return\n\n tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)\n tokenizer_counts_file = os.path.join(save_directory, TOKEN_COUNTS_FILE)\n\n tokenizer_config = copy.deepcopy(self.init_kwargs)\n if len(self.init_inputs) > 0:\n tokenizer_config['init_inputs'] = copy.deepcopy(self.init_inputs)\n for file_id in self.vocab_files_names.keys():\n tokenizer_config.pop(file_id, None)\n\n with open(tokenizer_config_file, 'w', encoding='utf-8') as fp:\n json.dump(tokenizer_config, fp=fp, ensure_ascii=False, indent=4)\n\n with open(tokenizer_counts_file, 'w', encoding='utf-8') as fp:\n json.dump(self.token_count, fp=fp, ensure_ascii=False, indent=4)\n\n vocab_files = self.save_vocabulary(save_directory)\n\n return vocab_files + (tokenizer_counts_file,)\n\n def _load_vocabulary_from_pickled_language(\n self, filepath: str, include_metadata: bool = False\n ) -> None:\n \"\"\"Save the vocabulary mapping tokens to indexes from file.\n\n Args:\n filepath (str): path to the dump of the SMILESLanguage.\n \"\"\"\n a_language = self.load(filepath)\n # encoder\n self.token_to_index = self._check_specials(a_language.token_to_index)\n # decoder\n self.index_to_token = {v: k for k, v in self.token_to_index.items()}\n self.number_of_tokens = len(self.index_to_token)\n\n def _from_legacy_pickled_language(self, filepath: str) -> None:\n \"\"\"Load a current language instance from pickled legacy language.\n\n Args:\n filepath (str): path to the dump of the SMILESLanguage.\n \"\"\"\n warnings.warn(\n \"Loading from legacy languages will be deprecated\", DeprecationWarning\n )\n a_language = self.load(filepath)\n # encoder\n # missing special tokens\n self.token_to_index = a_language.token_to_index\n self.token_to_index.update({t: i for i, t in self.special_indexes.items()})\n # decoder\n self.index_to_token = {v: k for k, v in self.token_to_index.items()}\n self.number_of_tokens = len(self.index_to_token)\n\n self.max_token_sequence_length = a_language.max_token_sequence_length # noqa\n self.init_kwargs['max_token_sequence_length'] = self.max_token_sequence_length\n self.token_count = a_language._token_count\n\n def _update_max_token_sequence_length(self, tokens: Tokens) -> None:\n \"\"\"\n Update the max token sequence length.\n Uses method possibly overloaded by transformation setup to assess the\n length of tokens after transformations prior to their application.\n For example this allows handling start and stop tokens.\n\n Args:\n tokens (Tokens): tokens considered.\n \"\"\"\n total_number_of_tokens = self._get_total_number_of_tokens_fn(tokens)\n if total_number_of_tokens > self.max_token_sequence_length:\n self.max_token_sequence_length = total_number_of_tokens\n self.init_kwargs['max_token_sequence_length'] = total_number_of_tokens\n\n def _update_language_dictionaries_with_tokens(self, 
tokens: Tokens) -> None:\n \"\"\"\n Update the language dictionaries with provided tokens.\n\n Args:\n tokens (Tokens): tokens considered.\n \"\"\"\n # count tokens\n tokens_counter = Counter(tokens)\n # index to token\n index_to_token = dict(\n enumerate(\n tokens_counter.keys() - self.token_to_index.keys(),\n self.number_of_tokens,\n )\n )\n # update language\n self.token_count += tokens_counter\n self.index_to_token.update(index_to_token)\n self.token_to_index.update(\n {token: index for index, token in index_to_token.items()}\n )\n self.number_of_tokens += len(index_to_token)\n\n def add_smis(\n self,\n smi_filepaths: Files,\n index_col: int = 1,\n chunk_size: int = 10000,\n name: str = 'SMILES',\n names: Sequence[str] = None,\n ) -> None:\n \"\"\"\n Add a set of SMILES from a list of .smi files, applying\n `transform_smiles`.\n\n Args:\n smi_filepaths (Files): a list of paths to .smi files.\n index_col (int): Data column used for indexing, defaults to 1.\n chunk_size (int): size of the chunks. Defaults to 10000.\n name (str): type of dataset, used to index columns in smi, and must\n be in names. Defaults to 'SMILES'.\n names (Sequence[str]): User-assigned names given to the columns.\n Defaults to `[name]`.\n \"\"\"\n for smi_filepath in smi_filepaths:\n self.add_smi(\n smi_filepath,\n index_col=index_col,\n chunk_size=chunk_size,\n name=name,\n names=names,\n )\n\n def add_smi(\n self,\n smi_filepath: str,\n index_col: int = 1,\n chunk_size: int = 10000,\n name: str = 'SMILES',\n names: Sequence[str] = None,\n ) -> None:\n \"\"\"\n Add a set of SMILES from a .smi file, applying `transform_smiles`.\n\n Args:\n smi_filepath (str): path to the .smi file.\n index_col (int): Data column used for indexing, defaults to 1.\n chunk_size (int): number of rows to read in a chunk.\n Defaults to 100000.\n name (str): type of dataset, used to index columns in smi, and must\n be in names. Defaults to 'SMILES'.\n names (Sequence[str]): User-assigned names given to the columns.\n Defaults to `[name]`.\n \"\"\"\n names = names or [name]\n try:\n for chunk in read_smi(\n smi_filepath, index_col=index_col, chunk_size=chunk_size, names=names\n ):\n for smiles in chunk[name]:\n try:\n transformed_smiles = self.transform_smiles(smiles)\n self.add_smiles(transformed_smiles)\n except Exception:\n logger.warning(\n 'transformation of smiles or adding result to '\n f'the language failed for: {smiles}'\n )\n except IndexError:\n raise IndexError('There must be one name per column in names.')\n except KeyError as error:\n raise KeyError(\n f'{str(error)}. Check index_col and that name {name} is in '\n f' names {names}'\n )\n\n def add_dataset(self, dataset: Iterable):\n \"\"\"\n Add a set of SMILES from an iterable, applying `transform_smiles`.\n\n Collects and warns about invalid SMILES, and warns on finding new\n tokens.\n\n Args:\n dataset (Iterable): returning SMILES strings.\n \"\"\"\n initial_vocab_length = len(self.token_to_index)\n self.invalid_molecules = []\n self.failed_transform_smiles = []\n\n for index, smiles in enumerate(dataset):\n if rdkit.Chem.MolFromSmiles(smiles, sanitize=False) is None:\n self.invalid_molecules.append((index, smiles))\n else:\n try:\n transformed_smiles = self.transform_smiles(smiles)\n except Exception:\n self.failed_transform_smiles.append((index, smiles))\n else:\n self.add_smiles(transformed_smiles)\n\n # Raise warning about invalid molecules\n if len(self.invalid_molecules) > 0:\n logger.warning(\n f'NOTE: We found {len(self.invalid_molecules)} invalid '\n 'smiles. 
Check the warning trace and inspect the attribute '\n '`invalid_molecules`. To remove invalid SMILES in your .smi '\n 'file, we recommend using '\n '`pytoda.preprocessing.smi.smi_cleaner`.'\n )\n # Raise warning about failed transformations\n if len(self.failed_transform_smiles) > 0:\n logger.warning(\n f'NOTE: We found {len(self.failed_transform_smiles)} smiles '\n 'that failed to be transformed (excluding invalid smiles). '\n 'Inspect the attribute `failed_transform_smiles`.'\n )\n\n # Raise warning if new tokens were added.\n if len(self.token_to_index) > initial_vocab_length:\n logger.warning(\n f'{len(self.token_to_index) - initial_vocab_length}'\n ' new token(s) were added to SMILES language.'\n )\n\n def add_smiles(self, smiles: str) -> None:\n \"\"\"\n Add a SMILES to the language.\n\n Updates `max_token_sequence_length`.\n Adds missing tokens to the language.\n\n Args:\n smiles (str): a SMILES representation.\n \"\"\"\n tokens = self.smiles_tokenizer(smiles)\n self._update_max_token_sequence_length(tokens)\n self._update_language_dictionaries_with_tokens(tokens)\n\n def add_token(self, token: str) -> None:\n \"\"\"\n Add a token to the language.\n\n Args:\n token (str): a token.\n \"\"\"\n if token in self.token_to_index:\n self.token_count[token] += 1\n else:\n self.token_to_index[token] = self.number_of_tokens\n self.token_count[token] = 1\n self.index_to_token[self.number_of_tokens] = token\n self.number_of_tokens += 1\n\n def smiles_to_token_indexes(self, smiles: str) -> Union[Indexes, Tensor]:\n \"\"\"\n Transform character-level SMILES into a sequence of token indexes.\n\n Args:\n smiles (str): a SMILES (or SELFIES) representation.\n\n Returns:\n Union[Indexes, Tensor]: indexes representation for the\n SMILES/SELFIES provided.\n \"\"\"\n return self.transform_encoding(\n [\n self.token_to_index.get(token, self.unknown_index)\n for token in self.smiles_tokenizer(self.transform_smiles(smiles))\n ]\n )\n\n def token_indexes_to_smiles(self, token_indexes: Union[Indexes, Tensor]) -> str:\n \"\"\"\n Transform a sequence of token indexes into SMILES, ignoring special\n tokens.\n\n Args:\n token_indexes (Union[Indexes, Tensor]): Sequence of integers\n representing tokens in vocabulary.\n\n Returns:\n str: a SMILES (or SELFIES) representation.\n \"\"\"\n token_indexes = self.tensor_to_indexes(token_indexes)\n\n return ''.join(\n [\n self.index_to_token.get(token_index, '')\n for token_index in token_indexes\n # consider only valid SMILES token indexes\n if token_index not in self.special_indexes\n ]\n )\n\n @staticmethod\n def tensor_to_indexes(token_indexes: Union[Indexes, Tensor]) -> Indexes:\n \"\"\"Utility to get Indexes from Tensors.\n\n Args:\n token_indexes (Union[Indexes, Tensor]): from single SMILES.\n\n Raises:\n ValueError: in case the Tensor is not shaped correctly\n\n Returns:\n Indexes: list from Tensor or else the initial token_indexes.\n \"\"\"\n if isinstance(token_indexes, torch.Tensor):\n if token_indexes.ndim != 1:\n raise ValueError('Only token indexes for a single SMILES are supported')\n return token_indexes.numpy().flatten().tolist()\n\n return token_indexes\n\n def selfies_to_smiles(self, selfies: str) -> str:\n \"\"\"\n SELFIES to SMILES converter method.\n Based on: https://arxiv.org/abs/1905.13741\n\n Arguments:\n selfies (str): SELFIES representation\n\n Returns:\n str: A SMILES string\n \"\"\"\n if not isinstance(selfies, str):\n raise TypeError(f'Wrong data type: {type(selfies)}. 
Use strings.')\n try:\n return selfies_decoder(selfies)\n except Exception:\n logger.warning(\n f'Could not convert SELFIES {selfies} to SMILES, returning '\n 'the SELFIES instead'\n )\n return selfies\n\n def smiles_to_selfies(self, smiles: str) -> str:\n \"\"\"\n SMILES to SELFIES converter method.\n Based on: https://arxiv.org/abs/1905.13741\n\n Arguments:\n smiles (str): SMILES representation\n\n Returns:\n str: A SELFIES string\n \"\"\"\n if not isinstance(smiles, str):\n raise TypeError(f'Wrong data type: {type(smiles)}. Use strings.')\n try:\n return selfies_encoder(smiles)\n except Exception:\n logger.warning(\n f'Could not convert SMILES {smiles} to SELFIES, returning '\n 'the SMILES instead'\n )\n return smiles\n\n\nclass SELFIESLanguage(SMILESLanguage):\n \"\"\"\n SELFIESLanguage is a SMILESLanguage with a different default tokenizer,\n transforming SMILES to SELFIES.\n \"\"\"\n\n def __init__(\n self,\n name: str = 'selfies-language',\n vocab_file: str = None,\n max_token_sequence_length: int = 0,\n ) -> None:\n \"\"\"\n Initialize SMILES language.\n\n Args:\n name (str): name of the SMILESLanguage.\n vocab_file (str): optional filepath to vocab json or directory\n containing it.\n max_token_sequence_length (int): initial value for keeping track\n of longest sequence. Defaults to 0.\n \"\"\"\n super().__init__(\n name=name,\n tokenizer_name='selfies',\n vocab_file=vocab_file,\n max_token_sequence_length=max_token_sequence_length,\n )\n self.transform_smiles = selfies_encoder\n\n\nclass SMILESTokenizer(SMILESLanguage):\n \"\"\"\n SMILESTokenizer class, based on SMILESLanguage applying transforms and\n and encoding of SMILES string to sequence of token indexes.\n \"\"\"\n\n def __init__(\n self,\n name: str = 'smiles-language',\n smiles_tokenizer: Tokenizer = tokenize_smiles,\n tokenizer_name: str = None,\n vocab_file: str = None,\n max_token_sequence_length: int = 0,\n canonical: bool = False,\n augment: bool = False,\n kekulize: bool = False,\n all_bonds_explicit: bool = False,\n all_hs_explicit: bool = False,\n remove_bonddir: bool = False,\n remove_chirality: bool = False,\n selfies: bool = False,\n sanitize: bool = True,\n randomize: bool = False,\n add_start_and_stop: bool = False,\n padding: bool = False,\n padding_length: int = None,\n device: torch.device = torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu'\n ),\n ) -> None:\n \"\"\"\n Initialize SMILES language.\n\n Args:\n name (str): name of the SMILESLanguage.\n smiles_tokenizer (Tokenizer): optional SMILES tokenization\n function. Defaults to tokenize_smiles, but tokenizer_name takes\n precedence when found in available TOKENIZER_FUNCTIONS.\n tokenizer_name (str): optional name mapping to Tokenizer. Defaults\n to None, i.e. using default smiles_tokenizer.\n vocab_file (str): optional filepath to vocab json or directory\n containing it.\n max_token_sequence_length (int): initial value for keeping track\n of longest sequence. Defaults to 0.\n canonical (bool): performs canonicalization of SMILES (one\n original string for one molecule), if True, then other\n transformations (augment etc, see below) do not apply\n augment (bool): perform SMILES augmentation. Defaults to False.\n kekulize (bool): kekulizes SMILES (implicit aromaticity only).\n Defaults to False.\n all_bonds_explicit (bool): Makes all bonds explicit. Defaults to\n False, only applies if kekulize = True.\n all_hs_explicit (bool): Makes all hydrogens explicit. 
Defaults to\n False, only applies if kekulize = True.\n randomize (bool): perform a true randomization of SMILES tokens.\n Defaults to False.\n remove_bonddir (bool): Remove directional info of bonds.\n Defaults to False.\n remove_chirality (bool): Remove chirality information.\n Defaults to False.\n selfies (bool): Whether selfies is used instead of smiles, defaults\n to False.\n sanitize (bool): Sanitize SMILES. Defaults to True.\n add_start_and_stop (bool): add start and stop token indexes.\n Defaults to False.\n padding (bool): pad sequences from the left to matching length.\n Defaults to False.\n padding_length (int): common length of all token sequences,\n applies only if padding is True. See `set_max_padding` to set\n it to longest token sequence the smiles language encountered.\n Defaults to None.\n device (torch.device): device where the tensors are stored.\n Defaults to gpu, if available.\n\n NOTE:\n See `set_smiles_transforms` and `set_encoding_transforms` to change\n the transforms temporarily and reset with\n `reset_initial_transforms`. Assignment of class attributes\n in the parameter list will trigger such a reset.\n \"\"\"\n super().__init__(\n name=name,\n smiles_tokenizer=smiles_tokenizer,\n tokenizer_name=tokenizer_name,\n vocab_file=vocab_file,\n max_token_sequence_length=max_token_sequence_length,\n )\n # smiles transforms\n self.canonical = canonical\n self.augment = augment\n self.kekulize = kekulize\n self.all_bonds_explicit = all_bonds_explicit\n self.all_hs_explicit = all_hs_explicit\n self.remove_bonddir = remove_bonddir\n self.remove_chirality = remove_chirality\n self.selfies = selfies\n self.sanitize = sanitize\n # encoding transforms\n self.randomize = randomize\n self.add_start_and_stop = add_start_and_stop\n self.padding = padding\n self.padding_length = padding_length\n self.device = device\n\n self._init_attributes = [ # additions to init_kwargs for pretrained\n 'canonical',\n 'augment',\n 'kekulize',\n 'all_bonds_explicit',\n 'all_hs_explicit',\n 'remove_bonddir',\n 'remove_chirality',\n 'selfies',\n 'sanitize',\n 'randomize',\n 'add_start_and_stop',\n 'padding',\n 'padding_length',\n ]\n # update save/load pretrained kwargs\n for keyword in self._init_attributes:\n self.init_kwargs[keyword] = getattr(self, keyword)\n\n self.reset_initial_transforms()\n\n self._attributes_to_trigger_reset = [\n *self._init_attributes,\n 'device',\n 'start_index',\n 'stop_index',\n ] # could be updated in inheritance\n\n # only now 'activate' setter that resets the transforms and warns on\n # truncating padding_length\n self._initialized = True\n\n def __setattr__(self, name, value):\n \"\"\"Also updates the transforms if the set attribute affects them.\"\"\"\n super().__setattr__(name, value)\n if self.__dict__.get('_initialized'):\n if name in self._attributes_to_trigger_reset:\n self.reset_initial_transforms()\n if name in self._init_attributes:\n self.init_kwargs[name] = value\n if name == 'padding_length' and self.padding:\n if self.max_token_sequence_length > value:\n logger.warning(\n 'The language has seen sequences of length '\n f'{self.max_token_sequence_length} that will be '\n 'truncated by given padding length of '\n f'{value}. Consider `set_max_padding`.'\n )\n\n @staticmethod\n def __get_total_number_of_tokens_with_start_stop_fn(x):\n return len(x) + 2\n\n def _set_token_len_fn(self, add_start_and_stop):\n \"\"\"\n Defines a Callable that given a sequence of naive tokens, i.e. 
before\n applying the encoding transforms, computes the number of\n implicit tokens after transforms (implicit because it's the\n number of token indexes, not actual tokens).\n \"\"\"\n if add_start_and_stop:\n self._get_total_number_of_tokens_fn = (\n self.__get_total_number_of_tokens_with_start_stop_fn\n )\n else:\n self._get_total_number_of_tokens_fn = len\n\n def set_max_padding(self):\n \"\"\"\n Set padding_length that does not truncate any sequence. Requires\n updated max_token_sequence_length.\n\n Raises:\n UnknownMaxLengthError: When max_token_sequence_length is 0 because\n no SMILES were added to the language.\n \"\"\"\n if self.max_token_sequence_length == 0:\n raise UnknownMaxLengthError(\n 'No check possible for naive SMILESTokenizer. Instance needs '\n 'a pass over the data, setting max_token_sequence_length. '\n 'See for example `add_smis`, `add_dataset` or `add_smiles` '\n 'methods.'\n )\n\n # also triggers reset of transforms\n self.padding_length = self.max_token_sequence_length\n\n def reset_initial_transforms(self):\n \"\"\"Reset smiles and token indexes transforms as on initialization.\"\"\"\n self.transform_smiles = compose_smiles_transforms(\n self.canonical,\n self.augment,\n self.kekulize,\n self.all_bonds_explicit,\n self.all_hs_explicit,\n self.remove_bonddir,\n self.remove_chirality,\n self.selfies,\n self.sanitize,\n )\n self.transform_encoding = compose_encoding_transforms(\n self.randomize,\n self.add_start_and_stop,\n self.start_index,\n self.stop_index,\n self.padding,\n self.padding_length,\n self.padding_index,\n self.device,\n )\n self._set_token_len_fn(self.add_start_and_stop)\n\n def set_smiles_transforms(\n self,\n canonical=None,\n augment=None,\n kekulize=None,\n all_bonds_explicit=None,\n all_hs_explicit=None,\n remove_bonddir=None,\n remove_chirality=None,\n selfies=None,\n sanitize=None,\n ):\n \"\"\"Helper function to reversibly change steps of the transforms.\"\"\"\n self.transform_smiles = compose_smiles_transforms(\n canonical=canonical if canonical is not None else self.canonical,\n augment=augment if augment is not None else self.augment,\n kekulize=kekulize if kekulize is not None else self.kekulize,\n all_bonds_explicit=all_bonds_explicit\n if all_bonds_explicit is not None\n else self.all_bonds_explicit,\n all_hs_explicit=all_hs_explicit\n if all_hs_explicit is not None\n else self.all_hs_explicit,\n remove_bonddir=remove_bonddir\n if remove_bonddir is not None\n else self.remove_bonddir,\n remove_chirality=remove_chirality\n if remove_chirality is not None\n else self.remove_chirality,\n selfies=selfies if selfies is not None else self.selfies,\n sanitize=sanitize if sanitize is not None else self.sanitize,\n )\n\n def set_encoding_transforms(\n self,\n randomize=None,\n add_start_and_stop=None,\n padding=None,\n padding_length=None,\n device=None,\n ):\n \"\"\"Helper function to reversibly change steps of the transforms.\"\"\"\n self.transform_encoding = compose_encoding_transforms(\n randomize=randomize if randomize is not None else self.randomize,\n add_start_and_stop=add_start_and_stop\n if add_start_and_stop is not None\n else self.add_start_and_stop,\n start_index=self.start_index,\n stop_index=self.stop_index,\n padding=padding if padding is not None else self.padding,\n padding_length=padding_length\n if padding_length is not None\n else self.padding_length,\n padding_index=self.padding_index,\n device=device if device is not None else self.device,\n )\n if add_start_and_stop is not None:\n 
self._set_token_len_fn(add_start_and_stop)\n"
] | [
[
"torch.cuda.is_available"
]
] |
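The SMILESLanguage code above builds a token vocabulary incrementally and round-trips SMILES through smiles_to_token_indexes / token_indexes_to_smiles. A minimal usage sketch of the SMILESTokenizer subclass, assuming pytoda is installed; the import path is an assumption, not shown in the listing:

from pytoda.smiles import SMILESTokenizer  # import path assumed

# Pad every encoding to 16 indexes and bracket it with start/stop tokens,
# two of the options exposed by the __init__ shown above.
lang = SMILESTokenizer(add_start_and_stop=True, padding=True, padding_length=16)
lang.add_smiles('CCO')                      # grow the vocabulary from ethanol
idx = lang.smiles_to_token_indexes('CCO')   # encode to a padded index sequence
print(lang.token_indexes_to_smiles(idx))    # 'CCO'; special tokens are dropped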
sukritingupta/numpy | [
"bc0e4415614ef316cd0503c031de3f15931dc28c"
] | [
"numpy/typing/tests/data/pass/simple.py"
] | [
"\"\"\"Simple expression that should pass with mypy.\"\"\"\nimport operator\n\nimport numpy as np\nfrom collections.abc import Iterable\n\n# Basic checks\narray = np.array([1, 2])\n\n\ndef ndarray_func(x):\n # type: (np.ndarray) -> np.ndarray\n return x\n\n\nndarray_func(np.array([1, 2]))\narray == 1\narray.dtype == float\n\n# Dtype construction\nnp.dtype(float)\nnp.dtype(np.float64)\nnp.dtype(None)\nnp.dtype(\"float64\")\nnp.dtype(np.dtype(float))\nnp.dtype((\"U\", 10))\nnp.dtype((np.int32, (2, 2)))\n# Define the arguments on the previous line to prevent bidirectional\n# type inference in mypy from broadening the types.\ntwo_tuples_dtype = [(\"R\", \"u1\"), (\"G\", \"u1\"), (\"B\", \"u1\")]\nnp.dtype(two_tuples_dtype)\n\nthree_tuples_dtype = [(\"R\", \"u1\", 2)]\nnp.dtype(three_tuples_dtype)\n\nmixed_tuples_dtype = [(\"R\", \"u1\"), (\"G\", np.unicode_, 1)]\nnp.dtype(mixed_tuples_dtype)\n\nshape_tuple_dtype = [(\"R\", \"u1\", (2, 2))]\nnp.dtype(shape_tuple_dtype)\n\nshape_like_dtype = [(\"R\", \"u1\", (2, 2)), (\"G\", np.unicode_, 1)]\nnp.dtype(shape_like_dtype)\n\nobject_dtype = [(\"field1\", object)]\nnp.dtype(object_dtype)\n\nnp.dtype((np.int32, (np.int8, 4)))\n\n# Dtype comparison\nnp.dtype(float) == float\nnp.dtype(float) != np.float64\nnp.dtype(float) < None\nnp.dtype(float) <= \"float64\"\nnp.dtype(float) > np.dtype(float)\nnp.dtype(float) >= np.dtype((\"U\", 10))\n\n# Iteration and indexing\ndef iterable_func(x):\n # type: (Iterable) -> Iterable\n return x\n\n\niterable_func(array)\n[element for element in array]\niter(array)\nzip(array, array)\narray[1]\narray[:]\narray[...]\narray[:] = 0\n\narray_2d = np.ones((3, 3))\narray_2d[:2, :2]\narray_2d[..., 0]\narray_2d[:2, :2] = 0\n\n# Other special methods\nlen(array)\nstr(array)\narray_scalar = np.array(1)\nint(array_scalar)\nfloat(array_scalar)\n# currently does not work due to https://github.com/python/typeshed/issues/1904\n# complex(array_scalar)\nbytes(array_scalar)\noperator.index(array_scalar)\nbool(array_scalar)\n\n# comparisons\narray < 1\narray <= 1\narray == 1\narray != 1\narray > 1\narray >= 1\n1 < array\n1 <= array\n1 == array\n1 != array\n1 > array\n1 >= array\n\n# binary arithmetic\narray + 1\n1 + array\narray += 1\n\narray - 1\n1 - array\narray -= 1\n\narray * 1\n1 * array\narray *= 1\n\nnonzero_array = np.array([1, 2])\narray / 1\n1 / nonzero_array\nfloat_array = np.array([1.0, 2.0])\nfloat_array /= 1\n\narray // 1\n1 // nonzero_array\narray //= 1\n\narray % 1\n1 % nonzero_array\narray %= 1\n\ndivmod(array, 1)\ndivmod(1, nonzero_array)\n\narray ** 1\n1 ** array\narray **= 1\n\narray << 1\n1 << array\narray <<= 1\n\narray >> 1\n1 >> array\narray >>= 1\n\narray & 1\n1 & array\narray &= 1\n\narray ^ 1\n1 ^ array\narray ^= 1\n\narray | 1\n1 | array\narray |= 1\n\n# unary arithmetic\n-array\n+array\nabs(array)\n~array\n\n# Other methods\nnp.array([1, 2]).transpose()\n"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.dtype"
]
] |
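The simple.py listing above is one of numpy's static-typing "pass" fixtures: every expression in it must type-check cleanly under mypy rather than assert anything at runtime. A sketch of exercising such a fixture, assuming mypy is installed:

import subprocess, sys

# Exit code 0 means every expression in the fixture type-checked.
result = subprocess.run(
    [sys.executable, '-m', 'mypy', 'numpy/typing/tests/data/pass/simple.py'],
    capture_output=True, text=True,
)
print(result.returncode, result.stdout)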
santiagosilas/Flask-Bootstrap-ML-Dashboard | [
"f77b82ec6461a7eee36ecb06eff740471b7a8e4a"
] | [
"website/app/models.py"
] | [
"from sklearn.datasets import load_breast_cancer, load_boston, load_diabetes\n\n# Dictionary-like object, the interesting attributes are: \n# data:, the data to learn, \n# target: the classification labels, \n# target_names: the meaning of the labels, \n# feature_names: the meaning of the features\nds1 = load_breast_cancer()\n\nds2 = load_boston()\n\nds3= load_diabetes()\n\ndef get_data(scenario):\n if scenario == '1':\n data = [list(sample) for sample in ds1.data[:10] ]\n columns = [column.capitalize() for column in ds1.feature_names]\n elif scenario == '2':\n data = [list(sample) for sample in ds2.data[:10] ]\n columns = [column.capitalize() for column in ds2.feature_names]\n else:\n data = [list(sample) for sample in ds3.data[:10] ]\n columns = [column.capitalize() for column in ds3.feature_names]\n\n return data, columns\n"
] | [
[
"sklearn.datasets.load_diabetes",
"sklearn.datasets.load_boston",
"sklearn.datasets.load_breast_cancer"
]
] |
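get_data() above slices the first ten samples of the chosen sklearn toy dataset and capitalizes its feature names. A quick check, with the module path assumed from the file listing:

from website.app.models import get_data  # path assumed from the listing

rows, columns = get_data('1')   # '1' -> breast cancer, '2' -> boston, else diabetes
print(len(rows), len(columns))  # 10 rows and one capitalized name per feature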
anoopjk/SFM | [
"467ad148e6813dc34faa70163dcbf45700ace082"
] | [
"src/sfm_graph/pair_graph.py"
] | [
"import numpy as np \n\nclass PairGraph(object):\n\n def __init__(self, pair_img_idxs=[], pose=np.eye(4),\n pts_3d=np.empty((0,0)), pts1_2d=np.empty((0,0)), pts2_2d=np.empty((0,0)), f=1.0):\n \n ncameras = 2\n self.f = f\n self.motion = np.zeros((ncameras,3,4))\n self.motion[0,:,:] = np.eye(3,4)\n self.motion[1,:,:] = pose[:3,:]\n self.camera_center = np.zeros((3,1))\n self.structure = pts_3d\n self.frame_idxs = pair_img_idxs\n \n # self.matches = np.hstack((pts1_2d, pts2_2d))\n N = pts1_2d.shape[0]\n self.kpts = np.zeros((ncameras,N,2)) #np.hstack((pts1_2d, pts2_2d))\n self.kpts[0,:,:] = pts1_2d\n self.kpts[1,:,:] = pts2_2d\n self.kpts_idxs = np.zeros((N,ncameras), dtype=np.int32)\n self.kpts_idxs[:,0] = np.arange(0,N)\n self.kpts_idxs[:,1] = np.arange(0,N)\n\n\n\n\n\n\n\n\n \n\n\n"
] | [
[
"numpy.arange",
"numpy.empty",
"numpy.zeros",
"numpy.eye"
]
] |
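PairGraph above packs a two-view reconstruction fragment: motion holds one 3x4 camera matrix per view and kpts stacks the matched 2-D keypoints. A synthetic construction sketch; the import path is assumed from src/sfm_graph/pair_graph.py:

import numpy as np
from pair_graph import PairGraph  # import path assumed

N = 5
g = PairGraph(
    pair_img_idxs=[0, 1],
    pose=np.eye(4),                # relative pose of the second camera
    pts_3d=np.zeros((N, 3)),       # triangulated 3-D structure
    pts1_2d=np.random.rand(N, 2),  # keypoints in image 1
    pts2_2d=np.random.rand(N, 2),  # matched keypoints in image 2
    f=800.0,
)
print(g.motion.shape, g.kpts.shape)  # (2, 3, 4) and (2, 5, 2)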
elbecerrasoto/gym-automata | [
"145e9549029fe8effa827979b3f90016a681fffa"
] | [
"gym_cellular_automata/forest_fire/bulldozer/utils/render.py"
] | [
"\"\"\"\nThe render of the bulldozer consists of four subplots:\n1. Local Grid\n + Grid centered at current position, visualizes agent's micromanagment\n2. Global Grid\n + Whole grid view, visualizes agent's strategy\n3. Gauge\n + Shows time until next CA update\n4. Counts\n + Shows Forest vs No Forest cell counts. Translates on how well the agent is doing.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom gym_cellular_automata.forest_fire.utils.neighbors import moore_n\nfrom gym_cellular_automata.forest_fire.utils.render import (\n EMOJIFONT,\n TITLEFONT,\n align_marker,\n clear_ax,\n get_norm_cmap,\n parse_svg_into_mpl,\n plot_grid,\n)\n\nfrom . import svg_paths\nfrom .config import CONFIG\n\n# Figure Globals\nFIGSIZE = (15, 12)\nFIGSTYLE = \"seaborn-whitegrid\"\n\nTITLE_SIZE = 42\nTITLE_POS = {\"x\": 0.121, \"y\": 0.96}\nTITLE_ALIGN = \"left\"\n\nCOLOR_EMPTY = \"#DDD1D3\" # Gray\nCOLOR_BURNED = \"#DFA4A0\" # Light-Red\nCOLOR_TREE = \"#A9C499\" # Green\nCOLOR_FIRE = \"#E68181\" # Salmon-Red\n\nEMPTY = CONFIG[\"cell_symbols\"][\"empty\"]\nBURNED = CONFIG[\"cell_symbols\"][\"burned\"]\nTREE = CONFIG[\"cell_symbols\"][\"tree\"]\nFIRE = CONFIG[\"cell_symbols\"][\"fire\"]\nNROWS = CONFIG[\"grid_shape\"][\"nrows\"]\nNCOLS = CONFIG[\"grid_shape\"][\"ncols\"]\n\n\n# Assumes that cells values are in ascending order and paired with its colors\nCOLORS = [COLOR_EMPTY, COLOR_BURNED, COLOR_TREE, COLOR_FIRE]\nCELLS = [EMPTY, BURNED, TREE, FIRE]\nNORM, CMAP = get_norm_cmap(CELLS, COLORS)\n\n# Local Grid\nN_LOCAL = 3 # n x n local grid size\nMARKBULL_SIZE = 52\n\n# Global Grid\nMARKFSEED_SIZE = 62\nMARKLOCATION_SIZE = 62\n\n# Gauge\nCOLOR_GAUGE = \"#D4CCDB\" # \"Gray-Purple\"\nCYCLE_SYMBOL = \"\\U0001f504\"\nCYCLE_SIZE = 32\n\n# Counts\nTREE_SYMBOL = \"\\U0001f332\"\nBURNED_SYMBOL = \"\\ue08a\"\n\n\ndef render(env):\n grid = env.grid\n ca_params, pos, time = env.context\n\n local_grid = moore_n(N_LOCAL, pos, grid, EMPTY)\n pos_fseed = env._fire_seed\n\n TITLE = \"ForestFireBulldozer\"+str(NROWS)+\"x\"+str(NCOLS)+\"-v2\"\n\n plt.style.use(FIGSTYLE)\n fig_shape = (12, 14)\n fig = plt.figure(figsize=FIGSIZE)\n fig.suptitle(\n TITLE,\n font=TITLEFONT,\n fontsize=TITLE_SIZE,\n **TITLE_POS,\n color=\"0.6\",\n ha=TITLE_ALIGN\n )\n\n ax_lgrid = plt.subplot2grid(fig_shape, (0, 0), colspan=8, rowspan=10)\n ax_ggrid = plt.subplot2grid(fig_shape, (0, 8), colspan=6, rowspan=6)\n ax_gauge = plt.subplot2grid(fig_shape, (10, 0), colspan=8, rowspan=2)\n ax_counts = plt.subplot2grid(fig_shape, (6, 8), colspan=6, rowspan=6)\n\n plot_local(ax_lgrid, local_grid)\n\n plot_global(ax_ggrid, grid, pos, pos_fseed)\n\n plot_gauge(ax_gauge, time)\n\n d = env.count_cells()\n counts = d[EMPTY], d[BURNED], d[TREE], d[FIRE]\n plot_counts(ax_counts, *counts)\n\n return plt.gcf()\n\n\ndef plot_local(ax, grid):\n nrows, ncols = grid.shape\n mid_row, mid_col = nrows // 2, nrows // 2\n\n plot_grid(ax, grid, interpolation=\"none\", cmap=CMAP, norm=NORM)\n\n markbull = parse_svg_into_mpl(svg_paths.BULLDOZER)\n ax.plot(mid_col, mid_row, marker=markbull, markersize=MARKBULL_SIZE, color=\"1.0\")\n\n\ndef plot_global(ax, grid, pos, pos_fseed):\n ax.imshow(grid, interpolation=\"none\", cmap=CMAP, norm=NORM)\n\n # Fire Seed\n markfire = align_marker(parse_svg_into_mpl(svg_paths.FIRE), valign=\"bottom\")\n\n ax.plot(\n pos_fseed[1],\n pos_fseed[0],\n marker=markfire,\n markersize=MARKFSEED_SIZE,\n color=COLOR_FIRE,\n )\n\n # Bulldozer Location\n marklocation = align_marker(parse_svg_into_mpl(svg_paths.LOCATION), 
valign=\"bottom\")\n\n ax.plot(\n pos[1], pos[0], marker=marklocation, markersize=MARKLOCATION_SIZE, color=\"1.0\"\n )\n clear_ax(ax)\n\n\ndef plot_gauge(ax, time):\n HEIGHT_GAUGE = 0.1\n ax.barh(0.0, time, height=HEIGHT_GAUGE, color=COLOR_GAUGE, edgecolor=\"None\")\n\n ax.barh(\n 0.0,\n 1.0,\n height=0.15,\n color=\"None\",\n edgecolor=\"0.86\",\n )\n\n # Mess with x,y limits for aethetics reasons\n INCREASE_LIMS = True\n\n if INCREASE_LIMS:\n ax.set_xlim(0 - 0.03, 1 + 0.1) # Breathing room\n ax.set_ylim(-0.4, 0.4) # Center the bar\n\n ax.set_xticks([0.0, 1.0]) # Start Time and End Time x ticks\n\n # Set the CA update symbol\n ax.set_yticks([0]) # Set symbol position\n ax.set_yticklabels(CYCLE_SYMBOL, font=EMOJIFONT, size=CYCLE_SIZE)\n ax.get_yticklabels()[0].set_color(\"0.74\") # Light gray\n\n clear_ax(ax, yticks=False)\n\n\ndef plot_counts(ax, counts_empty, counts_burned, counts_tree, counts_fire):\n\n counts_total = sum((counts_empty, counts_burned, counts_tree, counts_fire))\n\n commons = {\"x\": [0, 1], \"width\": 0.1}\n pc = \"1.0\" # placeholder color\n\n lv1y = [counts_tree, counts_empty]\n lv1c = [COLOR_TREE, COLOR_EMPTY]\n\n lv2y = [0, counts_burned] # level 2 y axis\n lv2c = [pc, COLOR_BURNED] # level 2 colors\n lv2b = lv1y # level 2 bottom\n\n lv3y = [0, counts_fire]\n lv3c = [pc, COLOR_FIRE]\n lv3b = [lv1y[i] + lv2y[i] for i in range(len(lv1y))]\n\n # First Level Bars\n ax.bar(height=lv1y, color=lv1c, **commons)\n\n # Second Level Bars\n ax.bar(height=lv2y, color=lv2c, bottom=lv2b, **commons)\n\n # Third Level Bars\n ax.bar(height=lv3y, color=lv3c, bottom=lv3b, **commons)\n\n # Bar Symbols Settings\n ax.set_xticks(np.arange(2))\n ax.set_xticklabels([TREE_SYMBOL, BURNED_SYMBOL], font=EMOJIFONT, size=34)\n # Same colors as bars\n for label, color in zip(ax.get_xticklabels(), [COLOR_TREE, COLOR_BURNED]):\n label.set_color(color)\n\n # Mess with x,y limits for aethetics reasons\n INCREASE_LIMS = True\n INCREASE_FACTORS = [0.1, 0.3] # Y axis down, up\n\n if INCREASE_LIMS:\n # Makes the bars look long & tall, also centers them\n offdown, offup = (\n counts_total * INCREASE_FACTORS[i] for i in range(len(INCREASE_FACTORS))\n )\n ax.set_ylim(\n 0 - offdown, counts_total + offup\n ) # It gives breathing room for bars\n ax.set_xlim(-1, 2) # It centers the bars\n\n # Grid Settings and Tick settings\n # Show marks each quarter\n ax.set_yticks(np.linspace(0, counts_total, 3, dtype=int))\n # Remove clutter\n clear_ax(ax, xticks=False)\n # Add back y marks each quarter\n ax.grid(axis=\"y\", color=\"0.94\") # Dim gray\n"
] | [
[
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.style.use",
"numpy.linspace"
]
] |
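render() above composes its four panels with subplot2grid on a 12x14 grid of layout cells. The same layout in isolation, runnable without the bulldozer environment:

import matplotlib.pyplot as plt

fig_shape = (12, 14)  # rows x cols of layout cells
fig = plt.figure(figsize=(15, 12))
ax_lgrid = plt.subplot2grid(fig_shape, (0, 0), colspan=8, rowspan=10)   # local grid
ax_ggrid = plt.subplot2grid(fig_shape, (0, 8), colspan=6, rowspan=6)    # global grid
ax_gauge = plt.subplot2grid(fig_shape, (10, 0), colspan=8, rowspan=2)   # CA-update gauge
ax_counts = plt.subplot2grid(fig_shape, (6, 8), colspan=6, rowspan=6)   # cell counts
plt.show()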
ranocha/nodepy | [
"b6cff1ff635611de76beebacd6d821788befb36b"
] | [
"nodepy/rooted_trees.py"
] | [
"from __future__ import print_function\n\nfrom __future__ import absolute_import\nimport numpy as np\nfrom sympy import factorial, sympify, Rational\n#from sage.combinat.combinat import permutations\nfrom nodepy.utils import permutations\nfrom six.moves import range\n\n#=====================================================\nclass RootedTree(str):\n#=====================================================\n r\"\"\"\n A rooted tree is a directed acyclic graph with one node, which\n has no incoming edges, designated as the root.\n Rooted trees are useful for analyzing the order conditions of\n multistage numerical ODE solvers, such as Runge-Kutta methods\n and other general linear methods.\n\n The trees are represented as strings, using one of the notations\n introduced by Butcher (the third column of Table 300(I) of\n Butcher's text). The character 'T' is used in place of $\\\\tau$\n to represent a vertex, and braces '{ }' are used instead of brackets\n '[ ]' to indicate that everything inside the braces\n is joined to a single parent node. Thus the first four trees are:\n\n 'T', '{T}', '{T^2}', {{T}}'\n\n These can be generated using the function list_trees(), which\n returns a list of all trees of a given order::\n\n >>> from nodepy import *\n >>> for p in range(4): print(rt.list_trees(p))\n ['']\n ['T']\n ['{T}']\n ['{{T}}', '{T^2}']\n\n Note that the tree of order 0 is indicated by an empty string.\n\n If the tree contains an edge from vertex A to vertex B, vertex\n B is said to be a child of vertex A.\n A vertex with no children is referred to as a leaf.\n\n .. warning::\n\n One important convention is assumed in the code; namely, that at each\n level, leaves are listed first (before any other subtrees),\n and if there are $n$ leaves, we write 'T^n'.\n\n .. note::\n\n Currently, powers cannot be used for subtrees; thus\n\n '{{T}{T}}'\n\n is valid, while\n\n '{{T}^2}'\n\n is not. 
This restriction may be lifted in the future.\n\n **Examples**::\n\n >>> from nodepy import rooted_trees as rt\n >>> tree=rt.RootedTree('{T^2{T{T}}{T}}')\n >>> tree.order()\n 9\n >>> tree.density()\n 144\n >>> tree.symmetry()\n 2\n\n Topologically equivalent trees are considered equal::\n\n >>> tree2=RootedTree('{T^2{T}{T{T}}}')\n >>> tree2==tree\n True\n\n We can generate Python code to evaluate the elementary weight\n corresponding to a given tree for a given class of methods::\n\n >>> rk.elementary_weight_str(tree)\n 'dot(b,dot(A,c)*dot(A,c*dot(A,c))*c**2)'\n\n **References**:\n * :cite:`butcher2003`\n * :cite:`hairer1993`\n \"\"\"\n def __init__(self,strg):\n \"\"\"\n TODO: - Check validity of strg more extensively\n - Accept any leaf ordering, but convert it to our convention\n - convention for ordering of subtrees?\n \"\"\"\n if any([strg[i] not in '{}T^1234567890' for i in range(len(strg))]):\n raise Exception('Not a valid rooted tree string (illegal character)')\n op,cl=strg.count('{'),strg.count('}')\n if op!=cl or (op+cl>0 and (strg[0]!='{' or strg[-1]!='}')):\n raise Exception('Not a valid rooted tree string')\n self=strg\n\n def order(self):\n \"\"\"\n The order of a rooted tree, denoted $r(t)$, is the number of\n vertices in the tree.\n\n **Examples**::\n\n >>> from nodepy import rooted_trees as rt\n >>> tree=rt.RootedTree('{T^2{T{T}}}')\n >>> tree.order()\n 7\n \"\"\"\n from nodepy.strmanip import getint\n if self=='T': return 1\n if self=='': return 0\n r=self.count('{')\n pos=0\n while pos!=-1:\n pos=self.find('T',pos+1)\n if pos!=-1:\n try: r+=getint(self[pos+2:])\n except: r+=1\n return r\n\n def density(self):\n r\"\"\"\n The density of a rooted tree, denoted by $\\\\gamma(t)$,\n is the product of the orders of the subtrees.\n\n **Examples**::\n\n >>> from nodepy import rooted_trees as rt\n >>> tree=rt.RootedTree('{T^2{T{T}}}')\n >>> tree.density()\n 56\n\n **Reference**: :cite:`butcher2003` p. 127, eq. 301(c)\n \"\"\"\n gamma=self.order()\n nleaves,subtrees=self._parse_subtrees()\n for tree in subtrees:\n gamma*=tree.density()\n return gamma\n\n def symmetry(self):\n r\"\"\"\n The symmetry $\\\\sigma(t)$ of a rooted tree is...\n\n **Examples**::\n\n >>> from nodepy import rooted_trees as rt\n >>> tree=rt.RootedTree('{T^2{T{T}}}')\n >>> tree.symmetry()\n 2\n\n **Reference**: :cite:`butcher2003` p. 127, eq. 301(b)\n \"\"\"\n from nodepy.strmanip import getint\n if self=='T': return 1\n sigma=1\n if self[1]=='T':\n try: sigma=factorial(getint(self[3:]))\n except: pass\n nleaves,subtrees=self._parse_subtrees()\n while len(subtrees)>0:\n st=subtrees[0]\n nst=subtrees.count(st)\n sigma*=factorial(nst)*st.symmetry()**nst\n while st in subtrees: subtrees.remove(st)\n return sigma\n\n\n def Dmap(self):\n \"\"\"\n Butcher's function $D(t)$. Represents differentiation.\n Defined by $D(t)$=0 except for D('T')=1.\n\n **Reference**: :cite:`butcher1997`\n \"\"\"\n return self=='T'\n\n def lamda(self,alpha,extraargs=[]):\n r\"\"\"\n Computes Butcher's functional lambda on a given tree\n for the function alpha. 
This is used to compute the\n product of two functions on trees.\n\n *INPUT*:\n\n * alpha -- a function on rooted trees\n * extraargs -- a list containing any additional arguments\n that must be passed to alpha\n\n *OUTPUT*:\n * tprod -- a list of trees [t1, t2, ...]\n * fprod -- a list of numbers [a1, a2, ...]\n\n The meaning of the output is that\n $\\\\lambda(\\\\alpha,t)(\\\\beta)=a1*\\\\beta(t1)+a2*\\\\beta(t2)+...$\n\n **Examples**::\n\n >>> from nodepy import rt\n >>> tree = rt.RootedTree('{T{T}}')\n >>> tree.lamda(rt.Emap)\n (['T', '{T}', '{{T}}', '{T}', '{T^2}', '{T{T}}'], [1/2, 1, 1, 1/2, 1, 1])\n\n **Reference**: :cite:`butcher2003` pp. 275-276\n \"\"\"\n if self=='': return [RootedTree('')],[0]\n if self=='T': return [RootedTree('T')],[1]\n t,u=self._factor()\n if extraargs:\n l1,f1=t.lamda(alpha,*extraargs)\n l2,f2=u.lamda(alpha,*extraargs)\n alphau=alpha(u,*extraargs)\n else:\n l1,f1=t.lamda(alpha)\n l2,f2=u.lamda(alpha)\n alphau=alpha(u)\n tprod=l1\n fprod=[alphau*f1i for f1i in f1 if f1!=0]\n #FOIL:\n for i in range(len(l1)):\n if f1!=0:\n for j in range(len(l2)):\n if f2!=0:\n tprod.append(l1[i]*l2[j])\n fprod.append(f1[i]*f2[j])\n return tprod,fprod\n\n def lamda_str(self,alpha,extraargs=[]):\n \"\"\"\n Alternate version of lamda above, but returns a string.\n Hopefully we can get rid of this (and the other str functions)\n when SAGE can handle noncommutative symbolic algebra.\n \"\"\"\n if not isinstance(extraargs,list): extraargs=[extraargs]\n if self=='': return [RootedTree('')],[0]\n if self=='T': return [RootedTree('T')],[1]\n t,u=self._factor()\n if extraargs:\n l1,f1=t.lamda_str(alpha,*extraargs)\n l2,f2=u.lamda_str(alpha,*extraargs)\n alphau=alpha(u,*extraargs)\n else:\n l1,f1=t.lamda_str(alpha)\n l2,f2=u.lamda_str(alpha)\n alphau=alpha(u)\n tprod=l1\n fprod=[str(f1i)+'*'+alphau for f1i in f1 if f1!=0]\n #FOIL:\n for i in range(len(l1)):\n if f1!=0:\n for j in range(len(l2)):\n if f2!=0:\n tprod.append(l1[i]*l2[j])\n fprod.append(str(f1[i])+'*'+str(f2[j]))\n return tprod,fprod\n\n def _factor(self):\n \"\"\"\n Returns two rooted trees, t and u, such that self=t*u.\n\n **Input**:\n - self -- any rooted tree\n **Output**:\n - t,u -- a pair of rooted trees whose product t*u is equal to self.\n\n **Examples**::\n\n >>> tree=RootedTree('{T^2{T}}')\n >>> t,u=tree._factor()\n >>> t\n '{T{T}}'\n >>> u\n 'T'\n >>> t*u==tree\n True\n\n .. note:: This function is typically only called by lamda().\n \"\"\"\n nleaves,subtrees=self._parse_subtrees()\n if nleaves==0: # Root has no leaves\n t=RootedTree('{'+''.join(subtrees[1:])+'}')\n u=RootedTree(subtrees[0])\n if nleaves==1:\n t=RootedTree(self[0]+self[2:])\n u=RootedTree('T')\n if nleaves==2:\n t=RootedTree(self[0:2]+self[4:])\n u=RootedTree('T')\n if nleaves>2 and nleaves<10:\n t=RootedTree(self[0:3]+str(int(self[3])-1)+self[4:])\n u=RootedTree('T')\n if nleaves>=10:\n t=RootedTree(self[0:3]+str(int(self[3:5])-1)+self[5:])\n u=RootedTree('T')\n if t=='{}': t=RootedTree('T')\n return t,u\n\n def Gprod(self,alpha,beta,alphaargs=[],betaargs=[]):\n r\"\"\"\n Returns the product of two functions on a given tree.\n\n INPUT:\n alpha, beta -- two functions on rooted trees\n that return symbolic or numeric values\n alphaargs -- a string containing any additional arguments\n that must be passed to function alpha\n betaargs -- a string containing any additional arguments\n that must be passed to function beta\n\n OUTPUT:\n (alpha*beta)(self)\n i.e., the function that is the product (in G) of the\n functions alpha and beta. 
Note that this product is\n not commutative.\n\n The product is given by\n\n $(\\\\alpha*\\\\beta)('')=\\\\beta('')$\n\n $(\\\\alpha*\\\\beta)(t) = \\\\lambda(\\\\alpha,t)(\\\\beta) + \\\\alpha(t)\\\\beta('')$\n\n .. note::\n Gprod can be used to compute products of more than two\n functions by passing Gprod itself in as beta, and providing\n the remaining functions to be multiplied as betaargs.\n\n **Examples**::\n\n >>> from nodepy import rt\n >>> tree = rt.RootedTree('{T{T}}')\n >>> tree.Gprod(rt.Emap,Dmap)\n 1/2\n\n **Reference**: :cite:`butcher2003` p. 276, Thm. 386A\n \"\"\"\n trees,factors=self.lamda(alpha,*alphaargs)\n s=0\n for i in range(len(trees)):\n s+=factors[i]*beta(trees[i],*betaargs)\n s+=alpha(self,*alphaargs)*beta(RootedTree(\"\"),*betaargs)\n return s\n\n def Gprod_str(self,alpha,beta,alphaargs=[],betaargs=[]):\n \"\"\"\n Alternate version of Gprod, but operates on strings.\n Hopefully can be eliminated later in favor of symbolic\n manipulation.\n \"\"\"\n trees,facs=self.lamda_str(alpha,*alphaargs)\n s=\"\"\n for i in range(len(trees)):\n if facs[i]!=0:\n bet=beta(trees[i],*betaargs)\n if bet not in ['0','']:\n if i>0: s+='+'\n s+=str(facs[i])+\"*\"+bet\n bet=beta(RootedTree(\"\"),*betaargs)\n if bet not in ['0','']:\n alph=alpha(self,*alphaargs)\n s+=\"+\"+str(sympify(alph+'*'+bet))\n return s\n\n def _plot_subtree(self,xroot,yroot,xwidth):\n \"\"\"\n Recursively plots subtrees. Should only be called from plot().\n\n INPUT:\n xroot, yroot -- coordinates at which root of this subtree\n is plotted\n xwidth -- width in which this subtree must fit, in order\n to avoid possibly overlapping with others\n \"\"\"\n import matplotlib.pyplot as plt\n ychild=yroot+1\n nleaves,subtrees=self._parse_subtrees()\n nchildren=nleaves+len(subtrees)\n\n dist=xwidth*(nchildren-1)/2.\n xchild=np.linspace(xroot-dist,xroot+dist,nchildren)\n plt.scatter(xchild,ychild*np.ones(nchildren))\n for i in range(nchildren):\n plt.plot([xroot,xchild[i]],[yroot,ychild],'-k')\n if i>nleaves-1:\n subtrees[i-nleaves]._plot_subtree(xchild[i],ychild,xwidth/3.)\n\n def plot(self,nrows=1,ncols=1,iplot=1,ttitle=''):\n \"\"\"\n Plots the rooted tree.\n\n *INPUT*: (optional)\n * nrows, ncols -- number of rows and columns of subplots\n in the figure\n * iplot -- index of the subplot in which to plot\n this tree\n\n These are only necessary if plotting more than one tree\n in a single figure using subplot.\n\n *OUTPUT*: None.\n\n The plot is created recursively by\n plotting the root, parsing the subtrees, plotting the\n subtrees' roots, and calling _plot_subtree on each child\n \"\"\"\n import matplotlib.pyplot as plt\n if iplot==1: plt.clf()\n plt.subplot(nrows,ncols,iplot)\n plt.scatter([0],[0])\n if self!='T': self._plot_subtree(0,0,1.)\n\n fs=int(np.ceil(20./nrows))\n plt.title(ttitle,{'fontsize': fs})\n plt.xticks([])\n plt.yticks([])\n plt.axis('off')\n\n\n\n def _parse_subtrees(self):\n \"\"\"\n Returns the number of leaves and a list of the subtrees,\n for a given rooted tree.\n\n OUTPUT:\n nleaves -- number of leaves attached directly to the root\n subtrees -- list of non-leaf subtrees attached to the root\n\n The method can be thought of as returning what remains if the\n root of the tree is removed. 
For efficiency, instead of\n returning possibly many copies of 'T', the leaves are just\n returned as a number.\n \"\"\"\n from nodepy.strmanip import get_substring, open_to_close, getint\n if str(self)=='T' or str(self)=='': return 0,[]\n pos=0\n #Count leaves at current level\n if self[1]=='T':\n if self[2]=='^':\n nleaves=getint(self[3:])\n else: nleaves=1\n else: nleaves=0\n\n subtrees=[]\n while pos!=-1:\n pos=self.find('{',pos+1)\n if pos!=-1:\n subtrees.append(RootedTree(get_substring(self,pos)))\n pos=open_to_close(self,pos)\n\n return nleaves,subtrees\n\n def list_equivalent_trees(self):\n \"\"\"\n Returns a list of all strings (subject to our assumptions)\n equivalent to a given tree\n\n INPUT:\n self -- any rooted tree\n OUTPUT:\n treelist -- a list of all the 'legal' tree strings\n that produce the same tree.\n\n The list of equivalent trees is obtained by taking all\n permutations of the (non-leaf) subtrees.\n This routine is used to test equality of trees.\n \"\"\"\n nleaves,subtrees=self._parse_subtrees()\n if len(subtrees)==0: return [self]\n for i in range(len(subtrees)): subtrees[i]=str(subtrees[i])\n treelist = [RootedTree('{'+_powerString('T',nleaves,powchar='^')+\n ''.join(sts)+'}') for sts in permutations(subtrees)]\n return treelist\n\n def __eq__(self,tree2):\n \"\"\"\n Test equivalence of two rooted trees.\n Generates all 'legal' strings equivalent to the first\n tree, and checks whether the second is in that list.\n \"\"\"\n ts=[str(t) for t in self.list_equivalent_trees()]\n if str(tree2) in ts: return True\n else: return False\n\n def __mul__(self,tree2):\n \"\"\"\n Returns Butcher's product: t*u is the tree obtained by\n attaching the root of u as a child to the root of t.\n \"\"\"\n from nodepy.strmanip import getint\n if self=='T': return RootedTree('{'+tree2+'}')\n if tree2=='T': # We're just adding a leaf to self\n nleaves,subtrees=self._parse_subtrees()\n if nleaves==0: return RootedTree(self[0]+'T'+self[1:])\n if nleaves==1: return RootedTree(self[0]+'T^2'+self[2:])\n if nleaves>1:\n n = getint(self[3:])\n return RootedTree(self[0:3]+str(n+1)+self[(3+len(str(n))):])\n else: return RootedTree(self[:-1]+tree2+'}') # tree2 wasn't just 'T'\n#=====================================================\n#End of RootedTree class\n#=====================================================\n\n#=====================================================\ndef plot_all_trees(p,title='str'):\n#=====================================================\n \"\"\" Plots all rooted trees of order p.\n\n **Example**:\n\n Plot all trees of order 4::\n\n >>> from nodepy import rt\n >>> rt.plot_all_trees(4) # doctest: +ELLIPSIS\n <Figure...\n \"\"\"\n import matplotlib.pyplot as plt\n forest=list_trees(p)\n nplots=len(forest)\n nrows=int(np.ceil(np.sqrt(float(nplots))))\n ncols=int(np.floor(np.sqrt(float(nplots))))\n if nrows*ncols<nplots: ncols=ncols+1\n for tree in forest:\n if title=='str': ttitle=tree\n else: ttitle=title(tree)\n tree.plot(nrows,ncols,forest.index(tree)+1,ttitle=ttitle)\n fig=plt.figure(1)\n plt.setp(fig,facecolor='white')\n return fig\n\n#=====================================================\ndef list_trees(p,ind='all'):\n#=====================================================\n \"\"\"\n Returns rooted trees of order p.\n\n INPUT:\n\n - p -- order of trees desired\n - ind -- if given, returns a single tree corresponding to this index.\n Not very useful since the ordering isn't obvious.\n\n OUTPUT: list of all trees of order p (or just one, if ind is provided).\n\n Generates 
the rooted trees using Albrecht's 'Recursion 3'.\n\n **Examples**:\n\n Produce column of Butcher's Table 302(I)::\n\n >>> for i in range(1,11):\n ... forest=list_trees(i)\n ... print(len(forest))\n 1\n 1\n 2\n 4\n 9\n 20\n 48\n 115\n 286\n 719\n\n .. warning::\n\n This code is complete only up to order 10. We need to extend it\n by adding more subloops for p>10.\n\n TODO: Implement Butcher's formula (Theorem 302B) for the number\n of trees and determine to what order this is valid.\n\n **Reference**: :cite:`albrecht1996`\n \"\"\"\n\n if p>10: raise Exception('list_trees is not complete for orders p > 10.')\n\n if p==0: return [RootedTree('')]\n W=[[],[]] #This way indices agree with Albrecht\n R=[[],[]]\n R.append([RootedTree(\"{T}\")])\n W.append([RootedTree(\"{{T}}\")])\n for i in range(3,p):\n #Construct R[i]\n ps=_powerString(\"T\",i-1,powchar=\"^\")\n R.append([RootedTree(\"{\"+ps+\"}\")])\n for w in W[i-1]:\n R[i].append(w)\n #Construct W[i]\n #l=0:\n W.append([RootedTree(\"{\"+R[i][0]+\"}\")])\n for r in R[i][1:]:\n W[i].append(RootedTree(\"{\"+r+\"}\"))\n for l in range(1,i-1): #level 1\n for r in R[i-l]:\n ps=_powerString(\"T\",l,powchar=\"^\")\n W[i].append(RootedTree(\"{\"+ps+r+\"}\"))\n for l in range(0,i-3): #level 2\n for n in range(2,i-l-1):\n m=i-n-l\n if m<=n: #Avoid duplicate conditions\n for Rm in R[m]:\n lowlim=(m<n and [0] or [R[m].index(Rm)])[0]\n for Rn in R[n][lowlim:]:\n ps=_powerString(\"T\",l,powchar=\"^\")\n W[i].append(RootedTree(\"{\"+ps+Rm+Rn+\"}\"))\n for l in range(0,i-5): #level 3\n for n in range(2,i-l-3):\n for m in range(2,i-l-n-1):\n s=i-m-n-l\n if m<=n and n<=s: #Avoid duplicate conditions\n for Rm in R[m]:\n lowlim=(m<n and [0] or [R[m].index(Rm)])[0]\n for Rn in R[n][lowlim:]:\n lowlim2=(n<s and [0] or [R[n].index(Rn)])[0]\n for Rs in R[s][lowlim2:]:\n ps=_powerString(\"T\",l,powchar=\"^\")\n W[i].append(RootedTree(\"{\"+ps+Rm+Rn+Rs+\"}\"))\n for l in range(0,i-7): #level 4\n for n in range(2,i-l-5):\n for m in range(2,i-l-n-3):\n for s in range(2,i-l-n-m-1):\n t=i-s-m-n-l\n if s<=t and n<=s and m<=n: #Avoid duplicate conditions\n for Rm in R[m]:\n lowlim=(m<n and [0] or [R[m].index(Rm)])[0]\n for Rn in R[n][lowlim:]:\n lowlim2=(n<s and [0] or [R[n].index(Rn)])[0]\n for Rs in R[s][lowlim2:]:\n lowlim3=(s<t and [0] or [R[s].index(Rs)])[0]\n for Rt in R[t]:\n ps=_powerString(\"T\",l,powchar=\"^\")\n W[i].append(RootedTree(\"{\"+ps+Rm+Rn+Rs+Rt+\"}\"))\n # The recursion above generates all trees except the 'blooms'\n # Now add the blooms:\n W[0].append(RootedTree(\"T\"))\n for i in range(1,p):\n ps=_powerString(\"T\",i,powchar=\"^\")\n W[i].append(RootedTree(\"{\"+ps+\"}\"))\n\n if ind=='all': return W[p-1]\n else: return W[p-1][ind]\n\n\ndef _powerString(s,npow,powchar=\"**\",trailchar=''):\n r\"\"\"Raise string s to power npow with additional formatting.\"\"\"\n if npow==0:\n return \"\"\n else:\n if npow==1:\n return s+trailchar\n else:\n return s+powchar+str(npow)+trailchar\n\n\n#=====================================================\n# Functions on trees\n#=====================================================\n\ndef Dprod(tree,alpha):\n \"\"\"\n Evaluate (alpha*D)(t). 
Note that this is not equal to (D*alpha)(t).\n This function is necessary (rather than just using Gprod)\n in order to avoid infinite recursions.\n\n **Examples**::\n\n >>> from nodepy import rt\n >>> tree = rt.RootedTree('{T{T}}')\n >>> Dprod(tree,Emap)\n 1/2\n \"\"\"\n if tree=='': return 0\n if tree=='T': return alpha(RootedTree(''))\n nleaves,subtrees=tree._parse_subtrees()\n result=alpha(RootedTree('T'))**nleaves\n for subtree in subtrees:\n result*=alpha(subtree)\n return result\n\ndef Dprod_str(tree,alpha):\n if tree=='': return '0'\n if tree=='T': return alpha(RootedTree(''))\n nleaves,subtrees=tree._parse_subtrees()\n result=_powerString(alpha(RootedTree('T')),nleaves)\n for subtree in subtrees:\n if result!='': result+='*'\n result+=alpha(subtree)\n return result\n\ndef Dmap(tree):\n \"\"\"\n Butcher's function D(t). Represents differentiation.\n Defined by D(t)=0 except for D('T')=1.\n \"\"\"\n return 1*(tree=='T')\n\ndef Dmap_str(tree):\n return str(int(tree=='T'))\n\ndef Gprod(tree,alpha,beta,alphaargs='',betaargs=[]):\n \"\"\" Returns the product of two functions on a given tree.\n See Butcher p. 276, Thm. 386A \"\"\"\n return tree.Gprod(alpha,beta,alphaargs,betaargs)\n\ndef Gprod_str(tree,alpha,beta,alphaargs='',betaargs=[]):\n return tree.Gprod_str(alpha,beta,alphaargs,betaargs)\n\ndef Emap(tree,a=1):\n \"\"\"\n Butcher's function E^a(t).\n Gives the B-series for the exact solution advanced 'a' steps\n in time.\n\n **Examples**::\n\n >>> from nodepy import rooted_trees as rt\n >>> tree=rt.RootedTree('{T^2{T{T}}}')\n >>> rt.Emap(tree)\n 1/56\n >>> rt.Emap(tree,a=2)\n 16/7\n\n **Reference**: :cite:`butcher1997`\n \"\"\"\n return Rational(a**tree.order(),(tree.density()))\n\ndef Emap_str(tree,a=1):\n return str(Rational(a**tree.order(),(tree.density())))\n\n\n#=====================================================\ndef recursiveVectors(p,ind='all'):\n#=====================================================\n \"\"\"\n Generate recursive vectors using Albrecht's 'recursion 1'.\n These are essentially the order conditions for Runge-Kutta\n methods, excluding those that correspond to bushy trees.\n More specifically, these are the vectors that must be\n orthogonal to the vector of weights b.\n\n Note that the individual order conditions obtained from\n this algorithm are different from those obtained using Butcher's\n approach. But as a set of conditions up to some order they\n are, of course, equivalent.\n\n Follows :cite:`albrecht1996` p. 1718\n\n .. warning::\n\n This code is complete only up to order 14. 
We need to extend it\n by adding more subloops for p>14.\n\n **Example**\n\n Count number of conditions for order 12::\n\n >>> from nodepy import rt\n >>> v = rt.recursiveVectors(12)\n >>> print(len(v))\n 4765\n \"\"\"\n if p>14: raise Exception('recursiveVectors is not complete for orders p > 14.')\n W=[[],[]]\n R=[[],[]]\n R.append([\"tau[2]\"])\n W.append([\"tau[2]\"])\n for i in range(3,p):\n #Construct R[i]\n R.append([\"tau[\"+str(i)+\"]\"])\n for w in W[i-1]:\n R[i].append(\"A,\"+w)\n #Construct W[i]\n #l=0:\n W.append(R[i][:])\n for l in range(1,i-1): #level 1\n ps=_powerString(\"C\",l,trailchar=\",\")\n for r in R[i-l]:\n W[i].append(ps+r)\n for l in range(0,i-3): #level 2\n ps=_powerString(\"C\",l,trailchar=\",\")\n for n in range(2,i-l-1):\n m=i-n-l\n if m<=n: #Avoid duplicate conditions\n for Rm in R[m]:\n # if m<n, start from R[0]\n # if m==n, start from Rm\n lowlim=(m<n and [0] or [R[m].index(Rm)])[0]\n for Rn in R[n][lowlim:]:\n W[i].append(ps+Rm+\"*\"+Rn)\n for l in range(0,i-5): #level 3\n ps=_powerString(\"C\",l,trailchar=\",\")\n for n in range(2,i-l-3):\n for m in range(2,i-l-n-1):\n s=i-m-n-l\n if m<=n and n<=s: #Avoid duplicate conditions\n for Rm in R[m]:\n lowlim=(m<n and [0] or [R[m].index(Rm)])[0]\n for Rn in R[n][lowlim:]:\n lowlim2=(n<s and [0] or [R[n].index(Rn)])[0]\n for Rs in R[s][lowlim2:]:\n W[i].append(ps+Rm+\"*\"+Rn+\"*\"+Rs)\n for l in range(0,i-7): #level 4\n ps=_powerString(\"C\",l,trailchar=\",\")\n for n in range(2,i-l-5):\n for m in range(2,i-l-n-3):\n for s in range(2,i-l-n-m-1):\n t=i-s-m-n-l\n if s<=t and n<=s and m<=n: #Avoid duplicate conditions\n for Rm in R[m]:\n lowlim=(m<n and [0] or [R[m].index(Rm)])[0]\n for Rn in R[n][lowlim:]:\n lowlim2=(n<s and [0] or [R[n].index(Rn)])[0]\n for Rs in R[s][lowlim2:]:\n lowlim3=(s<t and [0] or [R[s].index(Rs)])[0]\n for Rt in R[t][lowlim3:]:\n W[i].append(ps+Rm+\"*\"+Rn+\"*\"+Rs+\"*\"+Rt)\n for l in range(0,i-9): # level 5\n ps=_powerString(\"C\",l,trailchar=\",\")\n for n in range(2,i-l-7):\n for m in range(2,i-l-n-5):\n for s in range(2,i-l-n-m-3):\n for t in range(2,i-l-n-m-s-1):\n u=i-t-s-m-n-l\n if m<=n<=s<=t<=u: #Avoid duplicate conditions\n for Rm in R[m]:\n lowlim=(m<n and [0] or [R[m].index(Rm)])[0]\n for Rn in R[n][lowlim:]:\n lowlim2=(n<s and [0] or [R[n].index(Rn)])[0]\n for Rs in R[s][lowlim2:]:\n lowlim3=(s<t and [0] or [R[s].index(Rs)])[0]\n for Rt in R[t][lowlim3:]:\n lowlim4=(t<u and [0] or [R[t].index(Rt)])[0]\n for Ru in R[u][lowlim4:]:\n W[i].append(ps+Rm+\"*\"+Rn+\"*\"+Rs+\"*\"+Rt+\"*\"+Ru)\n\n for l in range(0,i-11): # level 6\n ps=_powerString(\"C\",l,trailchar=\",\")\n for m in range(2,i-l-9):\n for n in range(2,i-l-m-7):\n for s in range(2,i-l-n-m-5):\n for t in range(2,i-l-n-m-s-3):\n for u in range(2,i-l-n-m-s-t-1):\n v=i-t-s-m-n-l-u\n if m<=n<=s<=t<=u<=v: #Avoid duplicate conditions\n for Rm in R[m]:\n lowlim=(m<n and [0] or [R[m].index(Rm)])[0]\n for Rn in R[n][lowlim:]:\n lowlim2=(n<s and [0] or [R[n].index(Rn)])[0]\n for Rs in R[s][lowlim2:]:\n lowlim3=(s<t and [0] or [R[s].index(Rs)])[0]\n for Rt in R[t][lowlim3:]:\n lowlim4=(t<u and [0] or [R[t].index(Rt)])[0]\n for Ru in R[u][lowlim4:]:\n lowlim5=(u<v and [0] or [R[u].index(Ru)])[0]\n for Rv in R[v][lowlim5:]:\n W[i].append(ps+Rm+\"*\"+Rn+\"*\"+Rs+\"*\"+Rt+\"*\"+Ru+\"*\"+Rv)\n\n# It seems like the code above should give correct values for order up to 16,\n# but it does not agree with OEIS sequence A000081 starting at order 15.\n# I'm not sure what's wrong; it may be that this approach leads to duplicate conditions\n# 
starting only at order 15 (see comment on p. 1719 of Albrecht's paper).\n\n# The code below seems correct but in light of the issue above there is no point in using it.\n# for l in range(0,i-13): # level 7\n# ps=_powerString(\"C\",l,trailchar=\",\")\n# for n in range(2,i-l-11):\n# for m in range(2,i-l-n-9):\n# for s in range(2,i-l-n-m-7):\n# for t in range(2,i-l-n-m-s-5):\n# for u in range(2,i-l-n-m-s-t-3):\n# for v in range(2,i-l-n-m-s-t-u-1):\n# x=i-t-s-m-n-l-u\n# if m<=n<=s<=t<=u<=v<=x: #Avoid duplicate conditions\n# for Rm in R[m]:\n# lowlim=(m<n and [0] or [R[m].index(Rm)])[0]\n# for Rn in R[n][lowlim:]:\n# lowlim2=(n<s and [0] or [R[n].index(Rn)])[0]\n# for Rs in R[s][lowlim2:]:\n# lowlim3=(s<t and [0] or [R[s].index(Rs)])[0]\n# for Rt in R[t][lowlim3:]:\n# lowlim4=(t<u and [0] or [R[t].index(Rt)])[0]\n# for Ru in R[u][lowlim4:]:\n# lowlim5=(u<v and [0] or [R[u].index(Ru)])[0]\n# for Rv in R[v][lowlim5:]:\n# lowlim6=(v<x and [0] or [R[v].index(Rv)])[0]\n# for Rx in R[x][lowlim5:]:\n# W[i].append(ps+Rm+\"*\"+Rn+\"*\"+Rs+\"*\"+Rt+\"*\"+Ru+\"*\"+Rv+\"*\"+Rx)\n\n if ind=='all': return W[p-1]\n else: return W[p-1][ind]\n\n\n#=====================================================\n#=====================================================\n#DEPRECATED FUNCTIONS\n#=====================================================\n#=====================================================\n\ndef py2tex(codestr):\n \"\"\"Convert a python code string to LaTex\"\"\"\n\n strout=codestr.replace(\"'\",\"^T\")\n strout=strout.replace(\"*\",\"\")\n strout=strout.replace(\".^\",\"^\")\n strout='$'+strout+'$'\n return strout\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n"
] | [
[
"numpy.ceil",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"numpy.ones",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.scatter",
"numpy.linspace",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplot"
]
] |
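The docstrings in rooted_trees.py above double as doctests, so the quoted values can be reproduced directly (assumes nodepy is installed):

from nodepy import rooted_trees as rt

# Counts of rooted trees per order, matching the list_trees() docstring.
for p in range(1, 6):
    print(p, len(rt.list_trees(p)))   # 1, 1, 2, 4, 9

tree = rt.RootedTree('{T^2{T{T}}}')
print(tree.order(), tree.density(), tree.symmetry())  # 7 56 2, per the docstrings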
akashdhruv/flowX | [
"65b752c58a9da29f8508b4056d4aa3ac6d336d41"
] | [
"flowx/imbound/_interface/visco/_force_flow.py"
] | [
"import numpy\n\nfrom . import _interface\n\ndef force_flow(gridc, gridx, gridy, scalars, particles, ibmf, ibmx, ibmy, velc, options):\n\n \"\"\"\n Subroutine to compute forces on the fluid due to the presence of the immersed boundary\n \n Arguments\n ---------\n gridc : object\n Grid object for center variables\n\n gridx : object\n Grid object for x-face variables\n\n gridy : object\n Grid object for y-face variables\n\n scalars: object\n Scalars object to access time-step and Reynold number\n\n particles: object\n Object containing immersed boundary information\n\n ibmf : string for forcing variable\n\n velc : string for velocity variable\n \"\"\"\n\n nx, ny = gridc.nx, gridc.ny\n dx, dy = gridc.dx, gridc.dy\n dt = scalars.dt\n Re_s, mu_s = scalars.Re_s, scalars.mu_s\n\n extrap_iter = options['extrap_solid']\n\n u = gridx[velc][0,0,:,:].transpose()\n v = gridy[velc][0,0,:,:].transpose()\n \n phi = gridc[ibmf][0,0,:,:].transpose()\n lmx = gridc[ibmx][0,0,:,:].transpose()\n lmy = gridc[ibmy][0,0,:,:].transpose()\n\n xmus = numpy.zeros_like(phi)\n lms1 = numpy.zeros_like(phi)\n lms2 = numpy.zeros_like(phi)\n lms3 = numpy.zeros_like(phi)\n lms4 = numpy.zeros_like(phi)\n adfx = numpy.zeros_like(phi)\n adfy = numpy.zeros_like(phi)\n\n #----------Assign solid properties---------------\n _interface.solid_props(phi,xmus,mu_s,dx,dy,nx+2,ny+2)\n\n #----------Calculate solid stress terms----------\n _interface.solid_stress(phi,lmx,lmy,lms1,lms2,lms3,lms4,dx,dy,nx+2,ny+2)\n\n #---------Find normal vectors---------------------\n _interface.normal_vector_solid(phi,adfx,adfy,dx,dy,nx+2,ny+2)\n\n #---------Extrapolation of stress terms---------- \n for _iter in range(extrap_iter):\n _interface.constant_extrapolation(phi,lms1,adfx,adfy,dx,dy,nx+2,ny+2)\n _interface.constant_extrapolation(phi,lms2,adfx,adfy,dx,dy,nx+2,ny+2)\n _interface.constant_extrapolation(phi,lms3,adfx,adfy,dx,dy,nx+2,ny+2)\n _interface.constant_extrapolation(phi,lms4,adfx,adfy,dx,dy,nx+2,ny+2)\n \n #---------------Update velocity------------------\n _interface.solid_ustar(u,v,xmus,lms1,lms2,lms3,lms4,Re_s,dt,dx,dy,nx+2,ny+2)\n\n return\n"
] | [
[
"numpy.zeros_like"
]
] |
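force_flow() above operates on cell-centered fields sized (nx+2) x (ny+2), consistent with one ghost layer on each side, which is why every _interface call receives nx+2 and ny+2. A shape sketch of the scratch arrays it allocates:

import numpy as np

nx, ny = 8, 8                       # interior cells
phi = np.zeros((nx + 2, ny + 2))    # level-set field including ghost cells
xmus = np.zeros_like(phi)           # solid viscosity on the same padded grid
lms = [np.zeros_like(phi) for _ in range(4)]  # the four stress terms lms1..lms4
print(phi.shape)                    # (10, 10)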
mrlt8/wyzecam | [
"8852bc55e0b134f56ef81097bd416c995dc6a46c"
] | [
"wyzecam/iotc.py"
] | [
"from typing import Any, Dict, Iterator, Optional, Tuple, Union\n\nimport enum\nimport logging\nimport pathlib\nimport time\nimport warnings\nfrom ctypes import CDLL, c_int\n\nfrom wyzecam.api_models import WyzeAccount, WyzeCamera\n\ntry:\n import av\n import av.video.frame\nexcept ImportError:\n av = None\n\ntry:\n import cv2\nexcept ImportError:\n cv2 = None\n\ntry:\n import numpy as np\nexcept ImportError:\n np = None # type: ignore\n\nfrom wyzecam.tutk import tutk, tutk_ioctl_mux, tutk_protocol\nfrom wyzecam.tutk.tutk_ioctl_mux import TutkIOCtrlMux\nfrom wyzecam.tutk.tutk_protocol import (\n K10000ConnectRequest,\n K10052DBSetResolvingBit,\n K10056SetResolvingBit,\n respond_to_ioctrl_10001,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass WyzeIOTC:\n \"\"\"Wyze IOTC singleton, used to construct iotc_sessions\n\n This object should generally be used inside a context manager, i.e.:\n\n ```python\n with WyzeIOTC() as wyze:\n with wyze.connect_and_auth(account, camera) as session:\n ... # send commands to the camera, then start streaming\n ```\n\n :var tutk_platform_lib: the underlying c library used to communicate with the wyze\n device; see [wyzecam.tutk.tutk.load_library][]\n :var udp_port: the UDP port used on this machine for communication with wyze cameras on the same network\n :vartype udp_port: int\n :var max_num_av_channels: the maximum number of simultaneous sessions this object supports.\n :vartype max_num_av_channels: int\n :var version: the version of the underyling `tutk_platform_lib`\n \"\"\"\n\n def __init__(\n self,\n tutk_platform_lib: Optional[Union[str, CDLL]] = None,\n udp_port: Optional[int] = None,\n max_num_av_channels: Optional[int] = None,\n debug: bool = False,\n ) -> None:\n \"\"\"Construct a WyzeIOTC session object\n\n You should only create one of these at a time.\n\n :param tutk_platform_lib: The underlying c library (from tutk.load_library()), or the path\n to this library.\n :param udp_port: Specify a UDP port. Random UDP port is used if it is specified as 0.\n :param max_num_av_channels: The max number of AV channels. 
If it is specified\n less than 1, AV will set max number of AV channels as 1.\n\n \"\"\"\n if tutk_platform_lib is None:\n tutk_platform_lib = tutk.load_library()\n if isinstance(tutk_platform_lib, str):\n path = pathlib.Path(tutk_platform_lib)\n tutk_platform_lib = tutk.load_library(str(path.absolute()))\n\n self.tutk_platform_lib: CDLL = tutk_platform_lib\n self.initd = False\n self.udp_port = udp_port\n self.max_num_av_channels = max_num_av_channels\n\n if debug:\n logging.basicConfig()\n logger.setLevel(logging.DEBUG)\n tutk_protocol.logger.setLevel(logging.DEBUG)\n tutk_ioctl_mux.logger.setLevel(logging.DEBUG)\n\n def initialize(self):\n \"\"\"Initialize the underlying TUTK library\n\n This is called automatically by the context manager,\n and should only be called if you intend to manually handle\n cleanup of this classes resources (by calling deinitialize\n when done with it!)\n \"\"\"\n if self.initd:\n return\n self.initd = True\n\n errno = tutk.iotc_initialize(\n self.tutk_platform_lib, udp_port=self.udp_port or 0\n )\n if errno < 0:\n raise tutk.TutkError(errno)\n\n actual_num_chans = tutk.av_initialize(\n self.tutk_platform_lib, max_num_channels=self.max_num_av_channels\n )\n if actual_num_chans < 0:\n raise tutk.TutkError(errno)\n\n self.max_num_av_channels = actual_num_chans\n\n def deinitialize(self):\n \"\"\"Deinitialize the underlying TUTK library\n\n This is called automatically by the context manager\n \"\"\"\n tutk.av_deinitialize(self.tutk_platform_lib)\n tutk.iotc_deinitialize(self.tutk_platform_lib)\n\n @property\n def version(self):\n \"\"\"Get the version of the underlying TUTK library\"\"\"\n return tutk.iotc_get_version(self.tutk_platform_lib)\n\n def __enter__(self):\n self.initialize()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.deinitialize()\n\n def connect_and_auth(\n self, account: WyzeAccount, camera: WyzeCamera\n ) -> \"WyzeIOTCSession\":\n \"\"\"Initialize a new iotc session with the specified camera, and account information.\n\n The result of this method should be used as a context manager, i.e. using the 'with'\n keyword. This allows us to automatically clean up after we're done with the session:\n\n ```python\n with WyzeIOTC() as iotc:\n with iotc.connect_and_auth(account, camera) as session:\n ... 
# send configuration commands, or stream video from the session.\n ```\n\n See [WyzeIOTCSession](../iotc_session/) for more info.\n\n :param account: the account object returned from [wyzecam.api.get_user_info][]\n :param camera: the camera object returned from [wyzecam.api.get_camera_list][]\n :returns: An object representing the Wyze IOTC Session, a [WyzeIOTCSession](../iotc_session/)\n \"\"\"\n return WyzeIOTCSession(self.tutk_platform_lib, account, camera)\n\n\nclass WyzeIOTCSessionState(enum.IntEnum):\n \"\"\"An enum describing the possible states of a WyzeIOTCSession\"\"\"\n\n DISCONNECTED = 0\n \"\"\"Not yet connected\"\"\"\n\n IOTC_CONNECTING = 1\n \"\"\"Currently attempting to connect the IOTC session\"\"\"\n\n AV_CONNECTING = 2\n \"\"\"Currently attempting to connect the AV session\"\"\"\n\n CONNECTED = 3\n \"\"\"Fully connected to the camera, but have not yet attempted to authenticate\"\"\"\n\n CONNECTING_FAILED = 4\n \"\"\"Connection failed, no longer connected\"\"\"\n\n AUTHENTICATING = 5\n \"\"\"Attempting to authenticate\"\"\"\n\n AUTHENTICATION_SUCCEEDED = 6\n \"\"\"Fully connected and authenticated\"\"\"\n\n AUTHENTICATION_FAILED = 7\n \"\"\"Authentication failed, no longer connected\"\"\"\n\n\nclass WyzeIOTCSession:\n \"\"\"An IOTC session object, used for communicating with Wyze cameras\n\n This is constructed from a WyzeIOTC object:\n\n ```python\n with WyzeIOTC() as wyze:\n with wyze.connect_and_auth(account, camera) as session:\n ... # send configuration commands, or stream video\n ```\n\n However, you can construct it manually, which can be helpful if you intend to set a\n different frame size or bitrate than the defaults:\n\n ```python\n with WyzeIOTCSession(lib, account, camera, bitrate=tutk.BITRATE_SD)\n ...\n ```\n\n > **Note:** WyzeIOTCSession is intended to be used as a context manager. Otherwise,\n > you will need to manually tell the session to connect and authenticate, by calling\n > session._connect() followed by session._auth(), and session._disconnect() when you're\n > ready to disconnect the session.\n\n :var tutk_platform_lib: The underlying c library (from [tutk.load_library][wyzecam.tutk.tutk.load_library])\n :var account: A [WyzeAccount][wyzecam.api_models.WyzeAccount] instance, see\n [api.get_user_info][wyzecam.api.get_user_info]\n :var camera: A [WyzeCamera][wyzecam.api_models.WyzeCamera] instance, see\n [api.get_camera_list][wyzecam.api.get_camera_list]\n :var preferred_frame_size: The preferred size of the video stream returned by the camera.\n See [wyzecam.tutk.tutk.FRAME_SIZE_1080P][].\n :var preferred_bitrate: The preferred bitrate of the video stream returned by the camera.\n See [wyzecam.tutk.tutk.BITRATE_HD][].\n :var session_id: The id of this session, once connected.\n :var av_chan_id: The AV channel of this session, once connected.\n :var state: The current connection state of this session. 
See\n [WyzeIOTCSessionState](../iotc_session_state/).\n \"\"\"\n\n def __init__(\n self,\n tutk_platform_lib: CDLL,\n account: WyzeAccount,\n camera: WyzeCamera,\n frame_size: int = tutk.FRAME_SIZE_1080P,\n bitrate: int = tutk.BITRATE_HD,\n ) -> None:\n \"\"\"Construct a wyze iotc session\n\n :param tutk_platform_lib: The underlying c library (from\n [tutk.load_library][wyzecam.tutk.tutk.load_library])\n :param account: A [WyzeAccount][wyzecam.api_models.WyzeAccount] instance, see\n [api.get_user_info][wyzecam.api.get_user_info]\n :param camera: A [WyzeCamera][wyzecam.api_models.WyzeCamera] instance, see\n [api.get_camera_list][wyzecam.api.get_camera_list]\n :param frame_size: Configures the size of the video stream returned by the camera.\n See [wyzecam.tutk.tutk.FRAME_SIZE_1080P][].\n :param bitrate: Configures the bitrate of the video stream returned by the camera.\n See [wyzecam.tutk.tutk.BITRATE_HD][].\n \"\"\"\n self.tutk_platform_lib: CDLL = tutk_platform_lib\n self.account: WyzeAccount = account\n self.camera: WyzeCamera = camera\n self.session_id: Optional[c_int] = None\n self.av_chan_id: Optional[c_int] = None\n self.state: WyzeIOTCSessionState = WyzeIOTCSessionState.DISCONNECTED\n\n self.preferred_frame_size: int = frame_size\n self.preferred_bitrate: int = bitrate\n\n def session_check(self) -> tutk.SInfoStruct:\n \"\"\"Used by a device or a client to check the IOTC session info.\n\n A device or a client may use this function to check if the IOTC session is\n still alive as well as getting the IOTC session info.\n\n :returns: A [`tutk.SInfoStruct`][wyzecam.tutk.tutk.SInfoStruct]\n \"\"\"\n assert (\n self.session_id is not None\n ), \"Please call _connect() before session_check()\"\n\n errcode, sess_info = tutk.iotc_session_check(\n self.tutk_platform_lib, self.session_id\n )\n if errcode < 0:\n raise tutk.TutkError(errcode)\n\n return sess_info\n\n def iotctrl_mux(self) -> TutkIOCtrlMux:\n \"\"\"Constructs a new TutkIOCtrlMux for this session\n\n Use this to send configuration messages, such as change the cameras resolution.\n\n Note that you either should treat the result of this as a context manager (using\n with), or call start_listening() explicitly on the result. This starts a separate\n thread listening for the responses from the camera.\n\n ```python\n with session.ioctrl_mux() as mux:\n msg = tutk_protocol.K10056SetResolvingBit(\n tutk.FRAME_SIZE_1080P, tutk.BITRATE_SD)\n future = mux.send_ioctl(msg)\n assert future.result() == True, \"Change bitrate failed!\"\n ```\n\n \"\"\"\n assert self.av_chan_id is not None, \"Please call _connect() first!\"\n return TutkIOCtrlMux(self.tutk_platform_lib, self.av_chan_id)\n\n def __enter__(self):\n self._connect()\n self._auth()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._disconnect()\n\n def recv_video_data(\n self,\n ) -> Iterator[\n Tuple[\n Optional[bytes], Union[tutk.FrameInfoStruct, tutk.FrameInfo3Struct]\n ]\n ]:\n \"\"\"A generator for returning raw video frames!\n\n By iterating over the return value of this function, you will\n get raw video frame data in the form of a bytes object. This\n is convenient for accessing the raw video data without doing\n the work of decoding or transcoding the actual video feed. 
If\n you want to save the video to disk, display it, or otherwise process\n the video, I highly recommend using `recv_video_frame` or\n `recv_video_frame_nparray` instead of this function.\n\n The second item in the tuple returned by this function, 'frame_info', is a useful\n set of metadata about the frame as returned by the camera. See\n [tutk.FrameInfoStruct][wyzecam.tutk.tutk.FrameInfoStruct] for more details about\n the contents of this object.\n\n Note that the format of this data is either raw h264 or HVEC H265 video. You will\n have to introspect the frame_info object to determine the format!\n\n\n ```python\n with wyzecam.WyzeIOTC() as wyze_iotc:\n with wyze_iotc.connect_and_auth(account, camera) as sess:\n for (frame, frame_info) in sess.recv_video_data():\n # do something with the video data! :)\n ```\n\n In order to use this, you will need to install [PyAV](https://pyav.org/docs/stable/).\n\n :returns: A generator, which when iterated over, yields a tuple containing the decoded image\n (as a [PyAV VideoFrame](https://pyav.org/docs/stable/api/video.html#av.video.frame.VideoFrame)),\n as well as metadata about the frame (in the form of a\n [tutk.FrameInfoStruct][wyzecam.tutk.tutk.FrameInfoStruct]).\n\n\n \"\"\"\n assert self.av_chan_id is not None, \"Please call _connect() first!\"\n\n while True:\n errno, frame_data, frame_info, frame_idx = tutk.av_recv_frame_data(\n self.tutk_platform_lib, self.av_chan_id\n )\n if errno < 0:\n if errno == tutk.AV_ER_DATA_NOREADY:\n time.sleep(1.0 / 40)\n continue\n elif errno == tutk.AV_ER_INCOMPLETE_FRAME:\n warnings.warn(\"Received incomplete frame\")\n continue\n elif errno == tutk.AV_ER_LOSED_THIS_FRAME:\n warnings.warn(\"Lost frame\")\n continue\n else:\n raise tutk.TutkError(errno)\n assert frame_info is not None, \"Got no frame info without an error!\"\n # if frame_info.frame_size != self.preferred_frame_size:\n # if frame_info.frame_size < 2:\n # logger.debug(\n # f\"skipping smaller frame at start of stream (frame_size={frame_info.frame_size})\"\n # )\n # continue\n # else:\n # # wyze doorbell has weird rotated image sizes.\n # if frame_info.frame_size - 3 != self.preferred_frame_size:\n # continue\n\n yield frame_data, frame_info\n\n def recv_video_frame(\n self,\n ) -> Iterator[\n Tuple[\n \"av.VideoFrame\", Union[tutk.FrameInfoStruct, tutk.FrameInfo3Struct]\n ]\n ]:\n \"\"\"A generator for returning decoded video frames!\n\n By iterating over the return value of this function, you will conveniently\n get nicely decoded frames in the form of a PyAV VideoFrame object. This is\n convenient for recording the video to disk.\n\n The second item in the tuple returned by this function, 'frame_info', is a useful\n set of metadata about the frame as returned by the camera. See\n [tutk.FrameInfoStruct][wyzecam.tutk.tutk.FrameInfoStruct] for more details about\n the contents of this object.\n\n ```python\n with wyzecam.WyzeIOTC() as wyze_iotc:\n with wyze_iotc.connect_and_auth(account, camera) as sess:\n for (frame, frame_info) in sess.recv_video_frame():\n # do something with the video data! 
:)\n ```\n\n In order to use this, you will need to install [PyAV](https://pyav.org/docs/stable/).\n\n :returns: A generator, which when iterated over, yields a tuple containing the decoded image\n (as a [PyAV VideoFrame](https://pyav.org/docs/stable/api/video.html#av.video.frame.VideoFrame)),\n as well as metadata about the frame (in the form of a\n [tutk.FrameInfoStruct][wyzecam.tutk.tutk.FrameInfoStruct]).\n \"\"\"\n if av is None:\n raise RuntimeError(\n \"recv_video_frame requires PyAv to parse video frames. \"\n \"Install with `pip install av` and try again.\"\n )\n\n codec = None\n for frame_data, frame_info in self.recv_video_data():\n if codec is None:\n codec = self._av_codec_from_frameinfo(frame_info)\n packets = codec.parse(frame_data)\n for packet in packets:\n frames = codec.decode(packet)\n for frame in frames:\n yield frame, frame_info\n\n def recv_video_frame_ndarray(\n self,\n ) -> Iterator[\n Tuple[\n \"np.ndarray[Any, Any]\",\n Union[tutk.FrameInfoStruct, tutk.FrameInfo3Struct],\n ]\n ]:\n \"\"\"A generator for returning decoded video frames!\n\n By iterating over the return value of this function, you will conveniently\n get nicely decoded frames in the form of a numpy array (suitable for\n [matplotlib.imshow](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.imshow.html)\n or [cv2.imshow](https://docs.opencv.org/master/dd/d43/tutorial_py_video_display.html).\n\n The second item in the tuple returned by this function, 'frame_info', is a useful\n set of metadata about the frame as returned by the camera. See\n [tutk.FrameInfoStruct][wyzecam.tutk.tutk.FrameInfoStruct] for more details about\n the contents of this object.\n\n ```python\n with wyzecam.WyzeIOTC() as wyze_iotc:\n with wyze_iotc.connect_and_auth(account, camera) as sess:\n for (frame, frame_info) in sess.recv_video_frame_ndarray():\n # do something with the video data! :)\n ```\n\n In order to use this, you will need to install [PyAV](https://pyav.org/docs/stable/)\n and [numpy](https://numpy.org/).\n\n :returns: A generator, which when iterated over, yields a tuple containing the decoded image\n (as a numpy array), as well as metadata about the frame (in the form of a\n [tutk.FrameInfoStruct][wyzecam.tutk.tutk.FrameInfoStruct]).\n \"\"\"\n if np is None:\n raise RuntimeError(\n \"recv_video_frame_ndarray requires numpy to convert to a numpy array. \"\n \"Install with `pip install numpy` and try again.\"\n )\n\n for frame, frame_info in self.recv_video_frame():\n img = frame.to_ndarray(format=\"bgr24\")\n if frame_info.frame_size in (3, 4):\n img = np.rot90(img, 3)\n img = np.ascontiguousarray(img, dtype=np.uint8)\n yield img, frame_info\n\n def recv_video_frame_ndarray_with_stats(\n self,\n stat_window_size: int = 210,\n draw_stats: Optional[\n str\n ] = \"{width}x{height} {kilobytes_per_second} kB/s {frames_per_second} FPS\",\n ) -> Iterator[\n Tuple[\n \"np.ndarray[Any, Any]\",\n Union[tutk.FrameInfoStruct, tutk.FrameInfo3Struct],\n Dict[str, int],\n ]\n ]:\n \"\"\"\n Does everything recv_video_frame_ndarray does, but also computes a number\n of useful / interesting debug metrics including effective framerate, bitrate,\n and frame size information. 
Optionally, if you specify a format string to the\n `draw_stats` function, this information will be used to draw a line of text\n onto the image in the top-right corner with this debug information.\n\n ```python\n with wyzecam.WyzeIOTC() as wyze_iotc:\n with wyze_iotc.connect_and_auth(account, camera) as sess:\n for (frame, frame_info, frame_stats) in sess.recv_video_frame_ndarray_with_stats():\n # do something with the video data! :)\n ```\n\n\n This method gives you an additional 'frame_stats' value every frame, which is a\n dict with the following keys:\n\n - \"bytes_per_second\"\n - \"kilobytes_per_second\"\n - \"window_duration\"\n - \"frames_per_second\"\n - \"width\"\n - \"height\"\n\n This dictionary is available in the draw_stats string as arguments to a python\n str.format() call, allowing you to quickly change the debug string in the top corner\n of the video.\n\n In order to use this, you will need to install [PyAV](https://pyav.org/docs/stable/),\n [numpy](https://numpy.org/), and [PyOpenCV](https://pypi.org/project/opencv-python/).\n\n :param stat_window_size: the number of consecutive frames to use as the window function\n for computing the above metrics. The larger the window size,\n the longer period over which the metrics are averaged. Note that\n this method is not performant for very large window sizes.\n :param draw_stats: if specified, this python format() string is used to draw some debug text\n in the upper right hand corner.\n\n :returns: A generator, which when iterated over, yields a 3-tuple containing the decoded image\n (as a numpy array), metadata about the frame (in the form of a\n [tutk.FrameInfoStruct][wyzecam.tutk.tutk.FrameInfoStruct]), and some performance\n statistics (in the form of a dict).\n\n \"\"\"\n stat_window = []\n for frame_ndarray, frame_info in self.recv_video_frame_ndarray():\n stat_window.append(frame_info)\n if len(stat_window) > stat_window_size:\n stat_window = stat_window[len(stat_window) - stat_window_size :]\n\n if len(stat_window) > 1:\n stat_window_start = (\n stat_window[0].timestamp\n + stat_window[0].timestamp_ms / 1_000_000\n )\n stat_window_end = (\n stat_window[-1].timestamp\n + stat_window[-1].timestamp_ms / 1_000_000\n )\n stat_window_duration = stat_window_end - stat_window_start\n if stat_window_duration <= 0:\n # wyze doorbell doesn't support timestamp_ms; workaround:\n stat_window_duration = (\n len(stat_window) / stat_window[-1].framerate\n )\n stat_window_total_size = sum(\n b.frame_len for b in stat_window[:-1]\n ) # skip the last reading\n bytes_per_second = int(\n stat_window_total_size / stat_window_duration\n )\n frames_per_second = int(len(stat_window) / stat_window_duration)\n else:\n bytes_per_second = 0\n stat_window_duration = 0\n frames_per_second = 0\n\n stats = {\n \"bytes_per_second\": bytes_per_second,\n \"kilobytes_per_second\": int(bytes_per_second / 1000),\n \"window_duration\": stat_window_duration,\n \"frames_per_second\": frames_per_second,\n \"width\": frame_ndarray.shape[1],\n \"height\": frame_ndarray.shape[0],\n }\n\n if draw_stats:\n text = draw_stats.format(**stats)\n cv2.putText(\n frame_ndarray,\n text,\n (50, 50),\n cv2.FONT_HERSHEY_DUPLEX,\n 1,\n (0, 0, 0),\n 2,\n cv2.LINE_AA,\n )\n cv2.putText(\n frame_ndarray,\n text,\n (50, 50),\n cv2.FONT_HERSHEY_DUPLEX,\n 1,\n (255, 255, 255),\n 1,\n cv2.LINE_AA,\n )\n\n yield frame_ndarray, frame_info, stats\n\n def _av_codec_from_frameinfo(self, frame_info):\n if frame_info.codec_id == 75:\n codec_name = \"h264\"\n elif frame_info.codec_id == 
78:\n codec_name = \"h264\"\n elif frame_info.codec_id == 80:\n codec_name = \"hevc\"\n else:\n codec_name = \"h264\"\n warnings.warn(f\"Unexpected codec! got {frame_info.codec_id}.\")\n # noinspection PyUnresolvedReferences\n codec = av.CodecContext.create(codec_name, \"r\")\n return codec\n\n def _connect(\n self,\n timeout_secs=10,\n channel_id=0,\n username=\"admin\",\n password=\"888888\",\n max_buf_size=5 * 1024 * 1024,\n ):\n try:\n self.state = WyzeIOTCSessionState.IOTC_CONNECTING\n session_id = tutk.iotc_get_session_id(self.tutk_platform_lib)\n if session_id < 0: # type: ignore\n raise tutk.TutkError(session_id)\n self.session_id = session_id\n\n session_id = tutk.iotc_connect_by_uid_parallel(\n self.tutk_platform_lib, self.camera.p2p_id, self.session_id\n )\n if session_id < 0: # type: ignore\n raise tutk.TutkError(session_id)\n self.session_id = session_id\n\n self.session_check()\n\n self.state = WyzeIOTCSessionState.AV_CONNECTING\n av_chan_id, pn_serv_type = tutk.av_client_start(\n self.tutk_platform_lib,\n self.session_id,\n username.encode(\"ascii\"),\n password.encode(\"ascii\"),\n timeout_secs,\n channel_id,\n )\n\n if av_chan_id < 0: # type: ignore\n raise tutk.TutkError(av_chan_id)\n self.av_chan_id = av_chan_id\n self.state = WyzeIOTCSessionState.CONNECTED\n except tutk.TutkError:\n self._disconnect()\n raise\n finally:\n if self.state != WyzeIOTCSessionState.CONNECTED:\n self.state = WyzeIOTCSessionState.CONNECTING_FAILED\n\n logger.info(\n f\"AV Client Start: \"\n f\"chan_id={self.av_chan_id} \"\n f\"expected_chan={channel_id}\"\n )\n\n tutk.av_client_set_max_buf_size(self.tutk_platform_lib, max_buf_size)\n\n def _auth(self):\n if self.state == WyzeIOTCSessionState.CONNECTING_FAILED:\n return\n\n assert (\n self.state == WyzeIOTCSessionState.CONNECTED\n ), f\"Auth expected state to be connected but not authed; state={self.state.name}\"\n\n self.state = WyzeIOTCSessionState.AUTHENTICATING\n try:\n with self.iotctrl_mux() as mux:\n challenge = mux.send_ioctl(K10000ConnectRequest())\n challenge_response = respond_to_ioctrl_10001(\n challenge.result(),\n challenge.resp_protocol,\n self.camera.enr,\n self.camera.product_model,\n self.camera.mac,\n self.account.phone_id,\n self.account.open_user_id,\n )\n auth_response = mux.send_ioctl(challenge_response).result()\n assert (\n auth_response[\"connectionRes\"] == \"1\"\n ), f\"Authentication did not succeed! {auth_response}\"\n self.camera.set_camera_info(auth_response[\"cameraInfo\"])\n\n if self.camera.product_model != \"WYZEDB3\":\n resolving = mux.send_ioctl(\n K10056SetResolvingBit(\n self.preferred_frame_size, self.preferred_bitrate\n )\n )\n\n mux.waitfor(resolving)\n else:\n # doorbell has a different message for setting resolutions\n resolving = mux.send_ioctl(\n K10052DBSetResolvingBit(\n self.preferred_frame_size, self.preferred_bitrate\n )\n )\n\n mux.waitfor(resolving)\n self.state = WyzeIOTCSessionState.AUTHENTICATION_SUCCEEDED\n except tutk.TutkError:\n self._disconnect()\n raise\n finally:\n if self.state != WyzeIOTCSessionState.AUTHENTICATION_SUCCEEDED:\n self.state = WyzeIOTCSessionState.AUTHENTICATION_FAILED\n return self\n\n def _disconnect(self):\n if self.av_chan_id is not None:\n tutk.av_client_stop(self.tutk_platform_lib, self.av_chan_id)\n self.av_chan_id = None\n if self.session_id is not None:\n tutk.iotc_session_close(self.tutk_platform_lib, self.session_id)\n self.session_id = None\n self.state = WyzeIOTCSessionState.DISCONNECTED\n"
] | [
[
"numpy.rot90",
"numpy.ascontiguousarray"
]
] |
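A hedged usage sketch for the session API in the row above. `WyzeIOTC`, `connect_and_auth`, and `recv_video_frame_ndarray` are taken from the code itself (its own docstrings use `wyzecam.WyzeIOTC()`); the `account`/`camera` placeholders and the cv2 display loop are illustrative assumptions, not part of this file.

```python
import cv2  # assumption: opencv-python is installed, used here only for display
import wyzecam

# Placeholders: in real use these come from wyzecam.api.get_user_info and
# wyzecam.api.get_camera_list, as the docstrings above describe.
account = ...  # WyzeAccount
camera = ...   # WyzeCamera

with wyzecam.WyzeIOTC() as iotc:
    with iotc.connect_and_auth(account, camera) as sess:
        for frame, frame_info in sess.recv_video_frame_ndarray():
            cv2.imshow("wyze", frame)  # BGR ndarray, already rotated for doorbell frame sizes
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
```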
AdamSpannbauer/ncaa_color_bracket | [
"f8827b94a74fda9582d98bfdd7a6941599bb4a09"
] | [
"color_bracket.py"
] | [
"import pandas as pd\nimport json\nimport colorsys\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#read in data\ncolor_data = pd.read_csv('data/school_color_df.csv', index_col=0)\nwith open(\"data/bracket.json\") as f:\n\tbracket = json.loads(f.read())\n\ndef gen_matchups(n):\n\t\"\"\"\n\tgenerate seed pairs for a round\n\n\t>>> gen_matchups(4)\n\t[(1, 4), (2, 3)]\n\t\"\"\"\n\tseeds = range(1, n+1)\n\tgames = []\n\tfor i in range(n/2):\n\t\tlow = seeds[i]\n\t\thi = seeds[-(i+1)]\n\t\tgames.append((low, hi))\n\treturn games\n\ndef hex_2_hsv(hex_col):\n\t\"\"\"\n\tconvert hex code to colorsys style hsv\n\n\t>>> hex_2_hsv('#f77f00')\n\t(0.08569500674763834, 1.0, 0.9686274509803922)\n\t\"\"\"\n\thex_col = hex_col.lstrip('#')\n\tr, g, b = tuple(int(hex_col[i:i+2], 16) for i in (0, 2 ,4))\n\treturn colorsys.rgb_to_hsv(r/255.0, g/255.0, b/255.0)\n\ndef play_game(a, b):\n\t\"\"\"\n\tlook up each team's color and compare hsv\n\treturn true if team a hsv > team b hsv\n\n\t>>> play_game(\"N.C. Central\", \"Texas Southern\")\n\tTrue\n\t\"\"\"\n\ta_hex = list(color_data.loc[color_data['name'] == a, 'color'])[0]\n\tb_hex = list(color_data.loc[color_data['name'] == b, 'color'])[0]\n\t\n\ta_wins = hex_2_hsv(a_hex) > hex_2_hsv(b_hex)\n\t\n\treturn a_wins\n\n######################\n# PLAY-IN GAMES\n######################\n#west 16 \"NCC/TSU\"\na = \"N.C. Central\"\nb = \"Texas Southern\"\na_wins = play_game(a, b)\nbracket['west']['16'] = [b,a][a_wins]\n\n#east 11 \"SBU/UCLA\"\na = \"St. Bonaventure\"\nb = \"UCLA\"\na_wins = play_game(a, b)\nbracket['east']['11'] = [b,a][a_wins]\n\n#east 16 \"LIUB/RAD\"\na = \"LIU Brooklyn\"\nb = \"Radford\"\na_wins = play_game(a, b)\nbracket['east']['16'] = [b,a][a_wins]\n\n#midwest 11 \"ASU/SYR\"\na = \"Arizona St.\"\nb = \"Syracuse\"\na_wins = play_game(a, b)\nbracket['midwest']['11'] = [b,a][a_wins]\n\n######################\n# BRACKET\n######################\nrounds = [gen_matchups(x) for x in [16, 8, 4, 2]]\nfor region, teams in bracket.items():\n\tfor i, r in enumerate(rounds):\n\t\tfor game in r:\n\t\t\ta = teams[str(game[0])]\n\t\t\tb = teams[str(game[1])]\n\n\t\t\ta_wins = play_game(a, b)\n\n\t\t\tteams[str(game[0])] = [b,a][a_wins]\n\n\t\t\tdel teams[str(game[1])]\n\t\t\n\t\tfile_out = 'data/{}_round_{}.json'.format(region, str(i+2))\n\t\twith open(file_out, 'w') as f:\n\t\t\tjson.dump(teams, f, indent=2)\n\n######################\n# FINAL FOUR\n######################\n#west v midwest\nwith open('data/west_round_5.json') as f:\n\tta = json.loads(f.read())['1']\nwith open('data/midwest_round_5.json') as f:\n\ttb = json.loads(f.read())['1']\n\nprint('{} WINS THE WEST!!'.format(ta))\nprint('{} WINS THE MIDWEST!!'.format(tb))\n\na_wins = play_game(ta, tb)\ntop_winner = [tb,ta][a_wins]\ntop_loser = [ta,tb][a_wins]\n\n#south v east\nwith open('data/south_round_5.json') as f:\n\tba = json.loads(f.read())['1']\nwith open('data/east_round_5.json') as f:\n\tbb = json.loads(f.read())['1']\n\nprint('{} WINS THE SOUTH!!'.format(ba))\nprint('{} WINS THE EAST!!'.format(bb))\n\na_wins = play_game(ba, bb)\nbottom_winner = [bb,ba][a_wins]\nbottom_loser = [ba,bb][a_wins]\n\n######################\n# CHAMPIONSHIP\n######################\ntop_wins = play_game(top_winner, bottom_winner)\n\nbig_winner = [bottom_winner, top_winner][top_wins]\nsecond = [top_winner, bottom_winner][top_wins]\n\nprint('\\n\\n{} WINS IT ALL!!!!!!!'.format(big_winner))\n\n######################\n# PLOT FINAL FOUR PODIUM\n######################\nvals = [4,2,1,1]\nlabs = [big_winner, second, 
bottom_loser, top_loser]\ncols = [list(color_data.loc[color_data['name'] == x, 'color'])[0] for x in labs]\n\ny_pos = np.arange(len(labs))\n\nplt.bar(y_pos, vals, color = cols)\nplt.xticks(y_pos, labs)\nplt.yticks([])\nplt.title('HSV Final Four Podium')\n\nplt.savefig('readme/hsv_podium.png')\n\nplt.show()\n\n\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks"
]
] |
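A self-contained check of the row's core mechanic: colors are compared as `colorsys` HSV tuples, and the lexicographically larger tuple wins the game. The conversion function is copied from the row above; the two hex codes are made-up stand-ins for team colors.

```python
import colorsys

def hex_2_hsv(hex_col):
    # copied from color_bracket.py above: hex string -> (h, s, v), each in [0, 1]
    hex_col = hex_col.lstrip('#')
    r, g, b = tuple(int(hex_col[i:i + 2], 16) for i in (0, 2, 4))
    return colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)

# Tuple comparison is lexicographic, so hue dominates: a navy hue (~0.58)
# sorts above an orange hue (~0.09), so the orange "team" loses this game.
print(hex_2_hsv('#f77f00') > hex_2_hsv('#00274c'))  # False
```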
ChrisMorter/trieste | [
"229ebb8a308e970b2ad2f4a10429209099e3a4f8"
] | [
"docs/notebooks/asynchronous_greedy_multiprocessing.pct.py"
] | [
"# %% [markdown]\n# # Asynchronous Bayesian optimization with Trieste\n#\n# In this notebook we demonstrate Trieste's ability to perform asynchronous Bayesian optimisation, as is suitable for scenarios where the objective function can be run for several points in parallel but where observations might return back at different times. To avoid wasting resources waiting for the evaluation of the whole batch, we immediately request the next point asynchronously, taking into account points that are still being evaluated. Besides saving resources, asynchronous approach also can potentially [improve sample efficiency](https://arxiv.org/abs/1901.10452) in comparison with synchronous batch strategies, although this is highly dependent on the use case.\n#\n# To contrast this approach with regular [batch optimization](batch_optimization.ipynb), this notebook also shows how to run parallel synchronous batch approach.\n\n# %%\n# silence TF warnings and info messages, only print errors\n# https://stackoverflow.com/questions/35911252/disable-tensorflow-debugging-information\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nimport tensorflow as tf\ntf.get_logger().setLevel(\"ERROR\")\nimport numpy as np\nimport time\nimport timeit\n\n\n# %% [markdown]\n# First, let's define a simple objective that will emulate evaluations taking variable time. We will be using a classic Bayesian optimisation benchmark function [Branin](https://www.sfu.ca/~ssurjano/branin.html) with a sleep call inserted in the middle of the calculation to emulate delay. Our sleep delay is a scaled sum of all input values to make sure delays are uneven.\n# %%\nfrom trieste.objectives import scaled_branin\n\ndef objective(points, sleep=True):\n if points.shape[1] != 2:\n raise ValueError(f\"Incorrect input shape, expected (*, 2), got {points.shape}\")\n\n observations = []\n for point in points:\n observation = scaled_branin(point)\n if sleep:\n # insert some artificial delay\n # increases linearly with the absolute value of points\n # which means our evaluations will take different time\n delay = 3 * np.sum(point)\n pid = os.getpid()\n print(\n f\"Process {pid}: Objective: pretends like it's doing something for {delay:.2}s\",\n flush=True,\n )\n time.sleep(delay)\n observations.append(observation)\n\n return np.array(observations)\n\n# test the defined objective function\nobjective(np.array([[0.1, 0.5]]), sleep=False)\n\n# %% [markdown]\n# As always, we need to prepare the model and some initial data to kick-start the optimization process.\n\n# %%\nfrom trieste.space import Box\nfrom trieste.data import Dataset\nfrom trieste.objectives import SCALED_BRANIN_MINIMUM\n\nsearch_space = Box([0, 0], [1, 1])\nnum_initial_points = 3\ninitial_query_points = search_space.sample(num_initial_points)\ninitial_observations = objective(initial_query_points.numpy(), sleep=False)\ninitial_data = Dataset(\n query_points=initial_query_points,\n observations=tf.constant(initial_observations, dtype=tf.float64),\n)\n\nimport gpflow\nfrom trieste.models.gpflow import GaussianProcessRegression\n\n\ndef build_model(data):\n variance = tf.math.reduce_variance(data.observations)\n kernel = gpflow.kernels.RBF(variance=variance)\n gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)\n gpflow.set_trainable(gpr.likelihood, False)\n return GaussianProcessRegression(gpr)\n\n# these imports will be used later for optimization\nfrom trieste.acquisition import LocalPenalizationAcquisitionFunction\nfrom trieste.acquisition.rule import 
AsynchronousGreedy, EfficientGlobalOptimization\nfrom trieste.ask_tell_optimization import AskTellOptimizer\n\n\n# %% [markdown]\n# ## Multiprocessing setup\n#\n# To keep this notebook as reproducible as possible, we will only be using Python's multiprocessing package here. In this section we will explain our setup and define some common code to be used later.\n#\n# In both synchronous and asynchronous scenarios we will have a fixed set of worker processes performing observations. We will also have a main process responsible for optimization process with Trieste. When Trieste suggests a new point, it is inserted into a points queue. One of the workers picks this point from the queue, performs the observation, and inserts the output into the observations queue. The main process then picks up the observation from the queue, at which moment it either waits for the rest of the points in the batch to come back (synchronous scenario) or immediately suggests a new point (asynchronous scenario). This process continues either for a certain number of iterations or until we accumulate necessary number of observations.\n#\n# The overall setup is illustrated in this diagram:\n# \n\n# %%\n# Necessary multiprocessing primitives\nfrom multiprocessing import Manager, Process\n\n# %% [markdown]\n# We now define several common functions to implement the described setup. First we define a worker function that will be running a single observation in a separate process. Worker takes both queues as an input, reads next point from the points queue, makes an observation, and inserts observed data into the observations queue.\n\n# %%\n\ndef observer_proc(points_queue, observations_queue):\n pid = os.getpid()\n\n while True:\n point_to_observe = points_queue.get()\n if point_to_observe is None:\n return\n\n print(f\"Process {pid}: Observer : observing data at point {point_to_observe}\", flush=True)\n new_observation = objective(point_to_observe, sleep=enable_sleep_delays)\n new_data = (point_to_observe, new_observation)\n\n print(f\"Process {pid}: Observer : observed data {new_data}\", flush=True)\n\n observations_queue.put(new_data)\n\n# %% [markdown]\n# Next we define two helper functions, one is to create a certain number of worker processes, and another is to terminate them once we are done.\n\n# %%\n\ndef create_worker_processes(n_workers, points_queue, obseverations_queue):\n observer_processes = []\n for i in range(n_workers):\n worker_proc = Process(target=observer_proc, args=(points_queue, obseverations_queue))\n worker_proc.daemon = True\n worker_proc.start()\n\n observer_processes.append(worker_proc)\n\n return observer_processes\n\ndef terminate_processes(processes):\n for prc in processes:\n prc.terminate()\n prc.join()\n prc.close()\n\n# %% [markdown]\n# Finally we set some common parameters. See comments below for explanation of what each one means.\n# %%\n# Number of worker processes to run simultaneously\n# Setting this to 1 will turn both setups into non-batch sequential optimization\nnum_workers = 3\n# Number of iterations to run the sycnhronous scenario for\nnum_iterations = 10\n# Number of observations to collect in the asynchronous scenario\nnum_observations = num_workers * num_iterations\n# Set this flag to False to disable sleep delays in case you want the notebook to execute quickly\nenable_sleep_delays = True\n\n# %% [markdown]\n# ## Asynchronous optimization\n# This section runs the asynchronous optimization routine. 
We first setup the [ask/tell optimizer](ask_tell_optimization.ipynb) as we cannot hand over the evaluation of the objective to Trieste. Next we create thread-safe queues for points and observations, and run the optimization loop.\n#\n# Crucially, even though we are using batch acquisition function Local Penalization, we specify batch size of 1. This is because we don't really want a batch. Since the amount of workers we have is fixed, whenever we see a new observation we only need one point back. However this process can only be done with acquisition functions that implement greedy batch collection strategies, because they are able to take into account points that are currently being observed (in Trieste we call them \"pending\"). Trieste currently provides two such functions: Local Penalization and GIBBON. Notice that we use **AsynchronousGreedy** rule specifically designed for using greedy batch acquisition functions in asynchronous scenarios.\n\n# %%\n\n# setup Ask Tell BO\nmodel = build_model(initial_data)\n\nlocal_penalization_acq = LocalPenalizationAcquisitionFunction(search_space, num_samples=2000)\nlocal_penalization_rule = AsynchronousGreedy(builder=local_penalization_acq) # type: ignore\n\nasync_bo = AskTellOptimizer(search_space, initial_data, model, local_penalization_rule)\n\n# retrieve process id for nice logging\npid = os.getpid()\n# create point and observation queues\nm = Manager()\npq = m.Queue()\noq = m.Queue()\n# keep track of all workers we have launched\nobserver_processes = []\n# counter to keep track of collected observations\npoints_observed = 0\n\nstart = timeit.default_timer()\ntry:\n observer_processes = create_worker_processes(num_workers, pq, oq)\n\n # init the queue with first batch of points\n for _ in range(num_workers):\n point = async_bo.ask()\n pq.put(np.atleast_2d(point.numpy()))\n\n while points_observed < num_observations:\n # keep asking queue for new observations until one arrives\n try:\n new_data = oq.get_nowait()\n print(f\"Process {pid}: Main : received data {new_data}\", flush=True)\n except:\n continue\n\n # new_data is a tuple of (point, observation value)\n # here we turn it into a Dataset and tell of it Trieste\n points_observed += 1\n new_data = Dataset(\n query_points=tf.constant(new_data[0], dtype=tf.float64),\n observations=tf.constant(new_data[1], dtype=tf.float64),\n )\n async_bo.tell(new_data)\n\n # now we can ask Trieste for one more point\n # and feed that back into the points queue\n point = async_bo.ask()\n print(f\"Process {pid}: Main : acquired point {point}\", flush=True)\n pq.put(np.atleast_2d(point))\nfinally:\n terminate_processes(observer_processes)\nstop = timeit.default_timer()\n\n# Collect the observations, compute the running time\nasync_lp_observations = async_bo.to_result().try_get_final_dataset().observations - SCALED_BRANIN_MINIMUM\nasync_lp_time = stop - start\nprint(f\"Got {len(async_lp_observations)} observations in {async_lp_time:.2f}s\")\n\n# %% [markdown]\n# ## Synchronous parallel optimization\n#\n# This section runs the synchronous parallel optimization with Trieste. We again use Local Penalization acquisition function, but this time with batch size equal to the number of workers we have available. Once Trieste suggests the batch, we add all points to the point queue, and workers immediatelly pick them up, one point per worker. 
Therefore all points in the batch are evaluated in parallel.\n\n# %%\n# setup Ask Tell BO\nmodel = build_model(initial_data)\n\nlocal_penalization_acq = LocalPenalizationAcquisitionFunction(search_space, num_samples=2000)\nlocal_penalization_rule = EfficientGlobalOptimization( # type: ignore\n num_query_points=num_workers, builder=local_penalization_acq\n)\n\nsync_bo = AskTellOptimizer(search_space, initial_data, model, local_penalization_rule)\n\n\n# retrieve process id for nice logging\npid = os.getpid()\n# create point and observation queues\nm = Manager()\npq = m.Queue()\noq = m.Queue()\n# keep track of all workers we have launched\nobserver_processes = []\n\nstart = timeit.default_timer()\ntry:\n observer_processes = create_worker_processes(num_workers, pq, oq)\n\n # BO loop starts here\n for i in range(num_iterations):\n print(f\"Process {pid}: Main : iteration {i} starts\", flush=True)\n\n # get a batch of points from Trieste, send them to points queue\n # each worker picks up a point and processes it\n points = sync_bo.ask()\n for point in points.numpy():\n pq.put(point.reshape(1, -1)) # reshape is to make point a 2d array\n\n # now we wait for all workers to finish\n # we create an empty dataset and wait\n # until we collected as many observations in it\n # as there were points in the batch\n all_new_data = Dataset(\n tf.zeros((0, initial_data.query_points.shape[1]), tf.float64),\n tf.zeros((0, initial_data.observations.shape[1]), tf.float64),\n )\n while len(all_new_data) < num_workers:\n # this line blocks the process until new data is available in the queue\n new_data = oq.get()\n print(f\"Process {pid}: Main : received data {new_data}\", flush=True)\n\n new_data = Dataset(\n query_points=tf.constant(new_data[0], dtype=tf.float64),\n observations=tf.constant(new_data[1], dtype=tf.float64),\n )\n\n all_new_data = all_new_data + new_data\n\n # tell Trieste of new batch of observations\n sync_bo.tell(all_new_data)\n\nfinally:\n terminate_processes(observer_processes)\nstop = timeit.default_timer()\n\n# Collect the observations, compute the running time\nsync_lp_observations = (\n sync_bo.to_result().try_get_final_dataset().observations - SCALED_BRANIN_MINIMUM\n)\nsync_lp_time = stop - start\nprint(f\"Got {len(sync_lp_observations)} observations in {sync_lp_time:.2f}s\")\n\n\n# %% [markdown]\n# ## Comparison\n# To compare outcomes of sync and async runs, let's plot their respective regrets side by side, and print out the running time. For this toy problem we expect async scenario to run a little bit faster on machines with multiple CPU.\n\n# %%\nfrom util.plotting import plot_regret\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots(1, 2)\n\nsync_lp_min_idx = tf.squeeze(tf.argmin(sync_lp_observations, axis=0))\nasync_lp_min_idx = tf.squeeze(tf.argmin(async_lp_observations, axis=0))\n\nplot_regret(\n sync_lp_observations.numpy(), ax[0], num_init=len(initial_data), idx_best=sync_lp_min_idx\n)\nax[0].set_yscale(\"log\")\nax[0].set_ylabel(\"Regret\")\nax[0].set_ylim(0.0000001, 100)\nax[0].set_xlabel(\"# evaluations\")\nax[0].set_title(f\"Sync LP, {len(sync_lp_observations)} points, time {sync_lp_time:.2f}\")\n\nplot_regret(\n async_lp_observations.numpy(), ax[1], num_init=len(initial_data), idx_best=async_lp_min_idx\n)\nax[1].set_yscale(\"log\")\nax[1].set_ylabel(\"Regret\")\nax[1].set_ylim(0.0000001, 100)\nax[1].set_xlabel(\"# evaluations\")\nax[1].set_title(f\"Async LP, {len(async_lp_observations)} points, time {async_lp_time:.2f}s\")\n\nfig.tight_layout()\n"
] | [
[
"numpy.array",
"tensorflow.math.reduce_variance",
"tensorflow.zeros",
"numpy.sum",
"matplotlib.pyplot.subplots",
"tensorflow.constant",
"tensorflow.argmin",
"tensorflow.get_logger",
"numpy.atleast_2d"
]
] |
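Distilled from the notebook above: a plain, single-process Ask/Tell loop with none of the queueing, so the ask/tell mechanics are visible on their own. Imports mirror the notebook; the iteration count, initial sample size, and the use of the optimizer's default acquisition rule are assumptions made for brevity.

```python
import gpflow
from trieste.ask_tell_optimization import AskTellOptimizer
from trieste.data import Dataset
from trieste.models.gpflow import GaussianProcessRegression
from trieste.objectives import scaled_branin
from trieste.space import Box

search_space = Box([0, 0], [1, 1])
query_points = search_space.sample(5)
# assumption: scaled_branin broadcasts over a leading batch dimension
data = Dataset(query_points, scaled_branin(query_points))

gpr = gpflow.models.GPR(data.astuple(), gpflow.kernels.RBF(), noise_variance=1e-5)
bo = AskTellOptimizer(search_space, data, GaussianProcessRegression(gpr))

for _ in range(3):
    point = bo.ask()  # assumption: default rule (EfficientGlobalOptimization) applies
    bo.tell(Dataset(point, scaled_branin(point)))

print(bo.to_result().try_get_final_dataset())
```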
andrewli77/HAAR-A-Hierarchical-RL-Algorithm | [
"f468118e231938606593202898551035a8e60166"
] | [
"sandbox/snn4hrl/regressors/latent_regressor.py"
] | [
"import numpy as np\n\nfrom rllab.core.serializable import Serializable\nfrom rllab.core.parameterized import Parameterized\nfrom rllab.misc import logger\n\n# the regressor will be choosen to be from the same distribution as the latents\nfrom rllab.regressors.gaussian_mlp_regressor import GaussianMLPRegressor\nfrom rllab.regressors.categorical_mlp_regressor import CategoricalMLPRegressor # could be Categorical_oneAxis\nfrom sandbox.snn4hrl.regressors.categorical_recurrent_regressor import CategoricalRecurrentRegressor\nfrom sandbox.snn4hrl.regressors.bernoulli_mlp_regressor import BernoulliMLPRegressor\nfrom sandbox.snn4hrl.regressors.bernoulli_recurrent_regressor import BernoulliRecurrentRegressor\n\nfrom rllab.optimizers.first_order_optimizer import FirstOrderOptimizer\n\n\nclass Latent_regressor(Parameterized, Serializable):\n def __init__(\n self,\n env_spec,\n policy,\n recurrent=False,\n predict_all=True,\n obs_regressed='all',\n act_regressed='all',\n use_only_sign=False,\n noisify_traj_coef=0,\n optimizer=None, # this defaults to LBFGS\n regressor_args=None, # here goes all args straight to the regressor: hidden_sizes, TR, step_size....\n ):\n \"\"\"\n :param predict_all: this is only for the recurrent case, to use all hidden states as predictions\n :param obs_regressed: list of index of the obs variables used to fit the regressor. default string 'all'\n :param act_regressed: list of index of the act variables used to fit the regressor. default string 'all'\n :param regressor_args:\n \"\"\"\n self.env_spec = env_spec\n self.policy = policy\n self.latent_dim = policy.latent_dim\n self.recurrent = recurrent\n self.predict_all = predict_all\n self.use_only_sign = use_only_sign\n self.noisify_traj_coef = noisify_traj_coef\n self.regressor_args = regressor_args\n # decide what obs variables will be regressed upon\n if obs_regressed == 'all':\n self.obs_regressed = list(range(env_spec.observation_space.flat_dim))\n else:\n self.obs_regressed = obs_regressed\n # decide what action variables will be regressed upon\n if act_regressed == 'all':\n self.act_regressed = list(range(env_spec.action_space.flat_dim))\n else:\n self.act_regressed = act_regressed\n # shape the input dimension of the NN for the above decisions.\n self.obs_act_dim = len(self.obs_regressed) + len(self.act_regressed)\n\n Serializable.quick_init(self, locals()) # ??\n\n if regressor_args is None:\n regressor_args = dict()\n\n if optimizer == 'first_order':\n self.optimizer = FirstOrderOptimizer(\n max_epochs=10, # both of these are to match Rocky's 10\n batch_size=128,\n )\n elif optimizer is None:\n self.optimizer = None\n else:\n raise NotImplementedError\n\n if policy.latent_name == 'bernoulli':\n if self.recurrent:\n self._regressor = BernoulliRecurrentRegressor(\n input_shape=(self.obs_act_dim,),\n output_dim=policy.latent_dim,\n optimizer=self.optimizer,\n predict_all=self.predict_all,\n **regressor_args\n )\n else:\n self._regressor = BernoulliMLPRegressor(\n input_shape=(self.obs_act_dim,),\n output_dim=policy.latent_dim,\n optimizer=self.optimizer,\n **regressor_args\n )\n elif policy.latent_name == 'categorical':\n if self.recurrent:\n self._regressor = CategoricalRecurrentRegressor( # not implemented\n input_shape=(self.obs_act_dim,),\n output_dim=policy.latent_dim,\n optimizer=self.optimizer,\n # predict_all=self.predict_all,\n **regressor_args\n )\n else:\n self._regressor = CategoricalMLPRegressor(\n input_shape=(self.obs_act_dim,),\n output_dim=policy.latent_dim,\n optimizer=self.optimizer,\n 
**regressor_args\n )\n elif policy.latent_name == 'normal':\n self._regressor = GaussianMLPRegressor(\n input_shape=(self.obs_act_dim,),\n output_dim=policy.latent_dim,\n optimizer=self.optimizer,\n **regressor_args\n )\n else:\n raise NotImplementedError\n\n def fit(self, paths):\n logger.log('fitting the regressor...')\n if self.recurrent:\n observations = np.array([p[\"observations\"][:, self.obs_regressed] for p in paths])\n actions = np.array([p[\"actions\"][:, self.act_regressed] for p in paths])\n obs_actions = np.concatenate([observations, actions], axis=2)\n if self.noisify_traj_coef:\n obs_actions += np.random.normal(loc=0.0,\n scale=float(np.mean(np.abs(obs_actions))) * self.noisify_traj_coef,\n size=np.shape(obs_actions))\n latents = np.array([p['agent_infos']['latents'] for p in paths])\n self._regressor.fit(obs_actions, latents) # the input shapes are (traj, time, dim)\n else:\n observations = np.concatenate([p[\"observations\"][:, self.obs_regressed] for p in paths])\n actions = np.concatenate([p[\"actions\"][:, self.act_regressed] for p in paths])\n obs_actions = np.concatenate([observations, actions], axis=1)\n latents = np.concatenate([p['agent_infos'][\"latents\"] for p in paths])\n if self.noisify_traj_coef:\n obs_actions += np.random.normal(loc=0.0,\n scale=float(np.mean(np.abs(obs_actions))) * self.noisify_traj_coef,\n size=np.shape(obs_actions))\n self._regressor.fit(obs_actions, latents.reshape((-1, self.latent_dim))) # why reshape??\n logger.log('done fitting the regressor')\n\n def predict(self, path):\n if self.recurrent:\n obs_actions = [np.concatenate([path[\"observations\"][:, self.obs_regressed],\n path[\"actions\"][:, self.act_regressed]],\n axis=1)] # is this the same??\n else:\n obs_actions = np.concatenate([path[\"observations\"][:, self.obs_regressed],\n path[\"actions\"][:, self.act_regressed]], axis=1)\n if self.noisify_traj_coef:\n obs_actions += np.random.normal(loc=0.0, scale=float(np.mean(np.abs(obs_actions))) * self.noisify_traj_coef,\n size=np.shape(obs_actions))\n if self.use_only_sign:\n obs_actions = np.sign(obs_actions)\n return self._regressor.predict(obs_actions).flatten()\n\n def get_output_p(self, path): # this gives the p_dist for every step: the latent posterior wrt obs_act\n if self.recurrent:\n obs_actions = [np.concatenate([path[\"observations\"][:, self.obs_regressed],\n path[\"actions\"][:, self.act_regressed]],\n axis=1)] # is this the same??\n else:\n obs_actions = np.concatenate([path[\"observations\"][:, self.obs_regressed],\n path[\"actions\"][:, self.act_regressed]], axis=1)\n if self.noisify_traj_coef:\n obs_actions += np.random.normal(loc=0.0, scale=float(np.mean(np.abs(obs_actions))) * self.noisify_traj_coef,\n size=np.shape(obs_actions))\n if self.use_only_sign:\n obs_actions = np.sign(obs_actions)\n if self.policy.latent_name == 'bernoulli':\n return self._regressor._f_p(obs_actions).flatten()\n elif self.policy.latent_name == 'normal':\n return self._regressor._f_pdists(obs_actions).flatten()\n\n def get_param_values(self, **tags):\n return self._regressor.get_param_values(**tags)\n\n def set_param_values(self, flattened_params, **tags):\n self._regressor.set_param_values(flattened_params, **tags)\n\n def predict_log_likelihood(self, paths, latents):\n if self.recurrent:\n observations = np.array([p[\"observations\"][:, self.obs_regressed] for p in paths])\n actions = np.array([p[\"actions\"][:, self.act_regressed] for p in paths])\n obs_actions = np.concatenate([observations, actions], axis=2) # latents must match 
first 2dim: (batch,time)\n else:\n observations = np.concatenate([p[\"observations\"][:, self.obs_regressed] for p in paths])\n actions = np.concatenate([p[\"actions\"][:, self.act_regressed] for p in paths])\n obs_actions = np.concatenate([observations, actions], axis=1)\n latents = np.concatenate(latents, axis=0)\n if self.noisify_traj_coef:\n noise = np.random.multivariate_normal(mean=np.zeros_like(np.mean(obs_actions, axis=0)),\n cov=np.diag(np.mean(np.abs(obs_actions),\n axis=0) * self.noisify_traj_coef),\n size=np.shape(obs_actions)[0])\n obs_actions += noise\n if self.use_only_sign:\n obs_actions = np.sign(obs_actions)\n return self._regressor.predict_log_likelihood(obs_actions, latents) # see difference with fit above...\n\n def lowb_mutual(self, paths, times=(0, None)):\n if self.recurrent:\n observations = np.array([p[\"observations\"][times[0]:times[1], self.obs_regressed] for p in paths])\n actions = np.array([p[\"actions\"][times[0]:times[1], self.act_regressed] for p in paths])\n obs_actions = np.concatenate([observations, actions], axis=2)\n latents = np.array([p['agent_infos']['latents'][times[0]:times[1]] for p in paths])\n else:\n observations = np.concatenate([p[\"observations\"][times[0]:times[1], self.obs_regressed] for p in paths])\n actions = np.concatenate([p[\"actions\"][times[0]:times[1], self.act_regressed] for p in paths])\n obs_actions = np.concatenate([observations, actions], axis=1)\n latents = np.concatenate([p['agent_infos'][\"latents\"][times[0]:times[1]] for p in paths])\n if self.noisify_traj_coef:\n obs_actions += np.random.multivariate_normal(mean=np.zeros_like(np.mean(obs_actions,axis=0)),\n cov=np.diag(np.mean(np.abs(obs_actions),\n axis=0) * self.noisify_traj_coef),\n size=np.shape(obs_actions)[0])\n if self.use_only_sign:\n obs_actions = np.sign(obs_actions)\n H_latent = self.policy.latent_dist.entropy(self.policy.latent_dist_info) # sum of entropies latents in\n\n return H_latent + np.mean(self._regressor.predict_log_likelihood(obs_actions, latents))\n\n\n def log_diagnostics(self, paths):\n logger.record_tabular(self._regressor._name + 'LowerB_MI', self.lowb_mutual(paths))\n logger.record_tabular(self._regressor._name + 'LowerB_MI_5first', self.lowb_mutual(paths, times=(0, 5)))\n logger.record_tabular(self._regressor._name + 'LowerB_MI_5last', self.lowb_mutual(paths, times=(-5, None)))\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.mean",
"numpy.shape",
"numpy.sign",
"numpy.abs"
]
] |
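The regressor's `noisify_traj_coef` option above is easy to miss: it perturbs the (observation, action) matrix with Gaussian noise whose standard deviation is proportional to the mean absolute input value. A standalone numpy rendering of just that step, with made-up shapes:

```python
import numpy as np

# made-up stand-in for the regressor's (samples, obs+act dims) input matrix
obs_actions = np.random.uniform(-1, 1, size=(100, 8))
noisify_traj_coef = 0.1

# same expression as in Latent_regressor.fit / predict above
noise_scale = float(np.mean(np.abs(obs_actions))) * noisify_traj_coef
obs_actions += np.random.normal(loc=0.0, scale=noise_scale, size=np.shape(obs_actions))
```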
FankaRoy/pyrockphy | [
"c29ebc3de25b4526b396fd477fb5dd7064cb0bda"
] | [
"critpor.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 11 19:22:45 2018\r\n\r\n@author: Fanka, W. Roye T.\r\n\r\nRef: MATLAB code by T. Mukerji; \r\n function [vpcr,vscr,rocr,mcr,kcr,mucr]=critpor(vp1,vs1,ro1,vp2,vs2,ro2,\\\r\n phicr)\r\n\"\"\"\r\n#import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef CritPor(vp1,vs1,ro1,vp2,vs2,ro2,phicr):\r\n \"\"\"\r\n Function computes velocities, elastic moduli, and bulk density at the \r\n critical porosity.\r\n # =========================================================================\r\n # INPUTS\r\n # =========================================================================\r\n Takes the following inputs:\r\n vp1,vs1,vp2,vs2: Velocities of the two constituents\r\n ro1,ro2: Densities of the two constituents\r\n phicr: Critical porosity\r\n # =========================================================================\r\n # OUTPUTS\r\n # =========================================================================\r\n Function returns a list of outputs:\r\n [vpcr,vscr,rocr,mcr,kcr,mucr]\r\n vpcr, vscr: Velocities on critical porosity\r\n rocr: Bulk density on critical porosity\r\n kcr, mucr: Bulk and shear moduli on critical porosity\r\n rocr: Bulk density on critical porosity\r\n mcr: \r\n \"\"\"\r\n\r\n m1=ro1*vp1**2; m2=ro2*vp2**2; mu1=ro1*vs1**2; mu2=ro2*vs2**2;\r\n k1=m1-(4/3)*mu1; k2=m2-(4/3)*mu2;\r\n \r\n mcr=(m1*m2)/((1-phicr)*m2+phicr*m1);\r\n mucr=(mu1*mu2)/((1-phicr)*mu2+phicr*mu1);\r\n kcr=(k1*k2)/((1-phicr)*k2+phicr*k1);\r\n rocr=(1-phicr)*ro1+phicr*ro2; vscr=np.sqrt(mucr/rocr);\r\n vpcr=np.sqrt((kcr+(4/3)*mucr)/rocr);\r\n \r\n outlistcr = [vpcr,vscr,rocr,mcr,kcr,mucr]\r\n \r\n# plt.plot(phicr,kcr,'b',phicr,mucr,'r')\r\n return outlistcr\r\n\r\n"
] | [
[
"numpy.sqrt"
]
] |
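An example call for the row above, with quartz-like and brine-like inputs (the values are illustrative, not from the source; the function is unit-agnostic as long as velocities and densities use consistent units). Setting vs2 = 0 for the fluid makes the shear modulus at critical porosity collapse to zero, as a suspension should:

```python
from critpor import CritPor  # assumes critpor.py is on the import path

# mineral: vp1, vs1, ro1; fluid: vp2, vs2=0, ro2; critical porosity 0.4
vpcr, vscr, rocr, mcr, kcr, mucr = CritPor(6.04, 4.12, 2.65, 1.5, 0.0, 1.0, 0.4)
print(vpcr, vscr, rocr)  # vscr == 0.0 because the fluid carries no shear
```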
zyxsachin/pyWATTS | [
"0ecf0263a620a77c0dafbf94e1ba251bf04720be"
] | [
"pywatts/modules/profile_neural_network.py"
] | [
"import logging\nfrom typing import Dict\n\nimport tensorflow\nimport numpy as np\nimport xarray as xr\nfrom pywatts.core.base import BaseEstimator\nfrom pywatts.core.filemanager import FileManager\nfrom pywatts.utils._xarray_time_series_utils import numpy_to_xarray\n\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import activations, optimizers, initializers\nfrom tensorflow import keras\n\n\nclass ProfileNeuralNetwork(BaseEstimator):\n \"\"\"\n This module implements the profile neural network. It is a model for forecasting short-term electrical load.\n Therefore, it takes into account, trend information, calendar_information, historical input but also the profile\n of the load.\n Note the horizon is extracted from the data\n If you use it please cite:\n Benedikt Heidrich, Marian Turowski, Nicole Ludwig, Ralf Mikut, and Veit Hagenmeyer. 2020.\n Forecasting energy time series with profile neural networks. In Proceedings of the Eleventh ACM International\n Conference on Future Energy Systems (e-Energy โ20). Association for Computing Machinery, New York, NY, USA,\n 220โ230. DOI:https://doi.org/10.1145/3396851.3397683\n\n :param name: The name of the module\n :type name: str\n :param epochs: The number of epochs the model should be trained.\n :type epochs: int\n :param offset: The number of samples at the beginning of the dataset that should be **not** considered for training.\n :type offset: int\n :param batch_size: The batch size which should be used for training\n :type batch_size: int\n :param validation_split: The share of data which should be used for validation\n :type validation_split: float\n \"\"\"\n\n def __init__(self, name: str = \"PNN\", epochs=50, offset=0, batch_size=128, validation_split=0.2):\n super().__init__(name)\n self.epochs = epochs\n self.offset = offset\n self.batch_size = batch_size\n self.validation_split = validation_split\n\n def get_params(self) -> Dict[str, object]:\n \"\"\" Get parameter for this object as dict.\n\n :return: Object parameters as json dict\n \"\"\"\n return {\n \"epochs\": self.epochs,\n \"offset\": self.offset,\n \"batch_size\": self.batch_size,\n \"validation_split\": self.validation_split\n }\n\n def set_params(self, epochs=None, offset=None, batch_size=None, validation_split=None):\n \"\"\"\n :param epochs: The number of epochs the model should be trained.\n :type epochs: int\n :param offset: The number of samples at the beginning of the dataset that should be **not** considered for training.\n :type offset: int\n :param batch_size: The batch size which should be used for training\n :type batch_size: int\n :param validation_split: The share of data which should be used for validation\n :type validation_split: float\n \"\"\"\n if batch_size:\n self.batch_size = batch_size\n if epochs:\n self.epochs = epochs\n if offset:\n self.offset = offset\n if validation_split:\n self.validation_split = validation_split\n\n def transform(self, historical_input, calendar, temperature, humidity, profile, trend) -> xr.DataArray:\n \"\"\"\n Forecast the electrical load for the given input.\n\n :param historical_input: The historical input\n :type historical_input: xr.DataArray\n :param calendar: The calendar information of the dates that should be predicted.\n :type calendar: xr.DataArray\n :param temperature: The temperature of the dates that should be predicted\n :type temperature: xr.DataArray\n :param humidity: The humidity of the dates that should be predicted\n :type humidity: xr.DataArray\n :param profile: The profile of the dates that 
should be predicted\n :type profile: xr.DataArray\n :param trend: The trend information of the dates that should be predicted\n :type trend: xr.DataArray\n :return: The prediction\n :rtype: xr.DataArray\n \"\"\"\n result = self.pnn.predict({\n \"hist_input\": historical_input.values,\n \"full_trend\": trend.values,\n \"profile\": profile.values,\n \"dummy_input\": np.concatenate(\n [calendar.values, temperature.values.reshape(-1, self.horizon, 1),\n humidity.values.reshape(-1, self.horizon, 1)], axis=-1)\n })\n return numpy_to_xarray(result, historical_input, self.name)\n\n def fit(self, historical_input, calendar, temperature, humidity, profile, trend, target):\n \"\"\"\n Fit the Profile Neural Network.\n\n :param historical_input: The historical input\n :type historical_input: xr.DataArray\n :param calendar: The calendar information of the dates that should be predicted.\n :type calendar: xr.DataArray\n :param temperature: The temperature of the dates that should be predicted\n :type temperature: xr.DataArray\n :param humidity: The humidity of the dates that should be predicted\n :type humidity: xr.DataArray\n :param profile: The profile of the dates that should be predicted\n :type profile: xr.DataArray\n :param trend: The trend information of the dates that should be predicted\n :type trend: xr.DataArray\n :param target: The ground truth of the desired prediction\n :type target: xr.DataArray\n \"\"\"\n input_length = historical_input.shape[-1]\n trend_length = trend.shape[-1]\n self.horizon = target.shape[-1]\n self.pnn = _PNN(self.horizon, n_steps_in=input_length, trend_length=trend_length)\n\n input, t = self._clean_dataset({\n \"hist_input\": historical_input.values[self.offset:],\n \"full_trend\": trend.values[self.offset:],\n \"profile\": profile.values[self.offset:],\n \"dummy_input\": np.concatenate(\n [calendar.values, temperature.values.reshape(-1, self.horizon, 1),\n humidity.values.reshape(-1, self.horizon, 1)], axis=-1)[self.offset:]\n }, target.values[self.offset:])\n self.pnn.fit(input, t, epochs=self.epochs, batch_size=self.batch_size, validation_split=self.validation_split)\n self.is_fitted = True\n\n def save(self, fm: FileManager) -> Dict:\n \"\"\"\n Stores the PNN at the given path\n\n :param fm: The Filemanager, which contains the path where the model should be stored\n :return: The path where the model is stored.\n \"\"\"\n json = super().save(fm)\n if self.is_fitted:\n filepath = fm.get_path(f\"{self.name}.h5\")\n self.pnn.save(filepath=filepath)\n json.update({\n \"pnn\": filepath\n })\n return json\n\n @classmethod\n def load(cls, load_information) -> BaseEstimator:\n \"\"\"\n Load the PNN model.\n\n :param params: The paramters which should be used for restoring the PNN.\n :return: A wrapped keras model.\n \"\"\"\n pnn_module = ProfileNeuralNetwork(name=load_information[\"name\"], **load_information[\"params\"])\n if load_information[\"is_fitted\"]:\n try:\n pnn = keras.models.load_model(filepath=load_information[\"pnn\"],\n custom_objects={\"_sum_squared_error\": _sum_squared_error,\n \"_root_mean_squared_error\": _root_mean_squared_error})\n except Exception as exception:\n logging.error(\"No model found in %s.\", load_information['pnn'])\n raise exception\n pnn_module.pnn = pnn\n pnn_module.is_fitted = True\n return pnn_module\n\n @staticmethod\n def _clean_dataset(X, y, same_values_in_a_row=2):\n \"\"\"\n Cleans the dataset. 
The following three rules are applied:\n Arguments:\n X: Input data\n y: Target data\n same_values_in_a_row: parameter which indicates how often the same value in a row is accetable\n \"\"\"\n\n def _check_instance(data):\n \"\"\"\n Checks if the data is nan, contains more than same_values_in_a_row values which has the same value and if the value is zero\n Returns: Bool: False if one of the conditions applies\n \"\"\"\n counter = 0\n d_last = -1\n for d in data:\n if d_last == d:\n if counter > same_values_in_a_row:\n return False\n counter += 1\n else:\n counter = 0\n d_last = d\n return True\n\n x_cleaned = {}\n for x in X:\n x_cleaned[x] = []\n y_cleaned = []\n for i in range(len(y)):\n if not np.any(np.isnan(X[\"hist_input\"][i])) and not np.any(np.isnan(y[i])) and not np.any(\n y[i] == 0) and _check_instance(X[\"hist_input\"][i]) and _check_instance(y[i]) and not np.any(\n np.isnan(X[\"full_trend\"][i])) and not np.any(np.isnan(X[\"dummy_input\"][i])):\n # add to cleaned dataset\n for key, x in X.items():\n x_cleaned[key].append(x[i])\n y_cleaned.append(y[i, :])\n for key, x in x_cleaned.items():\n x_cleaned[key] = np.array(x)\n return x_cleaned, np.array(y_cleaned)\n\n\ndef _sum_squared_error(y_true, y_pred):\n from tensorflow.keras import backend as K\n return K.sum(K.square(y_true - y_pred), axis=-1)\n\n\ndef _root_mean_squared_error(y_true, y_pred):\n from tensorflow.keras import backend as K\n return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))\n\n\ndef _PNN(n_steps_out, n_steps_in=36, trend_length=5) -> tensorflow.keras.Model:\n activation = activations.elu\n\n def hist_encoder(conv_input):\n conv = layers.Reshape((n_steps_in, 1))(conv_input)\n conv = layers.Conv1D(4, [3], activation=activation, padding='same')(conv)\n conv = layers.MaxPool1D(pool_size=2)(conv)\n\n conv = layers.Conv1D(1, [7], activation=activation, padding='same')(conv)\n conv = layers.MaxPool1D(pool_size=2)(conv)\n conv = layers.Flatten()(conv)\n conv = layers.Dense(n_steps_out)(conv)\n conv = layers.Reshape((n_steps_out, 1))(conv)\n return conv\n\n def prediction_network(fc):\n fc = layers.Conv1D(16, [7], padding='same', activation=activation)(fc)\n fc = layers.SpatialDropout1D(rate=0.3)(fc)\n fc = layers.Conv1D(8, [7], padding='same', activation=activation)(fc)\n fc = layers.SpatialDropout1D(rate=0.3)(fc)\n fc = layers.Conv1D(1, [7], padding='same')(fc)\n return fc\n\n def external_encoder(dummy_input):\n dummy = layers.Conv1D(2, [7], activation=activation, padding='same')(dummy_input)\n dummy = layers.Conv1D(1, [7], activation=activation, padding='same')(dummy)\n dummy = layers.Flatten()(dummy)\n dummy = layers.Reshape((n_steps_out, 1))(dummy)\n return dummy\n\n def trend_encoder(trend_input):\n trend = layers.Dense(1, activation=activation)(trend_input)\n trend = layers.Dense(4, activation=activation)(trend)\n trend = layers.Conv1D(4, [5], activation=activation, padding='same')(trend)\n trend = layers.Conv1D(1, [5], activation=activation, padding='same')(trend)\n return trend\n\n conv_input = keras.Input(shape=(n_steps_in,), name=\"hist_input\")\n trend_input = keras.Input(shape=(n_steps_out, trend_length), name=\"full_trend\")\n profile_input = keras.Input(shape=(n_steps_out,), name=\"profile\")\n dummy_input = keras.Input(shape=(n_steps_out, 16), name=\"dummy_input\")\n\n conv = hist_encoder(conv_input)\n\n trend = trend_encoder(trend_input)\n\n dummy = external_encoder(dummy_input)\n\n fc = layers.concatenate([dummy, conv], axis=2)\n fc = prediction_network(fc)\n\n profile = 
layers.Reshape((n_steps_out, 1))(profile_input)\n\n out = layers.concatenate([fc, profile, trend])\n out = layers.Conv1D(1, [1], padding='same', use_bias=False, activation=activations.linear,\n kernel_initializer=initializers.Constant(value=1 / 3), name=\"aggregation_layer\")(out)\n pred = layers.Flatten()(out)\n\n model = keras.Model(inputs=[conv_input, trend_input, dummy_input, profile_input], outputs=pred)\n\n model.compile(optimizer=optimizers.Adam(), loss=_sum_squared_error,\n metrics=[_root_mean_squared_error])\n return model\n"
] | [
[
"tensorflow.keras.layers.Conv1D",
"numpy.array",
"numpy.isnan",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.backend.square",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.SpatialDropout1D",
"tensorflow.keras.models.load_model",
"tensorflow.keras.Model",
"numpy.any",
"tensorflow.keras.Input",
"tensorflow.keras.layers.MaxPool1D",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.initializers.Constant"
]
] |
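A quick aside on the PNN entry above: a minimal NumPy sketch (not from the source repo) of what its `_sum_squared_error` and `_root_mean_squared_error` Keras losses compute per sample, assuming predictions shaped `(batch, horizon)`; the sample values are illustrative only.

```python
import numpy as np

def sum_squared_error(y_true, y_pred):
    # mirrors K.sum(K.square(y_true - y_pred), axis=-1): one scalar per sample
    return np.sum(np.square(y_true - y_pred), axis=-1)

def root_mean_squared_error(y_true, y_pred):
    # mirrors K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
    return np.sqrt(np.mean(np.square(y_pred - y_true), axis=-1))

y_true = np.array([[1.0, 2.0, 3.0]])
y_pred = np.array([[1.0, 2.5, 2.0]])
print(sum_squared_error(y_true, y_pred))        # [1.25]
print(root_mean_squared_error(y_true, y_pred))  # [0.6455...]
```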
graceon/R3Det_Tensorflow | [
"5ff8e2505aacfb9107d2c41980374385dc0200ba"
] | [
"libs/networks/build_whole_network.py"
] | [
"# -*-coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\n\nfrom libs.networks import resnet, resnet_gluoncv, mobilenet_v2, xception\nfrom libs.box_utils import anchor_utils, generate_anchors, generate_rotate_anchors\nfrom libs.configs import cfgs\nfrom libs.losses import losses\nfrom libs.box_utils import show_box_in_tensor\nfrom libs.detection_oprations.proposal_opr_ import postprocess_detctions\nfrom libs.detection_oprations.anchor_target_layer_without_boxweight import anchor_target_layer\n\n\nclass DetectionNetwork(object):\n\n def __init__(self, base_network_name, is_training):\n\n self.base_network_name = base_network_name\n self.is_training = is_training\n if cfgs.METHOD == 'H':\n self.num_anchors_per_location = len(cfgs.ANCHOR_SCALES) * len(cfgs.ANCHOR_RATIOS)\n else:\n self.num_anchors_per_location = len(cfgs.ANCHOR_SCALES) * len(cfgs.ANCHOR_RATIOS) * len(cfgs.ANCHOR_ANGLES)\n self.method = cfgs.METHOD\n\n def build_base_network(self, input_img_batch):\n\n if self.base_network_name.startswith('resnet_v1'):\n return resnet.resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)\n\n elif self.base_network_name in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:\n\n return resnet_gluoncv.resnet_base(input_img_batch, scope_name=self.base_network_name,\n is_training=self.is_training)\n\n elif self.base_network_name.startswith('MobilenetV2'):\n return mobilenet_v2.mobilenetv2_base(input_img_batch, is_training=self.is_training)\n\n elif self.base_network_name.startswith('xception'):\n return xception.xception_base(input_img_batch, is_training=self.is_training)\n\n else:\n raise ValueError('Sry, we only support resnet, mobilenet_v2 and xception')\n\n def rpn_cls_net(self, inputs, scope_list, reuse_flag, level):\n rpn_conv2d_3x3 = inputs\n for i in range(4):\n rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3,\n num_outputs=256,\n kernel_size=[3, 3],\n stride=1,\n activation_fn=tf.nn.relu,\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n scope='{}_{}'.format(scope_list[0], i),\n reuse=reuse_flag)\n\n rpn_box_scores = slim.conv2d(rpn_conv2d_3x3,\n num_outputs=cfgs.CLASS_NUM * self.num_anchors_per_location,\n kernel_size=[3, 3],\n stride=1,\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.FINAL_CONV_BIAS_INITIALIZER,\n scope=scope_list[2],\n activation_fn=None,\n reuse=reuse_flag)\n\n rpn_box_scores = tf.reshape(rpn_box_scores, [-1, cfgs.CLASS_NUM],\n name='rpn_{}_classification_reshape'.format(level))\n rpn_box_probs = tf.sigmoid(rpn_box_scores, name='rpn_{}_classification_sigmoid'.format(level))\n\n return rpn_box_scores, rpn_box_probs\n\n def rpn_reg_net(self, inputs, scope_list, reuse_flag, level):\n rpn_delta_boxes = inputs\n for i in range(4):\n rpn_delta_boxes = slim.conv2d(inputs=rpn_delta_boxes,\n num_outputs=256,\n kernel_size=[3, 3],\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n stride=1,\n activation_fn=tf.nn.relu,\n scope='{}_{}'.format(scope_list[1], i),\n reuse=reuse_flag)\n\n rpn_delta_boxes = slim.conv2d(rpn_delta_boxes,\n num_outputs=5 * self.num_anchors_per_location,\n kernel_size=[3, 3],\n stride=1,\n weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,\n biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,\n scope=scope_list[3],\n 
activation_fn=None,\n reuse=reuse_flag)\n\n rpn_delta_boxes = tf.reshape(rpn_delta_boxes, [-1, 5],\n name='rpn_{}_regression_reshape'.format(level))\n return rpn_delta_boxes\n\n def rpn_net(self, feature_pyramid):\n\n rpn_delta_boxes_list = []\n rpn_scores_list = []\n rpn_probs_list = []\n with tf.variable_scope('rpn_net'):\n with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):\n for level in cfgs.LEVEL:\n\n if cfgs.SHARE_NET:\n reuse_flag = None if level == 'P3' else True\n scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification', 'rpn_regression']\n else:\n reuse_flag = None\n scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level,\n 'rpn_classification_' + level, 'rpn_regression_' + level]\n\n rpn_box_scores, rpn_box_probs = self.rpn_cls_net(feature_pyramid[level], scope_list, reuse_flag, level)\n rpn_delta_boxes = self.rpn_reg_net(feature_pyramid[level], scope_list, reuse_flag, level)\n\n rpn_scores_list.append(rpn_box_scores)\n rpn_probs_list.append(rpn_box_probs)\n rpn_delta_boxes_list.append(rpn_delta_boxes)\n\n rpn_all_delta_boxes = tf.concat(rpn_delta_boxes_list, axis=0)\n rpn_all_boxes_scores = tf.concat(rpn_scores_list, axis=0)\n rpn_all_boxes_probs = tf.concat(rpn_probs_list, axis=0)\n\n return rpn_all_delta_boxes, rpn_all_boxes_scores, rpn_all_boxes_probs\n\n def make_anchors(self, feature_pyramid):\n with tf.variable_scope('make_anchors'):\n anchor_list = []\n level_list = cfgs.LEVEL\n with tf.name_scope('make_anchors_all_level'):\n for level, base_anchor_size, stride in zip(level_list, cfgs.BASE_ANCHOR_SIZE_LIST, cfgs.ANCHOR_STRIDE):\n '''\n (level, base_anchor_size) tuple:\n (P3, 32), (P4, 64), (P5, 128), (P6, 256), (P7, 512)\n '''\n featuremap_height, featuremap_width = tf.shape(feature_pyramid[level])[1], \\\n tf.shape(feature_pyramid[level])[2]\n\n featuremap_height = tf.cast(featuremap_height, tf.float32)\n featuremap_width = tf.cast(featuremap_width, tf.float32)\n\n # tmp_anchors = anchor_utils.make_anchors(base_anchor_size=base_anchor_size,\n # anchor_scales=cfgs.ANCHOR_SCALES,\n # anchor_ratios=cfgs.ANCHOR_RATIOS,\n # featuremap_height=featuremap_height,\n # featuremap_width=featuremap_width,\n # stride=stride,\n # name='make_anchors_{}'.format(level))\n if self.method == 'H':\n tmp_anchors = tf.py_func(generate_anchors.generate_anchors_pre,\n inp=[featuremap_height, featuremap_width, stride,\n np.array(cfgs.ANCHOR_SCALES) * stride, cfgs.ANCHOR_RATIOS, 4.0],\n Tout=[tf.float32])\n\n tmp_anchors = tf.reshape(tmp_anchors, [-1, 4])\n else:\n tmp_anchors = generate_rotate_anchors.make_anchors(base_anchor_size=base_anchor_size,\n anchor_scales=cfgs.ANCHOR_SCALES,\n anchor_ratios=cfgs.ANCHOR_RATIOS,\n anchor_angles=cfgs.ANCHOR_ANGLES,\n featuremap_height=featuremap_height,\n featuremap_width=featuremap_width,\n stride=stride)\n tmp_anchors = tf.reshape(tmp_anchors, [-1, 5])\n anchor_list.append(tmp_anchors)\n\n all_level_anchors = tf.concat(anchor_list, axis=0)\n return all_level_anchors\n\n def add_anchor_img_smry(self, img, anchors, labels, method):\n\n positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(labels, 1)), [-1])\n # negative_anchor_indices = tf.reshape(tf.where(tf.equal(labels, 0)), [-1])\n\n positive_anchor = tf.gather(anchors, positive_anchor_indices)\n # negative_anchor = tf.gather(anchors, negative_anchor_indices)\n\n pos_in_img = show_box_in_tensor.only_draw_boxes(img_batch=img,\n boxes=positive_anchor,\n method=method)\n # neg_in_img = 
show_box_in_tensor.only_draw_boxes(img_batch=img,\n # boxes=negative_anchor)\n\n tf.summary.image('positive_anchor', pos_in_img)\n # tf.summary.image('negative_anchors', neg_in_img)\n\n def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h, gtboxes_batch_r, gpu_id=0):\n\n if self.is_training:\n gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])\n gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)\n\n gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])\n gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)\n\n # 1. build base network\n feature_pyramid = self.build_base_network(input_img_batch)\n\n # 2. build rpn\n rpn_box_pred, rpn_cls_score, rpn_cls_prob = self.rpn_net(feature_pyramid)\n\n # 3. generate_anchors\n anchors = self.make_anchors(feature_pyramid)\n\n # 4. postprocess rpn proposals. such as: decode, clip, filter\n if not self.is_training:\n with tf.variable_scope('postprocess_detctions'):\n boxes, scores, category = postprocess_detctions(rpn_bbox_pred=rpn_box_pred,\n rpn_cls_prob=rpn_cls_prob,\n anchors=anchors,\n is_training=self.is_training)\n return boxes, scores, category\n\n # 5. build loss\n else:\n with tf.variable_scope('build_loss'):\n labels, target_delta, anchor_states, target_boxes = tf.py_func(func=anchor_target_layer,\n inp=[gtboxes_batch_h, gtboxes_batch_r,\n anchors, gpu_id],\n Tout=[tf.float32, tf.float32, tf.float32,\n tf.float32])\n\n if self.method == 'H':\n self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 0)\n else:\n self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 1)\n\n cls_loss = losses.focal_loss(labels, rpn_cls_score, anchor_states)\n\n if cfgs.REG_LOSS_MODE == 0:\n reg_loss = losses.iou_smooth_l1_loss(target_delta, rpn_box_pred, anchor_states, target_boxes,\n anchors)\n elif cfgs.REG_LOSS_MODE == 1:\n reg_loss = losses.smooth_l1_loss_atan(target_delta, rpn_box_pred, anchor_states)\n else:\n reg_loss = losses.smooth_l1_loss(target_delta, rpn_box_pred, anchor_states)\n\n losses_dict = {'cls_loss': cls_loss * cfgs.CLS_WEIGHT,\n 'reg_loss': reg_loss * cfgs.REG_WEIGHT}\n\n with tf.variable_scope('postprocess_detctions'):\n boxes, scores, category = postprocess_detctions(rpn_bbox_pred=rpn_box_pred,\n rpn_cls_prob=rpn_cls_prob,\n anchors=anchors,\n is_training=self.is_training)\n boxes = tf.stop_gradient(boxes)\n scores = tf.stop_gradient(scores)\n category = tf.stop_gradient(category)\n\n return boxes, scores, category, losses_dict\n\n def get_restorer(self):\n checkpoint_path = tf.train.latest_checkpoint(os.path.join(cfgs.TRAINED_CKPT, cfgs.VERSION))\n\n if checkpoint_path != None:\n if cfgs.RESTORE_FROM_RPN:\n print('___restore from rpn___')\n model_variables = slim.get_model_variables()\n restore_variables = [var for var in model_variables if not var.name.startswith('FastRCNN_Head')] + \\\n [slim.get_or_create_global_step()]\n for var in restore_variables:\n print(var.name)\n restorer = tf.train.Saver(restore_variables)\n else:\n restorer = tf.train.Saver()\n print(\"model restore from :\", checkpoint_path)\n else:\n checkpoint_path = cfgs.PRETRAINED_CKPT\n print(\"model restore from pretrained mode, path is :\", checkpoint_path)\n\n model_variables = slim.get_model_variables()\n\n # for var in model_variables:\n # print(var.name)\n # print(20*\"__++__++__\")\n\n def name_in_ckpt_rpn(var):\n return var.op.name\n\n def name_in_ckpt_fastrcnn_head(var):\n '''\n Fast-RCNN/resnet_v1_50/block4 -->resnet_v1_50/block4\n Fast-RCNN/MobilenetV2/** -- > MobilenetV2 **\n :param var:\n :return:\n '''\n 
return '/'.join(var.op.name.split('/')[1:])\n\n nameInCkpt_Var_dict = {}\n for var in model_variables:\n if var.name.startswith('Fast-RCNN/'+self.base_network_name): # +'/block4'\n var_name_in_ckpt = name_in_ckpt_fastrcnn_head(var)\n nameInCkpt_Var_dict[var_name_in_ckpt] = var\n else:\n if var.name.startswith(self.base_network_name):\n var_name_in_ckpt = name_in_ckpt_rpn(var)\n nameInCkpt_Var_dict[var_name_in_ckpt] = var\n else:\n continue\n restore_variables = nameInCkpt_Var_dict\n for key, item in restore_variables.items():\n print(\"var_in_graph: \", item.name)\n print(\"var_in_ckpt: \", key)\n print(20*\"___\")\n restorer = tf.train.Saver(restore_variables)\n print(20 * \"****\")\n print(\"restore from pretrained_weighs in IMAGE_NET\")\n return restorer, checkpoint_path\n\n def get_gradients(self, optimizer, loss):\n '''\n\n :param optimizer:\n :param loss:\n :return:\n\n return vars and grads that not be fixed\n '''\n\n # if cfgs.FIXED_BLOCKS > 0:\n # trainable_vars = tf.trainable_variables()\n # # trained_vars = slim.get_trainable_variables()\n # start_names = [cfgs.NET_NAME + '/block%d'%i for i in range(1, cfgs.FIXED_BLOCKS+1)] + \\\n # [cfgs.NET_NAME + '/conv1']\n # start_names = tuple(start_names)\n # trained_var_list = []\n # for var in trainable_vars:\n # if not var.name.startswith(start_names):\n # trained_var_list.append(var)\n # # slim.learning.train()\n # grads = optimizer.compute_gradients(loss, var_list=trained_var_list)\n # return grads\n # else:\n # return optimizer.compute_gradients(loss)\n return optimizer.compute_gradients(loss)\n\n def enlarge_gradients_for_bias(self, gradients):\n\n final_gradients = []\n with tf.variable_scope(\"Gradient_Mult\") as scope:\n for grad, var in gradients:\n scale = 1.0\n if cfgs.MUTILPY_BIAS_GRADIENT and './biases' in var.name:\n scale = scale * cfgs.MUTILPY_BIAS_GRADIENT\n if not np.allclose(scale, 1.0):\n grad = tf.multiply(grad, scale)\n final_gradients.append((grad, var))\n return final_gradients\n"
] | [
[
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.contrib.slim.l2_regularizer",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.train.Saver",
"tensorflow.contrib.slim.get_model_variables",
"tensorflow.variable_scope",
"numpy.array",
"tensorflow.py_func",
"numpy.allclose",
"tensorflow.contrib.slim.get_or_create_global_step",
"tensorflow.name_scope",
"tensorflow.contrib.slim.conv2d",
"tensorflow.multiply",
"tensorflow.summary.image",
"tensorflow.greater_equal",
"tensorflow.gather",
"tensorflow.stop_gradient"
]
] |
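On the R3Det entry above: the anchor count per feature-map location depends on `cfgs.METHOD`. A pure-Python sketch of that arithmetic; the scale/ratio/angle values below are hypothetical stand-ins, not the repo's actual `cfgs` settings.

```python
# Hypothetical stand-ins for libs/configs/cfgs.py values.
ANCHOR_SCALES = [2 ** 0, 2 ** (1 / 3), 2 ** (2 / 3)]
ANCHOR_RATIOS = [0.5, 1.0, 2.0]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]

def anchors_per_location(method):
    # Mirrors DetectionNetwork.__init__: horizontal anchors ('H') ignore the
    # angle set; rotated anchors multiply it in.
    n = len(ANCHOR_SCALES) * len(ANCHOR_RATIOS)
    return n if method == 'H' else n * len(ANCHOR_ANGLES)

print(anchors_per_location('H'))  # 9
print(anchors_per_location('R'))  # 54
```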
aishikhar/avalanche | [
"39c361aba1663795ed33f093ab2e15cc5792026e",
"39c361aba1663795ed33f093ab2e15cc5792026e"
] | [
"avalanche/benchmarks/classic/core50.py",
"avalanche/training/strategies/ar1.py"
] | [
"################################################################################\r\n# Copyright (c) 2021 ContinualAI. #\r\n# Copyrights licensed under the MIT License. #\r\n# See the accompanying LICENSE file for terms. #\r\n# #\r\n# Date: 1-05-2020 #\r\n# Author(s): Vincenzo Lomonaco #\r\n# E-mail: [email protected] #\r\n# Website: www.continualai.org #\r\n################################################################################\r\n\r\n\"\"\" This module contains the high-level CORe50 scenario generator. It\r\nbasically returns a iterable scenario object ``GenericCLScenario`` given a\r\nnumber of configuration parameters.\"\"\"\r\n\r\nfrom avalanche.benchmarks.datasets.core50.core50_data import CORE50_DATA\r\nfrom avalanche.benchmarks.scenarios.generic_scenario_creation import \\\r\n create_generic_scenario_from_filelists\r\nfrom os.path import expanduser\r\n\r\nnbatch = {\r\n 'ni': 8,\r\n 'nc': 9,\r\n 'nic': 79,\r\n 'nicv2_79': 79,\r\n 'nicv2_196': 196,\r\n 'nicv2_391': 391\r\n}\r\n\r\nscen2dirs = {\r\n 'ni': \"batches_filelists/NI_inc/\",\r\n 'nc': \"batches_filelists/NC_inc/\",\r\n 'nic': \"batches_filelists/NIC_inc/\",\r\n 'nicv2_79': \"NIC_v2_79/\",\r\n 'nicv2_196': \"NIC_v2_196/\",\r\n 'nicv2_391': \"NIC_v2_391/\"\r\n}\r\n\r\n\r\ndef CORe50(root=expanduser(\"~\") + \"/.avalanche/data/core50/\",\r\n scenario=\"nicv2_391\",\r\n run=0,\r\n train_transform=None,\r\n eval_transform=None):\r\n \"\"\"\r\n Creates a CL scenario for CORe50.\r\n\r\n If the dataset is not present in the computer, this method will\r\n automatically download and store it.\r\n\r\n This generator can be used to obtain the NI, NC, NIC and NICv2-* scenarios.\r\n\r\n The scenario instance returned by this method will have two fields,\r\n `train_stream` and `test_stream`, which can be iterated to obtain\r\n training and test :class:`Experience`. Each Experience contains the\r\n `dataset` and the associated task label.\r\n\r\n The task label \"0\" will be assigned to each experience.\r\n\r\n The scenario API is quite simple and is uniform across all scenario\r\n generators. It is recommended to check the tutorial of the \"benchmark\" API,\r\n which contains usage examples ranging from \"basic\" to \"advanced\".\r\n\r\n :param root: Path indicating where to store the dataset and related\r\n metadata. By default they will be stored in\r\n \"~/.avalanche/datasets/core50/data/\".\r\n :param scenario: CORe50 main scenario. It can be chosen between 'ni', 'nc',\r\n 'nic', 'nicv2_79', 'nicv2_196' or 'nicv2_391.'\r\n :param run: number of run for the scenario. Each run defines a different\r\n ordering. Must be a number between 0 and 9.\r\n :param train_transform: The transformation to apply to the training data,\r\n e.g. a random crop, a normalization or a concatenation of different\r\n transformations (see torchvision.transform documentation for a\r\n comprehensive list of possible transformations). Defaults to None.\r\n :param eval_transform: The transformation to apply to the test data,\r\n e.g. a random crop, a normalization or a concatenation of different\r\n transformations (see torchvision.transform documentation for a\r\n comprehensive list of possible transformations). Defaults to None.\r\n\r\n :returns: a properly initialized :class:`GenericCLScenario` instance.\r\n \"\"\"\r\n\r\n assert (0 <= run <= 9), \"Pre-defined run of CORe50 are only 10. 
Indicate \" \\\r\n \"a number between 0 and 9.\"\r\n assert (scenario in nbatch.keys()), \"The selected scenario is note \" \\\r\n \"recognized: it should be 'ni', 'nc',\" \\\r\n \"'nic', 'nicv2_79', 'nicv2_196' or \" \\\r\n \"'nicv2_391'.\"\r\n if root is None:\r\n core_data = CORE50_DATA()\r\n else:\r\n core_data = CORE50_DATA(root)\r\n\r\n root = core_data.data_folder\r\n root_img = root + \"core50_128x128/\"\r\n\r\n filelists_bp = scen2dirs[scenario] + \"run\" + str(run) + \"/\"\r\n train_failists_paths = []\r\n for i in range(nbatch[scenario]):\r\n train_failists_paths.append(\r\n root + filelists_bp + \"train_batch_\" +\r\n str(i).zfill(2) + \"_filelist.txt\")\r\n\r\n scenario_obj = create_generic_scenario_from_filelists(\r\n root_img, train_failists_paths,\r\n root + filelists_bp + \"test_filelist.txt\",\r\n [0 for _ in range(nbatch[scenario])],\r\n complete_test_set_only=True,\r\n train_transform=train_transform,\r\n eval_transform=eval_transform)\r\n\r\n return scenario_obj\r\n\r\n\r\n__all__ = [\r\n 'CORe50'\r\n]\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # this below can be taken as a usage example or a simple test script\r\n import sys\r\n from torch.utils.data.dataloader import DataLoader\r\n\r\n scenario = CORe50(scenario=\"nicv2_79\")\r\n for i, batch in enumerate(scenario.train_stream):\r\n print(i, batch)\r\n dataset, t = batch.dataset, batch.task_label\r\n dl = DataLoader(dataset, batch_size=300)\r\n\r\n for mb in dl:\r\n x, y = mb\r\n print(x.shape)\r\n print(y.shape)\r\n sys.exit(0)\r\n",
"import warnings\nfrom typing import Optional, Sequence\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import CrossEntropyLoss\nfrom torch.nn.modules.batchnorm import _NormBase\nfrom torch.optim import SGD\nfrom torch.utils.data import DataLoader\n\nfrom avalanche.training import default_logger\nfrom avalanche.models import MobilenetV1\nfrom avalanche.models.batch_renorm import BatchRenorm2D\nfrom avalanche.training.plugins import StrategyPlugin, EvaluationPlugin, \\\n SynapticIntelligencePlugin, CWRStarPlugin\nfrom avalanche.training.strategies import BaseStrategy\nfrom avalanche.training.utils import replace_bn_with_brn, get_last_fc_layer, \\\n freeze_up_to, change_brn_pars, examples_per_class, LayerAndParameter\n\n\nclass AR1(BaseStrategy):\n \"\"\"\n The AR1 strategy with Latent Replay.\n\n This implementations allows for the use of both Synaptic Intelligence and\n Latent Replay to protect the lower level of the model from forgetting.\n\n While the original papers show how to use those two techniques in a mutual\n exclusive way, this implementation allows for the use of both of them\n concurrently. This behaviour is controlled by passing proper constructor\n arguments).\n \"\"\"\n\n def __init__(self, criterion=None, lr: float = 0.001, momentum=0.9,\n l2=0.0005, train_epochs: int = 4,\n init_update_rate: float = 0.01,\n inc_update_rate=0.00005,\n max_r_max=1.25, max_d_max=0.5, inc_step=4.1e-05,\n rm_sz: int = 1500,\n freeze_below_layer: str = \"lat_features.19.bn.beta\",\n latent_layer_num: int = 19, ewc_lambda: float = 0,\n train_mb_size: int = 128, eval_mb_size: int = 128, device=None,\n plugins: Optional[Sequence[StrategyPlugin]] = None,\n evaluator: EvaluationPlugin = default_logger, eval_every=-1):\n \"\"\"\n Creates an instance of the AR1 strategy.\n\n :param criterion: The loss criterion to use. Defaults to None, in which\n case the cross entropy loss is used.\n :param lr: The learning rate (SGD optimizer).\n :param momentum: The momentum (SGD optimizer).\n :param l2: The L2 penalty used for weight decay.\n :param train_epochs: The number of training epochs. Defaults to 4.\n :param init_update_rate: The initial update rate of BatchReNorm layers.\n :param inc_update_rate: The incremental update rate of BatchReNorm\n layers.\n :param max_r_max: The maximum r value of BatchReNorm layers.\n :param max_d_max: The maximum d value of BatchReNorm layers.\n :param inc_step: The incremental step of r and d values of BatchReNorm\n layers.\n :param rm_sz: The size of the replay buffer. The replay buffer is shared\n across classes. Defaults to 1500.\n :param freeze_below_layer: A string describing the name of the layer\n to use while freezing the lower (nearest to the input) part of the\n model. The given layer is not frozen (exclusive).\n :param latent_layer_num: The number of the layer to use as the Latent\n Replay Layer. Usually this is the same of `freeze_below_layer`.\n :param ewc_lambda: The Synaptic Intelligence lambda term. Defaults to\n 0, which means that the Synaptic Intelligence regularization\n will not be applied.\n :param train_mb_size: The train minibatch size. Defaults to 128.\n :param eval_mb_size: The eval minibatch size. Defaults to 128.\n :param device: The device to use. 
Defaults to None (cpu).\n :param plugins: (optional) list of StrategyPlugins.\n :param evaluator: (optional) instance of EvaluationPlugin for logging\n and metric computations.\n :param eval_every: the frequency of the calls to `eval` inside the\n training loop.\n if -1: no evaluation during training.\n if 0: calls `eval` after the final epoch of each training\n experience.\n if >0: calls `eval` every `eval_every` epochs and at the end\n of all the epochs for a single experience.\n \"\"\"\n\n warnings.warn(\"The AR1 strategy implementation is in an alpha stage \"\n \"and is not perfectly aligned with the paper \"\n \"implementation. Please use at your own risk!\")\n\n if plugins is None:\n plugins = []\n\n # Model setup\n model = MobilenetV1(pretrained=True, latent_layer_num=latent_layer_num)\n replace_bn_with_brn(\n model, momentum=init_update_rate, r_d_max_inc_step=inc_step,\n max_r_max=max_r_max, max_d_max=max_d_max)\n\n fc_name, fc_layer = get_last_fc_layer(model)\n\n if ewc_lambda != 0:\n # Synaptic Intelligence is not applied to the last fully\n # connected layer (and implicitly to \"freeze below\" ones.\n plugins.append(SynapticIntelligencePlugin(\n ewc_lambda, excluded_parameters=[fc_name]))\n\n self.cwr_plugin = CWRStarPlugin(model, cwr_layer_name=fc_name,\n freeze_remaining_model=False)\n plugins.append(self.cwr_plugin)\n\n optimizer = SGD(model.parameters(), lr=lr, momentum=momentum,\n weight_decay=l2)\n\n if criterion is None:\n criterion = CrossEntropyLoss()\n\n self.ewc_lambda = ewc_lambda\n self.freeze_below_layer = freeze_below_layer\n self.rm_sz = rm_sz\n self.inc_update_rate = inc_update_rate\n self.max_r_max = max_r_max\n self.max_d_max = max_d_max\n self.lr = lr\n self.momentum = momentum\n self.l2 = l2\n self.rm = None\n self.cur_acts: Optional[Tensor] = None\n self.replay_mb_size = 0\n\n super().__init__(\n model, optimizer, criterion,\n train_mb_size=train_mb_size, train_epochs=train_epochs,\n eval_mb_size=eval_mb_size, device=device, plugins=plugins,\n evaluator=evaluator, eval_every=eval_every)\n\n def before_training_exp(self, **kwargs):\n self.model.eval()\n self.model.end_features.train()\n self.model.output.train()\n\n if self.training_exp_counter > 0:\n # In AR1 batch 0 is treated differently as the feature extractor is\n # left more free to learn.\n # This if is executed for batch > 0, in which we freeze layers\n # below \"self.freeze_below_layer\" (which usually is the latent\n # replay layer!) and we also change the parameters of BatchReNorm\n # layers to a more conservative configuration.\n\n # \"freeze_up_to\" will freeze layers below \"freeze_below_layer\"\n # Beware that Batch ReNorm layers are not frozen!\n freeze_up_to(self.model, freeze_until_layer=self.freeze_below_layer,\n layer_filter=AR1.filter_bn_and_brn)\n\n # Adapt the parameters of BatchReNorm layers\n change_brn_pars(self.model, momentum=self.inc_update_rate,\n r_d_max_inc_step=0, r_max=self.max_r_max,\n d_max=self.max_d_max)\n\n # Adapt the model and optimizer\n self.model = self.model.to(self.device)\n self.optimizer = SGD(\n self.model.parameters(), lr=self.lr, momentum=self.momentum,\n weight_decay=self.l2)\n\n # super()... will run S.I. 
and CWR* plugin callbacks\n super().before_training_exp(**kwargs)\n\n # Update cur_j of CWR* to consider latent patterns\n if self.training_exp_counter > 0:\n for class_id, count in examples_per_class(self.rm[1]).items():\n self.model.cur_j[class_id] += count\n self.cwr_plugin.cur_class = [\n cls for cls in set(self.model.cur_j.keys())\n if self.model.cur_j[cls] > 0]\n self.cwr_plugin.reset_weights(self.cwr_plugin.cur_class)\n\n def make_train_dataloader(self, num_workers=0, shuffle=True, **kwargs):\n \"\"\"\n Called after the dataset instantiation. Initialize the data loader.\n\n For AR1 a \"custom\" dataloader is used: instead of using\n `self.train_mb_size` as the batch size, the data loader batch size will\n be computed ad `self.train_mb_size - latent_mb_size`. `latent_mb_size`\n is in turn computed as:\n\n `\n len(train_dataset) // ((len(train_dataset) + len(replay_buffer)\n // self.train_mb_size)\n `\n\n so that the number of iterations required to run an epoch on the current\n batch is equal to the number of iterations required to run an epoch\n on the replay buffer.\n\n :param num_workers: number of thread workers for the data loading.\n :param shuffle: True if the data should be shuffled, False otherwise.\n \"\"\"\n\n current_batch_mb_size = self.train_mb_size\n\n if self.training_exp_counter > 0:\n train_patterns = len(self.adapted_dataset)\n current_batch_mb_size = train_patterns // (\n (train_patterns + self.rm_sz) // self.train_mb_size)\n\n current_batch_mb_size = max(1, current_batch_mb_size)\n self.replay_mb_size = max(0, self.train_mb_size - current_batch_mb_size)\n\n # AR1 only supports SIT scenarios (no task labels).\n self.dataloader = DataLoader(\n self.adapted_dataset, num_workers=num_workers,\n batch_size=current_batch_mb_size, shuffle=shuffle)\n\n def training_epoch(self, **kwargs):\n for self.mb_it, (self.mb_x, self.mb_y, _) in \\\n enumerate(self.dataloader):\n self.before_training_iteration(**kwargs)\n\n self.optimizer.zero_grad()\n self.mb_x = self.mb_x.to(self.device)\n self.mb_y = self.mb_y.to(self.device)\n\n if self.training_exp_counter > 0:\n lat_mb_x = self.rm[0][self.mb_it * self.replay_mb_size:\n (self.mb_it + 1) * self.replay_mb_size]\n lat_mb_x = lat_mb_x.to(self.device)\n lat_mb_y = self.rm[1][self.mb_it * self.replay_mb_size:\n (self.mb_it + 1) * self.replay_mb_size]\n lat_mb_y = lat_mb_y.to(self.device)\n self.mb_y = torch.cat((self.mb_y, lat_mb_y), 0)\n else:\n lat_mb_x = None\n\n # Forward pass. Here we are injecting latent patterns lat_mb_x.\n # lat_mb_x will be None for the very first batch (batch 0), which\n # means that lat_acts.shape[0] == self.mb_x[0].\n self.before_forward(**kwargs)\n self.logits, lat_acts = self.model(\n self.mb_x, latent_input=lat_mb_x, return_lat_acts=True)\n\n if self.epoch == 0:\n # On the first epoch only: store latent activations. 
Those\n # activations will be used to update the replay buffer.\n lat_acts = lat_acts.detach().clone().cpu()\n if self.mb_it == 0:\n self.cur_acts = lat_acts\n else:\n self.cur_acts = torch.cat((self.cur_acts, lat_acts), 0)\n self.after_forward(**kwargs)\n\n # Loss & Backward\n # We don't need to handle latent replay, as self.mb_y already\n # contains both current and replay labels.\n self.loss = self.criterion(self.logits, self.mb_y)\n self.before_backward(**kwargs)\n self.loss.backward()\n self.after_backward(**kwargs)\n\n # Optimization step\n self.before_update(**kwargs)\n self.optimizer.step()\n self.after_update(**kwargs)\n\n self.after_training_iteration(**kwargs)\n\n def after_training_exp(self, **kwargs):\n h = min(self.rm_sz // (self.training_exp_counter + 1),\n self.cur_acts.size(0))\n\n curr_data = self.experience.dataset\n idxs_cur = torch.randperm(self.cur_acts.size(0))[:h]\n rm_add_y = torch.tensor(\n [curr_data.targets[idx_cur] for idx_cur in idxs_cur])\n\n rm_add = [self.cur_acts[idxs_cur], rm_add_y]\n\n # replace patterns in random memory\n if self.training_exp_counter == 0:\n self.rm = rm_add\n else:\n idxs_2_replace = torch.randperm(self.rm[0].size(0))[:h]\n for j, idx in enumerate(idxs_2_replace):\n idx = int(idx)\n self.rm[0][idx] = rm_add[0][j]\n self.rm[1][idx] = rm_add[1][j]\n\n self.cur_acts = None\n\n # Runs S.I. and CWR* plugin callbacks\n super().after_training_exp(**kwargs)\n\n @staticmethod\n def filter_bn_and_brn(param_def: LayerAndParameter):\n return not isinstance(param_def.layer, (_NormBase, BatchRenorm2D))\n"
] | [
[
"torch.utils.data.dataloader.DataLoader"
],
[
"torch.nn.CrossEntropyLoss",
"torch.cat",
"torch.tensor",
"torch.utils.data.DataLoader"
]
] |
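On the AR1 entry above: `make_train_dataloader` shrinks the current-data minibatch so that latent replay patterns fill the remainder of each batch. A sketch of just that arithmetic, with illustrative sizes.

```python
def split_minibatch(train_patterns, rm_sz, train_mb_size):
    # Mirrors AR1.make_train_dataloader: one epoch over the current data should
    # take as many iterations as one epoch over current data + replay buffer.
    current = max(1, train_patterns // ((train_patterns + rm_sz) // train_mb_size))
    replay = max(0, train_mb_size - current)
    return current, replay

# e.g. 3000 new patterns, the default rm_sz=1500, minibatch of 128
print(split_minibatch(3000, 1500, 128))  # (85, 43)
```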
wjsi/mars-profiling | [
"1accb00c90da67b46ad98ea1592fecc524625454"
] | [
"tests/issues/test_issue587.py"
] | [
"\"\"\"\nTest for issue 587:\nhttps://github.com/pandas-profiling/pandas-profiling/issues/587\n\"\"\"\nimport pandas as pd\nimport pytest\n\nfrom mars_profiling import config\nfrom mars_profiling.model.base import get_counts, is_numeric\n\n\[email protected](\n int(pd.__version__.split(\".\")[0]) < 1, reason=\"requires pandas 1 or higher\"\n)\ndef test_issue587():\n config[\"vars\"][\"num\"][\"low_categorical_threshold\"] = 0\n\n # Minimal reproducible code\n series = pd.Series([1, None], dtype=\"Int64\")\n series_description = get_counts(series)\n assert is_numeric(series, series_description)\n"
] | [
[
"pandas.__version__.split",
"pandas.Series"
]
] |
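On the mars-profiling entry above: the test hinges on pandas' nullable `Int64` dtype (hence the `pandas >= 1` skip marker). A small illustration of that dtype, independent of the profiling library:

```python
import pandas as pd

# pandas >= 1.0: the nullable "Int64" dtype keeps missing values as pd.NA
# instead of upcasting the whole series to float64.
series = pd.Series([1, None], dtype="Int64")
print(series.dtype)         # Int64
print(series.isna().sum())  # 1
print(series.count())       # 1 non-missing value
```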
jattenberg/SternPythonDataScience2018 | [
"78dbb8190faaf3946aac56efa2dd181554165e7d"
] | [
"ds_utils/features_pipeline_3.py"
] | [
"import logging\nimport re\nfrom collections import Counter, OrderedDict\nimport numpy as np\nimport chardet\nfrom pandas import DataFrame\nfrom html2text import html2text\nimport os\nimport functools\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline\nfrom sklearn.preprocessing import OneHotEncoder, LabelBinarizer, StandardScaler, PolynomialFeatures, Imputer, MinMaxScaler\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\nclass ItemSelector(BaseEstimator, TransformerMixin):\n def __init__(self, key):\n self.key = key\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n if DataFrame is type(X):\n return X[self.key]\n else:\n raise Exception(\"unsupported itemselector type. implement some new stuff: %s\" % type(X))\n\nclass Reshaper(BaseEstimator, TransformerMixin):\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return X[:,None]\n\nclass Dummyizer(BaseEstimator, TransformerMixin):\n\n def fit(self, X, y=None):\n self.dummyizer = LabelBinarizer()\n self.dummyizer.fit(X)\n return self\n\n def transform(self, X):\n return self.dummyizer.transform(X)\n\nclass Concatenator(BaseEstimator, TransformerMixin):\n def __init__(self, glue=\" \"):\n self.glue = glue\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n cols = len(list(X.shape))\n out = [\"%s\" % (self.glue.join(x) if cols > 1 else x) for x in X]\n return out\n \nclass Floater(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return X.astype(\"float64\")\n\nclass Densinator(BaseEstimator, TransformerMixin):\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return X.todense()\n\nclass Quantiler(BaseEstimator, TransformerMixin):\n def __init__(self, n_quantiles=100):\n self.n_quantiles = n_quantiles\n def fit(self, X, y=None):\n percentiles = np.linspace(0, 100, self.n_quantiles+2)\n self.quantiles = np.percentile(X, percentiles)\n return self\n\n def find_quantile(self, x):\n return [1 if self.quantiles[i] < x and self.quantiles[i+1] >= x else 0 for i in range(0, len(self.quantiles) - 1)]\n \n def transform(self, X):\n return [self.find_quantile(x) for x in X]\n\nclass WordCleaner(BaseEstimator, TransformerMixin):\n\n def decode(self, content):\n str_bytes = str.encode(content)\n charset = chardet.detect(str_bytes)['encoding']\n return str_bytes.decode(encoding=charset, errors='ignore')\n\n feature_regex_pipe = [\n (r\"\\|\", \" \"),\n (r\"\\r\\n?|\\n\", \" \"),\n (r\"[^\\x00-\\x7F]+\", \" \"),\n (r\"\\s+\", \" \"),\n (r\"https?://\\S+\", \"_url_\"),\n (r\"\\w{,20}[a-zA-Z]{1,20}[0-9]{1,20}\", \"_wn_\"),\n (r\"\\d+/\\d+/\\d+\", \"_d2_\"),\n (r\"\\d+/\\d+\", \"_d_\"),\n (r\"\\d+:\\d+:\\d+\", \"_ts_\"),\n (r\":\", \" \")\n ]\n \n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n def _text_clean(x):\n all_clean = html2text(self.decode(x))\n replaced = functools.reduce(lambda acc, re_rep: re.sub(re_rep[0], re_rep[1], acc), self.feature_regex_pipe, all_clean)\n return \" \".join([y for y in replaced.split(\" \") if len(y) <= 20])\n\n return map(_text_clean, X)\n \n \n\ndef build_poly_wrapper(col,\n degree=2,\n transformers=[]):\n\n transformer_list = [get_transformer(trans['name'])(col, **trans['config']) for trans in transformers]\n return (col + '_poly', Pipeline([\n ('union', 
FeatureUnion(transformer_list=transformer_list)),\n ('densinator', Densinator()),\n ('poly', PolynomialFeatures(degree=degree))\n ]))\n\ndef build_numeric_column(col):\n return (\"numeric_%s\" % col, Pipeline([\n ('selector', ItemSelector(col)), \n ('reshaper', Reshaper()),\n ('floater', Floater()),\n ('scaler', StandardScaler())]))\n\ndef build_quantile_column(col,\n n_quantiles=100):\n return (\"quantile_%s\" % col, Pipeline([\n ('selector', ItemSelector(col)), \n ('reshaper', Reshaper()),\n ('quantiler', Quantiler(n_quantiles))]))\n\ndef build_range_scaler(col,\n min=0,\n max=1):\n return (\"min_max %s\" % col, Pipeline([\n ('selector', ItemSelector(col)),\n ('reshaper', Reshaper()),\n ('min_max', MinMaxScaler(feature_range=(min, max)))]))\n\ndef build_dummyizer(col):\n return (\"onehot_s_%s\" % col, Pipeline([\n ('selector', ItemSelector(col)),\n ('concat_cols', Concatenator()),\n ('label', Dummyizer())]))\n\ndef build_null(col):\n return (\"null_%s\" % col, Pipeline([\n ('selector', ItemSelector(col)), \n ('reshaper', Reshaper())]))\n\ndef build_wordcount_transformer(col,\n binary=False,\n min_df=0.0,\n ngrams=2):\n return (\"wordcount_%s\" % col, Pipeline([\n ('selector', ItemSelector(col)),\n ('concat_cols', Concatenator()),\n ('cleaner', WordCleaner()),\n ('tfidf', CountVectorizer(binary=binary, min_df=min_df, decode_error='ignore', ngram_range=(1,ngrams)))]))\n\ndef build_tfidf_transformer(col,\n min_df=0.0,\n ngrams=2):\n return (\"tfidf_%s\" % col, Pipeline([\n ('selector', ItemSelector(col)),\n ('concat_cols', Concatenator()),\n ('cleaner', WordCleaner()),\n ('tfidf', TfidfVectorizer(min_df=min_df, decode_error='ignore', ngram_range=(1,ngrams)))]))\n\ndef get_transformer(name):\n transformer_map = {\n \"standard_numeric\" : build_numeric_column,\n \"quantile_numeric\" : build_quantile_column,\n \"range_numeric\" : build_range_scaler, \n \"poly\" : build_poly_wrapper,\n \"dummyizer\" : build_dummyizer,\n \"null_transformer\" : build_null,\n \"tfidf\" : build_tfidf_transformer,\n \"word_count\" : build_wordcount_transformer\n }\n return transformer_map[name]\n\ndef transformer_from_config(field, transformer_config):\n name = transformer_config['name']\n configs = transformer_config.get('config', {})\n return get_transformer(name)(field, **configs) \n \ndef pipeline_from_config_file(filename):\n return pipeline_from_config(json.load(open(filename, 'r')))\n\ndef pipeline_from_config(configuration):\n transformers = [[transformer_from_config(field_config['field'], transformer_config) for transformer_config in field_config['transformers']] for field_config in configuration]\n transformer_list = functools.reduce(lambda x,y: x+y, transformers)\n return Pipeline([('union', FeatureUnion(transformer_list=transformer_list))])\n"
] | [
[
"sklearn.preprocessing.StandardScaler",
"numpy.percentile",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.pipeline.FeatureUnion",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.linspace",
"sklearn.preprocessing.LabelBinarizer"
]
] |
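On the features-pipeline entry above: `pipeline_from_config` expects a list of per-field configs, each naming transformers from `transformer_map`. A hypothetical configuration in that shape (field names and parameter values are invented for illustration):

```python
# Shape inferred from transformer_from_config()/pipeline_from_config() above.
configuration = [
    {"field": "age",
     "transformers": [{"name": "standard_numeric", "config": {}}]},
    {"field": "description",
     "transformers": [{"name": "tfidf", "config": {"min_df": 0.01, "ngrams": 1}}]},
]
# pipe = pipeline_from_config(configuration)
# features = pipe.fit_transform(some_dataframe)
```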
liwt31/Renormalizer | [
"123a9d53f4f5f32c0088c255475f0ee60d02c745"
] | [
"renormalizer/tests/parameter_PBI.py"
] | [
"import numpy as np\n\nfrom renormalizer.utils import Quantity, constant\nfrom renormalizer.model import MolList, Mol, Phonon\n\n\ndef construct_mol(nmols, dmrg_nphs, hartree_nphs) -> MolList:\n assert dmrg_nphs + hartree_nphs == 10\n elocalex = Quantity(2.13 / constant.au2ev)\n dipole_abs = 1.0\n\n # cm^-1\n omega_value = (\n np.array(\n [206.0, 211.0, 540.0, 552.0, 751.0, 1325.0, 1371.0, 1469.0, 1570.0, 1628.0]\n )\n * constant.cm2au\n )\n S_value = np.array(\n [0.197, 0.215, 0.019, 0.037, 0.033, 0.010, 0.208, 0.042, 0.083, 0.039]\n )\n\n # sort from large to small\n gw = np.sqrt(S_value) * omega_value\n idx = np.argsort(gw)[::-1]\n omega_value = omega_value[idx]\n S_value = S_value[idx]\n\n omega = [[Quantity(x), Quantity(x)] for x in omega_value]\n D_value = np.sqrt(S_value) / np.sqrt(omega_value / 2.0)\n displacement = [[Quantity(0), Quantity(x)] for x in D_value]\n\n ph_phys_dim = [5] * 10\n\n # print(dmrg_nphs, hartree_nphs)\n is_hartree = [False] * dmrg_nphs + [True] * hartree_nphs\n ph_list = [\n Phonon(*args[:3], hartree=args[3])\n for args in zip(omega, displacement, ph_phys_dim, is_hartree)\n ]\n\n mol_list = MolList([Mol(elocalex, ph_list, dipole_abs)] * nmols, Quantity(500, \"cm-1\"), scheme=3)\n #mol_list = MolList([Mol(elocalex, ph_list, dipole_abs, heatbath=True)] * nmols, Quantity(0.0124, \"eV\"))\n\n return mol_list\n"
] | [
[
"numpy.array",
"numpy.argsort",
"numpy.sqrt"
]
] |
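On the Renormalizer entry above: the mode reordering sorts by vibronic coupling strength g = sqrt(S) * omega, and displacements follow D = sqrt(S) / sqrt(omega / 2). A standalone NumPy sketch with two of the listed modes; the cm^-1-to-hartree factor is the usual approximate constant, not imported from the repo.

```python
import numpy as np

cm2au = 4.556335e-6  # approx. 1 cm^-1 in hartree (assumption, not constant.cm2au)
omega = np.array([206.0, 1371.0]) * cm2au
S = np.array([0.197, 0.208])  # Huang-Rhys factors

g = np.sqrt(S) * omega                 # coupling strengths
D = np.sqrt(S) / np.sqrt(omega / 2.0)  # displacements
order = np.argsort(g)[::-1]            # strongest-coupled mode first
print(omega[order] / cm2au)            # [1371.  206.]
```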
georgetown-analytics/DC-Criminalistics | [
"9238523c8e4eb1f4e7225921096f9c65b12a4ae7"
] | [
"ingest-data-progs/ingest_weather_data.py"
] | [
"\"\"\"\nThis program ingests weather data from Dark Sky API using DC crime data.\n\"\"\"\nimport json\nimport pandas as pd\nimport requests\nimport sqlite3\n\ndef formatCrimeData():\n \"\"\"\n Import crime data and format date/time of crime for Dark Sky API calls.\n \"\"\"\n crime_zip = zipfile.ZipFile(\"../data/dc-crime-data/dc-crime-data.csv.zip\", mode='r')\n crime_csv = crime_zip.open('dc-crime-data.csv')\n crime_df = pd.read_csv(crime_csv)\n\n crime_df['STATE_DATE_TRUNC'] = crime_df['START_DATE'].str[:-4]\n\n crime_df.dropna(subset=['STATE_DATE_TRUNC','LATITUDE','LONGITUDE'], how='any', inplace=True)\n\n crime_df_nodup = crime_df.drop_duplicates(['STATE_DATE_TRUNC','LATITUDE','LONGITUDE'])\n\n return crime_df_nodup\n\ndef darkSkyAPICall(df_api):\n \"\"\"\n Make an API call for a particular time/date/location of crime, return as JSON.\n \"\"\"\n base_url = 'https://api.darksky.net/forecast/'\n api_key = ''\n exclude = 'minutely, hourly, daily, flags'\n params = {'exclude': exclude}\n\n query = \"/{},{},{}\".format(df_api['LATITUDE'],df_api['LONGITUDE'],df_api['STATE_DATE_TRUNC'])\n\n url = base_url + api_key + query\n\n try:\n response = requests.get(url, params=params)\n except ConnectionError:\n pass\n\n try:\n response_json = response.json()\n except:\n response_json = {}\n\n return response_json\n\ndef writeDatabaseFile(json_doc):\n \"\"\"\n Write the return from calling the Dark Sky API to a database file.\n \"\"\"\n #Create Directory Path\n path_text = '../data/weather-data'\n path = os.path.dirname(path_text)\n\n #If path does not exist, then make it.\n if not os.path.exists(path):\n os.makedirs(path)\n\n #Connect to DB table in the folder path.\n conn = sqlite3.connect('../data/weather-data/weather_data.db')\n c = conn.cursor()\n\n #If table already exists, overwrite it.\n c.execute(\"drop table if exists weather_data\")\n\n #Turn query into a DataFrame from JSON, flattening JSON file.\n weather_df = pd.io.json.json_normalize(json_doc, sep=\"_\")\n\n weather_df.to_sql('weather_data',conn)\n\n #Commit and close connection.\n conn.commit()\n conn.close()\n\ndef main():\n \"\"\"\n The driver function.\n \"\"\"\n crime_df = formatCrimeData()\n\n weather_list = []\n\n for index, row in crime_df.iterrows():\n weather_json = darkSkyAPICall(row)\n weather_list.append(weather_json)\n\n writeDatabaseFile(json_doc=weather_list)\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.io.json.json_normalize",
"pandas.read_csv"
]
] |
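On the weather-ingest entry above: `pd.io.json.json_normalize(..., sep="_")` flattens each nested API response into one wide row. A small sketch with a made-up Dark Sky-style document, using the modern `pd.json_normalize` spelling:

```python
import pandas as pd

docs = [
    {"latitude": 38.9, "currently": {"temperature": 71.2, "summary": "Clear"}},
    {"latitude": 38.9, "currently": {"temperature": 55.0, "summary": "Rain"}},
]
flat = pd.json_normalize(docs, sep="_")  # pd.io.json.json_normalize in older pandas
print(flat.columns.tolist())
# ['latitude', 'currently_temperature', 'currently_summary']
```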
secretppcdc/secretppcdc.github.com | [
"0e00d7a02b177bfe1932bab1ff68096e2acf6f05"
] | [
"Recommend System/source code/DropoutNet-master/DropoutNet-master/utils.py"
] | [
"import time\nimport datetime\nimport numpy as np\nimport scipy\nimport tensorflow as tf\nfrom sklearn import preprocessing as prep\n\n\nclass timer(object):\n def __init__(self, name='default'):\n \"\"\"\n timer object to record running time of functions, not for micro-benchmarking\n usage is:\n $ timer = utils.timer('name').tic()\n $ timer.toc('process A').tic()\n\n\n :param name: label for the timer\n \"\"\"\n self._start_time = None\n self._name = name\n self.tic()\n\n def tic(self):\n self._start_time = time.time()\n return self\n\n def toc(self, message):\n elapsed = time.time() - self._start_time\n message = '' if message is None else message\n print('[{0:s}] {1:s} elapsed [{2:s}]'.format(self._name, message, timer._format(elapsed)))\n return self\n\n def reset(self):\n self._start_time = None\n return self\n\n @staticmethod\n def _format(s):\n delta = datetime.timedelta(seconds=s)\n d = datetime.datetime(1, 1, 1) + delta\n s = ''\n if (d.day - 1) > 0:\n s = s + '{:d} days'.format(d.day - 1)\n if d.hour > 0:\n s = s + '{:d} hr'.format(d.hour)\n if d.minute > 0:\n s = s + '{:d} min'.format(d.minute)\n s = s + '{:d} s'.format(d.second)\n return s\n\n\ndef batch(iterable, _n=1, drop=True):\n \"\"\"\n returns batched version of some iterable\n :param iterable: iterable object as input\n :param _n: batch size\n :param drop: if true, drop extra if batch size does not divide evenly,\n otherwise keep them (last batch might be shorter)\n :return: batched version of iterable\n \"\"\"\n it_len = len(iterable)\n for ndx in range(0, it_len, _n):\n if ndx + _n < it_len:\n yield iterable[ndx:ndx + _n]\n elif drop is False:\n yield iterable[ndx:it_len]\n\n\ndef tfidf(x):\n \"\"\"\n compute tfidf of numpy array x\n :param x: input array, document by terms\n :return:\n \"\"\"\n x_idf = np.log(x.shape[0] - 1) - np.log(1 + np.asarray(np.sum(x > 0, axis=0)).ravel())\n x_idf = np.asarray(x_idf)\n x_idf_diag = scipy.sparse.lil_matrix((len(x_idf), len(x_idf)))\n x_idf_diag.setdiag(x_idf)\n x_tf = x.tocsr()\n x_tf.data = np.log(x_tf.data + 1)\n x_tfidf = x_tf * x_idf_diag\n return x_tfidf\n\n\ndef prep_standardize(x):\n \"\"\"\n takes sparse input and compute standardized version\n\n Note:\n cap at 5 std\n\n :param x: 2D scipy sparse data array to standardize (column-wise), must support row indexing\n :return: the object to perform scale (stores mean/std) for inference, as well as the scaled x\n \"\"\"\n x_nzrow = x.any(axis=1)\n scaler = prep.StandardScaler().fit(x[x_nzrow, :])\n x_scaled = np.copy(x)\n x_scaled[x_nzrow, :] = scaler.transform(x_scaled[x_nzrow, :])\n x_scaled[x_scaled > 5] = 5\n x_scaled[x_scaled < -5] = -5\n x_scaled[np.absolute(x_scaled) < 1e-5] = 0\n return scaler, x_scaled\n\n\ndef prep_standardize_dense(x):\n \"\"\"\n takes dense input and compute standardized version\n\n Note:\n cap at 5 std\n\n :param x: 2D numpy data array to standardize (column-wise)\n :return: the object to perform scale (stores mean/std) for inference, as well as the scaled x\n \"\"\"\n scaler = prep.StandardScaler().fit(x)\n x_scaled = scaler.transform(x)\n x_scaled[x_scaled > 5] = 5\n x_scaled[x_scaled < -5] = -5\n x_scaled[np.absolute(x_scaled) < 1e-5] = 0\n return scaler, x_scaled\n\n\ndef batch_eval_recall(_sess, tf_eval, eval_feed_dict, recall_k, eval_data):\n \"\"\"\n given EvalData and DropoutNet compute graph in TensorFlow, runs batch evaluation\n\n :param _sess: tf session\n :param tf_eval: the evaluate output symbol in tf\n :param eval_feed_dict: method to parse tf, pick from EvalData method\n :param 
recall_k: list of thresholds to compute recall at (information retrieval recall)\n :param eval_data: EvalData instance\n :return: recall array at thresholds matching recall_k\n \"\"\"\n tf_eval_preds_batch = []\n for (batch, (eval_start, eval_stop)) in enumerate(eval_data.eval_batch):\n tf_eval_preds = _sess.run(tf_eval,\n feed_dict=eval_feed_dict(\n batch, eval_start, eval_stop, eval_data))\n tf_eval_preds_batch.append(tf_eval_preds)\n tf_eval_preds = np.concatenate(tf_eval_preds_batch)\n tf.local_variables_initializer().run()\n\n # filter non-zero targets\n y_nz = [len(x) > 0 for x in eval_data.R_test_inf.rows]\n y_nz = np.arange(len(eval_data.R_test_inf.rows))[y_nz]\n\n preds_all = tf_eval_preds[y_nz, :]\n\n recall = []\n for at_k in recall_k:\n preds_k = preds_all[:, :at_k]\n y = eval_data.R_test_inf[y_nz, :]\n\n x = scipy.sparse.lil_matrix(y.shape)\n x.rows = preds_k\n x.data = np.ones_like(preds_k)\n\n z = y.multiply(x)\n recall.append(np.mean(np.divide((np.sum(z, 1)), np.sum(y, 1))))\n return recall\n"
] | [
[
"numpy.concatenate",
"tensorflow.local_variables_initializer",
"numpy.ones_like",
"numpy.asarray",
"numpy.log",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.copy",
"numpy.absolute",
"scipy.sparse.lil_matrix"
]
] |
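On the DropoutNet entry above: note a quirk of its `batch` helper, which with `drop=True` discards the trailing slice even when it is exactly full. A self-contained copy with a usage check:

```python
def batch(iterable, _n=1, drop=True):
    # Copied semantics from utils.batch above: yield fixed-size slices; the
    # trailing slice (short or even exactly full) is kept only when drop=False.
    it_len = len(iterable)
    for ndx in range(0, it_len, _n):
        if ndx + _n < it_len:
            yield iterable[ndx:ndx + _n]
        elif drop is False:
            yield iterable[ndx:it_len]

print(list(batch(list(range(7)), 3)))              # [[0, 1, 2], [3, 4, 5]]
print(list(batch(list(range(7)), 3, drop=False)))  # [[0, 1, 2], [3, 4, 5], [6]]
```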
AlexHerger/self-driving-truck | [
"0d6870ea8d00eb5daa89deee2ce0b8fe4d04783b"
] | [
"annotate/common.py"
] | [
"from __future__ import print_function, division\n\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom lib import replay_memory\nfrom lib import util\nimport Tkinter\nfrom PIL import Image, ImageTk\nimport numpy as np\nimport cPickle as pickle\nimport time\nimport imgaug as ia\n\ndef numpy_to_tk_image(image):\n image_pil = Image.fromarray(image)\n image_tk = ImageTk.PhotoImage(image_pil)\n return image_tk\n\ndef load_annotations(fp):\n if os.path.isfile(fp):\n return pickle.load(open(fp, \"r\"))\n else:\n return None\n\ndef draw_normal_distribution(height, width, x, y, size):\n if 0 <= y < height and 0 <= x < width:\n pad_by = size * 10\n img = np.zeros((pad_by+height+pad_by, pad_by+width+pad_by), dtype=np.float32)\n #img = img.pad(img, ((20, 20), (20, 20)))\n #normal = util.create_2d_gaussian(size=size*2, fwhm=size)\n normal = util.create_2d_gaussian(size=size*4, sigma=size)\n #print(normal)\n normal_h, normal_w = normal.shape\n normal_hh, normal_wh = normal_h//2, normal_w//2\n #print(\"normal size\", normal.shape)\n #print(\"img.shape\", img.shape)\n #print(\"img[y-normal_hh:y+normal_hh, x-normal_wh:x+normal_wh]\", img[y-normal_hh:y+normal_hh, x-normal_wh:x+normal_wh].shape)\n y1 = np.clip(y-normal_hh+pad_by, 0, img.shape[0]-1) #-(2*pad_by))\n y2 = np.clip(y+normal_hh+pad_by, 0, img.shape[0]-1) #-(2*pad_by))\n x1 = np.clip(x-normal_wh+pad_by, 0, img.shape[1]-1) #-(2*pad_by))\n x2 = np.clip(x+normal_wh+pad_by, 0, img.shape[1]-1) #-(2*pad_by))\n if x2 - x1 > 0 and y2 - y1 > 0:\n img[y1:y2, x1:x2] = normal\n return img[pad_by:-pad_by, pad_by:-pad_by]\n else:\n return np.zeros((height, width), dtype=np.float32)\n\nclass GridAnnotationWindow(object):\n def __init__(self, master, canvas, memory, current_state_idx, annotations, current_anno_attribute_name, save_to_fp, every_nth_example=10, zoom_factor=4):\n self.master = master\n self.canvas = canvas\n self.memory = memory\n self.current_state_idx = current_state_idx\n self.annotations = annotations if annotations is not None else dict()\n self.current_annotation = None\n self.background_label = None\n\n self.eraser = False\n self.dirty = False\n self.brush_size = 3\n self.last_autosave = 0\n self.heatmap_alpha = 0.3\n self.heatmap_alphas = (0.1, 0.3)\n self.current_anno_attribute_name = current_anno_attribute_name\n self.every_nth_example = every_nth_example\n self.zoom_factor = zoom_factor\n self.autosave_every_nth = 20\n self.save_to_fp = save_to_fp\n\n self.is_showing_directly_previous_state = False\n self.directly_previous_state = None\n self.current_state = None\n self.switch_to_state(self.current_state_idx, autosave=False)\n #self.current_state = memory.get_state_by_id(current_state_idx)\n\n @staticmethod\n def create(memory, current_anno_attribute_name, save_to_fp, every_nth_example=10, zoom_factor=4):\n print(\"Loading previous annotations...\")\n annotations = load_annotations(save_to_fp)\n #is_annotated = dict([(str(annotation.idx), True) for annotation in annotations])\n\n current_state_idx = memory.id_min\n if annotations is not None:\n while current_state_idx < memory.id_max:\n key = str(current_state_idx)\n if key not in annotations or current_anno_attribute_name not in annotations[key]:\n break\n current_state_idx += every_nth_example\n print(\"ID of first unannotated state: %d\" % (current_state_idx,))\n\n master = Tkinter.Tk()\n state = memory.get_state_by_id(current_state_idx)\n canvas_height = state.screenshot_rs.shape[0] * zoom_factor\n canvas_width = state.screenshot_rs.shape[1] * 
zoom_factor\n print(\"canvas height, width:\", canvas_height, canvas_width)\n canvas = Tkinter.Canvas(master, width=canvas_width, height=canvas_height)\n canvas.pack()\n canvas.focus_set()\n\n #y = int(canvas_height / 2)\n #w.create_line(0, y, canvas_width, y, fill=\"#476042\")\n message = Tkinter.Label(master, text=\"Click to draw annotation. Press E to switch to eraser mode. Press S to save. Use Numpad +/- for brush size.\")\n message.pack(side=Tkinter.BOTTOM)\n\n window_state = GridAnnotationWindow(\n master,\n canvas,\n memory,\n current_state_idx,\n annotations,\n current_anno_attribute_name,\n save_to_fp,\n every_nth_example,\n zoom_factor\n )\n\n #canvas.bind(\"<Button-1>\", OnPaint(window_state))\n #master.bind(\"<Button-1>\", lambda event: print(event))\n #master.bind(\"<Button-3>\", lambda event: print(\"right\", event))\n #master.bind(\"<ButtonPress-1>\", lambda event: print(\"press\", event))\n master.bind(\"<B1-Motion>\", window_state.on_left_mouse_button)\n #master.bind(\"<ButtonRelease-1>\", lambda event: print(\"release\", event))\n master.bind(\"<B3-Motion>\", window_state.on_right_mouse_button)\n canvas.bind(\"<e>\", lambda event: window_state.toggle_eraser())\n canvas.bind(\"<s>\", lambda event: window_state.save_annotations(force=True))\n canvas.bind(\"<w>\", lambda event: window_state.toggle_heatmap())\n canvas.bind(\"<p>\", lambda event: window_state.toggle_previous_screenshot())\n canvas.bind(\"<Left>\", lambda event: window_state.previous_state(autosave=True))\n canvas.bind(\"<Right>\", lambda event: window_state.next_state(autosave=True))\n canvas.bind(\"<KP_Add>\", lambda event: window_state.increase_brush_size())\n canvas.bind(\"<KP_Subtract>\", lambda event: window_state.decrease_brush_size())\n\n return window_state\n\n @property\n def grid(self):\n return self.current_annotation[self.current_anno_attribute_name]\n\n def toggle_eraser(self):\n self.eraser = not self.eraser\n print(\"Eraser set to %s\" % (self.eraser,))\n\n def toggle_heatmap(self):\n pos = self.heatmap_alphas.index(self.heatmap_alpha)\n self.heatmap_alpha = self.heatmap_alphas[(pos+1) % len(self.heatmap_alphas)]\n\n self.set_canvas_background(self._generate_heatmap())\n\n def toggle_previous_screenshot(self):\n if self.directly_previous_state is not None:\n if self.is_showing_directly_previous_state:\n self.set_canvas_background(self._generate_heatmap())\n else:\n self.set_canvas_background(self.directly_previous_state.screenshot_rs)\n self.is_showing_directly_previous_state = not self.is_showing_directly_previous_state\n\n def increase_brush_size(self):\n self.brush_size = np.clip(self.brush_size+1, 1, 100)\n print(\"Increased brush size to %d\" % (self.brush_size,))\n\n def decrease_brush_size(self):\n self.brush_size = np.clip(self.brush_size-1, 1, 100)\n print(\"Decreased brush size to %d\" % (self.brush_size,))\n\n def previous_state(self, autosave):\n print(\"Switching to previous state...\")\n self.current_state_idx -= self.every_nth_example\n assert self.current_state_idx >= self.memory.id_min, \"Start of memory reached (%d vs %d)\" % (self.current_state_idx, self.memory.id_min)\n self.switch_to_state(self.current_state_idx, autosave=autosave)\n\n def next_state(self, autosave):\n print(\"Switching to next state...\")\n self.current_state_idx += self.every_nth_example\n assert self.current_state_idx <= self.memory.id_max, \"End of memory reached (%d vs %d)\" % (self.current_state_idx, self.memory.id_max)\n self.switch_to_state(self.current_state_idx, autosave=autosave)\n\n def 
switch_to_state(self, idx, autosave):\n print(\"Switching to state %d (autosave=%s)...\" % (idx, str(autosave)))\n self.directly_previous_state = self.memory.get_state_by_id(idx-1)\n self.current_state = self.memory.get_state_by_id(idx)\n assert self.current_state is not None\n self.current_state_idx = idx\n\n if autosave:\n if (self.last_autosave+1) % self.autosave_every_nth == 0:\n # only autosaves if dirty flag is true, ie any example was changed\n self.save_annotations()\n self.last_autosave = 0\n else:\n self.last_autosave += 1\n print(\"last_autosave=\", self.last_autosave)\n\n key = str(self.current_state_idx)\n if key in self.annotations:\n self.current_annotation = self.annotations[key]\n else:\n self.current_annotation = {\n \"idx\": self.current_state_idx,\n \"from_datetime\": self.current_state.from_datetime,\n \"screenshot_rs\": self.current_state.screenshot_rs,\n }\n self.annotations[key] = self.current_annotation\n\n annos_done = [key for key in self.current_annotation.keys() if key not in [\"idx\", \"from_datetime\", \"screenshot_rs\"]]\n print(\"Annotations added to this state: %s\" % (\", \".join(annos_done)))\n if self.current_anno_attribute_name not in annos_done:\n print(\"This state has not yet been annotated with '%s'.\" % (self.current_anno_attribute_name,))\n img = self.current_state.screenshot_rs\n empty_grid = np.zeros((img.shape[0], img.shape[1]), dtype=np.float32)\n self.current_annotation[self.current_anno_attribute_name] = empty_grid\n\n self.is_showing_directly_previous_state = False\n self.update_annotation_grid(self.grid, initial=True)\n\n def save_annotations(self, force=False):\n #print(self.annotations)\n if self.dirty or force:\n print(\"Saving...\")\n with open(self.save_to_fp, \"w\") as f:\n pickle.dump(self.annotations, f, protocol=-1)\n self.dirty = False\n print(\"Finished saving.\")\n else:\n print(\"Not saved (not marked dirty)\")\n\n \"\"\"\n def redraw_canvas(self):\n img = generate_canvas_image(self.current_state.screenshot_rs, self.grid)\n self.canvas.delete(Tkinter.ALL)\n self.set_canvas_background(self.canvas, img)\n \"\"\"\n\n def update_annotation_grid(self, annotation_grid, initial=False):\n self.current_annotation[self.current_anno_attribute_name] = annotation_grid\n #self.redraw_canvas()\n #img = generate_canvas_image(self.current_state.screenshot_rs, annotation_grid)\n img_heatmap = self._generate_heatmap()\n self.set_canvas_background(img_heatmap)\n if not initial:\n self.dirty = True\n\n def set_canvas_background(self, image):\n if self.background_label is None:\n # initialize background image label (first call)\n #img = self.current_state.screenshot_rs\n #bg_img_tk = numpy_to_tk_image(np.zeros(img.shape))\n img_heatmap = self._generate_heatmap()\n img_heatmap_rs = ia.imresize_single_image(img_heatmap, (img_heatmap.shape[0]*self.zoom_factor, img_heatmap.shape[1]*self.zoom_factor), interpolation=\"nearest\")\n bg_img_tk = numpy_to_tk_image(img_heatmap_rs)\n self.background_label = Tkinter.Label(self.canvas, image=bg_img_tk)\n self.background_label.place(x=0, y=0, relwidth=1, relheight=1, anchor=Tkinter.NW)\n self.background_label.image = bg_img_tk\n\n #print(\"image size\", image.shape)\n #print(\"image height, width\", image.to_array().shape)\n image_rs = ia.imresize_single_image(image, (image.shape[0]*self.zoom_factor, image.shape[1]*self.zoom_factor), interpolation=\"nearest\")\n image_tk = numpy_to_tk_image(image_rs)\n self.background_label.configure(image=image_tk)\n self.background_label.image = image_tk\n\n def 
_generate_heatmap(self):\n return util.draw_heatmap_overlay(self.current_state.screenshot_rs, self.grid, alpha=self.heatmap_alpha)\n\n def on_left_mouse_button(self, event):\n #canvas = event.widget\n x = self.canvas.canvasx(event.x) / self.zoom_factor\n y = self.canvas.canvasy(event.y) / self.zoom_factor\n height, width = self.current_state.screenshot_rs.shape[0:2]\n #x = event.x\n #y = event.y\n #canvas.delete(Tkinter.ALL)\n\n grid = self.grid\n normal = draw_normal_distribution(height, width, int(x), int(y), self.brush_size)\n #normal = np.zeros_like(grid)\n #normal[int(y)-2:int(y)+2, int(x)-2:int(x)+2] = 1.0\n if not self.eraser:\n #grid = np.clip(grid + normal, 0, 1)\n grid = np.maximum(grid, normal)\n else:\n grid = grid - normal\n grid = np.clip(grid, 0, 1)\n self.update_annotation_grid(grid)\n #time.sleep(0.1)\n\n def on_right_mouse_button(self, event):\n x = self.canvas.canvasx(event.x) / self.zoom_factor\n y = self.canvas.canvasy(event.y) / self.zoom_factor\n height, width = self.current_state.screenshot_rs.shape[0:2]\n grid = self.grid\n normal = draw_normal_distribution(height, width, int(x), int(y), self.brush_size)\n grid = grid - normal\n grid = np.clip(grid, 0, 1)\n self.update_annotation_grid(grid)\n"
] | [
[
"numpy.maximum",
"numpy.zeros",
"numpy.clip"
]
] |
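The annotation tool above calls a `draw_normal_distribution` brush helper that is defined earlier in the same file, outside this excerpt. A plausible stand-in (purely an assumption, not the repo's implementation) is a 2D Gaussian bump that peaks at 1.0 under the cursor:

import numpy as np

def draw_normal_distribution(height, width, x, y, size):
    # hypothetical brush: Gaussian centred on (x, y) with sigma = size
    yy, xx = np.mgrid[0:height, 0:width]
    dist_sq = (xx - x) ** 2 + (yy - y) ** 2
    return np.exp(-dist_sq / (2.0 * float(size) ** 2)).astype(np.float32)

With `np.maximum(grid, normal)` for drawing and `grid - normal` for erasing (as in the mouse handlers above), repeated strokes saturate at 1.0 instead of stacking without bound.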
dlshu/RS-GAN-v1 | [
"1e992755d78e24ce6bf53f9ee2a665747017593d"
] | [
"model/conv_bn_relu.py"
] | [
"import torch.nn as nn\n\nclass ConvBNRelu(nn.Module):\n \"\"\"\n Building block used in HiDDeN network. Is a sequence of Convolution, Batch Normalization, and ReLU activation\n \"\"\"\n def __init__(self, channels_in, channels_out, stride=1):\n\n super(ConvBNRelu, self).__init__()\n \n self.layers = nn.Sequential(\n nn.Conv2d(channels_in, channels_out, 3, stride, padding=1),\n nn.BatchNorm2d(channels_out),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n return self.layers(x)\n"
] | [
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
]
] |
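A minimal usage sketch for the ConvBNRelu block above; the input size, channel counts, and import path (taken from the file_path field) are illustrative assumptions, not values from the repo:

import torch
from model.conv_bn_relu import ConvBNRelu  # import path assumed from the file_path field

block = ConvBNRelu(channels_in=3, channels_out=64, stride=2)
x = torch.randn(8, 3, 128, 128)  # batch of 8 RGB images (assumed size)
y = block(x)                     # 3x3 conv with padding=1: stride=2 halves H and W
print(y.shape)                   # torch.Size([8, 64, 64, 64])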
Shahaf11111/retinanet | [
"48e1680c84aeec479db79d8a1645c05b90136b9f"
] | [
"visualize.py"
] | [
"import numpy as np\nimport torchvision\nimport time\nimport os\nimport copy\nimport pdb\nimport time\nimport argparse\n\nimport pandas as pd\n\nimport sys\nimport cv2\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import datasets, models, transforms\n\nfrom retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, \\\n\tUnNormalizer, Normalizer\n\n\nassert torch.__version__.split('.')[0] == '1'\n\nprint('CUDA available: {}'.format(torch.cuda.is_available()))\n\n\ndef main(args=None):\n\tparser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')\n\n\tparser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.')\n\tparser.add_argument('--coco_path', help='Path to COCO directory')\n\tparser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')\n\tparser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')\n\n\tparser.add_argument('--model', help='Path to model (.pt) file.')\n\n\tparser = parser.parse_args(args)\n\n\tif parser.dataset == 'coco':\n\t\tdataset_val = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose([Normalizer(), Resizer()]))\n\telif parser.dataset == 'csv':\n\t\tdataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()]))\n\telse:\n\t\traise ValueError('Dataset type not understood (must be csv or coco), exiting.')\n\n\tsampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)\n\tdataloader_val = DataLoader(dataset_val, num_workers=1, collate_fn=collater, batch_sampler=sampler_val)\n\n\tretinanet = torch.load(parser.model)\n\n\tuse_gpu = True\n\n\tif use_gpu:\n\t\tif torch.cuda.is_available():\n\t\t\tretinanet = retinanet.cuda()\n\n\tif torch.cuda.is_available():\n\t\tretinanet = torch.nn.DataParallel(retinanet).cuda()\n\telse:\n\t\tretinanet = torch.nn.DataParallel(retinanet)\n\n\tretinanet.eval()\n\n\tunnormalize = UnNormalizer()\n\n\tdef draw_caption(image, box, caption):\n\n\t\tb = np.array(box).astype(int)\n\t\tcv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)\n\t\tcv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)\n\n\tfor idx, data in enumerate(dataloader_val):\n\n\t\twith torch.no_grad():\n\t\t\tst = time.time()\n\t\t\tif torch.cuda.is_available():\n\t\t\t\tscores, classification, transformed_anchors = retinanet(data['img'].cuda().float())\n\t\t\telse:\n\t\t\t\tscores, classification, transformed_anchors = retinanet(data['img'].float())\n\t\t\tprint('Elapsed time: {}'.format(time.time()-st))\n\t\t\tidxs = np.where(scores.cpu()>0.5)\n\t\t\timg = np.array(255 * unnormalize(data['img'][0, :, :, :])).copy()\n\n\t\t\timg[img<0] = 0\n\t\t\timg[img>255] = 255\n\n\t\t\timg = np.transpose(img, (1, 2, 0))\n\n\t\t\timg = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB)\n\n\t\t\tfor j in range(idxs[0].shape[0]):\n\t\t\t\tbbox = transformed_anchors[idxs[0][j], :]\n\t\t\t\tx1 = int(bbox[0])\n\t\t\t\ty1 = int(bbox[1])\n\t\t\t\tx2 = int(bbox[2])\n\t\t\t\ty2 = int(bbox[3])\n\t\t\t\tlabel_name = dataset_val.labels[int(classification[idxs[0][j]])]\n\t\t\t\tdraw_caption(img, (x1, y1, x2, y2), label_name)\n\n\t\t\t\tcv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=2)\n\t\t\t\tprint(label_name)\n\n\t\t\tcv2.imshow('img', 
img)\n\t\t\tcv2.waitKey(0)\n\tresults_path = os.path.basename(parser.model).split('.')[0] + '.csv'\n\tfor img_name in os.listdir(parser.image_dir):\n\t\timg_path = os.path.join(parser.image_dir, img_name)\n\t\tresults_dict = detect_image(img_path, parser.model, parser.class_list, parser.visualize)\n\t\tif not parser.visualize:\n\t\t\tresults_df = dict_to_df(results_dict)\n\t\t\tresults_df.to_csv(results_path, index=False)\n\n\ndef dict_to_df(d):\n\tdf = pd.DataFrame(columns=['image_id', 'PredictionString'])\n\tfor (img_id, img_preds) in d.items():\n\t\timg_preds_str = ' '.join([' '.join(pred) for pred in img_preds])\n\t\tdf.loc[len(df)] = [img_id, img_preds_str]\n\treturn df\n\n\nif __name__ == '__main__':\n\tmain()"
] | [
[
"numpy.array",
"torch.__version__.split",
"pandas.DataFrame",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.transpose",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.DataParallel"
]
] |
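A quick sanity check of `dict_to_df` after the `df.loc` fix above, using made-up predictions (the repo's real prediction format may differ). `df[len(df)] = ...` would have created a new *column* named 0; the row-wise `.loc` indexer appends a row instead:

import pandas as pd

d = {'img_0': [['0.9', '10', '10', '50', '50']]}  # image id -> list of per-box token lists
df = pd.DataFrame(columns=['image_id', 'PredictionString'])
for img_id, img_preds in d.items():
    df.loc[len(df)] = [img_id, ' '.join(' '.join(p) for p in img_preds)]
print(df)  # one row: img_0, "0.9 10 10 50 50"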
s-mostafa-a/PartiallyReversibleUnet | [
"f01a27bac14fc90780bf688cfd2e7f869a81ecf2"
] | [
"bratsDataset.py"
] | [
"import torch\nimport torch.utils.data\nimport h5py\nimport numpy as np\nimport time\nimport random\nimport dataProcessing.augmentation as aug\n\nclass BratsDataset(torch.utils.data.Dataset):\n #mode must be trian, test or val\n def __init__(self, filePath, expConfig, mode=\"train\", randomCrop=None, hasMasks=True, returnOffsets=False):\n super(BratsDataset, self).__init__()\n self.filePath = filePath\n self.mode = mode\n self.file = None\n self.trainOriginalClasses = expConfig.TRAIN_ORIGINAL_CLASSES\n self.randomCrop = randomCrop\n self.hasMasks = hasMasks\n self.returnOffsets = returnOffsets\n\n #augmentation settings\n self.nnAugmentation = expConfig.NN_AUGMENTATION\n self.softAugmentation = expConfig.SOFT_AUGMENTATION\n self.doRotate = expConfig.DO_ROTATE\n self.rotDegrees = expConfig.ROT_DEGREES\n self.doScale = expConfig.DO_SCALE\n self.scaleFactor = expConfig.SCALE_FACTOR\n self.doFlip = expConfig.DO_FLIP\n self.doElasticAug = expConfig.DO_ELASTIC_AUG\n self.sigma = expConfig.SIGMA\n self.doIntensityShift = expConfig.DO_INTENSITY_SHIFT\n self.maxIntensityShift = expConfig.MAX_INTENSITY_SHIFT\n\n def __getitem__(self, index):\n\n #lazily open file\n self.openFileIfNotOpen()\n\n #load from hdf5 file\n image = self.file[\"images_\" + self.mode][index, ...]\n if self.hasMasks: labels = self.file[\"masks_\" + self.mode][index, ...]\n\n #Prepare data depeinding on soft/hard augmentation scheme\n if not self.nnAugmentation:\n if not self.trainOriginalClasses and (self.mode != \"train\" or self.softAugmentation):\n if self.hasMasks: labels = self._toEvaluationOneHot(labels)\n defaultLabelValues = np.zeros(3, dtype=np.float32)\n else:\n if self.hasMasks: labels = self._toOrignalCategoryOneHot(labels)\n defaultLabelValues = np.asarray([1, 0, 0, 0, 0], dtype=np.float32)\n elif self.hasMasks:\n if labels.ndim < 4:\n labels = np.expand_dims(labels, 3)\n defaultLabelValues = np.asarray([0], dtype=np.float32)\n\n #augment data\n if self.mode == \"train\":\n image, labels = aug.augment3DImage(image,\n labels,\n defaultLabelValues,\n self.nnAugmentation,\n self.doRotate,\n self.rotDegrees,\n self.doScale,\n self.scaleFactor,\n self.doFlip,\n self.doElasticAug,\n self.sigma,\n self.doIntensityShift,\n self.maxIntensityShift)\n\n if self.nnAugmentation:\n if self.hasMasks: labels = self._toEvaluationOneHot(np.squeeze(labels, 3))\n else:\n if self.mode == \"train\" and not self.softAugmentation and not self.trainOriginalClasses and self.hasMasks:\n labels = self._toOrdinal(labels)\n labels = self._toEvaluationOneHot(labels)\n\n # random crop\n if not self.randomCrop is None:\n shape = image.shape\n x = random.randint(0, shape[0] - self.randomCrop[0])\n y = random.randint(0, shape[1] - self.randomCrop[1])\n z = random.randint(0, shape[2] - self.randomCrop[2])\n image = image[x:x+self.randomCrop[0], y:y+self.randomCrop[1], z:z+self.randomCrop[2], :]\n if self.hasMasks: labels = labels[x:x + self.randomCrop[0], y:y + self.randomCrop[1], z:z + self.randomCrop[2], :]\n\n image = np.transpose(image, (3, 0, 1, 2)) # bring into NCWH format\n if self.hasMasks: labels = np.transpose(labels, (3, 0, 1, 2)) # bring into NCWH format\n\n # to tensor\n #image = image[:, 0:32, 0:32, 0:32]\n image = torch.from_numpy(image)\n if self.hasMasks:\n #labels = labels[:, 0:32, 0:32, 0:32]\n labels = torch.from_numpy(labels) \n\n #get pid\n pid = self.file[\"pids_\" + self.mode][index]\n\n if self.returnOffsets:\n xOffset = self.file[\"xOffsets_\" + self.mode][index]\n yOffset = self.file[\"yOffsets_\" + self.mode][index]\n 
zOffset = self.file[\"zOffsets_\" + self.mode][index]\n if self.hasMasks:\n return image, str(pid), labels, xOffset, yOffset, zOffset\n else:\n return image, pid, xOffset, yOffset, zOffset\n else:\n if self.hasMasks:\n return image, str(pid), labels\n else:\n return image, pid\n\n def __len__(self):\n #lazily open file\n self.openFileIfNotOpen()\n\n return self.file[\"images_\" + self.mode].shape[0]\n\n def openFileIfNotOpen(self):\n if self.file == None:\n self.file = h5py.File(self.filePath, \"r\")\n\n def _toEvaluationOneHot(self, labels):\n shape = labels.shape\n out = np.zeros([shape[0], shape[1], shape[2], 3], dtype=np.float32)\n out[:, :, :, 0] = (labels != 0)\n out[:, :, :, 1] = (labels != 0) * (labels != 2)\n out[:, :, :, 2] = (labels == 4)\n return out\n\n def _toOrignalCategoryOneHot(self, labels):\n shape = labels.shape\n out = np.zeros([shape[0], shape[1], shape[2], 5], dtype=np.float32)\n for i in range(5):\n out[:, :, :, i] = (labels == i)\n return out\n\n def _toOrdinal(self, labels):\n return np.argmax(labels, axis=3)\n"
] | [
[
"numpy.asarray",
"numpy.zeros",
"torch.from_numpy",
"numpy.transpose",
"numpy.argmax",
"numpy.squeeze",
"numpy.expand_dims"
]
] |
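The `_toEvaluationOneHot` method above encodes the nested regions evaluated in BraTS from the raw labels {0, 1, 2, 4}: channel 0 is any tumor (label != 0), channel 1 drops label 2, and channel 2 keeps only label 4. A tiny standalone check of that logic (toy labels, not real scan data):

import numpy as np

labels = np.array([[[0, 1], [2, 4]]])           # toy 1x2x2 label volume
out = np.zeros(labels.shape + (3,), dtype=np.float32)
out[..., 0] = (labels != 0)
out[..., 1] = (labels != 0) * (labels != 2)
out[..., 2] = (labels == 4)
print(out[0, 1, 1])                             # label 4 falls in all three regions: [1. 1. 1.]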
loserbbb/1-stage-wseg | [
"f1579be241986c1e19420bfbf6711b6c2208d99a"
] | [
"models/mods/pamr.py"
] | [
"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nfrom functools import partial\n\n#\n# Helper modules\n#\nclass LocalAffinity(nn.Module):\n\n def __init__(self, dilations=[1]):\n super(LocalAffinity, self).__init__()\n self.dilations = dilations\n weight = self._init_aff()\n self.register_buffer('kernel', weight)\n\n def _init_aff(self):\n # initialising the shift kernel\n weight = torch.zeros(8, 1, 3, 3)\n\n for i in range(weight.size(0)):\n weight[i, 0, 1, 1] = 1\n\n weight[0, 0, 0, 0] = -1\n weight[1, 0, 0, 1] = -1\n weight[2, 0, 0, 2] = -1\n\n weight[3, 0, 1, 0] = -1\n weight[4, 0, 1, 2] = -1\n\n weight[5, 0, 2, 0] = -1\n weight[6, 0, 2, 1] = -1\n weight[7, 0, 2, 2] = -1\n\n self.weight_check = weight.clone()\n\n return weight\n\n def forward(self, x):\n \n self.weight_check = self.weight_check.type_as(x)\n assert torch.all(self.weight_check.eq(self.kernel))\n\n B,K,H,W = x.size()\n x = x.view(B*K,1,H,W)\n\n x_affs = []\n for d in self.dilations:\n x_pad = F.pad(x, [d]*4, mode='replicate')\n x_aff = F.conv2d(x_pad, self.kernel, dilation=d)\n x_affs.append(x_aff)\n\n x_aff = torch.cat(x_affs, 1)\n return x_aff.view(B,K,-1,H,W)\n\nclass LocalAffinityCopy(LocalAffinity):\n\n def _init_aff(self):\n # initialising the shift kernel\n weight = torch.zeros(8, 1, 3, 3)\n\n weight[0, 0, 0, 0] = 1\n weight[1, 0, 0, 1] = 1\n weight[2, 0, 0, 2] = 1\n\n weight[3, 0, 1, 0] = 1\n weight[4, 0, 1, 2] = 1\n\n weight[5, 0, 2, 0] = 1\n weight[6, 0, 2, 1] = 1\n weight[7, 0, 2, 2] = 1\n\n self.weight_check = weight.clone()\n return weight\n\nclass LocalStDev(LocalAffinity):\n\n def _init_aff(self):\n weight = torch.zeros(9, 1, 3, 3)\n weight.zero_()\n\n weight[0, 0, 0, 0] = 1\n weight[1, 0, 0, 1] = 1\n weight[2, 0, 0, 2] = 1\n\n weight[3, 0, 1, 0] = 1\n weight[4, 0, 1, 1] = 1\n weight[5, 0, 1, 2] = 1\n\n weight[6, 0, 2, 0] = 1\n weight[7, 0, 2, 1] = 1\n weight[8, 0, 2, 2] = 1\n\n self.weight_check = weight.clone()\n return weight\n\n def forward(self, x):\n # returns (B,K,P,H,W), where P is the number\n # of locations\n x = super(LocalStDev, self).forward(x)\n\n return x.std(2, keepdim=True)\n\nclass LocalAffinityAbs(LocalAffinity):\n\n def forward(self, x):\n x = super(LocalAffinityAbs, self).forward(x)\n return torch.abs(x)\n\n#\n# PAMR module\n#\nclass PAMR(nn.Module):\n\n def __init__(self, num_iter=1, dilations=[1]):\n super(PAMR, self).__init__()\n\n self.num_iter = num_iter\n self.aff_x = LocalAffinityAbs(dilations)\n self.aff_m = LocalAffinityCopy(dilations)\n self.aff_std = LocalStDev(dilations)\n\n def forward(self, x, mask):\n mask = F.interpolate(mask, size=x.size()[-2:], mode=\"bilinear\", align_corners=True)\n\n # x: [BxKxHxW]\n # mask: [BxCxHxW]\n B,K,H,W = x.size()\n _,C,_,_ = mask.size()\n\n x_std = self.aff_std(x)\n\n x = -self.aff_x(x) / (1e-8 + 0.1 * x_std)\n x = x.mean(1, keepdim=True)\n x = F.softmax(x, 2)\n\n for _ in range(self.num_iter):\n m = self.aff_m(mask) # [BxCxPxHxW]\n mask = (m * x).sum(2)\n\n # xvals: [BxCxHxW]\n return mask\n"
] | [
[
"torch.zeros",
"torch.cat",
"torch.abs",
"torch.nn.functional.softmax",
"torch.nn.functional.pad",
"torch.nn.functional.conv2d"
]
] |
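A shape-level usage sketch of the PAMR module above; the sizes, iteration count, dilations, and import path are illustrative assumptions, not the repo's training configuration:

import torch
from models.mods.pamr import PAMR  # import path assumed from the file_path field

pamr = PAMR(num_iter=10, dilations=[1, 2, 4, 8])
x = torch.rand(2, 3, 64, 64)       # image (or low-level feature) tensor
mask = torch.rand(2, 21, 32, 32)   # coarse per-class masks, e.g. 21 Pascal VOC classes
refined = pamr(x, mask)            # mask is bilinearly upsampled to 64x64, then refined
print(refined.shape)               # torch.Size([2, 21, 64, 64])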
nf-core/spatialtranscriptomics | [
"40e2fe8125edad3036687a6043074b97252371f7"
] | [
"bin/scPreprocess.py"
] | [
"#!/usr/bin/env python\n\n# Load packages\nimport argparse\nimport scanpy as sc\nimport numpy as np\nimport pandas as pd\nimport scipy.stats\nfrom matplotlib import pyplot as plt\nfrom scipy.sparse import csr_matrix\n\n\n# Parse command-line arguments\nparser = argparse.ArgumentParser(\n description='Preprocess single cell transcriptomics data.')\nparser.add_argument('--npFactorsOutputName',\n metavar='filename',\n type=str,\n default=None,\n help='Name of files with counts.')\nparser.add_argument('--rawAdata',\n metavar='h5file',\n type=str,\n default=None,\n help='Name of the h5ad file.')\nparser.add_argument('--mitoFile',\n metavar='file',\n type=str,\n default=None,\n help='Path and name of the mito file.')\nparser.add_argument('--pltFigSize',\n metavar='figsize',\n type=int,\n default=6,\n help='Figure size.')\nparser.add_argument('--minCounts',\n metavar='cutoff',\n type=int,\n default=500,\n help='Min counts per spot.')\nparser.add_argument('--minGenes',\n metavar='cutoff',\n type=int,\n default=250,\n help='Min genes per spot.')\nparser.add_argument('--minCells',\n metavar='cutoff',\n type=int,\n default=1,\n help='Min cells per gene.')\nparser.add_argument('--histplotQCmaxTotalCounts',\n metavar='cutoff',\n type=int,\n default=5000,\n help='Max total counts.')\nparser.add_argument('--histplotQCminGeneCounts',\n metavar='cutoff',\n type=int,\n default=2000,\n help='Min gene counts.')\nparser.add_argument('--histplotQCbins',\n metavar='number',\n type=int,\n default=40,\n help='Number of bins.')\nparser.add_argument('--histogramPlotAllName',\n metavar='name',\n type=str,\n default='sc_histogrtam_all.png',\n help='Figure name.')\nparser.add_argument('--histogramPlotFilteredName',\n metavar='name',\n type=str,\n default='sc_histogrtam_filtered.png',\n help='Figure name.')\nparser.add_argument('--histWithWithoutNorm',\n metavar='name',\n type=str,\n default='sc_histogram_with_without_normalization.png',\n help='Figure name.')\nparser.add_argument('--nameX',\n metavar='File name',\n type=str,\n default='sc_adata_X.npz',\n help='Name of the counts file.')\nparser.add_argument('--nameVar',\n metavar='File name',\n type=str,\n default='sc_adata.var.csv',\n help='Name of the features file.')\nparser.add_argument('--nameObs',\n metavar='File name',\n type=str,\n default='sc_adata.obs.csv',\n help='Name of the observations file.')\nparser.add_argument('--nameDataPlain',\n metavar='File name',\n type=str,\n default='sc_adata_plain.h5ad',\n help='Name of the data save file.')\nparser.add_argument('--nameDataNorm',\n metavar='File name',\n type=str,\n default='sc_adata_norm.h5ad',\n help='Name of the data save file.')\nargs = parser.parse_args()\n\n\n# Main script\n# See more settings at:\n# https://scanpy.readthedocs.io/en/stable/generated/scanpy._settings.ScanpyConfig.html\n\nf_temp = np.load(args.npFactorsOutputName)\nf_temp = f_temp[list(f_temp.keys())[0]]\nprint(f_temp.shape)\n\nsc_adata = sc.read(args.rawAdata)\nprint(sc_adata.shape)\n\nsc_adata.obs['norm_factors'] = pd.Series(index=sc_adata.obs.index, data=f_temp)\n\nmito = pd.read_csv(args.mitoFile, index_col=['Symbol', 'MCARTA2_LIST'], delimiter='\\t')['EnsemblGeneID']\nmito = mito.xs(1, level='MCARTA2_LIST').sort_index().reset_index()\nprint(mito)\n\nsc_adata.var[\"mt\"] = sc_adata.var_names.isin(mito['Symbol'])\nsc.pp.calculate_qc_metrics(sc_adata, qc_vars=[\"mt\"], inplace=True)\nprint(sc_adata)\n\nprint(sc_adata.var)\n\nplt.rcParams[\"figure.figsize\"] = (args.pltFigSize, args.pltFigSize)\n\ndef histplotQC(se_data, bins, ax):\n 
print(se_data.shape)\n ax.hist(se_data, density=True, bins=bins, color='navy', alpha=0.3)\n kde = scipy.stats.gaussian_kde(se_data)\n xx = np.linspace(min(se_data), max(se_data), 300)\n ax.set_xlabel(se_data.name)\n ax.set_ylabel('Density')\n ax.plot(xx, kde(xx), color='crimson')\n ax.set_xlim([0, ax.get_xlim()[1]])\n return\n\nfig, axs = plt.subplots(1, 5, figsize=(args.pltFigSize*5, args.pltFigSize))\nhistplotQC(sc_adata.obs[\"total_counts\"], bins=args.histplotQCbins, ax=axs[0])\nhistplotQC(sc_adata.obs[\"total_counts\"][sc_adata.obs[\"total_counts\"] < args.histplotQCmaxTotalCounts], bins=args.histplotQCbins, ax=axs[1])\nhistplotQC(sc_adata.obs[\"n_genes_by_counts\"], bins=args.histplotQCbins, ax=axs[2])\nhistplotQC(sc_adata.obs[\"n_genes_by_counts\"][sc_adata.obs[\"n_genes_by_counts\"] < args.histplotQCminGeneCounts], bins=args.histplotQCbins, ax=axs[3])\nhistplotQC(sc_adata.obs[\"pct_counts_mt\"], bins=args.histplotQCbins, ax=axs[4])\nfig.tight_layout()\nfig.savefig(args.histogramPlotAllName, facecolor='white')\n\n# Filter cells and genes\nsc.pp.filter_cells(sc_adata, min_counts=args.minCounts)\nsc.pp.filter_cells(sc_adata, min_genes=args.minGenes)\nsc.pp.filter_genes(sc_adata, min_cells=args.minCells)\nprint(sc_adata.shape)\n\nfig, axs = plt.subplots(1, 5, figsize=(args.pltFigSize*5, args.pltFigSize))\nhistplotQC(sc_adata.obs[\"total_counts\"], bins=args.histplotQCbins, ax=axs[0])\nhistplotQC(sc_adata.obs[\"total_counts\"][sc_adata.obs[\"total_counts\"] < args.histplotQCmaxTotalCounts], bins=args.histplotQCbins, ax=axs[1])\nhistplotQC(sc_adata.obs[\"n_genes_by_counts\"], bins=args.histplotQCbins, ax=axs[2])\nhistplotQC(sc_adata.obs[\"n_genes_by_counts\"][sc_adata.obs[\"n_genes_by_counts\"] < args.histplotQCminGeneCounts], bins=args.histplotQCbins, ax=axs[3])\nhistplotQC(sc_adata.obs[\"pct_counts_mt\"], bins=args.histplotQCbins, ax=axs[4])\nfig.tight_layout()\nfig.savefig(args.histogramPlotFilteredName, facecolor='white')\n\n# Effect of normalization by size factors\nfig, ax = plt.subplots(figsize=(args.pltFigSize, args.pltFigSize))\ndisplay_cutoff = 5*10**3\nse = pd.Series(np.array(sc_adata.X.sum(axis=1)).T[0])\nse = se[se<display_cutoff]\nprint('Number of cells displayed:', se.shape)\nse.hist(bins=100, alpha=0.75, ax=ax)\nax.set_xlim(0, display_cutoff);\nsc_adata_c = sc_adata.copy()\nsc_adata_c.X = csr_matrix(sc_adata.X / sc_adata.obs['norm_factors'].values[:, None])\nse = pd.Series(np.array(sc_adata_c.X.sum(axis=1)).T[0])\nse = se[se<display_cutoff]\nprint('Number of cells displayed:', se.shape)\nse.hist(bins=100, alpha=0.75, ax=ax)\nax.set_xlim(0, display_cutoff);\nfig.savefig(args.histWithWithoutNorm, facecolor='white', dpi=300);\nplt.close(fig)\n\n# Save raw filtered data\nsc_adata.write(args.nameDataPlain)\n\n# Save normalized data\nsc_adata.X = csr_matrix(sc_adata.X / sc_adata.obs['norm_factors'].values[:, None])\nsc.pp.log1p(sc_adata)\nsc_adata.write(args.nameDataNorm)\n\n# Save to open in R\nnp.savez_compressed(args.nameX, sc_adata.X.T.todense())\nsc_adata.var.to_csv(args.nameVar)\nsc_adata.obs.to_csv(args.nameObs)\n"
] | [
[
"numpy.load",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"pandas.Series",
"pandas.read_csv",
"scipy.sparse.csr_matrix"
]
] |
chili-chiu/napari | [
"eb6e672975ce105ac0125f71da3d0970d17cefb9"
] | [
"napari/utils/_tests/test_proxies.py"
] | [
"from unittest.mock import patch\n\nimport numpy as np\nimport pytest\n\nfrom napari.components.viewer_model import ViewerModel\nfrom napari.utils._proxies import PublicOnlyProxy, ReadOnlyWrapper\n\n\ndef test_ReadOnlyWrapper_setitem():\n \"\"\"test that ReadOnlyWrapper prevents setting items\"\"\"\n d = {'hi': 3}\n d_read_only = ReadOnlyWrapper(d)\n\n with pytest.raises(TypeError):\n d_read_only['hi'] = 5\n\n\ndef test_ReadOnlyWrapper_setattr():\n \"\"\"test that ReadOnlyWrapper prevents setting attributes\"\"\"\n\n class TestClass:\n x = 3\n\n tc = TestClass()\n tc_read_only = ReadOnlyWrapper(tc)\n\n with pytest.raises(TypeError):\n tc_read_only.x = 5\n\n\[email protected]\ndef patched_root_dir():\n \"\"\"Simulate a call from outside of napari\"\"\"\n with patch('napari.utils.misc.ROOT_DIR', new='/some/other/package'):\n yield\n\n\ndef test_PublicOnlyProxy(patched_root_dir):\n class X:\n a = 1\n _b = 'nope'\n\n def method(self):\n return 2\n\n class Tester:\n x = X()\n _private = 2\n\n def __getitem__(self, key):\n return X()\n\n t = Tester()\n proxy = PublicOnlyProxy(t)\n assert proxy.x.a == 1\n assert proxy[0].a == 1\n assert proxy.x.method() == 2\n\n assert isinstance(proxy, Tester)\n with pytest.warns(FutureWarning) as e:\n proxy._private\n assert 'Private attribute access' in str(e[0].message)\n\n with pytest.warns(FutureWarning) as e:\n # works on sub-objects too\n proxy.x._b\n assert 'Private attribute access' in str(e[0].message)\n\n with pytest.warns(FutureWarning) as e:\n # works on sub-items too\n proxy[0]._b\n assert 'Private attribute access' in str(e[0].message)\n\n assert '_private' not in dir(proxy)\n assert '_private' in dir(t)\n\n\ndef test_public_proxy_limited_to_napari(patched_root_dir):\n \"\"\"Test that the recursive public proxy goes no farther than napari.\"\"\"\n viewer = ViewerModel()\n viewer.add_points(None)\n pv = PublicOnlyProxy(viewer)\n assert not isinstance(pv.layers[0].data, PublicOnlyProxy)\n\n\ndef test_array_from_proxy_objects(patched_root_dir):\n \"\"\"Test that the recursive public proxy goes no farther than napari.\"\"\"\n viewer = ViewerModel()\n viewer.add_points(None)\n pv = PublicOnlyProxy(viewer)\n assert isinstance(np.array(pv.dims.displayed, dtype=int), np.ndarray)\n\n\ndef test_receive_return_proxy_object():\n \"\"\"Test that an\"\"\"\n viewer = ViewerModel()\n viewer.add_image(np.random.random((20, 20)))\n pv = PublicOnlyProxy(viewer)\n\n # simulates behavior from outside of napari\n with patch('napari.utils.misc.ROOT_DIR', new='/some/other/package'):\n # the recursion means this layer will be a Proxy Object on __getitem__\n layer = pv.layers[-1]\n assert isinstance(layer, PublicOnlyProxy)\n # remove and add it back, should be fine\n add_layer = getattr(pv, 'add_layer')\n\n add_layer(layer)\n assert len(viewer.layers) == 2\n\n\ndef test_viewer_method():\n viewer = PublicOnlyProxy(ViewerModel())\n assert viewer.add_points() is not None\n"
] | [
[
"numpy.random.random",
"numpy.array"
]
] |
DavidWalz/scikit-optimize | [
"85bfd98760232570285644c1e86ef4d63fd270d7"
] | [
"skopt/tests/test_space.py"
] | [
"import pytest\nimport numbers\nimport numpy as np\nimport os\nimport yaml\nfrom tempfile import NamedTemporaryFile\n\nfrom numpy.testing import assert_array_almost_equal\nfrom numpy.testing import assert_array_equal\nfrom numpy.testing import assert_equal\nfrom numpy.testing import assert_raises_regex\n\nfrom skopt import Optimizer\nfrom skopt.space import Space\nfrom skopt.space import Real\nfrom skopt.space import Integer\nfrom skopt.space import Categorical\nfrom skopt.space import check_dimension as space_check_dimension\n\n\ndef check_dimension(Dimension, vals, random_val):\n x = Dimension(*vals)\n assert_equal(x, Dimension(*vals))\n assert x != Dimension(vals[0], vals[1] + 1)\n assert x != Dimension(vals[0] + 1, vals[1])\n y = x.rvs(random_state=1)\n if isinstance(y, list):\n y = np.array(y)\n assert_equal(y, random_val)\n\n\ndef check_categorical(vals, random_val):\n x = Categorical(vals)\n assert_equal(x, Categorical(vals))\n assert x != Categorical(vals[:-1] + (\"zzz\",))\n assert_equal(x.rvs(random_state=1), random_val)\n\n\ndef check_limits(value, low, high):\n # check if low <= value <= high\n if isinstance(value, list):\n value = np.array(value)\n assert low <= value\n assert high >= value\n\n\[email protected]_test\ndef test_dimensions():\n check_dimension(Real, (1., 4.), 2.251066014107722)\n check_dimension(Real, (1, 4), 2.251066014107722)\n check_dimension(Integer, (1, 4), 2)\n check_dimension(Integer, (1., 4.), 2)\n check_dimension(Integer, (1, 4), 2)\n check_categorical((\"a\", \"b\", \"c\", \"d\"), \"b\")\n check_categorical((1., 2., 3., 4.), 2.)\n check_categorical((1, 2, 3, 4), 2)\n\n\[email protected]_test\ndef test_real_log_sampling_in_bounds():\n dim = Real(low=1, high=32, prior='log-uniform', transform='normalize')\n\n # round trip a value that is within the bounds of the space\n #\n # x = dim.inverse_transform(dim.transform(31.999999999999999))\n for n in (32., 31.999999999999999):\n round_tripped = dim.inverse_transform(dim.transform([n]))\n assert np.allclose([n], round_tripped)\n assert n in dim\n assert round_tripped in dim\n\n\[email protected]_test\ndef test_real():\n a = Real(1, 25)\n for i in range(50):\n r = a.rvs(random_state=i)\n check_limits(r, 1, 25)\n assert r in a\n\n random_values = a.rvs(random_state=0, n_samples=10)\n assert len(random_values) == 10\n assert_array_equal(a.transform(random_values), random_values)\n assert_array_equal(a.inverse_transform(random_values), random_values)\n\n log_uniform = Real(10**-5, 10**5, prior=\"log-uniform\")\n assert log_uniform != Real(10**-5, 10**5)\n for i in range(50):\n random_val = log_uniform.rvs(random_state=i)\n check_limits(random_val, 10**-5, 10**5)\n random_values = log_uniform.rvs(random_state=0, n_samples=10)\n assert len(random_values) == 10\n transformed_vals = log_uniform.transform(random_values)\n assert_array_equal(transformed_vals, np.log10(random_values))\n assert_array_equal(\n log_uniform.inverse_transform(transformed_vals), random_values)\n\n\[email protected]_test\ndef test_real_bounds():\n # should give same answer as using check_limits() but this is easier\n # to read\n a = Real(1., 2.1)\n assert 0.99 not in a\n assert 1. in a\n assert 2.09 in a\n assert 2.1 in a\n assert np.nextafter(2.1, 3.) 
not in a\n\n\[email protected]_test\ndef test_integer():\n a = Integer(1, 10)\n for i in range(50):\n r = a.rvs(random_state=i)\n assert 1 <= r\n assert 11 >= r\n assert r in a\n\n random_values = a.rvs(random_state=0, n_samples=10)\n assert_array_equal(random_values.shape, (10))\n assert_array_equal(a.transform(random_values), random_values)\n assert_array_equal(a.inverse_transform(random_values), random_values)\n\n\[email protected]_test\ndef test_categorical_transform():\n categories = [\"apple\", \"orange\", \"banana\", None, True, False, 3]\n cat = Categorical(categories)\n\n apple = [1., 0., 0., 0., 0., 0., 0.]\n orange = [0., 1.0, 0.0, 0.0, 0., 0., 0.]\n banana = [0., 0., 1., 0., 0., 0., 0.]\n none = [0., 0., 0., 1., 0., 0., 0.]\n true = [0., 0., 0., 0., 1., 0., 0.]\n false = [0., 0., 0., 0., 0., 1., 0.]\n three = [0., 0., 0., 0., 0., 0., 1.]\n\n assert_equal(cat.transformed_size, 7)\n assert_equal(cat.transformed_size, cat.transform([\"apple\"]).size)\n assert_array_equal(\n cat.transform(categories),\n [apple, orange, banana, none, true, false, three]\n )\n assert_array_equal(cat.transform([\"apple\", \"orange\"]), [apple, orange])\n assert_array_equal(cat.transform([\"apple\", \"banana\"]), [apple, banana])\n assert_array_equal(cat.inverse_transform([apple, orange]),\n [\"apple\", \"orange\"])\n assert_array_equal(cat.inverse_transform([apple, banana]),\n [\"apple\", \"banana\"])\n ent_inverse = cat.inverse_transform(\n [apple, orange, banana, none, true, false, three])\n assert_array_equal(ent_inverse, categories)\n\n\[email protected]_test\ndef test_categorical_transform_binary():\n categories = [\"apple\", \"orange\"]\n cat = Categorical(categories)\n\n apple = [0.]\n orange = [1.]\n\n assert_equal(cat.transformed_size, 1)\n assert_equal(cat.transformed_size, cat.transform([\"apple\"]).size)\n assert_array_equal(cat.transform(categories), [apple, orange])\n assert_array_equal(cat.transform([\"apple\", \"orange\"]), [apple, orange])\n assert_array_equal(cat.inverse_transform([apple, orange]),\n [\"apple\", \"orange\"])\n ent_inverse = cat.inverse_transform([apple, orange])\n assert_array_equal(ent_inverse, categories)\n\n\[email protected]_test\ndef test_categorical_repr():\n small_cat = Categorical([1, 2, 3, 4, 5])\n assert (small_cat.__repr__() ==\n \"Categorical(categories=(1, 2, 3, 4, 5), prior=None)\")\n\n big_cat = Categorical([1, 2, 3, 4, 5, 6, 7, 8])\n assert (big_cat.__repr__() ==\n 'Categorical(categories=(1, 2, 3, ..., 6, 7, 8), prior=None)')\n\n\[email protected]_test\ndef test_space_consistency():\n # Reals (uniform)\n\n s1 = Space([Real(0.0, 1.0)])\n s2 = Space([Real(0.0, 1.0)])\n s3 = Space([Real(0, 1)])\n s4 = Space([(0.0, 1.0)])\n s5 = Space([(0.0, 1.0, \"uniform\")])\n s6 = Space([(0, 1.0)])\n s7 = Space([(np.float64(0.0), 1.0)])\n s8 = Space([(0, np.float64(1.0))])\n a1 = s1.rvs(n_samples=10, random_state=0)\n a2 = s2.rvs(n_samples=10, random_state=0)\n a3 = s3.rvs(n_samples=10, random_state=0)\n a4 = s4.rvs(n_samples=10, random_state=0)\n a5 = s5.rvs(n_samples=10, random_state=0)\n assert_equal(s1, s2)\n assert_equal(s1, s3)\n assert_equal(s1, s4)\n assert_equal(s1, s5)\n assert_equal(s1, s6)\n assert_equal(s1, s7)\n assert_equal(s1, s8)\n assert_array_equal(a1, a2)\n assert_array_equal(a1, a3)\n assert_array_equal(a1, a4)\n assert_array_equal(a1, a5)\n\n # Reals (log-uniform)\n s1 = Space([Real(10**-3.0, 10**3.0, prior=\"log-uniform\", base=10)])\n s2 = Space([Real(10**-3.0, 10**3.0, prior=\"log-uniform\", base=10)])\n s3 = Space([Real(10**-3, 10**3, 
prior=\"log-uniform\", base=10)])\n s4 = Space([(10**-3.0, 10**3.0, \"log-uniform\", 10)])\n s5 = Space([(np.float64(10**-3.0), 10**3.0, \"log-uniform\", 10)])\n a1 = s1.rvs(n_samples=10, random_state=0)\n a2 = s2.rvs(n_samples=10, random_state=0)\n a3 = s3.rvs(n_samples=10, random_state=0)\n a4 = s4.rvs(n_samples=10, random_state=0)\n assert_equal(s1, s2)\n assert_equal(s1, s3)\n assert_equal(s1, s4)\n assert_equal(s1, s5)\n assert_array_equal(a1, a2)\n assert_array_equal(a1, a3)\n assert_array_equal(a1, a4)\n\n # Integers\n s1 = Space([Integer(1, 5)])\n s2 = Space([Integer(1.0, 5.0)])\n s3 = Space([(1, 5)])\n s4 = Space([(np.int64(1.0), 5)])\n s5 = Space([(1, np.int64(5.0))])\n a1 = s1.rvs(n_samples=10, random_state=0)\n a2 = s2.rvs(n_samples=10, random_state=0)\n a3 = s3.rvs(n_samples=10, random_state=0)\n assert_equal(s1, s2)\n assert_equal(s1, s3)\n assert_equal(s1, s4)\n assert_equal(s1, s5)\n assert_array_equal(a1, a2)\n assert_array_equal(a1, a3)\n\n # Integers (log-uniform)\n s1 = Space([Integer(16, 512, prior=\"log-uniform\", base=2)])\n s2 = Space([Integer(16.0, 512.0, prior=\"log-uniform\", base=2)])\n s3 = Space([(16, 512, \"log-uniform\", 2)])\n s4 = Space([(np.int64(16.0), 512, \"log-uniform\", 2)])\n s5 = Space([(16, np.int64(512.0), \"log-uniform\", 2)])\n a1 = s1.rvs(n_samples=10, random_state=0)\n a2 = s2.rvs(n_samples=10, random_state=0)\n a3 = s3.rvs(n_samples=10, random_state=0)\n assert_equal(s1, s2)\n assert_equal(s1, s3)\n assert_equal(s1, s4)\n assert_equal(s1, s5)\n assert_array_equal(a1, a2)\n assert_array_equal(a1, a3)\n\n # Categoricals\n s1 = Space([Categorical([\"a\", \"b\", \"c\"])])\n s2 = Space([Categorical([\"a\", \"b\", \"c\"])])\n s3 = Space([[\"a\", \"b\", \"c\"]])\n a1 = s1.rvs(n_samples=10, random_state=0)\n a2 = s2.rvs(n_samples=10, random_state=0)\n a3 = s3.rvs(n_samples=10, random_state=0)\n assert_equal(s1, s2)\n assert_array_equal(a1, a2)\n assert_equal(s1, s3)\n assert_array_equal(a1, a3)\n\n s1 = Space([(True, False)])\n s2 = Space([Categorical([True, False])])\n s3 = Space([np.array([True, False])])\n assert s1 == s2 == s3\n\n # Categoricals Integer\n s1 = Space([Categorical([1, 2, 3])])\n s2 = Space([Categorical([1, 2, 3])])\n s3 = Space([[1, 2, 3]])\n a1 = s1.rvs(n_samples=10, random_state=0)\n a2 = s2.rvs(n_samples=10, random_state=0)\n a3 = s3.rvs(n_samples=10, random_state=0)\n assert_equal(s1, s2)\n assert_array_equal(a1, a2)\n assert_equal(s1, s3)\n assert_array_equal(a1, a3)\n\n s1 = Space([(True, False)])\n s2 = Space([Categorical([True, False])])\n s3 = Space([np.array([True, False])])\n assert s1 == s2 == s3\n\[email protected]_test\ndef test_space_api():\n space = Space([(0.0, 1.0), (-5, 5),\n (\"a\", \"b\", \"c\"), (1.0, 5.0, \"log-uniform\"), (\"e\", \"f\")])\n\n cat_space = Space([(1, \"r\"), (1.0, \"r\")])\n assert isinstance(cat_space.dimensions[0], Categorical)\n assert isinstance(cat_space.dimensions[1], Categorical)\n\n assert_equal(len(space.dimensions), 5)\n assert isinstance(space.dimensions[0], Real)\n assert isinstance(space.dimensions[1], Integer)\n assert isinstance(space.dimensions[2], Categorical)\n assert isinstance(space.dimensions[3], Real)\n assert isinstance(space.dimensions[4], Categorical)\n\n samples = space.rvs(n_samples=10, random_state=0)\n assert_equal(len(samples), 10)\n assert_equal(len(samples[0]), 5)\n\n assert isinstance(samples, list)\n for n in range(4):\n assert isinstance(samples[n], list)\n\n assert isinstance(samples[0][0], numbers.Real)\n assert isinstance(samples[0][1], 
numbers.Integral)\n assert isinstance(samples[0][2], str)\n assert isinstance(samples[0][3], numbers.Real)\n assert isinstance(samples[0][4], str)\n\n samples_transformed = space.transform(samples)\n assert_equal(samples_transformed.shape[0], len(samples))\n assert_equal(samples_transformed.shape[1], 1 + 1 + 3 + 1 + 1)\n\n # our space contains mixed types, this means we can't use\n # `array_allclose` or similar to check points are close after a round-trip\n # of transformations\n for orig, round_trip in zip(samples,\n space.inverse_transform(samples_transformed)):\n assert space.distance(orig, round_trip) < 1.e-8\n\n samples = space.inverse_transform(samples_transformed)\n assert isinstance(samples[0][0], numbers.Real)\n assert isinstance(samples[0][1], numbers.Integral)\n assert isinstance(samples[0][2], str)\n assert isinstance(samples[0][3], numbers.Real)\n assert isinstance(samples[0][4], str)\n\n for b1, b2 in zip(space.bounds,\n [(0.0, 1.0), (-5, 5),\n np.asarray([\"a\", \"b\", \"c\"]), (1.0, 5.0),\n np.asarray([\"e\", \"f\"])]):\n assert_array_equal(b1, b2)\n\n for b1, b2 in zip(space.transformed_bounds,\n [(0.0, 1.0), (-5, 5), (0.0, 1.0), (0.0, 1.0), (0.0, 1.0),\n (np.log10(1.0), np.log10(5.0)), (0.0, 1.0)]):\n assert_array_equal(b1, b2)\n\n\[email protected]_test\ndef test_space_from_space():\n # can you pass a Space instance to the Space constructor?\n space = Space([(0.0, 1.0), (-5, 5),\n (\"a\", \"b\", \"c\"), (1.0, 5.0, \"log-uniform\"), (\"e\", \"f\")])\n\n space2 = Space(space)\n\n assert_equal(space, space2)\n\n\[email protected]_test\ndef test_constant_property():\n space = Space([(0.0, 1.0), (1,),\n (\"a\", \"b\", \"c\"), (1.0, 5.0, \"log-uniform\"), (\"e\",)])\n assert space.n_constant_dimensions == 2\n for i in [1, 4]:\n assert space.dimensions[i].is_constant\n for i in [0, 2, 3]:\n assert not space.dimensions[i].is_constant\n\n\[email protected]_test\ndef test_set_get_transformer():\n # can you pass a Space instance to the Space constructor?\n space = Space([(0.0, 1.0), (-5, 5),\n (\"a\", \"b\", \"c\"), (1.0, 5.0, \"log-uniform\"), (\"e\", \"f\")])\n\n transformer = space.get_transformer()\n assert_array_equal([\"identity\", \"identity\", \"onehot\",\n \"identity\", \"onehot\"], transformer)\n space.set_transformer(\"normalize\")\n transformer = space.get_transformer()\n assert_array_equal([\"normalize\"] * 5, transformer)\n space.set_transformer(transformer)\n assert_array_equal(transformer, space.get_transformer())\n\n space.set_transformer_by_type(\"label\", Categorical)\n assert space.dimensions[2].transform([\"a\"]) == [0]\n\n\[email protected]_test\ndef test_normalize():\n # can you pass a Space instance to the Space constructor?\n space = Space([(0.0, 1.0), (-5, 5),\n (\"a\", \"b\", \"c\"), (1.0, 5.0, \"log-uniform\"), (\"e\", \"f\")])\n space.set_transformer(\"normalize\")\n X = [[0., -5, 'a', 1., 'e']]\n Xt = np.zeros((1, 5))\n assert_array_equal(space.transform(X), Xt)\n assert_array_equal(space.inverse_transform(Xt), X)\n assert_array_equal(space.inverse_transform(space.transform(X)), X)\n\n\[email protected]_test\ndef test_normalize_real():\n\n a = Real(2.0, 30.0, transform=\"normalize\")\n for i in range(50):\n check_limits(a.rvs(random_state=i), 2, 30)\n\n rng = np.random.RandomState(0)\n X = rng.randn(100)\n X = 28 * (X - X.min()) / (X.max() - X.min()) + 2\n\n # Check transformed values are in [0, 1]\n assert np.all(a.transform(X) <= np.ones_like(X))\n assert np.all(np.zeros_like(X) <= a.transform(X))\n\n # Check inverse transform\n 
assert_array_almost_equal(a.inverse_transform(a.transform(X)), X)\n\n # log-uniform prior\n a = Real(10**2.0, 10**4.0, prior=\"log-uniform\", transform=\"normalize\")\n for i in range(50):\n check_limits(a.rvs(random_state=i), 10**2, 10**4)\n\n rng = np.random.RandomState(0)\n X = np.clip(10**3 * rng.randn(100), 10**2.0, 10**4.0)\n\n # Check transform\n assert np.all(a.transform(X) <= np.ones_like(X))\n assert np.all(np.zeros_like(X) <= a.transform(X))\n\n # Check inverse transform\n assert_array_almost_equal(a.inverse_transform(a.transform(X)), X)\n\n a = Real(0, 1, transform=\"normalize\", dtype=float)\n for i in range(50):\n check_limits(a.rvs(random_state=i), 0, 1)\n assert_array_equal(a.transformed_bounds, (0, 1))\n\n X = rng.rand()\n # Check transformed values are in [0, 1]\n assert np.all(a.transform(X) <= np.ones_like(X))\n assert np.all(np.zeros_like(X) <= a.transform(X))\n\n # Check inverse transform\n X_orig = a.inverse_transform(a.transform(X))\n assert isinstance(X_orig, float)\n assert_array_equal(X_orig, X)\n\n a = Real(0, 1, transform=\"normalize\", dtype='float64')\n X = np.float64(rng.rand())\n # Check inverse transform\n X_orig = a.inverse_transform(a.transform(X))\n assert isinstance(X_orig, np.float64)\n\n a = Real(0, 1, transform=\"normalize\", dtype=np.float64)\n X = np.float64(rng.rand())\n # Check inverse transform\n X_orig = a.inverse_transform(a.transform(X))\n assert isinstance(X_orig, np.float64)\n\n a = Real(0, 1, transform=\"normalize\", dtype='float64')\n X = np.float64(rng.rand())\n # Check inverse transform\n X_orig = a.inverse_transform(a.transform(X))\n assert isinstance(X_orig, np.float64)\n\n\[email protected]_test\ndef test_normalize_integer():\n a = Integer(2, 30, transform=\"normalize\")\n for i in range(50):\n check_limits(a.rvs(random_state=i), 2, 30)\n assert_array_equal(a.transformed_bounds, (0, 1))\n\n X = rng.randint(2, 31, dtype=np.int64)\n # Check transformed values are in [0, 1]\n assert np.all(a.transform(X) <= np.ones_like(X))\n assert np.all(np.zeros_like(X) <= a.transform(X))\n\n # Check inverse transform\n X_orig = a.inverse_transform(a.transform(X))\n assert isinstance(X_orig, np.int64)\n assert_array_equal(X_orig, X)\n\n a = Integer(2, 30, transform=\"normalize\", dtype=int)\n X = rng.randint(2, 31, dtype=int)\n # Check inverse transform\n X_orig = a.inverse_transform(a.transform(X))\n assert isinstance(X_orig, int)\n\n a = Integer(2, 30, transform=\"normalize\", dtype='int')\n X = rng.randint(2, 31, dtype=int)\n # Check inverse transform\n X_orig = a.inverse_transform(a.transform(X))\n assert isinstance(X_orig, int)\n\n a = Integer(2, 30, prior=\"log-uniform\", base=2, transform=\"normalize\",\n dtype=int)\n for i in range(50):\n check_limits(a.rvs(random_state=i), 2, 30)\n assert_array_equal(a.transformed_bounds, (0, 1))\n\n X = rng.randint(2, 31, dtype=int)\n # Check transformed values are in [0, 1]\n assert np.all(a.transform(X) <= np.ones_like(X))\n assert np.all(np.zeros_like(X) <= a.transform(X))\n\n # Check inverse transform\n X_orig = a.inverse_transform(a.transform(X))\n assert isinstance(X_orig, int)\n assert_array_equal(X_orig, X)\n\n\[email protected]_test\ndef test_normalize_categorical():\n categories = [\"cat\", \"dog\", \"rat\"]\n a = Categorical(categories, transform=\"normalize\")\n for i in range(len(categories)):\n assert a.rvs(random_state=i)[0] in categories\n assert a.inverse_transform(0.) 
== categories[0]\n assert a.inverse_transform(0.5) == categories[1]\n assert a.inverse_transform(1.0) == categories[2]\n assert_array_equal(categories, a.inverse_transform([0., 0.5, 1]))\n\n categories = [1, 2, 3]\n a = Categorical(categories, transform=\"normalize\")\n assert_array_equal(categories, np.sort(np.unique(a.rvs(100,\n random_state=1))))\n assert_array_equal(categories, a.inverse_transform([0., 0.5, 1.]))\n\n categories = [1., 2., 3.]\n a = Categorical(categories, transform=\"normalize\")\n assert_array_equal(categories, np.sort(np.unique(a.rvs(100,\n random_state=1))))\n assert_array_equal(categories, a.inverse_transform([0., 0.5, 1.]))\n\n categories = [1, 2, 3]\n a = Categorical(categories, transform=\"string\")\n a.set_transformer(\"normalize\")\n assert_array_equal(categories, np.sort(np.unique(a.rvs(100,\n random_state=1))))\n assert_array_equal(categories, a.inverse_transform([0., 0.5, 1.]))\n\[email protected]_test\ndef test_normalize_integer():\n for dtype in ['int', 'int8', 'int16', 'int32', 'int64',\n 'uint8', 'uint16', 'uint32', 'uint64']:\n a = Integer(2, 30, transform=\"normalize\", dtype=dtype)\n for X in range(2, 31):\n X_orig = a.inverse_transform(a.transform(X))\n assert_array_equal(X_orig, X)\n for dtype in [int, np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64]:\n a = Integer(2, 30, transform=\"normalize\", dtype=dtype)\n for X in range(2, 31):\n X_orig = a.inverse_transform(a.transform(X))\n assert_array_equal(X_orig, X)\n assert isinstance(X_orig, dtype)\n\n\ndef check_valid_transformation(klass):\n assert klass(2, 30, transform=\"normalize\")\n assert klass(2, 30, transform=\"identity\")\n assert_raises_regex(ValueError, \"should be 'normalize' or 'identity'\",\n klass, 2, 30, transform='not a valid transform name')\n\n\[email protected]_test\ndef test_valid_transformation():\n check_valid_transformation(Integer)\n check_valid_transformation(Real)\n\n\[email protected]_test\ndef test_invalid_dimension():\n assert_raises_regex(ValueError, \"has to be a list or tuple\",\n space_check_dimension, \"23\")\n # single value fixes dimension of space\n space_check_dimension((23,))\n\n\[email protected]_test\ndef test_categorical_identity():\n categories = [\"cat\", \"dog\", \"rat\"]\n cat = Categorical(categories, transform=\"identity\")\n samples = cat.rvs(100)\n assert all([t in categories for t in cat.rvs(100)])\n transformed = cat.transform(samples)\n assert_array_equal(transformed, samples)\n assert_array_equal(samples, cat.inverse_transform(transformed))\n\n\[email protected]_test\ndef test_categorical_string():\n categories = [1, 2, 3]\n categories_transformed = [\"1\", \"2\", \"3\"]\n cat = Categorical(categories, transform=\"string\")\n samples = cat.rvs(100)\n assert all([t in categories for t in cat.rvs(100)])\n transformed = cat.transform(samples)\n assert all([t in categories_transformed for t in transformed])\n assert_array_equal(samples, cat.inverse_transform(transformed))\n\n\[email protected]_test\ndef test_categorical_distance():\n categories = ['car', 'dog', 'orange']\n cat = Categorical(categories)\n for cat1 in categories:\n for cat2 in categories:\n delta = cat.distance(cat1, cat2)\n if cat1 == cat2:\n assert delta == 0\n else:\n assert delta == 1\n\n\[email protected]_test\ndef test_integer_distance():\n ints = Integer(1, 10)\n for i in range(1, 10+1):\n assert_equal(ints.distance(4, i), abs(4 - i))\n\n\[email protected]_test\ndef test_integer_distance_out_of_range():\n ints = Integer(1, 10)\n 
assert_raises_regex(RuntimeError, \"compute distance for values within\",\n ints.distance, 11, 10)\n\n\[email protected]_test\ndef test_real_distance_out_of_range():\n ints = Real(1, 10)\n assert_raises_regex(RuntimeError, \"compute distance for values within\",\n ints.distance, 11, 10)\n\n\[email protected]_test\ndef test_real_distance():\n reals = Real(1, 10)\n for i in range(1, 10+1):\n assert_equal(reals.distance(4.1234, i), abs(4.1234 - i))\n\n\[email protected](\"dimension, bounds\",\n [(Real, (2, 1)), (Integer, (2, 1)),\n (Real, (2, 2)), (Integer, (2, 2))])\ndef test_dimension_bounds(dimension, bounds):\n with pytest.raises(ValueError) as exc:\n dim = dimension(*bounds)\n assert \"has to be less than the upper bound \" in exc.value.args[0]\n\n\[email protected](\"dimension, name\",\n [(Real(1, 2, name=\"learning_rate\"), \"learning_rate\"),\n (Integer(1, 100, name=\"n_trees\"), \"n_trees\"),\n (Categorical([\"red, blue\"], name=\"colors\"), \"colors\")])\ndef test_dimension_name(dimension, name):\n assert dimension.name == name\n\n\ndef test_dimension_name():\n notnames = [1, 1., True]\n for n in notnames:\n with pytest.raises(ValueError) as exc:\n real = Real(1, 2, name=n)\n assert(\"Dimension's name must be either string or\"\n \"None.\" == exc.value.args[0])\n s = Space([Real(1, 2, name=\"a\"),\n Integer(1, 100, name=\"b\"),\n Categorical([\"red, blue\"], name=\"c\")])\n assert s[\"a\"] == (0, s.dimensions[0])\n assert s[\"a\", \"c\"] == [(0, s.dimensions[0]), (2, s.dimensions[2])]\n assert s[[\"a\", \"c\"]] == [(0, s.dimensions[0]), (2, s.dimensions[2])]\n assert s[(\"a\", \"c\")] == [(0, s.dimensions[0]), (2, s.dimensions[2])]\n assert s[0] == (0, s.dimensions[0])\n assert s[0, \"c\"] == [(0, s.dimensions[0]), (2, s.dimensions[2])]\n assert s[0, 2] == [(0, s.dimensions[0]), (2, s.dimensions[2])]\n\n\[email protected](\"dimension\",\n [Real(1, 2), Integer(1, 100), Categorical([\"red, blue\"])])\ndef test_dimension_name_none(dimension):\n assert dimension.name is None\n\n\[email protected]_test\ndef test_space_from_yaml():\n with NamedTemporaryFile(delete=False) as tmp:\n tmp.write(b\"\"\"\n Space:\n - Real:\n low: 0.0\n high: 1.0\n - Integer:\n low: -5\n high: 5\n - Categorical:\n categories:\n - a\n - b\n - c\n - Real:\n low: 1.0\n high: 5.0\n prior: log-uniform\n - Categorical:\n categories:\n - e\n - f\n \"\"\")\n tmp.flush()\n\n space = Space([(0.0, 1.0),\n (-5, 5),\n (\"a\", \"b\", \"c\"),\n (1.0, 5.0, \"log-uniform\"),\n (\"e\", \"f\")])\n\n space2 = Space.from_yaml(tmp.name)\n assert_equal(space, space2)\n tmp.close()\n os.unlink(tmp.name)\n\[email protected](\"name\", [1, 1., True])\ndef test_dimension_with_invalid_names(name):\n with pytest.raises(ValueError) as exc:\n Real(1, 2, name=name)\n assert(\"Dimension's name must be either string or None.\" ==\n exc.value.args[0])\n\n\[email protected]_test\ndef test_purely_categorical_space():\n # Test reproduces the bug in #908, make sure it doesn't come back\n dims = [Categorical(['a', 'b', 'c']), Categorical(['A', 'B', 'C'])]\n optimizer = Optimizer(dims, n_initial_points=1, random_state=3)\n\n x = optimizer.ask()\n # before the fix this call raised an exception\n optimizer.tell(x, 1.)\n\n\[email protected]_test\ndef test_partly_categorical_space():\n dims = Space([Categorical(['a', 'b', 'c']), Categorical(['A', 'B', 'C'])])\n assert dims.is_partly_categorical\n dims = Space([Categorical(['a', 'b', 'c']), Integer(1, 2)])\n assert dims.is_partly_categorical\n assert not dims.is_categorical\n dims = Space([Integer(1, 2), 
Integer(1, 2)])\n assert not dims.is_partly_categorical\n"
] | [
[
"numpy.nextafter",
"numpy.array",
"numpy.ones_like",
"numpy.zeros_like",
"numpy.asarray",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.testing.assert_equal",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_raises_regex",
"numpy.float64",
"numpy.allclose",
"numpy.int64",
"numpy.log10"
]
] |
gkaancan/self-balanced-waiter-robot- | [
"9cc2fef49613698171869502abf1bf788f823076"
] | [
"self_balancing_waiter_robot/scripts/mapping.py"
] | [
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\npoint_array = np.loadtxt(\"/home/kaan/points_cloud.txt\")\n\n\n\nplt.plot(point_array[1],point_array[0])\nplt.xlabel(\"Time\")\nplt.ylabel(\"velocity\")\nplt.show()\n\n\n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
] |
bveitch/EpidPy | [
"455cd67afa2efbb774300115abb5fc7d4600b37d"
] | [
"dataset_builder.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 4 11:55:56 2020\n\ndescription : Models and adds sample bias to epidemic data. \nauthor : bveitch\nversion : 1.0\nproject : EpidPy (epidemic modelling in python)\n\nUsage: \n Create synthetics for data fitting\n \n\"\"\"\n\nimport numpy as np;\nimport sir_modeller as sirm\nimport model_builder as mbuild\nimport sir_sampler as samplr\nfrom plottingutils import plot_rsampled\nfrom test_jacobian import dp_test\n\nnstage=30\nmargs={'peakI':5,'peakR':15,'rateI':2,'rateR':0.5,'scaleI':2,'scaleR':1,'I0':1e-4}\nistage=3\nhstage=21\nntestmin=100\nntestmax=10000\npTT=0.95\npTF=0.01\n\n#NOT used at present \ndef estimate_sir_from_nstage(sir_type,nstage,model,scale=0.01):\n \n def est_sir(beta,gamma):\n return [sum[beta],sum[gamma]]\n \n def params_from_jstage(beta,gamma,jstage):\n if(jstage>0):\n b1=sum(beta[0:jstage])/(jstage)\n b2=sum(beta[jstage:nstage])/(nstage-jstage)\n g =sum(gamma[jstage:nstage])/(nstage-jstage)\n e =1/jstage\n else:\n b1=0\n b2=sum(beta)/nstage\n g=sum(gamma)/nstage\n e=1\n return [b1,b2,g,e]\n \n def est_seir(beta,gamma):\n boolb=beta < scale*np.max(beta)\n boolg=gamma < scale*np.max(gamma)\n both = boolb and boolg\n #find first False value\n jstage = next( (istage for istage, b in enumerate(both) if b == False), nstage)\n [b1,b2,g,e] = params_from_jstage(beta,gamma,jstage)\n return [b2,g,e]\n \n def est_si2r(beta,gamma):\n boolg=gamma < scale*np.max(gamma)\n #find first False value\n jstage = next((istage for istage, b in enumerate(boolg) if b == False), nstage)\n [b1,b2,g,e] = params_from_jstage(beta,gamma,jstage)\n return [b1,b2,g,e]\n \n estimates = {\n 'sir' : est_sir,\n 'seir' : est_seir,\n 'si2r' : est_si2r\n }\n \n estimator = estimates.get(sir_type, lambda: \"Invalid modelling type\")\n \n beta=model[0:nstage]\n gamma=model[nstage:2*nstage]\n \n return estimator(beta,gamma)\n\ndef build_data_nstage(args,Test=True):\n\n mod=mbuild.build_nstage_model(nstage,margs)\n nt =args['nt']\n dtype='sir_nstage'\n dargs={'type':dtype,'dt':args['dt'], 'nt':nt, 'ntsub': args['ntsub'],'nstage':nstage}\n sirdata=sirm.sir_mod(dargs)\n\n #Sampling operator with test\n truedata=sirdata.f(mod)\n \n d_intv=[[istage,hstage],[hstage,nstage+1]]\n samp = samplr.Sampler(sirdata.dsize,d_intv)\n data_sum_intv = samp.F(truedata)\n \n infect_genpop = data_sum_intv[:,0]\n hospital_pop = data_sum_intv[:,1]\n dtest=(ntestmax-ntestmin)/(nt-1)\n ntests = ntestmin+dtest*np.arange(0,nt)\n data_rsamp = samplr.randSampler(infect_genpop,pTT,pTF,ntests)\n \n recorded_data = (data_rsamp+hospital_pop).reshape(nt,1)\n \n t=np.arange(0,nt,1)\n plot_rsampled(t,[infect_genpop,hospital_pop],[data_rsamp,hospital_pop],recorded_data,np.sum(truedata[:,1:nstage+1],1))\n if(Test):\n dpargs={'mod': truedata, 'data': data_sum_intv, 'dptol':1e-11}\n dp_pass=dp_test(samp.F,samp.Ft,dpargs)\n if(dp_pass):\n print('dotproduct passed')\n else:\n print('dotproduct failed')\n return recorded_data"
] | [
[
"numpy.max",
"numpy.sum",
"numpy.arange"
]
] |
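Why the `&` fix in `est_seir` above matters: plain `and` between two boolean numpy arrays raises "the truth value of an array ... is ambiguous", while `&` combines them elementwise (toy values below, not model output):

import numpy as np

boolb = np.array([True, True, False])
boolg = np.array([True, False, False])
both = boolb & boolg                                  # array([ True, False, False])
jstage = next((i for i, b in enumerate(both) if not b), len(both))
print(jstage)                                         # 1: first stage where the combined mask is False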
um-dsp/Morphence | [
"781d84ebc884ee3053a0355adfbd20312c627308"
] | [
"Copycat/Framework/oracle/model.py"
] | [
"#torch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass CNN(nn.Module):\n \"\"\"Sample model.\"\"\"\n\n def __init__(self):\n super(CNN, self).__init__()\n\n self.conv_layer = nn.Sequential(\n\n # Conv Layer block 1\n nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n # Conv Layer block 2\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Dropout2d(p=0.05),\n\n # Conv Layer block 3\n nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n\n\n self.fc_layer = nn.Sequential(\n nn.Dropout(p=0.1),\n nn.Linear(4096, 1024),\n nn.ReLU(inplace=True),\n nn.Linear(1024, 512),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.1),\n nn.Linear(512, 10)\n )\n\n def forward(self, x):\n # conv layers\n x = self.conv_layer(x)\n # flatten\n x = x.view(x.size(0), -1)\n # fc layer\n x = self.fc_layer(x)\n\n return x\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Dropout2d"
]
] |
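A usage sketch for the CNN above. The 4096 input to `fc_layer` implies 32x32 inputs (e.g. CIFAR-10): three 2x2 max-pools reduce 32x32 to 4x4, and 256 * 4 * 4 = 4096. The batch size and import path are assumptions:

import torch
from oracle.model import CNN  # import path assumed from the file_path field

model = CNN()
x = torch.randn(4, 3, 32, 32)  # CIFAR-10-sized batch
logits = model(x)              # conv blocks -> flatten to 4096 -> fc head
print(logits.shape)            # torch.Size([4, 10])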
leehsiu/nerfactor | [
"87f7d3ffa56bdbca925958a4b89e249d35006c80"
] | [
"nerfactor/networks/embedder.py"
] | [
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\n\nfrom util import logging as logutil\n\n\nlogger = logutil.Logger(loggee=\"networks/embedder\")\n\n\nclass Embedder:\n def __init__(\n self, incl_input=True, in_dims=3, log2_max_freq=3, n_freqs=4,\n log_sampling=True, periodic_func=None):\n if periodic_func is None:\n periodic_func = [tf.math.sin, tf.math.cos]\n embed_func = []\n out_dims = 0\n if incl_input:\n embed_func.append(lambda x: x)\n out_dims += in_dims\n if log_sampling:\n freq_bands = 2. ** tf.linspace(0., log2_max_freq, n_freqs)\n else:\n freq_bands = tf.linspace(2. ** 0., 2. ** log2_max_freq, n_freqs)\n for freq in freq_bands:\n for p_f in periodic_func:\n embed_func.append(\n lambda x, p_f=p_f, freq=freq: p_f(x * freq))\n out_dims += in_dims\n self.out_dims = out_dims\n self.embed_func = embed_func\n\n def __call__(self, x):\n return tf.concat([f(x) for f in self.embed_func], -1)\n"
] | [
[
"tensorflow.linspace"
]
] |
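With the defaults above (identity term included, in_dims=3, n_freqs=4, sin and cos), the positional encoding yields 3 + 3 * 2 * 4 = 27 output dimensions. A small sketch; the point count and import path are assumptions:

import tensorflow as tf
from nerfactor.networks.embedder import Embedder  # import path assumed from the file_path field

embedder = Embedder()
pts = tf.random.uniform((1024, 3))   # batch of 3D points
emb = embedder(pts)                  # identity + sin/cos at 4 frequencies, concatenated
print(embedder.out_dims, emb.shape)  # 27 (1024, 27)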
LemurPwned/VISM | [
"4d1e6b68d2bf1f9f3a09ce42c531ed2ce1d16400"
] | [
"video_utils/video_composer.py"
] | [
"import PIL\nimport skvideo.io as skv\nimport skimage.io as ski\nfrom skimage import color\nimport numpy as np\nimport os\nimport glob\n\n\nclass Movie:\n def __init__(self, directory, fformat='.png', cleanup=True):\n self.directory = directory\n self.filename = \"\"\n self.format = fformat\n self.framerate = 3\n self.cleanup = cleanup\n\n def create_video(self):\n '''\n composes video from .fformat files, requires ffmpeg\n '''\n fileList = glob.glob(os.path.join(self.directory, '*' + self.format))\n if len(fileList) < 1:\n raise EnvironmentError\n total_movie = []\n fileList.sort(key=lambda x: os.path.basename(x))\n print(\"Merging up the files. This might take a moment...\")\n for filename in fileList:\n print(filename)\n img = ski.imread(filename)\n for i in range(self.framerate):\n total_movie.append(img)\n total_movie = np.array(total_movie)\n skv.vwrite(os.path.join(self.directory, 'movie.mkv'), total_movie)\n # self.do_cleanup(fileList)\n\n def do_cleanup(self, filenames):\n print(\"Cleaning up the files ...\")\n for filename in filenames:\n try:\n os.remove(filename)\n except OSError:\n pass\n\n\nif __name__ == \"__main__\":\n mv = Movie('../Screenshots/GL_STM')\n mv.create_video()\n"
] | [
[
"numpy.array"
]
] |
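A usage sketch for Movie, assuming a folder of .png frames (the path is a placeholder); each frame is repeated 'framerate' times before scikit-video writes movie.mkv, so ffmpeg must be available:

mv = Movie('/path/to/frames', fformat='.png')
mv.create_video()  # writes /path/to/frames/movie.mkv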
mawanda-jun/IntelligentOCR | [
"ccb9838744ea829ca1b01dae8ba1525a489fd6c4"
] | [
"extract_pages_from_pdf.py"
] | [
"\"\"\"\nThe first step of the pipeline lead us to generate good images from pdf to do inference and OCR.\nTo avoid memory leak - as the user can upload very large pdf files - I've decided to use tue utility\npdftoppm and access one page at a once.\nThen the pages are beautified - this part can be better, since the only thing I do here is deskewing pages\nIn particular, for deskewing object a personalized version of alyn has been created and must be installed\nfrom wheels/alyn-xxx.whl: now it is possible to load, deskew and retrieve a numpy image without writing it\non disk.\nIf needed the user can write resulting images on disk.\n\"\"\"\nfrom PIL import Image\nfrom alyn import deskew\nimport os\nimport errno\nimport numpy as np\nfrom costants import \\\n EXTRACTION_DPI, \\\n TEMP_IMG_FOLDER_FROM_PDF, \\\n PATH_TO_EXTRACTED_IMAGES, \\\n TEST_PDF_PATH\nfrom personal_errors import InputError, OutputError, APIError\nfrom subprocess import Popen, PIPE, STDOUT\nimport copy\n\nimport logging\nfrom logger import TimeHandler\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nlogger.addHandler(TimeHandler().handler)\n\n\ndef clear_and_create_temp_folders(path_to_folder=PATH_TO_EXTRACTED_IMAGES):\n \"\"\"\n Create a folder with file name to store images extracted from pdf. If path exists it is deleted and then re-created\n :param path_to_folder: path/to/folder in which to store images.\n :return void\n \"\"\"\n logger.info('Clear and create temp file for images from pdf')\n try:\n os.makedirs(path_to_folder)\n logger.info('Folder created successfully')\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n message = '{path}\\nwas not created correctly.' \\\n .format(path=path_to_folder)\n raise InputError(\n message=message\n )\n else:\n logger.info('Folder exists')\n\n\ndef write_image_on_disk(file_name, pil_image, page=0, path=PATH_TO_EXTRACTED_IMAGES):\n \"\"\"\n Writes image on disk\n :param file_name: name of original file\n :param pil_image: numpy array greyscale image\n :param page: page counter from upward function.\n :param path: path/to/folder where to write images\n :return:\n \"\"\"\n logger.info('Writing temp images on disk...')\n path_to_image = os.path.join(path, '{fn}_page_{c}.jpeg'.format(fn=file_name, c=page))\n try:\n pil_image.save(path_to_image, dpi=(EXTRACTION_DPI, EXTRACTION_DPI))\n logger.info('Image_{} wrote on disk'.format(page))\n except IOError or ValueError as e:\n raise OutputError(\n message='Cannot write image on disk: \\n{}'.format(e)\n )\n\n\ndef from_pdf_to_pil_generator(file_path, temp_folder=TEMP_IMG_FOLDER_FROM_PDF, thread_name=None):\n \"\"\"\n Create a page generator from pdf to make it load less RAM as it takes one page at a once. It read a page at once from\n pdf, then acquire it in RAM and offer as generator.\n It temporarly write the image in temp_folder, then it delete it automatically\n :param file_path: path/to/file.pdf\n :param thread_name: name of the thread in case of batch process\n :param temp_folder: path/to/folder to store temp image before acquiring it in RAM\n :return: PIL generator. 
Return None if nothing is found\n \"\"\"\n\n if not os.path.isfile(file_path):\n raise InputError(\n message='{} not found'.format(file_path)\n )\n else:\n page = 1\n # logger.info(\"Creating page generator from {path}...\".format(path=file_path))\n if not os.path.isdir(temp_folder):\n try:\n os.makedirs(temp_folder)\n logger.info('Temp folder for extraction written on disk')\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise OutputError(\n message=exc\n )\n else:\n logger.info('{} already exists. No need to create it'.format(temp_folder))\n # Extract one page at a once. The iterator goes from first page to last until it reaches the end. In that case a\n # StopIteraton is raised.\n # Uses pdftoppm\n while True:\n\n args = [\n \"pdftoppm\",\n \"-l\",\n str(page),\n \"-f\",\n str(page),\n \"-r\",\n str(EXTRACTION_DPI),\n \"-gray\",\n file_path,\n os.path.join(temp_folder, \"temp-{}\".format(thread_name))\n ]\n\n # args.append(item for item in config_list)\n\n proc = Popen(\n args,\n stdin=PIPE,\n stdout=PIPE,\n stderr=STDOUT,\n # cwd=os.path.join(temp_folder)\n )\n output, outerr = proc.communicate()\n\n if proc.returncode == 0:\n # Everything went well\n logger.info(\"Page {} successfully extracted\".format(page))\n # checking if the number of pages goes up to 999 pages. In the case that the number of pages is > 10,\n # the temp file number of the first page will be 01 instead of 1. If num_pages > 100, then 001 instead of 1.\n # here we check if temp file exists, if not we check the 01 one and so on.\n fp = os.path.join(temp_folder, 'temp-{tn}-{n}.pgm'.format(n=page, tn=thread_name))\n if page < 10:\n if not os.path.isfile(fp):\n fp = os.path.join(temp_folder,\n 'temp-{tn}-0{n}.pgm'.format(n=page, tn=thread_name))\n if not os.path.isfile(fp):\n fp = os.path.join(temp_folder,\n 'temp-{tn}-00{n}.pgm'.format(n=page, tn=thread_name))\n\n elif 11 <= page <= 100:\n if not os.path.isfile(fp):\n fp = os.path.join(temp_folder,\n 'temp-{tn}-0{n}.pgm'.format(n=page, tn=thread_name))\n\n try:\n img = Image.open(fp)\n # explicit copy of image so we can delete it from disk safely\n img = copy.deepcopy(img)\n if os.path.exists(fp):\n os.remove(fp)\n # convert image to greyscale mode\n img.convert(mode='L')\n page += 1\n # return it as a generator\n yield img\n # return img\n except FileNotFoundError as e:\n raise InputError(\n message=e\n )\n\n # case mostly used for stopping iteration when EOF\n else:\n if outerr is None:\n logger.warning('pdftoppm output: {}'.format(output))\n logger.warning('Probably reached end of file.')\n raise StopIteration\n else:\n logger.error('Something went wrong...')\n logger.error('pdftoppm output: {}'.format(output))\n raise InputError(\n message='pdftoppm error: {}'.format(outerr)\n )\n\n\ndef beautify_pages(page_generator, file_name, extraction_path=PATH_TO_EXTRACTED_IMAGES):\n \"\"\"\n Function to beautify pages for inference.\n :param page_generator: list of pillow images\n :return: beautified list of pages\n \"\"\"\n counter = 0\n for page in page_generator:\n # if page was not converted to greyscale yet\n page_grey = page.convert(\n mode='L'\n )\n logger.info('Page converted to greyscale')\n # load image as np for beautifying\n logger.info('Beautifying pages...')\n # I decided to make another function to beautify a single page at a once avoiding correlation\n image_np = np.asarray(page_grey)\n beautified_np = beautify_image(image_np)\n page_grey = Image.fromarray(beautified_np).convert('L')\n if extraction_path is not 
None:\n destination_folder = os.path.join(extraction_path, file_name)\n logger.info('Creating folder: {}'.format(destination_folder))\n clear_and_create_temp_folders(path_to_folder=destination_folder)\n logger.info('Temp folder created')\n # create a deep copy of generator since the for loops consume generators\n # copy_of_pil_gen = copy.deepcopy(bw_beautified_pil_gen)\n logger.info('Writing images on disk')\n write_image_on_disk(file_name, copy.deepcopy(page_grey), counter, path=destination_folder)\n counter += 1\n logger.info('Pages beautified')\n # page = page_grey\n\n # return b/w pil generator\n yield page_grey\n\n\ndef beautify_image(np_array_image):\n \"\"\"\n Do some modifications to images. This is the right place to put background noise removal, for example.\n Here we only de-skew images to help OCR and table recognition later\n :param np_array_image: input numpy array image\n :return: a beautified numpy array image\n \"\"\"\n logger.info('Beautifying images...')\n\n logger.info('Doing deskew...')\n try:\n sd = deskew.Deskew(\n input_numpy=np_array_image,\n output_numpy=True\n )\n de_skewed_image_np = sd.run()\n logger.info('Deskew done.')\n\n to_return = de_skewed_image_np\n logger.info('Image beautified.')\n return to_return\n except Exception as e:\n # deskew is not so well implemented so I'm catching every exception\n raise APIError(\n message='Deskew is not performing well. Please check API\\n{}'.format(e)\n )\n\n\ndef generate_pil_images_from_pdf(file_path, temp_path=TEMP_IMG_FOLDER_FROM_PDF, thread_name='',\n extraction_path=PATH_TO_EXTRACTED_IMAGES):\n \"\"\"\n Takes a pdf file and offer it as a generator of pillow 8-bit greyscale single channel images.\n :param file_path: /path/to/pdf.pdf\n :param temp_path: /path/to/tempfiles.\n :param thread_name: name of referring thread\n :param extraction_path: default is None, path/to/folder to save the result of beautified images on disk\n :return: dict with: 'status': True if everything went good, False instead. Messages/data are inside 'data'\n \"\"\"\n\n file_name = os.path.basename(file_path).split('.')[0]\n # clear temp path to store the extracted pages\n # effectively extract pages\n pil_gen = from_pdf_to_pil_generator(file_path, thread_name=thread_name, temp_folder=temp_path)\n # beautify pages before do inference on them. Possibility to write result on disk\n # with yield we cannot check if the status of the return is False or True,\n # so we have to manage it inside beautify_pages\n bw_beautified_pil_gen = beautify_pages(page_generator=pil_gen, file_name=file_name, extraction_path=extraction_path)\n # logger.info('Extraction of pages from pdf completed')\n\n return bw_beautified_pil_gen\n\n\nif __name__ == '__main__':\n generator = generate_pil_images_from_pdf(\n file_path=TEST_PDF_PATH,\n temp_path=TEMP_IMG_FOLDER_FROM_PDF,\n extraction_path=PATH_TO_EXTRACTED_IMAGES\n )\n for image in generator:\n print(image)\n"
] | [
[
"numpy.asarray"
]
] |
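The page-at-a-time extraction above boils down to one pdftoppm call per page. A minimal standalone sketch of that call, with 'sample.pdf', the 300 dpi value and the output prefix as placeholder assumptions:

import subprocess

# -f/-l restrict extraction to a single page; -r sets the dpi; -gray yields a greyscale .pgm
subprocess.run(
    ["pdftoppm", "-f", "1", "-l", "1", "-r", "300", "-gray", "sample.pdf", "/tmp/page"],
    check=True,
)
# pdftoppm zero-pads the page number based on document length, e.g. /tmp/page-1.pgm or /tmp/page-01.pgm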
EoRImaging/GleamPlusFullSpectrum | [
"2da591dc063621b78a648ace486cef0bdf0c7f81"
] | [
"scripts/gleam_low_freq_fits.py"
] | [
"from pyradiosky import SkyModel, utils\nimport numpy as np\nfrom astropy.table import Table, setdiff\nfrom astropy.utils.diff import report_diff_values\nfrom astropy.io import fits\nfrom operator import itemgetter\nimport numpy.polynomial.polynomial as poly\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nimport psutil\nimport erfa\nimport csv\nfrom matplotlib.colors import LogNorm\nsm = SkyModel()\n\ndef log_linear_fit(freqs, fit_data, stokes_error, dec, gleam_catalog, detect_outlier = False):\n \"\"\"\n This is the fit modeling function. It compute combined error, fits the log of source data to a linear\n polynomial, and calculates a chi2 residual. It can also perform an outlier analysis, to see if any\n significant outliers exist for a given source.\n\n Parameters\n ----------\n freqs : ndarray\n Frequencies available for the source.\n fit_data : ndarray\n Fluxes for available frequencies.\n stokes_error : ndarray\n The error for the source in the catalog.\n dec : ndarray\n Declination of the source. Total error is a combination of the stokes_error and an\n error calculated from the declination of the source.\n detect_outlier : bool\n Will test the source data to determine if there is a significant outlier that affects the\n fit. When 'detect_outlier'=True, fits on all points, then determines the greatest outlier from\n the first fit by detetermining the largest residual from the first fit. Fits again without this\n outlier. If the reduced chi2 is improved by more than 2.6x, the outlier is removed and the fit\n without this point is used.\n\n Returns\n -------\n coeffs : ndarray\n Coefficients of the linear fit of data. A pair of numbers, [b, m] corresponding to the equation\n y=b+mx\n chi2_residual : float\n The reduced chi^2 value for the fit. When 'detect_outlier'=True, the chi2_residual of the better fit\n fitted_data : ndarray\n Modeled fluxes at only the frequencies provided in 'freqs'\n all_freqs_fitted_data : ndarray\n Modeled fluxes at all GLEAM frequencies\n fitted_freqs : ndarray\n Only frequencies that are used in the fit\n fit_data_selected : ndarray\n Fluxes corresponding to the 'fitted_freqs' frequencies. Sometimes there are NaN values in\n only one of fluxes or errors, so this removes those datapoints. This usually happens if\n the flux at a frequency was negative, and has been turned into a NaN.\n original_parameters : ndarray\n If an outlier is determined and the outlier-removed fit is superior to the full fit, an array containing\n the fit parameters that included the outlier. If there is no outlier, 'original_parameters'=NaN\n low_fit : float\n The projected flux at 50 MHz. 
This is useful if you want to see how the extrapolation of the fit\n performs outside GLEAM's frequency range.\n \"\"\"\n # Calculate sky coordinate-based portion of error\n if (dec >= 18.5) or (dec <= -72):\n loc_error = fit_data * .03\n else:\n loc_error = fit_data * .02\n\n # Compute total error and weight for the polyfit\n total_error = np.sqrt(loc_error**2 + stokes_error**2)\n weight = np.log10(1 / total_error)\n\n # Convert data into log scale for polyfit\n fit_data_log = np.log10(fit_data)\n freqs_log = np.log10(freqs)\n all_freqs_log = np.log10(gleam_catalog.freq_array.value)\n\n # Subset to freqs with no nans in vals or errors and do polyfit on only those freqs\n idx = np.isfinite(freqs_log) & np.isfinite(fit_data_log) & np.isfinite(weight)\n\n # coeffs is a pair of numbers (b, m) corresponding to the equation y=b+mx\n coeffs = poly.polyfit(freqs_log[idx], fit_data_log[idx], w = weight[idx], deg=1)\n\n # Use coeffs to generate modeled vals at only freqs that were used to make coeffs\n fit_log = poly.polyval(freqs_log[idx], coeffs)\n fitted_data = 10**fit_log\n\n # use coefficients to generate modeled vals at all 20 freqs\n full_fit_log = poly.polyval(all_freqs_log, coeffs)\n all_freqs_fitted_data = 10**full_fit_log\n\n # generate modeled val at 50 MHz\n low_fit_log = poly.polyval(np.log10(50000000), coeffs)\n low_fit = 10**low_fit_log\n\n # compute reduced chi2 value\n variance = total_error[idx]**2\n residual = fit_data[idx] - fitted_data\n chi2 = sum((residual**2) / variance)\n chi2_residual = chi2 / (len(freqs[idx]) - 2)\n\n fitted_freqs = freqs[idx]\n fit_data_selected = fit_data[idx]\n\n original_parameters = np.array([[float(\"NaN\")]])\n\n # Outlier detection reruns fit without greatest outlier\n if detect_outlier is True:\n idx_outlier = np.argmax(abs(residual))\n\n # create datasets with outlier removed\n log_data_ol = np.delete(fit_data_log[idx], idx_outlier)\n log_freq_ol = np.delete(freqs_log[idx], idx_outlier)\n weight_ol = np.delete(weight[idx], idx_outlier)\n\n # fit without the outlier\n coeffs_ol = poly.polyfit(log_freq_ol, log_data_ol, w=weight_ol, deg=1)\n\n fit_log_ol = poly.polyval(log_freq_ol, coeffs_ol)\n fitted_data_ol = 10**fit_log_ol\n full_fit_log_ol = poly.polyval(all_freqs_log, coeffs_ol)\n all_freqs_fitted_data_ol = 10**full_fit_log_ol\n\n # compute chi2 using this new fit\n variance_ol = np.delete(total_error[idx], idx_outlier)**2\n residual_ol = np.delete(fit_data[idx], idx_outlier) - np.delete(fitted_data, idx_outlier)\n\n chi2_ol = sum((residual_ol**2) / variance_ol)\n chi2_residual_ol = chi2_ol / (len(np.delete(freqs[idx], idx_outlier)) - 2)\n\n # see if fit has improved\n if chi2_residual_ol < chi2_residual / 2.6:\n\n original_parameters = np.array([coeffs, chi2_residual, fitted_data, all_freqs_fitted_data, fitted_freqs, fit_data_selected], dtype=object)\n\n # reassign values with outlier removed version of fit\n chi2_residual = chi2_residual_ol\n coeffs = coeffs_ol\n fitted_data = fitted_data_ol\n all_freqs_fitted_data = all_freqs_fitted_data_ol\n fitted_freqs = np.delete(freqs[idx], idx_outlier)\n fit_data_selected = np.delete(fit_data[idx], idx_outlier)\n\n return(coeffs, chi2_residual, fitted_data, all_freqs_fitted_data, fitted_freqs, fit_data_selected, original_parameters, low_fit)\n\n\ndef low_freq_multifit(catalog_loc):\n \"\"\"\n This function performs a multi-layered fit of the GLEAM catalog sources with a preference for low frequencies.\n A fit is always based on at least 4 data points.\n For a given source;\n 1. 
Replace any negative fluxes with NaN's\n 2. Run 'log_linear_fit' function on source data, checking for significant outliers\n 3. If the source is >1Jy at 150 MHz, and if the reduced chi2 of the fit is >= 1.93, run 'log_linear_fit'\n again, on only the bottom half of available frequencies for the source. Sources dimmer than 1Jy do not\n show much fit improvement by reducing the number of frequencies, because their scatter is high.\n 4. If the reduced chi2 of the fit of the bottom half of frequencies is still >= 1.93, AND there are at\n least 8 frequencies in the bottom half of frequencies, run 'log_linear_fit' a third time on only the\n bottom quarter of frequencies for the source.\n 5. If the reduced chi2 is still >= 1.93, the fit with the lowest chi2 is selected as the best fit.\n 6. Parameters and data for the best/final fit are put into a dict, which also includes a keyword whose\n value is all the parameters of any previous fits.\n\n Parameters\n ----------\n catalog_loc : str\n The full file location of the gleam catalog.\n\n Returns\n -------\n source_dict : dict\n A dictionary containing dicts of the data and parameters for the best fit for each source.\n For a given source, the dict contains:\n ra : ndarray\n The Right Ascension of the source\n dec : ndarray\n The declination of the source.\n coefficients : ndarray\n The coefficients of the best fit, [b, m] corresponding to y=b+mx, the linear fit of logged\n source data.\n chi2_residual : float\n The reduced chi2 value of the best fit of the source.\n prev_fit_data : list\n Data from previous fits that were not the best fit. If a source did not have a given fit,\n the value is NaN. For example if there were only a full and half fit, values corresponding\n to the quarter fit will be NaN.\n In order:\n - Data for all frequencies 'all_freqs_fitted_data' from full fit\n - Data for all frequencies from half fit\n - Data for all frequencies from quarter fit\n - Chi2 residual 'chi2_residual' from full fit\n - Chi2 residual from half fit\n - Chi2 residual from quarter fit\n - Data from only provided freqs 'fitted_data' from full fit\n - Data from only provided freqs from half fit\n - Data from only provided freqs from quarter fit\n - Coefficients from the initial full fit, 'coeffs'\n fitted_data : ndarray\n Modeled fluxes at only the frequencies provided in 'freqs'\n freqs : ndarray\n All frequencies in GLEAM\n freqs_used_for_fit : ndarray\n Only frequencies used by 'log_linear_fit', corresponds to the 'fitted_freqs' output from\n 'log_linear_fit'\n data_used_for_fit : ndarray\n Fluxes corresponding to the 'freqs_used_for_fit' frequencies. Corresponds to the 'fit_data_selected'\n output from 'log_linear_fit'\n pre_outlier_removal_output : ndarray\n Contains data from the 'original_parameters' output from 'log_linear_fit'. These are all the data,\n coefficients, etc from a fit where outliers were checked for and if necessary removed.\n variance : float\n The variance is a measure of the scatter of the original flux data. Flux data is normalized,\n and then the difference between adjacent points is calculated. The variance describes\n overall how closely aligned adjacent datapoints are. A low variance indicates a low scatter,\n and datapoints that follow an overall trend. High variance indicates that there is a lot of\n intrinsic data scatter. This can be useful in looking at overall goodness of fit, since we'd\n expect sources with a lot of intrinsic scatter and a high variance to have mediocre fits due to\n data quality issues. 
Conversely, a poor fit on a source with low variance would be something\n to be concerned about.\n 50_mhz_extrapolation : list\n - Projected flux at 50 MHz for the best of all fits performed.\n - Projected flux at 50 MHz for the initial fit on all available frequencies.\n\n \"\"\"\n\n # Import gleam source catalog\n gleam_catalog = sm.from_gleam_catalog(catalog_loc, spectral_type=\"subband\", with_error=True)\n\n # Initialize arrays\n source_dict = {}\n bad_chi2 = []\n fit_averages = []\n problem_objs = []\n\n # Separate all rows that contain nans\n for source in np.arange(gleam_catalog.Ncomponents):\n fit_data = gleam_catalog.stokes.value[0, :, source]\n dec = gleam_catalog.dec.value[source]\n freqs = gleam_catalog.freq_array.value\n stokes_error = gleam_catalog.stokes_error.value[0, :, source]\n\n mean_adj_data = (fit_data - np.nanmean(fit_data)) / np.nanmean(fit_data)\n\n diff = np.diff(mean_adj_data)\n variance = np.nanvar(diff)\n\n # Initialize arrays for half and quarter fits\n out2 = np.array([[float(\"NaN\")], [float(\"NaN\")], [float(\"NaN\")], [float(\"NaN\")]])\n out3 = np.array([[float(\"NaN\")], [float(\"NaN\")], [float(\"NaN\")], [float(\"NaN\")]])\n\n # Find sources that have missing values in only one of error and vals\n source_probs = []\n for i in range(len(fit_data)):\n if np.isnan(fit_data[i]):\n if ~np.isnan(stokes_error[i]) and not source_probs:\n source_probs.append([fit_data, stokes_error])\n else:\n if np.isnan(stokes_error[i]) and not source_probs:\n source_probs.append([fit_data, stokes_error])\n\n # Only include in problems list if there WAS a problem, exclude source from rest of fitting\n if source_probs:\n problem_objs.append([source, gleam_catalog.ra.value[source], gleam_catalog.dec.value[source], source_probs])\n continue\n\n # Eliminate negative fluxes by turning into nans before fitting\n fit_data[fit_data < 0] = np.nan\n indices = np.argwhere(~np.isnan(fit_data)).flatten()\n\n # Skip sources with no values\n if np.all(np.isnan(fit_data)):\n continue\n\n # Perform full fit using all freqs available for source\n out1 = log_linear_fit(freqs, fit_data, stokes_error, dec, gleam_catalog, detect_outlier=True)\n\n # Transfer output to 'out', which is the final output variable, to save results from this fit in out1\n out = out1\n\n # if chi2_residual is >=1.93 and brighter than 1Jy at 150MHz, fit again with fewer freqs\n if out[1] >= 1.93:\n if fit_data[9] >= 1:\n\n # Fit with bottom half of freqs\n if len(fit_data[indices]) >= 8:\n half_freqs = freqs[indices[:int(len(indices) / 2)]]\n fit_data_half = fit_data[indices[:int(len(indices) / 2)]]\n error_half = stokes_error[indices[:int(len(indices) / 2)]]\n\n # Fit with bottom half of freqs\n out2 = log_linear_fit(half_freqs, fit_data_half, error_half, dec, gleam_catalog)\n out = out2\n\n # if 2nd fit has poor chi2, fit with bottom 1/4 freqs\n if out[1] >= 1.93:\n # If original freqs >=16, fit on bottom 1/4\n if len(half_freqs) >= 8:\n qt_freqs = half_freqs[:int(len(half_freqs) / 2)]\n fit_data_qt = fit_data_half[:int(len(half_freqs) / 2)]\n error_qt = error_half[:int(len(half_freqs) / 2)]\n\n out3 = log_linear_fit(qt_freqs, fit_data_qt, error_qt, dec, gleam_catalog)\n out = out3\n\n # If there are <16 total non-nan frequencies, fit on bottom 4\n else:\n bottom_freqs = freqs[indices[:4]]\n fit_data_bottom = fit_data[indices[:4]]\n error_bottom = stokes_error[indices[:4]]\n\n out3 = log_linear_fit(bottom_freqs, fit_data_bottom, error_bottom, dec, gleam_catalog)\n out = out3\n\n else:\n # If bottom half of freqs is 
small, run fit on bottom 4 freqs, and do not attempt 3rd fit\n bottom_freqs = freqs[indices[:4]]\n fit_data_bottom = fit_data[indices[:4]]\n error_bottom = stokes_error[indices[:4]]\n\n # Fit with bottom half of freqs\n out2 = log_linear_fit(bottom_freqs, fit_data_bottom, error_bottom, dec, gleam_catalog)\n out = out2\n\n # if chi2_residual is still large after all iterations, take lowest one\n if out[1] >= 1.93:\n bad_chi2.append([source, out1[3], out2[3], out3[3], out1[1], out2[1], out3[1]])\n\n # select best of 3 fit options by chi2 val and use as final fit\n prev_rounds = {\"out1\": out1[1], \"out2\": out2[1], \"out3\": out3[1]}\n best_fit = min(prev_rounds, key=prev_rounds.get)\n out = eval(best_fit)\n\n fit_averages.append(np.average(out[3]))\n\n # Create dict with final vals\n source_vars = {\n \"ra\": gleam_catalog.ra.value[source],\n \"dec\": dec,\n \"coefficients\": out[0],\n \"chi2_residual\": out[1],\n \"prev_fit_data\": [out1[3], out2[3], out3[3], out1[1], out2[1], out3[1],\n out1[2], out2[2], out3[2], out1[0]],\n \"fitted_data\": out[2],\n \"all_freqs_fitted_data\": out[3],\n \"freqs\": freqs,\n \"freqs_used_for_fit\": out[4],\n \"data_used_for_fit\": out[5],\n \"pre_outlier_removal_output\": out[6],\n \"variance\": variance,\n \"50_mhz_extrapolation\": [out[7], out1[7]]\n }\n # source_dict is a dict of dicts\n source_dict[source] = source_vars\n return(source_dict)\n\n\ndef spectral_ind_dist(source_dict, plot_type='diff', flux_threshold=1, save_loc=None):\n \"\"\"\n Creates a 2D histogram of source brightness at 150 MHz vs spectral index\n\n Parameters\n ----------\n source_dict : dict\n The output from 'low_freq_fit', a dict of data and parameters describing the fits of sources\n plot_type : str\n Which fit iteration spectral index to plot. Must be one of:\n - \"diff\" (Difference between initial and final fit spectral indices)\n - \"first\" (Spectral index of initial, all-frequency fits)\n - \"final\" (Spectral index of final, best fits)\n flux_threshold : float\n Flux threshold used in 'low_freq_fit'. Should be identical to that threshold. 
Default is 1Jy.\n save_loc : str\n Path of folder to save plots in.\n\n Outputs\n -------\n Plot of brightness distribution on x axis and spectral index (or difference between spectral indices)\n on y axis.\n\n If plot_type = \"diff\", there will be no sources below the flux threshold specified in 'low_freq_fit',\n because only 1 fit is done at sources dimmer than that threshold.\n\n If \"save_loc\" is specified, the plot will be saved in that location as:\n \"path.../flux150_vs_spectral_index_hist_[plot_type].png\"\n\n \"\"\"\n midband = []\n diff = []\n first = []\n last = []\n\n for i in source_dict:\n if i in source_dict:\n if plot_type == 'diff':\n\n # Ignore sources with only one fit\n if np.isnan(source_dict[i]['prev_fit_data'][1][0]):\n continue\n else:\n midband.append(source_dict[i]['prev_fit_data'][0][9])\n first_ind = source_dict[i]['prev_fit_data'][9][1]\n last_ind = source_dict[i]['coefficients'][1]\n diff.append(last_ind - first_ind)\n elif plot_type == 'first':\n midband.append(source_dict[i]['prev_fit_data'][0][9])\n first.append(source_dict[i]['prev_fit_data'][9][1])\n elif plot_type == 'last':\n midband.append(source_dict[i]['prev_fit_data'][0][9])\n last.append(source_dict[i]['coefficients'][1])\n\n if plot_type == 'diff':\n plt.hist2d(midband, diff, bins=100, norm=LogNorm(), range=[[0, 5], [-16, 16]])\n plt.ylabel(\"Spectral index diff, [last - first]\")\n plt.title(\"Distribution of source flux vs spectral index, [final - first] fit, multifit threshold: \" + str(flux_threshold) + \" Jy\")\n\n elif plot_type == 'first':\n plt.hist2d(midband, first, bins=100, norm=LogNorm(), range=[[0, 5], [-10, 10]])\n plt.ylabel(\"Spectral index, first fit\")\n plt.title(\"Distribution of source flux vs spectral index, \" + plot_type + \" fit, multifit threshold: \" + str(flux_threshold) + \" Jy\")\n\n elif plot_type == 'final':\n plt.hist2d(midband, last, bins=100, norm=LogNorm(), range=[[0, 5], [-10, 10]])\n plt.ylabel(\"Spectral index, final fit\")\n plt.title(\"Distribution of source flux vs spectral index, \" + plot_type + \" fit, multifit threshold: \" + str(flux_threshold) + \" Jy\")\n\n plt.xlabel(\"150 Mhz flux\")\n\n if save_loc is not None:\n filepath = save_loc + \"flux150_vs_spectral_index_hist_\" + plot_type + \"_\" + str(flux_threshold) + \"jy.png\"\n plt.savefig(filepath)\n"
] | [
[
"numpy.delete",
"numpy.array",
"numpy.isnan",
"numpy.polynomial.polynomial.polyval",
"numpy.polynomial.polynomial.polyfit",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"numpy.diff",
"numpy.nanmean",
"numpy.arange",
"numpy.sqrt",
"numpy.isfinite",
"numpy.nanvar",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"numpy.average",
"matplotlib.colors.LogNorm"
]
] |
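The core of log_linear_fit is a weighted degree-1 polyfit in log-log space, so the slope is the spectral index. A toy check on a synthetic power-law source (the 10 Jy normalization and -0.8 index are made-up values):

import numpy as np
import numpy.polynomial.polynomial as poly

freqs = np.linspace(76e6, 227e6, 20)            # GLEAM-like band, in Hz
flux = 10.0 * (freqs / 150e6) ** -0.8           # synthetic power law
coeffs = poly.polyfit(np.log10(freqs), np.log10(flux), deg=1)
print(coeffs[1])                                # recovered spectral index, ~ -0.8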
parolaraul/itChallengeML2017 | [
"c7e5d65ff5f9207342158dc2818638062ce3c220"
] | [
"Argentina - Mondiola Rock - 90 pts/Practica/TP2/ejercicio 7/busqueda_local.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nHeurรญsticas de bรบsqueda local\n\n@author: Germรกn L. Osella Massa (german.osella at nexo.unnoba.edu.ar)\n\"\"\"\n\nimport numpy as np\nfrom random import random\n\ndef hill_climb(solucion_inicial, evaluacion, obtener_vecinos):\n \"\"\"\n Hill climbing determinรญstico.\n \"\"\"\n soluciones_evaluadas = 1\n solucion_actual = solucion_inicial\n evaluacion_actual = evaluacion(solucion_actual)\n\n optimo_local = False\n while not optimo_local:\n vecinos = obtener_vecinos(solucion_actual)\n optimo_local = True\n for vecino in vecinos:\n evaluacion_vecino = evaluacion(vecino)\n soluciones_evaluadas += 1\n if evaluacion_vecino > evaluacion_actual:\n solucion_actual = vecino\n evaluacion_actual = evaluacion_vecino\n optimo_local = False\n\n return solucion_actual, soluciones_evaluadas\n\n\ndef simulated_annealing(solucion_inicial, evaluacion, obtener_vecinos,\n T_max, T_min, reduccion):\n \"\"\"\n Simulated Annealing.\n \"\"\"\n solucion_mejor = solucion_actual = solucion_inicial\n evaluacion_mejor = evaluacion_actual = evaluacion(solucion_actual)\n soluciones_evaluadas = 1\n\n T = T_max\n while T >= T_min:\n vecinos = obtener_vecinos(solucion_actual)\n for vecino in vecinos:\n evaluacion_vecino = evaluacion(vecino)\n soluciones_evaluadas += 1\n \n if (evaluacion_vecino > evaluacion_actual or\n random() < np.exp((evaluacion_vecino - evaluacion_actual) / T)):\n solucion_actual = vecino\n evaluacion_actual = evaluacion_vecino\n if evaluacion_mejor < evaluacion_actual:\n solucion_mejor = solucion_actual\n evaluacion_mejor = evaluacion_actual\n\n T = reduccion * T\n\n return solucion_mejor, soluciones_evaluadas\n"
] | [
[
"numpy.exp"
]
] |
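A toy run of both heuristics above, assuming integer solutions with +/-1 neighbours; both maximise 'evaluacion', whose single peak here is at x = 3:

def evaluacion(x):
    return -(x - 3) ** 2      # single maximum at x = 3

def obtener_vecinos(x):
    return [x - 1, x + 1]

print(hill_climb(0, evaluacion, obtener_vecinos))                 # (3, <evaluations>)
print(simulated_annealing(0, evaluacion, obtener_vecinos,
                          T_max=10.0, T_min=0.1, reduccion=0.9))  # usually (3, <evaluations>)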
XuanxuanGao/esvit | [
"a8328a283dc33c7cf80f556b0112d01482c9ab2b"
] | [
"esvit_embeddings.py"
] | [
"# coding=utf-8\n\n# Modified by Chunyuan Li ([email protected])\n#\n# Copyright (c) Facebook, Inc. and its affiliates.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport json\nimport numpy as np\nimport argparse\nfrom six.moves import cPickle\nfrom pathlib import Path\n\nimport pandas as pd\nimport torch\nfrom torch import nn\nimport torch.distributed as dist\nimport torch.backends.cudnn as cudnn\nfrom torchvision import datasets\nfrom torchvision import transforms as transforms\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom torchvision import models as torchvision_models\nfrom PIL import Image\n\nimport utils\nimport models.vision_transformer as vits\nfrom models.vision_transformer import DINOHead\nfrom models import build_model\nfrom config import config\nfrom config import update_config\nfrom config import save_config\n\ntorchvision_archs = sorted(name for name in torchvision_models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(torchvision_models.__dict__[name]))\n\nclass MPDataset(Dataset):\n \"\"\"\n ๅๅปบ่ชๅทฑ็ๆฐๆฎ้\n Note: ๅฝ Dataset ๅๅปบๅฅฝๅๅนถๆฒกๆๅฐๆฐๆฎ็ไบงๅบๆฅ๏ผๆไปฌๅชๆฏๅฎไนไบๆฐๆฎๅๆ ็ญพ็ไบง็ๆตๆฐด็บฟ๏ผๅชๆๅจ็ๆญฃไฝฟ็จๆถ๏ผๅฆๆๅจ่ฐ็จ next(iter(train_dataset))๏ผๆ่ขซ DataLoader่ฐ็จ๏ผๆไผ่งฆๅๆฐๆฎ้ๅ
้จ็ __getitem__() ๅฝๆฐๆฅ่ฏปๅๆฐๆฎ\n ๅฎ็ฐ็้ป่พ: ่ฏปๅๆฌๅฐๅพ็ใ่ฟ่กๆฐๆฎๅขๅผบ\n \"\"\"\n def __init__(self, image_dir, transform = None):\n super(MPDataset, self).__init__()\n self.image_path = [os.path.join(image_dir, x) for x in os.listdir(image_dir)]\n self.transform = transform\n\n def __getitem__(self, idx):\n \"\"\"\n ่ทๅๅฏนๅบidx็ๅพๅ๏ผๅนถ่ฟ่กๆฐๆฎๅขๅผบ\n \"\"\"\n image = Image.open(self.image_path[idx]) # ็จPIL่ฏปๅๆฌๅฐๅพ็๏ผๅๅผ่ๅด[0, 255]๏ผๅฝข็ถ[W, H]\n if self.transform is not None:\n image_transform = self.transform(image) # ๅฏนๅพ็่ฟ่กๆฐๆฎๅขๅผบ\n else:\n image_transform = transforms.ToTensor()(image) # ้ป่ฎคๆฏ่ฝฌๆtensor๏ผๅๅผ่ๅดๅๅฐ[0๏ผ1.0]๏ผๅฝข็ถ[C, H, W]\n return image_transform, idx # img, cls, idx, path\n\n def __len__(self):\n return len(self.image_path)\n\ndef extract_feature_pipeline(args):\n # ============ preparing data ... ============\n transform = transforms.Compose([\n transforms.Resize(256, interpolation=3),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n dataset_train = MPDataset(args.data_path, transform=transform)\n\n sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)\n data_loader_train = torch.utils.data.DataLoader(\n dataset_train,\n sampler=sampler,\n batch_size=args.batch_size_per_gpu,\n num_workers=args.num_workers,\n pin_memory=True,\n drop_last=False,\n )\n # print(f\"Data loaded with {len(dataset_train)} imgs.\")\n\n # ============ building network ... ============\n # if the network is a 4-stage vision transformer (i.e. swin)\n if 'swin' in args.arch :\n model = build_model(config, is_teacher=True)\n\n # if the network is a 4-stage vision transformer (i.e. longformer)\n elif 'vil' in args.arch :\n update_config(config, args)\n model = build_model(config, is_teacher=True)\n\n # if the network is a 4-stage vision transformer (i.e. CvT)\n elif 'cvt' in args.arch :\n update_config(config, args)\n model = build_model(config, is_teacher=True)\n\n # if the network is a vision transformer (i.e. deit_tiny, deit_small, vit_base)\n elif args.arch in vits.__dict__.keys():\n model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)\n\n # print(f\"Model {args.arch} {args.patch_size}x{args.patch_size} built.\")\n model.cuda()\n utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)\n model.eval()\n\n # ============ extract features ... 
============\n # print(\"Extracting features for image set...\")\n train_features = extract_features(model, data_loader_train)\n\n if utils.get_rank() == 0:\n train_features = nn.functional.normalize(train_features, dim=1, p=2)\n\n train_paths = dataset_train.image_path\n \n # print(\"train_paths len:\", len(train_paths))\n # print(\"train_features shape:\", train_features.shape)\n \n # save features and labels\n if args.dump_features and dist.get_rank() == 0:\n if not os.path.exists(args.dump_features):\n os.makedirs(args.dump_features)\n train_features = train_features.cpu().float().numpy()\n \n # df = pd.DataFrame(data = train_features)\n # df.insert(0, 'name', train_paths)\n # df.to_csv(os.path.join(args.dump_features,'embeddings.txt'), sep=' ', header=None, index=False)\n return train_features, train_paths\n\n\[email protected]_grad() #ไธ้่ฆ่ฎก็ฎๆขฏๅบฆ๏ผๆดๅฟซ๏ผ๏ผไนไธไผ่ฟ่กๅๅไผ ๆญ\ndef extract_features(model, data_loader):\n metric_logger = utils.MetricLogger(delimiter=\" \")\n features = None\n for samples, index in metric_logger.log_every(data_loader, 10): # samples, indexๆฏdata_loader็ๅ
็ด ๏ผๆฏimgๅๅฎๅฏนๅบ็็ดขๅผidx๏ผ็ฑmetric_logger.log_every yieldๅบ็\n samples = samples.cuda(non_blocking=True)\n index = index.cuda(non_blocking=True)\n feats = model(samples).clone()\n\n # init storage feature matrix\n if dist.get_rank() == 0 and features is None:\n features = torch.zeros(len(data_loader.dataset), feats.shape[-1]) # DataLoaderๆdatasetๅฑๆง๏ผlen(dataset)ๆฏdataset็ๆ ทๆฌๆฐใๅฏนๆฏไนไธ๏ผlen(dataloader)ๆฏไธไธชepoch้็batchไธชๆฐ๏ผไนๅณไธ่ฝฎ็่ฟญไปฃๆฌกๆฐ\n features = features.cuda(non_blocking=True)\n print(f\"Storing features into tensor of shape {features.shape}\")\n\n # get indexes from all processes\n y_all = torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device) #torch.empty()่ฟๅๅกซๅ
ๆๆชๅๅงๅๆฐๆฎ็ๅผ ้๏ผ่ฟ้็shape=(1, batch)\n y_l = list(y_all.unbind(0)) #ubind็งป้คๆๅฎ็ปดๅ๏ผ่ฟๅไธไธชๅ
็ป๏ผๅ
ๅซไบๆฒฟ็ๆๅฎ็ปดๅ็ๅ็ๅไธชๅ็ใไนๅณ๏ผๅๆไบ[device1็batch idx tuple, device2็batch idx tuple, ...]\n y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True) # ๆๅไธชdevice้็tensor index้ไธญๅฐๅไธชdevice้ฝๆ็tensor list y_lไธญ\n y_all_reduce.wait() # ๅฏน่ฟ็จไธ้๏ผ็ญๅพ
้ไฟก็ปๆใๅจ.wait() ๆง่กไนๅ๏ผๆไปฌๅฏไปฅไฟ่ฏ้ไฟกๅทฒ็ป็ปๆ๏ผๆๆindexๅทฒ็ป้ไธญๅฐy_l้ไบใ\n index_all = torch.cat(y_l) # ๅพๅฐๆๆindex\n\n # share features between processes\n feats_all = torch.empty(\n dist.get_world_size(),\n feats.size(0),\n feats.size(1),\n dtype=feats.dtype,\n device=feats.device,\n )\n output_l = list(feats_all.unbind(0)) \n output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True) #ๅฐๅไธชdevice้็feats๏ผ้ไธญๅฐๆฏไธชdevice้ฝๆ็output_l่ฟไธชlist\n output_all_reduce.wait()\n\n # update storage feature matrix\n if dist.get_rank() == 0:\n features.index_copy_(0, index_all, torch.cat(output_l)) # ๅจ็ฌฌ0็ปด๏ผๅฐtorch.cat(output_l)[i]ๆพๅจfeatures็็ฌฌindex_all[i]ไฝ็ฝฎไธ\n # features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu()) # cpu\n return features\n\n# EsViTFeat็ๅฝๆฐ้่ฆ่ฟไบ\nos.environ['MASTER_ADDR'] = 'localhost' #ไธ็จๅฝไปค่กๅฏๅจ็่ฏ๏ผไผๆฅ็ฏๅขๅ้็้๏ผๅ ไธ่ฟไธคไธชๅฐฑๆฒก้ฎ้ขไบ\nos.environ['MASTER_PORT'] = '5678'\nparser = argparse.ArgumentParser('Evaluation with weighted k-NN on ImageNet')\n\nparser.add_argument('--cfg',\n help='experiment configure file name',\n type=str)\n\nparser.add_argument('--arch', default='deit_small', type=str,\n choices=['cvt_tiny', 'swin_tiny','swin_small', 'swin_base', 'swin_large', 'swin', 'vil', 'vil_1281', 'vil_2262', 'deit_tiny', 'deit_small', 'vit_base'] + torchvision_archs,\n help=\"\"\"Name of architecture to train. For quick experiments with ViTs,\n we recommend using deit_tiny or deit_small.\"\"\")\n\nparser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size')\nparser.add_argument('--nb_knn', default=[10, 20, 100, 200], nargs='+', type=int,\n help='Number of NN to use. 20 is usually working the best.')\nparser.add_argument('--temperature', default=0.07, type=float,\n help='Temperature used in the voting coefficient')\nparser.add_argument('--pretrained_weights', default='', type=str, help=\"Path to pretrained weights to evaluate.\")\nparser.add_argument('--use_cuda', default=True, type=utils.bool_flag,\n help=\"Should we store the features on GPU? 
We recommend setting this to False if you encounter OOM\")\nparser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')\nparser.add_argument(\"--checkpoint_key\", default=\"teacher\", type=str,\n help='Key to use in the checkpoint (example: \"teacher\")')\nparser.add_argument('--dump_features', default=None,\n help='Path where to save computed features, empty for no saving')\nparser.add_argument('--load_features', default=None, help=\"\"\"If the features have\n already been computed, where to find them.\"\"\")\nparser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')\nparser.add_argument(\"--dist_url\", default=\"env://\", type=str, help=\"\"\"url used to set up\n distributed training; see https://pytorch.org/docs/stable/distributed.html\"\"\")\nparser.add_argument(\"--local_rank\", default=0, type=int, help=\"Please ignore and do not set this argument.\")\nparser.add_argument('--data_path', default='/path/to/imagenet/train/', type=str)\n\n# Dataset\nparser.add_argument('--zip_mode', type=utils.bool_flag, default=False, help=\"\"\"Whether or not\n to use zip file.\"\"\")\nparser.add_argument('opts',\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER) \n\nargs = parser.parse_args()\n\nPROJ_PATH='.'\nOUT_PATH='%s/exp_output/esvit_exp/swin/swin_tiny/bl_lr0.0005_gpu16_bs32_dense_multicrop_epoch300' % PROJ_PATH\nCKPT_PATH='%s/exp_output/esvit_exp/swin/swin_tiny/bl_lr0.0005_gpu16_bs32_dense_multicrop_epoch300/checkpoint_best.pth' % PROJ_PATH\nargs.dump_features = '%s/embeddings/epoch0300' % OUT_PATH\nargs.pretrained_weights = CKPT_PATH\nargs.checkpoint_key = 'teacher'\nargs.batch_size_per_gpu = 128\nargs.arch = 'swin_tiny'\nargs.cfg = 'experiments/imagenet/swin/swin_tiny_patch4_window7_224.yaml'\nconfig.MODEL.NUM_CLASSES = 0\n\nargs.rank = 0\nargs.world_size = 1\nargs.gpu = 0\ndist.init_process_group(\n backend=\"nccl\",\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n)\n\ntorch.cuda.set_device(args.gpu)\nprint('| distributed init (rank {}): {}'.format(\n args.rank, args.dist_url), flush=True)\ndist.barrier()\nutils.setup_for_distributed(args.rank == 0)\n\n# print(\"git:\\n {}\\n\".format(utils.get_sha()))\nprint(\"\\n\".join(\"%s: %s\" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))\n\nupdate_config(config, args)\n\nclass EsViTFeat(object):\n def __init__(self, use_model, use_layer, use_cache, args):\n self.use_model = use_model\n self.use_layer = use_layer\n self.use_cache = use_cache\n self.args = args\n\n def make_samples(self, db, verbose=True):\n \n cache_name = '{}-{}'.format(self.use_model, self.use_layer)\n cache_dir = os.path.join(self.args.base_dir, 'cache', self.use_cache)\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n cache_path = os.path.join(cache_dir, cache_name)\n # print(cache_path)\n try:\n samples = cPickle.load(open(cache_path, \"rb\", True))\n if verbose:\n print(\"Using cache..., cache path=%s\" % (cache_path))\n except:\n if verbose:\n print(\"Counting histogram..., cache path=%s\" % (cache_path))\n \n \n args.data_path = db.DB_dir\n path_to_data = args.data_path\n # print(path_to_data, len(os.listdir(path_to_data)))\n \n cudnn.benchmark = True\n embeddings, filenames = extract_feature_pipeline(args)\n # print(filenames[:5])\n\n data = db.get_data()\n # print(data['img'][:5])\n data.index = data['img']\n \n classes = [data['cls'].loc[x] for x in filenames]\n # print(classes[:5], 
len(classes))\n assert embeddings.shape[0] == len(classes), \"length of embeddings and classes not equal!\"\n \n # samples = []\n # cnt = 0\n # for d in range(len(classes)):\n # cnt += 1\n # samples.append({\n # 'img': filenames[d], \n # 'cls': classes[d], \n # 'hist': embeddings[d]\n # })\n # if (cnt % 20000) == 0:\n # cPickle.dump(samples, open(cache_path + str(int(cnt / 20000)), \"wb\", True))\n # samples = []\n \n samples = []\n for d in range(len(classes)):\n samples.append({\n 'img': filenames[d], \n 'cls': classes[d], \n 'hist': embeddings[d]\n })\n cPickle.dump(samples, open(cache_path, \"wb\", True))\n \n return samples\n"
] | [
[
"torch.nn.functional.normalize",
"torch.cat",
"torch.distributed.get_world_size",
"torch.distributed.init_process_group",
"torch.utils.data.DistributedSampler",
"torch.no_grad",
"torch.distributed.all_gather",
"torch.cuda.set_device",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"torch.distributed.barrier"
]
] |
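A usage sketch of MPDataset with the same eval transform the script builds (the image folder is a placeholder; the shape assumes RGB inputs):

from torchvision import transforms

transform = transforms.Compose([
    transforms.Resize(256, interpolation=3),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
])
dataset = MPDataset('/path/to/images', transform=transform)
image, idx = dataset[0]   # triggers __getitem__
print(image.shape, idx)   # torch.Size([3, 224, 224]) 0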
doubleblind148/IGCCF | [
"bc0e90a5322f1ba4927ec89d9181190974f7e1ba"
] | [
"recgve/losses/tensorflow_losses.py"
] | [
"#!/usr/bin/env python\n__author__ = \"XXX\"\n__email__ = \"XXX\"\n\nimport tensorflow as tf\n\n\[email protected]\ndef bpr_loss(x_u, x_i, x_j):\n \"\"\" Create BPR loss for a batch of samples\n\n Args:\n x_u (tf.Tensor): tensor containing user representations\n x_i (tf.Tensor): tensor containing positive item representations\n x_j (tf.Tensor): tensor containing negative item representation\n\n Returns:\n loss\n\n Paper: BPR: Bayesian Personalized Ranking from Implicit Feedback\n https://arxiv.org/pdf/1205.2618.pdf\n \"\"\"\n pos_scores = tf.reduce_sum(tf.multiply(x_u, x_i), axis=1)\n neg_scores = tf.reduce_sum(tf.multiply(x_u, x_j), axis=1)\n xuij = tf.math.log_sigmoid(pos_scores - neg_scores)\n loss = tf.negative(tf.reduce_sum(xuij))\n return loss\n\n\[email protected]\ndef rmse_loss(x_u, x_i, labels):\n scores = tf.reduce_sum(tf.multiply(x_u, x_i), axis=1)\n loss = tf.reduce_sum((labels - scores) ** 2)\n return loss\n\n\[email protected]\ndef l2_reg(model, alpha):\n \"\"\"\n Create l2 loss for the model variables\n\n Args:\n model: model for which compute l2 reg\n alpha (float): l2 regularization coefficient\n Returns:\n float: l2 loss\n \"\"\"\n l2_loss = 0\n for v in model.trainable_variables:\n l2_loss += tf.nn.l2_loss(v) * alpha\n return l2_loss\n"
] | [
[
"tensorflow.nn.l2_loss",
"tensorflow.multiply",
"tensorflow.reduce_sum",
"tensorflow.math.log_sigmoid"
]
] |
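A minimal check of bpr_loss on random embeddings (the batch of 4 users and embedding dim 8 are made-up); the loss is -sum(log sigmoid(pos - neg)), so it shrinks as positive items outscore negatives:

import tensorflow as tf

x_u = tf.random.normal((4, 8))   # user embeddings
x_i = tf.random.normal((4, 8))   # positive item embeddings
x_j = tf.random.normal((4, 8))   # negative item embeddings
print(bpr_loss(x_u, x_i, x_j).numpy())  # non-negative scalar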
PyJedi/PyLensing | [
"33ac22bb741564f77933f047614c4c8d4d2b6d37"
] | [
"lensing/gen_data_vortex.py"
] | [
"# Author: Pranath Reddy\n# This module is for generating galaxy-galaxy strong lensing images with vortex substructure\n\nimport numpy as np\nimport autolens as al\nimport matplotlib.pyplot as plt\nimport math\nimport scipy.io\nimport h5py\nimport os\nfrom progress.bar import Bar\nfrom astropy.cosmology import FlatLambdaCDM\nfrom astropy import units as u\n\n#physical constants\nfrom astropy.constants import G, c, M_sun\n\ndef ER(Mass,redshift_halo,redshift_gal,H0=70,Om0=0.3,Ob0=0.05):\n \"\"\"\n Mass: Mass in solar masses\n \n redshift_halo: Redshift of the DM halo\n\n redshift_gal: Redshift of the lensed galaxy\n\n H0: Hubble constant\n\n Om0: Matter content\n\n Ob0: Baryon content\n \"\"\"\n\n if redshift_gal < redshift_halo:\n raise Exception('Lensed galaxy must be at higher redshift than DM halo!')\n sys.exit()\n\n M_Halo = Mass * M_sun\n rad_to_arcsec = 206265\n\n # Choice of cosmology\n cosmo = FlatLambdaCDM(H0=H0,Om0=Om0,Ob0=Ob0)\n\n # Luminosity ditance to DM halo\n DL = cosmo.luminosity_distance(redshift_halo).to(u.m)\n\n # Luminosity distance to lensed galaxy\n DS = cosmo.luminosity_distance(redshift_gal).to(u.m)\n\n # Distance between halo and lensed galaxy\n DLS = DS - DL\n\n # Einstein radius\n theta = np.sqrt(4 * G * M_Halo/c**2 * DLS/(DL*DS))\n\n # Return radius in arcsecods\n return theta * rad_to_arcsec\n\ndef gen_data(parameters,\n pixel_scales=0.1,\n psf_shape=[11,11],\n psf_sigma=0.1,\n grid_sub_size=2,\n grid_shape=[100,100],\n sub_halo_mass=[],\n sub_halo_mass_fractions=[0.01],\n output_type='image',\n output_path='./lens_sub_vortex',\n file_name='vortex'):\n \n '''\n \n Args:\n ______\n \n pixel_scales: float\n The arc-second to pixel conversion factor of each pixel.\n \n psf_shape: []\n Shape of the Gaussian kernel\n \n psf_sigma: float\n Standard deviation for Gaussian kernel\n \n grid_sub_size: int\n The size (sub_size x sub_size) of each unmasked pixels sub-grid.\n \n grid_shape: []\n \n sub_halo_mass: []\n Masses of substructures (in solar masses)\n \n sub_halo_mass_fractions: []\n Array of fractions with respect to the mass of the DM halo\n \n output_type: str\n 'image': save the lensing images as .png files\n 'numpy': save the lesning images as a numpy array\n 'matlab': save the lesning images as a matlab (.MAT) file\n 'hdf5': save the lensing images as a HDF file\n \n output_path: str\n \n file_name: str\n \n '''\n \n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n bar = Bar('Processing lensing images', max=parameters.shape[0])\n lensing_images = []\n for i in range(parameters.shape[0]):\n \n params = parameters[i]\n psf = al.Kernel.from_gaussian(shape_2d=(psf_shape[0], psf_shape[1]), sigma=psf_sigma, pixel_scales=pixel_scales)\n grid = al.Grid.uniform(shape_2d=(grid_shape[0], grid_shape[1]), pixel_scales=pixel_scales, sub_size=grid_sub_size)\n\n vortex_profiles = []\n \n # Dark Matter Halo\n vortex_profiles.append((\"dmh_profile\",al.mp.SphericalIsothermal(centre=(params[8], params[9]), einstein_radius=params[10])))\n \n # Calculate the positional parameters for vortex substructure\n resolution = 25 # no of sub halos to consider\n vortex_len = params[23]\n x_start = params[21] - (vortex_len/2*math.cos(math.radians(params[24])))\n y_start = params[22] - (vortex_len/2*math.sin(math.radians(params[24])))\n delta = vortex_len/resolution\n\n if sub_halo_mass == []:\n \n if sub_halo_mass_fractions.all() == [0.01]:\n \n # Linear mass distribution for substructure (string of mass on galactic scales)\n for j in range(resolution):\n 
vortex_profiles.append((\"point_mass_profile_\" + str(j+1),\n al.mp.PointMass(centre=(x_start + j*delta*math.cos(params[24]), y_start + j*delta*math.sin(params[24])), einstein_radius= ((params[25])**0.5)/resolution * params[10])\n ))\n \n if sub_halo_mass_fractions.all() != [0.01]:\n \n fraction = np.asarray(sub_halo_mass_fractions)\n if fraction.shape[0] != resolution:\n raise Exception('Invalid number of sub halos')\n sys.exit()\n \n # Linear mass distribution for substructure (string of mass on galactic scales)\n for j in range(resolution):\n vortex_profiles.append((\"point_mass_profile_\" + str(j+1),\n al.mp.PointMass(centre=(x_start + j*delta*math.cos(params[24]), y_start + j*delta*math.sin(params[24])), einstein_radius= ((fraction[j])**0.5) * params[10])\n ))\n \n if sub_halo_mass != []:\n \n sub_halo_mass = np.asarray(sub_halo_mass)\n if sub_halo_mass.shape[0] != resolution:\n raise Exception('Invalid number of sub halos')\n sys.exit()\n \n # Linear mass distribution for substructure (string of mass on galactic scales)\n for j in range(resolution):\n vortex_profiles.append((\"point_mass_profile_\" + str(j+1),\n al.mp.PointMass(centre=(x_start + j*delta*math.cos(params[24]), y_start + j*delta*math.sin(params[24])), einstein_radius= ER(sub_halo_mass[j],0.5,params[15]) )\n ))\n \n # Lens galaxy\n lensing_galaxy = al.Galaxy(\n redshift=params[2],\n # Light Profile\n light=al.lp.EllipticalSersic(\n centre=(params[0], params[1]),\n axis_ratio=params[3],\n phi=params[4],\n intensity=params[5],\n effective_radius=params[7],\n sersic_index=params[6],\n ),\n # Mass Profile\n **dict(vortex_profiles),\n \n # External Shear\n shear=al.mp.ExternalShear(magnitude=params[11], phi=params[12]),\n )\n\n galaxies=[lensing_galaxy]\n \n # Calculate coordinates of lensed galaxy\n x = params[0] + params[13]*math.cos(params[14])\n y = params[1] + params[13]*math.sin(params[14])\n \n # Source galaxy\n lensed_galaxy = al.Galaxy(\n redshift=params[15],\n # Light Profile\n light=al.lp.EllipticalSersic(\n centre=(x, y),\n axis_ratio=params[16],\n phi=params[17],\n intensity=params[18],\n effective_radius=params[20],\n sersic_index=params[19],\n ),\n )\n\n galaxies.append(lensed_galaxy)\n\n tracer = al.Tracer.from_galaxies(galaxies)\n\n simulator = al.SimulatorImaging(\n exposure_time_map=al.Array.full(fill_value=300.0, shape_2d=grid.shape_2d),\n psf=psf,\n background_sky_map=al.Array.full(fill_value=0.1, shape_2d=grid.shape_2d),\n add_noise=True,\n )\n\n imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)\n image = imaging.image.in_2d\n\n # Export all the Lensing Images\n if output_type.lower() == 'image':\n \n output_file = os.path.join(output_path, file_name + str(i+1) + '.png')\n plt.imsave(output_file, image, cmap='gray')\n \n if output_type.lower() in ( 'numpy' , 'matlab' , 'hdf5' ) :\n lensing_images.append(image)\n \n bar.next()\n \n bar.finish()\n \n lensing_images = np.asarray(lensing_images)\n \n # Dump all the Lensing Images into a numpy array\n if output_type.lower() == 'numpy':\n \n output_file = os.path.join(output_path, file_name + '.npy')\n np.save(output_file, lensing_images)\n print('Dimensions of the data: {}'.format(lensing_images.shape))\n \n # Dump all the Lensing Images into a matlab (.MAT) file\n if output_type.lower() == 'matlab':\n \n output_file = os.path.join(output_path, file_name + '.mat')\n scipy.io.savemat(output_file, mdict={'vortex': lensing_images})\n print('Dimensions of the data: {}'.format(lensing_images.shape))\n \n # Dump all the Lensing Images into a HDF 
file\n if output_type.lower() == 'hdf5':\n \n output_file = os.path.join(output_path, file_name + '.h5')\n with h5py.File(output_file, 'w') as hf:\n hf.create_dataset(\"vortex\", data=lensing_images)\n print('Dimensions of the data: {}'.format(lensing_images.shape))\n\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.imsave",
"numpy.save",
"numpy.asarray",
"numpy.sqrt"
]
] |
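A quick sanity check of ER(), assuming a hypothetical 1e12 solar-mass halo at z = 0.5 lensing a source at z = 1.0:

theta_E = ER(1e12, redshift_halo=0.5, redshift_gal=1.0)
print(theta_E)  # Einstein radius in arcseconds (a dimensionless astropy Quantity scaled by 206265)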
Fosstack/vmaf | [
"e13b6c383ec20dba0cbd64d3265cfe49677a6f18"
] | [
"python/vmaf/core/feature_extractor.py"
] | [
"from abc import ABCMeta, abstractmethod\nfrom xml.etree import ElementTree\n\nfrom vmaf.tools.decorator import override\n\n__copyright__ = \"Copyright 2016-2020, Netflix, Inc.\"\n__license__ = \"BSD+Patent\"\n\nimport re\nimport numpy as np\nimport ast\n\nfrom vmaf import ExternalProgramCaller\nfrom vmaf.core.executor import Executor\nfrom vmaf.core.result import Result\nfrom vmaf.tools.reader import YuvReader\n\n\nclass FeatureExtractor(Executor):\n \"\"\"\n FeatureExtractor takes in a list of assets, and run feature extraction on\n them, and return a list of corresponding results. A FeatureExtractor must\n specify a unique type and version combination (by the TYPE and VERSION\n attribute), so that the Result generated by it can be identified.\n\n A derived class of FeatureExtractor must:\n 1) Override TYPE and VERSION\n 2) Override _generate_result(self, asset), which call a\n command-line executable and generate feature scores in a log file.\n 3) Override _get_feature_scores(self, asset), which read the feature\n scores from the log file, and return the scores in a dictionary format.\n For an example, follow VmafFeatureExtractor.\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n @property\n @abstractmethod\n def ATOM_FEATURES(self):\n raise NotImplementedError\n\n def _read_result(self, asset):\n result = {}\n result.update(self._get_feature_scores(asset))\n executor_id = self.executor_id\n return Result(asset, executor_id, result)\n\n @classmethod\n def get_scores_key(cls, atom_feature):\n return \"{type}_{atom_feature}_scores\".format(\n type=cls.TYPE, atom_feature=atom_feature)\n\n @classmethod\n def get_score_key(cls, atom_feature):\n return \"{type}_{atom_feature}_score\".format(\n type=cls.TYPE, atom_feature=atom_feature)\n\n def _get_feature_scores(self, asset):\n # routine to read the feature scores from the log file, and return\n # the scores in a dictionary format.\n\n log_file_path = self._get_log_file_path(asset)\n\n atom_feature_scores_dict = {}\n atom_feature_idx_dict = {}\n for atom_feature in self.ATOM_FEATURES:\n atom_feature_scores_dict[atom_feature] = []\n atom_feature_idx_dict[atom_feature] = 0\n\n with open(log_file_path, 'rt') as log_file:\n for line in log_file.readlines():\n for atom_feature in self.ATOM_FEATURES:\n re_template = \"{af}: ([0-9]+) ([a-zA-Z0-9.-]+)\".format(af=atom_feature)\n mo = re.match(re_template, line)\n if mo:\n\n cur_idx = int(mo.group(1))\n assert cur_idx == atom_feature_idx_dict[atom_feature]\n\n # parse value, allowing NaN and inf\n val = float(mo.group(2))\n if np.isnan(val) or np.isinf(val):\n val = None\n\n atom_feature_scores_dict[atom_feature].append(val)\n atom_feature_idx_dict[atom_feature] += 1\n continue\n\n len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])\n assert len_score != 0\n for atom_feature in self.ATOM_FEATURES[1:]:\n assert len_score == len(atom_feature_scores_dict[atom_feature]), \\\n \"Feature data possibly corrupt. 
Run cleanup script and try again.\"\n\n feature_result = {}\n\n for atom_feature in self.ATOM_FEATURES:\n scores_key = self.get_scores_key(atom_feature)\n feature_result[scores_key] = atom_feature_scores_dict[atom_feature]\n\n return feature_result\n\n\nclass VmafFeatureExtractor(FeatureExtractor):\n\n TYPE = \"VMAF_feature\"\n\n # VERSION = '0.1' # vmaf_study; Anush's VIF fix\n # VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr\n # VERSION = '0.2.1' # expose vif num/den of each scale\n # VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case\n # VERSION = '0.2.2b' # expose adm_den/num_scalex\n # VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef\n # VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step\n # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2\n VERSION = '0.2.4c' # Modify by moving motion2 to c code\n\n ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2',\n 'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr',\n 'vif_num_scale0', 'vif_den_scale0',\n 'vif_num_scale1', 'vif_den_scale1',\n 'vif_num_scale2', 'vif_den_scale2',\n 'vif_num_scale3', 'vif_den_scale3',\n 'adm_num_scale0', 'adm_den_scale0',\n 'adm_num_scale1', 'adm_den_scale1',\n 'adm_num_scale2', 'adm_den_scale2',\n 'adm_num_scale3', 'adm_den_scale3',\n ]\n\n DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3',\n 'vif2', 'adm2', 'adm3',\n 'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',\n ]\n\n ADM2_CONSTANT = 0\n ADM_SCALE_CONSTANT = 0\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate feature\n # scores in the log file.\n\n quality_width, quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)\n\n @classmethod\n @override(Executor)\n def _post_process_result(cls, result):\n\n result = super(VmafFeatureExtractor, cls)._post_process_result(result)\n\n # adm2 =\n # (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT)\n adm2_scores_key = cls.get_scores_key('adm2')\n adm_num_scores_key = cls.get_scores_key('adm_num')\n adm_den_scores_key = cls.get_scores_key('adm_den')\n result.result_dict[adm2_scores_key] = list(\n (np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) /\n (np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT)\n )\n\n # vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3\n vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0')\n vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0')\n vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1')\n vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1')\n vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2')\n vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2')\n vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3')\n vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3')\n vif_scale0_scores_key = cls.get_scores_key('vif_scale0')\n vif_scale1_scores_key = cls.get_scores_key('vif_scale1')\n 
vif_scale2_scores_key = cls.get_scores_key('vif_scale2')\n vif_scale3_scores_key = cls.get_scores_key('vif_scale3')\n result.result_dict[vif_scale0_scores_key] = list(\n (np.array(result.result_dict[vif_num_scale0_scores_key])\n / np.array(result.result_dict[vif_den_scale0_scores_key]))\n )\n result.result_dict[vif_scale1_scores_key] = list(\n (np.array(result.result_dict[vif_num_scale1_scores_key])\n / np.array(result.result_dict[vif_den_scale1_scores_key]))\n )\n result.result_dict[vif_scale2_scores_key] = list(\n (np.array(result.result_dict[vif_num_scale2_scores_key])\n / np.array(result.result_dict[vif_den_scale2_scores_key]))\n )\n result.result_dict[vif_scale3_scores_key] = list(\n (np.array(result.result_dict[vif_num_scale3_scores_key])\n / np.array(result.result_dict[vif_den_scale3_scores_key]))\n )\n\n # vif2 =\n # ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) +\n # (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0\n vif_scores_key = cls.get_scores_key('vif2')\n result.result_dict[vif_scores_key] = list(\n (\n (np.array(result.result_dict[vif_num_scale0_scores_key])\n / np.array(result.result_dict[vif_den_scale0_scores_key])) +\n (np.array(result.result_dict[vif_num_scale1_scores_key])\n / np.array(result.result_dict[vif_den_scale1_scores_key])) +\n (np.array(result.result_dict[vif_num_scale2_scores_key])\n / np.array(result.result_dict[vif_den_scale2_scores_key])) +\n (np.array(result.result_dict[vif_num_scale3_scores_key])\n / np.array(result.result_dict[vif_den_scale3_scores_key]))\n ) / 4.0\n )\n\n # adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3\n adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0')\n adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0')\n adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1')\n adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1')\n adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2')\n adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2')\n adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3')\n adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3')\n adm_scale0_scores_key = cls.get_scores_key('adm_scale0')\n adm_scale1_scores_key = cls.get_scores_key('adm_scale1')\n adm_scale2_scores_key = cls.get_scores_key('adm_scale2')\n adm_scale3_scores_key = cls.get_scores_key('adm_scale3')\n result.result_dict[adm_scale0_scores_key] = list(\n (np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)\n )\n result.result_dict[adm_scale1_scores_key] = list(\n (np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)\n )\n result.result_dict[adm_scale2_scores_key] = list(\n (np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)\n )\n result.result_dict[adm_scale3_scores_key] = list(\n (np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)\n )\n\n # adm3 = \\\n # (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT))\n # + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT))\n # + ((adm_num_scale2 + 
ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT))\n # + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0\n adm3_scores_key = cls.get_scores_key('adm3')\n result.result_dict[adm3_scores_key] = list(\n (\n ((np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) +\n ((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) +\n ((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)) +\n ((np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT))\n ) / 4.0\n )\n\n # validate\n for feature in cls.DERIVED_ATOM_FEATURES:\n assert cls.get_scores_key(feature) in result.result_dict\n\n return result\n\n\nclass VifFrameDifferenceFeatureExtractor(FeatureExtractor):\n\n TYPE = \"VifDiff_feature\"\n\n VERSION = '0.1'\n\n ATOM_FEATURES = ['vifdiff',\n 'vifdiff_num', 'vifdiff_den',\n 'vifdiff_num_scale0', 'vifdiff_den_scale0',\n 'vifdiff_num_scale1', 'vifdiff_den_scale1',\n 'vifdiff_num_scale2', 'vifdiff_den_scale2',\n 'vifdiff_num_scale3', 'vifdiff_den_scale3',\n ]\n\n DERIVED_ATOM_FEATURES = ['vifdiff_scale0', 'vifdiff_scale1', 'vifdiff_scale2', 'vifdiff_scale3',\n ]\n\n ADM2_CONSTANT = 0\n ADM_SCALE_CONSTANT = 0\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate feature\n # scores in the log file.\n\n quality_width, quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n ExternalProgramCaller.call_vifdiff_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)\n\n @classmethod\n @override(Executor)\n def _post_process_result(cls, result):\n\n result = super(VifFrameDifferenceFeatureExtractor, cls)._post_process_result(result)\n\n # vifdiff_scalei = vifdiff_num_scalei / vifdiff_den_scalei, i = 0, 1, 2, 3\n vifdiff_num_scale0_scores_key = cls.get_scores_key('vifdiff_num_scale0')\n vifdiff_den_scale0_scores_key = cls.get_scores_key('vifdiff_den_scale0')\n vifdiff_num_scale1_scores_key = cls.get_scores_key('vifdiff_num_scale1')\n vifdiff_den_scale1_scores_key = cls.get_scores_key('vifdiff_den_scale1')\n vifdiff_num_scale2_scores_key = cls.get_scores_key('vifdiff_num_scale2')\n vifdiff_den_scale2_scores_key = cls.get_scores_key('vifdiff_den_scale2')\n vifdiff_num_scale3_scores_key = cls.get_scores_key('vifdiff_num_scale3')\n vifdiff_den_scale3_scores_key = cls.get_scores_key('vifdiff_den_scale3')\n vifdiff_scale0_scores_key = cls.get_scores_key('vifdiff_scale0')\n vifdiff_scale1_scores_key = cls.get_scores_key('vifdiff_scale1')\n vifdiff_scale2_scores_key = cls.get_scores_key('vifdiff_scale2')\n vifdiff_scale3_scores_key = cls.get_scores_key('vifdiff_scale3')\n result.result_dict[vifdiff_scale0_scores_key] = list(\n (np.array(result.result_dict[vifdiff_num_scale0_scores_key])\n / np.array(result.result_dict[vifdiff_den_scale0_scores_key]))\n )\n result.result_dict[vifdiff_scale1_scores_key] = list(\n 
(np.array(result.result_dict[vifdiff_num_scale1_scores_key])\n / np.array(result.result_dict[vifdiff_den_scale1_scores_key]))\n )\n result.result_dict[vifdiff_scale2_scores_key] = list(\n (np.array(result.result_dict[vifdiff_num_scale2_scores_key])\n / np.array(result.result_dict[vifdiff_den_scale2_scores_key]))\n )\n result.result_dict[vifdiff_scale3_scores_key] = list(\n (np.array(result.result_dict[vifdiff_num_scale3_scores_key])\n / np.array(result.result_dict[vifdiff_den_scale3_scores_key]))\n )\n\n # validate\n for feature in cls.DERIVED_ATOM_FEATURES:\n assert cls.get_scores_key(feature) in result.result_dict\n\n return result\n\n\nclass VmafrcFeatureExtractorMixin(object):\n\n @override(FeatureExtractor)\n def _get_feature_scores(self, asset):\n\n assert hasattr(self, '_get_log_file_path')\n assert hasattr(self, 'ATOM_FEATURES')\n assert hasattr(self, 'ATOM_FEATURES_TO_VMAFRC_KEY_DICT')\n assert hasattr(self, 'get_scores_key')\n\n log_file_path = self._get_log_file_path(asset)\n tree = ElementTree.parse(log_file_path)\n root = tree.getroot()\n\n feature_scores = [[] for _ in self.ATOM_FEATURES]\n\n for frame in root.findall('frames/frame'):\n for i_feature, feature in enumerate(self.ATOM_FEATURES):\n try:\n feature_scores[i_feature].append(float(frame.attrib[self.ATOM_FEATURES_TO_VMAFRC_KEY_DICT[feature]]))\n except KeyError:\n pass # some features may be missing\n\n for i_feature, feature in enumerate(self.ATOM_FEATURES):\n assert len(feature_scores[i_feature]) != 0\n assert len(feature_scores[i_feature]) == len(feature_scores[0])\n\n feature_result = {}\n for i_feature, feature in enumerate(self.ATOM_FEATURES):\n feature_result[self.get_scores_key(feature)] = feature_scores[i_feature]\n\n return feature_result\n\n\nclass PsnrFeatureExtractor(VmafrcFeatureExtractorMixin, FeatureExtractor):\n\n TYPE = \"PSNR_feature\"\n # VERSION = \"1.0\"\n VERSION = \"1.1\" # call vmaf_rc to replace standalone psnr exec\n\n ATOM_FEATURES = ['psnr']\n\n ATOM_FEATURES_TO_VMAFRC_KEY_DICT = {\n 'psnr': 'float_psnr',\n }\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate quality\n # scores in the log file.\n\n quality_width, quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n ExternalProgramCaller.call_psnr(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)\n\n\nclass MomentFeatureExtractor(FeatureExtractor):\n\n TYPE = \"Moment_feature\"\n\n # VERSION = \"1.0\" # call executable\n VERSION = \"1.1\" # python only\n\n ATOM_FEATURES = ['ref1st', 'ref2nd', 'dis1st', 'dis2nd', ]\n\n DERIVED_ATOM_FEATURES = ['refvar', 'disvar', ]\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate feature\n # scores in the log file.\n\n quality_w, quality_h = asset.quality_width_height\n\n ref_scores_mtx = None\n with YuvReader(filepath=asset.ref_procfile_path, width=quality_w, height=quality_h,\n yuv_type=self._get_workfile_yuv_type(asset)) as ref_yuv_reader:\n scores_mtx_list = []\n i = 0\n for ref_yuv in ref_yuv_reader:\n ref_y = ref_yuv[0]\n ref_y = ref_y.astype(np.double)\n firstm = ref_y.mean()\n secondm = ref_y.var() + firstm**2\n scores_mtx_list.append(np.hstack(([firstm], [secondm])))\n i += 1\n ref_scores_mtx = np.vstack(scores_mtx_list)\n\n dis_scores_mtx = None\n with 
YuvReader(filepath=asset.dis_procfile_path, width=quality_w, height=quality_h,\n yuv_type=self._get_workfile_yuv_type(asset)) as dis_yuv_reader:\n scores_mtx_list = []\n i = 0\n for dis_yuv in dis_yuv_reader:\n dis_y = dis_yuv[0]\n dis_y = dis_y.astype(np.double)\n firstm = dis_y.mean()\n secondm = dis_y.var() + firstm**2\n scores_mtx_list.append(np.hstack(([firstm], [secondm])))\n i += 1\n dis_scores_mtx = np.vstack(scores_mtx_list)\n\n assert ref_scores_mtx is not None and dis_scores_mtx is not None\n\n log_dict = {'ref_scores_mtx': ref_scores_mtx.tolist(),\n 'dis_scores_mtx': dis_scores_mtx.tolist()}\n\n log_file_path = self._get_log_file_path(asset)\n with open(log_file_path, 'wt') as log_file:\n log_file.write(str(log_dict))\n\n def _get_feature_scores(self, asset):\n # routine to read the feature scores from the log file, and return\n # the scores in a dictionary format.\n\n log_file_path = self._get_log_file_path(asset)\n\n with open(log_file_path, 'rt') as log_file:\n log_str = log_file.read()\n log_dict = ast.literal_eval(log_str)\n ref_scores_mtx = np.array(log_dict['ref_scores_mtx'])\n dis_scores_mtx = np.array(log_dict['dis_scores_mtx'])\n\n _, num_ref_features = ref_scores_mtx.shape\n assert num_ref_features == 2 # ref1st, ref2nd\n _, num_dis_features = dis_scores_mtx.shape\n assert num_dis_features == 2 # dis1st, dis2nd\n\n feature_result = {}\n feature_result[self.get_scores_key('ref1st')] = list(ref_scores_mtx[:, 0])\n feature_result[self.get_scores_key('ref2nd')] = list(ref_scores_mtx[:, 1])\n feature_result[self.get_scores_key('dis1st')] = list(dis_scores_mtx[:, 0])\n feature_result[self.get_scores_key('dis2nd')] = list(dis_scores_mtx[:, 1])\n\n return feature_result\n\n @classmethod\n @override(Executor)\n def _post_process_result(cls, result):\n\n result = super(MomentFeatureExtractor, cls)._post_process_result(result)\n\n # calculate refvar and disvar from ref1st, ref2nd, dis1st, dis2nd\n refvar_scores_key = cls.get_scores_key('refvar')\n ref1st_scores_key = cls.get_scores_key('ref1st')\n ref2nd_scores_key = cls.get_scores_key('ref2nd')\n disvar_scores_key = cls.get_scores_key('disvar')\n dis1st_scores_key = cls.get_scores_key('dis1st')\n dis2nd_scores_key = cls.get_scores_key('dis2nd')\n get_var = lambda m: m[1] - m[0] * m[0]\n result.result_dict[refvar_scores_key] = \\\n list(map(get_var, zip(result.result_dict[ref1st_scores_key],\n result.result_dict[ref2nd_scores_key])))\n result.result_dict[disvar_scores_key] = \\\n list(map(get_var, zip(result.result_dict[dis1st_scores_key],\n result.result_dict[dis2nd_scores_key])))\n\n # validate\n for feature in cls.DERIVED_ATOM_FEATURES:\n assert cls.get_scores_key(feature) in result.result_dict\n\n return result\n\n\nclass SsimFeatureExtractor(VmafrcFeatureExtractorMixin, FeatureExtractor):\n\n TYPE = \"SSIM_feature\"\n # VERSION = \"1.0\"\n # VERSION = \"1.1\" # fix OPT_RANGE_PIXEL_OFFSET = 0\n VERSION = \"1.2\" # call vmaf_rc to replace standalone ssim exec\n\n ATOM_FEATURES = ['ssim',\n # 'ssim_l', 'ssim_c', 'ssim_s',\n ]\n\n ATOM_FEATURES_TO_VMAFRC_KEY_DICT = {\n 'ssim': 'float_ssim',\n }\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate quality\n # scores in the log file.\n\n quality_width, quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n 
ExternalProgramCaller.call_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)\n\n\nclass MsSsimFeatureExtractor(VmafrcFeatureExtractorMixin, FeatureExtractor):\n\n TYPE = \"MS_SSIM_feature\"\n # VERSION = \"1.0\"\n # VERSION = \"1.1\" # fix OPT_RANGE_PIXEL_OFFSET = 0\n VERSION = \"1.2\" # call vmaf_rc to replace standalone ms_ssim exec\n\n ATOM_FEATURES = ['ms_ssim',\n # 'ms_ssim_l_scale0', 'ms_ssim_c_scale0', 'ms_ssim_s_scale0',\n # 'ms_ssim_l_scale1', 'ms_ssim_c_scale1', 'ms_ssim_s_scale1',\n # 'ms_ssim_l_scale2', 'ms_ssim_c_scale2', 'ms_ssim_s_scale2',\n # 'ms_ssim_l_scale3', 'ms_ssim_c_scale3', 'ms_ssim_s_scale3',\n # 'ms_ssim_l_scale4', 'ms_ssim_c_scale4', 'ms_ssim_s_scale4',\n ]\n\n ATOM_FEATURES_TO_VMAFRC_KEY_DICT = {\n 'ms_ssim': 'float_ms_ssim',\n }\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate quality\n # scores in the log file.\n\n quality_width, quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n ExternalProgramCaller.call_ms_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)\n\n\n"
] | [
[
"numpy.isinf",
"numpy.array",
"numpy.isnan",
"numpy.hstack",
"numpy.vstack"
]
] |
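The `_post_process_result` overrides in the feature extractor above all apply the same elementwise pattern: a derived per-frame feature is the ratio of a numerator array to a denominator array, with an additive stabilizing constant (0 for `adm2`) folded into both terms. Below is a minimal standalone sketch of that pattern; the dictionary keys mimic `get_scores_key` output and the score values are made-up illustrative numbers, not real extractor output.

```python
# Sketch of the num/den -> derived-feature post-processing used above.
import numpy as np

ADM2_CONSTANT = 0  # same constant the extractor folds into both terms

def derive_ratio(num_scores, den_scores, constant=ADM2_CONSTANT):
    """Return per-frame derived scores (num + c) / (den + c) as a plain list."""
    num = np.asarray(num_scores, dtype=float)
    den = np.asarray(den_scores, dtype=float)
    return list((num + constant) / (den + constant))

# hypothetical per-frame scores, standing in for parsed log-file values
result_dict = {
    "VMAF_feature_adm_num_scores": [10.2, 11.0, 9.8],
    "VMAF_feature_adm_den_scores": [12.0, 12.5, 11.9],
}

result_dict["VMAF_feature_adm2_scores"] = derive_ratio(
    result_dict["VMAF_feature_adm_num_scores"],
    result_dict["VMAF_feature_adm_den_scores"],
)
print(result_dict["VMAF_feature_adm2_scores"])
```

The same helper covers `vif_scalei`, `adm_scalei`, and the averaged `vif2`/`adm3` features, which are simply means of four such ratios.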
gcpeixoto/ICD | [
"bae7d02cd467240649c89b0ba4440966fba18cc7"
] | [
"_build/jupyter_execute/ipynb/15-otimizacao.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Otimizaรงรฃo\n\n# ## Introduรงรฃo\n# \n# Problemas de otimizaรงรฃo (POs) sรฃo encontrados em diversas situaรงรตes da Engenharia, em particular na Engenharia de Produรงรฃo. Em uma linha de produรงรฃo, por exemplo, a otimizaรงรฃo de custos com logรญstica, recursos humanos, matรฉria-prima sรฃo exemplos de onde podemos empregar mรฉtodos computacionais para obter soluรงรตes _รณtimas_. Entretanto, princรญpios de otimizaรงรฃo sรฃo a base de muitos algoritmos e aplicaรงรตes de inteligรชncia artificial, em particular, no aprendizado de mรกquina. Mรกquinas de vetor de suporte (_support vector machines_) sรฃo um exemplo de onde se usa otimizaรงรฃo, jรก que podem ser formuladas como problemas convexos quadrรกticos.\n# \n# Problemas de otimizaรงรฃo sรฃo comumente tratados como *problemas de minimizaรงรฃo*, onde se busca o _mรญnimo global_ de uma _funรงรฃo objetivo_ (FO) escalar $f(x)$, visto que maximizar $f(x)$ รฉ equivalente a minimizar $-f(x)$. \n# \n# Entretanto, esses problemas sรฃo acompanhados de _restriรงรตes_, que podem ser representadas por uma igualdade ou por uma desigualdade. Quando uma restriรงรฃo รฉ escrita na forma $g(x) = 0$, dizemos que $g(x)$ รฉ uma _restriรงรฃo de igualdade_; quando escrita na forma $h(x) \\leq 0$, dizemos que $h(x)$ รฉ uma _restriรงรฃo de desigualdade_.\n# \n# Neste capรญtulo, faremos uma breve explanaรงรฃo sobre otimizaรงรฃo tomando o cรกlculo de derivadas e pontos crรญticos como elementos fundamentais. Utilizaremos recursos de computaรงรฃo simbรณlica para resolver um problema unidimensional e revisitaremos conceitos aprendidos nas disciplinas de Cรกlculo.\n\n# ### Classificaรงรฃo de problemas de otimizaรงรฃo\n# \n# Problemas de otimizaรงรฃo (PO) sรฃo classificados com base nas propriedades das funรงรตes $f(x)$, $g(x)$ e $h(x)$. Em linhas gerais, um PO pode ser:\n# \n# - _univariado_ (ou _unidimensional_), se $x$ รฉ escalar, i.e. $x \\in \\mathbb{R}$;\n# - _multivariado_ (ou _multidimensional_), se $x$ รฉ um vetor, i.e. $x \\in \\mathbb{R}^n$.\n# - _linear_: se a FO e as restriรงรตes sรฃo funรงรตes lineares. Neste caso, por razรตes histรณricas, diz-se que o problema รฉ de _programaรงรฃo linear_.\n# - _nรฃo-linear_: se a FO e as restriรงรตes sรฃo funรงรตes nรฃo-lineares. Neste caso, diz-se que o problema รฉ de _programaรงรฃo nรฃo-linear_.\n# \n# Com respeito ร s restriรงรตes, um PO pode ainda ser:\n# \n# - _irrestrito_: quando nรฃo se assumem limites para os valores de $x$.\n# - _restrito_: quando limites para os valores de $x$ sรฃo impostos.\n# \n# Aqui trataremos apenas de casos em que $x \\in \\mathbb{R}$.\n\n# ### Problemas convexos\n# \n# Sabe-se que problemas nรฃo-lineares sรฃo muito mais difรญceis de resolver do que problemas lineares porque eles podem admitir uma ampla variedade de comportamentos. Um PO nรฃo-linear pode ter tanto _mรญnimos locais_ quanto _mรญnimos globais_. Logo, encontrar o _mรญnimo global_ de uma funรงรฃo $f(x)$ nรฃo-linear exige tรฉcnicas aperfeiรงoadas. \n# \n# Neste sentido, uma subclasse de problemas nรฃo-lineares que pode ser resolvida eficientemente sรฃo os chamados _convexos_. Em problemas convexos, a funรงรฃo $f(x)$ รฉ _convexa_. Mas o que รฉ uma _funรงรฃo convexa_?\n# \n# Uma funรงรฃo convexa definida em um intervalo $[a,b]$ รฉ aquela em que todos os seus valores estรฃo abaixo da reta secante que passa pelos pontos $(a,f(a))$ e $(b,f(b)$. 
Isto, por sua vez, garante que ela contenha _somente_ um mรญnimo global.\n\n# Importaremos os seguintes mรณdulos:\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sympy as sy\nsy.init_printing()\n\n\n# **Exemplo**: a funรงรฃo $f(x) = 3x^2 - 0.36x - 11.2$ รฉ convexa em $[-2,3]$.\n\n# In[2]:\n\n\n# domรญnio\na,b = -2,3\nx = np.linspace(a,b,100) \n\n# funรงรฃo e valores nos extremos\nf = lambda x: 5*x**2 - 10.36*x - 11.2 \nfa,fb = f(a),f(b) \n\n# reta secante\ns = fa + (fb - fa)/(b - a)*(x - a)\n\n# ponto de mรญnimo: -b/(2a)\nxmin = 10.36/10 \n\n# plotagem de funรงรตes\nplt.figure(figsize=(5,3))\nplt.plot(x,f(x))\nplt.plot(x,s,color='#ffa500')\n\n# pontos da secante\nplt.plot(a,f(a),'o',color='#ffa500')\nplt.plot(b,f(b),'o',color='#ffa500')\n\n# ponto de mรญnimo\nplt.plot(xmin,f(xmin),'*r',ms=10);\n\nplt.title('Exemplo de funรงรฃo convexa'); \n\n\n# **Exemplo**: a funรงรฃo $p(x) = 10x^2\\textrm{sen}(6x) - 10.36\\exp(x/8) - 11.2$ nรฃo รฉ convexa em $[-2,3]$.\n\n# In[3]:\n\n\n# funรงรฃo\np = lambda x: 10*x**2*np.sin(6*x) - 10.36*x*np.exp(x/8) - 11.2 \n\n# extremos\npa,pb = p(a),p(b)\n\n# secante\nt = pa + (pb - pa)/(b - a)*(x - a) \n\n# plotagem de funรงรตes\nplt.figure(figsize=(5,3))\nplt.plot(x,p(x))\nplt.plot(x,t,color='#ffa500')\n\n# pontos da secante\nplt.plot(a,p(a),'o',color='#ffa500')\nplt.plot(b,p(b),'o',color='#ffa500')\n\n# mรญnimos locais\nxloc = [-1.33868618,0.88811853,1.87451904]\n\nfor xl in xloc:\n plt.plot(xl,p(xl),'or');\n\n# mรญnimo global\nxmin2 = 2.90547127\n\nplt.plot(xmin2,p(xmin2),'*r',ms=10);\n\nplt.title('Exemplo de funรงรฃo nรฃo convexa'); \n\n\n# Como vemos acima, a funรงรฃo $p(x)$ admite 3 mรญnimos locais e um mรญnimo global. Pense um pouco sobre que estratรฉgia computacional vocรช utilizaria para encontrar os mรญnimos locais. Mais adiante mostraremos como localizar o mรญnimo global para funรงรตes univariadas contรญnuas (quando possรญvel).\n\n# ### Pontos de sela\n# \n# Como vimos acima, a convexidade de uma funรงรฃo รฉ uma propriedade muito importante para que um mรญnimo global seja localizado. Como sabemos do Cรกlculo, pontos de mรกximo ou mรญnimo identificam-se como _pontos crรญticos_ de uma funรงรฃo nos quais a primeira derivada da funรงรฃo se anula.\n# \n# Casos particulares onde a derivada de uma FO anula-se mas o ponto nรฃo pode ser definido como de mรญnimo ou mรกximo podem ocorrer. Tais situaรงรตes implicam a existรชncia dos chamados _pontos de sela_. Uma funรงรฃo com um รบnico ponto de sela, por exemplo, nรฃo admitirรก mรญnimo global nem mรญnimo local. Para testarmos se um ponto crรญtico รฉ um ponto de sela, devemos verificar o sinal da segunda derivada da funรงรฃo. Uma das seguintes situaรงรตes deve ser obtida em um ponto crรญtico $x^*$:\n# \n# - _ponto de mรญnimo:_ $f''(x^*) > 0$\n# - _ponto de mรกximo:_ $f''(x^*) < 0$\n# - _ponto de sela:_ $f''(x^*) = 0$\n\n# **Exemplo:** qualquer funรงรฃo quadrรกtica admite ou um ponto de mรญnimo ou de mรกximo. 
A funรงรฃo $f(x) = x^3$ possui um ponto de sela em $x^* = 0$.\n\n# In[4]:\n\n\nx = np.linspace(-1,1)\n\nplt.figure(figsize=(10,3))\n\nplt.subplot(131)\nplt.plot(x,x**2 + 1)\nplt.plot(0,1,'r*',ms=10)\nplt.title('mรญnimo global')\n\nplt.subplot(132)\nplt.plot(x,-x**2 + 1)\nplt.plot(0,1,'r*',ms=10)\nplt.title('mรกximo global')\n\nplt.subplot(133)\nplt.plot(x,x**3 + 1)\nplt.plot(0,1,'r*',ms=10)\nplt.title('ponto de sela');\n\n\n# ## Otimizaรงรฃo univariada\n\n# Como dissemos anteriormente, a otimizaรงรฃo univariada visa resolver um problema de minimizaรงรฃo tomando uma FO que depende apenas de uma variรกvel. Matematicamente, podemos descrever este problema da seguinte forma:\n# \n# $$\\text{Encontre } x^{*} = \\min f(x), \\, \\text{sujeito a} \\, g(x) = 0, h(x) \\leq 0.$$\n# \n# Em geral, $x$ รฉ uma _variรกvel de decisรฃo_, isto รฉ, uma quantidade que pode ser ajustada livremente (ex. comprimentos, รกreas, รขngulos etc.). \n# \n# As tรฉcnicas utilizadas para a resoluรงรฃo de um problema desse tipo sรฃo baseadas em mรฉtodos analรญticos (busca pelos zeros das derivadas) ou em mรฉtodos computacionais (determinaรงรฃo de raรญzes por processos iterativos). Mรฉtodos chamados de _root finding_ sรฃo estudados em um curso introdutรณrio de Mรฉtodos Numรฉricos.\n# \n# Para exemplificar, usaremos uma abordagem analรญtica por meio de computaรงรฃo simbรณlica (mรณdulo `sympy`) para resolver um problema que pode ser exibido como de otimizaรงรฃo univariada.\n\n# ### Problema resolvido\n# \n# Consideremos o seguinte problema: _maximizar a รกrea do retรขngulo inscrito em uma elipse._\n\n# ### Resoluรงรฃo \n# \n# Em primeiro lugar, escreveremos este problema em linguagem matemรกtica. Sabemos que a รกrea de um retรขngulo com vรฉrtice esquerdo inferior na origem da elipse e com vรฉrtice direito superior no ponto $(x,y)$ da elipse que estรก no primeiro quadrante รฉ dada por $A_r = xy$. Logo, a รกrea do retรขngulo inscrito na elipse serรก $A = 4xy$.\n# \n# A รกrea $A$ pode ser escrita em termos de $y$. Uma vez que a equaรงรฃo da elipse (centrada na origem) รฉ dada por\n# \n# $$\\frac{x^2}{a^2} + \\frac{y^2}{b^2} = 1,$$\n# \n# podemos resolver a equaรงรฃo da elipse para $x$ (ou $y$) e substituir esta soluรงรฃo na expressรฃo da รกrea para ter uma funรงรฃo $A(x)$ (ou $A(y)$). Se escolhermos $x$, o problema de otimizaรงรฃo pode ser escrito como:\n# \n# $$\\text{Encontre } x^{*} = \\min \\,( -A(x) ), \\, \\text{sujeito a} \\, x > 0.$$\n# \n# Notemos que maximizar $A(x)$ equivale a minimizar $-A(x)$. \n# \n# Na busca do ponto de mรญnimo $x^{*}$, usaremos computaรงรฃo simbรณlica.\n\n# Primeiramente, criamos variรกveis simbรณlicas que representem as variรกveis de interesse do problema e a expressรฃo da รกrea total.\n\n# In[5]:\n\n\n# cria variรกveis simbรณlicas\nx,y,a,b = sy.symbols('x,y,a,b') \n\n# รกrea do retรขngulo no 1o. quadrante รฉ xy\n# logo, รกrea total รฉ 4xy\nA = -4*x*y\nA\n\n\n# Em seguida, resolvemos a equaรงรฃo da elipse para a variรกvel $y$ utilizando a funรงรฃo `sympy.solve`.\n\n# In[6]:\n\n\n# resolve equaรงรฃo da elipse para y\nsol = sy.solve(x**2/a**2 + y**2/b**2 - 1,y)\nsol[0],sol[1]\n\n\n# Duas soluรงรตes sรฃo possรญveis para $y$. 
Porรฉm, como o nosso ponto de referรชncia sobre a elipse estรก no primeiro quadrante, tomamos a expressรฃo para $y > 0$ e a substituรญmos na expressรฃo da รกrea de forma a obter uma expressรฃo univariada $A(x)$.\n\n# In[7]:\n\n\n# substitui expressรฃo de y positivo em A para ter -A(x)\nA = A.subs({'y':sol[1]})\nA\n\n\n# Localizaremos o ponto crรญtico da funรงรฃo a partir da derivada $A'(x)$. Derivando $A$ em relaรงรฃo a $x$, obtemos: \n\n# In[8]:\n\n\n# deriva -A(x) com a,b constantes\ndAdx = A.diff(x)\ndAdx\n\n\n# Em seguida, buscamos $x^{*}$ tal que $A'(x^{*}) = \\frac{dA}{dx}(x^{*}) = 0$.\n\n# In[9]:\n\n\n# resolve A'(x*) = 0\nsol_x = sy.solve(dAdx,x)\nsol_x\n\n\n# Duas soluรงรตes, sรฃo possรญveis, porรฉm, podemos verificar qual ponto de crรญtico, de fato, รฉ o que minimizarรก $-A(x)$ atravรฉs da anรกlise da concavidade. Entรฃo, calculamos $A''(x)$, para cada ponto crรญtico.\n\n# In[10]:\n\n\n# testa A''(x) para os dois pontos\ndAdx2 = dAdx.diff(x)\ndAdx2.subs(x,sol_x[0]).simplify(),dAdx2.subs(x,sol_x[1]).simplify()\n\n\n# Uma vez que a segunda soluรงรฃo verifica a concavidade positiva, temos que o ponto crรญtico $x^{*}$ รฉ:\n\n# In[11]:\n\n\n# concavidade para cima => ponto de mรญnimo\nxs = sol_x[1]\nxs\n\n\n# Usando este valor na equaรงรฃo da elipse, obtemos a ordenada correspondente:\n\n# In[12]:\n\n\n# resolve para y > 0\nys = sy.solve(xs**2/a**2 + y**2/b**2 - 1,y)[1]\nys\n\n\n# Por fim, substituindo $x^{*}$ na expressรฃo da รกrea, temos que $A_{max}$ รฉ:\n\n# In[13]:\n\n\n# รกrea mรกxima\nA_max = A.subs(x,xs)\nA_max\n\n\n# ou, de forma, simplificada,\n\n# In[14]:\n\n\n# simplificando\nA_max.simplify()\n\n\n# ### Conclusรฃo \n# \n# A รกrea do retรขngulo inscrito na elipse serรก mรกxima quando $x = \\frac{\\sqrt{2}}{2}a$ e $y = \\frac{\\sqrt{2}}{2}b$. Portanto, $A_{max} = 2ab$, para comprimentos $a$ e $b$ de semi-eixo maior e menor.\n\n# ## Estudo paramรฉtrico de geometria\n# \n# No grรกfico abaixo, plotamos a variaรงรฃo das รกreas de retรขngulos inscritos em uma elipse arbitrรกria com semi-eixos $a$ e $b$ em funรงรฃo do comprimento $x$ da meia-base do retรขngulo atรฉ o limite da meia-base do retรขngulo de รกrea mรกxima. Adicionalmente, plotamos a variaรงรฃo do comprimento da diagonal do retรขngulo. A constante $A_{elip}$ รฉ a รกrea da elipse.\n# \n# Vocรช pode alterar os parรขmetros de construรงรฃo de elipse, o nรบmero de valores para $x$ e realizar uma nova anรกlise dos parรขmetros.\n\n# In[15]:\n\n\n# semi-eixos da elipse\na,b = 10,2\n\n# no. de retรขngulos inscritos\nnx = 40 \n\n# base variรกvel do retรขngulo\nX = np.linspace(0,np.sqrt(2)/2*a,nx)\n\n# รกrea da elipse\ne = np.pi*a*b\n\n# รกreas dos retรขngulos\nR = []\nH = []\nfor x in X:\n y = b*np.sqrt(1 - x**2/a**2) \n r = 4*x*y\n h = np.hypot(2*x,2*y) # diagonal do retรขngulo \n R.append(r)\n H.append(h)\n \n# plotagem \nfig,ax1 = plt.subplots(figsize=(6,4))\nax1.plot(X,R,'sb',mec='w',alpha=0.8,label='$A_{ret}(x)$')\nax1.plot(X,np.full(X.shape,2*a*b),'--r',alpha=0.8,label='$A_{max}$')\nax1.plot(X,np.full(X.shape,e),'-',alpha=0.8,label='$A_{elip}$')\nax1.legend(fontsize=10)\n# labels\nplt.xlabel('$x$ [compr. base ret. inscrito]')\nplt.ylabel('$A$ [รกreas]');\n\nax2 = ax1.twinx()\nax2.plot(X,H,'og',mec='w',alpha=0.8,label='$h_{ret}(x)$')\nax2.legend(loc=5,ncol=1,fontsize=10)\nplt.ylabel('$h$ [compr. diag ret.]');\n\nplt.suptitle('Variaรงรฃo de รกreas e diagonais: elipse x retรขngulo inscrito\\n');\nplt.title(f'Elipse: $x^2/({a:.1f})^2 + y^2/({b:.1f})^2 = 1$',fontsize=10);\n\n"
] | [
[
"numpy.full",
"numpy.sin",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.hypot",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.subplot"
]
] |
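As a quick sanity check of the notebook's conclusion ($A_{max} = 2ab$), the sketch below maximizes the inscribed-rectangle area symbolically for concrete semi-axes; the values a=10, b=2 are assumed here only because they match the notebook's parametric study.

```python
# Verify the worked example: maximize A(x) = 4*x*y on the ellipse.
import sympy as sy

x = sy.symbols('x', positive=True)        # first-quadrant half-base
a, b = 10, 2                              # assumed concrete semi-axes
y = b * sy.sqrt(1 - x**2 / a**2)          # first-quadrant branch of the ellipse
A = 4 * x * y                             # inscribed rectangle area

crit = sy.solve(sy.diff(A, x), x)         # critical points of A(x)
xs = crit[0]
assert sy.diff(A, x, 2).subs(x, xs) < 0   # concave down => maximum of A

print(xs)                                 # 5*sqrt(2) = sqrt(2)/2 * a
print(A.subs(x, xs).simplify())           # 40 = 2*a*b
```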
yyyyangyi/CNNs-for-Multi-Source-Remote-Sensing-Data-Fusion | [
"28f229513a759a8c9b60d67c463c6d3672bfb85f"
] | [
"data/dataset_houston.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n\n# Load Houston data set\n\n\nimport os\nimport numpy as np\nimport gdal\nfrom common import Config\n\nX_TRAIN_FILE = 'X_train.npy'\nX_TEST_FILE = 'X_test.npy'\nY_TRAIN_FILE = '2018_IEEE_GRSS_DFC_GT_TR.tif'\nY_TEST_FILE = 'Test_Labels.tif'\n\nSAMPLE_H = Config.sample_h\nSAMPLE_W = Config.sample_w\nSTRIDE_ROW = int(SAMPLE_H/2)\nSTRIDE_COL = int(SAMPLE_W/2)\n# STRIDE_ROW = SAMPLE_H\n# STRIDE_COL = SAMPLE_W\nNUM_CLASSES = Config.num_classes\n\n\ndef _get_data_set(X_data, y_data): \n data_rows = X_data.shape[1]\n data_cols = X_data.shape[2]\n assert (data_rows, data_cols) == (y_data.shape[0], y_data.shape[1])\n\n X = []\n y = []\n for r in range(0, data_rows, STRIDE_ROW):\n for c in range(0, data_cols, STRIDE_COL):\n if r+SAMPLE_H > data_rows:\n bottum = data_rows\n top = data_rows - SAMPLE_H\n else:\n bottum = r + SAMPLE_H\n top = r\n if c+SAMPLE_W > data_cols:\n left = data_cols - SAMPLE_W\n right = data_cols\n else:\n left = c\n right = c + SAMPLE_W\n X.append(X_data[:, top:bottum, left:right])\n y.append(y_data[top:bottum, left:right])\n \n return np.array(X), np.array(y)\n\ndef load_dataset(data_dir):\n X_train_data = np.load(os.path.join(data_dir, X_TRAIN_FILE))\n y_train_data = gdal.Open(os.path.join(data_dir, Y_TRAIN_FILE)).ReadAsArray()\n X_test_data = np.load(os.path.join(data_dir, X_TEST_FILE))\n y_test_data = gdal.Open(os.path.join(data_dir, Y_TEST_FILE)).ReadAsArray()\n X, y = _get_data_set(X_train_data, y_train_data)\n return X, y, X_test_data, y_test_data\n\n"
] | [
[
"numpy.array"
]
] |
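The `_get_data_set` routine above tiles the scene with half-overlapping patches and shifts any window that would overrun the array back inside the bounds. A minimal sketch of that edge-clamped sliding window follows; the array sizes and strides are small made-up values, not the Houston configuration.

```python
# Edge-clamped sliding window: every emitted patch has the full (h, w) shape.
import numpy as np

def clamped_windows(n_rows, n_cols, h, w, stride_r, stride_c):
    """Yield (top, bottom, left, right) bounds of each patch."""
    for r in range(0, n_rows, stride_r):
        top = min(r, n_rows - h)          # shift back at the bottom edge
        for c in range(0, n_cols, stride_c):
            left = min(c, n_cols - w)     # shift back at the right edge
            yield top, top + h, left, left + w

data = np.arange(10 * 12).reshape(10, 12)
patches = [data[t:b, l:r] for t, b, l, r in clamped_windows(10, 12, 4, 4, 2, 2)]
print(len(patches), patches[0].shape)     # 30 patches, each exactly 4x4
```

Note that, as in the original, windows near the bottom and right edges overlap more heavily because they are shifted back rather than dropped.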
Claude0311/fdtd | [
"4938423ed73226e45d20589a206324a5259bf79a"
] | [
"fdtd/visualization.py"
] | [
"\"\"\" visualization methods for the fdtd Grid.\n\nThis module supplies visualization methods for the FDTD Grid. They are\nimported by the Grid class and hence are available as Grid methods.\n\n\"\"\"\n\n## Imports\n\n# plotting\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as ptc\n\n# relative\nfrom .backend import backend as bd\n\n\n# 2D visualization function\n\n\ndef visualize(\n grid,\n x=None,\n y=None,\n z=None,\n cmap=\"Blues\",\n pbcolor=\"C3\",\n pmlcolor=(0, 0, 0, 0.1),\n objcolor=(1, 0, 0, 0.1),\n srccolor=\"C0\",\n detcolor=\"C2\",\n show=True,\n maxi=0.002,\n mini=0.0\n):\n \"\"\" visualize a projection of the grid and the optical energy inside the grid\n\n Args:\n x: the x-value to make the yz-projection (leave None if using different projection)\n y: the y-value to make the zx-projection (leave None if using different projection)\n z: the z-value to make the xy-projection (leave None if using different projection)\n cmap: the colormap to visualize the energy in the grid\n pbcolor: the color to visualize the periodic boundaries\n pmlcolor: the color to visualize the PML\n objcolor: the color to visualize the objects in the grid\n objcolor: the color to visualize the sources in the grid\n \"\"\"\n # imports (placed here to circumvent circular imports)\n from .sources import PointSource, LineSource\n from .boundaries import _PeriodicBoundaryX, _PeriodicBoundaryY, _PeriodicBoundaryZ\n from .boundaries import (\n _PMLXlow,\n _PMLXhigh,\n _PMLYlow,\n _PMLYhigh,\n _PMLZlow,\n _PMLZhigh,\n )\n\n # validate x, y and z\n if x is not None:\n if not isinstance(x, int):\n raise ValueError(\"the `x`-location supplied should be a single integer\")\n if y is not None or z is not None:\n raise ValueError(\n \"if an `x`-location is supplied, one should not supply a `y` or a `z`-location!\"\n )\n elif y is not None:\n if not isinstance(y, int):\n raise ValueError(\"the `y`-location supplied should be a single integer\")\n if z is not None or x is not None:\n raise ValueError(\n \"if a `y`-location is supplied, one should not supply a `z` or a `x`-location!\"\n )\n elif z is not None:\n if not isinstance(z, int):\n raise ValueError(\"the `z`-location supplied should be a single integer\")\n if x is not None or y is not None:\n raise ValueError(\n \"if a `z`-location is supplied, one should not supply a `x` or a `y`-location!\"\n )\n else:\n raise ValueError(\n \"at least one projection plane (x, y or z) should be supplied to visualize the grid!\"\n )\n\n # just to create the right legend entries:\n plt.plot([], lw=7, color=objcolor, label=\"Objects\")\n plt.plot([], lw=7, color=pmlcolor, label=\"PML\")\n plt.plot([], lw=3, color=pbcolor, label=\"Periodic Boundaries\")\n plt.plot([], lw=3, color=srccolor, label=\"Sources\")\n plt.plot([], lw=3, color=detcolor, label=\"Detectors\")\n\n # Grid energy\n grid_energy = bd.sum(grid.E ** 2 + grid.H ** 2, -1)\n if x is not None:\n assert grid.Ny > 1 and grid.Nz > 1\n xlabel, ylabel = \"y\", \"z\"\n Nx, Ny = grid.Ny, grid.Nz\n pbx, pby = _PeriodicBoundaryY, _PeriodicBoundaryZ\n pmlxl, pmlxh, pmlyl, pmlyh = _PMLYlow, _PMLYhigh, _PMLZlow, _PMLZhigh\n grid_energy = grid_energy[x, :, :]\n elif y is not None:\n assert grid.Nx > 1 and grid.Nz > 1\n xlabel, ylabel = \"z\", \"x\"\n Nx, Ny = grid.Nz, grid.Nx\n pbx, pby = _PeriodicBoundaryZ, _PeriodicBoundaryX\n pmlxl, pmlxh, pmlyl, pmlyh = _PMLZlow, _PMLZhigh, _PMLXlow, _PMLXhigh\n grid_energy = grid_energy[:, y, :].T\n elif z is not None:\n assert grid.Nx > 1 and grid.Ny > 1\n xlabel, ylabel = 
\"x\", \"y\"\n Nx, Ny = grid.Nx, grid.Ny\n pbx, pby = _PeriodicBoundaryX, _PeriodicBoundaryY\n pmlxl, pmlxh, pmlyl, pmlyh = _PMLXlow, _PMLXhigh, _PMLYlow, _PMLYhigh\n grid_energy = grid_energy[:, :, z]\n else:\n raise ValueError(\"Visualization only works for 2D grids\")\n\n for source in grid.sources:\n # LineSource\n if isinstance(source, LineSource):\n if x is not None:\n _x = [source.y[0], source.y[-1]]\n _y = [source.z[0], source.z[-1]]\n elif y is not None:\n _x = [source.z[0], source.z[-1]]\n _y = [source.x[0], source.x[-1]]\n elif z is not None:\n _x = [source.x[0], source.x[-1]]\n _y = [source.y[0], source.y[-1]]\n plt.plot(_y, _x, lw=3, color=srccolor)\n\n elif isinstance(source, PointSource):\n if x is not None:\n _x = source.y\n _y = source.z\n elif y is not None:\n _x = source.z\n _y = source.y\n elif z is not None:\n _x = source.x\n _y = source.y\n plt.plot(_y - 0.5, _x - 0.5, lw=3, marker=\"o\", color=srccolor)\n\n grid_energy[_x, _y] = 0 # do not visualize energy at location of source\n\n # LineDetector\n for detector in grid.detectors:\n if x is not None:\n _x = [detector.y[0], detector.y[-1]]\n _y = [detector.z[0], detector.z[-1]]\n elif y is not None:\n _x = [detector.z[0], detector.z[-1]]\n _y = [detector.x[0], detector.x[-1]]\n elif z is not None:\n _x = [detector.x[0], detector.x[-1]]\n _y = [detector.y[0], detector.y[-1]]\n\n plt.plot(_y, _x, lw=3, color=detcolor)\n\n # Boundaries\n for boundary in grid.boundaries:\n if isinstance(boundary, pbx):\n _x = [-0.5, -0.5, float(\"nan\"), Nx - 0.5, Nx - 0.5]\n _y = [-0.5, Ny - 0.5, float(\"nan\"), -0.5, Ny - 0.5]\n plt.plot(_y, _x, color=pbcolor, linewidth=3)\n elif isinstance(boundary, pby):\n _x = [-0.5, Nx - 0.5, float(\"nan\"), -0.5, Nx - 0.5]\n _y = [-0.5, -0.5, float(\"nan\"), Ny - 0.5, Ny - 0.5]\n plt.plot(_y, _x, color=pbcolor, linewidth=3)\n elif isinstance(boundary, pmlyl):\n patch = ptc.Rectangle(\n xy=(-0.5, -0.5),\n width=boundary.thickness,\n height=Nx,\n linewidth=0,\n edgecolor=\"none\",\n facecolor=pmlcolor,\n )\n plt.gca().add_patch(patch)\n elif isinstance(boundary, pmlxl):\n patch = ptc.Rectangle(\n xy=(-0.5, -0.5),\n width=Ny,\n height=boundary.thickness,\n linewidth=0,\n edgecolor=\"none\",\n facecolor=pmlcolor,\n )\n plt.gca().add_patch(patch)\n elif isinstance(boundary, pmlyh):\n patch = ptc.Rectangle(\n xy=(Ny - 0.5 - boundary.thickness, -0.5),\n width=boundary.thickness,\n height=Nx,\n linewidth=0,\n edgecolor=\"none\",\n facecolor=pmlcolor,\n )\n plt.gca().add_patch(patch)\n elif isinstance(boundary, pmlxh):\n patch = ptc.Rectangle(\n xy=(-0.5, Nx - boundary.thickness - 0.5),\n width=Ny,\n height=boundary.thickness,\n linewidth=0,\n edgecolor=\"none\",\n facecolor=pmlcolor,\n )\n plt.gca().add_patch(patch)\n\n for obj in grid.objects:\n if x is not None:\n _x = (obj.y.start, obj.y.stop)\n _y = (obj.z.start, obj.z.stop)\n elif y is not None:\n _x = (obj.z.start, obj.z.stop)\n _y = (obj.x.start, obj.x.stop)\n elif z is not None:\n _x = (obj.x.start, obj.x.stop)\n _y = (obj.y.start, obj.y.stop)\n\n patch = ptc.Rectangle(\n xy=(min(_y) - 0.5, min(_x) - 0.5),\n width=max(_y) - min(_y),\n height=max(_x) - min(_x),\n linewidth=0,\n edgecolor=\"none\",\n facecolor=objcolor,\n )\n plt.gca().add_patch(patch)\n\n # visualize the energy in the grid\n plt.imshow(bd.numpy(grid_energy), cmap=cmap, interpolation=\"sinc\",vmax=maxi,vmin=mini)\n\n # finalize the plot\n plt.ylabel(xlabel)\n plt.xlabel(ylabel)\n plt.ylim(Nx, -1)\n plt.xlim(-1, Ny)\n plt.figlegend()\n plt.tight_layout()\n if show:\n plt.show()\n"
] | [
[
"matplotlib.pyplot.xlim",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.figlegend"
]
] |
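The quantity `visualize` draws is the per-cell electromagnetic energy density, summed over the three field components and then sliced along one axis. Below is a numpy-only sketch of that computation, with random arrays standing in for a real `Grid`'s `E` and `H` fields.

```python
# Sketch of the energy-projection step behind visualize().
import numpy as np

rng = np.random.default_rng(0)
E = rng.normal(size=(16, 16, 16, 3))  # Ex, Ey, Ez on a 16^3 grid (stand-in data)
H = rng.normal(size=(16, 16, 16, 3))  # Hx, Hy, Hz

# local energy ~ sum over field components of E^2 + H^2
grid_energy = np.sum(E**2 + H**2, axis=-1)   # shape (16, 16, 16)

z = 8                                        # xy-projection at z = 8:
energy_xy = grid_energy[:, :, z]             # this 2D slice is what imshow() receives
print(energy_xy.shape)
```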
ppontisso/Keras-progressive_growing_of_gans | [
"e82c8d3529903463d9888fa18c98151ca0937254"
] | [
"Progressive growing of GANs/h5tool.py"
] | [
"# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\nimport os\nimport sys\nimport io\nimport glob\nimport pickle\nimport argparse\nimport threading\nimport queue\nimport traceback\nimport numpy as np\nimport scipy.ndimage\nimport PIL.Image\nimport h5py # conda install h5py\n\n#----------------------------------------------------------------------------\n\nclass HDF5Exporter:\n def __init__(self, h5_filename, resolution, channels=3):\n rlog2 = int(np.floor(np.log2(resolution)))\n assert resolution == 2 ** rlog2\n self.resolution = resolution\n self.channels = channels\n self.h5_file = h5py.File(h5_filename, 'w')\n self.h5_lods = []\n self.buffers = []\n self.buffer_sizes = []\n for lod in range(rlog2, -1, -1):\n r = 2 ** lod; c = channels\n bytes_per_item = c * (r ** 2)\n chunk_size = int(np.ceil(128.0 / bytes_per_item))\n buffer_size = int(np.ceil(512.0 * np.exp2(20) / bytes_per_item))\n #change to channel last\n lod = self.h5_file.create_dataset('data%dx%d' % (r,r), shape=(0,r,r,c), dtype=np.uint8,\n maxshape=(None,r,r,c), chunks=(chunk_size,r,r,c), compression='gzip', compression_opts=4)\n \n self.h5_lods.append(lod)\n self.buffers.append(np.zeros((buffer_size,r,r,c), dtype=np.uint8))\n self.buffer_sizes.append(0)\n\n def close(self):\n for lod in range(len(self.h5_lods)):\n self.flush_lod(lod)\n self.h5_file.close()\n\n def add_images(self, img):\n assert img.ndim == 4 and img.shape[1] == self.channels and img.shape[2] == img.shape[3]\n assert img.shape[2] >= self.resolution and img.shape[2] == 2 ** int(np.floor(np.log2(img.shape[2])))\n for lod in range(len(self.h5_lods)):\n while img.shape[2] > self.resolution / (2 ** lod):\n img = img.astype(np.float32)\n img = (img[:, :, 0::2, 0::2] + img[:, :, 0::2, 1::2] + img[:, :, 1::2, 0::2] + img[:, :, 1::2, 1::2]) * 0.25\n quant = np.uint8(np.clip(np.round(img), 0, 255))\n ofs = 0\n while ofs < quant.shape[0]:\n num = min(quant.shape[0] - ofs, self.buffers[lod].shape[0] - self.buffer_sizes[lod])\n self.buffers[lod][self.buffer_sizes[lod] : self.buffer_sizes[lod] + num] = quant[ofs : ofs + num]\n self.buffer_sizes[lod] += num\n if self.buffer_sizes[lod] == self.buffers[lod].shape[0]:\n self.flush_lod(lod)\n ofs += num\n\n def add_images_channel_last(self, img):\n assert img.ndim == 4 and img.shape[3] == self.channels and img.shape[1] == img.shape[2]\n assert img.shape[2] >= self.resolution and img.shape[2] == 2 ** int(np.floor(np.log2(img.shape[2])))\n for lod in range(len(self.h5_lods)):\n while img.shape[2] > self.resolution / (2 ** lod):\n img = img.astype(np.float32)\n img = (img[:, 0::2, 0::2, :] + \n img[:, 0::2, 1::2, :] + \n img[:, 1::2, 0::2, :] + \n img[:, 1::2, 1::2, :]) * 0.25\n\n quant = np.uint8(np.clip(np.round(img), 0, 255))\n ofs = 0\n while ofs < quant.shape[0]:\n num = min(quant.shape[0] - ofs, self.buffers[lod].shape[0] - self.buffer_sizes[lod])\n \n #print(\"self.buffers.shape:\",self.buffers[0].shape)\n\n self.buffers[lod][ self.buffer_sizes[lod] : self.buffer_sizes[lod] + num] = quant[ofs : ofs + num]\n self.buffer_sizes[lod] += num\n if self.buffer_sizes[lod] == self.buffers[lod].shape[0]:\n self.flush_lod(lod)\n ofs += num\n\n def num_images(self):\n return self.h5_lods[0].shape[0] + self.buffer_sizes[0]\n \n def 
flush_lod(self, lod):\n num = self.buffer_sizes[lod]\n if num > 0:\n self.h5_lods[lod].resize(self.h5_lods[lod].shape[0] + num, axis=0)\n self.h5_lods[lod][-num:] = self.buffers[lod][:num]\n self.buffer_sizes[lod] = 0\n\n#----------------------------------------------------------------------------\n\nclass ExceptionInfo(object):\n def __init__(self):\n self.type, self.value = sys.exc_info()[:2]\n self.traceback = traceback.format_exc()\n\n#----------------------------------------------------------------------------\n\nclass WorkerThread(threading.Thread):\n def __init__(self, task_queue):\n threading.Thread.__init__(self)\n self.task_queue = task_queue\n\n def run(self):\n while True:\n func, args, result_queue = self.task_queue.get()\n if func is None:\n break\n try:\n result = func(*args)\n except:\n result = ExceptionInfo()\n result_queue.put((result, args))\n\n#----------------------------------------------------------------------------\n\nclass ThreadPool(object):\n def __init__(self, num_threads):\n assert num_threads >= 1\n self.task_queue = queue.Queue()\n self.result_queues = dict()\n self.num_threads = num_threads\n for idx in range(self.num_threads):\n thread = WorkerThread(self.task_queue)\n thread.daemon = True\n thread.start()\n\n def add_task(self, func, args=()):\n assert hasattr(func, '__call__') # must be a function\n if func not in self.result_queues:\n self.result_queues[func] = queue.Queue()\n self.task_queue.put((func, args, self.result_queues[func]))\n\n def get_result(self, func, verbose_exceptions=True): # returns (result, args)\n result, args = self.result_queues[func].get()\n if isinstance(result, ExceptionInfo):\n if verbose_exceptions:\n print('\\n\\nWorker thread caught an exception:\\n' + result.traceback + '\\n', end=' ')\n raise result.type(result.value)\n return result, args\n\n def finish(self):\n for idx in range(self.num_threads):\n self.task_queue.put((None, (), None))\n\n def __enter__(self): # for 'with' statement\n return self\n\n def __exit__(self, *excinfo):\n self.finish()\n\n def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x, post_func=lambda x: x, max_items_in_flight=None):\n if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4\n assert max_items_in_flight >= 1\n results = []\n retire_idx = [0]\n\n def task_func(prepared, idx):\n return process_func(prepared)\n \n def retire_result():\n processed, (prepared, idx) = self.get_result(task_func)\n results[idx] = processed\n while retire_idx[0] < len(results) and results[retire_idx[0]] is not None:\n yield post_func(results[retire_idx[0]])\n results[retire_idx[0]] = None\n retire_idx[0] += 1\n \n for idx, item in enumerate(item_iterator):\n prepared = pre_func(item)\n results.append(None)\n self.add_task(func=task_func, args=(prepared, idx))\n while retire_idx[0] < idx - max_items_in_flight + 2:\n for res in retire_result(): yield res\n while retire_idx[0] < len(results):\n for res in retire_result(): yield res\n\n#----------------------------------------------------------------------------\n\ndef create_celeba_channel_last(h5_filename, celeba_dir, cx=89, cy=121):\n print('Creating CelebA channel last dataset %s from %s' % (h5_filename, celeba_dir))\n #glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')\n glob_pattern = os.path.join(celeba_dir, '*.jpg')\n image_filenames = sorted(glob.glob(glob_pattern))\n num_images = 202599\n print((len(image_filenames)))\n test = []\n for i in image_filenames:\n # for 
windows user:\n        a = i.split('\\\\')[-1]\n        # for linux user:\n        # a = i.split('/')[-1]\n        a = a.split('.')[0]\n        test.append(int(a))\n    for i in range(1, len(test)):\n        if test[i] != test[i-1] + 1:\n            print((test[i-1], test[i]))\n\n    if len(image_filenames) != num_images:\n        print('Error: Expected to find %d images in %s' % (num_images, glob_pattern))\n        return\n    \n    h5 = HDF5Exporter(h5_filename, 32, 3)\n    for idx in range(num_images):\n        print('%d / %d\\r' % (idx, num_images), end=' ')\n        img = np.asarray(PIL.Image.open(image_filenames[idx]))\n        assert img.shape == (218, 178, 3)\n        img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]\n        #img = img.transpose(2, 0, 1) # HWC => CHW\n        h5.add_images_channel_last(img[np.newaxis])\n\n    print('%-40s\\r' % 'Flushing data...', end=' ')\n    h5.close()\n    print('%-40s\\r' % '', end=' ')\n    print('Added %d images.' % num_images)\n\n#----------------------------------------------------------------------------\n\ndef execute_cmdline(argv):\n    prog = argv[0]\n    parser = argparse.ArgumentParser(\n        prog = prog,\n        description = 'Tool for creating, extracting, and visualizing HDF5 datasets.',\n        epilog = 'Type \"%s <command> -h\" for more information.' % prog)\n    \n    subparsers = parser.add_subparsers(dest='command')\n    def add_command(cmd, desc, example=None):\n        epilog = 'Example: %s %s' % (prog, example) if example is not None else None\n        return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)\n\n    p = add_command( 'create_celeba_channel_last', 'Create HDF5 channel last dataset for CelebA.',\n                     'create_celeba_channel_last celeba-128x128.h5 ~/celeba')\n\n    p.add_argument( 'h5_filename', help='HDF5 file to create')\n    p.add_argument( 'celeba_dir', help='Directory to read CelebA data from')\n    p.add_argument( '--cx', help='Center X coordinate (default: 89)', type=int, default=89)\n    p.add_argument( '--cy', help='Center Y coordinate (default: 121)', type=int, default=121)\n\n    args = parser.parse_args(argv[1:])\n    func = globals()[args.command]\n    del args.command\n    func(**vars(args))\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n    execute_cmdline(sys.argv)\n\n\n#create_celeba_channel_last('datasets/celeba_128x128.h5', 'datasets/img_align_celeba', cx=89, cy=121)\n\n\n#----------------------------------------------------------------------------\n"
] | [
[
"numpy.ceil",
"numpy.zeros",
"numpy.round",
"numpy.exp2",
"numpy.log2"
]
] |
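Each LOD in `add_images_channel_last` above is produced by averaging 2x2 pixel blocks and re-quantizing to uint8. The sketch below isolates that downsampling step; the input batch is random stand-in data rather than CelebA crops, and the stopping size of 4 is arbitrary.

```python
# Sketch of the 2x2 box-filter LOD pyramid used by the HDF5 exporter.
import numpy as np

def halve(img):
    """Average 2x2 blocks of a (N, H, W, C) uint8 batch; returns uint8."""
    f = img.astype(np.float32)
    f = (f[:, 0::2, 0::2, :] + f[:, 0::2, 1::2, :] +
         f[:, 1::2, 0::2, :] + f[:, 1::2, 1::2, :]) * 0.25
    return np.uint8(np.clip(np.round(f), 0, 255))

batch = np.random.randint(0, 256, size=(2, 128, 128, 3), dtype=np.uint8)
lods = [batch]
while lods[-1].shape[1] > 4:          # build 128 -> 64 -> ... -> 4
    lods.append(halve(lods[-1]))
print([lod.shape for lod in lods])
```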
coleman2246/camera-calibration | [
"438220d8db61bb20d74b9ce7e24d70895d762312"
] | [
"generate_calibration.py"
] | [
"import threading\nimport multiprocessing\nfrom multiprocessing import Process, Queue\n\nimport numpy as np\nimport cv2\nimport glob\n\n\n\nclass CameraCalibrator:\n def __init__(self, filename, save_format = \"pickle\"):\n self.format = save_format\n acceptable_formats = [\"pickle\",\"yaml\",\"json\"]\n\n if self.format not in acceptable_formats:\n raise ValueError(\"Invalid Save Format\")\n \n self.filename = filename\n self.objpoints = []\n \n # termination criteria\n self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n\n self.objp = np.zeros((9*6,3), np.float32)\n self.objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)\n\n # Arrays to store object points and image points from all the images.\n \n self.video = \"test.avi\"\n self.cap = cv2.VideoCapture(self.video)\n \n self.lock = threading.Lock()\n \n # Check if camera opened successfully\n if (self.cap.isOpened()== False): \n print(\"Error opening video stream or file\")\n exit()\n self.inqueue = Queue()\n self.images = []\n \n def load_images(self):\n while(self.cap.isOpened()):\n self.lock.acquire()\n ret, frame = self.cap.read()\n self.lock.release()\n if ret == True:\n self.inqueue.put(frame)\n self.images.append(frame)\n else: \n break\n\n def save_calibration(self,ret, mtx, dist, rvecs, tvecs):\n if self.format == \"pickle\":\n camaera_data = {\n \"ret\": ret,\n \"mtx\": mtx,\n \"dist\": dist,\n \"rvecs\": rvecs,\n \"tvecs\": tvecs\n }\n # writes to a pickle file using protocl 2\n with open(self.filename,\"rb\") as f:\n pickle.dump(camera_data, f, 2)\n elif self.format == \"yaml\":\n pass\n elif self.format == \"json\":\n pass\n\n def find_chessboards(self,inqueue,outqueue):\n objpoints = []\n imgpoints = []\n \n while True:\n img = inqueue.get()\n if img is None:\n outqueue.put((objpoints,imgpoints))\n break\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Find the chess board corners\n ret, corners = cv2.findChessboardCorners(gray, (9,6), None)\n # If found, add object points, image points (after refining them)\n if ret == True:\n objpoints.append(self.objp)\n corners2 = cv2.cornerSubPix(gray,corners, (11,11), (-1,-1), self.criteria)\n imgpoints.append(corners)\n # Draw and display the corners\n \n def generate_calibration(self):\n number_of_threads = multiprocessing.cpu_count()\n threads = []\n print(\"Loading Images\")\n for i in range(number_of_threads):\n threads.append(threading.Thread(target=self.load_images))\n threads[i].start()\n \n for index, thread in enumerate(threads):\n thread.join()\n self.cap.release()\n print(\"Images Loaded\")\n\n \n outqueue = multiprocessing.Queue()\n processes = []\n\n for i in range(number_of_threads):\n self.inqueue.put(None)\n print(\"Finding Chessboards\")\n for i in range(number_of_threads):\n p = Process(target=self.find_chessboards, args=(self.inqueue,outqueue,))\n processes.append(p)\n processes[i].start()\n \n for i in range(number_of_threads):\n processes[i].join()\n\n print(\"Done Finding Chessboards\")\n self.gray = cv2.cvtColor(self.images[-1], cv2.COLOR_BGR2GRAY) \n\n objpoints = [] # 3d point in real world space\n imgpoints = [] # 2d points in image plane.\n for i in range(number_of_threads):\n results = outqueue.get()\n objpoints += results[0]\n imgpoints += results[1]\n\n h, w = self.images[-1].shape[:2]\n print(h,w)\n print(\"Calculting Camera Parameters (This May Take A While)\")\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, 
self.gray.shape[::-1], None, None)\n print(\"Done Calculting Camera Parameters\")\n\n newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))\n print(\"Saving Calibration\")\n self.save_calibration(ret, newcameramtx, dist, rvecs, tvecs)\n print(\"Done Saving Calibration\")\nd = CameraCalibrator()\nd.generate_calibration()\n"
] | [
[
"numpy.zeros"
]
] |
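Complementing the corrected `save_calibration` above, the sketch below round-trips a calibration dictionary through pickle in binary mode. The file name and the matrices are dummy placeholders, not a real calibration result.

```python
# Sketch of saving and reloading calibration data with pickle.
import pickle
import numpy as np

camera_data = {
    "ret": 0.42,                # dummy reprojection error
    "mtx": np.eye(3),           # dummy camera matrix
    "dist": np.zeros(5),        # dummy distortion coefficients
    "rvecs": [np.zeros(3)],     # dummy rotation vectors
    "tvecs": [np.zeros(3)],     # dummy translation vectors
}

with open("calibration.pickle", "wb") as f:   # binary mode is required
    pickle.dump(camera_data, f, 2)            # protocol 2 for compatibility

with open("calibration.pickle", "rb") as f:
    loaded = pickle.load(f)
print(loaded["mtx"])
```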
BPTIlt/NERF_for_difficult_objects | [
"a7e8176b43e84d8483f0abdac3ca79acdbf65030"
] | [
"train.py"
] | [
"import os, sys\nfrom opt import get_opts\nimport torch\nfrom collections import defaultdict\n\nfrom torch.utils.data import DataLoader\nfrom datasets import dataset_dict\n\n# models\nfrom models.nerf import Embedding, NeRF\nfrom models.rendering import render_rays\n\n# optimizer, scheduler, visualization\nfrom utils import *\n\n# losses\nfrom losses import loss_dict\n\n# metrics\nfrom metrics import *\n\n# pytorch-lightning\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning import LightningModule, Trainer\nfrom pytorch_lightning.logging import TestTubeLogger, TensorBoardLogger\n\n\n# from dotenv import load_dotenv\n\n\nclass NeRFSystem(LightningModule):\n def __init__(self, hparams):\n super(NeRFSystem, self).__init__()\n self.hparams = hparams\n\n self.loss = loss_dict[hparams.loss_type]()\n\n self.embedding_xyz = Embedding(3, 10) # 10 is the default number\n self.embedding_dir = Embedding(3, 4) # 4 is the default number\n self.embeddings = [self.embedding_xyz, self.embedding_dir]\n\n self.nerf_coarse = NeRF()\n self.models = [self.nerf_coarse]\n if hparams.N_importance > 0:\n self.nerf_fine = NeRF()\n self.models += [self.nerf_fine]\n\n def decode_batch(self, batch):\n rays = batch['rays'] # (B, 8)\n rgbs = batch['rgbs'] # (B, 3)\n return rays, rgbs\n\n def forward(self, rays):\n \"\"\"Do batched inference on rays using chunk.\"\"\"\n B = rays.shape[0]\n results = defaultdict(list)\n for i in range(0, B, self.hparams.chunk):\n rendered_ray_chunks = \\\n render_rays(self.models,\n self.embeddings,\n rays[i:i+self.hparams.chunk],\n self.hparams.N_samples,\n self.hparams.use_disp,\n self.hparams.perturb,\n self.hparams.noise_std,\n self.hparams.N_importance,\n self.hparams.chunk, # chunk size is effective in val mode\n self.train_dataset.white_back)\n\n for k, v in rendered_ray_chunks.items():\n results[k] += [v]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results\n\n def prepare_data(self):\n dataset = dataset_dict[self.hparams.dataset_name]\n kwargs = {'root_dir': self.hparams.root_dir,\n 'img_wh': tuple(self.hparams.img_wh)}\n if self.hparams.dataset_name == 'llff':\n kwargs['spheric_poses'] = self.hparams.spheric_poses\n kwargs['val_num'] = self.hparams.num_gpus\n self.train_dataset = dataset(split='train', **kwargs)\n self.val_dataset = dataset(split='val', **kwargs)\n\n def configure_optimizers(self):\n self.optimizer = get_optimizer(self.hparams, self.models)\n scheduler = get_scheduler(self.hparams, self.optimizer)\n\n return [self.optimizer], [scheduler]\n\n def train_dataloader(self):\n return DataLoader(self.train_dataset,\n shuffle=True,\n num_workers=4,\n batch_size=self.hparams.batch_size,\n pin_memory=True)\n\n def val_dataloader(self):\n return DataLoader(self.val_dataset,\n shuffle=False,\n num_workers=4,\n batch_size=1, # validate one image (H*W rays) at a time\n pin_memory=True)\n\n def training_step(self, batch, batch_nb):\n log = {'lr': get_learning_rate(self.optimizer)}\n rays, rgbs = self.decode_batch(batch)\n results = self(rays)\n log['train/loss'] = loss = self.loss(results, rgbs)\n typ = 'fine' if 'rgb_fine' in results else 'coarse'\n\n with torch.no_grad():\n psnr_ = psnr(results[f'rgb_{typ}'], rgbs)\n log['train/psnr'] = psnr_\n\n return {'loss': loss,\n 'progress_bar': {'train_psnr': psnr_},\n 'log': log\n }\n\n def validation_step(self, batch, batch_nb):\n rays, rgbs = self.decode_batch(batch)\n rays = rays.squeeze() # (H*W, 3)\n rgbs = rgbs.squeeze() # (H*W, 3)\n results = self(rays)\n log = 
{'val_loss': self.loss(results, rgbs)}\n typ = 'fine' if 'rgb_fine' in results else 'coarse'\n\n if batch_nb == 0:\n W, H = self.hparams.img_wh\n img = results[f'rgb_{typ}'].view(H, W, 3).cpu()\n img = img.permute(2, 0, 1) # (3, H, W)\n img_gt = rgbs.view(H, W, 3).permute(2, 0, 1).cpu() # (3, H, W)\n depth = visualize_depth(results[f'depth_{typ}'].view(H, W)) # (3, H, W)\n stack = torch.stack([img_gt, img, depth]) # (3, 3, H, W)\n self.logger.experiment.add_images('val/GT_pred_depth',\n stack, self.global_step)\n\n log['val_psnr'] = psnr(results[f'rgb_{typ}'], rgbs)\n return log\n\n def validation_epoch_end(self, outputs):\n mean_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n mean_psnr = torch.stack([x['val_psnr'] for x in outputs]).mean()\n\n return {'progress_bar': {'val_loss': mean_loss,\n 'val_psnr': mean_psnr},\n 'log': {'val/loss': mean_loss,\n 'val/psnr': mean_psnr}\n }\n\n\nif __name__ == '__main__':\n # load_dotenv()\n\n hparams = get_opts()\n system = NeRFSystem(hparams)\n checkpoint_callback = ModelCheckpoint(filepath=os.path.join(f'ckpts/{hparams.exp_name}/',\n '{epoch:d}'),\n monitor='val/loss',\n mode='min',\n save_top_k=hparams.num_epochs)\n\n logger = TestTubeLogger(\n save_dir=\"logs\",\n name=hparams.exp_name,\n debug=False,\n create_git_tag=False\n )\n\n # tensorboard_logger = TensorBoardLogger(\"tb_logs\", name=hparams.exp_name)\n\n # wandb_logger = WandbLogger(\n # project=\"nerf\",\n # entity=\"bpti\",\n # name=f\"{hparams.exp_name}\"\n # )\n\n trainer = Trainer(max_epochs=hparams.num_epochs,\n checkpoint_callback=checkpoint_callback,\n resume_from_checkpoint=hparams.ckpt_path,\n logger=logger,\n early_stop_callback=None,\n weights_summary=None,\n progress_bar_refresh_rate=1,\n gpus=hparams.num_gpus,\n distributed_backend='ddp' if hparams.num_gpus>1 else None,\n num_sanity_val_steps=1,\n benchmark=True,\n profiler=hparams.num_gpus==1)\n\n trainer.fit(system)\n"
] | [
[
"torch.cat",
"torch.stack",
"torch.utils.data.DataLoader",
"torch.no_grad"
]
] |
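A minimal, self-contained sketch (not from the repo above; the `Linear` model and random ray tensor are stand-ins) of how the four listed APIs combine in the chunked-inference pattern the `forward` and validation code uses:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

model = torch.nn.Linear(8, 3)      # stand-in for the NeRF MLP
rays = torch.randn(1024, 8)        # (B, 8) batch of rays
chunk = 256

# Render in chunks, then torch.cat the partial outputs back into one batch.
chunks = [model(rays[i:i + chunk]) for i in range(0, rays.shape[0], chunk)]
rgb = torch.cat(chunks, 0)         # (B, 3)

# Metrics are computed under torch.no_grad, as in training_step.
with torch.no_grad():
    mse = torch.mean((rgb - torch.zeros_like(rgb)) ** 2)

# torch.stack adds a new leading dim, e.g. stacking GT/prediction images.
pair = torch.stack([rgb, rgb])     # (2, B, 3)

# DataLoader batches the rays exactly like train_dataloader does.
loader = DataLoader(TensorDataset(rays), batch_size=chunk, shuffle=True)
```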
MatiMoreyra/tensorflow-yolov4-tflite | [
"6b747d65b8dbc6848bcb05ab529d84445e25e782"
] | [
"convert_trt.py"
] | [
"from absl import app, flags, logging\nfrom absl.flags import FLAGS\nimport tensorflow as tf\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\nif len(physical_devices) > 0:\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\nimport numpy as np\nimport cv2\nfrom tensorflow.python.compiler.tensorrt import trt_convert as trt\nimport core.utils as utils\nfrom tensorflow.python.saved_model import signature_constants\nimport os\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\n\nflags.DEFINE_string('weights', './_checkpoints/yolov4-416', 'path to weights file')\nflags.DEFINE_string('output', './_checkpoints/yolov4-trt-fp16-416', 'path to output')\nflags.DEFINE_integer('input_size', 512, 'path to output')\nflags.DEFINE_string('quantize_mode', 'float16', 'quantize mode (int8, float16)')\nflags.DEFINE_integer('loop', 8, 'loop')\n\ndef representative_data_gen():\n yield tf.random.normal((FLAGS.loop, FLAGS.input_size, FLAGS.input_size, 3)),\n\ndef save_trt():\n\n if FLAGS.quantize_mode == 'int8':\n conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(\n precision_mode=trt.TrtPrecisionMode.INT8,\n max_workspace_size_bytes=4000000000,\n use_calibration=True)\n converter = trt.TrtGraphConverterV2(\n input_saved_model_dir=FLAGS.weights,\n conversion_params=conversion_params)\n converter.convert(calibration_input_fn=representative_data_gen)\n elif FLAGS.quantize_mode == 'float16':\n conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(\n precision_mode=trt.TrtPrecisionMode.FP16,\n max_workspace_size_bytes=4000000000)\n converter = trt.TrtGraphConverterV2(\n input_saved_model_dir=FLAGS.weights, conversion_params=conversion_params)\n converter.convert()\n else :\n conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(\n precision_mode=trt.TrtPrecisionMode.FP32,\n max_workspace_size_bytes=4000000000)\n converter = trt.TrtGraphConverterV2(\n input_saved_model_dir=FLAGS.weights, conversion_params=conversion_params)\n converter.convert()\n\n converter.build(input_fn=representative_data_gen)\n converter.save(output_saved_model_dir=FLAGS.output)\n print('Done Converting to TF-TRT')\n\n saved_model_loaded = tf.saved_model.load(FLAGS.output)\n graph_func = saved_model_loaded.signatures[\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n trt_graph = graph_func.graph.as_graph_def()\n for n in trt_graph.node:\n print(n.op)\n if n.op == \"TRTEngineOp\":\n print(\"Node: %s, %s\" % (n.op, n.name.replace(\"/\", \"_\")))\n else:\n print(\"Exclude Node: %s, %s\" % (n.op, n.name.replace(\"/\", \"_\")))\n logging.info(\"model saved to: {}\".format(FLAGS.output))\n\n trt_engine_nodes = len([1 for n in trt_graph.node if str(n.op) == 'TRTEngineOp'])\n print(\"numb. of trt_engine_nodes in TensorRT graph:\", trt_engine_nodes)\n all_nodes = len([1 for n in trt_graph.node])\n print(\"numb. of all_nodes in TensorRT graph:\", all_nodes)\n\ndef main(_argv):\n config = ConfigProto()\n config.gpu_options.allow_growth = True\n session = InteractiveSession(config=config)\n save_trt()\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n\n\n"
] | [
[
"tensorflow.python.compiler.tensorrt.trt_convert.TrtGraphConverterV2",
"tensorflow.random.normal",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.InteractiveSession",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.python.compiler.tensorrt.trt_convert.DEFAULT_TRT_CONVERSION_PARAMS._replace",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.saved_model.load"
]
] |
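For context, a minimal FP16 conversion sketch built from the APIs listed above; `my_saved_model` and `my_trt_model` are placeholder paths, and the 416x416 input shape is an assumption:

```python
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt

# Enable memory growth so TF does not grab the whole GPU up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)

params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
    precision_mode=trt.TrtPrecisionMode.FP16)
converter = trt.TrtGraphConverterV2(
    input_saved_model_dir='my_saved_model',  # placeholder path
    conversion_params=params)
converter.convert()

def input_fn():
    # Representative input so the TRT engines are built ahead of time.
    yield (tf.random.normal((1, 416, 416, 3)),)

converter.build(input_fn=input_fn)
converter.save(output_saved_model_dir='my_trt_model')  # placeholder path
```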
andrewfullard/tardis | [
"39994c0a509650c1300474590e05e53d979ae2ac"
] | [
"tardis/widgets/tests/test_shell_info.py"
] | [
"import pytest\nimport numpy as np\nimport pandas.testing as pdt\n\nfrom tardis.widgets.shell_info import (\n BaseShellInfo,\n SimulationShellInfo,\n HDFShellInfo,\n ShellInfoWidget,\n)\n\n\[email protected](scope=\"class\")\ndef base_shell_info(simulation_verysimple):\n return BaseShellInfo(\n simulation_verysimple.model.t_radiative,\n simulation_verysimple.model.w,\n simulation_verysimple.plasma.abundance,\n simulation_verysimple.plasma.number_density,\n simulation_verysimple.plasma.ion_number_density,\n simulation_verysimple.plasma.level_number_density,\n )\n\n\[email protected](scope=\"class\")\ndef simulation_shell_info(simulation_verysimple):\n return SimulationShellInfo(simulation_verysimple)\n\n\[email protected](scope=\"class\")\ndef hdf_shell_info(hdf_file_path, simulation_verysimple):\n simulation_verysimple.to_hdf(\n hdf_file_path, overwrite=True\n ) # save sim at hdf_file_path\n return HDFShellInfo(hdf_file_path)\n\n\nclass TestBaseShellInfo:\n def test_shells_data(self, base_shell_info, simulation_verysimple):\n shells_data = base_shell_info.shells_data()\n assert shells_data.shape == (\n len(simulation_verysimple.model.t_radiative),\n 2,\n )\n assert np.allclose(\n shells_data.iloc[:, 0].map(np.float),\n simulation_verysimple.model.t_radiative.value,\n )\n assert np.allclose(\n shells_data.iloc[:, 1].map(np.float), simulation_verysimple.model.w\n )\n\n @pytest.mark.parametrize(\"shell_num\", [1, 20])\n def test_element_count_data(\n self, base_shell_info, simulation_verysimple, shell_num\n ):\n element_count_data = base_shell_info.element_count(1)\n assert element_count_data.shape == (\n len(simulation_verysimple.plasma.abundance[shell_num - 1]),\n 2,\n )\n assert np.allclose(\n element_count_data.iloc[:, -1].map(np.float),\n simulation_verysimple.plasma.abundance[shell_num - 1],\n )\n\n @pytest.mark.parametrize((\"atomic_num\", \"shell_num\"), [(12, 1), (20, 20)])\n def test_ion_count_data(\n self, base_shell_info, simulation_verysimple, atomic_num, shell_num\n ):\n ion_count_data = base_shell_info.ion_count(atomic_num, shell_num)\n sim_ion_number_density = (\n simulation_verysimple.plasma.ion_number_density[shell_num - 1].loc[\n atomic_num\n ]\n )\n sim_element_number_density = (\n simulation_verysimple.plasma.number_density.loc[\n atomic_num, shell_num - 1\n ]\n )\n assert ion_count_data.shape == (len(sim_ion_number_density), 2)\n assert np.allclose(\n ion_count_data.iloc[:, -1].map(np.float),\n sim_ion_number_density / sim_element_number_density,\n )\n\n @pytest.mark.parametrize(\n (\"ion_num\", \"atomic_num\", \"shell_num\"), [(2, 12, 1), (3, 20, 20)]\n )\n def test_level_count_data(\n self,\n base_shell_info,\n simulation_verysimple,\n ion_num,\n atomic_num,\n shell_num,\n ):\n level_count_data = base_shell_info.level_count(\n ion_num, atomic_num, shell_num\n )\n sim_level_number_density = (\n simulation_verysimple.plasma.level_number_density[\n shell_num - 1\n ].loc[atomic_num, ion_num]\n )\n sim_ion_number_density = (\n simulation_verysimple.plasma.ion_number_density[shell_num - 1].loc[\n atomic_num, ion_num\n ]\n )\n assert level_count_data.shape == (len(sim_level_number_density), 1)\n assert np.allclose(\n level_count_data.iloc[:, 0].map(np.float),\n sim_level_number_density / sim_ion_number_density,\n )\n\n\nclass TestSimulationShellInfo(TestBaseShellInfo):\n # Override the base_shell_info fixture to use value of simulation_shell_info fixture\n @pytest.fixture\n def base_shell_info(self, simulation_shell_info):\n return simulation_shell_info\n\n\nclass 
TestHDFShellInfo(TestBaseShellInfo):\n # Override the base_shell_info fixture to use value of hdf_shell_info fixture\n @pytest.fixture\n def base_shell_info(self, hdf_shell_info):\n return hdf_shell_info\n\n\nclass TestShellInfoWidget:\n # Indices of each table to select for testing\n select_shell_num = 4\n select_atomic_num = 12\n select_ion_num = 3\n\n @pytest.fixture(scope=\"class\")\n def shell_info_widget(self, base_shell_info):\n shell_info_widget = ShellInfoWidget(base_shell_info)\n # To attach event listeners to table widgets of shell_info_widget\n _ = shell_info_widget.display()\n return shell_info_widget\n\n def test_selection_on_shells_table(\n self, base_shell_info, shell_info_widget\n ):\n shell_info_widget.shells_table.change_selection([self.select_shell_num])\n\n expected_element_count = base_shell_info.element_count(\n self.select_shell_num\n )\n pdt.assert_frame_equal(\n expected_element_count, shell_info_widget.element_count_table.df\n )\n\n expected_ion_count = base_shell_info.ion_count(\n expected_element_count.index[0], self.select_shell_num\n )\n pdt.assert_frame_equal(\n expected_ion_count, shell_info_widget.ion_count_table.df\n )\n\n expected_level_count = base_shell_info.level_count(\n expected_ion_count.index[0],\n expected_element_count.index[0],\n self.select_shell_num,\n )\n pdt.assert_frame_equal(\n expected_level_count, shell_info_widget.level_count_table.df\n )\n\n def test_selection_on_element_count_table(\n self, base_shell_info, shell_info_widget\n ):\n shell_info_widget.element_count_table.change_selection(\n [self.select_atomic_num]\n )\n\n expected_ion_count = base_shell_info.ion_count(\n self.select_atomic_num, self.select_shell_num\n )\n pdt.assert_frame_equal(\n expected_ion_count, shell_info_widget.ion_count_table.df\n )\n\n expected_level_count = base_shell_info.level_count(\n expected_ion_count.index[0],\n self.select_atomic_num,\n self.select_shell_num,\n )\n pdt.assert_frame_equal(\n expected_level_count, shell_info_widget.level_count_table.df\n )\n\n def test_selection_on_ion_count_table(\n self, base_shell_info, shell_info_widget\n ):\n shell_info_widget.ion_count_table.change_selection(\n [self.select_ion_num]\n )\n\n expected_level_count = base_shell_info.level_count(\n self.select_ion_num, self.select_atomic_num, self.select_shell_num\n )\n pdt.assert_frame_equal(\n expected_level_count, shell_info_widget.level_count_table.df\n )\n"
] | [
[
"pandas.testing.assert_frame_equal"
]
] |
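The single listed API carries all the assertions in these tests; a minimal sketch of its behavior (the data values are illustrative only):

```python
import pandas as pd
import pandas.testing as pdt

expected = pd.DataFrame({'fraction': [0.7, 0.3]}, index=[12, 20])
actual = expected.copy()

# Passes silently; on a mismatch it raises AssertionError with a cell-level diff.
pdt.assert_frame_equal(expected, actual)
```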
rusty1s/pytorch_sparse | [
"7c15c3cdac46e9653ecf29a5eda6af45432fd85e"
] | [
"setup.py"
] | [
"import os\nimport sys\nimport glob\nimport os.path as osp\nfrom itertools import product\nfrom setuptools import setup, find_packages\n\nimport torch\nfrom torch.__config__ import parallel_info\nfrom torch.utils.cpp_extension import BuildExtension\nfrom torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME\n\nWITH_CUDA = torch.cuda.is_available() and CUDA_HOME is not None\nsuffices = ['cpu', 'cuda'] if WITH_CUDA else ['cpu']\nif os.getenv('FORCE_CUDA', '0') == '1':\n suffices = ['cuda', 'cpu']\nif os.getenv('FORCE_ONLY_CUDA', '0') == '1':\n suffices = ['cuda']\nif os.getenv('FORCE_ONLY_CPU', '0') == '1':\n suffices = ['cpu']\n\nBUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1'\n\nWITH_METIS = True if os.getenv('WITH_METIS', '0') == '1' else False\nWITH_MTMETIS = True if os.getenv('WITH_MTMETIS', '0') == '1' else False\n\n\ndef get_extensions():\n extensions = []\n\n extensions_dir = osp.join('csrc')\n main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))\n\n for main, suffix in product(main_files, suffices):\n define_macros = []\n libraries = []\n if WITH_METIS:\n define_macros += [('WITH_METIS', None)]\n libraries += ['metis']\n if WITH_MTMETIS:\n define_macros += [('WITH_MTMETIS', None)]\n define_macros += [('MTMETIS_64BIT_VERTICES', None)]\n define_macros += [('MTMETIS_64BIT_EDGES', None)]\n define_macros += [('MTMETIS_64BIT_WEIGHTS', None)]\n define_macros += [('MTMETIS_64BIT_PARTITIONS', None)]\n libraries += ['mtmetis', 'wildriver']\n extra_compile_args = {'cxx': ['-O2']}\n extra_link_args = ['-s']\n\n info = parallel_info()\n if ('backend: OpenMP' in info and 'OpenMP not found' not in info\n and sys.platform != 'darwin'):\n extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP']\n if sys.platform == 'win32':\n extra_compile_args['cxx'] += ['/openmp']\n else:\n extra_compile_args['cxx'] += ['-fopenmp']\n else:\n print('Compiling without OpenMP...')\n\n if suffix == 'cuda':\n define_macros += [('WITH_CUDA', None)]\n nvcc_flags = os.getenv('NVCC_FLAGS', '')\n nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')\n nvcc_flags += ['--expt-relaxed-constexpr', '-O2']\n extra_compile_args['nvcc'] = nvcc_flags\n\n if sys.platform == 'win32':\n extra_link_args += ['cusparse.lib']\n else:\n extra_link_args += ['-lcusparse', '-l', 'cusparse']\n\n name = main.split(os.sep)[-1][:-4]\n sources = [main]\n\n path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')\n if osp.exists(path):\n sources += [path]\n\n path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')\n if suffix == 'cuda' and osp.exists(path):\n sources += [path]\n\n Extension = CppExtension if suffix == 'cpu' else CUDAExtension\n extension = Extension(\n f'torch_sparse._{name}_{suffix}',\n sources,\n include_dirs=[extensions_dir],\n define_macros=define_macros,\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args,\n libraries=libraries,\n )\n extensions += [extension]\n\n return extensions\n\n\ninstall_requires = ['scipy']\nsetup_requires = []\ntests_require = ['pytest', 'pytest-runner', 'pytest-cov']\n\nsetup(\n name='torch_sparse',\n version='0.6.12',\n author='Matthias Fey',\n author_email='[email protected]',\n url='https://github.com/rusty1s/pytorch_sparse',\n description=('PyTorch Extension Library of Optimized Autograd Sparse '\n 'Matrix Operations'),\n keywords=['pytorch', 'sparse', 'sparse-matrices', 'autograd'],\n license='MIT',\n python_requires='>=3.6',\n install_requires=install_requires,\n setup_requires=setup_requires,\n tests_require=tests_require,\n 
extras_require={'test': tests_require},\n ext_modules=get_extensions() if not BUILD_DOCS else [],\n cmdclass={\n 'build_ext':\n BuildExtension.with_options(no_python_abi_suffix=True, use_ninja=False)\n },\n packages=find_packages(),\n)\n"
] | [
[
"torch.cuda.is_available",
"torch.__config__.parallel_info",
"torch.utils.cpp_extension.BuildExtension.with_options"
]
] |
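A minimal sketch, assuming nothing beyond the three listed calls, of the build-time probes this setup.py relies on:

```python
import torch
from torch.__config__ import parallel_info
from torch.utils.cpp_extension import BuildExtension

# Gate CUDA extension builds on an available GPU runtime.
with_cuda = torch.cuda.is_available()

# parallel_info() returns a text report; the setup script greps it for OpenMP.
info = parallel_info()
with_openmp = 'backend: OpenMP' in info and 'OpenMP not found' not in info

# cmdclass entry as used in setup(): keep plain .so names and disable ninja.
cmdclass = {'build_ext': BuildExtension.with_options(
    no_python_abi_suffix=True, use_ninja=False)}
print(with_cuda, with_openmp, cmdclass)
```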
haje01/distper | [
"bac4a24b2378600096de5ae0f8c0eae0c9c40034"
] | [
"actor.py"
] | [
"\"\"\"์กํฐ ๋ชจ๋.\"\"\"\nimport os\nimport time\nimport pickle\nfrom io import BytesIO\nfrom collections import defaultdict\n\nimport zmq\nimport numpy as np\nimport torch\n\nfrom common import ReplayBuffer, PrioReplayBuffer, ENV_NAME, ActorInfo,\\\n calc_loss, get_logger, DQN, async_recv, weights_init, array_experience,\\\n PRIORITIZED, Experience, float2byte, byte2float\nfrom wrappers import make_env\n\nSHOW_FREQ = 100 # ๋ก๊ทธ ์ถ๋ ฅ ์ฃผ๊ธฐ\nPRIO_BUF_SIZE = 1000 # ์ฐ์ ๋ฒํผ ์ ์ด ์\nSEND_SIZE = 100 # ๋ณด๋ผ ์ ์ด ์\nSEND_FREQ = 100 # ๋ณด๋ผ ๋น๋\nMODEL_UPDATE_FREQ = 300 # ๋ฌ๋์ ๋ชจ๋ธ ๊ฐ์ ธ์ฌ ์ฃผ๊ธฐ\nEPS_BASE = 0.4 # eps ๊ณ์ฐ์ฉ\nEPS_ALPHA = 3 # eps ๊ณ์ฐ์ฉ\n\nactor_id = int(os.environ.get('ACTOR_ID', '-1')) # ์กํฐ์ ID\nassert actor_id != -1\nnum_actor = int(os.environ.get('NUM_ACTOR', '-1')) # ์ ์ฒด ์กํฐ ์\nassert num_actor != -1\nmaster_ip = os.environ.get('MASTER_IP') # ๋ง์คํฐ IP\nassert master_ip is not None\n\nlog = get_logger()\n\n\ndef init_zmq():\n \"\"\"ZMQ๊ด๋ จ ์ด๊ธฐํ.\"\"\"\n context = zmq.Context()\n\n # ๋ฌ๋์์ ๋ฐ์ ์์บฃ\n lrn_sock = context.socket(zmq.SUB)\n lrn_sock.setsockopt_string(zmq.SUBSCRIBE, '')\n lrn_sock.setsockopt(zmq.CONFLATE, 1)\n lrn_sock.connect(\"tcp://{}:5557\".format(master_ip))\n\n # ๋ฒํผ๋ก ๋ณด๋ผ ์์ผ\n buf_sock = context.socket(zmq.PUSH)\n buf_sock.connect(\"tcp://{}:5558\".format(master_ip))\n return context, lrn_sock, buf_sock\n\n\nclass Agent:\n \"\"\"์์ด์ ํธ.\"\"\"\n\n def __init__(self, env, memory, epsilon, prioritized):\n \"\"\"์ด๊ธฐํ.\"\"\"\n self.env = env\n self.memory = memory\n self.epsilon = epsilon\n self.prioritized = prioritized\n self._reset()\n\n def _reset(self):\n \"\"\"๋ฆฌ์
๊ตฌํ.\"\"\"\n self.state = float2byte(self.env.reset())\n self.tot_reward = 0.0\n self.action_cnt = defaultdict(int)\n\n def show_action_rate(self):\n \"\"\"๋์๋ณ ์ ํ ๋น์จ ํ์.\"\"\"\n meanings = self.env.unwrapped.get_action_meanings()\n total = float(sum(self.action_cnt.values()))\n if total == 0:\n return\n msg = \"actions - \"\n for i, m in enumerate(meanings):\n msg += \"{}: {:.2f}, \".format(meanings[i],\n self.action_cnt[i] / total)\n log(msg)\n\n def play_step(self, net, tgt_net, epsilon, frame_idx):\n \"\"\"ํ๋ ์ด ์งํ.\"\"\"\n done_reward = None\n\n if np.random.random() < self.epsilon:\n # ์์ ๋์\n action = self.env.action_space.sample()\n else:\n # ๊ฐ์น๊ฐ ๋์ ๋์.\n state = byte2float(self.state)\n state_a = np.array([state])\n state_v = torch.tensor(state_a)\n q_vals_v = net(state_v)\n _, act_v = torch.max(q_vals_v, dim=1)\n action = int(act_v.item())\n self.action_cnt[action] += 1\n\n # ํ๊ฒฝ ์งํ\n new_state, reward, is_done, _ = self.env.step(action)\n new_state = float2byte(new_state)\n self.tot_reward += reward\n\n # ๋ฒํผ์ ์ถ๊ฐ\n if self.prioritized:\n exp = array_experience(self.state, action, reward, is_done,\n new_state)\n else:\n exp = Experience(self.state, action, reward, is_done, new_state)\n self.append_sample(exp, net, tgt_net)\n self.state = new_state\n\n if frame_idx % SHOW_FREQ == 0:\n log(\"{}: buffer size {} \".format(frame_idx, len(self.memory)))\n\n # ์ข
๋ฃ๋์์ผ๋ฉด ๋ฆฌ์
\n if is_done:\n done_reward = self.tot_reward\n self._reset()\n\n # ์ํผ์๋ ๋ฆฌ์๋ ๋ฐํ\n return done_reward\n\n def append_sample(self, sample, net, tgt_net):\n \"\"\"์ํ์ ์๋ฌ๋ฅผ ๊ตฌํด ์ํ๊ณผ ํจ๊ป ์ถ๊ฐ.\"\"\"\n if self.prioritized:\n loss_t, _, _ = calc_loss(sample, net, tgt_net)\n error = float(loss_t)\n self.memory.populate([sample], [error])\n else:\n self.memory.append(sample)\n\n def send_replay(self, buf_sock, info):\n \"\"\"์ฐ์ ์์๋ก ์ํ๋งํ ๋ฆฌํ๋ ์ด ๋ฐ์ดํฐ์ ์ ๋ณด๋ฅผ ์ ์ก.\"\"\"\n log(\"send replay - speed {} f/s\".format(info.speed))\n if self.prioritized:\n # ์ฐ์ ํ์ ์ํ๋ง ํ์ฌ ๋ณด๋\n batch, _, prios = self.memory.sample(SEND_SIZE)\n payload = pickle.dumps((actor_id, batch, prios, info))\n else:\n # ์๋๋ฉด ๋ค๋ณด๋\n payload = pickle.dumps((actor_id, self.memory, info))\n self.memory.clear()\n\n buf_sock.send(payload)\n\n\ndef receive_model(lrn_sock, net, tgt_net, block):\n \"\"\"๋ฌ๋์๊ฒ์ ๋ชจ๋ธ์ ๋ฐ์.\"\"\"\n log(\"receive model from learner.\")\n if block:\n payload = lrn_sock.recv()\n else:\n payload = async_recv(lrn_sock)\n\n if payload is None:\n # log(\"no new model. use old one.\")\n return net, tgt_net\n\n bio = BytesIO(payload)\n log(\"received new model.\")\n net = torch.load(bio, map_location={'cuda:0': 'cpu'})\n tgt_net = torch.load(bio, map_location={'cuda:0': 'cpu'})\n log('net')\n log(net.state_dict()['conv.0.weight'][0][0])\n log('tgt_net')\n log(tgt_net.state_dict()['conv.0.weight'][0][0])\n return net, tgt_net\n\n\ndef main():\n \"\"\"๋ฉ์ธ.\"\"\"\n # ํ๊ฒฝ ์์ฑ\n env = make_env(ENV_NAME)\n net = DQN(env.observation_space.shape, env.action_space.n)\n net.apply(weights_init)\n tgt_net = DQN(env.observation_space.shape, env.action_space.n)\n tgt_net.load_state_dict(net.state_dict())\n\n if PRIORITIZED:\n memory = PrioReplayBuffer(PRIO_BUF_SIZE)\n else:\n memory = ReplayBuffer(SEND_SIZE)\n\n # ๊ณ ์ eps๋ก ์์ด์ ํธ ์์ฑ\n epsilon = EPS_BASE ** (1 + actor_id / (num_actor - 1) * EPS_ALPHA)\n agent = Agent(env, memory, epsilon, PRIORITIZED)\n log(\"Actor {} - epsilon {:.5f}\".format(actor_id, epsilon))\n\n # zmq ์ด๊ธฐํ\n context, lrn_sock, buf_sock = init_zmq()\n # ๋ฌ๋์๊ฒ์ ๊ธฐ๋ณธ ๊ฐ์ค์น ๋ฐ๊ณ ์์\n net, tgt_net = receive_model(lrn_sock, net, tgt_net, True)\n\n #\n # ์๋ฎฌ๋ ์ด์
\n #\n episode = frame_idx = 0\n p_time = p_frame = None\n p_reward = -50.0\n\n while True:\n frame_idx += 1\n\n # ์คํ
์งํ (์ํผ์๋ ์ข
๋ฃ๋ฉด reset๊น์ง)\n reward = agent.play_step(net, tgt_net, epsilon, frame_idx)\n\n # ๋ฆฌ์๋๊ฐ ์๋ ๊ฒฝ์ฐ (์ํผ์๋ ์ข
๋ฃ)\n if reward is not None:\n episode += 1\n p_reward = reward\n\n # ๋ณด๋ด๊ธฐ\n if frame_idx % SEND_FREQ == 0:\n # ํ์ต๊ด๋ จ ์ ๋ณด\n if p_time is None:\n speed = 0.0\n else:\n speed = (frame_idx - p_frame) / (time.time() - p_time)\n info = ActorInfo(episode, frame_idx, p_reward, speed)\n # ๋ฆฌํ๋ ์ด ์ ๋ณด์ ์ ๋ณด ์ ์ก\n agent.send_replay(buf_sock, info)\n # ๋์ ์ ํ ํ์\n agent.show_action_rate()\n\n p_time = time.time()\n p_frame = frame_idx\n\n # ์๋ก์ด ๋ชจ๋ธ ๋ฐ๊ธฐ\n net, tgt_net = receive_model(lrn_sock, net, tgt_net, False)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"torch.max",
"torch.tensor",
"torch.load",
"numpy.random.random"
]
] |
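A minimal epsilon-greedy sketch (not from the repo above; the linear net and 4-dim state are stand-ins) showing how the listed numpy/torch calls fit together in `play_step`:

```python
import numpy as np
import torch

net = torch.nn.Linear(4, 2)              # stand-in for the DQN
state = np.zeros(4, dtype=np.float32)    # stand-in observation
epsilon = 0.1

if np.random.random() < epsilon:
    action = np.random.randint(2)        # explore: random action
else:
    state_v = torch.tensor(np.array([state]))  # add batch dimension
    q_vals = net(state_v)                      # (1, n_actions)
    _, act_v = torch.max(q_vals, dim=1)        # greedy: argmax Q
    action = int(act_v.item())
print(action)
```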